Python torch.ones_like() Examples

The following are 30 code examples of torch.ones_like(), drawn from open-source projects. The source file and originating project are listed above each example.
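Note: torch.ones_like(input) returns a tensor of ones with the same shape, dtype, and device as input (any of which can be overridden by keyword arguments). A minimal self-contained sketch before the project examples:

import torch

x = torch.randn(2, 3, dtype=torch.float64)
ones = torch.ones_like(x)                        # shape (2, 3), dtype float64, same device as x
assert ones.shape == x.shape and ones.dtype == x.dtype

# dtype can be overridden; this is equivalent to torch.ones(x.shape, dtype=torch.bool)
bool_ones = torch.ones_like(x, dtype=torch.bool)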
Example #1
Source File: adaptation_networks.py    From cnaps with MIT License
def forward(self, x):
        """
        Forward pass through adaptation network.
        :param x: (torch.tensor) Input representation to network (task level representation z).
        :return: (list::dictionaries) Dictionary for every block in layer. Dictionary contains all the parameters
                 necessary to adapt layer in base network. Base network is aware of dict structure and can pull params
                 out during forward pass.
        """
        x = self.shared_layer(x)
        block_params = []
        for block in range(self.num_blocks):
            block_param_dict = {
                'gamma1': self.gamma1_processors[block](x).squeeze() * self.gamma1_regularizers[block] +
                          torch.ones_like(self.gamma1_regularizers[block]),
                'beta1': self.beta1_processors[block](x).squeeze() * self.beta1_regularizers[block],
                'gamma2': self.gamma2_processors[block](x).squeeze() * self.gamma2_regularizers[block] +
                          torch.ones_like(self.gamma2_regularizers[block]),
                'beta2': self.beta2_processors[block](x).squeeze() * self.beta2_regularizers[block]
            }
            block_params.append(block_param_dict)
        return block_params 
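Note: adding torch.ones_like(regularizer) centers each generated scale around 1, so an untrained adaptation network initially leaves the base network's activations roughly unchanged. A minimal sketch of that identity-at-init property (the names below are illustrative, not from the cnaps code, and assume the regularizer starts at zero):

import torch

regularizer = torch.zeros(64)        # learned parameter, assumed zero-initialized here
generated = torch.randn(64)          # stand-in for a gamma processor's output
gamma = generated * regularizer + torch.ones_like(regularizer)
assert torch.allclose(gamma, torch.ones(64))   # scaling is the identity at init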
Example #2
Source File: timeseries.py    From pyfilter with MIT License
def test_MultiDimensional(self):
        mu = torch.zeros(2)
        scale = torch.ones_like(mu)

        shape = 1000, 100

        mvn = Independent(Normal(mu, scale), 1)
        mvn = AffineProcess((f, g), (1., 1.), mvn, mvn)

        # ===== Initialize ===== #
        x = mvn.i_sample(shape)

        # ===== Propagate ===== #
        num = 100
        samps = [x]
        for t in range(num):
            samps.append(mvn.propagate(samps[-1]))

        samps = torch.stack(samps)
        self.assertEqual(samps.size(), torch.Size([num + 1, *shape, *mu.shape]))

        # ===== Sample path ===== #
        path = mvn.sample_path(num + 1, shape)
        self.assertEqual(samps.shape, path.shape) 
Example #3
Source File: utils.py    From pyfilter with MIT License
def test_Stacker(self):
        # ===== Define a mix of parameters ====== #
        zerod = Parameter(Normal(0., 1.)).sample_((1000,))
        oned_luring = Parameter(Normal(torch.tensor([0.]), torch.tensor([1.]))).sample_(zerod.shape)
        oned = Parameter(MultivariateNormal(torch.zeros(2), torch.eye(2))).sample_(zerod.shape)

        mu = torch.zeros((3, 3))
        norm = Independent(Normal(mu, torch.ones_like(mu)), 2)
        twod = Parameter(norm).sample_(zerod.shape)

        # ===== Stack ===== #
        params = (zerod, oned, oned_luring, twod)
        stacked = stacker(params, lambda u: u.t_values, dim=1)

        # ===== Verify it's recreated correctly ====== #
        for p, m, ps in zip(params, stacked.mask, stacked.prev_shape):
            v = stacked.concated[..., m]

            if len(p.c_shape) != 0:
                v = v.reshape(*v.shape[:-1], *ps)

            assert (p.t_values == v).all() 
Example #4
Source File: partial_convolution.py    From Text_Segmentation_Image_Inpainting with GNU General Public License v3.0
def forward(self, args):
        x, mask = args
        output = self.feature_conv(x * mask)
        if self.feature_conv.bias is not None:
            output_bias = self.feature_conv.bias.view(1, -1, 1, 1).expand_as(output)
        else:
            output_bias = torch.zeros_like(output)

        with torch.no_grad():
            output_mask = self.mask_conv(mask)

        # Guard against division by zero where the window contained no valid (unmasked) pixels.
        mask_sum = output_mask.masked_fill_(output_mask == 0, 1.0)

        output = (output - output_bias) / mask_sum + output_bias
        new_mask = torch.ones_like(output)

        return output, new_mask 
Example #5
Source File: meanfield.py    From pyfilter with MIT License
def initialize(self, parameters: Tuple[Parameter, ...], *args):
        stacked = stacker(parameters, lambda u: u.t_values)

        self._mean = torch.zeros(stacked.concated.shape[1:], device=stacked.concated.device)
        self._log_std = torch.ones_like(self._mean)

        for p, msk in zip(parameters, stacked.mask):
            try:
                self._mean[msk] = p.bijection.inv(p.distr.mean)
            except NotImplementedError:
                pass

        self._mean.requires_grad_(True)
        self._log_std.requires_grad_(True)

        return self 
Example #6
Source File: inverse_warp_loss.py    From DenseMatchingBenchmark with MIT License
def lr_loss_per_level(self, leftEstDisp, rightEstDisp, leftImage, rightImage, leftMask=None, rightMask=None):
        from dmb.modeling.stereo.losses.utils import SSIM
        assert leftEstDisp.shape == rightEstDisp.shape, \
            'The shape of left and right disparity map should be the same!'
        N, C, H, W = leftEstDisp.shape
        leftImage = F.interpolate(leftImage, (H, W), mode='area')
        rightImage = F.interpolate(rightImage, (H, W), mode='area')

        leftImage_fromWarp = inverse_warp(rightImage, -leftEstDisp)
        rightImage_fromWarp = inverse_warp(leftImage, rightEstDisp)

        if leftMask is None:
            leftMask = torch.ones_like(leftImage > 0)
        loss = self.rms_weight * self.rms(leftImage[leftMask], leftImage_fromWarp[leftMask])
        loss += self.ssim_weight * SSIM(leftImage, leftImage_fromWarp, leftMask)

        if rightMask is None:
            rightMask = torch.ones_like(rightImage > 0)
        loss += self.rms_weight * self.rms(rightImage[rightMask], rightImage_fromWarp[rightMask])
        loss += self.ssim_weight * SSIM(rightImage, rightImage_fromWarp, rightMask)

        return loss 
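Note the idiom torch.ones_like(leftImage > 0) above: leftImage > 0 is already a boolean tensor, so ones_like of it yields an all-True mask of the same shape. A more direct spelling of the same default mask would be:

        leftMask = torch.ones_like(leftImage, dtype=torch.bool)   # all-True, same shape as leftImage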
Example #7
Source File: batch_norm.py    From SegmenTron with Apache License 2.0
def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            # No running_mean/var in early versions.
            # This silences the warnings.
            if prefix + "running_mean" not in state_dict:
                state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
            if prefix + "running_var" not in state_dict:
                state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)

        if version is not None and version < 3:
            # logger = logging.getLogger(__name__)
            logging.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
            # In version < 3, running_var are used without +eps.
            state_dict[prefix + "running_var"] -= self.eps

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        ) 
Example #8
Source File: networks.py    From connecting_the_dots with MIT License
def tforward(self, disp0, im, std=None):
    self.pattern = self.pattern.to(disp0.device)
    self.uv0 = self.uv0.to(disp0.device)

    uv0 = self.uv0.expand(disp0.shape[0], *self.uv0.shape[1:])
    uv1 = torch.empty_like(uv0)
    uv1[...,0] = uv0[...,0] - disp0.contiguous().view(disp0.shape[0],-1)
    uv1[...,1] = uv0[...,1]

    uv1[..., 0] = 2 * (uv1[..., 0] / (self.im_width-1) - 0.5)
    uv1[..., 1] = 2 * (uv1[..., 1] / (self.im_height-1) - 0.5)
    uv1 = uv1.view(-1, self.im_height, self.im_width, 2).clone()
    pattern = self.pattern.expand(disp0.shape[0], *self.pattern.shape[1:])
    pattern_proj = torch.nn.functional.grid_sample(pattern, uv1, padding_mode='border')
    mask = torch.ones_like(im)
    if std is not None:
      mask = mask*std

    diff = torchext.photometric_loss(pattern_proj.contiguous(), im.contiguous(), 9, self.loss_type, self.loss_eps)
    val = (mask*diff).sum() / mask.sum()
    return val, pattern_proj 
Example #9
Source File: transforms.py    From autoclint with Apache License 2.0
def __call__(self, image):
        if self.height > 0 or self.width > 0:
            if isinstance(image, torch.Tensor):
                mask = torch.ones_like(image)
            elif isinstance(image, np.ndarray):
                mask = np.ones_like(image)
            else:
                raise NotImplementedError('support only tensor or numpy array')

            h, w = image.shape[-2:]

            y = np.random.randint(h)
            x = np.random.randint(w)

            y1 = np.clip(y - self.height // 2, 0, h)
            y2 = np.clip(y + self.height // 2, 0, h)
            x1 = np.clip(x - self.width // 2, 0, w)
            x2 = np.clip(x + self.width // 2, 0, w)

            if len(mask.shape) == 3:
                mask[:, y1: y2, x1: x2] = 0.
            else:
                mask[:, :, y1: y2, x1: x2] = 0.
            image *= mask
        return image 
Example #10
Source File: wrapper_bohamiann.py    From RoBO with BSD 3-Clause "New" or "Revised" License
def get_default_network(input_dimensionality: int) -> torch.nn.Module:
    class AppendLayer(torch.nn.Module):
        def __init__(self, bias=True, *args, **kwargs):
            super().__init__(*args, **kwargs)
            if bias:
                self.bias = torch.nn.Parameter(torch.FloatTensor(1, 1))
            else:
                self.register_parameter('bias', None)

        def forward(self, x):
            return torch.cat((x, self.bias * torch.ones_like(x)), dim=1)

    def init_weights(module):
        if type(module) == AppendLayer:
            torch.nn.init.constant_(module.bias, val=np.log(1e-2))
        elif type(module) == torch.nn.Linear:
            torch.nn.init.kaiming_normal_(module.weight, mode="fan_in", nonlinearity="linear")
            torch.nn.init.constant_(module.bias, val=0.0)

    return torch.nn.Sequential(
        torch.nn.Linear(input_dimensionality, 50), torch.nn.Tanh(),
        torch.nn.Linear(50, 50), torch.nn.Tanh(),
        torch.nn.Linear(50, 1),
        AppendLayer()
    ).apply(init_weights) 
Example #11
Source File: total_inference.py    From scVI with MIT License
def loss_discriminator(
        self, z, batch_index, predict_true_class=True, return_details=True
    ):

        n_classes = self.gene_dataset.n_batches
        cls_logits = torch.nn.LogSoftmax(dim=1)(self.discriminator(z))

        if predict_true_class:
            cls_target = one_hot(batch_index, n_classes)
        else:
            one_hot_batch = one_hot(batch_index, n_classes)
            cls_target = torch.zeros_like(one_hot_batch)
            # place zeroes where true label is
            cls_target.masked_scatter_(
                ~one_hot_batch.bool(), torch.ones_like(one_hot_batch) / (n_classes - 1)
            )

        l_soft = cls_logits * cls_target
        loss = -l_soft.sum(dim=1).mean()

        return loss 
Example #12
Source File: losses.py    From DexiNed with MIT License
def weighted_cross_entropy_loss(preds, edges):
    """ Calculate sum of weighted cross entropy loss. """
    # Reference:
    #   hed/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
    #   https://github.com/s9xie/hed/issues/7
    mask = (edges > 0.5).float()
    b, c, h, w = mask.shape
    num_pos = torch.sum(mask, dim=[1, 2, 3], keepdim=True).float()  # Shape: [b, 1, 1, 1].
    num_neg = c * h * w - num_pos                                   # Shape: [b, 1, 1, 1].
    weight = torch.zeros_like(mask)
    #weight[edges > 0.5]  = num_neg / (num_pos + num_neg)
    #weight[edges <= 0.5] = num_pos / (num_pos + num_neg)
    weight.masked_scatter_(edges > 0.5,
        torch.ones_like(edges) * num_neg / (num_pos + num_neg))
    weight.masked_scatter_(edges <= 0.5,
        torch.ones_like(edges) * num_pos / (num_pos + num_neg))
    # Calculate loss.
    # preds=torch.sigmoid(preds)
    losses = F.binary_cross_entropy_with_logits(
        preds.float(), edges.float(), weight=weight, reduction='none')
    loss = torch.sum(losses) / b
    return loss 
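Note: masked_scatter_ is used above because num_pos and num_neg have shape [b, 1, 1, 1], so the per-sample ratio must first be broadcast to a full-size tensor (via torch.ones_like(edges) * ratio) before its selected entries are scattered in. The same weights can be written without masked_scatter_ using torch.where; a sketch under the same shapes:

    pos_weight = num_neg / (num_pos + num_neg)   # [b, 1, 1, 1], broadcasts over c, h, w
    neg_weight = num_pos / (num_pos + num_neg)
    weight = torch.where(edges > 0.5,
                         pos_weight.expand_as(edges),
                         neg_weight.expand_as(edges))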
Example #13
Source File: test_lorentz_math.py    From geoopt with Apache License 2.0
def test_parallel_transport0_preserves_inner_products(a, k):
    man = lorentz.Lorentz(k=k)
    a = man.projx(a)

    v_0 = torch.rand_like(a) + 1e-5
    u_0 = torch.rand_like(a) + 1e-5

    zero = torch.ones_like(a)
    d = zero.size(1) - 1
    zero = torch.cat(
        (zero.narrow(1, 0, 1) * torch.sqrt(k), zero.narrow(1, 1, d) * 0.0), dim=1
    )

    v_0 = man.proju(zero, v_0)  # project on tangent plane
    u_0 = man.proju(zero, u_0)  # project on tangent plane

    v_a = man.transp0(a, v_0)
    u_a = man.transp0(a, u_0)

    vu_0 = man.inner(v_0, u_0, keepdim=True)
    vu_a = man.inner(v_a, u_a, keepdim=True)
    np.testing.assert_allclose(vu_a, vu_0, atol=1e-5, rtol=1e-5) 
Example #14
Source File: test_lorentz_math.py    From geoopt with Apache License 2.0
def test_parallel_transport0_back(a, b, k):
    man = lorentz.Lorentz(k=k)
    a = man.projx(a)
    b = man.projx(b)

    v_0 = torch.rand_like(a) + 1e-5
    v_0 = man.proju(a, v_0)  # project on tangent plane

    zero = torch.ones_like(a)
    d = zero.size(1) - 1
    zero = torch.cat(
        (zero.narrow(1, 0, 1) * torch.sqrt(k), zero.narrow(1, 1, d) * 0.0), dim=1
    )

    v_t = man.transp0back(a, v_0)
    v_t = man.transp0(b, v_t)

    v_s = man.transp(a, zero, v_0)
    v_s = man.transp(zero, b, v_s)

    np.testing.assert_allclose(v_t, v_s, atol=1e-5, rtol=1e-5) 
Example #15
Source File: test_lorentz_math.py    From geoopt with Apache License 2.0
def test_zero_point_ops(a, k):
    man = lorentz.Lorentz(k=k)
    a = man.projx(a)

    zero = torch.ones_like(a)
    d = zero.size(1) - 1
    zero = torch.cat(
        (zero.narrow(1, 0, 1) * torch.sqrt(k), zero.narrow(1, 1, d) * 0.0), dim=1
    )
    inner_z = man.inner0(a)
    inner = man.inner(None, a, zero)
    np.testing.assert_allclose(inner, inner_z, atol=1e-5, rtol=1e-5)

    lmap_z = man.logmap0back(a)
    lmap = man.logmap(a, zero)

    np.testing.assert_allclose(lmap, lmap_z, atol=1e-5, rtol=1e-5) 
Example #16
Source File: network_blocks.py    From ASFF with GNU General Public License v3.0
def forward(self, x):
        if not self.training or self.keep_prob == 1:  # set keep_prob=1 to turn off DropBlock
            return x
        if self.gamma is None:
            self.gamma = self.calculate_gamma(x)
        if x.type() == 'torch.cuda.HalfTensor':  # TODO: FP16 is not yet fully supported
            FP16 = True
            x = x.float()
        else:
            FP16 = False
        p = torch.ones_like(x) * (self.gamma)
        mask = 1 - torch.nn.functional.max_pool2d(torch.bernoulli(p),
                                                  self.kernel_size,
                                                  self.stride,
                                                  self.padding)

        out = mask * x * (mask.numel() / mask.sum())

        if FP16:
            out = out.half()
        return out 
Example #17
Source File: DataSet.py    From ext_portrait_segmentation with MIT License
def __getitem__(self, idx):
        '''
        :param idx: Index of the image file
        :return: the image and corresponding label (plus an edge map when self.edge is set)
        '''
        image_name = self.imList[idx]
        label_name = self.labelList[idx]
        image = cv2.imread(image_name)
        label = cv2.imread(label_name, 0)
        label_bool = 255 * ((label > 200).astype(np.uint8))

        if self.transform:
            [image, label] = self.transform(image, label_bool)
        if self.edge:
            np_label = 255 * label.data.numpy().astype(np.uint8)
            kernel = np.ones((self.kernel_size , self.kernel_size ), np.uint8)
            erosion = cv2.erode(np_label, kernel, iterations=1)
            dilation = cv2.dilate(np_label, kernel, iterations=1)
            boundary = dilation - erosion
            edgemap = 255 * torch.ones_like(label)
            edgemap[torch.from_numpy(boundary) > 0] = label[torch.from_numpy(boundary) > 0]
            return (image, label, edgemap)
        else:
            return (image, label) 
Example #18
Source File: layers.py    From graph-cnn.pytorch with MIT License
def forward(self, input, adj):
        h = torch.mm(input, self.W)
        N = h.size()[0]

        f_1 = torch.matmul(h, self.a1)
        f_2 = torch.matmul(h, self.a2)
        e = self.leakyrelu(f_1 + f_2.transpose(0,1))

        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)

        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime 
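Note: -9e15 * torch.ones_like(e) is the usual masked-softmax trick: positions without an edge get a very large negative score, so they receive (almost) zero attention weight after the softmax. A standalone sketch with toy shapes:

import torch
import torch.nn.functional as F

e = torch.randn(4, 4)                       # raw attention scores
adj = (torch.rand(4, 4) > 0.5).float()      # toy adjacency matrix
masked = torch.where(adj > 0, e, -9e15 * torch.ones_like(e))
attention = F.softmax(masked, dim=1)        # ~0 weight wherever adj == 0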
Example #19
Source File: adaptation_networks.py    From cnaps with MIT License
def forward(self, x, task_representation):
        """
        Forward pass through adaptation network.
        :param x: (torch.tensor) Input representation to network (task level representation z).
        :return: (list::dictionaries) Dictionary for every block in layer. Dictionary contains all the parameters
                 necessary to adapt layer in base network. Base network is aware of dict structure and can pull params
                 out during forward pass.
        """
        x = self.shared_layer(x)
        x = torch.mean(x, dim=0, keepdim=True)
        x = self.shared_layer_post(x)
        x = torch.cat([x, task_representation], dim=-1)
        block_params = []
        for block in range(self.num_blocks):
            block_param_dict = {
                'gamma1': self.gamma1_processors[block](x).squeeze() * self.gamma1_regularizers[block] +
                          torch.ones_like(self.gamma1_regularizers[block]),
                'beta1': self.beta1_processors[block](x).squeeze() * self.beta1_regularizers[block],
                'gamma2': self.gamma2_processors[block](x).squeeze() * self.gamma2_regularizers[block] +
                          torch.ones_like(self.gamma2_regularizers[block]),
                'beta2': self.beta2_processors[block](x).squeeze() * self.beta2_regularizers[block]
            }
            block_params.append(block_param_dict)
        return block_params 
Example #20
Source File: discrete_test.py    From nsf with MIT License
def test_sample_and_log_prob_with_context(self):
        num_samples = 10
        context_size = 20
        input_shape = [2, 3, 4]
        context_shape = [2, 3, 4]

        dist = discrete.ConditionalIndependentBernoulli(input_shape)
        context = torch.randn(context_size, *context_shape)
        samples, log_prob = dist.sample_and_log_prob(num_samples, context=context)

        self.assertIsInstance(samples, torch.Tensor)
        self.assertIsInstance(log_prob, torch.Tensor)

        self.assertEqual(samples.shape, torch.Size([context_size, num_samples] + input_shape))
        self.assertEqual(log_prob.shape, torch.Size([context_size, num_samples]))

        self.assertFalse(torch.isnan(log_prob).any())
        self.assertFalse(torch.isinf(log_prob).any())
        self.assert_tensor_less_equal(log_prob, 0.0)

        self.assertFalse(torch.isnan(samples).any())
        self.assertFalse(torch.isinf(samples).any())
        binary = (samples == 1.0) | (samples == 0.0)
        self.assertEqual(binary, torch.ones_like(binary)) 
Example #21
Source File: wrappers.py    From autoclint with Apache License 2.0
def forward(self, input):
        batch, channel, height, width = input.shape
        w = int(width * self.ratio)
        h = int(height * self.ratio)

        if self.training and w > 0 and h > 0:
            x = np.random.randint(width, size=(batch,))
            y = np.random.randint(height, size=(batch,))

            x1s = np.clip(x - w // 2, 0, width)
            x2s = np.clip(x + w // 2, 0, width)
            y1s = np.clip(y - h // 2, 0, height)
            y2s = np.clip(y + h // 2, 0, height)

            mask = torch.ones_like(input)
            for idx, (x1, x2, y1, y2) in enumerate(zip(x1s, x2s, y1s, y2s)):
                mask[idx, :, y1:y2, x1:x2] = 0.

            input = input * mask
        return input 
Example #22
Source File: modeling.py    From cmrc2019 with Creative Commons Attribution Share Alike 4.0 International
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal
        # attention used in OpenAI GPT; we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids, token_type_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output 
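Note: the mask arithmetic in this example is a pattern worth isolating: a 2D padding mask of ones (attend) and zeros (ignore) becomes an additive bias that is 0.0 for real tokens and -10000.0 for padding. A standalone sketch:

import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])      # 1 = real token, 0 = padding
extended = attention_mask.unsqueeze(1).unsqueeze(2)   # [batch, 1, 1, seq_len]
extended = (1.0 - extended.float()) * -10000.0        # 1 -> 0.0, 0 -> -10000.0
# `extended` is then added to the raw attention scores before the softmax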
Example #23
Source File: utils.py    From latent-treelstm with MIT License
def log_prob(self, value):
        if value.dtype == torch.long:
            if self.mask is None:
                return self.cat_distr.log_prob(value)
            else:
                return self.cat_distr.log_prob(value) * (self.n != 0.).to(dtype=torch.float32)
        else:
            max_values, mv_idxs = value.max(dim=-1)
            relaxed = (max_values - torch.ones_like(max_values)).sum().item() != 0.0
            if relaxed:
                raise ValueError("The log_prob can't be calculated for the relaxed sample!")
            return self.cat_distr.log_prob(mv_idxs) * (self.n != 0.).to(dtype=torch.float32) 
Example #24
Source File: bilateral_filter.py    From DenseMatchingBenchmark with MIT License
def forward(self, leftImage, estDisp):
        assert leftImage.shape == estDisp.shape
        assert estDisp.shape[1] == 1

        for i in range(len(self.disp_conv)):
            self.disp_conv[i] = self.disp_conv[i].to(leftImage.device)
        for i in range(len(self.image_conv)):
            self.image_conv[i] = self.image_conv[i].to(leftImage.device)

        index_image_conv = 0
        index_disp_conv = 0
        fineDisp = None
        weight = None
        for i in range(-(self.kernel_size // 2), (self.kernel_size // 2 + 1)):
            for j in range(-(self.kernel_size // 2), (self.kernel_size // 2 + 1)):
                if i == 0 and j == 0:
                    image_diff_weight = torch.ones_like(estDisp)
                else:
                    image_diff_weight = (
                        (-self.image_conv[index_image_conv](leftImage).pow(2.0) / (2 * self.sigma_image ** 2)).exp())
                    index_image_conv += 1

                dist = math.exp(-float(i ** 2 + j ** 2) / float(2 * self.sigma_gaussian ** 2))
                dist_diff_weight = torch.full_like(estDisp, dist)

                disp = self.disp_conv[index_disp_conv](estDisp)

                if index_disp_conv == 0:
                    weight = dist_diff_weight * image_diff_weight
                    fineDisp = disp * dist_diff_weight * image_diff_weight
                else:
                    weight += dist_diff_weight * image_diff_weight
                    fineDisp += disp * dist_diff_weight * image_diff_weight

        fineDisp = (fineDisp + eps) / (weight + eps)  # eps: small module-level constant guarding against division by zero

        return fineDisp 
Example #25
Source File: qa.py    From CoupletAI with MIT License
def predict(self, seq: List[str]) -> str:
        seq = [self.word_dict.get(word, self.word_dict['[UNK]'])
               for word in seq]
        seq = torch.tensor(seq, dtype=torch.long).unsqueeze(0).to(self.device)
        attention_mask = create_transformer_attention_mask(torch.ones_like(seq).to(self.device))

        logits = self.model(seq, attention_mask)
        out_ids = torch.argmax(logits.squeeze(0), dim=-1)
        out_seq = [self.ix2word[idx.item()] for idx in out_ids]
        return ''.join(out_seq) 
Example #26
Source File: qa.py    From CoupletAI with MIT License
def _build_traced_script_module(self):
        example = torch.ones(1, 3).long().to(self.device)
        mask = create_transformer_attention_mask(torch.ones_like(example).to(self.device))
        return torch.jit.trace(self.model, (example, mask)) 
Example #27
Source File: conv_type.py    From hidden-networks with Apache License 2.0
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.scores = nn.Parameter(torch.Tensor(self.weight.size()))
        if parser_args.score_init_constant is not None:
            self.scores.data = (
                torch.ones_like(self.scores) * parser_args.score_init_constant
            )
        else:
            nn.init.kaiming_uniform_(self.scores, a=math.sqrt(5)) 
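Note: multiplying torch.ones_like by a scalar, as above, builds a constant-filled tensor; torch.full_like expresses the same thing in one call:

            self.scores.data = torch.full_like(self.scores, parser_args.score_init_constant)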
Example #28
Source File: inverse_warp_loss.py    From DenseMatchingBenchmark with MIT License
def loss_per_level(self, estDisp, leftImage, rightImage, mask=None):
        from dmb.modeling.stereo.losses.utils import SSIM
        N, C, H, W = estDisp.shape
        leftImage = F.interpolate(leftImage, (H, W), mode='area')
        rightImage = F.interpolate(rightImage, (H, W), mode='area')

        leftImage_fromWarp = inverse_warp(rightImage, -estDisp)

        if mask is None:
            mask = torch.ones_like(leftImage > 0)
        loss = self.rms_weight * self.rms(leftImage[mask], leftImage_fromWarp[mask])
        loss += self.ssim_weight * SSIM(leftImage, leftImage_fromWarp, mask)

        return loss 
Example #29
Source File: temporal_dim.py    From atari-representation-learning with MIT License
def do_one_epoch(self, epoch, episodes):
        mode = "train" if self.encoder.training and self.classifier1.training else "val"
        epoch_loss, accuracy, steps = 0., 0., 0
        accuracy1 = 0.
        epoch_loss1 = 0.
        data_generator = self.generate_batch(episodes)
        for x_t, x_tprev, x_that, ts, thats in data_generator:
            f_t_maps, f_t_prev_maps = self.encoder(x_t, fmaps=True), self.encoder(x_tprev, fmaps=True)
            f_t_hat_maps = self.encoder(x_that, fmaps=True)

            # Loss 1: Global at time t, f5 patches at time t-1
            f_t, f_t_prev = f_t_maps['out'], f_t_prev_maps['f5']
            f_t_hat = f_t_hat_maps['f5']
            f_t = f_t.unsqueeze(1).unsqueeze(1).expand(-1, f_t_prev.size(1), f_t_prev.size(2), self.encoder.hidden_size)

            target = torch.cat((torch.ones_like(f_t[:, :, :, 0]),
                                torch.zeros_like(f_t[:, :, :, 0])), dim=0).to(self.device)

            x1, x2 = torch.cat([f_t, f_t], dim=0), torch.cat([f_t_prev, f_t_hat], dim=0)
            shuffled_idxs = torch.randperm(len(target))
            x1, x2, target = x1[shuffled_idxs], x2[shuffled_idxs], target[shuffled_idxs]
            self.optimizer.zero_grad()
            loss1 = self.loss_fn(self.classifier1(x1, x2).squeeze(), target)

            if mode == "train":
                loss1.backward()
                self.optimizer.step()

            epoch_loss1 += loss1.detach().item()
            preds1 = torch.sigmoid(self.classifier1(x1, x2).squeeze())
            accuracy1 += calculate_accuracy(preds1, target)
            steps += 1
        self.log_results(epoch, epoch_loss1 / steps,
                         accuracy1 / steps, prefix=mode)
        if mode == "val":
            self.early_stopper(accuracy1 / steps, self.encoder) 
Example #30
Source File: loss_functions.py    From SfmLearner-Pytorch with MIT License
def explainability_loss(mask):
    if type(mask) not in [tuple, list]:
        mask = [mask]
    loss = 0
    for mask_scaled in mask:
        ones_var = torch.ones_like(mask_scaled)
        loss += nn.functional.binary_cross_entropy(mask_scaled, ones_var)
    return loss
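Note: here torch.ones_like builds the all-ones target for a binary cross-entropy, which regularizes the explainability mask toward 1 (i.e. keeps it from collapsing to zero everywhere). A tiny standalone check of the behaviour:

import torch
import torch.nn as nn

mask = torch.sigmoid(torch.randn(2, 1, 8, 8))   # values in (0, 1)
loss = nn.functional.binary_cross_entropy(mask, torch.ones_like(mask))
# the loss shrinks as the mask values approach 1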