Python torch.ge() Examples

The following are 30 code examples of torch.ge(), drawn from open-source projects. You can go to the original project or source file by following the reference above each example. You may also want to check out all available functions and classes of the torch module.
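
Before the project examples, here is a minimal standalone sketch of the operator itself (tensor values chosen purely for illustration): torch.ge(input, other) compares elementwise as input >= other, broadcasting other when it is a scalar or a compatible tensor, and returns a boolean mask (a uint8 ByteTensor on older PyTorch releases). Many of the examples below immediately cast the mask with .float() or .long() so it can be used arithmetically.

import torch

scores = torch.tensor([0.2, 0.5, 0.8])
thresholds = torch.tensor([0.5, 0.5, 0.5])

# elementwise "greater than or equal" against another tensor
mask = torch.ge(scores, thresholds)   # tensor([False,  True,  True])

# a scalar second argument is broadcast to the tensor's shape
mask_scalar = torch.ge(scores, 0.5)   # same result as above

# common pattern in the examples below: cast the mask for arithmetic use
weights = mask.float()                # tensor([0., 1., 1.])
print(mask, mask_scalar, weights)
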
Example #1
Source File: hinge.py    From dfw with MIT License
def _compute_xi(self, s, aug, y):

        # find argmax of augmented scores
        _, y_star = torch.max(aug, 1)
        # xi_max: one-hot encoding of maximal indices
        xi_max = torch.eq(y_star[:, None], self._range).float()

        if MultiClassHingeLoss.smooth:
            # find smooth argmax of scores
            xi_smooth = nn.functional.softmax(s, dim=1)
            # compute for each sample whether it has a positive contribution to the loss
            losses = torch.sum(xi_smooth * aug, 1)
            mask_smooth = torch.ge(losses, 0).float()[:, None]
            # keep only smoothing for positive contributions
            xi = mask_smooth * xi_smooth + (1 - mask_smooth) * xi_max
        else:
            xi = xi_max

        return xi 
Example #2
Source File: model.py    From AttentionDeepMIL with MIT License
def forward(self, x):
        x = x.squeeze(0)

        H = self.feature_extractor_part1(x)
        H = H.view(-1, 50 * 4 * 4)
        H = self.feature_extractor_part2(H)  # NxL

        A_V = self.attention_V(H)  # NxD
        A_U = self.attention_U(H)  # NxD
        A = self.attention_weights(A_V * A_U) # element wise multiplication # NxK
        A = torch.transpose(A, 1, 0)  # KxN
        A = F.softmax(A, dim=1)  # softmax over N

        M = torch.mm(A, H)  # KxL

        Y_prob = self.classifier(M)
        Y_hat = torch.ge(Y_prob, 0.5).float()

        return Y_prob, Y_hat, A

    # AUXILIARY METHODS 
Example #3
Source File: model.py    From AttentionDeepMIL with MIT License
def forward(self, x):
        x = x.squeeze(0)

        H = self.feature_extractor_part1(x)
        H = H.view(-1, 50 * 4 * 4)
        H = self.feature_extractor_part2(H)  # NxL

        A = self.attention(H)  # NxK
        A = torch.transpose(A, 1, 0)  # KxN
        A = F.softmax(A, dim=1)  # softmax over N

        M = torch.mm(A, H)  # KxL

        Y_prob = self.classifier(M)
        Y_hat = torch.ge(Y_prob, 0.5).float()

        return Y_prob, Y_hat, A

    # AUXILIARY METHODS 
Example #4
Source File: functional.py    From catalyst with Apache License 2.0
def create_negative_mask(
    labels: torch.Tensor, neg_label: int = -1
) -> torch.Tensor:
    """@TODO: Docs. Contribution is welcome."""
    neg_labels = torch.ge(labels, neg_label)
    pos_labels = ~neg_labels

    i_less_neg = pos_labels.unsqueeze(1).unsqueeze(2)
    j_less_neg = pos_labels.unsqueeze(1).unsqueeze(0)
    k_less_neg = pos_labels.unsqueeze(0).unsqueeze(0)

    anchors = labels.unsqueeze(1).unsqueeze(2)
    negatives = labels.unsqueeze(0).unsqueeze(0)
    k_equal = torch.eq(anchors + neg_label, negatives)

    k_less_or_equal = k_equal | k_less_neg
    mask = i_less_neg & j_less_neg & k_less_or_equal

    return mask 
Example #5
Source File: frequent_word_embedding.py    From claf with MIT License
def forward(self, words, frequent_tuning=False):
        if frequent_tuning and self.training:

            padding_mask = words.eq(0).long()

            # Fine-tuning - N the most frequent
            fine_tune_mask = torch.lt(words, self.threshold_index) * padding_mask.eq(
                0
            )  # < threshold_index
            fine_tune_words = words * fine_tune_mask.long()

            fine_tune_embedded = self.fine_tune_word_embedding(fine_tune_words)
            fine_tune_embedded = f.masked_zero(fine_tune_embedded, fine_tune_mask)

            # Fixed - under N frequent
            fixed_mask = torch.ge(words, self.threshold_index)  # >= threshold_index

            fixed_embedeed = self.fixed_word_embedding(words).detach()  # Fixed
            fixed_embedeed = f.masked_zero(fixed_embedeed, fixed_mask)

            embedded_words = fine_tune_embedded + fixed_embedeed
        else:
            embedded_words = self.fixed_word_embedding(words)

        return self.dropout(embedded_words) 
Example #6
Source File: sparse_net.py    From htmpapers with GNU Affero General Public License v3.0
def pruneWeights(self, minWeight):
    """
    Prune all the weights whose absolute magnitude is less than minWeight
    :param minWeight: min weight to prune. If zero then no pruning
    :type minWeight: float
    """
    if minWeight == 0.0:
      return

    # Collect all weights
    weights = [v for k, v in self.named_parameters() if 'weight' in k]
    for w in weights:
      # Filter weights above threshold
      mask = torch.ge(torch.abs(w.data), minWeight)
      # Zero other weights
      w.data.mul_(mask.type(torch.float32)) 
Example #7
Source File: losses.py    From binseg_pytoch with Apache License 2.0
def iouloss(input, target):
    smooth = 1.
    iflat = input.view(-1)
    tflat = target.view(-1)
    intersection = (iflat * tflat).sum()
    
    return 1. - ((2. * intersection + smooth) /
              (iflat.sum() + tflat.sum() + smooth))
    # works for one binary pred and associated target
    # make byte tensors
    #pred = torch.ge(pred, 0.5) 
    #pred = (pred == 1)
    #mask = (gt == 0)
    #gt = (gt == 1)
    #union = (gt | pred)[mask].long().sum()
    #if not union:
    #    return 0.
    #else:
    #    intersection = (gt & pred)[mask].long().sum()
    #    return 1. - intersection / union 
Example #8
Source File: computer_vision_fine_tuning.py    From pytorch-lightning with Apache License 2.0
def training_step(self, batch, batch_idx):

        # 1. Forward pass:
        x, y = batch
        y_logits = self.forward(x)
        y_true = y.view((-1, 1)).type_as(x)
        y_bin = torch.ge(y_logits, 0)

        # 2. Compute loss & accuracy:
        train_loss = self.loss(y_logits, y_true)
        num_correct = torch.eq(y_bin.view(-1), y_true.view(-1)).sum()

        # 3. Outputs:
        tqdm_dict = {'train_loss': train_loss}
        output = OrderedDict({'loss': train_loss,
                              'num_correct': num_correct,
                              'log': tqdm_dict,
                              'progress_bar': tqdm_dict})

        return output 
Example #9
Source File: tensor.py    From condensa with Apache License 2.0
def simple_mask(tensor, threshold, align=None):
    """
    Computes a simple binary mask for given magnitude threshold.

    :param tensor: PyTorch tensor
    :type tensor: `torch.Tensor`
    :param threshold: magnitude threshold for pruning
    :type threshold: `float`
    :return: Mask
    :rtype: `torch.Tensor`
    """
    assert tensor.dim() == 1
    if align is None:
        return torch.ge(tensor.abs(), threshold)
    else:
        size = tensor.size(0)
        if size < align:
            raise RuntimeError('Tensor too small for given alignment')
        t = tensor.abs()
        nnz = torch.ge(t, threshold).nonzero().size(0)
        nnz = int(nnz / align) * align
        _, indices = torch.topk(t, nnz)
        ones = torch.ones(nnz,
                          dtype=tensor.dtype,
                          layout=tensor.layout,
                          device=tensor.device)
        mask = torch.zeros_like(tensor).scatter_(0, indices, ones)
        return mask 
Example #10
Source File: nnutils.py    From hgraph2graph with MIT License
def get_accuracy_bin(scores, labels):
    preds = torch.ge(scores, 0).long()
    acc = torch.eq(preds, labels).float()
    return torch.sum(acc) / labels.nelement() 
Example #11
Source File: evaluation_metrics.py    From PointFlow with MIT License
def knn(Mxx, Mxy, Myy, k, sqrt=False):
    n0 = Mxx.size(0)
    n1 = Myy.size(0)
    label = torch.cat((torch.ones(n0), torch.zeros(n1))).to(Mxx)
    M = torch.cat((torch.cat((Mxx, Mxy), 1), torch.cat((Mxy.transpose(0, 1), Myy), 1)), 0)
    if sqrt:
        M = M.abs().sqrt()
    INFINITY = float('inf')
    val, idx = (M + torch.diag(INFINITY * torch.ones(n0 + n1).to(Mxx))).topk(k, 0, False)

    count = torch.zeros(n0 + n1).to(Mxx)
    for i in range(0, k):
        count = count + label.index_select(0, idx[i])
    pred = torch.ge(count, (float(k) / 2) * torch.ones(n0 + n1).to(Mxx)).float()

    s = {
        'tp': (pred * label).sum(),
        'fp': (pred * (1 - label)).sum(),
        'fn': ((1 - pred) * label).sum(),
        'tn': ((1 - pred) * (1 - label)).sum(),
    }

    s.update({
        'precision': s['tp'] / (s['tp'] + s['fp'] + 1e-10),
        'recall': s['tp'] / (s['tp'] + s['fn'] + 1e-10),
        'acc_t': s['tp'] / (s['tp'] + s['fn'] + 1e-10),
        'acc_f': s['tn'] / (s['tn'] + s['fp'] + 1e-10),
        'acc': torch.eq(label, pred).float().mean(),
    })
    return s 
Example #12
Source File: pytorch_backend_test.py    From TensorNetwork with Apache License 2.0
def test_random_uniform_boundaries(dtype):
  lb = 1.2
  ub = 4.8
  backend = pytorch_backend.PyTorchBackend()
  a = backend.random_uniform((4, 4), seed=10, dtype=dtype)
  b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype)
  assert (torch.ge(a, 0).byte().all() and torch.le(a, 1).byte().all() and
          torch.ge(b, lb).byte().all() and torch.le(b, ub).byte().all()) 
Example #13
Source File: vgg_v1.py    From ACoL with MIT License
def erase_feature_maps(self, atten_map_normed, feature_maps, threshold):
        # atten_map_normed = torch.unsqueeze(atten_map_normed, dim=1)
        # atten_map_normed = self.up_resize(atten_map_normed)
        if len(atten_map_normed.size())>3:
            atten_map_normed = torch.squeeze(atten_map_normed)
        atten_shape = atten_map_normed.size()

        pos = torch.ge(atten_map_normed, threshold)
        mask = torch.ones(atten_shape).cuda()
        mask[pos.data] = 0.0
        mask = torch.unsqueeze(mask, dim=1)
        #erase
        erased_feature_maps = feature_maps * Variable(mask)

        return erased_feature_maps 
Example #14
Source File: train.py    From DFace with Apache License 2.0
def compute_accuracy(prob_cls, gt_cls):
    prob_cls = torch.squeeze(prob_cls)
    gt_cls = torch.squeeze(gt_cls)

    #we only need the detection which >= 0
    mask = torch.ge(gt_cls,0)
    #get valid element
    valid_gt_cls = torch.masked_select(gt_cls,mask)
    valid_prob_cls = torch.masked_select(prob_cls,mask)
    size = min(valid_gt_cls.size()[0], valid_prob_cls.size()[0])
    prob_ones = torch.ge(valid_prob_cls,0.6).float()
    right_ones = torch.eq(prob_ones,valid_gt_cls).float()

    return torch.div(torch.mul(torch.sum(right_ones),float(1.0)),float(size)) 
Example #15
Source File: models.py    From DFace with Apache License 2.0
def cls_loss(self,gt_label,pred_label):
        pred_label = torch.squeeze(pred_label)
        gt_label = torch.squeeze(gt_label)
        # get the mask element which >= 0, only 0 and 1 can effect the detection loss
        mask = torch.ge(gt_label,0)
        valid_gt_label = torch.masked_select(gt_label,mask)
        valid_pred_label = torch.masked_select(pred_label,mask)
        return self.loss_cls(valid_pred_label,valid_gt_label)*self.cls_factor 
Example #16
Source File: computer_vision_fine_tuning.py    From pytorch-lightning with Apache License 2.0
def validation_step(self, batch, batch_idx):

        # 1. Forward pass:
        x, y = batch
        y_logits = self.forward(x)
        y_true = y.view((-1, 1)).type_as(x)
        y_bin = torch.ge(y_logits, 0)

        # 2. Compute loss & accuracy:
        val_loss = self.loss(y_logits, y_true)
        num_correct = torch.eq(y_bin.view(-1), y_true.view(-1)).sum()

        return {'val_loss': val_loss,
                'num_correct': num_correct} 
Example #17
Source File: torch_ard.py    From pytorch_ard with MIT License
def get_clip_mask(self):
        log_alpha = self.clip(self.log_alpha)
        return torch.ge(log_alpha, self.thresh) 
Example #18
Source File: torch_ard.py    From pytorch_ard with MIT License
def get_clip_mask(self):
        log_alpha = self.clip(self.log_alpha)
        return torch.ge(log_alpha, self.thresh) 
Example #19
Source File: filter.py    From spectre with Apache License 2.0
def compute(self, left, right) -> torch.Tensor:
        return torch.ge(left, right) 
Example #20
Source File: osvos_layers.py    From OSVOS-PyTorch with GNU General Public License v3.0
def class_balanced_cross_entropy_loss(output, label, size_average=True, batch_average=True):
    """Define the class balanced cross entropy loss to train the network
    Args:
    output: Output of the network
    label: Ground truth label
    Returns:
    Tensor that evaluates the loss
    """

    labels = torch.ge(label, 0.5).float()

    num_labels_pos = torch.sum(labels)
    num_labels_neg = torch.sum(1.0 - labels)
    num_total = num_labels_pos + num_labels_neg

    output_gt_zero = torch.ge(output, 0).float()
    loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
        1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))

    loss_pos = torch.sum(-torch.mul(labels, loss_val))
    loss_neg = torch.sum(-torch.mul(1.0 - labels, loss_val))

    final_loss = num_labels_neg / num_total * loss_pos + num_labels_pos / num_total * loss_neg

    if size_average:
        final_loss /= np.prod(label.size())
    elif batch_average:
        final_loss /= label.size()[0]

    return final_loss 
Example #21
Source File: image_utils.py    From rfnet with MIT License
def nms(input, thresh=0.0, ksize=5):
    """
    Non-maximum suppression: suppress a pixel if it is not the maximum probability within its ksize*ksize neighborhood
    :param input: (B, H, W, 1)
    :param thresh: float
    :param ksize: int
    :return: mask (B, H, W, 1)
    """
    dtype, device = input.dtype, input.device
    batch, height, width, channel = input.size()
    pad = ksize // 2
    zeros = torch.zeros_like(input)
    input = torch.where(input < thresh, zeros, input)
    input_pad = F.pad(
        input=input,
        pad=(0, 0, 2 * pad, 2 * pad, 2 * pad, 2 * pad, 0, 0),
        mode="constant",
        value=0,
    )
    slice_map = torch.tensor([], dtype=input_pad.dtype, device=device)
    for i in range(ksize):
        for j in range(ksize):
            slice = input_pad[:, i : height + 2 * pad + i, j : width + 2 * pad + j, :]
            slice_map = torch.cat((slice_map, slice), -1)

    max_slice = slice_map.max(dim=-1, keepdim=True)[0]
    center_map = slice_map[:, :, :, slice_map.size(-1) // 2].unsqueeze(-1)
    mask = torch.ge(center_map, max_slice)

    mask = mask[:, pad : height + pad, pad : width + pad, :]

    return mask.type_as(input) 
Example #22
Source File: helpers.py    From LSPS with GNU General Public License v3.0
def _compute_true_acc(predictions):
  predictions = torch.ge(predictions.data, 0.5)
  if len(predictions.size()) == 3:
    predictions = predictions.view(predictions.size(0) * predictions.size(1) * predictions.size(2))
  acc = (predictions == 1).sum() / (1.0 * predictions.size(0))
  return acc 
Example #23
Source File: loss.py    From PlanarReconstruction with MIT License
def class_balanced_cross_entropy_loss(output, label, size_average=True, batch_average=True):
    """Define the class balanced cross entropy loss to train the network
    Args:
    output: Output of the network
    label: Ground truth label
    Returns:
    Tensor that evaluates the loss
    """

    labels = label.float()

    num_labels_pos = torch.sum(labels)
    num_labels_neg = torch.sum(1.0 - labels)
    num_total = num_labels_pos + num_labels_neg

    output_gt_zero = torch.ge(output, 0).float()

    loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
        1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))

    loss_pos = torch.sum(-torch.mul(labels, loss_val))
    loss_neg = torch.sum(-torch.mul(1.0 - labels, loss_val))

    final_loss = num_labels_neg / num_total * loss_pos + num_labels_pos / num_total * loss_neg

    if size_average:
        final_loss /= int(np.prod(label.size()))
    elif batch_average:
        final_loss /= int(label.size(0))

    return final_loss 
Example #24
Source File: relational.py    From heat with MIT License
def ge(t1, t2):
    """
    Element-wise rich greater than or equal comparison between values from operand t1 with respect to values of
    operand t2 (i.e. t1 >= t2), not commutative.
    Takes the first and second operand (scalar or tensor) whose elements are to be compared as argument.

    Parameters
    ----------
    t1: tensor or scalar
        The first operand to be compared greater than or equal to second operand
    t2: tensor or scalar
       The second operand to be compared less than or equal to first operand

    Returns
    -------
    result: ht.DNDarray
        A uint8-tensor holding 1 for all elements in which values of t1 are greater than or equal to values of t2,
        0 for all other elements

    Examples
    --------
    >>> import heat as ht
    >>> T1 = ht.float32([[1, 2],[3, 4]])
    >>> ht.ge(T1, 3.0)
    tensor([[0, 0],
            [1, 1]], dtype=torch.uint8)

    >>> T2 = ht.float32([[2, 2], [2, 2]])
    >>> ht.ge(T1, T2)
    tensor([[0, 1],
            [1, 1]], dtype=torch.uint8)
    """
    return operations.__binary_op(torch.ge, t1, t2) 
Example #25
Source File: nnutils.py    From hgraph2graph with MIT License
def get_accuracy_bin(scores, labels):
    preds = torch.ge(scores, 0).long()
    acc = torch.eq(preds, labels).float()
    return torch.sum(acc) / labels.nelement() 
Example #26
Source File: model_re.py    From fastNLP with Apache License 2.0
def distance_bin(self, mention_distance):
        bins = torch.zeros(mention_distance.size()).byte().to(self.device)
        rg = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 7], [8, 15], [16, 31], [32, 63], [64, 300]]
        for t, k in enumerate(rg):
            i, j = k[0], k[1]
            b = torch.LongTensor([i]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
            m1 = torch.ge(mention_distance, b)
            e = torch.LongTensor([j]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
            m2 = torch.le(mention_distance, e)
            bins = bins + (t + 1) * (m1 & m2)
        return bins.long() 
Example #27
Source File: utils.py    From smooth-topk with MIT License
def detect_large(x, k, tau, thresh):
    top, _ = x.topk(k + 1, 1)
    # switch to hard top-k if (k+1)-largest element is much smaller
    # than k-largest element
    hard = torch.ge(top[:, k - 1] - top[:, k], k * tau * math.log(thresh)).detach()
    smooth = hard.eq(0)
    return smooth, hard 
Example #28
Source File: loss.py    From Cross-Modal-Re-ID-baseline with MIT License
def forward(self, inputs, targets):
        """
        Args:
        - inputs: feature matrix with shape (batch_size, feat_dim)
        - targets: ground truth labels with shape (batch_size)
        """
        n = inputs.size(0)
        
        # Compute pairwise distance, replace by the official when merged
        dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        dist.addmm_(1, -2, inputs, inputs.t())
        dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
        
        # For each anchor, find the hardest positive and negative
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
            dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)
        
        # Compute ranking hinge loss
        y = torch.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        
        # compute accuracy
        correct = torch.ge(dist_an, dist_ap).sum().item()
        return loss, correct

# Adaptive weights 
Example #29
Source File: loss.py    From Cross-Modal-Re-ID-baseline with MIT License
def forward(self, inputs, targets, normalize_feature=False):
        if normalize_feature:
            inputs = normalize(inputs, axis=-1)
        dist_mat = pdist_torch(inputs, inputs)

        N = dist_mat.size(0)
        # shape [N, N]
        is_pos = targets.expand(N, N).eq(targets.expand(N, N).t()).float()
        is_neg = targets.expand(N, N).ne(targets.expand(N, N).t()).float()

        # `dist_ap` means distance(anchor, positive)
        # both `dist_ap` and `relative_p_inds` with shape [N, 1]
        dist_ap = dist_mat * is_pos
        dist_an = dist_mat * is_neg

        weights_ap = softmax_weights(dist_ap, is_pos)
        weights_an = softmax_weights(-dist_an, is_neg)
        furthest_positive = torch.sum(dist_ap * weights_ap, dim=1)
        closest_negative = torch.sum(dist_an * weights_an, dim=1)

        y = furthest_positive.new().resize_as_(furthest_positive).fill_(1)
        loss = self.ranking_loss(closest_negative - furthest_positive, y)


        # compute accuracy
        correct = torch.ge(closest_negative, furthest_positive).sum().item()
        return loss, correct 
Example #30
Source File: dec_pixelcnn_v2.py    From vae-lagging-encoder with MIT License
def decode(self, z, deterministic):
        '''

        Args:
            z: Tensor
                the tensor of latent z shape=[batch, nz]
            deterministic: boolean
                if True, decode by thresholding the predicted probability at 0.5 (argmax); otherwise sample from the predicted Bernoulli distribution

        Returns: Tensor
            the tensor of decoded x shape=[batch, *]

        '''
        H = W = 28
        batch_size, nz = z.size()

        # [batch, -1] --> [batch, fm, H, W]
        z = self.z_transform(z).view(batch_size, self.fm_latent, H, W)
        img = Variable(z.data.new(batch_size, self.nc, H, W).zero_(), volatile=True)
        # [batch, nc+fm, H, W]
        img = torch.cat([img, z], dim=1)
        for i in range(H):
            for j in range(W):
                # [batch, nc, H, W]
                recon_img = self.forward(img)
                # [batch, nc]
                img[:, :self.nc, i, j] = torch.ge(recon_img[:, :, i, j], 0.5).float() if deterministic else torch.bernoulli(recon_img[:, :, i, j])
                # img[:, :self.nc, i, j] = torch.bernoulli(recon_img[:, :, i, j])

        # [batch, nc, H, W]
        img_probs = self.forward(img)
        return img[:, :self.nc], img_probs