Python torch.le() Examples

The following are 26 code examples of torch.le(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
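
For reference, torch.le(input, other) is the element-wise "less than or equal" comparison: it returns a mask that is true where input <= other (a bool tensor on recent PyTorch; the older releases several of these snippets target returned uint8), and other may be a scalar or a broadcastable tensor. A minimal sketch:

import torch

x = torch.tensor([1.0, 2.0, 3.0])

# Compare against a scalar: returns an element-wise mask.
print(torch.le(x, 2.0))   # tensor([ True,  True, False])

# Compare against a broadcastable tensor; equivalent to the <= operator.
y = torch.tensor([0.5, 2.0, 4.0])
print(torch.le(x, y))     # tensor([False,  True,  True])
print(x <= y)             # same result
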
Example #1
Source File: lstmhelper.py    From FewShotLearning with MIT License
def preProc2(x):
    # Access the global variables
    global P, expP, negExpP
    P = P.type_as(x)
    expP = expP.type_as(x)
    negExpP = negExpP.type_as(x)

    # Result tensor for the second preprocessing coordinate:
    # sign(x) where |x| is large, expP * x otherwise
    z = Variable(torch.zeros(x.size())).type_as(x)
    absX = torch.abs(x)
    cond1 = torch.gt(absX, negExpP)
    cond2 = torch.le(absX, negExpP)
    if (torch.sum(cond1) > 0).data.all():
        x1 = torch.sign(x[cond1])
        z[cond1] = x1
    if (torch.sum(cond2) > 0).data.all():
        x2 = x[cond2]*expP
        z[cond2] = x2
    return z 
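
preProc2 computes the second coordinate of the standard gradient-preprocessing scheme (sign(x) for large magnitudes, e^p * x for small ones) and relies on three module-level globals that this excerpt does not show. A plausible initialization, assuming the common choice p = 10 (the names P, expP and negExpP come from the snippet; the values here are an assumption):

import torch
from torch.autograd import Variable

p = 10.0  # assumed preprocessing constant
P = Variable(torch.Tensor([p]))
expP = Variable(torch.exp(torch.Tensor([p])))      # e^p, rescales small inputs
negExpP = Variable(torch.exp(torch.Tensor([-p])))  # e^-p, the torch.gt/torch.le threshold
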
Example #2
Source File: filter.py    From spectre with Apache License 2.0
def compute(self, left, right) -> torch.Tensor:
        return torch.le(left, right) 
Example #3
Source File: losses.py    From Det3D with Apache License 2.0
def forward(self, prediction_tensor, target_tensor, weights=None):
        """Compute loss function.

        Args:
        prediction_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the (encoded) predicted locations of objects.
        target_tensor: A float tensor of shape [batch_size, num_anchors,
            code_size] representing the regression targets.
        weights: a float tensor of shape [batch_size, num_anchors]

        Returns:
        loss: a float tensor of shape [batch_size, num_anchors]
            representing the value of the loss function.
        """
        diff = prediction_tensor - target_tensor
        if self._code_weights is not None:
            # code_weights = self._code_weights.type_as(prediction_tensor).to(diff.device)
            diff = self._code_weights.view(1, 1, -1).to(diff.device) * diff
        abs_diff = torch.abs(diff)
        abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma ** 2)).type_as(abs_diff)
        loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) + (
            abs_diff - 0.5 / (self._sigma ** 2)
        ) * (1.0 - abs_diff_lt_1)
        if self._codewise:
            anchorwise_smooth_l1norm = loss
            if weights is not None:
                anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
        else:
            anchorwise_smooth_l1norm = torch.sum(loss, 2)  #  * weights
            if weights is not None:
                anchorwise_smooth_l1norm *= weights

        return anchorwise_smooth_l1norm 
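
The torch.le call is what switches between the two branches of the smooth L1 (Huber) loss: abs_diff_lt_1 is 1.0 in the quadratic region (|diff| <= 1/sigma^2) and 0.0 in the linear region, and the two branches meet continuously at the boundary. A standalone sketch with made-up values:

import torch

sigma = 3.0
abs_diff = torch.tensor([0.01, 0.05, 0.5, 2.0])

# 1.0 where the quadratic branch applies, 0.0 where the linear branch applies.
lt_1 = torch.le(abs_diff, 1.0 / sigma ** 2).float()
loss = lt_1 * 0.5 * (abs_diff * sigma) ** 2 + (1.0 - lt_1) * (abs_diff - 0.5 / sigma ** 2)
print(loss)
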
Example #4
Source File: helpers.py    From LSPS with GNU General Public License v3.0
def _compute_fake_acc(predictions):
  predictions = torch.le(predictions.data, 0.5)
  if len(predictions.size()) == 3:
    predictions = predictions.view(predictions.size(0) * predictions.size(1) * predictions.size(2))
  acc = (predictions == 1).sum() / (1.0 * predictions.size(0))
  return acc 
Example #5
Source File: losses.py    From nutonomy_pointpillars with MIT License
def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    diff = prediction_tensor - target_tensor
    if self._code_weights is not None:
      code_weights = self._code_weights.type_as(prediction_tensor)
      diff = code_weights.view(1, 1, -1) * diff
    abs_diff = torch.abs(diff)
    abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma**2)).type_as(abs_diff)
    loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) \
      + (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
    if self._codewise:
      anchorwise_smooth_l1norm = loss
      if weights is not None:
        anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
    else:
      anchorwise_smooth_l1norm = torch.sum(loss, 2)#  * weights
      if weights is not None:
        anchorwise_smooth_l1norm *= weights
    return anchorwise_smooth_l1norm 
Example #6
Source File: metrics.py    From incremental_learning.pytorch with MIT License
def _pairwise_distance(a, squared=False):
    """Computes the pairwise distance matrix with numerical stability."""
    pairwise_distances_squared = torch.add(
        a.pow(2).sum(dim=1, keepdim=True).expand(a.size(0), -1),
        torch.t(a).pow(2).sum(dim=0, keepdim=True).expand(a.size(0), -1)
    ) - 2 * (torch.mm(a, torch.t(a)))

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = torch.clamp(pairwise_distances_squared, min=0.0)

    # Get the mask where the zero distances are at.
    error_mask = torch.le(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = torch.sqrt(pairwise_distances_squared + error_mask.float() * 1e-16)

    # Undo conditionally adding 1e-16.
    pairwise_distances = torch.mul(pairwise_distances, (error_mask == False).float())

    # Explicitly set diagonals to zero.
    mask_offdiagonals = 1 - torch.eye(*pairwise_distances.size(), device=pairwise_distances.device)
    pairwise_distances = torch.mul(pairwise_distances, mask_offdiagonals)

    return pairwise_distances 
Example #7
Source File: distance.py    From incremental_learning.pytorch with MIT License
def stable_cosine_distance(a, b, squared=True):
    """Computes the pairwise distance matrix with numerical stability."""
    mat = torch.cat([a, b])

    pairwise_distances_squared = torch.add(
        mat.pow(2).sum(dim=1, keepdim=True).expand(mat.size(0), -1),
        torch.t(mat).pow(2).sum(dim=0, keepdim=True).expand(mat.size(0), -1)
    ) - 2 * (torch.mm(mat, torch.t(mat)))

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = torch.clamp(pairwise_distances_squared, min=0.0)

    # Get the mask where the zero distances are at.
    error_mask = torch.le(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = torch.sqrt(pairwise_distances_squared + error_mask.float() * 1e-16)

    # Undo conditionally adding 1e-16.
    pairwise_distances = torch.mul(pairwise_distances, (error_mask == False).float())

    # Explicitly set diagonals to zero.
    mask_offdiagonals = 1 - torch.eye(*pairwise_distances.size(), device=pairwise_distances.device)
    pairwise_distances = torch.mul(pairwise_distances, mask_offdiagonals)

    return pairwise_distances[:a.shape[0], a.shape[0]:] 
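
Examples #6 and #7 share the same stability trick around torch.le: the mask flags exactly-zero squared distances, 1e-16 is added under the square root only at those positions so the gradient of sqrt stays finite, and multiplying by the negated mask restores the exact zeros afterwards. A minimal sketch of why the epsilon matters:

import torch

d2 = torch.tensor([0.0, 4.0], requires_grad=True)
mask = torch.le(d2, 0.0)

# Without the epsilon, sqrt(0) would backpropagate an infinite gradient.
d = torch.sqrt(d2 + mask.float() * 1e-16) * (~mask).float()
d.sum().backward()
print(d)        # tensor([0., 2.], ...)
print(d2.grad)  # tensor([0.0000, 0.2500]) -- finite everywhere
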
Example #8
Source File: pytorch_backend_test.py    From TensorNetwork with Apache License 2.0
def test_random_uniform_boundaries(dtype):
  lb = 1.2
  ub = 4.8
  backend = pytorch_backend.PyTorchBackend()
  a = backend.random_uniform((4, 4), seed=10, dtype=dtype)
  b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype)
  assert (torch.ge(a, 0).byte().all() and torch.le(a, 1).byte().all() and
          torch.ge(b, lb).byte().all() and torch.le(b, ub).byte().all()) 
Example #9
Source File: base_model.py    From vae-audio with MIT License
def _bound_logvar_lookup(self):
        self.logvar_lookup.weight.data[torch.le(self.logvar_lookup.weight, self.logvar_bound)] = self.logvar_bound 
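
Indexing with the boolean mask returned by torch.le selects exactly the weights at or below the bound, so the assignment above is an in-place floor on the lookup table. The same pattern on a toy tensor:

import torch

w = torch.tensor([-3.0, -1.0, 0.5])
bound = -2.0

w[torch.le(w, bound)] = bound  # clamp entries below the bound up to it
print(w)  # tensor([-2.0000, -1.0000,  0.5000])
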
Example #10
Source File: relational.py    From heat with MIT License
def le(t1, t2):
    """
    Element-wise rich less than or equal comparison between values from operand t1 with respect to values of
    operand t2 (i.e. t1 <= t2), not commutative.
    Takes the first and second operand (scalar or tensor) whose elements are to be compared as argument.

    Parameters
    ----------
    t1: tensor or scalar
       The first operand to be compared less than or equal to second operand
    t2: tensor or scalar
       The second operand to be compared greater than or equal to first operand

    Returns
    -------
    result: ht.DNDarray
       A uint8-tensor holding 1 for all elements in which values of t1 are less than or equal to values of t2,
       0 for all other elements

    Examples
    --------
    >>> import heat as ht
    >>> T1 = ht.float32([[1, 2],[3, 4]])
    >>> ht.le(T1, 3.0)
    tensor([[1, 1],
            [1, 0]], dtype=torch.uint8)

    >>> T2 = ht.float32([[2, 2], [2, 2]])
    >>> ht.le(T1, T2)
    tensor([[1, 1],
            [0, 0]], dtype=torch.uint8)
    """
    return operations.__binary_op(torch.le, t1, t2) 
Example #11
Source File: losses.py    From second.pytorch with MIT License
def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    diff = prediction_tensor - target_tensor
    if self._code_weights is not None:
      code_weights = self._code_weights.type_as(prediction_tensor).to(target_tensor.device)
      diff = code_weights.view(1, 1, -1) * diff
    abs_diff = torch.abs(diff)
    abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma**2)).type_as(abs_diff)
    loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) \
      + (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
    if self._codewise:
      anchorwise_smooth_l1norm = loss
      if weights is not None:
        anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
    else:
      anchorwise_smooth_l1norm = torch.sum(loss, 2)#  * weights
      if weights is not None:
        anchorwise_smooth_l1norm *= weights
    return anchorwise_smooth_l1norm 
Example #12
Source File: lstmcrf.py    From ner_with_dependency with GNU General Public License v3.0
def neg_log_obj(self, words, word_seq_lens, batch_context_emb, chars, char_seq_lens, adj_matrixs, adjs_in, adjs_out, graphs, dep_label_adj, batch_dep_heads, tags, batch_dep_label, trees=None):
        features = self.neural_scoring(words, word_seq_lens, batch_context_emb, chars, char_seq_lens, adj_matrixs, adjs_in, adjs_out, graphs, dep_label_adj, batch_dep_heads, batch_dep_label, trees)

        all_scores = self.calculate_all_scores(features)

        batch_size = words.size(0)
        sent_len = words.size(1)

        maskTemp = torch.arange(1, sent_len + 1, dtype=torch.long).view(1, sent_len).expand(batch_size, sent_len).to(self.device)
        mask = torch.le(maskTemp, word_seq_lens.view(batch_size, 1).expand(batch_size, sent_len)).to(self.device)

        unlabed_score = self.forward_unlabeled(all_scores, word_seq_lens, mask)
        labeled_score = self.forward_labeled(all_scores, word_seq_lens, tags, mask)
        return unlabed_score - labeled_score 
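
Here torch.le turns sequence lengths into a padding mask: position i (1-based, from torch.arange(1, sent_len + 1)) is valid exactly when i <= word_seq_lens for that sentence. A standalone sketch with toy lengths:

import torch

word_seq_lens = torch.tensor([3, 1])  # two sentences of lengths 3 and 1
batch_size, sent_len = 2, 4

positions = torch.arange(1, sent_len + 1).view(1, sent_len).expand(batch_size, sent_len)
mask = torch.le(positions, word_seq_lens.view(batch_size, 1).expand(batch_size, sent_len))
print(mask)
# tensor([[ True,  True,  True, False],
#         [ True, False, False, False]])
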
Example #13
Source File: generic.py    From MatchLSTM-PyTorch with MIT License
def forward(self, y_pred, y_true):
        _assert_no_grad(y_true)
        P = y_true.float() * y_pred  # batch x time x class
        P = torch.sum(P, dim=1)  # batch x class
        gt_zero = torch.gt(P, 0.0).float()  # batch x class
        epsilon = torch.le(P, 0.0).float() * _eps  # batch x class
        log_P = torch.log(P + epsilon) * gt_zero  # batch x class
        sum_log_P = torch.sum(log_P, dim=1)  # n_b
        return -sum_log_P 
Example #14
Source File: layers.py    From qait_public with MIT License
def NegativeLogLoss(y_pred, y_true):
    """
    Shape:
        - y_pred:    batch x time
        - y_true:    batch
    """
    y_true_onehot = to_one_hot(y_true.unsqueeze(-1), y_pred.size(1))
    P = y_true_onehot.squeeze(-1) * y_pred  # batch x time
    P = torch.sum(P, dim=1)  # batch
    gt_zero = torch.gt(P, 0.0).float()  # batch
    epsilon = torch.le(P, 0.0).float() * 1e-8  # batch
    log_P = torch.log(P + epsilon) * gt_zero  # batch
    output = -log_P  # batch
    return output 
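
Examples #13 and #14 use the same guard against log(0): torch.le(P, 0.0) flags the entries where the log would be -inf, a tiny epsilon is added only there, and the complementary torch.gt mask zeroes their contribution afterwards. In isolation:

import torch

P = torch.tensor([0.8, 0.0, 0.3])

gt_zero = torch.gt(P, 0.0).float()         # 1 where log(P) is defined
epsilon = torch.le(P, 0.0).float() * 1e-8  # pad only the zero entries
log_P = torch.log(P + epsilon) * gt_zero   # padded entries contribute 0, not -inf or NaN
print(log_P)  # tensor([-0.2231,  0.0000, -1.2040])
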
Example #15
Source File: netmath.py    From ibeis with Apache License 2.0
def _siamese_metrics(output, label, margin=1):

        l2_dist_tensor = torch.from_numpy(output.data.cpu().numpy())
        label_tensor = torch.from_numpy(label.data.cpu().numpy())

        # Distance
        is_pos = torch.ByteTensor()
        POS_LABEL = 1
        NEG_LABEL = 0
        torch.eq(label_tensor, POS_LABEL, out=is_pos)  # y==1
        pos_dist = 0 if len(l2_dist_tensor[is_pos]) == 0 else l2_dist_tensor[is_pos].mean()
        neg_dist = 0 if len(l2_dist_tensor[~is_pos]) == 0 else l2_dist_tensor[~is_pos].mean()
        # print('same dis : diff dis  {} : {}'.format(l2_dist_tensor[is_pos == 0].mean(), l2_dist_tensor[is_pos].mean()))

        # accuracy
        pred_pos_flags = torch.ByteTensor()
        torch.le(l2_dist_tensor, margin, out=pred_pos_flags)  # y==1's idx

        cur_score = torch.FloatTensor(label.size(0))
        cur_score.fill_(NEG_LABEL)
        cur_score[pred_pos_flags] = POS_LABEL

        label_tensor_ = label_tensor.type(torch.FloatTensor)
        accuracy = torch.eq(cur_score, label_tensor_).sum() / label_tensor.size(0)

        metrics = {
            'accuracy': accuracy,
            'pos_dist': pos_dist,
            'neg_dist': neg_dist,
        }
        return metrics 
Example #16
Source File: model_re.py    From fastNLP with Apache License 2.0
def distance_bin(self, mention_distance):
        bins = torch.zeros(mention_distance.size()).byte().to(self.device)
        rg = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 7], [8, 15], [16, 31], [32, 63], [64, 300]]
        for t, k in enumerate(rg):
            i, j = k[0], k[1]
            b = torch.LongTensor([i]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
            m1 = torch.ge(mention_distance, b)
            e = torch.LongTensor([j]).unsqueeze(-1).expand(mention_distance.size()).to(self.device)
            m2 = torch.le(mention_distance, e)
            bins = bins + (t + 1) * (m1 & m2)
        return bins.long() 
Example #17
Source File: eval_util.py    From weakalign with MIT License
def pck(source_points,warped_points,L_pck,alpha=0.1):
    # compute percentage of correct keypoints
    batch_size=source_points.size(0)
    pck=torch.zeros((batch_size))
    for i in range(batch_size):
        p_src = source_points[i,:]
        p_wrp = warped_points[i,:]
        N_pts = torch.sum(torch.ne(p_src[0,:],-1)*torch.ne(p_src[1,:],-1))
        point_distance = torch.pow(torch.sum(torch.pow(p_src[:,:N_pts]-p_wrp[:,:N_pts],2),0),0.5)
        L_pck_mat = L_pck[i].expand_as(point_distance)
        correct_points = torch.le(point_distance,L_pck_mat*alpha)
        pck[i]=torch.mean(correct_points.float())
    return pck 
Example #18
Source File: relative_loss.py    From DenseMatchingBenchmark with MIT License
def loss_per_level(self, estDisp, gtDisp, label):
        N, C, H, W = estDisp.shape
        scaled_gtDisp = gtDisp
        scale = 1.0
        if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
            # compute scale per level and scale gtDisp
            scale = gtDisp.shape[-1] / (W * 1.0)
            scaled_gtDisp = gtDisp / scale
            scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))

        # mask for valid disparity
        # (start disparity, max disparity / scale)
        # Attention: the invalid disparity of KITTI is set as 0, be sure to mask it out
        mask = (scaled_gtDisp > self.start_disp) & (scaled_gtDisp < (self.max_disp / scale))
        if mask.sum() < 1.0:
            print('Relative loss: no point has a disparity within ({},{})!'.format(
                self.start_disp, self.max_disp / scale))
            loss = (torch.abs(estDisp - scaled_gtDisp) * mask.float()).mean()
            return loss

        # relative loss
        valid_pixel_number = mask.float().sum()
        diff = scaled_gtDisp[mask] - estDisp[mask]
        label = label[mask]
        # values whose absolute value is too large for torch.exp() are unsuitable for soft margin loss
        # split out absolute values greater than 66
        over_large_mask = torch.gt(torch.abs(diff), 66)
        over_large_diff = diff[over_large_mask]
        # keep absolute values less than or equal to 66
        proper_mask = torch.le(torch.abs(diff), 66)
        proper_diff = diff[proper_mask]
        # generate labels for soft margin loss
        label = label[proper_mask]
        loss = F.soft_margin_loss(proper_diff, label, reduction='sum') + torch.abs(over_large_diff).sum()
        loss = loss / valid_pixel_number

        return loss 
Example #19
Source File: loss.py    From DeepGrabCut-PyTorch with MIT License
def class_balanced_cross_entropy_loss(output, label, size_average=True, batch_average=True, void_pixels=None):
    """Define the class balanced cross entropy loss to train the network
    Args:
    output: Output of the network
    label: Ground truth label
    size_average: return per-element (pixel) average loss
    batch_average: return per-batch average loss
    void_pixels: pixels to ignore from the loss
    Returns:
    Tensor that evaluates the loss
    """
    assert(output.size() == label.size())

    labels = torch.ge(label, 0.5).float()

    num_labels_pos = torch.sum(labels)
    num_labels_neg = torch.sum(1.0 - labels)
    num_total = num_labels_pos + num_labels_neg

    output_gt_zero = torch.ge(output, 0).float()
    loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
        1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))

    loss_pos_pix = -torch.mul(labels, loss_val)
    loss_neg_pix = -torch.mul(1.0 - labels, loss_val)

    if void_pixels is not None:
        w_void = torch.le(void_pixels, 0.5).float()
        loss_pos_pix = torch.mul(w_void, loss_pos_pix)
        loss_neg_pix = torch.mul(w_void, loss_neg_pix)
        num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()

    loss_pos = torch.sum(loss_pos_pix)
    loss_neg = torch.sum(loss_neg_pix)

    final_loss = num_labels_neg / num_total * loss_pos + num_labels_pos / num_total * loss_neg

    if size_average:
        final_loss /= np.prod(label.size())
    elif batch_average:
        final_loss /= label.size()[0]

    return final_loss 
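
In this loss, torch.le(void_pixels, 0.5) yields a weight of 1 for pixels to keep and 0 for pixels marked void, so multiplying it into the per-pixel losses silences the ignored regions. The masking step in isolation:

import torch

void_pixels = torch.tensor([0.0, 1.0, 0.0, 1.0])   # 1 marks a pixel to ignore
per_pixel_loss = torch.tensor([0.2, 0.9, 0.4, 0.7])

w_void = torch.le(void_pixels, 0.5).float()  # 1 = keep, 0 = void
print(per_pixel_loss * w_void)               # tensor([0.2000, 0.0000, 0.4000, 0.0000])
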
Example #20
Source File: util.py    From Visualizing-CNNs-for-monocular-depth-estimation with MIT License
def evaluateError(output, target):
    # f = open('./record.txt', 'w')

    errors = {'MSE': 0, 'RMSE': 0, 'ABS_REL': 0, 'LG10': 0,
              'MAE': 0,  'DELTA1': 0, 'DELTA2': 0, 'DELTA3': 0}

    _output, _target, nanMask, nValidElement = setNanToZero(output, target)

    #
    if (nValidElement.data.cpu().numpy() > 0):
        diffMatrix = torch.abs(_output - _target)

        errors['MSE'] = torch.sum(torch.pow(diffMatrix, 2)) / nValidElement

        errors['RMSE'] = torch.sqrt(errors['MSE'])

        errors['MAE'] = torch.sum(diffMatrix) / nValidElement

        realMatrix = torch.div(diffMatrix, _target)
        realMatrix[nanMask] = 0
        errors['ABS_REL'] = torch.sum(realMatrix) / nValidElement

        #del realMatrix
        #del diffMatrix

        LG10Matrix = torch.abs(lg10(_output) - lg10(_target))
        LG10Matrix[nanMask] = 0
        errors['LG10'] = torch.sum(LG10Matrix) / nValidElement

        #del LG10Matrix

        yOverZ = torch.div(_output, _target)
        zOverY = torch.div(_target, _output)

        maxRatio = maxOfTwo(yOverZ, zOverY)

        errors['DELTA1'] = torch.sum(
            torch.le(maxRatio, 1.25).float()) / nValidElement
        errors['DELTA2'] = torch.sum(
            torch.le(maxRatio, math.pow(1.25, 2)).float()) / nValidElement
        errors['DELTA3'] = torch.sum(
            torch.le(maxRatio, math.pow(1.25, 3)).float()) / nValidElement

        errors['MSE'] = float(errors['MSE'].data.cpu().numpy())
        errors['RMSE'] = float(errors['RMSE'].data.cpu().numpy())
        errors['ABS_REL'] = float(errors['ABS_REL'].data.cpu().numpy())
        errors['LG10'] = float(errors['LG10'].data.cpu().numpy())
        errors['MAE'] = float(errors['MAE'].data.cpu().numpy())
        # errors['PERC'] = float(errors['PERC'].data.cpu().numpy())
        errors['DELTA1'] = float(errors['DELTA1'].data.cpu().numpy())
        errors['DELTA2'] = float(errors['DELTA2'].data.cpu().numpy())
        errors['DELTA3'] = float(errors['DELTA3'].data.cpu().numpy())

        #del yOverZ, zOverY, maxRatio
        # f.write(' nValidElement = ' + str(nValidElement) + ' _output ' + str(_output) + ' _target ' + str(_target) + 'maxRatio ' + str(maxRatio) + 'torch.le(maxRatio, 1.25).float()' + str(torch.le(maxRatio, 1.25).float()) + '\n')

        #pdb.set_trace()

    return errors 
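
The DELTA metrics count the fraction of valid pixels whose prediction is within a factor of 1.25 (or 1.25^2, 1.25^3) of the ground truth, which is exactly what torch.le extracts from the ratio matrix. A toy version of DELTA1:

import torch

pred = torch.tensor([1.0, 2.0, 4.0])
target = torch.tensor([1.1, 2.5, 1.0])

max_ratio = torch.max(pred / target, target / pred)
delta1 = torch.le(max_ratio, 1.25).float().mean()
print(delta1)  # 0.6667: two of three pixels are within a factor of 1.25
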
Example #21
Source File: analyse_cnn_scores.py    From tracking_wo_bnw with GNU General Public License v3.0
def calcScores(network, data, thresholds):
    # calculate labels
    ind = 0
    meta = []
    for d in data:
        meta += [ind]*len(d)
        ind += 1
    labels = torch.LongTensor(meta)

    # images have to be center cropped to right size from (288, 144) to (256, 128)
    images = []
    transformation = Compose([CenterCrop((256, 128)), ToTensor(),
                        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    for d in data:
        tens = []
        for im in d:
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            im = Image.fromarray(im)
            im = transformation(im)
            tens.append(im)
        images.append(torch.stack(tens, 0))

    embeddings = torch.cat([network(Variable(im.cuda(), volatile=True)).data for im in images],0).cpu()

    pos_mask = _get_anchor_positive_triplet_mask(labels)
    neg_mask = _get_anchor_negative_triplet_mask(labels)

    # compute pairwise Euclidean distance matrix
    n = embeddings.size(0)
    m = embeddings.size(0)
    d = embeddings.size(1)

    x = embeddings.unsqueeze(1).expand(n, m, d)
    y = embeddings.unsqueeze(0).expand(n, m, d)

    dist = torch.sqrt(torch.pow(x - y, 2).sum(2))

    pos_distances = dist * pos_mask.float()
    neg_distances = dist * neg_mask.float()
    num_pos = pos_mask.sum()
    num_neg = neg_mask.sum()
    # calculate the right classifications
    for t in thresholds:
        # every zero entry is also <= t, so filter with the positive mask here
        pos_right = torch.le(pos_distances, t) * pos_mask
        pos_right = pos_right.sum()
        neg_right = torch.gt(neg_distances, t).sum()
        
        print("[*] Threshold set to: {}".format(t))
        print("Positive right classifications: {:.2f}% {}/{}".format(pos_right/num_pos*100, pos_right, num_pos))
        print("Negative right classifications: {:.2f}% {}/{}".format(neg_right/num_neg*100, neg_right, num_neg))
        print("All right classifications: {:.2f}% {}/{}".format((pos_right+neg_right)/(num_pos+num_neg)*100,
                                                                pos_right+neg_right, num_pos+num_neg)) 
Example #22
Source File: tacotron2.py    From NeMo with Apache License 2.0
def infer(self, memory, memory_lengths):
        """ Decoder inference
        PARAMS
        ------
        memory: Encoder outputs
        memory_lengths: Encoder output lengths
        RETURNS
        -------
        mel_outputs: mel outputs from the decoder
        gate_outputs: gate outputs from the decoder
        alignments: sequence of attention weights from the decoder
        mel_lengths: lengths of the generated mel spectrograms
        """
        decoder_input = self.get_go_frame(memory)

        if memory.size(0) > 1:
            mask = ~get_mask_from_lengths(memory_lengths)
        else:
            mask = None

        self.initialize_decoder_states(memory, mask=mask)

        mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32)
        not_finished = torch.ones([memory.size(0)], dtype=torch.int32)
        if torch.cuda.is_available():
            mel_lengths = mel_lengths.cuda()
            not_finished = not_finished.cuda()

        mel_outputs, gate_outputs, alignments = [], [], []
        while True:
            decoder_input = self.prenet(decoder_input, inference=True)
            mel_output, gate_output, alignment = self.decode(decoder_input)

            dec = torch.le(torch.sigmoid(gate_output.data), self.gate_threshold).to(torch.int32).squeeze(1)

            not_finished = not_finished * dec
            mel_lengths += not_finished

            if self.early_stopping and torch.sum(not_finished) == 0:
                break

            mel_outputs += [mel_output.squeeze(1)]
            gate_outputs += [gate_output]
            alignments += [alignment]

            if len(mel_outputs) == self.max_decoder_steps:
                logging.warning("Reached max decoder steps %d.", self.max_decoder_steps)
                break

            decoder_input = mel_output

        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(mel_outputs, gate_outputs, alignments)

        return mel_outputs, gate_outputs, alignments, mel_lengths 
Example #23
Source File: similarity.py    From metric-learning-divide-and-conquer with GNU Lesser General Public License v3.0
def pairwise_distance(a, squared=False):
    """Computes the pairwise distance matrix with numerical stability.
    output[i, j] = || feature[i, :] - feature[j, :] ||_2
    Args:
    feature: 2-D Tensor of size [number of data, feature dimension].
    squared: Boolean, whether or not to square the pairwise distances.
    Returns:
    pairwise_distances: 2-D Tensor of size [number of data, number of data].
    """
    a = torch.as_tensor(np.atleast_2d(a))
    pairwise_distances_squared = torch.add(
        a.pow(2).sum(dim=1, keepdim=True).expand(a.size(0), -1),
        torch.t(a).pow(2).sum(dim=0, keepdim=True).expand(a.size(0), -1)
    ) - 2 * (
        torch.mm(a, torch.t(a))
    )

    # Deal with numerical inaccuracies. Set small negatives to zero.
    pairwise_distances_squared = torch.clamp(
        pairwise_distances_squared, min=0.0
    )

    # Get the mask where the zero distances are at.
    error_mask = torch.le(pairwise_distances_squared, 0.0)

    # Optionally take the sqrt.
    if squared:
        pairwise_distances = pairwise_distances_squared
    else:
        pairwise_distances = torch.sqrt(
            pairwise_distances_squared + error_mask.float() * 1e-16
        )

    # Undo conditionally adding 1e-16.
    pairwise_distances = torch.mul(
        pairwise_distances,
        (error_mask == False).float()
    )

    # Explicitly set diagonals to zero.
    mask_offdiagonals = 1 - torch.eye(
        *pairwise_distances.size(),
        device=pairwise_distances.device
    )
    pairwise_distances = torch.mul(pairwise_distances, mask_offdiagonals).data.cpu().numpy()

    return pairwise_distances 
Example #24
Source File: test_fss.py    From PySyft with Apache License 2.0
def test_fss_class(op):
    class_ = {"eq": DPF, "le": DIF}[op]
    th_op = {"eq": th.eq, "le": th.le}[op]
    gather_op = {"eq": "__add__", "le": "__xor__"}[op]

    # single value
    primitive = class_.keygen(n_values=1)
    alpha, s_00, s_01, *CW = primitive
    mask = th.randint(0, 2 ** n, alpha.shape)
    k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]

    x = th.tensor([0])
    x_masked = x + k0[0] + k1[0]
    y0 = class_.eval(0, x_masked, *k0[1:])
    y1 = class_.eval(1, x_masked, *k1[1:])

    assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()

    # 1D tensor
    primitive = class_.keygen(n_values=3)
    alpha, s_00, s_01, *CW = primitive
    mask = th.randint(0, 2 ** n, alpha.shape)
    k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]

    x = th.tensor([0, 2, -2])
    x_masked = x + k0[0] + k1[0]
    y0 = class_.eval(0, x_masked, *k0[1:])
    y1 = class_.eval(1, x_masked, *k1[1:])

    assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()

    # 2D tensor
    primitive = class_.keygen(n_values=4)
    alpha, s_00, s_01, *CW = primitive
    mask = th.randint(0, 2 ** n, alpha.shape)
    k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]

    x = th.tensor([[0, 2], [-2, 0]])
    x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
    y0 = class_.eval(0, x_masked, *k0[1:])
    y1 = class_.eval(1, x_masked, *k1[1:])

    assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()

    # 3D tensor
    primitive = class_.keygen(n_values=8)
    alpha, s_00, s_01, *CW = primitive
    mask = th.randint(0, 2 ** n, alpha.shape)
    k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]

    x = th.tensor([[[0, 2], [-2, 0]], [[0, 2], [-2, 0]]])
    x_masked = x + k0[0].reshape(x.shape) + k1[0].reshape(x.shape)
    y0 = class_.eval(0, x_masked, *k0[1:])
    y1 = class_.eval(1, x_masked, *k1[1:])

    assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all() 
Example #25
Source File: anchor.py    From A2J with MIT License
def forward(self, heads, annotations):
        alpha = 0.25
        gamma = 2.0
        if self.is_3D:
            classifications, regressions, depthregressions = heads
        else:
            classifications, regressions = heads
        #classifications,scalar,mu = classifications_tuple
        batch_size = classifications.shape[0]
        classification_losses = []
        regression_losses = []

        anchor = self.all_anchors # num_anchors(w*h*A) x 2
        anchor_regression_loss_tuple = []

        for j in range(batch_size):

            classification = classifications[j, :, :] #N*(w*h*A)*P
            regression = regressions[j, :, :, :] #N*(w*h*A)*P*2
            if self.is_3D:
                depthregression = depthregressions[j, :, :]#N*(w*h*A)*P
            bbox_annotation = annotations[j, :, :]#N*P*3=>P*3
            reg_weight = F.softmax(classification,dim=0) #(w*h*A)*P
            reg_weight_xy = torch.unsqueeze(reg_weight,2).expand(reg_weight.shape[0],reg_weight.shape[1],2)#(w*h*A)*P*2         
            gt_xy = bbox_annotation[:,:2]#P*2 

            anchor_diff = torch.abs(gt_xy-(reg_weight_xy*torch.unsqueeze(anchor,1)).sum(0)) #P*2
            anchor_loss = torch.where(
                torch.le(anchor_diff, 1),
                0.5 * 1 * torch.pow(anchor_diff, 2),
                anchor_diff - 0.5 / 1
            )
            anchor_regression_loss = anchor_loss.mean()
            anchor_regression_loss_tuple.append(anchor_regression_loss)
#######################regression 4 spatial###################
            reg = torch.unsqueeze(anchor,1) + regression #(w*h*A)*P*2
            regression_diff = torch.abs(gt_xy-(reg_weight_xy*reg).sum(0)) #P*2
            regression_loss = torch.where(
                torch.le(regression_diff, 1),
                0.5 * 1 * torch.pow(regression_diff, 2),
                regression_diff - 0.5 / 1
                )
            regression_loss = regression_loss.mean()*self.spatialFactor
########################regression 4 depth###################
            if self.is_3D:
                gt_depth = bbox_annotation[:,2] #P
                regression_diff_depth = torch.abs(gt_depth - (reg_weight*depthregression).sum(0))#(w*h*A)*P       
                regression_loss_depth = torch.where(
                    torch.le(regression_diff_depth, 3),
                    0.5 * (1/3) * torch.pow(regression_diff_depth, 2),
                    regression_diff_depth - 0.5 / (1/3)
                    )
                regression_loss += regression_loss_depth.mean()
############################################################
            regression_losses.append(regression_loss)
        return torch.stack(anchor_regression_loss_tuple).mean(dim=0, keepdim=True), torch.stack(regression_losses).mean(dim=0, keepdim=True) 
Example #26
Source File: loss.py    From DEXTR-PyTorch with GNU General Public License v3.0
def class_balanced_cross_entropy_loss(output, label, size_average=True, batch_average=True, void_pixels=None):
    """Define the class balanced cross entropy loss to train the network
    Args:
    output: Output of the network
    label: Ground truth label
    size_average: return per-element (pixel) average loss
    batch_average: return per-batch average loss
    void_pixels: pixels to ignore from the loss
    Returns:
    Tensor that evaluates the loss
    """
    assert(output.size() == label.size())

    labels = torch.ge(label, 0.5).float()

    num_labels_pos = torch.sum(labels)
    num_labels_neg = torch.sum(1.0 - labels)
    num_total = num_labels_pos + num_labels_neg

    output_gt_zero = torch.ge(output, 0).float()
    loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
        1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))

    loss_pos_pix = -torch.mul(labels, loss_val)
    loss_neg_pix = -torch.mul(1.0 - labels, loss_val)

    if void_pixels is not None:
        w_void = torch.le(void_pixels, 0.5).float()
        loss_pos_pix = torch.mul(w_void, loss_pos_pix)
        loss_neg_pix = torch.mul(w_void, loss_neg_pix)
        num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()

    loss_pos = torch.sum(loss_pos_pix)
    loss_neg = torch.sum(loss_neg_pix)

    final_loss = num_labels_neg / num_total * loss_pos + num_labels_pos / num_total * loss_neg

    if size_average:
        final_loss /= np.prod(label.size())
    elif batch_average:
        final_loss /= label.size()[0]

    return final_loss