Python torch.sum() Examples

The following are 26 code examples of torch.sum(), collected from open-source projects. The attribution above each example names the original project and source file. You may also want to check out all available functions/classes of the module torch, or try the search function.
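As a quick reference before the examples: torch.sum reduces over all elements by default, or along the dimensions given by dim; keepdim=True retains the reduced dimension with size 1. A minimal illustration:

import torch

x = torch.arange(6.).reshape(2, 3)            # tensor([[0., 1., 2.], [3., 4., 5.]])
total = torch.sum(x)                          # tensor(15.)
per_row = torch.sum(x, dim=1)                 # tensor([ 3., 12.])
per_col = torch.sum(x, dim=0, keepdim=True)   # shape (1, 3): tensor([[3., 5., 7.]])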
Example #1
Source File: losses.py    From ACAN with MIT License
def forward(self, Q, P):
        """
        Parameters
        ----------
        P: ground truth probability distribution [batch_size, n, n]
        Q: predicted probability distribution [batch_size, n, n]

        Description
        -----------
        compute the KL divergence of attention maps. Here P and Q denote 
        the pixel-level attention map with n spatial positions.
        """
        kl_loss = P * safe_log(P / Q)
        pixel_loss = torch.sum(kl_loss, dim=-1)
        total_loss = torch.mean(pixel_loss)
        return total_loss 
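safe_log here is a helper defined elsewhere in ACAN's losses.py, not a PyTorch function. Judging from its use above, it presumably guards against log(0); a minimal sketch under that assumption (the epsilon is illustrative, not taken from the source):

import torch

def safe_log(t, eps=1e-8):
    # hypothetical implementation: clamp away from zero before taking the log
    return torch.log(torch.clamp(t, min=eps))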
Example #2
Source File: model2.py    From controllable-text-attribute-transfer with Apache License 2.0
def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        """
        memory = self.encode(src, src_mask)  # (batch_size, max_src_seq, d_model)
        # attented_mem=self.attention(memory,memory,memory,src_mask)
        # memory=attented_mem
        score = self.attention(memory, memory, src_mask)
        attent_memory = score.bmm(memory)
        # memory=self.linear(torch.cat([memory,attent_memory],dim=-1))

        memory, _ = self.gru(attent_memory)
        '''
        score=torch.sigmoid(self.linear(memory))
        memory=memory*score
        '''
        latent = torch.sum(memory, dim=1)  # (batch_size, d_model)
        logit = self.decode(latent.unsqueeze(1), tgt, tgt_mask)  # (batch_size, max_tgt_seq, d_model)
        # logit,_=self.gru_decoder(logit)
        prob = self.generator(logit)  # (batch_size, max_seq, vocab_size)
        return latent, prob 
Example #3
Source File: model.py    From controllable-text-attribute-transfer with Apache License 2.0
def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        """
        latent = self.encode(src, src_mask)  # (batch_size, max_src_seq, d_model)
        latent = self.sigmoid(latent)
        # memory = self.position_layer(memory)

        latent = torch.sum(latent, dim=1)  # (batch_size, d_model)

        # latent = self.memory2latent(memory)  # (batch_size, max_src_seq, latent_size)

        # latent = self.memory2latent(memory)
        # memory = self.latent2memory(latent)  # (batch_size, max_src_seq, d_model)

        logit = self.decode(latent.unsqueeze(1), tgt, tgt_mask)  # (batch_size, max_tgt_seq, d_model)
        prob = self.generator(logit)  # (batch_size, max_seq, vocab_size)
        return latent, prob 
Example #4
Source File: gridgen.py    From cascade-rcnn_Pytorch with MIT License
def forward(self, input1):
        self.batchgrid3d = torch.zeros(torch.Size([input1.size(0)]) + self.grid3d.size())

        for i in range(input1.size(0)):
            self.batchgrid3d[i] = self.grid3d

        self.batchgrid3d = Variable(self.batchgrid3d)
        #print(self.batchgrid3d)

        x = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,0:4]), 3)
        y = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,4:8]), 3)
        z = torch.sum(torch.mul(self.batchgrid3d, input1[:,:,:,8:]), 3)
        #print(x)
        r = torch.sqrt(x**2 + y**2 + z**2) + 1e-5

        #print(r)
        theta = torch.acos(z / r) / (np.pi / 2) - 1
        #phi = torch.atan(y/x)
        phi = torch.atan(y / (x + 1e-5)) + np.pi * x.lt(0).type(torch.FloatTensor) * (y.ge(0).type(torch.FloatTensor) - y.lt(0).type(torch.FloatTensor))
        phi = phi / np.pi

        output = torch.cat([theta, phi], 3)

        return output 
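Each torch.sum(torch.mul(...), 3) above is a per-position dot product over the last axis. Assuming both operands have shape (B, H, W, 4), the same reduction can be written with torch.einsum:

import torch

a = torch.randn(2, 4, 4, 4)
b = torch.randn(2, 4, 4, 4)
assert torch.allclose(torch.sum(torch.mul(a, b), 3),
                      torch.einsum('bhwc,bhwc->bhw', a, b))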
Example #5
Source File: hgnn.py    From hgraph2graph with MIT License
def forward(self, x_graphs, x_tensors, y_graphs, y_tensors, y_orders, beta):
        x_tensors = make_cuda(x_tensors)
        y_tensors = make_cuda(y_tensors)
        x_root_vecs, x_tree_vecs, x_graph_vecs = self.encode(x_tensors)
        _, y_tree_vecs, y_graph_vecs = self.encode(y_tensors)

        diff_tree_vecs = y_tree_vecs.sum(dim=1) - x_tree_vecs.sum(dim=1)
        diff_graph_vecs = y_graph_vecs.sum(dim=1) - x_graph_vecs.sum(dim=1)
        diff_tree_vecs, tree_kl = self.rsample(diff_tree_vecs, self.T_mean, self.T_var)
        diff_graph_vecs, graph_kl = self.rsample(diff_graph_vecs, self.G_mean, self.G_var)
        kl_div = tree_kl + graph_kl

        diff_tree_vecs = diff_tree_vecs.unsqueeze(1).expand(-1, x_tree_vecs.size(1), -1)
        diff_graph_vecs = diff_graph_vecs.unsqueeze(1).expand(-1, x_graph_vecs.size(1), -1)
        x_tree_vecs = self.W_tree( torch.cat([x_tree_vecs, diff_tree_vecs], dim=-1) )
        x_graph_vecs = self.W_graph( torch.cat([x_graph_vecs, diff_graph_vecs], dim=-1) )

        loss, wacc, iacc, tacc, sacc = self.decoder((x_root_vecs, x_tree_vecs, x_graph_vecs), y_graphs, y_tensors, y_orders)
        return loss + beta * kl_div, kl_div.item(), wacc, iacc, tacc, sacc 
Example #6
Source File: losses.py    From ACAN with MIT License
def __init__(self, ignore_index=None, reduction='sum', use_weights=False, weight=None):
        """
        Parameters
        ----------
        ignore_index : Specifies a target value that is ignored
                       and does not contribute to the input gradient
        reduction : Specifies the reduction to apply to the output:
                    'mean' | 'sum'. 'mean': elementwise mean,
                    'sum': class dim will be summed and batch dim will be averaged.
        use_weights : whether to use class weights.
        weight : Tensor, optional
                a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        super(_BaseEntropyLoss2d, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.use_weights = use_weights
        if use_weights:
            print("w/ class balance")
            print(weight)
            self.weight = torch.FloatTensor(weight).cuda()
        else:
            print("w/o class balance")
            self.weight = None 
Example #7
Source File: metrics.py    From dogTorch with MIT License
def record_output(self, output, output_indices, target, prev_absolutes,
                      next_absolutes, batch_size=1):
        assert output.dim() == 4
        assert target.dim() == 3

        _, predictions = output.max(3)

        # Compute per class accuracy for unbalanced data.
        sequence_length = output.size(1)
        num_label = output.size(2)
        num_class = output.size(3)
        correct_alljoint = (target == predictions).float().sum(2)
        sum_of_corrects = correct_alljoint.sum(1)
        max_value = num_label * sequence_length
        count_correct = (sum_of_corrects == max_value).float().mean()
        correct_per_seq = ((correct_alljoint == num_label - 1).sum(1).float() /
                           sequence_length).mean()
        self.meter.update(
            torch.Tensor([count_correct * 100, correct_per_seq * 100]),
            batch_size) 
Example #8
Source File: demo_letter_duvenaud.py    From nmp_qc with MIT License
def plot_examples(data_loader, model, epoch, plotter, ind = [0, 10, 20]):

    # switch to evaluate mode
    model.eval()

    for i, (g, h, e, target) in enumerate(data_loader):
        if i in ind:
            subfolder_path = 'batch_' + str(i) + '_t_' + str(int(target[0][0])) + '/epoch_' + str(epoch) + '/'
            if not os.path.isdir(args.plotPath + subfolder_path):
                os.makedirs(args.plotPath + subfolder_path)

            num_nodes = torch.sum(torch.sum(torch.abs(h[0, :, :]), 1) > 0)
            am = g[0, 0:num_nodes, 0:num_nodes].numpy()
            pos = h[0, 0:num_nodes, :].numpy()

            plotter.plot_graph(am, position=pos, fig_name=subfolder_path+str(i) + '_input.png')

            # Prepare input data
            if args.cuda:
                g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
            g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)

            # Compute output
            model(g, h, e, lambda cls, id: plotter.plot_graph(am, position=pos, cls=cls,
                                                          fig_name=subfolder_path+ id)) 
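The nested torch.sum above counts the rows of h[0] that contain any non-zero feature, i.e. the real nodes in a zero-padded graph. The pattern in isolation:

import torch

h = torch.tensor([[1., 0.],
                  [0., 2.],
                  [0., 0.]])                            # last row is padding
num_nodes = torch.sum(torch.sum(torch.abs(h), 1) > 0)   # tensor(2)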
Example #9
Source File: metrics.py    From dogTorch with MIT License
def final_report(self):
        correct_preds = self.confusion[:, :,
                                       range(self.args.num_classes),
                                       range(self.args.num_classes)]
        correct_percentage = correct_preds / (self.confusion.sum(3) + 1e-6) * 100
        balance_accuracy = correct_percentage.mean()
        per_sequence_element_accuracy = correct_percentage.view(
            correct_percentage.size(0), -1).mean(1)
        per_sequence_report = ', '.join(
            '{:.2f}'.format(acc) for acc in per_sequence_element_accuracy)
        report = ('Accuracy {meter.avg[0]:.2f}   Balanced {balanced:.2f}   '
                  'PerSeq [{per_seq}]').format(meter=self.meter,
                                               balanced=balance_accuracy,
                                               per_seq=per_sequence_report)
        report += '   Accuracy Matrix (seq x imu x label): {}'.format(
            correct_percentage)
        return report 
Example #10
Source File: lovasz_losses.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
      logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
      labels: [P] Tensor, binary ground truth labels (0 or 1)
      ignore: label to ignore
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    return loss 
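A toy invocation, assuming the imports and the lovasz_grad helper defined in this file are in scope (values are illustrative only):

import torch

logits = torch.tensor([2.0, -1.0, 0.5])    # one logit per pixel
labels = torch.tensor([1.0, 0.0, 1.0])     # binary ground truth
loss = lovasz_hinge_flat(logits, labels)   # scalar loss tensor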
Example #11
Source File: lovasz_losses.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """
    Array of IoU for each (non ignored) class
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        iou = []    
        for i in range(C):
            if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
                intersection = ((label == i) & (pred == i)).sum()
                union = ((label == i) | ((pred == i) & (label != ignore))).sum()
                if not union:
                    iou.append(EMPTY)
                else:
                    iou.append(float(intersection) / float(union))
        ious.append(iou)
    ious = [mean(iou) for iou in zip(*ious)]  # mean across images if per_image
    return 100 * np.array(ious)


# --------------------------- BINARY LOSSES --------------------------- 
Example #12
Source File: lovasz_losses.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
    """
    IoU for foreground class
    binary: 1 foreground, 0 background
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        intersection = ((label == 1) & (pred == 1)).sum()
        union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
        if not union:
            iou = EMPTY
        else:
            iou = float(intersection) / float(union)
        ious.append(iou)
    iou = mean(ious)  # mean across images if per_image
    return 100 * iou 
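The intersection and union above rely on summing boolean tensors, which yields an integer count:

import torch

pred = torch.tensor([1, 1, 0])
label = torch.tensor([1, 0, 0])
intersection = ((label == 1) & (pred == 1)).sum()   # tensor(1)
union = ((label == 1) | (pred == 1)).sum()          # tensor(2)
# iou = float(intersection) / float(union) == 0.5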
Example #13
Source File: 49_word2vec.py    From deep-learning-note with MIT License
def train(net, lr, num_epochs):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("train on", device)
    net = net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    for epoch in range(num_epochs):
        start, l_sum, n = time.time(), 0.0, 0
        for batch in data_iter:
            center, context_negative, mask, label = [d.to(device) for d in batch]

            pred = skip_gram(center, context_negative, net[0], net[1])

            # use the mask to keep padding entries from affecting the loss
            l = (loss(pred.view(label.shape), label, mask) *
                 mask.shape[1] / mask.float().sum(dim=1)).mean()  # average loss over the batch
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            l_sum += l.cpu().item()
            n += 1
        print('epoch %d, loss %.2f, time %.2fs'
              % (epoch + 1, l_sum / n, time.time() - start)) 
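The mask.shape[1] / mask.float().sum(dim=1) factor rescales each row's loss so that averaging over the padded length behaves like averaging over the real tokens only:

import torch

mask = torch.tensor([[1., 1., 0.],
                     [1., 0., 0.]])       # 1 = real token, 0 = padding
scale = mask.shape[1] / mask.sum(dim=1)   # tensor([1.5000, 3.0000])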
Example #14
Source File: losses.py    From ACAN with MIT License
def get_ohem_label(self, pred, label):
        n, c, h, w = pred.size()
        if self.ignore_index is None:
            self.ignore_index = c + 1

        input_label = label.data.cpu().numpy().ravel().astype(np.int32)
        x = np.rollaxis(pred.data.cpu().numpy(), 1).reshape((c, -1))
        input_prob = np.exp(x - x.max(axis=0, keepdims=True))
        input_prob /= input_prob.sum(axis=0, keepdims=True)

        valid_flag = input_label != self.ignore_index
        valid_inds = np.where(valid_flag)[0]
        valid_label = input_label[valid_flag]
        num_valid = valid_flag.sum()
        if self.min_kept >= num_valid:
            print('Labels: {}'.format(num_valid))
        elif num_valid > 0:
            valid_prob = input_prob[:,valid_flag]
            valid_prob = valid_prob[valid_label, np.arange(len(valid_label), dtype=np.int32)]
            threshold = self.thresh
            if self.min_kept > 0:
                index = valid_prob.argsort()
                threshold_index = index[ min(len(index), self.min_kept) - 1 ]
                if valid_prob[threshold_index] > self.thresh:
                    threshold = valid_prob[threshold_index]
            kept_flag = valid_prob <= threshold
            valid_kept_inds = valid_inds[kept_flag]
            valid_inds = valid_kept_inds

        self.ohem_ratio = len(valid_inds) / num_valid
        #print('Max prob: {:.4f}, hard ratio: {:.4f} = {} / {} '.format(input_prob.max(), self.ohem_ratio, len(valid_inds), num_valid))
        valid_kept_label = input_label[valid_inds].copy()
        input_label.fill(self.ignore_index)
        input_label[valid_inds] = valid_kept_label
        #valid_flag_new = input_label != self.ignore_index
        # print(np.sum(valid_flag_new))
        label = torch.from_numpy(input_label.reshape(label.size())).long().cuda()
        return label 
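The input_prob computation above is a numerically stable softmax over the class axis in NumPy; subtracting the per-column maximum before exponentiating avoids overflow. The pattern in isolation:

import numpy as np

x = np.array([[1.0, 3.0],
              [2.0, 0.0]])                    # (num_classes, num_pixels) scores
p = np.exp(x - x.max(axis=0, keepdims=True))
p /= p.sum(axis=0, keepdims=True)             # each column now sums to 1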
Example #15
Source File: train_asr.py    From End-to-end-ASR-Pytorch with MIT License
def fetch_data(self, data):
        ''' Move data to device and compute text seq. length'''
        _, feat, feat_len, txt = data
        feat = feat.to(self.device)
        feat_len = feat_len.to(self.device)
        txt = txt.to(self.device)
        txt_len = torch.sum(txt != 0, dim=-1)

        return feat, feat_len, txt, txt_len 
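txt_len is obtained by summing a boolean mask: comparing against the padding index (0) and reducing over the last dimension counts the real tokens per sequence:

import torch

txt = torch.tensor([[5, 2, 9, 0, 0],
                    [7, 0, 0, 0, 0]])    # 0 = padding index
txt_len = torch.sum(txt != 0, dim=-1)    # tensor([3, 1])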
Example #16
Source File: metrics.py    From dogTorch with MIT License
def __init__(self):
        self.val = 0
        self.sum = 0
        self.count = 0 
Example #17
Source File: metrics.py    From dogTorch with MIT License
def record_output(self, output, output_indices, target, prev_absolutes,
                      next_absolutes, batch_size=1):
        assert output.dim() == 4
        assert target.dim() == 3

        _, predictions = output.max(3)

        # Compute per class accuracy for unbalanced data.
        sequence_length = output.size(1)
        num_label = output.size(2)
        num_class = output.size(3)

        if self.confusion is None:
            # Confusion matrix is 4D because it's defined per label and sequence
            # element.
            self.confusion = torch.zeros(sequence_length, num_label, num_class,
                                         num_class)
        # Compute per class accuracy in this batch and update the confusion
        # matrix.
        per_class_acc = []
        for seq_id in range(sequence_length):
            for imu_id in range(num_label):
                imu_target = target[:, seq_id, imu_id].contiguous()
                imu_preds = predictions[:, seq_id, imu_id].contiguous()
                for label, pred in zip(imu_target.view(-1), imu_preds.view(-1)):
                    self.confusion[seq_id, imu_id, label, pred] += 1.0
                for class_id in range(num_class):
                    # Look at targets where label is class_id, and see what
                    # percentage of predictions are class_id.
                    preds_for_class = imu_preds[imu_target == class_id]
                    if len(preds_for_class) > 0:
                        per_class_acc.append(
                            (preds_for_class == class_id).float().mean())
        per_class_acc = sum(per_class_acc) / len(per_class_acc)
        accuracy = (predictions == target).float().mean()
        self.meter.update(
            torch.Tensor([100 * accuracy, 100 * per_class_acc]), batch_size) 
Example #18
Source File: lovasz_losses.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def hinge_jaccard_loss(probas, labels, ignore=None, classes='present', hinge=0.1, smooth=100):
    """
    Multi-class Hinge Jaccard loss
      probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
              Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
      labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
      ignore: void class labels
    """
    vprobas, vlabels = flatten_probas(probas, labels, ignore)
    C = vprobas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        if c in vlabels:
            c_sample_ind = vlabels == c
            cprobas = vprobas[c_sample_ind, :]
            non_c_ind = np.array([a for a in class_to_sum if a != c])
            class_pred = cprobas[:, c]
            max_non_class_pred = torch.max(cprobas[:, non_c_ind], dim=1)[0]
            TP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max=hinge) + 1.) + smooth
            FN = torch.sum(torch.clamp(max_non_class_pred - class_pred, min=-hinge) + hinge)

            if (~c_sample_ind).sum() == 0:
                FP = 0
            else:
                nonc_probas = vprobas[~c_sample_ind, :]
                class_pred = nonc_probas[:, c]
                max_non_class_pred = torch.max(nonc_probas[:, non_c_ind], dim=1)[0]
                FP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max=hinge) + 1.)

            losses.append(1 - TP / (TP + FP + FN))

    if len(losses) == 0: return 0
    return mean(losses)

# --------------------------- HELPER FUNCTIONS --------------------------- 
Example #19
Source File: lovasz_losses.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def lovasz_softmax_flat(probas, labels, classes='present'):
    """
    Multi-class Lovasz-Softmax loss
      probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
      labels: [P] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float() # foreground for class c
        if classes == 'present' and fg.sum() == 0:
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses) 
Example #20
Source File: gridgen.py    From cascade-rcnn_Pytorch with MIT License
def forward(self, input1):
        self.batchgrid = torch.zeros(torch.Size([input1.size(0)]) + self.grid.size())

        for i in range(input1.size(0)):
            self.batchgrid[i] = self.grid

        self.batchgrid = Variable(self.batchgrid)
        #print self.batchgrid,  input1[:,:,:,0:3]
        #print self.batchgrid,  input1[:,:,:,4:6]
        x = torch.mul(self.batchgrid, input1[:,:,:,0:3])
        y = torch.mul(self.batchgrid, input1[:,:,:,3:6])

        output = torch.cat([torch.sum(x, 3), torch.sum(y, 3)], 3)
        return output 
Example #21
Source File: lovasz_losses.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    p = len(gt_sorted)
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.float().cumsum(0)
    union = gts + (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - intersection / union
    if p > 1: # cover 1-pixel case
        jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
    return jaccard 
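The intersection and union vectors rely on a cumulative sum ending at the total sum, so intersection[k] counts the positives not yet covered by the first k+1 sorted errors. A quick check of the two reductions involved:

import torch

gt_sorted = torch.tensor([1., 1., 0., 1.])
gts = gt_sorted.sum()           # tensor(3.)
running = gt_sorted.cumsum(0)   # tensor([1., 2., 3., 3.])
assert running[-1] == gts       # the cumsum ends at the total sum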
Example #22
Source File: model.py    From treelstm.pytorch with MIT License
def node_forward(self, inputs, child_c, child_h):
        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)

        iou = self.ioux(inputs) + self.iouh(child_h_sum)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)

        f = torch.sigmoid(
            self.fh(child_h) +
            self.fx(inputs).repeat(len(child_h), 1)
        )
        fc = torch.mul(f, child_c)

        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
        h = torch.mul(o, torch.tanh(c))
        return c, h 
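The dim=0, keepdim=True reduction pools all children into a single row, keeping a leading dimension of 1 so the result can be fed straight into the linear layer self.iouh:

import torch

child_h = torch.randn(3, 5)                             # 3 children, hidden size 5
child_h_sum = torch.sum(child_h, dim=0, keepdim=True)   # shape (1, 5)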
Example #23
Source File: util.py    From DeepLab_v3_plus with MIT License
def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count 
Example #24
Source File: util.py    From DeepLab_v3_plus with MIT License
def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0 
Example #25
Source File: 49_word2vec.py    From deep-learning-note with MIT License
def get_similar_tokens(query_token, k, embed):
    W = embed.weight.data
    x = W[token_to_idx[query_token]]
    # the 1e-9 is added for numerical stability
    cos = torch.matmul(W, x) / (torch.sum(W * W, dim=1) * torch.sum(x * x) + 1e-9).sqrt()
    _, topk = torch.topk(cos, k=k+1)
    topk = topk.cpu().numpy()
    for i in topk[1:]:  # skip the query word itself
        print('cosine sim=%.3f: %s' % (cos[i], (idx_to_token[i]))) 
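The denominator builds squared L2 norms with torch.sum(W * W, dim=1); the same quantity can be checked against a norm call:

import torch

W = torch.randn(4, 3)
sq_norms = torch.sum(W * W, dim=1)
assert torch.allclose(sq_norms, W.norm(dim=1) ** 2)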
Example #26
Source File: metrics.py    From dogTorch with MIT License
def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n