Python torch.mean() Examples

The following are 30 code examples of torch.mean(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
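Before the project examples, here is a minimal, self-contained sketch of the torch.mean() call patterns the snippets below rely on (whole-tensor mean, per-dimension mean with dim=..., and keepdim=...); the tensor values are made up for illustration.

import torch

x = torch.arange(6, dtype=torch.float32).reshape(2, 3)     # [[0., 1., 2.], [3., 4., 5.]]
print(torch.mean(x))                                       # tensor(2.5000), mean over all elements
print(torch.mean(x, dim=0))                                # tensor([1.5000, 2.5000, 3.5000]), column means
print(torch.mean(x, dim=1, keepdim=True))                  # shape (2, 1), row means with the dim kept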
Example #1
Source File: loss.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 7 votes
def forward(self, x, target):
        similarity_matrix = x @ x.T  # need grad here
        label_matrix = target.unsqueeze(1) == target.unsqueeze(0)
        negative_matrix = label_matrix.logical_not()
        positive_matrix = label_matrix.fill_diagonal_(False)

        sp = torch.where(positive_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))
        sn = torch.where(negative_matrix, similarity_matrix,
                         torch.zeros_like(similarity_matrix))

        ap = torch.clamp_min(1 + self.m - sp.detach(), min=0.)
        an = torch.clamp_min(sn.detach() + self.m, min=0.)

        logit_p = -self.gamma * ap * (sp - self.dp)
        logit_n = self.gamma * an * (sn - self.dn)

        logit_p = torch.where(positive_matrix, logit_p,
                              torch.zeros_like(logit_p))
        logit_n = torch.where(negative_matrix, logit_n,
                              torch.zeros_like(logit_n))

        loss = F.softplus(torch.logsumexp(logit_p, dim=1) +
                          torch.logsumexp(logit_n, dim=1)).mean()
        return loss 
Example #2
Source File: modules.py    From BAMnet with Apache License 2.0 6 votes
def enc_ans_features(self, x_type_bow, x_types, x_type_bow_len, x_path_bow, x_paths, x_path_bow_len, x_ctx_ents, x_ctx_ent_len, x_ctx_ent_num):
        '''
        x_types: answer type
        x_paths: answer path, i.e., bow of relation
        x_ctx_ents: answer context, i.e., bow of entity words, (batch_size, num_cands, num_ctx, L)
        '''
        # ans_types = torch.mean(self.ent_type_embed(x_types.view(-1, x_types.size(-1))), 1).view(x_types.size(0), x_types.size(1), -1)
        ans_type_bow = (self.lstm_enc_type(x_type_bow.view(-1, x_type_bow.size(-1)), x_type_bow_len.view(-1))[1]).view(x_type_bow.size(0), x_type_bow.size(1), -1)
        ans_path_bow = (self.lstm_enc_path(x_path_bow.view(-1, x_path_bow.size(-1)), x_path_bow_len.view(-1))[1]).view(x_path_bow.size(0), x_path_bow.size(1), -1)
        ans_paths = torch.mean(self.relation_embed(x_paths.view(-1, x_paths.size(-1))), 1).view(x_paths.size(0), x_paths.size(1), -1)

        # Avg over ctx
        ctx_num_mask = create_mask(x_ctx_ent_num.view(-1), x_ctx_ents.size(2), self.use_cuda).view(x_ctx_ent_num.shape + (-1,))
        ans_ctx_ent = (self.lstm_enc_ctx(x_ctx_ents.view(-1, x_ctx_ents.size(-1)), x_ctx_ent_len.view(-1))[1]).view(x_ctx_ents.size(0), x_ctx_ents.size(1), x_ctx_ents.size(2), -1)
        ans_ctx_ent = ctx_num_mask.unsqueeze(-1) * ans_ctx_ent
        ans_ctx_ent = torch.sum(ans_ctx_ent, dim=2) / torch.clamp(x_ctx_ent_num.float().unsqueeze(-1), min=VERY_SMALL_NUMBER)

        if self.ans_enc_dropout:
            # ans_types = F.dropout(ans_types, p=self.ans_enc_dropout, training=self.training)
            ans_type_bow = F.dropout(ans_type_bow, p=self.ans_enc_dropout, training=self.training)
            ans_path_bow = F.dropout(ans_path_bow, p=self.ans_enc_dropout, training=self.training)
            ans_paths = F.dropout(ans_paths, p=self.ans_enc_dropout, training=self.training)
            ans_ctx_ent = F.dropout(ans_ctx_ent, p=self.ans_enc_dropout, training=self.training)
        return ans_type_bow, None, ans_path_bow, ans_paths, ans_ctx_ent 
Example #3
Source File: norms.py    From JEM with Apache License 2.0 6 votes
def initialize(self, input):
        with torch.no_grad():
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = (
                flatten.mean(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            std = (
                flatten.std(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )

            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6)) 
Example #4
Source File: mmd.py    From transferlearning with MIT License 6 votes
def cmmd(source, target, s_label, t_label, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
    s_label = s_label.cpu()
    s_label = s_label.view(32,1)
    s_label = torch.zeros(32, 31).scatter_(1, s_label.data, 1)
    s_label = Variable(s_label).cuda()

    t_label = t_label.cpu()
    t_label = t_label.view(32, 1)
    t_label = torch.zeros(32, 31).scatter_(1, t_label.data, 1)
    t_label = Variable(t_label).cuda()

    batch_size = int(source.size()[0])
    kernels = guassian_kernel(source, target,
                              kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
    loss = 0
    XX = kernels[:batch_size, :batch_size]
    YY = kernels[batch_size:, batch_size:]
    XY = kernels[:batch_size, batch_size:]
    loss += torch.mean(torch.mm(s_label, torch.transpose(s_label, 0, 1)) * XX +
                      torch.mm(t_label, torch.transpose(t_label, 0, 1)) * YY -
                      2 * torch.mm(s_label, torch.transpose(t_label, 0, 1)) * XY)
    return loss 
Example #5
Source File: norms.py    From JEM with Apache License 2.0 6 votes
def forward(self, x, y):
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)

        if self.bias:
            gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma, alpha = self.embed(y).chunk(2, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out 
Example #6
Source File: utils.py    From audio with BSD 2-Clause "Simplified" License 6 votes
def transcribe_file(args, task, generator, models, sp, tgt_dict):
    path = args.input_file
    if not os.path.exists(path):
        raise FileNotFoundError("Audio file not found: {}".format(path))
    waveform, sample_rate = torchaudio.load_wav(path)
    waveform = waveform.mean(0, True)
    waveform = torchaudio.transforms.Resample(
        orig_freq=sample_rate, new_freq=16000
    )(waveform)

    start = time.time()
    transcription = transcribe(
        waveform, args, task, generator, models, sp, tgt_dict
    )
    transcription_time = time.time() - start
    return transcription_time, transcription 
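In the snippet above, waveform.mean(0, True) averages across the channel dimension to obtain a mono signal before resampling. A minimal sketch of that channel-averaging step with a made-up stereo tensor:

import torch

stereo = torch.randn(2, 16000)            # (channels, samples), synthetic stereo signal
mono = stereo.mean(0, keepdim=True)       # (1, 16000), channel-averaged mono signal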
Example #7
Source File: train.py    From kuzushiji-recognition with MIT License 6 votes
def kuzushiji_loss(hm, centers, classes, hm_pred, classes_pred, weights=None):
  assert hm.shape == hm_pred.shape
  hm = hm.to(hm_pred.dtype)
  hm_loss = th.nn.functional.binary_cross_entropy_with_logits(
      hm_pred, hm, reduction='mean')

  classes_ = []
  for sample_ind in range(len(hm)):
    center = centers[sample_ind]
    center_mask = center[:, 0] != -1
    per_image_letters = center_mask.sum().item()
    if per_image_letters == 0:
      continue
    classes_per_img = classes[sample_ind][center_mask]
    classes_.append(classes_per_img)

  classes = th.cat(classes_, 0)
  classes_loss = th.nn.functional.cross_entropy(classes_pred, classes,
      reduction='mean')
  # print("hm: ", hm_loss.item(), " classes: ", classes_loss)
  total_loss = hm_loss + 0.1 * classes_loss
  return total_loss 
Example #8
Source File: losses.py    From ACAN with MIT License 6 votes
def __init__(self, ignore_index=None, reduction='sum', use_weights=False, weight=None):
        """
        Parameters
        ----------
        ignore_index : Specifies a target value that is ignored
                       and does not contribute to the input gradient
        reduction : Specifies the reduction to apply to the output: 
                    'mean' | 'sum'. 'mean': elementwise mean,
                    'sum': class dim will be summed and batch dim will be averaged.
        use_weights : whether to use class weights.
        weight : Tensor, optional
                a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        super(_BaseEntropyLoss2d, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.use_weights = use_weights
        if use_weights:
            print("w/ class balance")
            print(weight)
            self.weight = torch.FloatTensor(weight).cuda()
        else:
            print("w/o class balance")
            self.weight = None 
Example #9
Source File: losses.py    From ACAN with MIT License 6 votes
def forward(self, Q, P):
        """
        Parameters
        ----------
        P: ground truth probability distribution [batch_size, n, n]
        Q: predicted probability distribution [batch_size, n, n]

        Description
        -----------
        compute the KL divergence of attention maps. Here P and Q denote 
        the pixel-level attention map with n spatial positions.
        """
        kl_loss = P * safe_log(P / Q)
        pixel_loss = torch.sum(kl_loss, dim=-1)
        total_loss = torch.mean(pixel_loss)
        return total_loss 
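The forward pass above averages a per-position KL divergence into a scalar loss with torch.mean. A standalone sketch of the same computation, assuming P and Q are row-normalized attention maps of shape (batch_size, n, n) and with the project's safe_log replaced by clamping before torch.log (both tensors here are made up):

import torch

batch_size, n = 2, 4
P = torch.softmax(torch.randn(batch_size, n, n), dim=-1)    # "ground truth" attention map
Q = torch.softmax(torch.randn(batch_size, n, n), dim=-1)    # "predicted" attention map

kl = P * torch.log((P / Q).clamp_min(1e-12))                # elementwise KL terms
pixel_loss = torch.sum(kl, dim=-1)                          # KL per spatial position
total_loss = torch.mean(pixel_loss)                         # scalar loss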
Example #10
Source File: loss.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 6 votes
def __init__(
            self,
            classes,
            m=0.5,
            s=64,
            easy_margin=True,
            weight=None,
            size_average=None,
            ignore_index=-100,
            reduce=None,
            reduction='mean'):
        super(ArcLoss, self).__init__(weight, size_average, reduce, reduction)
        self.ignore_index = ignore_index
        assert s > 0.
        assert 0 <= m <= (math.pi / 2)
        self.s = s
        self.m = m
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        self.mm = math.sin(math.pi - m) * m
        self.threshold = math.cos(math.pi - m)
        self.classes = classes
        self.easy_margin = easy_margin 
Example #11
Source File: loss.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 6 votes
def __init__(
            self,
            classes,
            alpha,
            p=0.9,
            from_normx=False,
            weight=None,
            size_average=None,
            ignore_index=-100,
            reduce=None,
            reduction='mean'):
        super(L2Softmax, self).__init__(
            weight, size_average, reduce, reduction)
        alpha_low = math.log(p * (classes - 2) / (1 - p))
        assert alpha > alpha_low, "For given probability of p={}, alpha should be higher than {}.".format(
            p, alpha_low)
        self.ignore_index = ignore_index
        self.alpha = alpha
        self.from_normx = from_normx 
Example #12
Source File: unified_information.py    From interpret-text with MIT License 6 votes
def forward(self):
        """ Calculate loss:
            $L(\sigma) = \|\Phi(embed + \epsilon) - \Phi(embed)\|_2^2 / \mathit{regularization}^2 - \mathit{rate} \cdot \log(\sigma)$
        :return: a scalar, the target loss.
        :rtype: torch.FloatTensor
        """
        ratios = torch.sigmoid(self.ratio)  # S * 1
        x = self.input_embeddings + 0.0
        x_tilde = (
            x
            + ratios
            * torch.randn(self.input_size, self.input_dimension).to(x.device)
            * self.scale
        )  # S * D
        s = self.Phi(x)  # D or S * D
        s_tilde = self.Phi(x_tilde)
        loss = (s_tilde - s) ** 2
        if self.regular is not None:
            loss = torch.mean(loss / self.regular ** 2)
        else:
            loss = torch.mean(loss) / torch.mean(s ** 2)

        return loss - torch.mean(torch.log(ratios)) * self.rate 
Example #13
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License 6 votes
def logits_nll_loss(input, target, weight=None, reduction='mean'):
    """logits_nll_loss
    Unlike NLL loss, this is intended for sigmoid-based losses:
    the difference is that it sums along the C (class) dimension.
    """

    assert input.dim() == 2, 'Input shape should be (B, C).'
    if input.size(0) != target.size(0):
        raise ValueError(
            'Expected input batch_size ({}) to match target batch_size ({}).' .format(
                input.size(0), target.size(0)))

    ret = input.sum(dim=-1)
    if weight is not None:
        ret = _batch_weight(weight, target) * ret
    return reducing(ret, reduction) 
Example #14
Source File: merge_augs.py    From mmdetection with Apache License 2.0 5 votes
def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
    """Merge augmented detection bboxes and scores.

    Args:
        aug_bboxes (list[Tensor]): shape (n, 4*#class)
        aug_scores (list[Tensor] or None): shape (n, #class)
        img_metas (list[list[dict]]): image info for each augmentation, including 'img_shape', 'scale_factor', 'flip' and 'flip_direction'.
        rcnn_test_cfg (dict): rcnn test config.

    Returns:
        tuple: (bboxes, scores)
    """
    recovered_bboxes = []
    for bboxes, img_info in zip(aug_bboxes, img_metas):
        img_shape = img_info[0]['img_shape']
        scale_factor = img_info[0]['scale_factor']
        flip = img_info[0]['flip']
        flip_direction = img_info[0]['flip_direction']
        bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
                                   flip_direction)
        recovered_bboxes.append(bboxes)
    bboxes = torch.stack(recovered_bboxes).mean(dim=0)
    if aug_scores is None:
        return bboxes
    else:
        scores = torch.stack(aug_scores).mean(dim=0)
        return bboxes, scores 
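The merge step above averages the recovered boxes (and scores) over augmentations via torch.stack(...).mean(dim=0). A minimal sketch of that averaging with made-up tensors:

import torch

aug_boxes = [torch.rand(5, 4) for _ in range(3)]     # three augmented predictions, 5 boxes each
merged = torch.stack(aug_boxes).mean(dim=0)          # (5, 4), element-wise average over augmentations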
Example #15
Source File: components.py    From interpret-text with MIT License 5 votes
def _train_one_step(self, X_tokens, label, X_mask):
        """Train the classifier for one optimization step.

        :param X_tokens: Tokenized and embedded training example
        :type X_tokens: torch.int64
        :param label: Label of the training example
        :type label: torch.int64
        :param X_mask: Mask differentiating tokens vs not tokens
        :type X_mask: torch.FloatTensor
        :return: losses, classifier prediction logits
        :rtype: tuple
        """
        self.opt.zero_grad()
        self.model.zero_grad()

        cls_predict_logits, _, _ = self.model(
            X_tokens, attention_mask=X_mask
        )  # dimensions: (batch_size, hidden_dim, sequence_length)

        sup_loss = torch.mean(self.loss_func(cls_predict_logits, label))
        losses = {"g_sup_loss": sup_loss.cpu().data}
        sup_loss.backward()

        # Clip the norm of the gradients to 1.0.
        # This is to help prevent the "exploding gradients" problem.
        # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)

        self.opt.step()
        return losses, cls_predict_logits 
Example #16
Source File: ada_lanczos_net.py    From LanczosNetwork with MIT License 5 votes
def _get_graph_laplacian(self, node_feat, adj_mask):
    """ Compute graph Laplacian

      Args:
        node_feat: float tensor, shape B X N X D
        adj_mask: float tensor, shape B X N X N, binary mask, should contain self-loop

      Returns:
        L: float tensor, shape B X N X N
    """
    batch_size = node_feat.shape[0]
    num_node = node_feat.shape[1]
    dim_feat = node_feat.shape[2]

    # compute pairwise distance
    idx_row, idx_col = np.meshgrid(range(num_node), range(num_node))
    idx_row, idx_col = torch.Tensor(idx_row.reshape(-1)).long().to(node_feat.device), torch.Tensor(
        idx_col.reshape(-1)).long().to(node_feat.device)

    diff = node_feat[:, idx_row, :] - node_feat[:, idx_col, :]  # shape B X N^2 X D
    dist2 = (diff * diff).sum(dim=2)  # shape B X N^2
    
    # sigma2, _ = torch.median(dist2, dim=1, keepdim=True) # median is sometimes 0
    # sigma2 = sigma2 + 1.0e-7

    sigma2 = torch.mean(dist2, dim=1, keepdim=True)

    A = torch.exp(-dist2 / sigma2)  # shape B X N^2
    A = A.reshape(batch_size, num_node, num_node) * adj_mask  # shape B X N X N
    row_sum = torch.sum(A, dim=2, keepdim=True)
    pad_row_sum = torch.zeros_like(row_sum)
    pad_row_sum[row_sum == 0.0] = 1.0    
    alpha = 0.5
    D = 1.0 / (row_sum + pad_row_sum).pow(alpha)  # shape B X N X 1
    L = D * A * D.transpose(1, 2)  # shape B X N X N

    return L 
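Here torch.mean sets the Gaussian kernel bandwidth to the mean pairwise squared distance per graph (the commented-out median can collapse to zero). A minimal sketch of that bandwidth choice with made-up distances:

import torch

dist2 = torch.rand(2, 16)                             # (batch, N*N) squared pairwise distances
sigma2 = torch.mean(dist2, dim=1, keepdim=True)       # one bandwidth per graph
A = torch.exp(-dist2 / sigma2)                        # Gaussian affinities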
Example #17
Source File: result_stat.py    From pointnet-registration-framework with MIT License 5 votes
def main(args):
    if args.hdr:
        # val: given value (for x-axis)
        # me_pos: mean error of estimated position (distance)
        # me_rad: mean error of estimated rotation (angle in radian)
        # me_twist: mean error represented as norm of twist vector
        # me_vel: translation part of the twist. (rotation part is me_rad)
        print('val, me_pos, me_rad, me_twist, me_vel')

    if args.infile:
        npdata = numpy.loadtxt(args.infile, delimiter=',', skiprows=1)  # --> (N, 12)
        res = torch.from_numpy(npdata).view(-1, 12)
        x_hat = res[:, 0:6]   # estimated twist vector
        x_mgt = -res[:, 6:12] # (minus) ground-truth

        g_hat = ptlk.se3.exp(x_hat) # [N, 4, 4], estimated matrices
        g_igt = ptlk.se3.exp(x_mgt) # [N, 4, 4], inverse of ground-truth
        dg = g_hat.bmm(g_igt) # [N, 4, 4]. if correct, dg == identity matrices.

        dp = dg[:, 0:3, 3]    # [N, 3], position error
        dx = ptlk.se3.log(dg) # [N, 6], twist error
        dw = dx[:, 0:3]       # [N, 3], rotation part of the twist error
        dv = dx[:, 3:6]       # [N, 3], translation part

        ep = dp.norm(p=2, dim=1) # [N]
        ex = dx.norm(p=2, dim=1) # [N]
        ew = dw.norm(p=2, dim=1) # [N]
        ev = dv.norm(p=2, dim=1) # [N]

        e = torch.stack((ep, ew, ex, ev)) # [4, N]
        me = torch.mean(e, dim=1) # [4]

        line = ','.join(map(str, me.numpy().tolist()))
        print('{},{}'.format(args.val, line)) 
Example #18
Source File: create_partial_data.py    From pointnet-registration-framework with MIT License 5 votes
def sphere(data):
	# args: data (batch of pointclouds [B x N x 3])
	# returns: mask of visible points in data [B x N] and partial data
	data_trans = data + 2
	data_mag = np.linalg.norm(data_trans, 2, 2)
	data_COM = np.mean(data_trans, 1, keepdims=True)
	data_COM_mag = np.linalg.norm(data_COM, 2, 2)
	mask = data_mag < data_COM_mag
	data = [d[mask[idx]] for idx, d in enumerate(data)]
	data = [d[0:512] for d in data]
	return mask, np.array(data) 
Example #19
Source File: create_partial_data.py    From pointnet-registration-framework with MIT License 5 votes
def sphere_torch(p):
	# args: p, batch of pointclouds [B x N x 3]
	# returns: mask of visible points in p [B x N]
	p_trans = p + 2
	p_mag = torch.norm(p_trans, 2, 2)
	p_COM = torch.mean(p_trans, 1, keepdim=True)
	p_COM_mag = torch.norm(p_COM, 2, 2)
	mask = p_mag < p_COM_mag
	return mask

# Numpy based Code 
Example #20
Source File: unified_information.py    From interpret-text with MIT License 5 votes
def _calculate_regularization(self, sampled_x, model, reduced_axes=None):
        """ Calculate the variance of the state generated from the perturbed inputs that is used for Interpreter
        :param sampled_x: A list of sampled input embeddings $x$, each $x$ is of shape ``[length, dimension]``.
        All the $x$s can have different lengths, but should have the same dimension. The number of samples should be
        large enough to get a good estimate.
        :type sampled_x: list[torch.Tensor]
        :param reduced_axes: The axes that are variable in Phi (e.g., the sentence-length axis). These axes are
        reduced by taking the mean along them.
        :type reduced_axes: list[int]
        :param model: A pytorch model
        :type model: torch.model
        :param explain_layer: The layer that needs to be explained. Defaults to the last layer
        :type explain_layer: int
        :param device: A pytorch device
        :type device: torch.device
        :param Phi: A function whose input is x (element in the first parameter) and returns a hidden
        state (of type ``torch.FloatTensor``, of any shape)
        :type Phi: function
        :return: The regularization term calculated
        :rtype: torch.FloatTensor
        """
        sample_num = len(sampled_x)
        sample_s = []
        self.Phi = self._generate_Phi(layer=self.target_layer)
        for n in range(sample_num):
            x = sampled_x[n]
            if self.device is not None:
                x = x.to(self.device)

            s = self.Phi(x)
            if reduced_axes is not None:
                for axis in reduced_axes:
                    assert axis < len(s.shape)
                    s = s.mean(dim=axis, keepdim=True)
            sample_s.append(s.tolist())
        sample_s = np.array(sample_s)
        return np.std(sample_s, axis=0) 
Example #21
Source File: fast_text.py    From TaskBot with GNU General Public License v3.0 5 votes
def forward(self, x):
        x = self.embed(x)
        x = torch.mean(x, dim=1, keepdim=False)
        x = self.dropout(x)
        output = self.fc(x)
        output = F.log_softmax(output, dim=1)
        return output 
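The fastText-style forward above mean-pools word embeddings over the token dimension before the classifier. A minimal sketch of that pooling step with made-up sizes:

import torch
import torch.nn as nn

embed = nn.Embedding(num_embeddings=100, embedding_dim=8)
tokens = torch.randint(0, 100, (4, 10))               # (batch_size=4, seq_len=10) token ids
pooled = torch.mean(embed(tokens), dim=1)             # (4, 8), one averaged vector per sentence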
Example #22
Source File: norms.py    From JEM with Apache License 2.0 5 votes
def forward(self, x, y):
        if self.init:
            scale, bias = self.embed(y).chunk(2, dim=-1)
            return x * scale[:, :, None, None] + bias[:, :, None, None]
        else:
            m, v = torch.mean(x, dim=(0, 2, 3)), torch.var(x, dim=(0, 2, 3))
            std = torch.sqrt(v + 1e-5)
            scale_init = 1. / std
            bias_init = -1. * m / std
            self.embed.weight.data[:, :self.num_features] = scale_init[None].repeat(self.num_classes, 1)
            self.embed.weight.data[:, self.num_features:] = bias_init[None].repeat(self.num_classes, 1)
            self.init = True
            return self(x, y) 
Example #23
Source File: ent_modules.py    From BAMnet with Apache License 2.0 5 votes
def enc_kg_features(self, x_ent_names, x_ent_name_len, x_type_names, x_types, x_type_name_len, x_rel_names, x_rels, x_rel_name_len, x_rel_mask):
        node_ent_names = (self.kg_enc_ent(x_ent_names.view(-1, x_ent_names.size(-1)), x_ent_name_len.view(-1))[1]).view(x_ent_names.size(0), x_ent_names.size(1), -1)
        node_type_names = (self.kg_enc_type(x_type_names.view(-1, x_type_names.size(-1)), x_type_name_len.view(-1))[1]).view(x_type_names.size(0), x_type_names.size(1), -1)
        node_types = None
        edge_rel_names = torch.mean((self.kg_enc_rel(x_rel_names.view(-1, x_rel_names.size(-1)), x_rel_name_len.view(-1))[1]).view(x_rel_names.size(0), x_rel_names.size(1), x_rel_names.size(2), -1), 2)
        edge_rels = torch.mean(self.relation_embed(x_rels.view(-1, x_rels.size(-1))), 1).view(x_rels.size(0), x_rels.size(1), -1)

        if self.ent_enc_dropout:
            node_ent_names = F.dropout(node_ent_names, p=self.ent_enc_dropout, training=self.training)
            node_type_names = F.dropout(node_type_names, p=self.ent_enc_dropout, training=self.training)
            # node_types = F.dropout(node_types, p=self.ent_enc_dropout, training=self.training)
            edge_rel_names = F.dropout(edge_rel_names, p=self.ent_enc_dropout, training=self.training)
            edge_rels = F.dropout(edge_rels, p=self.ent_enc_dropout, training=self.training)
        return node_ent_names, node_type_names, node_types, edge_rel_names, edge_rels 
Example #24
Source File: multiscale_blueprint.py    From L3C-PyTorch with GNU General Public License v3.0 5 votes
def get_p_y(y):
    """
    :param y: NLCHW float, logits
    :return: L dimensional vector p
    """
    Ldim = 1
    L = y.shape[Ldim]
    y = y.detach()
    p = F.softmax(y, dim=Ldim)
    p = p.transpose(Ldim, -1)
    p = p.contiguous().view(-1, L)  # nL
    p = torch.mean(p, dim=0)  # L
    return pe.tensor_to_np(p) 
Example #25
Source File: kaldi.py    From audio with BSD 2-Clause "Simplified" License 5 votes
def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor:
    # subtracts the column mean of the tensor size (m, n) if subtract_mean=True
    # it returns size (m, n)
    if subtract_mean:
        col_means = torch.mean(tensor, dim=0).unsqueeze(0)
        tensor = tensor - col_means
    return tensor 
Example #26
Source File: model_utils.py    From medicaldetectiontoolkit with Apache License 2.0 5 votes
def batch_dice(pred, y, false_positive_weight=1.0, smooth=1e-6):
    '''
    compute soft dice over batch. this is a differentiable score and can be used as a loss function.
    only dice scores of foreground classes are returned, since training typically
    does not benefit from explicit background optimization. All pixels of the batch are treated as a single pseudo-volume when computing the dice scores,
    so individual patches with missing foreground classes cannot produce faulty gradients.
    :param pred: (b, c, y, x, (z)), softmax probabilities (network output). (c==classes)
    :param y: (b, c, y, x, (z)), one-hot-encoded segmentation mask.
    :param false_positive_weight: float [0,1]. For weighting of imbalanced classes,
    reduces the penalty for false-positive pixels. Can be beneficial sometimes in data with heavy fg/bg imbalances.
    :return: soft dice score (float). This function discards the background score and returns the mean of foreground scores.
    '''
    if len(pred.size()) == 4:
        axes = (0, 2, 3)
        intersect = sum_tensor(pred * y, axes, keepdim=False)
        denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
        return torch.mean(( (2 * intersect + smooth) / (denom + smooth) )[1:]) # only fg dice here.

    elif len(pred.size()) == 5:
        axes = (0, 2, 3, 4)
        intersect = sum_tensor(pred * y, axes, keepdim=False)
        denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
        return torch.mean(( (2*intersect + smooth) / (denom + smooth) )[1:]) # only fg dice here.

    else:
        raise ValueError('wrong input dimension in dice loss') 
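A standalone sketch of the foreground-only soft dice computed above, using plain torch.sum in place of the project's sum_tensor helper and made-up prediction/target tensors:

import torch
import torch.nn.functional as F

b, c, h, w = 2, 3, 8, 8
pred = torch.softmax(torch.randn(b, c, h, w), dim=1)                           # softmax probabilities
y = F.one_hot(torch.randint(0, c, (b, h, w)), c).permute(0, 3, 1, 2).float()   # one-hot masks

smooth = 1e-6
axes = (0, 2, 3)                                                      # batch treated as a pseudo-volume
intersect = torch.sum(pred * y, dim=axes)
denom = torch.sum(pred + y, dim=axes)
dice = torch.mean(((2 * intersect + smooth) / (denom + smooth))[1:])  # mean over foreground classes only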
Example #27
Source File: utils.py    From audio with BSD 2-Clause "Simplified" License 5 votes
def calcMN(features):
    mean, invstddev = calc_mean_invstddev(features)
    res = (features - mean) * invstddev
    return res 
Example #28
Source File: utils.py    From audio with BSD 2-Clause "Simplified" License 5 votes
def calc_mean_invstddev(feature):
    if len(feature.shape) != 2:
        raise ValueError("We expect the input feature to be 2-D tensor")
    mean = torch.mean(feature, dim=0)
    var = torch.var(feature, dim=0)
    # avoid division by ~zero
    if (var < sys.float_info.epsilon).any():
        return mean, 1.0 / (torch.sqrt(var) + sys.float_info.epsilon)
    return mean, 1.0 / torch.sqrt(var) 
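The two helpers above (calcMN and calc_mean_invstddev) implement per-dimension mean/variance normalization over the time axis. A minimal equivalent sketch with made-up features:

import torch

features = torch.randn(100, 40)                               # (frames, feature_dim)
mean = torch.mean(features, dim=0)                            # per-dimension mean over time
invstd = 1.0 / torch.sqrt(torch.var(features, dim=0) + 1e-8)  # guard against near-zero variance
normalized = (features - mean) * invstd                       # zero-mean, unit-variance per dimension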
Example #29
Source File: main.py    From PSMNet with MIT License 5 votes
def test(imgL, imgR, disp_true):

        model.eval()

        if args.cuda:
            imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()

        # mask of valid ground-truth disparities
        mask = disp_true < 192

        # pad height and width up to the next multiple of 16
        if imgL.shape[2] % 16 != 0:
            times = imgL.shape[2] // 16
            top_pad = (times + 1) * 16 - imgL.shape[2]
        else:
            top_pad = 0

        if imgL.shape[3] % 16 != 0:
            times = imgL.shape[3] // 16
            right_pad = (times + 1) * 16 - imgL.shape[3]
        else:
            right_pad = 0

        imgL = F.pad(imgL, (0, right_pad, top_pad, 0))
        imgR = F.pad(imgR, (0, right_pad, top_pad, 0))

        with torch.no_grad():
            output3 = model(imgL, imgR)
            output3 = torch.squeeze(output3)

        # remove the top padding before computing the error
        if top_pad != 0:
            img = output3[:, top_pad:, :]
        else:
            img = output3

        if len(disp_true[mask]) == 0:
            loss = torch.tensor(0., device=disp_true.device)  # no valid pixels; avoid calling .data on a plain int
        else:
            loss = F.l1_loss(img[mask], disp_true[mask])  # torch.mean(torch.abs(img[mask] - disp_true[mask])), end-point error

        return loss.data.cpu() 
Example #30
Source File: model_utils.py    From medicaldetectiontoolkit with Apache License 2.0 5 votes
def batch_dice_mask(pred, y, mask, false_positive_weight=1.0, smooth=1e-6):
    '''
    compute soft dice over batch. this is a differentiable score and can be used as a loss function.
    only dice scores of foreground classes are returned, since training typically
    does not benefit from explicit background optimization. All pixels of the batch are treated as a single pseudo-volume when computing the dice scores,
    so individual patches with missing foreground classes cannot produce faulty gradients.
    :param pred: (b, c, y, x, (z)), softmax probabilities (network output).
    :param y: (b, c, y, x, (z)), one-hot-encoded segmentation mask.
    :param false_positive_weight: float [0,1]. For weighting of imbalanced classes,
    reduces the penalty for false-positive pixels. Can be beneficial sometimes in data with heavy fg/bg imbalances.
    :return: soft dice score (float). This function discards the background score and returns the mean of foreground scores.
    '''

    mask = mask.unsqueeze(1).repeat(1, 2, 1, 1)

    if len(pred.size()) == 4:
        axes = (0, 2, 3)
        intersect = sum_tensor(pred * y * mask, axes, keepdim=False)
        denom = sum_tensor(false_positive_weight*pred * mask + y * mask, axes, keepdim=False)
        return torch.mean(( (2*intersect + smooth) / (denom + smooth))[1:]) # only fg dice here.

    elif len(pred.size()) == 5:
        axes = (0, 2, 3, 4)
        intersect = sum_tensor(pred * y, axes, keepdim=False)
        denom = sum_tensor(false_positive_weight*pred + y, axes, keepdim=False)
        return torch.mean(( (2*intersect + smooth) / (denom + smooth) )[1:]) # only fg dice here.

    else:
        raise ValueError('wrong input dimension in dice loss')