Python torch.dist() Examples

The following are 30 code examples of torch.dist(), collected from open-source projects; the source file and license are noted above each example. You may also want to check out all available functions and classes of the torch module, or try the search function.
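Before the project examples, here is a minimal sketch of what torch.dist() itself computes: the p-norm of the element-wise difference between two tensors, returned as a zero-dimensional tensor (the tensors below are made up for illustration).

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 6.0, 8.0])

print(torch.dist(a, b))       # default p=2: sqrt(9 + 16 + 25) ~= 7.0711
print(torch.dist(a, b, 1))    # p=1 (Manhattan) distance: 3 + 4 + 5 = 12.0
print(torch.norm(a - b))      # equivalent to the default p=2 call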
Example #1
Source File: utils.py    From transfer-nlp with MIT License
def get_closest(target_word: str, word_to_idx: Dict, embeddings: torch.Tensor, n: int = 5) -> List[Tuple[str, torch.Tensor]]:
    """
    Get the n closest
    words to your word.
    """

    # Calculate distances to all other words

    word_embedding = embeddings[word_to_idx[target_word.lower()]]
    distances = []
    for word, index in word_to_idx.items():
        if word == "<MASK>" or word == target_word:
            continue
        distances.append((word, torch.dist(word_embedding, embeddings[index])))

    # Note: the slice below skips the single closest entry and returns n + 1
    # words; use [:n] instead if exactly the n nearest neighbours are wanted.
    results = sorted(distances, key=lambda x: x[1])[1:n + 2]
    return results
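A hedged usage sketch for the helper above; the vocabulary and embedding matrix below are made up for illustration (a real project would load trained embeddings instead):

from typing import Dict, List, Tuple
import torch

word_to_idx: Dict[str, int] = {"<MASK>": 0, "king": 1, "queen": 2, "man": 3, "woman": 4}
embeddings = torch.randn(len(word_to_idx), 50)  # random stand-in for trained embeddings

print(get_closest("king", word_to_idx, embeddings, n=2))
# -> a list of (word, distance-tensor) pairs, ordered by increasing distance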
Example #2
Source File: Evaluation_prev_version.py    From CU-Net with Apache License 2.0
def calc_dists(preds, target, normalize, use_zero=False):
    preds = preds.float()
    target = target.float()
    normalize = normalize.float()
    dists = torch.zeros(preds.size(1), preds.size(0))
    if use_zero:
        boundary = 0
    else:
        boundary = 1
    for n in range(preds.size(0)):
        for c in range(preds.size(1)):
            if target[n,c,0] > boundary and target[n, c, 1] > boundary:
                dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
            else:
                dists[c, n] = -1
    return dists 
Example #3
Source File: Evaluation.py    From CU-Net with Apache License 2.0
def calc_dists(preds, target, normalize, use_zero=False):
    preds = preds.float()
    target = target.float()
    normalize = normalize.float()
    dists = torch.zeros(preds.size(1), preds.size(0))
    if use_zero:
        boundary = 0
    else:
        boundary = 1
    for n in range(preds.size(0)):
        for c in range(preds.size(1)):
            if target[n,c,0] > boundary and target[n, c, 1] > boundary:
                dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
            else:
                dists[c, n] = -1
    return dists 
Example #4
Source File: jointemb.py    From deep-code-search with MIT License
def similarity(self, code_vec, desc_vec):
        """
        https://arxiv.org/pdf/1508.01585.pdf 
        """
        assert self.conf['sim_measure'] in ['cos', 'poly', 'euc', 'sigmoid', 'gesd', 'aesd'], "invalid similarity measure"
        if self.conf['sim_measure']=='cos':
            return F.cosine_similarity(code_vec, desc_vec)
        elif self.conf['sim_measure']=='poly':
            return (0.5*torch.matmul(code_vec, desc_vec.t()).diag()+1)**2
        elif self.conf['sim_measure']=='sigmoid':
            return torch.tanh(torch.matmul(code_vec, desc_vec.t()).diag()+1)
        elif self.conf['sim_measure'] in ['euc', 'gesd', 'aesd']:
            euc_dist = torch.dist(code_vec, desc_vec, 2) # or torch.norm(code_vec-desc_vec,2)
            euc_sim = 1 / (1 + euc_dist)
            if self.conf['sim_measure']=='euc': return euc_sim                
            sigmoid_sim = torch.sigmoid(torch.matmul(code_vec, desc_vec.t()).diag()+1)
            if self.conf['sim_measure']=='gesd': 
                return euc_sim * sigmoid_sim
            elif self.conf['sim_measure']=='aesd':
                return 0.5*(euc_sim+sigmoid_sim) 
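Note that torch.dist always reduces over every element and returns a single scalar, so the 'euc', 'gesd' and 'aesd' branches above produce one distance for the whole batch rather than one per code/description pair. A per-row variant would look like this sketch (shapes are illustrative):

import torch

code_vec = torch.randn(8, 128)   # [batch, dim], illustrative shapes
desc_vec = torch.randn(8, 128)

batch_dist = torch.dist(code_vec, desc_vec)             # 0-dim tensor over all elements
row_dist = torch.norm(code_vec - desc_vec, p=2, dim=1)  # shape [8], one distance per pair
print(batch_dist.shape, row_dist.shape)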
Example #5
Source File: reid_loss.py    From ARN with MIT License
def batch_euclidean_dist(x, y):
    """
    Args:
        x: pytorch Variable, with shape [N, m, d]
        y: pytorch Variable, with shape [N, n, d]
    Returns:
        dist: pytorch Variable, with shape [N, m, n]
    """
    assert len(x.size()) == 3
    assert len(y.size()) == 3
    assert x.size(0) == y.size(0)
    assert x.size(-1) == y.size(-1)

    N, m, d = x.size()
    N, n, d = y.size()

    # shape [N, m, n]
    xx = torch.pow(x, 2).sum(-1, keepdim=True).expand(N, m, n)
    yy = torch.pow(y, 2).sum(-1, keepdim=True).expand(N, n, m).permute(0, 2, 1)
    dist = xx + yy
    # Old positional beta/alpha form of baddbmm_; newer PyTorch versions prefer
    # dist.baddbmm_(x, y.permute(0, 2, 1), beta=1, alpha=-2).
    dist.baddbmm_(1, -2, x, y.permute(0, 2, 1))
    dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
    return dist 
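The expansion above relies on the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y^T. A minimal sketch checking the same computation against torch.cdist (assuming a PyTorch version that provides it):

import torch

x = torch.randn(4, 5, 8)   # [N, m, d], illustrative shapes
y = torch.randn(4, 7, 8)   # [N, n, d]

xx = x.pow(2).sum(-1, keepdim=True)                   # [N, m, 1]
yy = y.pow(2).sum(-1, keepdim=True).transpose(1, 2)   # [N, 1, n]
dist = (xx + yy - 2 * torch.bmm(x, y.transpose(1, 2))).clamp(min=1e-12).sqrt()

print(torch.dist(dist, torch.cdist(x, y)))  # expected to be close to 0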
Example #6
Source File: pytorch_load.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def test(model):
    model.eval()
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.zeros(1,299,299,3)
    inputs[0] = torch.from_numpy(img)
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)
    # 1, 3, 299, 299
    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionV4/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Example #7
Source File: pytorch_load.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf_conv = torch.from_numpy(h5f['conv_out'][()])
    output_tf_conv.transpose_(1,3)
    output_tf_conv.transpose_(2,3)
    output_tf_relu = torch.from_numpy(h5f['relu_out'][()])
    output_tf_relu.transpose_(1,3)
    output_tf_relu.transpose_(2,3)
    h5f.close()
    def test_dist_conv(self, input, output):
        print(name, 'conv', torch.dist(output.data, output_tf_conv))
    module.conv.register_forward_hook(test_dist_conv)
    def test_dist_relu(self, input, output):
        print(name, 'relu', torch.dist(output.data, output_tf_relu))
    module.relu.register_forward_hook(test_dist_relu) 
Example #8
Source File: pytorch_load.py    From Action_Recognition_Zoo with MIT License
def test(model):
    model.eval()
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.zeros(1,299,299,3)
    inputs[0] = torch.from_numpy(img)
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)
    # 1, 3, 299, 299
    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionV4/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Example #9
Source File: pytorch_load.py    From Action_Recognition_Zoo with MIT License
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf_conv = torch.from_numpy(h5f['conv_out'][()])
    output_tf_conv.transpose_(1,3)
    output_tf_conv.transpose_(2,3)
    output_tf_relu = torch.from_numpy(h5f['relu_out'][()])
    output_tf_relu.transpose_(1,3)
    output_tf_relu.transpose_(2,3)
    h5f.close()
    def test_dist_conv(self, input, output):
        print(name, 'conv', torch.dist(output.data, output_tf_conv))
    module.conv.register_forward_hook(test_dist_conv)
    def test_dist_relu(self, input, output):
        print(name, 'relu', torch.dist(output.data, output_tf_relu))
    module.relu.register_forward_hook(test_dist_relu) 
Example #10
Source File: inceptionresnetv2.py    From imagenet-fast with Apache License 2.0
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf_conv = torch.from_numpy(h5f['conv_out'][()])
    output_tf_conv.transpose_(1,3)
    output_tf_conv.transpose_(2,3)
    output_tf_relu = torch.from_numpy(h5f['relu_out'][()])
    output_tf_relu.transpose_(1,3)
    output_tf_relu.transpose_(2,3)
    h5f.close()
    def test_dist_conv(self, input, output):
        print(name, 'conv', torch.dist(output.data, output_tf_conv))
    module.conv.register_forward_hook(test_dist_conv)
    def test_dist_relu(self, input, output):
        print(name, 'relu', torch.dist(output.data, output_tf_relu))
    module.relu.register_forward_hook(test_dist_relu) 
Example #11
Source File: inceptionresnetv2.py    From imagenet-fast with Apache License 2.0
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf_conv = torch.from_numpy(h5f['conv_out'][()])
    output_tf_conv.transpose_(1,3)
    output_tf_conv.transpose_(2,3)
    output_tf_relu = torch.from_numpy(h5f['relu_out'][()])
    output_tf_relu.transpose_(1,3)
    output_tf_relu.transpose_(2,3)
    h5f.close()
    def test_dist_conv(self, input, output):
        print(name, 'conv', torch.dist(output.data, output_tf_conv))
    module.conv.register_forward_hook(test_dist_conv)
    def test_dist_relu(self, input, output):
        print(name, 'relu', torch.dist(output.data, output_tf_relu))
    module.relu.register_forward_hook(test_dist_relu) 
Example #12
Source File: inceptionv4.py    From imagenet-fast with Apache License 2.0
def test(model):
    model.eval()
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.zeros(1,299,299,3)
    inputs[0] = torch.from_numpy(img)
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)
    # 1, 3, 299, 299
    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionV4/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Example #13
Source File: measure.py    From tensorgrad with Apache License 2.0
def get_obs(Asymm, H, Sx, Sy, Sz, C, E ):
    # A(phy,u,l,d,r), C(d,r), E(u,r,d)
    
    Da = Asymm.size()
    Td = torch.einsum('mefgh,nabcd->eafbgchdmn',(Asymm,Asymm)).contiguous().view(Da[1]**2, Da[2]**2, Da[3]**2, Da[4]**2, Da[0], Da[0])
    #print( torch.dist( Td, Td.permute(0,3,2,1,4,5) ) )    # test left-right reflection symmetry of Td

    CE = torch.tensordot(C,E,([1],[0]))         # C(1d)E(dga)->CE(1ga)
    EL = torch.tensordot(E,CE,([2],[0]))        # E(2e1)CE(1ga)->EL(2ega)  use E(2e1) == E(1e2) 
    EL = torch.tensordot(EL,Td,([1,2],[1,0]))   # EL(2ega)T(gehbmn)->EL(2ahbmn)
    EL = torch.tensordot(EL,CE,([0,2],[0,1]))   # EL(2ahbmn)CE(2hc)->EL(abmnc), use CE(2hc) == CE(1ga) 
    Rho = torch.tensordot(EL,EL,([0,1,4],[0,1,4])).permute(0,2,1,3).contiguous().view(Da[0]**2,Da[0]**2)
    
    # print( (Rho-Rho.t()).norm() )
    Rho = 0.5*(Rho + Rho.t())
    
    Tnorm = Rho.trace()
    Energy = torch.mm(Rho,H).trace()/Tnorm
    Mx = torch.mm(Rho,Sx).trace()/Tnorm
    My = torch.mm(Rho,Sy).trace()/Tnorm
    Mz = torch.mm(Rho,Sz).trace()/Tnorm
   
    #print("Tnorm = %g, Energy = %g " % (Tnorm.item(), Energy.item()) )

    return Energy, Mx, My, Mz 
Example #14
Source File: metrics.py    From tntorch with GNU Lesser General Public License v3.0
def dist(t1, t2):
    """
    Computes the Euclidean distance between two tensors. Generally faster than `tn.norm(t1-t2)`.

    :param t1: a :class:`Tensor` (or a PyTorch tensor)
    :param t2: a :class:`Tensor` (or a PyTorch tensor)

    :return: a scalar :math:`\ge 0`
    """

    t1, t2 = _process(t1, t2)
    if isinstance(t1, torch.Tensor) and isinstance(t2, torch.Tensor):
        return torch.dist(t1, t2)
    return torch.sqrt(tn.dot(t1, t1) + tn.dot(t2, t2) - 2 * tn.dot(t1, t2).clamp(0)) 
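For plain torch tensors the two branches above agree up to floating-point error, since ||t1 - t2||^2 = <t1, t1> + <t2, t2> - 2<t1, t2>; a quick check with ordinary tensors (not tntorch ones):

import torch

t1 = torch.randn(3, 4, 5)
t2 = torch.randn(3, 4, 5)

direct = torch.dist(t1, t2)
via_dots = torch.sqrt((t1 * t1).sum() + (t2 * t2).sum() - 2 * (t1 * t2).sum())
print(direct, via_dots)  # should agree up to floating-point error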
Example #15
Source File: pytorch_utils.py    From TractSeg with Apache License 2.0
def l2_loss(y_pred, y_true, weights=None):
    """
    Calculate the Euclidean distance (= L2 norm / Frobenius norm) between tensors.
    Expects a tensor image as input (6 channels per class).

    Args:
        y_pred: [bs, classes, x, y, z]
        y_true: [bs, classes, x, y, z]
        weights: None, just for keeping the interface the same for all loss functions

    Returns:
        loss
    """
    if len(y_pred.shape) == 4:  # 2D
        y_true = y_true.permute(0, 2, 3, 1)
        y_pred = y_pred.permute(0, 2, 3, 1)
    else:  # 3D
        y_true = y_true.permute(0, 2, 3, 4, 1)
        y_pred = y_pred.permute(0, 2, 3, 4, 1)

    nr_of_classes = int(y_true.shape[-1] / 6.)
    scores = torch.zeros(nr_of_classes)

    for idx in range(nr_of_classes):
        y_pred_bund = y_pred[:, :, :, (idx * 6):(idx * 6) + 6].contiguous()
        y_true_bund = y_true[:, :, :, (idx * 6):(idx * 6) + 6].contiguous()  # [x,y,z,6]

        dist = torch.dist(y_pred_bund, y_true_bund, 2)  # L2 norm / Euclidean distance / Frobenius norm
        scores[idx] = torch.mean(dist)

    return torch.mean(scores), None 
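A hedged usage sketch for the 2D branch of l2_loss; the shapes are made up, and the channel count of 12 stands for two classes with 6 channels each:

import torch

y_pred = torch.randn(2, 12, 16, 16)  # [bs, classes*6, x, y]
y_true = torch.randn(2, 12, 16, 16)

loss, _ = l2_loss(y_pred, y_true)
print(loss)  # mean of the per-class torch.dist values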
Example #16
Source File: metrics.py    From tntorch with GNU Lesser General Public License v3.0
def relative_error(gt, approx):
    """
    Computes the relative error between two tensors (torch or tntorch).

    :param gt: a torch or tntorch tensor
    :param approx: a torch or tntorch tensor

    :return: a scalar :math:`\ge 0`
    """

    gt, approx = _process(gt, approx)
    if isinstance(gt, torch.Tensor) and isinstance(approx, torch.Tensor):
        return torch.dist(gt, approx) / torch.norm(gt)
    dotgt = tn.dot(gt, gt)
    return torch.sqrt((dotgt + tn.dot(approx, approx) - 2*tn.dot(gt, approx)).clamp(0)) / torch.sqrt(dotgt.clamp(0)) 
Example #17
Source File: linalg_utils.py    From PVN3D with MIT License
def pdist2_slow(X, Z=None):
    if Z is None:
        Z = X
    D = torch.zeros(X.size(0), X.size(2), Z.size(2))

    for b in range(D.size(0)):
        for i in range(D.size(1)):
            for j in range(D.size(2)):
                D[b, i, j] = torch.dist(X[b, :, i], Z[b, :, j])
    return D 
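Given the [batch, dim, points] layout implied by the indexing above, the triple loop computes batched pairwise Euclidean distances; a quick sketch comparing it against torch.cdist on transposed inputs (assuming torch.cdist is available):

import torch

X = torch.randn(2, 3, 4)   # [batch, dim, num_points_x], illustrative shapes
Z = torch.randn(2, 3, 5)   # [batch, dim, num_points_z]

D_slow = pdist2_slow(X, Z)                                  # [2, 4, 5]
D_fast = torch.cdist(X.transpose(1, 2), Z.transpose(1, 2))  # same distances, vectorised
print(torch.dist(D_slow, D_fast))  # expected to be close to 0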
Example #18
Source File: pytorch_load.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionV4/'+name+'.h5', 'r')
    output_tf = torch.from_numpy(h5f['relu_out'][()])
    output_tf.transpose_(1,3)
    output_tf.transpose_(2,3)
    h5f.close()
    def test_dist(self, input, output):
        print(name, torch.dist(output.data, output_tf))
    module.register_forward_hook(test_dist) 
Example #19
Source File: linalg_utils.py    From sanet_relocal_demo with GNU General Public License v3.0
def pdist2_slow(X, Z=None):
    if Z is None: Z = X
    D = torch.zeros(X.size(0), X.size(2), Z.size(2))

    for b in range(D.size(0)):
        for i in range(D.size(1)):
            for j in range(D.size(2)):
                D[b, i, j] = torch.dist(X[b, :, i], Z[b, :, j])
    return D 
Example #20
Source File: pytorch_load.py    From Action_Recognition_Zoo with MIT License
def test(model):
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.ones(1,299,299,3)
    #inputs[0] = torch.from_numpy(img)

    inputs[0,0,0,0] = -1
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)

    print(inputs.mean())
    print(inputs.std())

    #inputs.sub_(0.5).div_(0.5)
    #inputs.sub_(inputs)
    # 1, 3, 299, 299

    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionResnetV2/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(outputs.sum())
    print(outputs[0])
    print(outputs_tf.sum())
    print(outputs_tf[0])
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Example #21
Source File: inceptionresnetv2.py    From imagenet-fast with Apache License 2.0
def test_conv2d_nobn(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf = torch.from_numpy(h5f['conv_out'][()])
    output_tf.transpose_(1,3)
    output_tf.transpose_(2,3)
    h5f.close()
    def test_dist(self, input, output):
        print(name, 'conv+bias', torch.dist(output.data, output_tf))
    module.register_forward_hook(test_dist) 
Example #22
Source File: inceptionresnetv2.py    From imagenet-fast with Apache License 2.0
def test(model):
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.ones(1,299,299,3)
    #inputs[0] = torch.from_numpy(img)

    inputs[0,0,0,0] = -1
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)

    print(inputs.mean())
    print(inputs.std())

    #inputs.sub_(0.5).div_(0.5)
    #inputs.sub_(inputs)
    # 1, 3, 299, 299

    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionResnetV2/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(outputs.sum())
    print(outputs[0])
    print(outputs_tf.sum())
    print(outputs_tf[0])
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Example #23
Source File: inceptionv4.py    From imagenet-fast with Apache License 2.0
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionV4/'+name+'.h5', 'r')
    output_tf = torch.from_numpy(h5f['relu_out'][()])
    output_tf.transpose_(1,3)
    output_tf.transpose_(2,3)
    h5f.close()
    def test_dist(self, input, output):
        print(name, torch.dist(output.data, output_tf))
    module.register_forward_hook(test_dist) 
Example #24
Source File: inceptionresnetv2.py    From imagenet-fast with Apache License 2.0
def test_conv2d_nobn(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf = torch.from_numpy(h5f['conv_out'][()])
    output_tf.transpose_(1,3)
    output_tf.transpose_(2,3)
    h5f.close()
    def test_dist(self, input, output):
        print(name, 'conv+bias', torch.dist(output.data, output_tf))
    module.register_forward_hook(test_dist) 
Example #25
Source File: main.py    From pytorch-mula with MIT License
def calc_dists(preds, labels, normalize):
    dists = torch.Tensor(preds.size(1), preds.size(0))
    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            if labels[i, j, 0] == 0 and labels[i, j, 1] == 0:
                dists[j, i] = -1
            else:
                dists[j, i] = torch.dist(labels[i, j, :], preds[i, j, :]) / normalize
    return dists 
Example #26
Source File: inceptionresnetv2.py    From imagenet-fast with Apache License 2.0
def test(model):
    from scipy import misc
    img = misc.imread('lena_299.png')
    inputs = torch.ones(1,299,299,3)
    #inputs[0] = torch.from_numpy(img)

    inputs[0,0,0,0] = -1
    inputs.transpose_(1,3)
    inputs.transpose_(2,3)

    print(inputs.mean())
    print(inputs.std())

    #inputs.sub_(0.5).div_(0.5)
    #inputs.sub_(inputs)
    # 1, 3, 299, 299

    outputs = model.forward(torch.autograd.Variable(inputs))
    h5f = h5py.File('dump/InceptionResnetV2/Logits.h5', 'r')
    outputs_tf = torch.from_numpy(h5f['out'][()])
    h5f.close()
    outputs = torch.nn.functional.softmax(outputs)
    print(outputs.sum())
    print(outputs[0])
    print(outputs_tf.sum())
    print(outputs_tf[0])
    print(torch.dist(outputs.data, outputs_tf))
    return outputs 
Example #27
Source File: inceptionv4.py    From imagenet-fast with Apache License 2.0
def test_conv2d(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionV4/'+name+'.h5', 'r')
    output_tf = torch.from_numpy(h5f['relu_out'][()])
    output_tf.transpose_(1,3)
    output_tf.transpose_(2,3)
    h5f.close()
    def test_dist(self, input, output):
        print(name, torch.dist(output.data, output_tf))
    module.register_forward_hook(test_dist) 
Example #28
Source File: eval.py    From video-to-pose3D with MIT License
def calc_dists(preds, target, normalize):
    preds = preds.float().clone()
    target = target.float().clone()
    dists = torch.zeros(preds.size(1), preds.size(0))
    for n in range(preds.size(0)):
        for c in range(preds.size(1)):
            if target[n, c, 0] > 0 and target[n, c, 1] > 0:
                dists[c, n] = torch.dist(
                    preds[n, c, :], target[n, c, :]) / normalize[n]
            else:
                dists[c, n] = -1
    return dists 
Example #29
Source File: eval.py    From video-to-pose3D with MIT License
def calc_dists(preds, target, normalize):
    preds = preds.float().clone()
    target = target.float().clone()
    dists = torch.zeros(preds.size(1), preds.size(0))
    for n in range(preds.size(0)):
        for c in range(preds.size(1)):
            if target[n, c, 0] > 0 and target[n, c, 1] > 0:
                dists[c, n] = torch.dist(
                    preds[n, c, :], target[n, c, :]) / normalize[n]
            else:
                dists[c, n] = -1
    return dists 
Example #30
Source File: pytorch_load.py    From Action_Recognition_Zoo with MIT License
def test_conv2d_nobn(module, name):
    #global output_tf
    h5f = h5py.File('dump/InceptionResnetV2/'+name+'.h5', 'r')
    output_tf = torch.from_numpy(h5f['conv_out'][()])
    output_tf.transpose_(1,3)
    output_tf.transpose_(2,3)
    h5f.close()
    def test_dist(self, input, output):
        print(name, 'conv+bias', torch.dist(output.data, output_tf))
    module.register_forward_hook(test_dist)