Python mxnet.ndarray.norm() Examples

The following are 11 code examples of mxnet.ndarray.norm(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.ndarray, or try the search function.
Example #1
Source File: score_fun.py    From dgl with Apache License 2.0 6 votes vote down vote up
def create_neg(self, neg_head):
    """Build a scoring closure for negative samples (TransE-style L1 score).

    Parameters
    ----------
    neg_head : bool
        If True, the negatives are corrupted heads; otherwise corrupted tails.

    Returns
    -------
    callable
        ``fn(heads, relations, tails, num_chunks, chunk_size, neg_sample_size)``
        returning ``gamma - ||h + r - t||_1`` scores per chunk.
    """
    gamma = self.gamma

    if neg_head:
        def score_neg_heads(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
            # Fold the relation into the tail, then add a broadcast axis so
            # every candidate head in the chunk is scored against it.
            rel = relations.reshape(num_chunks, -1, self.relation_dim)
            shifted_tails = (tails - rel).reshape(num_chunks, -1, 1, self.relation_dim)
            diff = heads - shifted_tails
            return gamma - nd.norm(diff, ord=1, axis=-1)
        return score_neg_heads

    def score_neg_tails(heads, relations, tails, num_chunks, chunk_size, neg_sample_size):
        # Symmetric case: fold the relation into the head instead.
        rel = relations.reshape(num_chunks, -1, self.relation_dim)
        shifted_heads = (heads - rel).reshape(num_chunks, -1, 1, self.relation_dim)
        diff = shifted_heads - tails
        return gamma - nd.norm(diff, ord=1, axis=-1)
    return score_neg_tails
Example #2
Source File: algos.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 5 votes vote down vote up
def calc_potential(exe, params, label_name, noise_precision, prior_precision):
    """Evaluate the potential energy of *params* under a Gaussian model.

    Copies *params* into the executor, runs a forward pass, and returns
    the data-fit term (squared residual norm scaled by *noise_precision*)
    plus a Gaussian prior term over every parameter array.
    """
    exe.copy_params_from(params)
    exe.forward(is_train=False)
    residual = exe.outputs[0] - exe.arg_dict[label_name]
    potential = (nd.norm(residual).asscalar() ** 2) / 2.0 * noise_precision
    for value in params.values():
        potential += (nd.norm(value).asscalar() ** 2) / 2.0 * prior_precision
    return potential
Example #3
Source File: utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 5 votes vote down vote up
def norm_clipping(params_grad, threshold):
    """Rescale gradients in place so their global L2 norm is at most *threshold*.

    Parameters
    ----------
    params_grad : dict
        Name -> gradient NDArray mapping.
    threshold : float
        Maximum allowed global norm.

    Returns
    -------
    float
        The global norm measured *before* any rescaling.
    """
    assert isinstance(params_grad, dict)
    squared_total = sum(nd.norm(g).asnumpy()[0] ** 2 for g in params_grad.values())
    global_norm = numpy.sqrt(squared_total)
    if global_norm > threshold:
        scale = threshold / global_norm
        for g in params_grad.values():
            g *= scale
    return global_norm
Example #4
Source File: score_fun.py    From dgl with Apache License 2.0 5 votes vote down vote up
def batched_l2_dist(a, b):
    """Pairwise Euclidean distances between the rows of two batched matrices.

    Uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, with the
    cross term and the ||b||^2 term fused into a single gemm call.
    Distances are clipped away from zero before the sqrt for numerical safety.
    """
    sq_a = nd.power(nd.norm(a, axis=-1), 2)
    sq_b = nd.power(nd.norm(b, axis=-1), 2)

    # Replicate ||b||^2 along a's row axis so gemm can accumulate into it.
    sq_b_rows = nd.broadcast_axes(nd.expand_dims(sq_b, axis=-2), axis=1, size=a.shape[1])
    b_transposed = nd.transpose(b, axes=(0, 2, 1))
    cross_plus_b = nd.linalg_gemm(a, b_transposed, sq_b_rows, alpha=-2)

    squared_dist = nd.add(cross_plus_b, nd.expand_dims(sq_a, axis=-1))
    return nd.sqrt(nd.clip(squared_dist, 1e-30, np.finfo(np.float32).max))
Example #5
Source File: score_fun.py    From dgl with Apache License 2.0 5 votes vote down vote up
def batched_l1_dist(a, b):
    """Pairwise L1 (Manhattan) distances between the rows of two batched matrices.

    Inserts broadcast axes so that every row of *a* is differenced against
    every row of *b*, then reduces with an L1 norm over the feature axis.
    """
    lhs = nd.expand_dims(a, axis=-2)
    rhs = nd.expand_dims(b, axis=-3)
    return nd.norm(lhs - rhs, ord=1, axis=-1)
Example #6
Source File: score_fun.py    From dgl with Apache License 2.0 5 votes vote down vote up
def edge_func(self, edges):
    """Per-edge TransE score: gamma - ||head + rel - tail||_p.

    Reads embeddings from the edge's endpoints and its own data;
    the norm order comes from ``self.dist_ord``.
    """
    head_emb = edges.src['emb']
    tail_emb = edges.dst['emb']
    rel_emb = edges.data['emb']
    translated = head_emb + rel_emb - tail_emb
    return {'score': self.gamma - nd.norm(translated, ord=self.dist_ord, axis=-1)}
Example #7
Source File: score_fun.py    From dgl with Apache License 2.0 5 votes vote down vote up
def edge_func(self, edges):
    """Per-edge TransE score with a fixed L1 norm: gamma - ||head + rel - tail||_1.

    Variant that reads head/tail embeddings stored directly on the edge
    data rather than on the endpoint nodes.
    """
    head_emb = edges.data['head_emb']
    tail_emb = edges.data['tail_emb']
    rel_emb = edges.data['emb']
    translated = head_emb + rel_emb - tail_emb
    return {'score': self.gamma - nd.norm(translated, ord=1, axis=-1)}
Example #8
Source File: algos.py    From training_results_v0.6 with Apache License 2.0 5 votes vote down vote up
def calc_potential(exe, params, label_name, noise_precision, prior_precision):
    """Compute the potential energy of *params* for the given executor.

    Runs a forward pass with *params* loaded, then sums a likelihood term
    (squared output-vs-label residual, weighted by *noise_precision*) and a
    Gaussian prior term over all parameter arrays (weighted by
    *prior_precision*).
    """
    exe.copy_params_from(params)
    exe.forward(is_train=False)
    residual = exe.outputs[0] - exe.arg_dict[label_name]
    energy = (nd.norm(residual).asscalar() ** 2) / 2.0 * noise_precision
    for param_value in params.values():
        energy += (nd.norm(param_value).asscalar() ** 2) / 2.0 * prior_precision
    return energy
Example #9
Source File: utils.py    From training_results_v0.6 with Apache License 2.0 5 votes vote down vote up
def norm_clipping(params_grad, threshold):
    """Clip gradients in place by their global L2 norm.

    Computes the norm across all gradient arrays in *params_grad*; when it
    exceeds *threshold*, every gradient is scaled down by
    ``threshold / norm``. Returns the unclipped global norm.
    """
    assert isinstance(params_grad, dict)
    total_sq = sum(nd.norm(grad_arr).asnumpy()[0] ** 2 for grad_arr in params_grad.values())
    grad_norm = numpy.sqrt(total_sq)
    if grad_norm > threshold:
        shrink = threshold / grad_norm
        for grad_arr in params_grad.values():
            grad_arr *= shrink
    return grad_norm
Example #10
Source File: algos.py    From SNIPER-mxnet with Apache License 2.0 5 votes vote down vote up
def calc_potential(exe, params, label_name, noise_precision, prior_precision):
    """Return the potential energy of *params* under a Gaussian likelihood + prior.

    Loads *params* into *exe*, does an inference-mode forward pass, and
    accumulates: (1) the squared norm of the output/label residual scaled by
    *noise_precision*, and (2) the squared norm of each parameter array
    scaled by *prior_precision*.
    """
    exe.copy_params_from(params)
    exe.forward(is_train=False)
    diff = exe.outputs[0] - exe.arg_dict[label_name]
    total = (nd.norm(diff).asscalar() ** 2) / 2.0 * noise_precision
    for arr in params.values():
        total += (nd.norm(arr).asscalar() ** 2) / 2.0 * prior_precision
    return total
Example #11
Source File: utils.py    From SNIPER-mxnet with Apache License 2.0 5 votes vote down vote up
def norm_clipping(params_grad, threshold):
    """Global-norm gradient clipping, applied in place.

    The global norm is the square root of the sum of squared L2 norms of
    every gradient array. If it is above *threshold*, each array is
    multiplied by ``threshold / norm``. The pre-clipping norm is returned
    so callers can log it.
    """
    assert isinstance(params_grad, dict)
    sum_of_squares = 0.0
    for g in params_grad.values():
        sum_of_squares += nd.norm(g).asnumpy()[0] ** 2
    norm_before = numpy.sqrt(sum_of_squares)
    if norm_before > threshold:
        factor = threshold / norm_before
        for g in params_grad.values():
            g *= factor
    return norm_before