Python mxnet.nd.sqrt() Examples

The following are 17 code examples of mxnet.nd.sqrt(), collected from open-source projects. You can go to the original project or source file via the links above each example. You may also want to check out all available functions/classes of the module mxnet.nd, or try the search function.
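As a quick baseline before the project examples: nd.sqrt computes the element-wise square root of an NDArray. A minimal snippet of our own (not taken from any of the projects below):

from mxnet import nd

a = nd.array([1.0, 4.0, 9.0])
print(nd.sqrt(a))  # [1. 2. 3.]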
Example #1
Source File: utils.py    From coach with Apache License 2.0
def global_norm(arrays: Union[Generator[NDArray, NDArray, NDArray], List[NDArray], Tuple[NDArray]]) -> NDArray:
    """
    Calculate global norm on list or tuple of NDArrays using this formula:
        `global_norm = sqrt(sum([l2norm(p)**2 for p in parameters]))`

    :param arrays: list or tuple of parameters to calculate global norm on
    :return: single-value NDArray
    """
    def _norm(array):
        if array.stype == 'default':
            x = array.reshape((-1,))
            return nd.dot(x, x)
        return array.norm().square()

    total_norm = nd.add_n(*[_norm(arr) for arr in arrays])
    total_norm = nd.sqrt(total_norm)
    return total_norm 
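A minimal usage sketch for global_norm, assuming the function above is in scope and using a throwaway Gluon model so that gradients exist (the model and data here are made up for illustration):

from mxnet import nd, autograd
from mxnet.gluon import nn

net = nn.Dense(4)
net.initialize()
x = nd.random.uniform(shape=(2, 8))
with autograd.record():
    loss = net(x).sum()
loss.backward()

# global_norm from Example #1 is assumed to be defined above
grads = [p.grad() for p in net.collect_params().values()]
print(global_norm(grads))  # single-value NDArray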
Example #2
Source File: CapsuleNet.py    From CapsuleNet-Gluon with MIT License
def hybrid_forward(self, F, X, y=None):
        X = self.net[0](X)  # Conv1
        X = self.net[1](X)  # Primary Capsule
        X = self.net[2](X)  # Digit Capsule
        X = X.reshape((X.shape[0], X.shape[2], X.shape[4]))
        # get the length of each capsule vector for the margin loss calculation
        X_l2norm = nd.sqrt((X ** 2).sum(axis=-1))
        prob = nd.softmax(X_l2norm, axis=-1)

        # at training time select capsules by label; at inference pick the longest
        if y is not None:
            max_len_indices = y
        else:
            max_len_indices = nd.argmax(prob, axis=-1)

        y_tile = nd.tile(max_len_indices.expand_dims(axis=1), reps=(1, X.shape[-1]))
        batch_activated_capsules = nd.pick(X, y_tile, axis=1, keepdims=True)

        reconstructions = self.net[3](batch_activated_capsules)

        return prob, X_l2norm, reconstructions 
Example #3
Source File: metric.py    From ST-MetaNet with MIT License
def get_value(self):
		return { self.name: nd.sqrt((self.loss / (self.cnt + 1e-8))[self.indices]) } 
Example #4
Source File: train.py    From SNIPER-mxnet with Apache License 2.0
def get_distance_matrix(x):
    """Get distance matrix given a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square) 
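One caveat when feeding this pattern into nd.sqrt: floating-point cancellation can leave tiny negative entries in distance_square (typically on the diagonal), which sqrt turns into NaN. A standalone check with a clamp as a safeguard (the clamp is our addition, not part of the project code):

from mxnet import nd

x = nd.array([[0.0, 0.0], [3.0, 4.0]])
square = nd.sum(x ** 2.0, axis=1, keepdims=True)
distance_square = square + square.transpose() - 2.0 * nd.dot(x, x.transpose())
print(nd.sqrt(nd.maximum(distance_square, 0.0)))  # [[0. 5.] [5. 0.]]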
Example #5
Source File: test_periodic_kernel.py    From gluon-ts with Apache License 2.0
def test_periodic_kernel(x1, x2, amplitude, length_scale, exact) -> None:
    tol = 1e-5
    batch_size = amplitude.shape[0]
    history_length_1 = x1.shape[0]
    history_length_2 = x2.shape[0]
    num_features = x1.shape[1]
    if batch_size > 1:
        x1 = nd.tile(x1, reps=(batch_size, 1, 1))
        x2 = nd.tile(x2, reps=(batch_size, 1, 1))
        for i in range(1, batch_size):
            x1[i, :, :] = (i + 1) * x1[i, :, :]
            x2[i, :, :] = (i - 3) * x2[i, :, :]
    else:
        x1 = x1.reshape(batch_size, history_length_1, num_features)
        x2 = x2.reshape(batch_size, history_length_2, num_features)
    amplitude = amplitude.reshape(batch_size, 1, 1)
    length_scale = length_scale.reshape(batch_size, 1, 1)
    frequency = 1 / 24 * nd.ones_like(length_scale)
    periodic = PeriodicKernel(amplitude, length_scale, frequency)

    exact = amplitude * nd.exp(
        -2
        * nd.sin(frequency * math.pi * nd.sqrt(exact)) ** 2
        / length_scale ** 2
    )

    res = periodic.kernel_matrix(x1, x2)
    assert nd.norm(exact - res) < tol


# This test is based on a simple single batch with different history lengths
# from gpytorch, where the exact value is computed inside the test rather than hard-coded
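For reference, the expression being tested is the standard periodic kernel, k(x1, x2) = amplitude * exp(-2 * sin(pi * frequency * d)^2 / length_scale^2), where d is the distance between inputs; the test receives squared distances, hence the nd.sqrt. A standalone single-point evaluation of the same expression (the values are made up):

import math
from mxnet import nd

amplitude, length_scale, frequency = 1.0, 0.5, 1.0 / 24
d_square = nd.array([4.0])  # squared distance between two inputs
k = amplitude * nd.exp(
    -2 * nd.sin(frequency * math.pi * nd.sqrt(d_square)) ** 2 / length_scale ** 2
)
print(k)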
Example #6
Source File: CapsLayers.py    From CapsNet_Mxnet with Apache License 2.0
def forward(self, x):
        # (batch_size, 1, 10, 16, 1) => (batch_size, 10, 16) => (batch_size, 10)
        x_shape = x.shape
        x = x.reshape(shape=(x_shape[0], x_shape[2], x_shape[3]))

        # length of each capsule vector
        x_l2norm = nd.sqrt((x.square()).sum(axis=-1))
        return x_l2norm 
Example #7
Source File: CapsLayers.py    From CapsNet_Mxnet with Apache License 2.0
def squash(self, vectors, axis):
        epsilon = 1e-9
        # note: despite the name, this holds the *squared* L2 norm of each capsule vector
        vectors_l2norm = nd.square(vectors).sum(axis=axis, keepdims=True)

        # e.g. (batch_size, 1, num_capsule, 1, 1)
        assert vectors_l2norm.shape == (self.batch_size, 1, self.num_capsule, 1, 1)

        scale_factor = vectors_l2norm / (1 + vectors_l2norm)
        vectors_squashed = scale_factor * (vectors / nd.sqrt(vectors_l2norm + epsilon))  # element-wise

        return vectors_squashed 
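Both this squash and the variant in the next example implement the CapsNet nonlinearity squash(s) = (|s|^2 / (1 + |s|^2)) * s / |s|, which shrinks short vectors toward zero and maps long ones to just under unit length. A standalone sketch of the same computation (written as a free function for illustration):

from mxnet import nd

def squash(vectors, axis=-1, epsilon=1e-9):
    sq_norm = nd.square(vectors).sum(axis=axis, keepdims=True)
    scale = sq_norm / (1 + sq_norm)
    return scale * vectors / nd.sqrt(sq_norm + epsilon)

v = nd.array([[0.1, 0.0], [10.0, 0.0]])
print(nd.sqrt((squash(v) ** 2).sum(axis=1)))  # ~[0.01, 0.99]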
Example #8
Source File: CapsLayers.py    From CapsNet_Mxnet with Apache License 2.0
def squash(self, vectors, axis):
        epsilon = 1e-9
        # squared L2 norm of each capsule vector
        vectors_l2norm = nd.square(vectors).sum(axis=axis, keepdims=True)

        scale_factor = vectors_l2norm / (1 + vectors_l2norm)
        vectors_squashed = scale_factor * (vectors / nd.sqrt(vectors_l2norm + epsilon))  # element-wise

        return vectors_squashed 
Example #9
Source File: utils.py    From EmotionClassifier with GNU General Public License v3.0
def grad_clipping(params, clipping_norm, ctx):
    """Gradient clipping."""
    if clipping_norm is not None:
        norm = nd.array([0.0], ctx)
        for p in params:
            norm += nd.sum(p.grad ** 2)
        norm = nd.sqrt(norm).asscalar()
        if norm > clipping_norm:
            for p in params:
                p.grad[:] *= clipping_norm / norm 
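A minimal usage sketch for grad_clipping above, using a single hand-made parameter so the effect of the rescale is easy to verify (the parameter and loss are made up for illustration):

import mxnet as mx
from mxnet import nd, autograd

ctx = mx.cpu()
w = nd.random.normal(shape=(3, 3), ctx=ctx)
w.attach_grad()
with autograd.record():
    loss = (w * w).sum()
loss.backward()

grad_clipping([w], clipping_norm=1.0, ctx=ctx)
print(nd.sqrt(nd.sum(w.grad ** 2)))  # at most 1.0 after clipping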
Example #10
Source File: metric.py    From ST-MetaNet with MIT License
def get_value(self):
		return { self.name: nd.sqrt(self.loss / (self.cnt + 1e-8)) } 
Example #11
Source File: train.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def get_distance_matrix(x):
    """Get distance matrix given a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square) 
Example #12
Source File: capsule_block.py    From comment_toxic_CapsuleNet with MIT License
def forward(self, x):
        x = nd.sqrt(nd.sum(nd.square(x), 1))
        return x 
Example #13
Source File: capsule_block.py    From comment_toxic_CapsuleNet with MIT License
def squash(x, axis):
    s_squared_norm = nd.sum(nd.square(x), axis, keepdims=True)
    # if s_squared_norm is really small, we will be in trouble
    # so I removed the square terms
    # scale = s_squared_norm / ((1 + s_squared_norm) * nd.sqrt(s_squared_norm + 1e-9))
    # return x * scale
    scale = nd.sqrt(s_squared_norm + 1e-9)
    return x / scale 
Example #14
Source File: conv_cap.py    From comment_toxic_CapsuleNet with MIT License
def squash(x, axis):
    s_squared_norm = nd.sum(nd.square(x), axis, keepdims=True)
    # if s_squared_norm is really small, we will be in trouble
    # so I removed the square terms
    # scale = s_squared_norm / ((1 + s_squared_norm) * nd.sqrt(s_squared_norm + 1e-9))
    # return x * scale
    scale = nd.sqrt(s_squared_norm + 1e-9)
    return x / scale 
Example #15
Source File: merge_bn.py    From Quantization.MXNet with MIT License
def _merge_bn(net, conv_name="conv", bn_name="batchnorm", exclude=[]):
    conv_lst = []
    def _collect_conv(m):
        if isinstance(m, nn.Conv2D):
            assert not hasattr(m, "gamma"), "Don't merge bn to a conv with fake bn! ({})".format(m.name)
            conv_lst.append(m)
    net.apply(_collect_conv)

    bn_names = [c.name.replace(conv_name, bn_name) for c in conv_lst]
    for conv, bn in zip(conv_lst, bn_names):
        params = net.collect_params(bn + "_")
        if len(params.keys()) != 0 and conv not in exclude:
            print("Merge {} to {}".format(bn, conv.name))
            gamma = params[bn + "_gamma"].data()
            beta = params[bn + "_beta"].data()
            mean = params[bn + "_running_mean"].data()
            var = params[bn + "_running_var"].data()

            weight = conv.weight.data()
            w_shape = conv.weight.shape
            cout = w_shape[0]
            conv.weight.set_data((weight.reshape(cout, -1) * gamma.reshape(-1, 1)
                                  / nd.sqrt(var + 1e-10).reshape(-1, 1)).reshape(w_shape))
            if conv.bias is None:
                conv._kwargs['no_bias'] = False
                conv.bias = conv.params.get('bias',
                                            shape=(cout,), init="zeros",
                                            allow_deferred_init=True)
                conv.bias.initialize()
            bias = conv.bias.data()
            conv.bias.set_data(gamma * (bias - mean) / nd.sqrt(var + 1e-10) + beta) 
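The algebra behind _merge_bn: for a conv output y = w*x + b followed by batch norm, gamma * (y - mean) / sqrt(var + eps) + beta equals a convolution with weight w * gamma / sqrt(var + eps) and bias gamma * (b - mean) / sqrt(var + eps) + beta, which is exactly what is written back above. A tiny single-channel numeric check of that identity:

from mxnet import nd

gamma, beta = nd.array([2.0]), nd.array([0.5])
mean, var, eps = nd.array([1.0]), nd.array([4.0]), 1e-10

y = nd.array([3.0])  # conv output w*x + b for one channel
bn = gamma * (y - mean) / nd.sqrt(var + eps) + beta

scale = gamma / nd.sqrt(var + eps)
folded = y * scale + (beta - gamma * mean / nd.sqrt(var + eps))
print(bn, folded)  # identical up to float error: [2.5] [2.5]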
Example #16
Source File: train.py    From training_results_v0.6 with Apache License 2.0
def get_distance_matrix(x):
    """Get distance matrix given a matrix. Used in testing."""
    square = nd.sum(x ** 2.0, axis=1, keepdims=True)
    distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
    return nd.sqrt(distance_square) 
Example #17
Source File: utils_final.py    From InsightFace_TF with MIT License
def grad_clipping(params, clipping_norm, ctx):
    """Gradient clipping."""
    if clipping_norm is not None:
        norm = nd.array([0.0], ctx)
        for p in params:
            norm += nd.sum(p.grad ** 2)
        norm = nd.sqrt(norm).asscalar()
        if norm > clipping_norm:
            for p in params:
                p.grad[:] *= clipping_norm / norm