Python mxnet.ndarray.exp() Examples

The following are 30 code examples of mxnet.ndarray.exp(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module mxnet.ndarray, or try the search function.
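Before looking at the project examples, here is a minimal, self-contained sketch (values and shapes are illustrative only) of what mxnet.ndarray.exp() does: it applies the element-wise exponential to an NDArray and returns a new NDArray.

from mxnet import ndarray as nd

x = nd.array([0.0, 1.0, 2.0])
y = nd.exp(x)                 # element-wise e**x, same shape as x
print(y.asnumpy())            # approximately [1.  2.7182817  7.389056]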
Example #1
Source File: bdk_demo.py    From training_results_v0.6 with Apache License 2.0
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
                                    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad 
Example #2
Source File: bdk_demo.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
                                    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad 
Example #3
Source File: bdk_demo.py    From SNIPER-mxnet with Apache License 2.0
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
                                    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
    X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad 
Example #4
Source File: bdk_demo.py    From SNIPER-mxnet with Apache License 2.0
def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        dx[:] = (numpy.exp(y) - l).astype('float32') 
Example #5
Source File: lstm_crf.py    From training_results_v0.6 with Apache License 2.0
def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        alphas = [[-10000.] * self.tagset_size]
        alphas[0][self.tag2idx[START_TAG]] = 0.
        alphas = nd.array(alphas)

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].reshape((1, -1))
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions.data()[next_tag].reshape((1, -1))
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = alphas + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
        terminal_var = alphas + self.transitions.data()[self.tag2idx[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha 
Example #6
Source File: utils.py    From xfer with Apache License 2.0
def softplus(x):
    return nd.log(1. + nd.exp(x)) 
Example #7
Source File: utils.py    From xfer with Apache License 2.0
def softplus_inv(x):
    return nd.log(nd.exp(x) - 1.) 
Example #8
Source File: utils.py    From xfer with Apache License 2.0
def softplus_inv_numpy(x):
    return np.log(np.exp(x) - 1.) 
Example #9
Source File: model.py    From NER_BiLSTM_CRF_Chinese with Apache License 2.0
def log_sum_exp(vec):
    max_score = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score 
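The max-score subtraction above is the standard numerically stable log-sum-exp trick: shifting by the maximum keeps every argument of exp() at or below zero, so nothing overflows, and the maximum is added back outside the log. A quick sanity check (illustrative values, assuming the same nd import as the example):

from mxnet import ndarray as nd

vec = nd.array([1000.0, 1000.0])   # naive nd.log(nd.sum(nd.exp(vec))) would overflow to inf
max_score = nd.max(vec).asscalar()
stable = nd.log(nd.sum(nd.exp(vec - max_score))) + max_score
print(stable.asscalar())           # approximately 1000.6931 (= 1000 + log 2)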
Example #10
Source File: bdk_demo.py    From SNIPER-mxnet with Apache License 2.0
def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
        y /= y.sum(axis=1).reshape((x.shape[0], 1)) 
Example #11
Source File: bdk_demo.py    From SNIPER-mxnet with Apache License 2.0
def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
        y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
        # y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        # y /= y.sum(axis=1).reshape((x.shape[0], 1)) 
Example #12
Source File: test_contrib_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_unary_func():
    x = nd.uniform(shape=(4, 5))
    f_exp         = lambda x: nd.exp(x)
    f_exp_grad    = lambda x: [nd.exp(x)]
    autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
    f_half        = lambda x: x/2
    f_half_grad   = lambda x: [nd.ones(x.shape) * 0.5]
    autograd_assert(x, func=f_half, grad_func=f_half_grad)
    f_square      = lambda x: x**2
    f_square_grad = lambda x: [2*x]
    autograd_assert(x, func=f_square, grad_func=f_square_grad) 
Example #13
Source File: bdk_demo.py    From SNIPER-mxnet with Apache License 2.0
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    student_mean = student_outputs[0]
    student_var = student_outputs[1]
    grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)

    grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
                                            + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var] 
Example #14
Source File: lstm_crf.py    From SNIPER-mxnet with Apache License 2.0
def log_sum_exp(vec):
    max_score = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score

# Model 
Example #15
Source File: lstm_crf.py    From SNIPER-mxnet with Apache License 2.0
def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        alphas = [[-10000.] * self.tagset_size]
        alphas[0][self.tag2idx[START_TAG]] = 0.
        alphas = nd.array(alphas)

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].reshape((1, -1))
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions[next_tag].reshape((1, -1))
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = alphas + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
        terminal_var = alphas + self.transitions[self.tag2idx[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha 
Example #16
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_unary_func():
    def check_unary_func(x):
        f_exp         = lambda x: nd.exp(x)
        f_exp_grad    = lambda x: [nd.exp(x)]
        autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
        f_half        = lambda x: x/2
        f_half_grad   = lambda x: [nd.ones(x.shape) * 0.5]
        autograd_assert(x, func=f_half, grad_func=f_half_grad)
        f_square      = lambda x: x**2
        f_square_grad = lambda x: [2*x]
        autograd_assert(x, func=f_square, grad_func=f_square_grad)
    uniform = nd.uniform(shape=(4, 5))
    stypes = ['default', 'row_sparse', 'csr']
    for stype in stypes:
        check_unary_func(uniform.tostype(stype)) 
Example #17
Source File: test_autograd.py    From SNIPER-mxnet with Apache License 2.0
def test_gradient():
    x = mx.nd.ones((1,))
    x.attach_grad()

    with mx.autograd.record():
        z = mx.nd.elemwise_add(mx.nd.exp(x), x)
    dx, = mx.autograd.grad(z, [x], create_graph=True)
    assert abs(dx.asscalar() - 3.71828175) < 1e-7
    dx.backward()
    assert abs(x.grad.asscalar() - 2.71828175) < 1e-7 
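The asserted constants follow from z = exp(x) + x evaluated at x = 1: the first gradient is exp(1) + 1 ≈ 3.71828, and because create_graph=True keeps the graph, backpropagating through that gradient again leaves d/dx (exp(x) + 1) = exp(1) ≈ 2.71828 in x.grad.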
Example #18
Source File: bdk_demo.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
        y /= y.sum(axis=1).reshape((x.shape[0], 1)) 
Example #19
Source File: lstm_crf.py    From training_results_v0.6 with Apache License 2.0
def log_sum_exp(vec):
    max_score = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score

# Model 
Example #20
Source File: bdk_demo.py    From training_results_v0.6 with Apache License 2.0
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    student_mean = student_outputs[0]
    student_var = student_outputs[1]
    grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)

    grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
                                            + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var] 
Example #21
Source File: bdk_demo.py    From training_results_v0.6 with Apache License 2.0
def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        dx[:] = (numpy.exp(y) - l).astype('float32') 
Example #22
Source File: bdk_demo.py    From training_results_v0.6 with Apache License 2.0
def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
        y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
        # y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        # y /= y.sum(axis=1).reshape((x.shape[0], 1)) 
Example #23
Source File: bdk_demo.py    From training_results_v0.6 with Apache License 2.0
def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
        y /= y.sum(axis=1).reshape((x.shape[0], 1)) 
Example #24
Source File: tensor_models.py    From dgl with Apache License 2.0
def logsigmoid(val):
    max_elem = nd.maximum(0., -val)
    z = nd.exp(-max_elem) + nd.exp(-val - max_elem)
    return -(max_elem + nd.log(z)) 
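This is the numerically stable form of log sigmoid(val) = -log(1 + exp(-val)): factoring out max(0, -val) keeps both exp() arguments non-positive, so large negative inputs do not overflow. A short sanity check (illustrative values, repeating the function so the snippet is self-contained):

from mxnet import ndarray as nd

def logsigmoid(val):
    max_elem = nd.maximum(0., -val)
    z = nd.exp(-max_elem) + nd.exp(-val - max_elem)
    return -(max_elem + nd.log(z))

print(logsigmoid(nd.array([0.0])).asnumpy())     # approximately [-0.6931] = -log 2
print(logsigmoid(nd.array([-100.0])).asnumpy())  # approximately [-100.], no overflow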
Example #25
Source File: tensor.py    From dgl with Apache License 2.0
def exp(input):
    return nd.exp(input) 
Example #26
Source File: test_contrib_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_unary_func():
    x = nd.uniform(shape=(4, 5))
    f_exp         = lambda x: nd.exp(x)
    f_exp_grad    = lambda x: [nd.exp(x)]
    autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
    f_half        = lambda x: x/2
    f_half_grad   = lambda x: [nd.ones(x.shape) * 0.5]
    autograd_assert(x, func=f_half, grad_func=f_half_grad)
    f_square      = lambda x: x**2
    f_square_grad = lambda x: [2*x]
    autograd_assert(x, func=f_square, grad_func=f_square_grad) 
Example #27
Source File: test_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_gradient():
    x = mx.nd.ones((1,))
    x.attach_grad()

    with mx.autograd.record():
        z = mx.nd.elemwise_add(mx.nd.exp(x), x)
    dx, = mx.autograd.grad(z, [x], create_graph=True)
    assert abs(dx.asscalar() - 3.71828175) < 1e-7
    dx.backward()
    assert abs(x.grad.asscalar() - 2.71828175) < 1e-7 
Example #28
Source File: test_autograd.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_unary_func():
    def check_unary_func(x):
        f_exp         = lambda x: nd.exp(x)
        f_exp_grad    = lambda x: [nd.exp(x)]
        autograd_assert(x, func=f_exp, grad_func=f_exp_grad)
        f_half        = lambda x: x/2
        f_half_grad   = lambda x: [nd.ones(x.shape) * 0.5]
        autograd_assert(x, func=f_half, grad_func=f_half_grad)
        f_square      = lambda x: x**2
        f_square_grad = lambda x: [2*x]
        autograd_assert(x, func=f_square, grad_func=f_square_grad)
    uniform = nd.uniform(shape=(4, 5))
    stypes = ['default', 'row_sparse', 'csr']
    for stype in stypes:
        check_unary_func(uniform.tostype(stype)) 
Example #29
Source File: lstm_crf.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        alphas = [[-10000.] * self.tagset_size]
        alphas[0][self.tag2idx[START_TAG]] = 0.
        alphas = nd.array(alphas)

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].reshape((1, -1))
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions.data()[next_tag].reshape((1, -1))
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = alphas + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
        terminal_var = alphas + self.transitions.data()[self.tag2idx[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha 
Example #30
Source File: lstm_crf.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def log_sum_exp(vec):
    max_score = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score

# Model