The following code examples show how to use autograd.numpy.exp(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
```def _log_logistic_sigmoid(x_real):
''' Compute log of logistic sigmoid transform from real line to unit interval.

Numerically stable and fully vectorized.

Args
----
x_real : array-like, with values in (-infty, +infty)

Returns
-------
log_p_real : array-like, size of x_real, with values in <= 0
'''
if not isinstance(x_real, float):
out = np.zeros_like(x_real)
return out
return _log_logistic_sigmoid_not_vectorized(x_real) ```
Example 2
```def prior_error(mu_shift, w, n_u):

a = -numpy.abs(w[:, numpy.arange(0, n_u)*3] + mu_shift[1])

b = numpy.abs(w[:, numpy.arange(0, n_u)*3+1] + mu_shift[3])

c = w[:, numpy.arange(0, n_u)*3+2] + mu_shift[5]

q = numpy.linspace(1e-8, 1 - 1e-8, 128)

# q = q.ravel()

q_hat = numpy.mean(1 / (1 + numpy.exp(numpy.log(q)[None, None, :] * a[:, :, None] +
numpy.log(1 - q)[None, None, :] * b[:, :, None] +
c[:, :, None])), axis=0)

return numpy.mean((q - q_hat) ** 2) ```
Example 3
```def e_link_log_lik(w_0, w_1, w_2, q, ln_q, ln_1_q, ln_s):

a = -numpy.exp(w_0).reshape(-1, 1)

b = numpy.exp(w_1).reshape(-1, 1)

c = w_2.reshape(-1, 1)

tmp_sum = a * ln_q + c + b * ln_1_q

tmp_exp = numpy.exp(tmp_sum)

tmp_de = numpy.where(tmp_exp.ravel() <= 1e-16,
2 * numpy.log(1 + tmp_exp.ravel()),
2 * (tmp_sum.ravel() + numpy.log(1 + 1 / tmp_exp.ravel()))).reshape(-1, 1)

ln_s_hat = ln_s + tmp_sum + numpy.log((a + b) * q - a) - ln_q - ln_1_q - tmp_de

return ln_s_hat ```
Example 4
```def expm_eigh(h):
"""
Compute the unitary operator of a hermitian matrix.
U = expm(-1j * h)

Arguments:
h :: ndarray (N X N) - The matrix to exponentiate, which must be hermitian.

Returns:
expm_h :: ndarray(N x N) - The unitary operator of a.
"""
eigvals, p = anp.linalg.eigh(h)
p_dagger = anp.conjugate(anp.swapaxes(p, -1, -2))
d = anp.exp(-1j * eigvals)
return anp.matmul(p *d, p_dagger)

### EXPORT ### ```
Example 5
 Project: momi2   Author: popgenmethods   File: demo_utils.py    GNU General Public License v3.0 6 votes
```def simple_admixture_3pop(x=None):
if x is None:
x = np.random.normal(size=7)
t = np.cumsum(np.exp(x[:5]))
p = 1.0 / (1.0 + np.exp(x[5:]))

model = momi.DemographicModel(1., .25)
model.move_lineages("a", "c", t[1], p=1.-p[1])
model.move_lineages("a", "d", t[0], p=1.-p[0])
model.move_lineages("c", "d", t[2])
model.move_lineages("d", "b", t[3])
model.move_lineages("a", "b", t[4])
return model ```
Example 6
```def construct_z_theano(Xth, Yth, Tth, gwidth_th):
"""Construct the features Z to be used for testing with T^2 statistics.
Z is defined in Eq.14 of Chwialkovski et al., 2015 (NIPS).
Theano version.

Return a n x 2J numpy array. 2J because of sin and cos for each frequency.
"""
Xth = old_div(Xth,gwidth_th)
Yth = old_div(Yth,gwidth_th)
# inverse Fourier transform (upto scaling) of the unit-width Gaussian kernel
fx = tensor.exp(old_div(-(Xth**2).sum(1),2)).reshape((-1, 1))
fy = tensor.exp(old_div(-(Yth**2).sum(1),2)).reshape((-1, 1))
# n x J
x_freq = Xth.dot(Tth.T)
y_freq = Yth.dot(Tth.T)
# zx: n x 2J
zx = tensor.concatenate([tensor.sin(x_freq)*fx, tensor.cos(x_freq)*fx], axis=1)
zy = tensor.concatenate([tensor.sin(y_freq)*fy, tensor.cos(y_freq)*fy], axis=1)
z = zx-zy
return z ```
Example 7
```def pair_eval(self, X, Y):
"""
Evaluate k(x1, y1), k(x2, y2), ...

Parameters
----------
X, Y : n x d numpy array

Return
-------
a numpy array with length n
"""
(n1, d1) = X.shape
(n2, d2) = Y.shape
assert n1==n2, 'Two inputs must have the same number of instances'
assert d1==d2, 'Two inputs must have the same dimension'
D2 = np.sum( (X-Y)**2, 1)
Kvec = np.exp(old_div(-D2,self.sigma2))
return Kvec ```
Example 8
```def _compute_loss(self):
"""Compute and store loss value for the given batch of examples."""
if self._loss_computed:
return
self._compute_distances()

# NLL loss from the NIPS paper.
exp_negative_distances = np.exp(-self.euclidean_dists)  # (1 + neg_size, batch_size)
# Remove the value for the true edge (u,v) from the partition function
Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size)
self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
self.Z = Z # (batch_size)

self.pos_loss = self.euclidean_dists[0].sum()
self.neg_loss = np.log(self.Z).sum()
self.loss = self.pos_loss + self.neg_loss  # scalar

self._loss_computed = True ```
Example 9
```def _nll_loss_fn(poincare_dists):
"""
Parameters
----------
poincare_dists : numpy.array
All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).

Returns
----------
log-likelihood loss function from the NIPS paper, Eq (6).
"""

# NOTE(review): this snippet appears truncated by the scrape -- the body that
# computes exp(-poincare_dists) and the final return statement are missing;
# only the commented-out variant of the return survives below.
# Remove the value for the true edge (u,v) from the partition function
# return -grad_np.log(exp_negative_distances[0] / (- exp_negative_distances[0] + exp_negative_distances.sum()))
Example 10
 Project: pymanopt   Author: pymanopt   File: packing_on_the_sphere.py    BSD 3-Clause "New" or "Revised" License 6 votes
```def packing_on_the_sphere(n, k, epsilon):
manifold = Elliptope(n, k)

def cost(X):
Y = np.dot(X, X.T)
# Shift the exponentials by the maximum value to reduce numerical
# trouble due to possible overflows.
s = np.triu(Y, 1).max()
expY = np.exp((Y - s) / epsilon)
# Zero out the diagonal
expY -= np.diag(np.diag(expY))
u = np.triu(expY, 1).sum()
return s + epsilon * np.log(u)

problem = Problem(manifold, cost)
return solver.solve(problem) ```
Example 11
```def _log_logistic_sigmoid_not_vectorized(x_real):
if x_real > 50.0:
return - np.log1p(np.exp(-x_real))
else:
return x_real - np.log1p(np.exp(x_real)) ```
Example 12
```def _vjp__log_logistic_sigmoid(ans, x):
# NOTE(review): `g` below is not defined in this snippet -- in the autograd
# VJP idiom the cotangent g is normally supplied by an enclosing closure or
# as an extra parameter (compare the 5-argument variant of this function
# elsewhere in this file); verify against the source project before use.
x = np.asarray(x)
return np.full(x.shape, g) * (1 - np.exp(ans))
Example 13
```def _vjp__log_logistic_sigmoid(g, ans, vs, gvs, x):
x = np.asarray(x)
return np.full(x.shape, g) * (1 - np.exp(ans)) ```
Example 14
```def _make_grad_product(ans, x):
# NOTE(review): `g` is not defined in this snippet; in autograd's
# gradient-maker idiom the original presumably returns a closure over the
# cotangent g. Verify against the source project before use.
x = np.asarray(x)
return np.full(x.shape, g) * (1 - np.exp(ans))
Example 15
```def logistic_sigmoid(x_real):
''' Compute logistic sigmoid transform from real line to unit interval.

Numerically stable and fully vectorized.

Args
----
x_real : array-like, with values in (-infty, +infty)

Returns
-------
p_real : array-like, size of x_real, with values in (0, 1)

Examples
--------
>>> logistic_sigmoid(-55555.)
0.0
>>> logistic_sigmoid(0.0)
0.5
>>> logistic_sigmoid(55555.)
1.0
>>> logistic_sigmoid(np.asarray([-999999, 0, 999999.]))
array([ 0. ,  0.5,  1. ])
'''
if not isinstance(x_real, float):
out = np.zeros_like(x_real)
return out
if x_real > 50.0:
pos_real = np.exp(-x_real)
return 1.0 / (1.0 + pos_real)
else:
pos_real = np.exp(x_real)
return pos_real / (1.0 + pos_real) ```
Example 16
```def _logistic_sigmoid_not_vectorized(x_real):
if x_real > 50.0:
pos_real = np.exp(-x_real)
return 1.0 / (1.0 + pos_real)
else:
pos_real = np.exp(x_real)
return pos_real / (1.0 + pos_real)

# Definite gradient function via manual formula
# Supporting different versions of autograd software ```
Example 17
```def kernel(X, Xp, hyp):
output_scale = np.exp(hyp[0])
lengthscales = np.sqrt(np.exp(hyp[1:]))
X = X/lengthscales
Xp = Xp/lengthscales
X_SumSquare = np.sum(np.square(X),axis=1);
Xp_SumSquare = np.sum(np.square(Xp),axis=1);
mul = np.dot(X,Xp.T);
dists = X_SumSquare[:,np.newaxis]+Xp_SumSquare-2.0*mul
return output_scale * np.exp(-0.5 * dists) ```
Example 18
```def smooth_hinge(s, temp):
return temp * np.log(1 + np.exp((1 - s) / temp)) ```
Example 19
```def test_grad():
# NOTE(review): this snippet is truncated by the scrape -- the gradient
# comparison that should follow random_tensor() is missing, and `einsum2`
# and `random_tensor` are defined elsewhere in the source project.
p = .05
def fun0(B, Bdims):
return einsum2.einsum2(np.exp(B**2), Bdims, np.transpose(B), Bdims[::-1], [])
def fun1(B, Bdims):
if Bdims: Bdims = list(range(len(Bdims)))
return np.einsum(np.exp(B**2), Bdims,
np.transpose(B), Bdims[::-1], [])
B, Bdims = random_tensor(p)
Example 20
```def sigmoid(x=None):
return 1.0 / (1 + np.exp(-x)) ```
Example 21
```def make_fwd_grad_logsumexp(g, ans, gvs, vs, x, axis=None, b=1.0, keepdims=False):
if not keepdims:
if isinstance(axis, int):
ans = anp.expand_dims(ans, axis)
elif isinstance(axis, tuple):
for ax in sorted(axis):
ans = anp.expand_dims(ans, ax)
return anp.sum(g * b * anp.exp(x - ans), axis=axis, keepdims=keepdims) ```
Example 22
# Combo test of logsumexp gradients over a 1-D input with vector b weights;
# `combo_check` and `R` come from autograd's test utilities (not shown here).
`def test_logsumexp3(): combo_check(autograd.scipy.misc.logsumexp, [0], [R(4)], b = [np.exp(R(4))],         axis=[None, 0],    keepdims=[True, False]) `
Example 23
# Same combo test as test_logsumexp3 but for a 2-D input and both axes;
# `combo_check` and `R` come from autograd's test utilities (not shown here).
`def test_logsumexp4(): combo_check(autograd.scipy.misc.logsumexp, [0], [R(3,4),], b = [np.exp(R(3,4))],    axis=[None, 0, 1], keepdims=[True, False]) `
Example 24
# Same combo test extended to a 3-D input;
# `combo_check` and `R` come from autograd's test utilities (not shown here).
`def test_logsumexp5(): combo_check(autograd.scipy.misc.logsumexp, [0], [R(2,3,4)], b = [np.exp(R(2,3,4))], axis=[None, 0, 1], keepdims=[True, False]) `
Example 25
# Checks np.exp gradients via autograd's unary-ufunc test helper
# (`unary_ufunc_check` is defined elsewhere in the source project).
`def test_exp():     unary_ufunc_check(np.exp) `
Example 26
```def link_log_lik(w, q, ln_q, ln_1_q, ln_s):

w = w.reshape(-1, 3)

a = -numpy.exp(w[:, 0]).reshape(-1, 1)

b = numpy.exp(w[:, 1]).reshape(-1, 1)

c = w[:, 2].reshape(-1, 1)

tmp_sum = a * ln_q + c + b * ln_1_q

tmp_exp = numpy.exp(tmp_sum)

tmp_de = numpy.where(tmp_exp.ravel() <= 1e-16,
2 * numpy.log(1 + tmp_exp.ravel()),
2 * (tmp_sum.ravel() + numpy.log(1 + 1 / tmp_exp.ravel()))).reshape(-1, 1)

ln_s_hat = ln_s + tmp_sum + numpy.log((a + b) * q - a) - ln_q - ln_1_q - tmp_de

# L = numpy.sum(ln_s_hat)

# if numpy.isnan(L):
#     import pdb; pdb.set_trace()
#
# if numpy.isinf(L):
#     import pdb; pdb.set_trace()

# print([numpy.mean(ln_s), numpy.mean(ln_s_hat)])

return ln_s_hat ```
Example 27
```def mc_link_lik(w, mu_shift, q, ln_q, ln_1_q, ln_s):
# NOTE(review): snippet truncated by the scrape -- the final return
# (presumably based on ln_mean_s_hat) is missing.

# Number of data points; w holds 3 interleaved parameter columns per point.
n = numpy.shape(q)[0]

w_a = w[:, numpy.arange(0, n)*3]

w_b = w[:, numpy.arange(0, n)*3+1]

# a <= 0 and b >= 0 by construction, so (a + b) * q - a stays positive.
a = -numpy.exp(w_a / mu_shift[0] + mu_shift[1])

b = numpy.exp(w_b / mu_shift[2] + mu_shift[3])

c = w[:, numpy.arange(0, n)*3+2] / mu_shift[4] + mu_shift[5]

tmp_sum = a * ln_q.ravel() + b * ln_1_q.ravel() + c

# Stable 2*log(1 + e^tmp_sum): the two branches are algebraically equal.
tmp_de = numpy.where(tmp_sum <= 0,
2 * numpy.log(1 + numpy.exp(tmp_sum)),
2 * (tmp_sum + numpy.log(1 + 1 / (numpy.exp(tmp_sum)))))

ln_s_hat = (tmp_sum + numpy.log((a + b) * q.ravel() - a) - ln_q.ravel() - ln_1_q.ravel() - tmp_de) + ln_s.ravel()

# Monte-Carlo average over samples (axis 0), floored at 1e-16 before the log.
mean_exp = numpy.mean(numpy.exp(ln_s_hat), axis=0)

ln_mean_s_hat = numpy.where(mean_exp > 0, numpy.log(mean_exp), numpy.log(1e-16))

Example 28
```def get_real_coefficients(self, params):
log_a, log_b, log_c, log_P = params
b = anp.exp(log_b)
return (anp.exp(log_a) * (1.0 + b), anp.exp(log_c),) ```
Example 29
```def get_complex_coefficients(self, params):
log_a, log_b, log_c, log_P = params
b = anp.exp(log_b)
return (anp.exp(log_a), 0.0,anp.exp(log_c), 2*anp.pi*anp.exp(-log_P),) ```
Example 30
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def heston_log_st_mgf(u, t, r, q, S0, V0, theta, k, sigma, rho):
dt = np.sqrt((sigma ** 2) * (u - u ** 2) + (k - rho * sigma * u) ** 2)
beta = k - u * rho * sigma
g = (beta - dt) / (beta + dt)
D_t = (beta - dt) / (sigma ** 2) * ((1 - np.exp(-dt * t)) / (1 - g * np.exp(-dt * t)))
C_t = u * (r - q) * t + k * theta / (sigma ** 2) * (
(beta - dt) * t - 2 * np.log((1 - g * np.exp(-dt * t)) / (1 - g)))
return np.exp(C_t + D_t * V0 + u * np.log(S0)) ```
Example 31
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def general_ln_st_mgf(u, t, r, q, S0, mgf_xt, *args, **kwargs):
martingale_adjust = -(1 / t) * np.log(mgf_xt(1, t, *args, **kwargs))
normal_term = (np.log(S0) + (r - q + martingale_adjust) * t) * u
ln_st_mgf = np.exp(normal_term) * mgf_xt(u, t, *args, **kwargs)
return ln_st_mgf ```
Example 32
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def norm_mgf(u, norm_mean, norm_sig):
return np.exp(norm_mean * u + 0.5 * (u ** 2) * (norm_sig ** 2)) ```
Example 33
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def poisson_mgf(u, t, jump_rate):
return np.exp(t * jump_rate * (np.exp(u) - 1)) ```
Example 34
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def diffusion_mgf(u, t, sigma):
return np.exp(0.5 * t * (u ** 2) * (sigma ** 2)) ```
Example 35
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def nig_mgf(u, t, a, b, delta):
sqa = np.sqrt(a ** 2 - (b + u) ** 2)
sqb = np.sqrt(a ** 2 - b ** 2)
return np.exp(-delta * t * (sqa - sqb)) ```
Example 36
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def cpp_normal_mgf(u, t, jump_rate, norm_m, norm_sig):
return np.exp(t * jump_rate * (norm_mgf(u, norm_m, norm_sig) - 1)) ```
Example 37
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def cpp_double_exponential_mgf(u, t, jump_rate, exp_pos, exp_neg, prob_pos):
return np.exp(t * jump_rate * (double_exponential_mgf(u, exp_pos, exp_neg, prob_pos) - 1)) ```
Example 38
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
```def general_log_moneyness_mgf(u, strike, mgf, **kwargs):
return np.exp(-u * np.log(strike)) * mgf(u, **kwargs) ```
Example 39
```def gauss_kernel(X, D, gamma=1.0):
"""
Compute the 1D Gaussian kernel between all elements of a
NxH matrix and a fixed L-dimensional dictionary, resulting in a NxHxL matrix of kernel
values.
"""
return np.exp(- gamma*np.square(X.reshape(-1, X.shape[1], 1) - D)) ```
Example 40
```def sample_z(self, lam, n_samps=1, eps=None):
""" sample from the variational distribution """
D = self.D
assert len(lam) == 2*D, "bad parameter length"
if eps is None:
eps = np.random.randn(n_samps, D)
z = np.exp(lam[D:]) * eps + lam[None, :D]
return z ```
Example 41
```def lnpoiss(y, lnlam):
""" log likelihood of poisson """
return y*lnlam - np.exp(lnlam) - gammaln(y+1) ```
Example 42
```def normal_lnpdf(x, mean, ln_std):
x = np.atleast_2d(x)
D = x.shape[1]
dcoef = 1.
if ln_std.shape[1] != D:
dcoef = D
qterm = -.5 * np.sum((x - mean)**2 / np.exp(2.*ln_std), axis=1)
coef  = -.5*D * np.log(2.*np.pi) - dcoef * np.sum(ln_std, axis=1)
return qterm + coef ```
Example 43
```def sigmoid(a):
return 1. / (1. + np.exp(-a)) ```
Example 44
```def mvn_diag_logpdf(x, mean, log_std):
D = len(mean)
qterm = -.5 * np.sum((x - mean)**2 / np.exp(2.*log_std), axis=1)
coef  = -.5*D * np.log(2.*np.pi) - np.sum(log_std)
return qterm + coef ```
Example 45
```def unconstrained_to_simplex(rhos):
rhosf = np.concatenate([rhos, [0.]])
pis   = np.exp(rhosf) / np.sum(np.exp(rhosf))
return pis ```
Example 46
```def acc(guesses, targets):
return np.true_divide(np.sum(guesses == targets), len(guesses))

# Returns the predicted outputs based on inputs, training weights, and bias
# exp=True will exponentiate the predicted values, transforming to [0, 1] ```
Example 47
```def platt_probs(A, B, preds):
p =  np.true_divide(1, (1 + np.exp(A*preds + B)))
p = p.reshape(p.shape[0], )
return p

#uses gradient descent to scale the ```
Example 48
 Project: momi2   Author: popgenmethods   File: demo_plotter.py    GNU General Public License v3.0 5 votes
```def step_time(self, nxt_t, add=True):
assert self.curr_t <= nxt_t
self.curr_N = self.curr_N * np.exp(
-self.curr_g * (nxt_t - self.curr_t))
self.curr_t = nxt_t

```def transformed_expi_naive(x):
```def expi(x):