# Python numpy.exp() Examples

The following are code examples for showing how to use numpy.exp(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
def compute_mode(self):
    """
    Pre-compute mode vectors from candidate locations (in spherical
    coordinates).
    """
    # A populated lookup table is required before mode vectors can be built.
    if self.num_loc is None:
        raise ValueError('Lookup table appears to be empty. Run build_lookup().')
    self.mode_vec = np.zeros((self.max_bin, self.M, self.num_loc),
                             dtype='complex64')
    if self.nfft % 2 == 1:
        raise ValueError('Signal length must be even.')
    # Imaginary angular-frequency grid for the first max_bin FFT bins.
    f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
        * 1j * 2 * np.pi
    for loc_idx in range(self.num_loc):
        src = self.loc[:, loc_idx]
        for mic_idx in range(self.M):
            mic = self.L[:, mic_idx]
            if self.mode == 'near':
                dist = np.linalg.norm(mic - src, axis=1)
            if self.mode == 'far':
                dist = np.dot(src, mic)
            # tau = np.round(self.fs*dist/self.c) # discrete - jagged
            tau = self.fs * dist / self.c  # "continuous" - smoother
            self.mode_vec[:, mic_idx, loc_idx] = np.exp(f * tau)
Example 2
 Project: skylab   Author: coenders   File: ps_model.py    GNU General Public License v3.0 7 votes
def background(self, ev):
    r"""Spatial background distribution.

    For IceCube it is only declination dependent; in a more general
    scenario it depends on zenith and azimuth, e.g. in ANTARES, KM3NET,
    or when using time dependent information.

    Parameters
    -----------
    ev : structured array
        Event array; the important field for this calculation is *sinDec*.

    Returns
    --------
    P : array-like
        Spatial background probability for each event to be found
        at *sinDec*.
    """
    # Uniform in right ascension (factor 1 / 2pi); the declination density
    # is stored as a spline in log-space, hence the exp().
    log_density = self.bckg_spline(ev["sinDec"])
    return 1. / 2. / np.pi * np.exp(log_density)
Example 3
def inv_digamma(y, eps=1e-8, max_iter=100):
    '''Numerical inverse to the digamma function by Newton root finding.'''
    # Standard starting guesses: exp(y) + 0.5 for y >= -2.22, and the
    # asymptotic -1 / (y - digamma(1)) otherwise.
    if y >= -2.22:
        x = np.exp(y) + 0.5
    else:
        x = -1 / (y - digamma(1))

    # Newton iterations; digamma'(x) == polygamma(1, x).
    for _ in range(max_iter):
        x_next = x - (digamma(x) - y) / polygamma(1, x)
        if np.abs(x - x_next) < eps:
            break
        x = x_next

    return x_next
Example 4
def test_exp_ad_results():
    # exp is defined on all real numbers: check a positive value, a
    # negative value, and zero, with seed derivative 2.
    for value in (10, -5, 0):
        ad = AutoDiff(value, 2)
        result = ef.exp(ad)
        assert result.val == np.exp(value)
        assert result.der == 2 * np.exp(value)
        assert result.jacobian == 1 * np.exp(value)
Example 5
def test_exp_ad_results():
    # Value defined at all real numbers: check positive, negative, zero
    # inputs with seed derivative 2.
    for value in (10, -5, 0):
        dual = Dual(value, 2)
        result = ef.exp(dual)
        assert result.Real == np.exp(value)
        assert result.Dual == 2 * np.exp(value)
Example 6
def compute_final_scores(self, average_loss, nums):
    """Normalise accumulated losses by counts and derive perplexities.

    Mutates and returns *average_loss*: total macro/micro losses are
    averaged, negative-example averages are computed (zeroed when no
    negatives were seen), diffs between negative and total averages are
    added, and perplexity is exp() of the averaged total losses.
    """
    for scope in ("macro", "micro"):
        average_loss["total_" + scope] /= nums["total_" + scope]

    if nums["negative_micro"]:
        for scope in ("macro", "micro"):
            average_loss["negative_" + scope] /= nums["negative_" + scope]
    else:
        # no negative examples seen in this evaluation
        average_loss["negative_macro"] = 0
        average_loss["negative_micro"] = 0

    for scope in ("macro", "micro"):
        average_loss[scope + "_diff"] = (average_loss["negative_" + scope] -
                                         average_loss["total_" + scope])
        average_loss["ppl_" + scope] = np.exp(average_loss["total_" + scope])

    return average_loss
Example 7
def gen_visibility(alphak, phi_k, pos_mic_x, pos_mic_y):
    """
    generate visibility from the Dirac parameter and microphone array layout
    :param alphak: Diracs' amplitudes
    :param phi_k: azimuths
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :return: num_mic x num_mic matrix of pairwise visibilities
    """
    xk, yk = polar2cart(1, phi_k)
    num_mic = pos_mic_x.size
    visi = np.zeros((num_mic, num_mic), dtype=complex)
    # Fix: the original used the Python-2-only builtin `xrange`.
    for q in range(num_mic):
        p_x_outer = pos_mic_x[q]
        p_y_outer = pos_mic_y[q]
        for qp in range(num_mic):
            p_x_qqp = p_x_outer - pos_mic_x[qp]  # a scalar
            p_y_qqp = p_y_outer - pos_mic_y[qp]  # a scalar
            # steering phase of baseline (q, qp), weighted by the amplitudes
            visi[qp, q] = np.dot(np.exp(-1j * (xk * p_x_qqp + yk * p_y_qqp)), alphak)
    return visi
Example 8
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    build the matrix that maps the Fourier series to the visibility
    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that contains microphones x coordinates
    :param p_mic_y: a vector that contains microphones y coordinates
    :return: matrix of size num_mic*(num_mic-1) x (2M+1)
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    row = 0
    for q in range(num_mic):
        x_q = p_mic_x[q]
        y_q = p_mic_y[q]
        for qp in range(num_mic):
            if q == qp:
                continue  # only distinct microphone pairs form baselines
            dx = x_q - p_mic_x[qp]
            dy = y_q - p_mic_y[qp]
            baseline_norm = np.sqrt(dx ** 2 + dy ** 2)
            baseline_angle = np.arctan2(dy, dx)
            # Jacobi-Anger expansion coefficients of the plane wave
            G[row, :] = (-1j) ** ms * sp.special.jv(ms, baseline_norm) * \
                np.exp(1j * ms * baseline_angle)
            row += 1
    return G
Example 9
def mtx_updated_G(phi_recon, M, mtx_amp2visi_ri, mtx_fri2visi_ri):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations.
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is between -M to M
    :param mtx_amp2visi_ri: real/imag-stacked mapping amplitudes -> visibilities
    :param mtx_fri2visi_ri: real/imag-stacked mapping FRI sequence -> visibilities
    :return: the updated G matrix
    """
    L = 2 * M + 1
    half_ms = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    # size: (M + 1) x K
    amp2freq = np.exp(-1j * half_ms * phi_recon)
    # stack real and imaginary parts -> size: (2M + 1) x K
    amp2freq_ri = np.vstack((amp2freq.real, amp2freq.imag[:-1, :]))
    fri2amp_ri = linalg.lstsq(amp2freq_ri, np.eye(L))[0]
    # projection of mtx_freq2visi onto the null space of mtx_fri2amp
    null_proj = np.eye(L) - np.dot(fri2amp_ri.T,
                                   linalg.lstsq(fri2amp_ri.T, np.eye(L))[0])
    return np.dot(mtx_amp2visi_ri, fri2amp_ri) + \
        np.dot(mtx_fri2visi_ri, null_proj)
Example 10
def predict_on_batch(self, x):
    """Predict donor/acceptor cleavage times for a batch of sequences.

    Sequences shorter than 94 nt are padded via elongate_intron() before
    the feature pipeline; model outputs are exponentiated (the models
    appear to predict in log-space).
    """
    # run feature collection pipeline for the batch
    soi = x.astype(str)  # make sure the type is right

    # pad sequences that are too short for the feature extractor
    for idx in range(len(soi)):
        if len(soi[idx]) < 94:
            soi[idx] = elongate_intron(soi[idx])

    features = self._construct_features_array(soi)

    don_pred = self.don_model.predict(features)
    acc_pred = self.acc_model.predict(features)

    return {'acc_cleavage_time': np.exp(acc_pred),
            'don_cleavage_time': np.exp(don_pred)}
Example 11
def predict_on_batch(self, x):
    """Predict donor/acceptor cleavage times for a batch.

    *x* is a mapping with "soi" (sequences of interest) and "bp_index"
    (branch-point indexes, stashed on the instance for the feature
    pipeline). Model outputs are exponentiated (log-space predictions).
    """
    # run feature collection pipeline for the batch
    soi = x["soi"].astype(str)  # make sure the type is right
    self.bp_indexes = x["bp_index"]

    # pad sequences that are too short for the feature extractor
    for idx in range(len(soi)):
        if len(soi[idx]) < 94:
            soi[idx] = elongate_intron(soi[idx])

    features = self._construct_features_array(soi)

    don_pred = self.don_model.predict(features)
    acc_pred = self.acc_model.predict(features)

    return {'acc_cleavage_time': np.exp(acc_pred),
            'don_cleavage_time': np.exp(don_pred)}
Example 12
 Project: skylab   Author: coenders   File: ps_model.py    GNU General Public License v3.0 6 votes
def _setup(self, exp):
    r"""Set up everything for weight calculation.

    Builds the background histogram over the configured parameters,
    smooths it with the kernel, and records the binning/range used.
    """
    # reset all cached spline weights
    self._w_spline_dict = dict()

    # histogram the experimental data over the configured parameters
    hist_vars = [exp[par] for par in self.hist_pars]

    self._wB_hist, self._wB_bins = self._hist(hist_vars)
    self._wB_hist = kernel_func(self._wB_hist, self._XX)
    # only bins with events define the background domain
    self._wB_domain = self._wB_hist > 0

    # overwrite bins/range with what was actually used
    self._ndim_bins = self._wB_bins
    self._ndim_range = tuple((edges[0], edges[-1])
                             for edges in self._wB_bins)

    return
Example 13
 Project: skylab   Author: coenders   File: data.py    GNU General Public License v3.0 6 votes
def exp(N=100):
    r"""Create uniformly distributed data on sphere.

    :param N: number of pseudo-experimental events to generate
    :return: structured array with ``ra``, ``sinDec``, ``sigma``, ``logE``
    """
    g = 3.7  # spectral index of the pareto-distributed energies

    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    arr = np.empty((N, ), dtype=[("ra", float), ("sinDec", float),
                                 ("sigma", float), ("logE", float)])

    arr["ra"] = np.random.uniform(0., 2.*np.pi, N)
    arr["sinDec"] = np.random.uniform(-1., 1., N)

    E = np.log10(np.random.pareto(g, size=N) + 1)
    # angular error shrinks with energy, floored at mrs_min
    arr["sigma"] = np.random.lognormal(mean=np.log((mrs - mrs_min) * np.exp(-np.log(10)*E) + mrs_min),
                                       sigma=log_sig)
    arr["logE"] = E + logE_res * np.random.normal(size=N)

    return arr
Example 14
 Project: skylab   Author: coenders   File: data.py    GNU General Public License v3.0 6 votes
def MC(N=1000):
    r"""Create uniformly distributed MC data on sphere.

    :param N: number of MC events to generate
    :return: structured array with reconstructed and true event fields
    """
    g = 2.  # spectral index of the pareto-distributed true energies

    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    arr = np.empty((N, ), dtype=[("ra", float), ("sinDec", float),
                                 ("sigma", float), ("logE", float),
                                 ("trueRa", float), ("trueDec", float),
                                 ("trueE", float), ("ow", float)])

    # true information

    arr["trueRa"] = np.random.uniform(0., 2.*np.pi, N)
    arr["trueDec"] = np.arcsin(np.random.uniform(-1., 1., N))
    arr["trueE"] = np.random.pareto(g, size=N) + 1
    # one-weights proportional to E^g, normalised to sum to one
    arr["ow"] = arr["trueE"]**(g)
    arr["ow"] /= arr["ow"].sum()

    # smear true direction/energy to obtain reconstructed quantities
    eta = np.random.uniform(0., 2.*np.pi, len(arr))
    arr["sigma"] = np.random.lognormal(mean=np.log((mrs - mrs_min) * np.exp(-np.log(10)*np.log10(arr["trueE"])) + mrs_min),
                                       sigma=log_sig)
    arr["ra"] = arr["trueRa"] + np.cos(eta) * arr["sigma"] / np.cos(arr["trueDec"])
    arr["sinDec"] = np.sin(arr["trueDec"] + np.sin(eta) * arr["sigma"])
    arr["logE"] = np.log10(arr["trueE"]) + logE_res * np.random.normal(size=len(arr))

    return arr
Example 15
def kernel_matrix(svm_model, original_X):
    """Compute the full kernel (Gram) matrix for the training data.

    Supports polynomial and gaussian (RBF) kernels, in both hard- and
    soft-margin variants. Vectorised: the gaussian case uses scipy's
    pdist/squareform instead of an explicit double loop.
    """
    kernel = svm_model.svm_kernel
    if kernel in ('polynomial_kernel', 'soft_polynomial_kernel'):
        K = (svm_model.zeta + svm_model.gamma * np.dot(original_X, original_X.T)) ** svm_model.Q
    elif kernel in ('gaussian_kernel', 'soft_gaussian_kernel'):
        # pairwise squared euclidean distances
        pairwise_dists = squareform(pdist(original_X, 'euclidean'))
        K = np.exp(-svm_model.gamma * (pairwise_dists ** 2))

    return K
Example 16
def kernel_matrix_xX(svm_model, original_x, original_X):
    """Compute kernel values between a single sample and the training set.

    Returns a 1-D array of k(x, X_j) for every row of original_X.
    """
    kernel = svm_model.svm_kernel
    if kernel in ('polynomial_kernel', 'soft_polynomial_kernel'):
        K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q
    elif kernel in ('gaussian_kernel', 'soft_gaussian_kernel'):
        # squared distances from x to every row of X, flattened to 1-D
        sq_dists = cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2
        K = np.exp(-svm_model.gamma * sq_dists).ravel()

    return K
Example 17
def get_new_pop(elite_pop, elite_pop_scores, pop_size):
    """Breed a new population from the elite via fitness-weighted crossover.

    Parents are sampled with probability softmax(elite_pop_scores); each
    child gene is taken from parent 1 or parent 2 with probability 0.5.

    Bug fix: the original computed `mask`, `cand1` and `cand2` but
    returned an undefined name `next_pop`; the crossover result is now
    actually built (element-wise parent selection) and returned.
    """
    # softmax over scores, shifted by the max for numerical stability
    scores_logits = np.exp(elite_pop_scores - elite_pop_scores.max())
    elite_pop_probs = scores_logits / scores_logits.sum()
    cand1 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    cand2 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    mask = np.random.rand(pop_size, elite_pop.shape[1]) < 0.5
    next_pop = np.where(mask, cand1, cand2)
    return next_pop
Example 18
def np_softmax(x, t=1):
    """Temperature-scaled softmax over the last axis (numerically stable)."""
    scaled = x / t
    # shift by the max so exp() never overflows
    shifted = scaled - np.max(scaled, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)
Example 19
def scenario_values(cls, returns, neutral, current_vola):
    """Map devolatilised log-returns to price scenarios around *neutral*."""
    # re-inflate the returns with the current volatility, then scale
    return neutral * np.exp(current_vola * returns)
Example 20
def compute_scenarios(self, d, n_scenarios=750):
    """Build price scenarios from the last *n_scenarios* devolatilised returns.

    d : timestamp(s) in milliseconds; element 0 is the anchor date.
    Returns (scenarios, neutral) where *neutral* is the anchor-date Close.

    NOTE(review): uses DataFrame.ix, which was removed in pandas 1.0 —
    presumably .loc/.iloc equivalents are needed on modern pandas; confirm.
    """
    # identify returns
    dates = pd.to_datetime(d, unit='ms')
    max_date = dates[0].date()
    # three-year lookback window ending at the anchor date
    min_date = max_date.replace(year=max_date.year-3)

    logging.info('Computing returns between ') #, str(max_date), ' and ', str(min_date))
    # keep the last n_scenarios+1 rows (the final row is the anchor itself)
    self.returns_df = self.df[min_date:max_date].ix[-n_scenarios-1:]
    neutral, vola = self.returns_df.ix[max_date][['Close', 'Vola']]
    # re-inflate devolatilised log-returns around the neutral price
    scenarios = neutral * np.exp( vola * self.returns_df.ix[:-1].DevolLogReturns )
    return scenarios, neutral
Example 21
```def exp(x):
''' Compute the exponential of an AutoDiff object and its derivative.

INPUTS
======
x: an AutoDiff object

RETURNS
=======
A new AutoDiff object with calculated value and derivative.

EXAMPLES
========
>>> x = AutoDiff(10, 2)
>>> myAutoDiff = exp(x)
>>> myAutoDiff.val
22026.465794806718
>>> myAutoDiff.der
2*22026.465794806718
>>> myAutoDiff.jacobian
22026.465794806718
'''
try:
new_val = np.exp(x.val)
new_der = np.exp(x.val) * x.der
new_jacobian = np.exp(x.val) * x.jacobian
return AutoDiff(new_val, new_der, x.n, 0, new_jacobian)
except AttributeError:
try:
return Dual(np.exp(x.Real), x.Dual*np.exp(x.Real))
except AttributeError:
try:
return Dual(exp(x.Real), x.Dual*exp(x.Real))
except AttributeError:
# Constant
return_val = np.exp(x)
return return_val

# natural log ```
Example 22
```def logistic(x):
''' Compute logistic function for AutoDiff or Dual object.

INPUTS
======
x: an AutoDiff object or Dual object

RETURNS
=======
A new AutoDiff or Dual object with calculated value and derivative.

'''
try:
f_l = (1/(1+np.exp(-x.val)))
new_val = f_l
new_der = (1 - f_l)*f_l*x.der
new_jacobian = (1 - f_l)*f_l*x.jacobian
return AutoDiff(new_val, new_der, x.n, 0, new_jacobian)
except AttributeError:
try:
f_l = (1/(1 + np.exp(-x.Real)))
return Dual(f_l, (1 - f_l)*f_l*x.Dual)
except AttributeError:
try:
return Dual(logistic(x.Real), (1 - logistic(x.Real))*logistic(x.Real)*x.Dual)
except AttributeError:
# Constant
return_val = (1/(1+np.exp(-x)))
return return_val ```
Example 23
def test_exp_constant_results():
    # exp of plain numeric constants must agree with numpy
    for value in (0, 5, -10):
        assert ef.exp(value) == np.exp(value)
Example 24
def test_exp_types():
    # non-numeric input must raise TypeError
    for bad in ('x', "1234"):
        with pytest.raises(TypeError):
            ef.exp(bad)

# ---------------LOG----------------#
Example 25
def test_logistic_constant_results():
    # logistic of constants must match the closed-form sigmoid
    assert ef.logistic(5) == 1 / (1 + np.exp(-5))
    assert ef.logistic(0) == 1 / (1 + np.exp(-0))
Example 26
def test_exp_constant_results():
    # exp of plain numeric constants must agree with numpy
    for value in (0, 5, -10):
        assert ef.exp(value) == np.exp(value)
Example 27
def test_exp_types():
    # non-numeric input must raise TypeError
    for bad in ('x', "1234"):
        with pytest.raises(TypeError):
            ef.exp(bad)

# ---------------LOG----------------#
Example 28
def test_logistic_ad_results():
    # Positive reals: value and derivative of the sigmoid at 0.5 with
    # seed derivative 2.
    dual = Dual(0.5, 2.0)
    result = ef.logistic(dual)
    assert result.Real == np.array([[1 / (1 + np.exp(-0.5))]])
    assert result.Dual == np.array([[2 * np.exp(-0.5) / ((1 + np.exp(-0.5)) ** 2)]])
Example 29
def make_evaluator(opt, *args):
    """Pick the evaluator matching the experiment type in opt.exp."""
    if opt.exp == "generation":
        return AtomicGenerationEvaluator(opt, *args)
    # everything else is treated as a classification experiment
    return AtomicClassificationEvaluator(opt, *args)
Example 30
def compute_final_scores(self, average_loss, nums):
    """Average accumulated losses by example counts and add perplexities."""
    for scope in ("macro", "micro"):
        average_loss["total_" + scope] /= nums["total_" + scope]
        # perplexity is exp of the average loss
        average_loss["ppl_" + scope] = np.exp(average_loss["total_" + scope])
    return average_loss
Example 31
def gaussian_single_peak(wl, alpha, beta, gamma):
    """
    Helper function to generate absorption data from gaussian peak
    parameters: amplitude alpha, centre beta, width gamma.
    (The original docstring said "lorentzian", but the formula is gaussian.)
    """
    offset = wl - beta
    return alpha * np.exp(-offset ** 2 / gamma)
Example 32
def gen_sig_at_mic(sigmak2_k, phi_k, pos_mic_x,
                   pos_mic_y, omega_band, sound_speed,
                   SNR, Ns=256):
    """
    generate complex base-band signal received at microphones
    :param sigmak2_k: the variance of the circulant complex Gaussian signal
        emitted by the K sources
    :param phi_k: source locations (azimuths)
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param SNR: SNR for the received signal at microphones
    :param Ns: number of snapshots used to estimate the covariance matrix
    :return: y_mic: received (complex) signal at microphones
    """
    num_mic = pos_mic_x.size
    xk, yk = polar2cart(1, phi_k)  # source locations in cartesian coordinates
    # reshape to row/column vectors so broadcasting forms mic-by-source grids
    xk = np.reshape(xk, (1, -1), order='F')
    yk = np.reshape(yk, (1, -1), order='F')
    pos_mic_x = np.reshape(pos_mic_x, (-1, 1), order='F')
    pos_mic_y = np.reshape(pos_mic_y, (-1, 1), order='F')

    t = np.reshape(np.linspace(0, 10 * np.pi, num=Ns), (1, -1), order='F')
    K = sigmak2_k.size
    sigmak2_k = np.reshape(sigmak2_k, (-1, 1), order='F')

    # x_tilde_k size: K x length_of_t
    # circular complex Gaussian process
    x_tilde_k = np.sqrt(sigmak2_k / 2.) * (np.random.randn(K, Ns) + 1j *
                                           np.random.randn(K, Ns))
    # steering phases propagate each source to the mics; the signal is then
    # modulated up to the mid-band frequency
    y_mic = np.dot(np.exp(-1j * (xk * pos_mic_x + yk * pos_mic_y) / (sound_speed / omega_band)),
                   x_tilde_k * np.exp(1j * omega_band * t))
    # scale additive complex Gaussian noise to achieve the requested SNR
    signal_energy = linalg.norm(y_mic, 'fro') ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / (Ns * num_mic)
    noise = np.sqrt(sigma2_noise / 2.) * (np.random.randn(*y_mic.shape) + 1j *
                                          np.random.randn(*y_mic.shape))
    y_mic_noisy = y_mic + noise
    return y_mic_noisy, y_mic
Example 33
def gen_dirty_img(visi, pos_mic_x, pos_mic_y, omega_band, sound_speed, phi_plt):
    """
    Compute the dirty image associated with the given measurements. Here the
    Fourier transform that is not measured by the microphone array is taken
    as zero.
    :param visi: the measured visibilities
    :param pos_mic_x: a vector contains microphone array locations (x-coordinates)
    :param pos_mic_y: a vector contains microphone array locations (y-coordinates)
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param phi_plt: plotting grid (azimuth on the circle) to show the dirty image
    :return: the dirty image evaluated on the plotting grid
    """
    img = np.zeros(phi_plt.size, dtype=complex)
    x_plt, y_plt = polar2cart(1, phi_plt)
    num_mic = pos_mic_x.size

    # express microphone locations in wavelength-normalised units
    pos_mic_x_normalised = pos_mic_x / (sound_speed / omega_band)
    pos_mic_y_normalised = pos_mic_y / (sound_speed / omega_band)

    count_visi = 0
    # Fix: the original used the Python-2-only builtin `xrange`.
    for q in range(num_mic):
        p_x_outer = pos_mic_x_normalised[q]
        p_y_outer = pos_mic_y_normalised[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - pos_mic_x_normalised[qp]  # a scalar
                p_y_qqp = p_y_outer - pos_mic_y_normalised[qp]  # a scalar
                # <= the negative sign converts DOA to propagation vector
                img += visi[count_visi] * \
                    np.exp(-1j * (p_x_qqp * x_plt + p_y_qqp * y_plt))
                count_visi += 1
    return img / (num_mic * (num_mic - 1))
Example 34
def mtx_updated_G_multiband(phi_recon, M, mtx_amp2visi_ri,
                            mtx_fri2visi_ri, num_bands):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations (multiband case).
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is between -M to M
    :param mtx_amp2visi_ri: real/imag-stacked mapping amplitudes -> visibilities
    :param mtx_fri2visi_ri: real/imag-stacked mapping FRI sequence -> visibilities
    :param num_bands: number of frequency sub-bands
    :return: the updated G matrix
    """
    L = 2 * M + 1
    half_ms = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    # size: (M + 1) x K
    amp2freq = np.exp(-1j * half_ms * phi_recon)
    # stack real and imaginary parts -> size: (2M + 1) x K
    amp2freq_ri = np.vstack((amp2freq.real, amp2freq.imag[:-1, :]))
    fri2amp_ri = linalg.lstsq(amp2freq_ri, np.eye(L))[0]
    # projection of mtx_freq2visi onto the null space of mtx_fri2amp
    null_proj = np.eye(L) - np.dot(fri2amp_ri.T,
                                   linalg.lstsq(fri2amp_ri.T, np.eye(L))[0])
    # the per-band blocks are identical, so tile them block-diagonally
    return np.dot(mtx_amp2visi_ri,
                  linalg.block_diag(*([fri2amp_ri] * num_bands))) + \
        np.dot(mtx_fri2visi_ri,
               linalg.block_diag(*([null_proj] * num_bands)))
Example 35
def expit(x):
    """Logistic sigmoid: 1 / (1 + exp(-x))."""
    denom = 1. + np.exp(-x)
    return 1. / denom
Example 36
def _softmax(x):
    """Numerically stable softmax (shift by the max before exponentiating)."""
    exps = np.exp(x - np.max(x))
    return exps / exps.sum()
Example 37
def sigmoid_array(x):
    """Element-wise sigmoid of an array (or scalar)."""
    return 1 / (1 + np.exp(-x))

# tally the prediction accuracy
Example 38
def sigmoid_array(x):
    """Element-wise sigmoid of an array (or scalar)."""
    return 1 / (1 + np.exp(-x))

# tally the prediction accuracy
Example 39
def sigmoid_array(x):
    """Element-wise sigmoid of an array (or scalar)."""
    return 1 / (1 + np.exp(-x))
Example 40
```def bbox_transform_inv(boxes, deltas):
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)

boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights

dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]

pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]

pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h

return pred_boxes ```
Example 41
def get_A(self):
    """Return the (positive) matrix exp(A).

    When constant_vm is set, self.A is exponentiated directly; otherwise
    the upper triangle of self.A is symmetrised (upper + its transpose)
    before exponentiation.
    """
    if self.constant_vm:
        return tf.exp(self.A)
    upper = tf.linalg.band_part(self.A, 0, -1)
    symmetric = upper + tf.transpose(upper, perm=(0, 2, 1))
    return tf.exp(symmetric)
Example 42
def guided_attention(g=0.2):
    '''Guided attention. Refer to page 3 on the paper.'''
    W = np.zeros((hp.max_N, hp.max_T), dtype=np.float32)
    # the penalty grows with distance from the (normalised) diagonal
    for n_pos in range(W.shape[0]):
        for t_pos in range(W.shape[1]):
            offset = t_pos / float(hp.max_T) - n_pos / float(hp.max_N)
            W[n_pos, t_pos] = 1 - np.exp(-offset ** 2 / (2 * g * g))
    return W
Example 43
def ensembleVer2(input_folder, output_path):
    """Average per-model CSV prediction scores and write an ensemble CSV.

    NOTE(review): as scraped, this function is incomplete — `df` is used
    inside the loop without ever being assigned (presumably a
    pd.read_csv of each file is missing), and `stacked_1` is referenced
    but never defined. Confirm against the original project before use.
    """
    print('Out:' + output_path)
    csv_files = [f for f in os.listdir(input_folder) if f.endswith('.csv')]
    model_scores = []
    for i, csv in enumerate(csv_files):
        # NOTE(review): `df` is never assigned here — a read of `csv` is missing.
        if i == 0:
            index = df.index
        else:
            # all files must share the same row index to be averaged
            assert index.equals(df.index), "Indices of one or more files do not match!"
        model_scores.append(df)
    print("Read %d files. Averaging..." % len(model_scores))

    # print(model_scores)
    concat_scores = pd.concat(model_scores)
    concat_scores['is_iceberg'] = concat_scores['is_iceberg'].astype(np.float32)

    # group by the original row index and take the mean across models
    averaged_scores = concat_scores.groupby(level=0).mean()
    assert averaged_scores.shape[0] == len(list(index)), "Something went wrong when concatenating/averaging!"
    averaged_scores = averaged_scores.reindex(index)

    # NOTE(review): `stacked_1` is undefined in this scope.
    print(stacked_1.shape)
    sub = pd.DataFrame()
    sub['id'] = stacked_1['id']

    # geometric mean in probability space: log -> mean -> exp
    sub['is_iceberg'] = np.exp(np.mean(
        [
            averaged_scores['is_iceberg'].apply(lambda x: np.log(x))
        ], axis=0))

    print(sub.shape)
    sub.to_csv(output_path, index=False, float_format='%.9f')
    print("Averaged scores saved to %s" % output_path)

# Convert the np arrays into the correct dimension and type
# Note that BCEloss requires Float in X as well as in y
Example 44
 Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_keras.py    BSD 3-Clause "New" or "Revised" License 5 votes
def test_get_logits(self):
    """Probabilities from get_probs must equal softmax of get_logits."""
    import tensorflow as tf
    wrapper = KerasModelWrapper(self.model)
    x = tf.placeholder(tf.float32, shape=(None, 100))
    preds = wrapper.get_probs(x)
    logits = wrapper.get_logits(x)

    x_val = np.random.rand(2, 100)
    tf.global_variables_initializer().run(session=self.sess)
    p_val, logits = self.sess.run([preds, logits], feed_dict={x: x_val})
    # reference softmax computed directly in numpy
    p_gt = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
    self.assertTrue(np.allclose(p_val, p_gt, atol=1e-6))
Example 45
 Project: neural-fingerprinting   Author: StephanZheng   File: test_utils_tf.py    BSD 3-Clause "New" or "Revised" License 5 votes
def numpy_kl_with_logits(p_logits, q_logits):
    """Mean KL divergence KL(softmax(p_logits) || softmax(q_logits)) in numpy.

    Bug fix: the original inner softmax shifted the logits in place
    (`logits -= ...`), silently mutating the caller's arrays. Both
    distributions are now computed via a non-mutating, max-shifted
    log-softmax (shift-invariant, so the returned value is unchanged).
    """
    def log_softmax(logits):
        # shift by the row max for numerical stability; does not mutate input
        shifted = logits - np.max(logits, axis=1, keepdims=True)
        return shifted - np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))

    log_p = log_softmax(p_logits)
    log_q = log_softmax(q_logits)
    p = np.exp(log_p)
    return (p * (log_p - log_q)).sum(axis=1).mean()
Example 46
def _griffin_lim(S):
    """Griffin-Lim iterative phase reconstruction from a magnitude spectrogram.

    Fix: `np.complex` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin `complex` is the documented replacement.
    """
    # random initial phases
    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(complex)
    for i in range(hparams.griffin_lim_iters):
        if i > 0:
            # re-estimate phases from the current time-domain signal
            angles = np.exp(1j * np.angle(_stft(y)))
        y = _istft(S_complex * angles)
    return y
Example 47
def sigmoid(z):
    """
    The sigmoid function, classic neural net activation function.
    (The docstring mentions @jit speeding this up, but no decorator is
    visible in this snippet — presumably applied in the original source.)
    """
    return 1.0 / (1.0 + np.exp(-z))
Example 48
def ilogit(log_odds):
    """Inverse logit (sigmoid), suppressing numpy overflow warnings for
    extreme inputs."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return 1. / (1. + np.exp(-log_odds))
Example 49
 Project: fip-walkgen   Author: stephane-caron   File: state_estimation.py    GNU General Public License v3.0 5 votes
def estimate(self, dt, real, cur_est, noise_intensity):
    """
    Update an estimation under noise and delays.

    Parameters
    ----------
    dt : scalar
        Time since last estimation (usually one control cycle).
    real : array
        Ground-truth coordinates.
    cur_est : array
        Current estimation.
    noise_intensity : scalar
        Intensity of noise signal in [m] / [s].

    Returns
    -------
    estimate : array
        New estimate.
    """
    # first-order delay: the old estimation error decays exponentially
    error = cur_est - real
    delay = error * exp(-dt / self.delay) if self.delay > 1e-4 else 0.
    if noise_intensity < 1e-4:
        return real + delay
    # Gaussian noise scaled by the elapsed time
    sigma = noise_intensity * dt
    noise = random.normal(0., sigma, size=real.shape)
    return real + delay + noise
Example 50
 Project: fip-walkgen   Author: stephane-caron   File: fip_dynamics.py    GNU General Public License v3.0 5 votes
def __update_zmp(self, target, dt):
    """Track *target* with a first-order delay plus optional Gaussian noise."""
    # residual of the current ZMP w.r.t. the target decays exponentially
    residual = self.zmp_state.p - target
    delay = residual * exp(-dt / self.zmp_delay) if self.zmp_delay > 1e-4 else 0.
    if self.zmp_noise < 1e-4:
        self.zmp_state.set_pos(target + delay)
        return
    sigma = self.zmp_noise * dt
    noise = normal(0., sigma, size=target.shape)
    self.zmp_state.set_pos(target + delay + noise)