The following are code examples showing how to use autograd.numpy.sqrt(). They are taken from open source Python projects; you can vote up the examples you find helpful or vote down those you don't.

Example 1
def stochastic_update_Adam(w, grad_w, mt, vt, lrate, iteration):
    """One Adam-style parameter step from pre-computed moment estimates.

    NOTE(review): grad_w is accepted but never used, and mt/vt are returned
    unchanged -- presumably the caller updates the moment estimates itself;
    confirm against the call site.
    """
    beta1, beta2, epsilon = 0.9, 0.999, 1e-8

    # Bias-corrected first and second moment estimates.
    mt_hat = mt / (1.0 - beta1 ** iteration)
    vt_hat = vt / (1.0 - beta2 ** iteration)

    # Per-coordinate scaling by the RMS of the gradient history.
    step_scale = 1.0 / (np.sqrt(vt_hat) + epsilon)
    w = w - lrate * mt_hat * step_scale

    return w, mt, vt
Example 2
def rms_norm(array):
    """Return the root-mean-square norm of the array.

    Arguments:
    array :: ndarray (N) - The array to compute the norm of.

    Returns:
    norm :: float - The rms norm of the array.
    """
    element_count = anp.prod(anp.shape(array))
    total_power = anp.sum(array * anp.conjugate(array))
    return anp.sqrt(total_power / element_count)

### ISOMORPHISMS ###

# A row vector is np.array([[0, 1, 2]])
# A column vector is np.array([[0], [1], [2]])
Example 3
def feature_matrix(self, tst_data):
    """
    Compute the n x J feature matrix. The test statistic and other relevant
    quantities can all be expressed as a function of this matrix. Here, n =
    sample size, J = number of test locations.
    """
    X, Y = tst_data.xy()
    locs = self.test_locs
    kernel = self.k
    num_locs = locs.shape[0]  # J

    # Each n x J kernel evaluation is scaled by 1/sqrt(J).
    scale = np.sqrt(num_locs)
    feat_x = kernel.eval(X, locs) / scale
    feat_y = kernel.eval(Y, locs) / scale
    return feat_x - feat_y
Example 4
def loss_sat(self, m, s):
    # Closed-form moments of a saturating (exponential) loss under a Gaussian
    # state with mean m and covariance s.  Returns (expected loss L+1,
    # loss variance S, state-loss covariance C).
    # NOTE(review): relies on module-level solve/exp/sqrt/det helpers --
    # presumably autograd.numpy equivalents; confirm in the enclosing module.
    D = len(m)

    # Fall back to identity weights / zero target if the instance
    # does not define them.
    W = self.W if hasattr(self, 'W') else np.eye(D)
    z = self.z if hasattr(self, 'z') else np.zeros(D)
    m, z = np.atleast_2d(m), np.atleast_2d(z)

    sW = np.dot(s, W)
    # (I + sW)^-1 W via a transposed solve (avoids forming the inverse).
    ispW = solve((np.eye(D) + sW).T, W.T).T
    # First moment of the (negated) exponential loss -- Gaussian integral.
    L = -exp(-(m - z) @ ispW @ (m - z).T / 2) / sqrt(det(np.eye(D) + sW))

    i2spW = solve((np.eye(D) + 2 * sW).T, W.T).T
    # Second moment term, used for the variance below.
    r2 = exp(-(m - z) @ i2spW @ (m - z).T) / sqrt(det(np.eye(D) + 2 * sW))
    S = r2 - L**2

    # Input-output covariance of state and loss.
    t = np.dot(W, z.T) - ispW @ (np.dot(sW, z.T) + m.T)
    C = L * t

    return L + 1, S, C
Example 5
def create_pf():
    """Sample points along the analytic Pareto front of the concave problem."""
    grid = np.linspace(-1 / np.sqrt(2), 1 / np.sqrt(2))

    # Solutions on the front satisfy x1 == x2; keep only the objective values.
    front = [concave_fun_eval(np.array([v, v]))[0] for v in grid]
    return np.array(front)

### optimization method ###
Example 6
def gaussbern_rbm_tuple(var, dx=50, dh=10, n=sample_size):
    """
    Get a tuple of Gaussian-Bernoulli RBM problems.
    We follow the parameter settings as described in section 6 of Liu et al.,
    2016.

    - var: Gaussian noise variance for perturbing B.
    - dx: observed dimension
    - dh: latent dimension

    Return p, a DataSource

    NOTE(review): ``n`` is accepted but unused in this body.
    """
    with util.NumpySeedContext(seed=1000):
        # +/-1 coupling matrix with Gaussian biases.
        B = np.random.randint(0, 2, (dx, dh)) * 2 - 1.0
        b = np.random.randn(dx)
        c = np.random.randn(dh)
        p = density.GaussBernRBM(B, b, c)

        # The data source samples an RBM whose couplings are perturbed by
        # Gaussian noise with the requested variance.
        noisy_B = B + np.random.randn(dx, dh) * np.sqrt(var)
        gb_rbm = data.DSGaussBernRBM(noisy_B, b, c, burnin=50)

    return p, gb_rbm
Example 7
def gbrbm_perturb(var_perturb_B, dx=50, dh=10):
    """
    Get a Gaussian-Bernoulli RBM problem where the first entry of the B matrix
    (the matrix linking the latent and the observation) is perturbed.

    - var_perturb_B: Gaussian noise variance for perturbing B.
    - dx: observed dimension
    - dh: latent dimension

    Return p (density), data source
    """
    with util.NumpySeedContext(seed=10):
        B = np.random.randint(0, 2, (dx, dh)) * 2 - 1.0
        b = np.random.randn(dx)
        c = np.random.randn(dh)
        p = density.GaussBernRBM(B, b, c)

        B_perturb = np.copy(B)
        # Only perturb B[0, 0] when the variance is non-negligible.
        if var_perturb_B > 1e-7:
            noise = np.random.randn(1) * np.sqrt(var_perturb_B)
            B_perturb[0, 0] = B_perturb[0, 0] + noise
        ds = data.DSGaussBernRBM(B_perturb, b, c, burnin=2000)

    return p, ds
Example 8
def multivariate_normal_density(mean, cov, X):
    """
    Exact density (not log density) of a multivariate Gaussian.
    mean: length-d array
    cov: a dxd covariance matrix
    X: n x d 2d-array
    """
    # Inverse square root of the covariance via its eigendecomposition.
    evals, evecs = np.linalg.eigh(cov)
    cov_half_inv = evecs.dot(np.diag(evals ** (-0.5))).dot(evecs.T)

    # Squared Mahalanobis distance of each row of X from the mean.
    whitened = np.dot(X - mean, cov_half_inv)
    sq_dists = np.sum(whitened ** 2, 1)
    unnormalized = np.exp(-0.5 * sq_dists)

    # Gaussian normalization constant.
    Z = np.sqrt(np.linalg.det(2.0 * np.pi * cov))
    den = unnormalized / Z
    assert len(den) == X.shape[0]
    return den
Example 9
def perform_test(self, dat):
    """
    dat: an instance of Data
    """
    with util.ContextTimer() as t:
        alpha = self.alpha
        X = dat.data()
        n = X.shape[0]

        # H: length-n vector of pointwise statistics.
        _, H = self.compute_stat(dat, return_pointwise_stats=True)

        # Asymptotically normal statistic and its one-sided p-value.
        test_stat = np.sqrt(old_div(n, 2)) * np.mean(H)
        stat_var = np.mean(H**2)
        pvalue = stats.norm.sf(test_stat, loc=0, scale=np.sqrt(stat_var))

    results = {
        'alpha': self.alpha,
        'pvalue': pvalue,
        'test_stat': test_stat,
        'h0_rejected': pvalue < alpha,
        'time_secs': t.secs,
    }
    return results
Example 10
def _get_sym_matrix_inv_sqrt_funcs(mat, ev_min=None, ev_max=None):
    """
    Get the inverse square root of a symmetric matrix with thresholds for the
    eigenvalues.

    This is useful for calculating preconditioners.
    """
    mat = np.atleast_2d(mat)

    # Symmetrize for numerical stability.
    eig_val, eig_vec = np.linalg.eigh(0.5 * (mat + mat.T))

    # Clip the spectrum before building the (inverse) square-root operators.
    eig_val_trunc = truncate_eigenvalues(eig_val, ev_min=ev_min, ev_max=ev_max)

    mult_mat_sqrt = transform_eigenspace(eig_vec, eig_val_trunc, np.sqrt)
    mult_mat_inv_sqrt = transform_eigenspace(
        eig_vec, eig_val_trunc, lambda ev: 1. / np.sqrt(ev))

    return mult_mat_sqrt, mult_mat_inv_sqrt
Example 11
def kernel(X, Xp, hyp):
    """Squared-exponential (RBF) kernel matrix between rows of X and Xp.

    hyp[0] is the log output scale; hyp[1:] are (presumably) log squared
    lengthscales -- confirm against the caller.
    """
    output_scale = np.exp(hyp[0])
    lengthscales = np.sqrt(np.exp(hyp[1:]))

    # Work in lengthscale-normalized coordinates.
    Xs = X / lengthscales
    Xps = Xp / lengthscales

    # Pairwise squared distances via |a-b|^2 = |a|^2 + |b|^2 - 2 a.b
    sq_norms_x = np.sum(np.square(Xs), axis=1)
    sq_norms_xp = np.sum(np.square(Xps), axis=1)
    cross = np.dot(Xs, Xps.T)
    dists = sq_norms_x[:, np.newaxis] + sq_norms_xp - 2.0 * cross
    return output_scale * np.exp(-0.5 * dists)
Example 12
def fit(self, X, y):
    """Initialize a random linear model and report its loss each iteration.

    NOTE(review): the parameter-update step is an empty placeholder, so
    self.W keeps its random initialization throughout.
    """
    def _loss(W):
        # Mean of element-wise |residual| (sqrt of each squared residual).
        predictions = np.dot(XMat, W)
        return np.sqrt((np.power(yMat - predictions, 2))).mean()

    verbose, print_step = self.verbose, self.print_step
    max_iters = self.max_iters

    XMat, yMat = np.array(X), np.array(y)

    # Align the sample axes if y arrived transposed.
    if XMat.shape[0] != yMat.shape[0]:
        yMat = yMat.T
    assert XMat.shape[0] == yMat.shape[0]

    n_samples, n_features = X.shape
    n_outdim = y.shape[1]
    # Append a bias column of ones to the design matrix.
    XMat = np.hstack([XMat, np.ones((n_samples, 1))])

    self.W = np.random.randn(n_features + 1, n_outdim)
    for it in range(max_iters + 1):
        loss = _loss(self.W)

        # update params

        if verbose and it % print_step == 0:
            print('iteration %d / %d: loss %f' % (it, max_iters, loss))
Example 13
def fit(self, X, y):
    """Initialize a random linear model and report its ridge-regularized
    loss each iteration (L2 penalty on non-bias weights, scaled by alpha).

    NOTE(review): the parameter-update step is an empty placeholder, so
    self.W keeps its random initialization throughout.
    """
    def _loss(W):
        # Mean |residual| plus L2 penalty on all rows but the bias row.
        predictions = np.dot(XMat, W)
        data_term = np.sqrt((np.power(yMat - predictions, 2))).mean()
        return data_term + np.sum(self.alpha * W[0:-1] * W[0:-1])

    verbose, print_step = self.verbose, self.print_step
    max_iters = self.max_iters

    XMat, yMat = np.array(X), np.array(y)

    # Align the sample axes if y arrived transposed.
    if XMat.shape[0] != yMat.shape[0]:
        yMat = yMat.T
    assert XMat.shape[0] == yMat.shape[0]

    n_samples, n_features = X.shape
    n_outdim = y.shape[1]
    # Append a bias column of ones to the design matrix.
    XMat = np.hstack([XMat, np.ones((n_samples, 1))])

    self.W = np.random.randn(n_features + 1, n_outdim)
    for it in range(max_iters + 1):
        loss = _loss(self.W)

        # update params

        if verbose and it % print_step == 0:
            print('iteration %d / %d: loss %f' % (it, max_iters, loss))
Example 14
def fit(self, X, y):
    """Initialize a random linear model and report its lasso-regularized
    loss each iteration (L1 penalty on non-bias weights, scaled by alpha).

    NOTE(review): the parameter-update step is an empty placeholder, so
    self.W keeps its random initialization throughout.
    """
    def _loss(W):
        # Mean |residual| plus L1 penalty on all rows but the bias row.
        predictions = np.dot(XMat, W)
        data_term = np.sqrt((np.power(yMat - predictions, 2))).mean()
        return data_term + np.sum(self.alpha * np.abs(W[0:-1]))

    verbose, print_step = self.verbose, self.print_step
    max_iters = self.max_iters

    XMat, yMat = np.array(X), np.array(y)

    # Align the sample axes if y arrived transposed.
    if XMat.shape[0] != yMat.shape[0]:
        yMat = yMat.T
    assert XMat.shape[0] == yMat.shape[0]

    n_samples, n_features = X.shape
    n_outdim = y.shape[1]
    # Append a bias column of ones to the design matrix.
    XMat = np.hstack([XMat, np.ones((n_samples, 1))])

    self.W = np.random.randn(n_features + 1, n_outdim)
    for it in range(max_iters + 1):
        loss = _loss(self.W)

        # update params

        if verbose and it % print_step == 0:
            print('iteration %d / %d: loss %f' % (it, max_iters, loss))
Example 15
def test_sqrt():
    # Check np.sqrt's autograd gradients on a well-conditioned interval.
    unary_ufunc_check(np.sqrt, lims=[1.0, 3.0])
Example 16
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
def heston_log_st_mgf(u, t, r, q, S0, V0, theta, k, sigma, rho):
    """Moment generating function of the log spot under the Heston model."""
    beta = k - u * rho * sigma
    # Characteristic discriminant (beta equals k - rho*sigma*u).
    dt = np.sqrt((sigma ** 2) * (u - u ** 2) + beta ** 2)
    g = (beta - dt) / (beta + dt)

    decay = np.exp(-dt * t)
    D_t = (beta - dt) / (sigma ** 2) * ((1 - decay) / (1 - g * decay))
    C_t = u * (r - q) * t + k * theta / (sigma ** 2) * (
        (beta - dt) * t - 2 * np.log((1 - g * decay) / (1 - g)))
    return np.exp(C_t + D_t * V0 + u * np.log(S0))
Example 17
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
def nig_mgf(u, t, a, b, delta):
    """Moment generating function of the Normal-Inverse-Gaussian process."""
    shifted = np.sqrt(a ** 2 - (b + u) ** 2)
    base = np.sqrt(a ** 2 - b ** 2)
    return np.exp(-delta * t * (shifted - base))
Example 18
 Project: fftoptionlib   Author: arraystream   File: moment_generating_funs.py    BSD 3-Clause "New" or "Revised" License 5 votes
def parameter_to_cgm(theta, v, sigma):
    """Convert (theta, v, sigma) Variance-Gamma parameters to (C, G, M) form."""
    c = 1.0 / v
    half_drift = 0.5 * theta * v  # the b term
    a = np.sqrt(half_drift ** 2 + 0.5 * (sigma ** 2) * v)
    g = 1 / (a - half_drift)
    m = 1 / (a + half_drift)
    return c, g, m
Example 19
def update(self, param, noisy_gradient, step_size):
    # Adam-like parameter update from running moment estimates.
    # NOTE(review): this snippet is corrupted -- the right-hand side of the
    # `mhat, vhat = \` assignment was lost in extraction (likely a moment
    # update call), so the next line does not parse as-is.  Recover the
    # missing expression from the upstream source.
    mhat, vhat = \
    vhat += mhat**2
    # Scaled step, with eps guarding against division by zero.
    dparam = mhat / (np.sqrt(vhat) + self.eps)
    # Track the mean effective step size for diagnostics.
    self.ave_step_sizes.append(np.mean(1./(np.sqrt(vhat) + self.eps)))
    return param - step_size * dparam, (mhat, vhat)
Example 20
def default_callback(self, th, t, g):
    """Periodic progress report, printed every 20 iterations: objective
    value (when self.fun is set) and gradient magnitude."""
    if t % 20 == 0:
        gmag = np.sqrt(np.dot(g, g))
        if self.fun is not None:
            fval = self.fun(th, t)
            # Py3-compatible print() (original used a Python 2 print statement).
            print("iter %d: val = %2.4f, gmag = %2.4f" % (t, fval, gmag))
        else:
            print("iter %d: gmag = %2.4f" % (t, gmag))
Example 21
def plot_gradients(self, dims=[1, 10, 50]):
    # Plot per-dimension gradient traces on stacked axes.
    # NOTE(review): this snippet is corrupted by extraction -- the plotting
    # call that ended with `alpha = .25)` and the argument of the final
    # Python 2 print statement were lost, so the body does not parse as-is.
    # NOTE(review): mutable default argument `dims=[1, 10, 50]` is shared
    # across calls; harmless only as long as it is never mutated.
    import matplotlib.pyplot as plt
    import seaborn as sns; sns.set_style("white")
    fig, axarr = plt.subplots(len(dims), 1, figsize=(12, 3*len(dims)))
    for d, ax in zip(dims, axarr.flatten()):
        alpha = .25)
        #ax.set_ylim(yrange)
        ax.set_xlim((tgrid[0], tgrid[-1]))
        ax.legend()
    print "sample average deviation: ", \
    return fig, axarr
Example 22
def default_callback(self, th, t, g):
    """Print progress every 20 iterations: objective value if available,
    always the gradient magnitude."""
    if t % 20 == 0:
        grad_mag = np.sqrt(np.dot(g, g))
        if self.fun is not None:
            # Py3-compatible print() (original used a Python 2 print statement).
            print("iter %d: val = %2.4f, gmag = %2.4f"
                  % (t, self.fun(th, t), grad_mag))
        else:
            print("iter %d: gmag = %2.4f" % (t, grad_mag))
Example 23
 Project: momi2   Author: popgenmethods   File: confidence_region.py    GNU General Public License v3.0 5 votes
def wald_intervals(self, lower=.025, upper=.975):
    """
    Marginal wald-type confidence intervals.

    NOTE(review): ``lower``/``upper`` are accepted but unused -- the
    interval level is hard-coded to .95 below; confirm intent upstream.
    """
    std_errs = np.sqrt(np.diag(self.godambe(inverse=True)))
    conf_lower, conf_upper = scipy.stats.norm.interval(
        .95, loc=self.point, scale=std_errs)
    return np.array([conf_lower, conf_upper]).T
Example 24
 Project: momi2   Author: popgenmethods   File: sfs_stats.py    GNU General Public License v3.0 5 votes
def sd(self):
    """
    Standard deviation of the statistic, estimated via jackknife
    """
    deviations = self.jackknifed_array - self.observed
    n_jack = len(self.jackknifed_array)
    return np.sqrt(np.mean(deviations ** 2) * (n_jack - 1))
Example 25
 Project: momi2   Author: popgenmethods   File: sfs_stats.py    GNU General Public License v3.0 5 votes
def sd(self):
    """Standard deviation of the statistic (square root of its variance)."""
    variance = self.var
    return np.sqrt(variance)
Example 26
def dist_matrix(X, Y):
    """
    Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
    """
    x_sq = np.sum(X ** 2, 1)
    y_sq = np.sum(Y ** 2, 1)
    sq_dists = x_sq[:, np.newaxis] - 2.0 * np.dot(X, Y.T) + y_sq[np.newaxis, :]
    # Clamp tiny negative values caused by floating-point cancellation
    # before taking the square root.
    sq_dists[sq_dists < 0] = 0
    return np.sqrt(sq_dists)
Example 27
def power_criterion(tst_data, test_locs, k, reg=1e-2, use_unbiased=True):
    """
    Compute the mean and standard deviation of the statistic under H1.
    Return power criterion = mean_under_H1/sqrt(var_under_H1 + reg) .
    """
    ume = UMETest(test_locs, k)
    feat = ume.feature_matrix(tst_data)
    u_mean, u_variance = UMETest.ustat_h1_mean_variance(
        feat, return_variance=True, use_unbiased=use_unbiased)

    # Regularized mean-to-standard-deviation ratio.
    sigma_h1 = np.sqrt(u_variance + reg)
    return old_div(u_mean, sigma_h1)
Example 28
def gen_blobs(stretch, angle, blob_distance, num_blobs, num_samples):
    """Generate 2d blobs dataset """

    # 2d rotation matrix for the given angle.
    rot = np.array([[math.cos(angle), -math.sin(angle)],
                    [math.sin(angle), math.cos(angle)]])
    scales = np.diag(np.array([np.sqrt(stretch), 1]))
    mod_matix = np.dot(rot, scales)

    # Blob centres lie on a grid re-centred at the origin.
    mean = old_div(float(blob_distance * (num_blobs - 1)), 2)
    mu = np.random.randint(0, num_blobs, (num_samples, 2)) * blob_distance - mean
    return np.random.randn(num_samples, 2).dot(mod_matix) + mu
Example 29
def sample(self, n, seed):
    """Draw n d-dimensional normal pairs (X, Y) where Y's first coordinate
    has variance 2; the global RNG state is saved and restored around
    the draw."""
    saved_state = np.random.get_state()
    np.random.seed(seed)

    dim = self.d
    # Y's first coordinate gets standard deviation sqrt(2).
    y_scale = np.diag(np.hstack((np.sqrt(2.0), np.ones(dim - 1))))
    X = np.random.randn(n, dim)
    Y = np.random.randn(n, dim).dot(y_scale)

    np.random.set_state(saved_state)
    return TSTData(X, Y, label='gvd')
Example 30
def _calc_pareto_front(self, ref_dirs, *args, **kwargs):
    """Scale the underlying DTLZ Pareto front point-wise by the constraint
    surface factor."""
    F = self.dtlz.pareto_front(ref_dirs, *args, **kwargs)

    # Per-point scaling factor, tiled across all objectives.
    norm = np.sqrt(np.sum(F ** 2, 1) - 3 / 4 * np.max(F ** 2, axis=1))
    norm = np.tile(np.expand_dims(norm, axis=1), [1, ref_dirs.shape[1]])
    return F / norm
Example 31
def constraint_c2(f, r):
    """C2 constraint value: negative (feasible) iff a point lies within
    distance r of some unit axis point e_i or of the symmetric point
    with all coordinates 1/sqrt(n_obj)."""
    n_obj = f.shape[1]

    # Distance-like term to each unit axis point e_i; keep the minimum.
    v1 = np.inf * np.ones(f.shape[0])
    for i in range(n_obj):
        axis_term = ((f[:, i] - 1) ** 2
                     + (np.sum(f ** 2, axis=1) - f[:, i] ** 2)
                     - r ** 2)
        v1 = np.minimum(axis_term.flatten(), v1)

    # Term for the symmetric point a * (1, ..., 1).
    a = 1 / np.sqrt(n_obj)
    v2 = np.sum((f - a) ** 2, axis=1) - r ** 2

    return np.minimum(v1, v2.flatten())
Example 32
def _compute_scales(self):
    """Per-datapoint scales: square root of the exact diagonal second
    moments of the log-likelihoods."""
    second_moments = ll_m2_exact_diag(self.mu0, self.Sig0, self.Siginv, self.x)
    return np.sqrt(second_moments)
Example 33
def _reverse_kl_grad(self, w, normalize):
    """Gradient of the reverse weighted-posterior KL at weights w,
    optionally normalized by the exact log-likelihood second moments."""
    kl_grad = grad(lambda weights: weighted_post_KL(
        self.mu0, self.Sig0inv, self.Siginv, self.x, weights, reverse=True))
    if not normalize:
        return kl_grad(w)
    muw, Sigw = weighted_post(self.mu0, self.Sig0inv, self.Siginv, self.x, w)
    scales = np.sqrt(ll_m2_exact_diag(muw, Sigw, self.Siginv, self.x))
    return kl_grad(w) / scales
Example 34
def __init__(self, x, mu0, Sig0, Sig, reverse=True):
    """Cache the data, the prior (mu0, Sig0), the likelihood covariance Sig,
    and their inverses, then initialize the base class."""
    self.x = x
    self.mu0 = mu0
    self.Sig0 = Sig0
    self.Sig0inv = np.linalg.inv(Sig0)
    self.Sig = Sig
    self.Siginv = np.linalg.inv(Sig)
    super().__init__(N=x.shape[0], potentials=None, sampler=None,
                     n_samples=None, reverse=reverse)

#def _compute_scales(self):
#  return np.sqrt(ll_m2_exact_diag(self.mu0, self.Sig0, self.Siginv, self.x))
Example 35
def _reverse_kl_grad(self, w, normalize):
    """Gradient of the reverse weighted-posterior KL at w; when normalize
    is set, divide per-point by sqrt of the exact second moments."""
    objective = lambda weights: weighted_post_KL(
        self.mu0, self.Sig0inv, self.Siginv, self.x, weights, reverse=True)
    gradient = grad(objective)(w)
    if normalize:
        muw, Sigw = weighted_post(self.mu0, self.Sig0inv, self.Siginv, self.x, w)
        gradient = gradient / np.sqrt(
            ll_m2_exact_diag(muw, Sigw, self.Siginv, self.x))
    return gradient
Example 36
def _compute_loss(self):
    """Compute and store loss value for the given batch of examples."""
    # Memoize: each batch's loss is computed at most once.
    if self._loss_computed:
        return
    self._loss_computed = True

    self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
    self.dot_prods = (self.vectors_u * self.vectors_v).sum(axis=1) # (1 + neg, batch_size)

    # Common factor of the child-angle formula.
    self.g = 1 + self.norms_v_sq * self.norms_u_sq - 2 * self.dot_prods
    self.g_sqrt = np.sqrt(self.g)

    self.euclidean_times_sqrt_g = self.euclidean_dists * self.g_sqrt

    # Which vector plays the child role depends on relation direction.
    if not self.rels_reversed:
        # u is x , v is y
        # (1 + neg_size, batch_size)
        child_numerator = self.dot_prods * (1 + self.norms_u_sq) - self.norms_u_sq * (1 + self.norms_v_sq)
        self.child_numitor = self.euclidean_times_sqrt_g * self.norms_u
        self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_u / self.norms_u) # (1, batch_size)

    else:
        # v is x , u is y
        # (1 + neg_size, batch_size)
        child_numerator = self.dot_prods * (1 + self.norms_v_sq) - self.norms_v_sq * (1 + self.norms_u_sq)
        self.child_numitor = self.euclidean_times_sqrt_g * self.norms_v
        self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_v / self.norms_v) # (1, batch_size)

    # Cosine of the angle at the child vertex, clipped before arccos.
    self.cos_angles_child = child_numerator / self.child_numitor
    # To avoid numerical errors
    self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
    self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
    self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

    # Energy: amount by which the child angle exceeds the parent aperture.
    self.angle_diff = self.angles_child - self.angles_psi_parent
    self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
    # Row 0 is the positive pair; remaining rows are negatives with margin.
    self.pos_loss = self.energy_vec[0].sum()
    self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
    self.loss = self.pos_loss + self.neg_loss
Example 37
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
    """Batch is-a energy: how far each child angle exceeds the parent's
    aperture angle (0 when the child lies inside the cone)."""
    p_norm = np.linalg.norm(parent_vectors, axis=1)
    p_norm_sq = p_norm ** 2
    o_norm = np.linalg.norm(other_vectors, axis=1)
    o_norm_sq = o_norm ** 2
    # Floor the distance: parent can coincide with child in the
    # reconstruction experiment.
    dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6)
    dots = (parent_vectors * other_vectors).sum(axis=1)
    g_sqrt = np.sqrt(1 + p_norm_sq * o_norm_sq - 2 * dots)

    if not rel_reversed:
        # parent = x , other = y
        numer = dots * (1 + p_norm_sq) - p_norm_sq * (1 + o_norm_sq)
        denom = dists * p_norm * g_sqrt
        psi_parent = np.arcsin(K * (1 - p_norm_sq) / p_norm)
    else:
        # parent = y , other = x
        numer = dots * (1 + o_norm_sq) - o_norm_sq * (1 + p_norm_sq)
        denom = dists * o_norm * g_sqrt
        psi_parent = np.arcsin(K * (1 - o_norm_sq) / o_norm)

    cos_child = numer / denom
    assert not np.isnan(cos_child).any()
    # Clip into the open interval (-1, 1) before arccos.
    cos_child = np.clip(cos_child, -1 + EPS, 1 - EPS)
    angles_child = np.arccos(cos_child)  # (1 + neg_size, batch_size)

    return np.maximum(0, angles_child - psi_parent)
Example 38
def __init__(self):
    """Plain pass-through constructor; all behaviour lives in the base class."""
    super(SphericalDisk, self).__init__()

    # NOTE(review): a large commented-out draft of is_a_scores_vector_batch
    # (duplicating the active implementation elsewhere in this module) was
    # removed here as dead code.
Example 39
def multiquadratic_kernel(x):
    """Negative multiquadratic kernel matrix: -sqrt(||xi - xj||^2 + 1)."""
    n = x.shape[0]
    sq_dists = np.zeros((n, n))

    # Row i holds the squared distances from x[i] to every point.
    for i in range(n):
        diff = x[i, :] - x
        sq_dists[i, :] = np.sum(diff * diff, axis=1)

    return -np.sqrt(sq_dists + 1)
Example 40
def centered_normalized_rbk_sklearn(X, σ, H):
    """Degree-normalized RBF kernel, optionally double-centered with H."""
    Kx = rbk_sklearn(X, σ)
    # Symmetric normalization D^{-1/2} K D^{-1/2} via an outer product.
    inv_sqrt_deg = 1.0 / np.sqrt(Kx.sum(axis=1))
    normalized = np.outer(inv_sqrt_deg, inv_sqrt_deg) * Kx
    if H is None:
        return normalized
    return H.dot(normalized).dot(H)
Example 41
def compute_inverted_Degree_matrix(M):
    """Return D^{-1/2} as a diagonal matrix, where D holds M's row sums."""
    row_sums = M.sum(axis=1)
    return np.diag(1.0 / np.sqrt(row_sums))
Example 42
def relative_σ(X):
    # Per-point adaptive bandwidths σ_i from inverse-distance-weighted
    # squared distances to up to 50 nearest neighbours.
    # NOTE(review): depends on sklearn's NearestNeighbors and np.matlib
    # (deprecated in modern NumPy) imported by the enclosing module.
    n = X.shape[0]
    if n < 50: num_of_samples = n
    else: num_of_samples = 50

    # Fit the neighbour index on de-duplicated points.
    unique_X = np.unique(X, axis=0)
    neigh = NearestNeighbors(num_of_samples)

    neigh.fit(unique_X)

    # Neighbour distances/indices; drop the self-match in column 0.
    [dis, idx] = neigh.kneighbors(X, num_of_samples, return_distance=True)
    dis_inv = 1/dis[:,1:]
    idx = idx[:,1:]

    # Normalize inverse distances into per-point weights.
    total_dis = np.sum(dis_inv, axis=1)
    total_dis = np.reshape(total_dis,(n, 1))
    total_dis = np.matlib.repmat(total_dis, 1, num_of_samples-1)
    dis_ratios = dis_inv/total_dis

    # Cache σ keyed by the point's string form so duplicates share a value.
    result_store_dictionary = {}
    σ_list = np.zeros((n,1))

    for i in range(n):
        if str(X[i,:]) in result_store_dictionary:
            σ = result_store_dictionary[str(X[i,:])]
            σ_list[i] = σ
            continue

        dr = dis_ratios[i,:]

        # Weighted mean of squared neighbour offsets -> squared bandwidth.
        Δ = unique_X[idx[i,:],:] - X[i,:]
        Δ2 = Δ*Δ
        d = np.sum(Δ2,axis=1)
        σ = np.sqrt(np.sum(dr*d))
        σ_list[i] = σ#*10

        result_store_dictionary[str(X[i,:])] = σ

    #return σ_list.dot(σ_list.T)
    return σ_list
Example 43
def sim_prop(self, t, Xp, y, prop_params, model_params, rs = npr.RandomState(0)):
    """Sample from the Gaussian proposal distribution at step t.

    NOTE(review): the default ``rs`` RandomState is created once at function
    definition time and shared across calls -- deliberate only if
    reproducible draws are intended; confirm.
    """
    mu0, Sigma0, A, Q, C, R = model_params
    mut, lint, log_s2t = prop_params[t]
    std = np.sqrt(np.exp(log_s2t))

    # Proposal mean: affine in the previous particles, or in the prior
    # mean at the first step.
    if t > 0:
        mean = mut + np.dot(A, Xp.T).T * lint
    else:
        mean = mut + lint * mu0
    return mean + rs.randn(*Xp.shape) * std
Example 44
def _compute_loss(self):
    """Compute and store loss value for the given batch of examples."""
    # Memoize: only compute once per batch.
    if self._loss_computed:
        return
    self._loss_computed = True

    self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
    self.dot_prods = (self.vectors_u * self.vectors_v).sum(axis=1) # (1 + neg, batch_size)

    # Shared factor of the child-angle formula.
    self.g = 1 + self.norms_v_sq * self.norms_u_sq - 2 * self.dot_prods
    self.g_sqrt = np.sqrt(self.g)

    self.euclidean_times_sqrt_g = self.euclidean_dists * self.g_sqrt

    # The child/parent roles of u and v depend on relation direction.
    if not self.rels_reversed:
        # u is x , v is y
        # (1 + neg_size, batch_size)
        child_numerator = self.dot_prods * (1 + self.norms_u_sq) - self.norms_u_sq * (1 + self.norms_v_sq)
        self.child_numitor = self.euclidean_times_sqrt_g * self.norms_u
        self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_u / self.norms_u) # (1, batch_size)

    else:
        # v is x , u is y
        # (1 + neg_size, batch_size)
        child_numerator = self.dot_prods * (1 + self.norms_v_sq) - self.norms_v_sq * (1 + self.norms_u_sq)
        self.child_numitor = self.euclidean_times_sqrt_g * self.norms_v
        self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_v / self.norms_v) # (1, batch_size)

    # Cosine of the child-vertex angle, clipped before arccos.
    self.cos_angles_child = child_numerator / self.child_numitor
    # To avoid numerical errors
    self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
    self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
    self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

    # Energy: how far the child angle exceeds the parent aperture.
    self.angle_diff = self.angles_child - self.angles_psi_parent
    self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
    # Row 0 is the positive pair; remaining rows are margin-hinged negatives.
    self.pos_loss = self.energy_vec[0].sum()
    self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
    self.loss = self.pos_loss + self.neg_loss
Example 45
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
    """Batch is-a scores: positive part of (child angle - parent aperture),
    zero when the child already lies inside the parent's cone."""
    norm_p = np.linalg.norm(parent_vectors, axis=1)
    norm_o = np.linalg.norm(other_vectors, axis=1)
    sq_p, sq_o = norm_p ** 2, norm_o ** 2
    # Distance floored at 1e-6: parent may equal child in the
    # reconstruction experiment.
    raw_dists = np.linalg.norm(parent_vectors - other_vectors, axis=1)
    dists = np.maximum(raw_dists, 1e-6)
    dots = (parent_vectors * other_vectors).sum(axis=1)
    g_sqrt = np.sqrt(1 + sq_p * sq_o - 2 * dots)

    if rel_reversed:
        # parent = y , other = x
        numer = dots * (1 + sq_o) - sq_o * (1 + sq_p)
        denom = dists * norm_o * g_sqrt
        aperture = np.arcsin(K * (1 - sq_o) / norm_o)
    else:
        # parent = x , other = y
        numer = dots * (1 + sq_p) - sq_p * (1 + sq_o)
        denom = dists * norm_p * g_sqrt
        aperture = np.arcsin(K * (1 - sq_p) / norm_p)

    cos_child = numer / denom
    assert not np.isnan(cos_child).any()
    # Keep strictly inside (-1, 1) before arccos.
    cos_child = np.minimum(np.maximum(cos_child, -1 + EPS), 1 - EPS)
    child_angles = np.arccos(cos_child)  # (1 + neg_size, batch_size)

    return np.maximum(0, child_angles - aperture)
Example 46
def get_2var_e(var, fix, i):
    """e = sqrt(ecoso^2 + esino^2) from the two components indexed by i;
    accepts a single 1d sample or a 2d batch."""
    if len(np.shape(var)) == 1:
        ecoso, esino = var[i[0]], var[i[1]]
    else:
        ecoso, esino = var[:, i[0]], var[:, i[1]]
    return np.sqrt(np.square(ecoso, dtype=np.double) + np.square(esino, dtype=np.double))
Example 47
def get_2var_c1(var, fix, i):
    """Combination 2*sqrt(q1)*q2 of the two indexed parameters; accepts a
    single 1d sample or a 2d batch."""
    if len(np.shape(var)) == 1:
        q1, q2 = var[i[0]], var[i[1]]
    else:
        q1, q2 = var[:, i[0]], var[:, i[1]]
    return 2.0 * np.sqrt(q1, dtype=np.double) * q2
Example 48
def get_2var_c2(var, fix, i):
    """Combination sqrt(q1)*(1 - 2*q2) of the two indexed parameters;
    accepts a single 1d sample or a 2d batch."""
    if len(np.shape(var)) == 1:
        q1, q2 = var[i[0]], var[i[1]]
    else:
        q1, q2 = var[:, i[0]], var[:, i[1]]
    return np.sqrt(q1, dtype=np.double) * (1.0 - 2.0 * q2)
Example 49
def f1(x):
    """First objective of the concave bi-objective problem:
    1 - exp(-sum_i (x_i - 1/sqrt(n))^2)."""
    n = len(x)
    offset = 1.0 / np.sqrt(n)
    sum_sq = np.sum([(xi - offset) ** 2 for xi in x])
    return 1 - np.exp(-sum_sq)
Example 50
```def f2(x):