Python autograd.numpy.sqrt() Examples
The following are 30 code examples of autograd.numpy.sqrt(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the autograd.numpy module.
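Before the examples, a minimal sketch of autograd.numpy.sqrt() under automatic differentiation (the function f below is a made-up illustration, not taken from any of the projects listed):

import autograd.numpy as np
from autograd import grad

# d/dx sqrt(x) = 1 / (2 * sqrt(x)), so the gradient at x = 4.0 is 0.25.
f = lambda x: np.sqrt(x)
df = grad(f)

print(f(4.0))   # 2.0
print(df(4.0))  # 0.25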
Example #1
Source File: truss2d.py From pymoo with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    # variable names for convenient access
    x1 = x[:, 0]
    x2 = x[:, 1]
    y = x[:, 2]

    # first objective
    f1 = x1 * anp.sqrt(16 + anp.square(y)) + x2 * anp.sqrt(1 + anp.square(y))

    # measures that are needed for the second objective
    sigma_ac = 20 * anp.sqrt(16 + anp.square(y)) / (y * x1)
    sigma_bc = 80 * anp.sqrt(1 + anp.square(y)) / (y * x2)

    # take the max
    f2 = anp.max(anp.column_stack((sigma_ac, sigma_bc)), axis=1)

    # define a constraint
    g1 = f2 - self.Smax

    out["F"] = anp.column_stack([f1, f2])
    out["G"] = g1
Example #2
Source File: zdt.py From pymoo with Apache License 2.0
def _calc_pareto_front(self, n_points=100, flatten=True):
    regions = [[0, 0.0830015349],
               [0.182228780, 0.2577623634],
               [0.4093136748, 0.4538821041],
               [0.6183967944, 0.6525117038],
               [0.8233317983, 0.8518328654]]

    pf = []

    for r in regions:
        x1 = anp.linspace(r[0], r[1], int(n_points / len(regions)))
        x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
        pf.append(anp.array([x1, x2]).T)

    if not flatten:
        pf = anp.concatenate([pf[None, ...] for pf in pf])
    else:
        pf = anp.row_stack(pf)

    return pf
Example #3
Source File: Utilities.py From ParametricGP with MIT License
def stochastic_update_Adam(w, grad_w, mt, vt, lrate, iteration):
    beta1 = 0.9
    beta2 = 0.999
    epsilon = 1e-8

    mt = mt * beta1 + (1.0 - beta1) * grad_w
    vt = vt * beta2 + (1.0 - beta2) * grad_w ** 2

    mt_hat = mt / (1.0 - beta1 ** iteration)
    vt_hat = vt / (1.0 - beta2 ** iteration)

    scal = 1.0 / (np.sqrt(vt_hat) + epsilon)
    w = w - lrate * mt_hat * scal

    return w, mt, vt
Example #4
Source File: ex1_vary_n.py From kernel-gof with MIT License
def gbrbm_perturb(var_perturb_B, dx=50, dh=10):
    """
    Get a Gaussian-Bernoulli RBM problem where the first entry of the B matrix
    (the matrix linking the latent and the observation) is perturbed.

    - var_perturb_B: Gaussian noise variance for perturbing B.
    - dx: observed dimension
    - dh: latent dimension

    Return p (density), data source
    """
    with util.NumpySeedContext(seed=10):
        B = np.random.randint(0, 2, (dx, dh)) * 2 - 1.0
        b = np.random.randn(dx)
        c = np.random.randn(dh)
        p = density.GaussBernRBM(B, b, c)

        B_perturb = np.copy(B)
        if var_perturb_B > 1e-7:
            B_perturb[0, 0] = B_perturb[0, 0] + \
                np.random.randn(1) * np.sqrt(var_perturb_B)
        ds = data.DSGaussBernRBM(B_perturb, b, c, burnin=2000)

    return p, ds
Example #5
Source File: ex3_vary_nlocs.py From kernel-gof with MIT License
def gaussbern_rbm_tuple(var, dx=50, dh=10, n=sample_size):
    """
    Get a tuple of Gaussian-Bernoulli RBM problems.
    We follow the parameter settings as described in Section 6 of Liu et al., 2016.

    - var: Gaussian noise variance for perturbing B.
    - dx: observed dimension
    - dh: latent dimension

    Return p, a DataSource
    """
    with util.NumpySeedContext(seed=1000):
        B = np.random.randint(0, 2, (dx, dh)) * 2 - 1.0
        b = np.random.randn(dx)
        c = np.random.randn(dh)
        p = density.GaussBernRBM(B, b, c)

        B_perturb = B + np.random.randn(dx, dh) * np.sqrt(var)
        gb_rbm = data.DSGaussBernRBM(B_perturb, b, c, burnin=50)

    return p, gb_rbm
Example #6
Source File: geometry.py From AeroSandbox with MIT License
def area_wetted(self):
    # Returns the wetted area of a wing.
    area = 0
    for i in range(len(self.xsecs) - 1):
        chord_eff = (self.xsecs[i].chord + self.xsecs[i + 1].chord) / 2
        this_xyz_te = self.xsecs[i].xyz_te()
        that_xyz_te = self.xsecs[i + 1].xyz_te()
        span_le_eff = np.sqrt(
            np.square(self.xsecs[i].xyz_le[1] - self.xsecs[i + 1].xyz_le[1]) +
            np.square(self.xsecs[i].xyz_le[2] - self.xsecs[i + 1].xyz_le[2])
        )
        span_te_eff = np.sqrt(
            np.square(this_xyz_te[1] - that_xyz_te[1]) +
            np.square(this_xyz_te[2] - that_xyz_te[2])
        )
        span_eff = (span_le_eff + span_te_eff) / 2

        area += chord_eff * span_eff

    if self.symmetric:
        area *= 2

    return area
Example #7
Source File: density.py From kernel-gof with MIT License
def multivariate_normal_density(mean, cov, X):
    """
    Exact density (not log density) of a multivariate Gaussian.

    mean: length-d array
    cov: a d x d covariance matrix
    X: n x d 2d-array
    """
    evals, evecs = np.linalg.eigh(cov)
    cov_half_inv = evecs.dot(np.diag(evals ** (-0.5))).dot(evecs.T)
    # print(evals)
    half_evals = np.dot(X - mean, cov_half_inv)
    full_evals = np.sum(half_evals ** 2, 1)
    unden = np.exp(-0.5 * full_evals)

    Z = np.sqrt(np.linalg.det(2.0 * np.pi * cov))
    den = unden / Z
    assert len(den) == X.shape[0]
    return den
Example #8
Source File: run_synthetic_example.py From ParetoMTL with MIT License
def create_pf():
    ps = np.linspace(-1 / np.sqrt(2), 1 / np.sqrt(2))
    pf = []

    for x1 in ps:
        # generate solutions on the Pareto front
        # (concave_fun_eval is defined elsewhere in this file; its component
        # objectives f1 and f2 appear in Examples #16 and #17 below)
        x = np.array([x1, x1])
        f, f_dx = concave_fun_eval(x)
        pf.append(f)

    pf = np.array(pf)
    return pf


### optimization method ###
Example #9
Source File: geometry.py From AeroSandbox with MIT License
def get_mcl_normal_direction_at_chord_fraction(self, chord_fraction):
    # Returns the normal direction of the mean camber line at a specified chord fraction.
    # If you input a single value, returns a 1D numpy array with 2 elements (x, y).
    # If you input a vector of values, returns a 2D numpy array.
    # First index is the point number, second index is (x, y).
    #
    # Right now, does it by finite differencing camber values :(
    # When I'm less lazy I'll make it do it in a proper, more efficient way
    # TODO make this not finite difference
    epsilon = np.sqrt(np.finfo(float).eps)

    cambers = self.get_camber_at_chord_fraction(chord_fraction)
    cambers_incremented = self.get_camber_at_chord_fraction(chord_fraction + epsilon)
    dydx = (cambers_incremented - cambers) / epsilon

    if dydx.shape == 1:  # single point
        normal = np.hstack((-dydx, 1))
        normal /= np.linalg.norm(normal)
        return normal
    else:  # multiple points vectorized
        normal = np.column_stack((-dydx, np.ones(dydx.shape)))
        normal /= np.expand_dims(np.linalg.norm(normal, axis=1), axis=1)  # normalize
        return normal
Example #10
Source File: welded_beam.py From pymoo with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    f1 = 1.10471 * x[:, 0] ** 2 * x[:, 1] + 0.04811 * x[:, 2] * x[:, 3] * (14.0 + x[:, 1])
    f2 = 2.1952 / (x[:, 3] * x[:, 2] ** 3)

    P = 6000
    L = 14
    t_max = 13600
    s_max = 30000

    R = anp.sqrt(0.25 * (x[:, 1] ** 2 + (x[:, 0] + x[:, 2]) ** 2))
    M = P * (L + x[:, 1] / 2)
    J = 2 * anp.sqrt(0.5) * x[:, 0] * x[:, 1] * (x[:, 1] ** 2 / 12 + 0.25 * (x[:, 0] + x[:, 2]) ** 2)
    t1 = P / (anp.sqrt(2) * x[:, 0] * x[:, 1])
    t2 = M * R / J
    t = anp.sqrt(t1 ** 2 + t2 ** 2 + t1 * t2 * x[:, 1] / R)
    s = 6 * P * L / (x[:, 3] * x[:, 2] ** 2)
    P_c = 64746.022 * (1 - 0.0282346 * x[:, 2]) * x[:, 2] * x[:, 3] ** 3

    g1 = (1 / t_max) * (t - t_max)
    g2 = (1 / s_max) * (s - s_max)
    g3 = (1 / (5 - 0.125)) * (x[:, 0] - x[:, 3])
    g4 = (1 / P) * (P - P_c)

    out["F"] = anp.column_stack([f1, f2])
    out["G"] = anp.column_stack([g1, g2, g3, g4])
Example #11
Source File: goftest.py From kernel-gof with MIT License
def perform_test(self, dat):
    """
    dat: an instance of Data
    """
    with util.ContextTimer() as t:
        alpha = self.alpha
        X = dat.data()
        n = X.shape[0]

        # H: length-n vector
        _, H = self.compute_stat(dat, return_pointwise_stats=True)
        test_stat = np.sqrt(old_div(n, 2)) * np.mean(H)
        stat_var = np.mean(H ** 2)
        pvalue = stats.norm.sf(test_stat, loc=0, scale=np.sqrt(stat_var))

    results = {'alpha': self.alpha, 'pvalue': pvalue,
               'test_stat': test_stat,
               'h0_rejected': pvalue < alpha, 'time_secs': t.secs,
               }
    return results
Example #12
Source File: deep_gaussian_process.py From autograd with MIT License
def plot_gp(ax, X, y, pred_mean, pred_cov, plot_xs):
    ax.cla()
    marg_std = np.sqrt(np.diag(pred_cov))
    ax.plot(plot_xs, pred_mean, 'b')
    ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
            np.concatenate([pred_mean - 1.96 * marg_std,
                            (pred_mean + 1.96 * marg_std)[::-1]]),
            alpha=.15, fc='Blue', ec='None')

    # Show samples from posterior.
    rs = npr.RandomState(0)
    sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
    ax.plot(plot_xs, sampled_funcs.T)
    ax.plot(X, y, 'kx')
    ax.set_ylim([-1.5, 1.5])
    ax.set_xticks([])
    ax.set_yticks([])
Example #13
Source File: g.py From pymoo with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    l = []
    for j in range(self.n_var):
        l.append((j + 1) * x[:, j] ** 2)
    sum_jx = anp.sum(anp.column_stack(l), axis=1)

    a = anp.sum(anp.cos(x) ** 4, axis=1)
    b = 2 * anp.prod(anp.cos(x) ** 2, axis=1)
    c = (anp.sqrt(sum_jx)).flatten()
    c = c + (c == 0) * 1e-20

    f = -anp.absolute((a - b) / c)

    # Constraints
    g1 = -anp.prod(x, 1) + 0.75
    g2 = anp.sum(x, axis=1) - 7.5 * self.n_var

    out["F"] = f
    out["G"] = anp.column_stack([g1, g2])
Example #14
Source File: test_truediv.py From autograd with MIT License
def test_div():
    fun = lambda x, y: x / y
    make_gap_from_zero = lambda x: np.sqrt(x ** 2 + 0.5)
    for arg1, arg2 in arg_pairs():
        arg1 = make_gap_from_zero(arg1)
        arg2 = make_gap_from_zero(arg2)
        check_grads(fun)(arg1, arg2)
Example #15
Source File: goftest.py From kernel-gof with MIT License
def feature_tensor(self, X):
    """
    Compute the feature tensor which is n x d x J.
    The feature tensor can be used to compute the statistic, and the
    covariance matrix for simulating from the null distribution.

    X: n x d data numpy array

    return an n x d x J numpy array
    """
    k = self.k
    J = self.V.shape[0]
    n, d = X.shape
    # n x d matrix of gradients
    grad_logp = self.p.grad_log(X)
    #assert np.all(util.is_real_num(grad_logp))
    # n x J matrix
    #print 'V'
    #print self.V
    K = k.eval(X, self.V)
    #assert np.all(util.is_real_num(K))

    list_grads = np.array([np.reshape(k.gradX_y(X, v), (1, n, d)) for v in self.V])
    stack0 = np.concatenate(list_grads, axis=0)
    # a numpy array G of size n x d x J such that G[:, :, j]
    # is the derivative of k(X, V_j) with respect to X.
    dKdV = np.transpose(stack0, (1, 2, 0))

    # n x d x J tensor
    grad_logp_K = util.outer_rows(grad_logp, K)
    #print 'grad_logp'
    #print grad_logp.dtype
    #print grad_logp
    #print 'K'
    #print K
    Xi = old_div((grad_logp_K + dKdV), np.sqrt(d * J))
    #Xi = (grad_logp_K + dKdV)
    return Xi
Example #16
Source File: run_synthetic_example.py From ParetoMTL with MIT License
def f2(x):
    n = len(x)
    sum2 = np.sum([(x[i] + 1.0 / np.sqrt(n)) ** 2 for i in range(n)])
    f2 = 1 - np.exp(-sum2)
    return f2


# calculate the gradients using autograd
Example #17
Source File: run_synthetic_example.py From ParetoMTL with MIT License
def f1(x):
    n = len(x)
    sum1 = np.sum([(x[i] - 1.0 / np.sqrt(n)) ** 2 for i in range(n)])
    f1 = 1 - np.exp(-sum1)
    return f1
Example #18
Source File: griewank.py From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    out["F"] = 1 + 1 / 4000 * np.sum(np.power(x, 2), axis=1) \
               - np.prod(np.cos(x / np.sqrt(np.arange(1, x.shape[1] + 1))), axis=1)
Example #19
Source File: goftest.py From kernel-gof with MIT License
def power_criterion(p, dat, k, test_locs, reg=1e-2, use_unbiased=True, use_2terms=False):
    """
    Compute the mean and standard deviation of the statistic under H1.
    Return mean/sd.

    use_2terms: True if the objective should include the first term in the
        power expression. This term carries the test threshold and is
        difficult to compute (depends on the optimized test locations).
        If True, then the objective will be
        -1/(n**0.5*sigma_H1) + n**0.5 FSSD^2/sigma_H1,
        which ignores the test threshold in the first term.
    """
    X = dat.data()
    n = X.shape[0]
    V = test_locs
    fssd = FSSD(p, k, V, null_sim=None)
    fea_tensor = fssd.feature_tensor(X)
    u_mean, u_variance = FSSD.ustat_h1_mean_variance(fea_tensor,
                                                     return_variance=True,
                                                     use_unbiased=use_unbiased)

    # mean/sd criterion
    sigma_h1 = np.sqrt(u_variance + reg)
    ratio = old_div(u_mean, sigma_h1)
    if use_2terms:
        obj = old_div(-1.0, (np.sqrt(n) * sigma_h1)) + np.sqrt(n) * ratio
        #print obj
    else:
        obj = ratio
    return obj
Example #20
Source File: kursawe.py From pymop with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):
    l = []
    for i in range(2):
        l.append(-10 * anp.exp(-0.2 * anp.sqrt(anp.square(x[:, i]) + anp.square(x[:, i + 1]))))
    f1 = anp.sum(anp.column_stack(l), axis=1)

    f2 = anp.sum(anp.power(anp.abs(x), 0.8) + 5 * anp.sin(anp.power(x, 3)), axis=1)

    out["F"] = anp.column_stack([f1, f2])
Example #21
Source File: test_binary_ops.py From autograd with MIT License
def test_mod():
    fun = lambda x, y: x % y
    make_gap_from_zero = lambda x: np.sqrt(x ** 2 + 0.5)
    for arg1, arg2 in arg_pairs():
        if arg1 is not arg2:  # Gradient undefined at x == y
            arg1 = make_gap_from_zero(arg1)
            arg2 = make_gap_from_zero(arg2)
            check_grads(fun)(arg1, arg2)
Example #22
Source File: test_scalar_ops.py From autograd with MIT License
def test_sqrt():
    fun = lambda x: 3.0 * np.sqrt(x)
    check_grads(fun)(10.0 * npr.rand())
Example #23
Source File: test_systematic.py From autograd with MIT License
def test_sqrt():
    unary_ufunc_check(np.sqrt, lims=[1.0, 3.0])
Example #24
Source File: test_scipy.py From autograd with MIT License
def func(y, t, arg1, arg2):
    return -np.sqrt(t) - y + arg1 - np.mean((y + arg2) ** 2)
Example #25
Source File: hyp_cones_model.py From hyperbolic_cones with Apache License 2.0
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
    norm_parent = np.linalg.norm(parent_vectors, axis=1)
    norm_parent_sq = norm_parent ** 2
    norms_other = np.linalg.norm(other_vectors, axis=1)
    norms_other_sq = norms_other ** 2
    # To avoid the fact that parent can be equal to child for the reconstruction experiment
    euclidean_dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6)
    dot_prods = (parent_vectors * other_vectors).sum(axis=1)

    g = 1 + norm_parent_sq * norms_other_sq - 2 * dot_prods
    g_sqrt = np.sqrt(g)

    if not rel_reversed:
        # parent = x, other = y
        child_numerator = dot_prods * (1 + norm_parent_sq) - norm_parent_sq * (1 + norms_other_sq)
        child_numitor = euclidean_dists * norm_parent * g_sqrt
        angles_psi_parent = np.arcsin(K * (1 - norm_parent_sq) / norm_parent)
    else:
        # parent = y, other = x
        child_numerator = dot_prods * (1 + norms_other_sq) - norms_other_sq * (1 + norm_parent_sq)
        child_numitor = euclidean_dists * norms_other * g_sqrt
        angles_psi_parent = np.arcsin(K * (1 - norms_other_sq) / norms_other)

    cos_angles_child = child_numerator / child_numitor
    assert not np.isnan(cos_angles_child).any()
    clipped_cos_angle_child = np.maximum(cos_angles_child, -1 + EPS)
    clipped_cos_angle_child = np.minimum(clipped_cos_angle_child, 1 - EPS)
    angles_child = np.arccos(clipped_cos_angle_child)  # (1 + neg_size, batch_size)

    # return angles_child  # np.maximum(1, angles_child / angles_psi_parent)
    return np.maximum(0, angles_child - angles_psi_parent)
Example #26
Source File: optimizers.py From autograd with MIT License
def adam(grad, x, callback=None, num_iters=100,
         step_size=0.001, b1=0.9, b2=0.999, eps=10**-8):
    """Adam as described in http://arxiv.org/pdf/1412.6980.pdf.
    It's basically RMSprop with momentum and some correction terms."""
    m = np.zeros(len(x))
    v = np.zeros(len(x))
    for i in range(num_iters):
        g = grad(x, i)
        if callback:
            callback(x, i, g)
        m = (1 - b1) * g + b1 * m           # First moment estimate.
        v = (1 - b2) * (g**2) + b2 * v      # Second moment estimate.
        mhat = m / (1 - b1**(i + 1))        # Bias correction.
        vhat = v / (1 - b2**(i + 1))
        x = x - step_size * mhat / (np.sqrt(vhat) + eps)
    return x
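A minimal usage sketch for the adam routine above (the quadratic objective is a made-up example; note that the gradient callable must accept both the iterate and the iteration index, matching the grad(x, i) call inside the loop):

import autograd.numpy as np
from autograd import grad

# Hypothetical objective: a quadratic bowl minimized at x = 3.
def objective(x, i):
    return np.sum((x - 3.0) ** 2)

x0 = np.zeros(5)
x_opt = adam(grad(objective), x0, num_iters=500, step_size=0.1)
# x_opt should end up close to [3. 3. 3. 3. 3.]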
Example #27
Source File: generative_adversarial_net.py From autograd with MIT License
def adam_minimax(grad_both, init_params_max, init_params_min, callback=None,
                 num_iters=100, step_size_max=0.001, step_size_min=0.001,
                 b1=0.9, b2=0.999, eps=10**-8):
    """Adam modified to do minimax optimization, for instance to help with
    training generative adversarial networks."""

    x_max, unflatten_max = flatten(init_params_max)
    x_min, unflatten_min = flatten(init_params_min)
    m_max = np.zeros(len(x_max))
    v_max = np.zeros(len(x_max))
    m_min = np.zeros(len(x_min))
    v_min = np.zeros(len(x_min))
    for i in range(num_iters):
        g_max_uf, g_min_uf = grad_both(unflatten_max(x_max),
                                       unflatten_min(x_min), i)
        g_max, _ = flatten(g_max_uf)
        g_min, _ = flatten(g_min_uf)

        if callback:
            callback(unflatten_max(x_max), unflatten_min(x_min), i,
                     unflatten_max(g_max), unflatten_min(g_min))

        m_max = (1 - b1) * g_max + b1 * m_max       # First moment estimate.
        v_max = (1 - b2) * (g_max**2) + b2 * v_max  # Second moment estimate.
        mhat_max = m_max / (1 - b1**(i + 1))        # Bias correction.
        vhat_max = v_max / (1 - b2**(i + 1))
        x_max = x_max + step_size_max * mhat_max / (np.sqrt(vhat_max) + eps)

        m_min = (1 - b1) * g_min + b1 * m_min       # First moment estimate.
        v_min = (1 - b2) * (g_min**2) + b2 * v_min  # Second moment estimate.
        mhat_min = m_min / (1 - b1**(i + 1))        # Bias correction.
        vhat_min = v_min / (1 - b2**(i + 1))
        x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps)
    return unflatten_max(x_max), unflatten_min(x_min)


### Setup and run on MNIST ###
Example #28
Source File: fixed_points.py From autograd with MIT License
def sqrt(a, guess=10.):
    # return fixed_point(newton_sqrt_iter, a, guess, distance, 1e-4)
    return fixed_point(grad_descent_sqrt_iter, a, guess, distance, 1e-4)
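The helpers referenced above (fixed_point, distance, newton_sqrt_iter, grad_descent_sqrt_iter) live elsewhere in fixed_points.py and are not shown on this page. A rough, self-contained sketch of the idea follows; these are hypothetical stand-ins, not the autograd originals:

# Hypothetical stand-ins for the helpers used above, for illustration only.
def distance(x, y):
    return abs(x - y)

def newton_sqrt_iter(a):
    # Newton's method for x**2 = a: x <- (x + a / x) / 2
    return lambda x: 0.5 * (x + a / x)

def grad_descent_sqrt_iter(a):
    # Gradient step on g(x) = x**3 / 3 - a * x, whose stationary point solves x**2 = a.
    return lambda x: x - 0.05 * (x ** 2 - a)

def fixed_point(iter_builder, a, x0, dist, tol):
    # Iterate x <- step(x) until successive iterates are within tol.
    step = iter_builder(a)
    x, x_next = x0, step(x0)
    while dist(x, x_next) > tol:
        x, x_next = x_next, step(x_next)
    return x_next

With these stand-ins, sqrt(2.0) converges to roughly 1.414.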
Example #29
Source File: gaussian_process.py From autograd with MIT License
def callback(params):
    print("Log likelihood {}".format(-objective(params)))
    plt.cla()

    # Show posterior marginals.
    plot_xs = np.reshape(np.linspace(-7, 7, 300), (300, 1))
    pred_mean, pred_cov = predict(params, X, y, plot_xs)
    marg_std = np.sqrt(np.diag(pred_cov))
    ax.plot(plot_xs, pred_mean, 'b')
    ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
            np.concatenate([pred_mean - 1.96 * marg_std,
                            (pred_mean + 1.96 * marg_std)[::-1]]),
            alpha=.15, fc='Blue', ec='None')

    # Show samples from posterior.
    rs = npr.RandomState(0)
    sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
    ax.plot(plot_xs, sampled_funcs.T)
    ax.plot(X, y, 'kx')
    ax.set_ylim([-1.5, 1.5])
    ax.set_xticks([])
    ax.set_yticks([])
    plt.draw()
    plt.pause(1.0 / 60.0)


# Initialize covariance parameters
Example #30
Source File: cdtlz.py From pymoo with Apache License 2.0
def constraint_c2(f, r):
    n_obj = f.shape[1]

    v1 = anp.inf * anp.ones(f.shape[0])

    for i in range(n_obj):
        temp = (f[:, i] - 1) ** 2 + (anp.sum(f ** 2, axis=1) - f[:, i] ** 2) - r ** 2
        v1 = anp.minimum(temp.flatten(), v1)

    a = 1 / anp.sqrt(n_obj)
    v2 = anp.sum((f - a) ** 2, axis=1) - r ** 2
    g = anp.minimum(v1, v2.flatten())

    return g