Python numpy.tensordot() Examples
The following are 30 code examples showing how to use numpy.tensordot(). They are extracted from open source projects; the project, author, file, and license are noted above each example.
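Before the project examples, here is a minimal self-contained sketch (plain NumPy; the arrays are purely illustrative) of the `axes` conventions that recur throughout:

import numpy as np

a = np.arange(6).reshape(2, 3)
b = np.arange(12).reshape(3, 4)

# An integer contracts that many trailing axes of the first argument
# with the same number of leading axes of the second.
np.tensordot(a, b, axes=1).shape           # (2, 4), same as a @ b
np.tensordot(a, a, axes=2)                 # 0-d array, same as (a * a).sum()
np.tensordot(a, b, axes=0).shape           # (2, 3, 3, 4), outer product

# A pair (or pair of lists) names the axes to contract explicitly.
np.tensordot(a, b, axes=(1, 0)).shape      # (2, 4)
np.tensordot(a, a, axes=([0, 1], [0, 1]))  # 0-d array, sums over both axes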
Example 1
Project: pyscf Author: pyscf File: test_0004_vna.py License: Apache License 2.0

def test_vna_lih(self):
    dname = dirname(abspath(__file__))
    n = nao(label='lih', cd=dname)
    m = 200
    dvec, midv = 2 * (n.atom2coord[1] - n.atom2coord[0]) / m, (n.atom2coord[1] + n.atom2coord[0]) / 2.0
    vgrid = np.tensordot(np.array(range(-m, m + 1)), dvec, axes=0) + midv
    sgrid = np.array(range(-m, m + 1)) * np.sqrt((dvec * dvec).sum())
    #vgrid = np.array([[-1.517908564663352e+00, 1.180550033093826e+00, 0.000000000000000e+00]])
    vna = n.vna(vgrid)
    #for v, r in zip(vna, vgrid):
    #    print("%23.15e %23.15e %23.15e %23.15e" % (r[0], r[1], r[2], v))
    #print(vna.shape, sgrid.shape)
    np.savetxt('vna_lih_0004.txt', np.row_stack((sgrid, vna)).T)
    ref = np.loadtxt(dname + '/vna_lih_0004.txt-ref')
    for r, d in zip(ref[:, 1], vna):
        self.assertAlmostEqual(r, d)
Example 2
Project: pyGSTi Author: pyGSTio File: spamvec.py License: Apache License 2.0

def deriv_wrt_params(self, wrtFilter=None):
    """
    Construct a matrix whose columns are the derivatives of the SPAM vector
    with respect to a single param.  Thus, each column is of length
    get_dimension and there is one column per SPAM vector parameter.

    Returns
    -------
    numpy array
        Array of derivatives, shape == (dimension, num_params)
    """
    dmVec = self.state_vec.todense()

    derrgen = self.error_map.deriv_wrt_params(wrtFilter)  # shape (dim*dim, nParams)
    derrgen.shape = (self.dim, self.dim, derrgen.shape[1])  # => (dim,dim,nParams)

    if self._prep_or_effect == "prep":  # derror map acts on dmVec
        #return _np.einsum("ijk,j->ik", derrgen, dmVec)  # return shape = (dim,nParams)
        return _np.tensordot(derrgen, dmVec, (1, 0))  # return shape = (dim,nParams)
    else:
        # self.error_map acts on the *state* vector before dmVec acts
        # as an effect:  E.dag -> dot(E.dag,errmap) ==> E -> dot(errmap.dag,E)
        #return _np.einsum("jik,j->ik", derrgen.conjugate(), dmVec)  # return shape = (dim,nParams)
        return _np.tensordot(derrgen.conjugate(), dmVec, (0, 0))  # return shape = (dim,nParams)
Example 3
Project: tensorflow_constrained_optimization Author: google-research File: proxy_lagrangian_optimizer_test.py License: Apache License 2.0

def test_maximum_eigenvector_power_method(self):
    """Tests power method routine on some known left-stochastic matrices."""
    matrix1 = np.matrix([[0.6, 0.1, 0.1], [0.0, 0.6, 0.9], [0.4, 0.3, 0.0]])
    matrix2 = np.matrix([[0.4, 0.4, 0.2], [0.2, 0.1, 0.5], [0.4, 0.5, 0.3]])

    with self.wrapped_session() as session:
        eigenvector1 = session.run(
            proxy_lagrangian_optimizer._maximal_eigenvector_power_method(
                tf.constant(matrix1)))
        eigenvector2 = session.run(
            proxy_lagrangian_optimizer._maximal_eigenvector_power_method(
                tf.constant(matrix2)))

    # Check that eigenvector1 and eigenvector2 are eigenvectors of matrix1 and
    # matrix2 (respectively) with associated eigenvalue 1.
    matrix_eigenvector1 = np.tensordot(matrix1, eigenvector1, axes=1)
    matrix_eigenvector2 = np.tensordot(matrix2, eigenvector2, axes=1)
    self.assertAllClose(eigenvector1, matrix_eigenvector1, rtol=0, atol=1e-6)
    self.assertAllClose(eigenvector2, matrix_eigenvector2, rtol=0, atol=1e-6)
Example 4
Project: tenpy Author: tenpy File: tdvp_numpy.py License: GNU General Public License v3.0

def middle_bond_hamiltonian(Jx, Jz, hx, hz, L):
    """Return the bond Hamiltonian as an MPO for L sites."""
    sx = np.array([[0., 1.], [1., 0.]])
    sz = np.array([[1., 0.], [0., -1.]])

    H_bond = Jx * np.kron(sx, sx) + Jz * np.kron(sz, sz)
    H_bond = H_bond + hx / 2 * np.kron(sx, np.eye(2)) + hx / 2 * np.kron(np.eye(2), sx)
    H_bond = H_bond + hz / 2 * np.kron(sz, np.eye(2)) + hz / 2 * np.kron(np.eye(2), sz)
    H_bond = H_bond.reshape(2, 2, 2, 2).transpose(0, 2, 1, 3).reshape(4, 4)  # i1 i2 i1' i2'
    U, s, V = np.linalg.svd(H_bond)
    M1 = np.dot(U, np.diag(s)).reshape(2, 2, 1, 4).transpose(2, 3, 0, 1)
    M2 = V.reshape(4, 1, 2, 2)
    M0 = np.tensordot(np.tensordot([1], [1], axes=0), np.eye(2), axes=0)
    W = []
    for i in range(L):
        if i == L / 2 - 1:
            W.append(M1)
        elif i == L / 2:
            W.append(M2)
        else:
            W.append(M0)
    return W
Example 5
Project: tenpy Author: tenpy File: test_np_conserved.py License: GNU General Public License v3.0

def test_npc_tensordot_extra():
    # check that the sorting of charges is fine with special test matrices
    # which gave me some headaches at some point :/
    chinfo = npc.ChargeInfo([1], ['Sz'])
    leg = npc.LegCharge.from_qflat(chinfo, [-1, 1])
    legs = [leg, leg, leg.conj(), leg.conj()]
    idx = [(0, 0, 0, 0), (0, 1, 0, 1), (0, 1, 1, 0), (1, 0, 0, 1), (1, 0, 1, 0), (1, 1, 1, 1)]
    Uflat = np.eye(4).reshape([2, 2, 2, 2])  # up to numerical rubbish the identity
    Uflat[0, 1, 1, 0] = Uflat[1, 0, 0, 1] = 1.e-20
    U = npc.Array.from_ndarray(Uflat, legs, cutoff=0.)
    theta_flat = np.zeros([2, 2, 2, 2])
    vals = np.random.random(len(idx))
    vals /= np.linalg.norm(vals)
    for i, val in zip(idx, vals):
        theta_flat[i] = val
    theta = npc.Array.from_ndarray(theta_flat, [leg, leg, leg.conj(), leg.conj()], cutoff=0.)
    assert abs(np.linalg.norm(theta_flat) - npc.norm(theta)) < 1.e-14
    Utheta_flat = np.tensordot(Uflat, theta_flat, axes=2)
    Utheta = npc.tensordot(U, theta, axes=2)
    npt.assert_array_almost_equal_nulp(Utheta.to_ndarray(), Utheta_flat, 10)
    assert abs(np.linalg.norm(theta_flat) - npc.norm(Utheta)) < 1.e-10
Example 6
Project: tenpy Author: tenpy File: test_np_conserved.py License: GNU General Public License v3.0

def test_npc_outer():
    for sort in [True, False]:
        print("sort =", sort)
        a = random_Array((6, 7), chinfo3, sort=sort)
        b = random_Array((5, 5), chinfo3, sort=sort)
        aflat = a.to_ndarray()
        bflat = b.to_ndarray()
        c = npc.outer(a, b)
        c.test_sanity()
        cflat = np.tensordot(aflat, bflat, axes=0)
        npt.assert_equal(c.to_ndarray(), cflat)
        c = npc.tensordot(a, b, axes=0)  # (should as well call npc.outer)
        npt.assert_equal(c.to_ndarray(), cflat)

    print("for trivial charge")
    a = npc.Array.from_func(np.random.random, [lcTr, lcTr.conj()], shape_kw='size')
    aflat = a.to_ndarray()
    b = npc.tensordot(a, a, axes=0)
    bflat = np.tensordot(aflat, aflat, axes=0)
    npt.assert_array_almost_equal_nulp(b.to_ndarray(), bflat, sum(a.shape))
Example 7
Project: tenpy Author: tenpy File: d_dmrg.py License: GNU General Public License v3.0

def update_bond(self, i):
    j = (i + 1) % self.psi.L
    # get effective Hamiltonian
    Heff = SimpleHeff(self.LPs[i], self.RPs[j], self.H_mpo[i], self.H_mpo[j])
    # Diagonalize Heff, find ground state `theta`
    theta0 = np.reshape(self.psi.get_theta2(i), [Heff.shape[0]])  # initial guess
    e, v = arp.eigsh(Heff, k=1, which='SA', return_eigenvectors=True, v0=theta0)
    theta = np.reshape(v[:, 0], Heff.theta_shape)
    # split and truncate
    Ai, Sj, Bj = split_truncate_theta(theta, self.chi_max, self.eps)
    # put back into MPS
    Gi = np.tensordot(np.diag(self.psi.Ss[i]**(-1)), Ai, axes=[1, 0])  # vL [vL*], [vL] i vC
    self.psi.Bs[i] = np.tensordot(Gi, np.diag(Sj), axes=[2, 0])  # vL i [vC], [vC*] vC
    self.psi.Ss[j] = Sj  # vC
    self.psi.Bs[j] = Bj  # vC j vR
    self.update_LP(i)
    self.update_RP(j)
Example 8
Project: tenpy Author: tenpy File: a_mps.py License: GNU General Public License v3.0

def correlation_length(self):
    """Diagonalize transfer matrix to obtain the correlation length."""
    import scipy.sparse.linalg.eigen.arpack as arp
    assert self.bc == 'infinite'  # works only in the infinite case
    B = self.Bs[0]  # vL i vR
    chi = B.shape[0]
    T = np.tensordot(B, np.conj(B), axes=[1, 1])  # vL [i] vR, vL* [i*] vR*
    T = np.transpose(T, [0, 2, 1, 3])  # vL vL* vR vR*
    for i in range(1, self.L):
        B = self.Bs[i]
        T = np.tensordot(T, B, axes=[2, 0])  # vL vL* [vR] vR*, [vL] i vR
        T = np.tensordot(T, np.conj(B), axes=[[2, 3], [0, 1]])
        # vL vL* [vR*] [i] vR, [vL*] [i*] vR*
    T = np.reshape(T, (chi**2, chi**2))
    # Obtain the 2nd largest eigenvalue
    eta = arp.eigs(T, k=2, which='LM', return_eigenvectors=False, ncv=20)
    return -self.L / np.log(np.min(np.abs(eta)))
Example 9
Project: MNIST-baselines Author: cxy1997 File: pcanet.py License: MIT License

def binary_to_decimal(X):
    """
    Parameters
    ----------
    X: xp.ndarray
        Feature maps
    """
    # This function expects X of shape (n_images, L2, y, x) as an argument.
    # Let's say that X[k] (0 <= k < n_images) can be represented like
    #     X[k] = [map_k[0], map_k[1], ..., map_k[L2-1]]
    # where the shape of each map_k is (y, x).
    # Then we calculate
    #     a[0] * map_k[0] + a[1] * map_k[1] + ... + a[L2-1] * map_k[L2-1]
    # for each X[k], where a = [2^(L2-1), 2^(L2-2), ..., 2^0].
    # Therefore, the output shape must be (n_images, y, x).
    a = xp.arange(X.shape[1])[::-1]
    a = xp.power(2, a)
    return xp.tensordot(X, a, axes=([1], [0]))
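As a sanity check on the pattern above, here is a small sketch in plain NumPy (the shapes are arbitrary; `xp` in the project may be numpy or cupy, plain numpy is assumed here):

import numpy as np

X = np.random.random((5, 4, 8, 8))         # (n_images, L2, y, x)
a = np.power(2, np.arange(4)[::-1])        # [8, 4, 2, 1]

out = np.tensordot(X, a, axes=([1], [0]))  # weighted sum over the L2 axis
assert out.shape == (5, 8, 8)
assert np.allclose(out, np.einsum('nlyx,l->nyx', X, a))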
Example 10
Project: discopy Author: oxford-quantum-group File: tensor.py License: BSD 3-Clause "New" or "Revised" License

def __init__(self, *dim):
    """
    >>> Id(1)
    Tensor(dom=Dim(1), cod=Dim(1), array=[1])
    >>> list(Id(2).array.flatten())
    [1.0, 0.0, 0.0, 1.0]
    >>> Id(2).array.shape
    (2, 2)
    >>> list(Id(2, 2).array.flatten())[:8]
    [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
    >>> list(Id(2, 2).array.flatten())[8:]
    [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
    """
    dim = dim[0] if isinstance(dim[0], Dim) else Dim(*dim)
    array = functools.reduce(
        lambda a, x: np.tensordot(a, np.identity(x), 0)
        if a.shape else np.identity(x), dim, np.array(1))
    array = np.moveaxis(
        array, [2 * i for i in range(len(dim))], list(range(len(dim))))
    super().__init__(dim, dim, array)
Example 11
Project: strawberryfields Author: XanaduAI File: program_functions.py License: Apache License 2.0

def _interleaved_identities(n: int, cutoff_dim: int):
    r"""Maximally entangled state of `n` modes.

    Returns the tensor :math:`\sum_{abc\ldots} \ket{abc\ldots}\bra{abc\ldots}`
    representing an unnormalized, maximally entangled state of `n` subsystems.

    Args:
        n (int): number of subsystems
        cutoff_dim (int): Fock basis truncation dimension

    Returns:
        array: unnormalized maximally entangled state, shape == (cutoff_dim,) * (2*n)
    """
    I = np.identity(cutoff_dim)
    temp = I
    for _ in range(1, n):
        temp = np.tensordot(temp, I, axes=0)

    # use einsum to permute the indices such that |a><a|*|b><b|*|c><c|*...
    # becomes |abc...><abc...|
    sublist = [int(n) for n in np.arange(2 * n).reshape((2, n)).T.reshape([-1])]
    return np.einsum(temp, sublist)
Example 12
Project: strawberryfields Author: XanaduAI File: ops.py License: Apache License 2.0

def tensor(u, v, n, pure, pos=None):
    """
    Returns the tensor product of `u` and `v`, optionally spliced in at
    location `pos`.
    """
    w = np.tensordot(u, v, axes=0)

    if pos is not None:
        if pure:
            scale = 1
        else:
            scale = 2
        for i in range(v.ndim):
            w = np.rollaxis(w, scale * n + i, scale * pos + i)

    return w
Example 13
Project: trax Author: google File: lax_numpy_test.py License: Apache License 2.0

def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
    rng = rng_factory()
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)

    def onp_fun(a, b):
        a = a if lhs_dtype != lnp.bfloat16 else a.astype(onp.float32)
        b = b if rhs_dtype != lnp.bfloat16 else b.astype(onp.float32)
        dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
        return onp.tensordot(a, b, axes).astype(dtype)

    tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
           onp.complex64: 1e-3, onp.complex128: 1e-12}
    if jtu.device_under_test() == "tpu":
        tol[onp.float32] = tol[onp.complex64] = 2e-1
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True, tol=tol)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True,
                          check_incomplete_shape=True)
Example 14
Project: pyscf Author: pyscf File: test_0004_vna.py License: Apache License 2.0

def test_vna_n2(self):
    dname = dirname(abspath(__file__))
    n = nao(label='n2', cd=dname)
    m = 200
    dvec, midv = 2 * (n.atom2coord[1] - n.atom2coord[0]) / m, (n.atom2coord[1] + n.atom2coord[0]) / 2.0
    vgrid = np.tensordot(np.array(range(-m, m + 1)), dvec, axes=0) + midv
    sgrid = np.array(range(-m, m + 1)) * np.sqrt((dvec * dvec).sum())
    vna = n.vna(vgrid)
    #print(vna.shape, sgrid.shape)
    #np.savetxt('vna_n2_0004.txt', np.row_stack((sgrid, vna)).T)
    ref = np.loadtxt(dname + '/vna_n2_0004.txt-ref')
    for r, d in zip(ref[:, 1], vna):
        self.assertAlmostEqual(r, d)
Example 15
Project: pyGSTi Author: pyGSTio File: spamvec.py License: Apache License 2.0

def hessian_wrt_params(self, wrtFilter1=None, wrtFilter2=None):
    """
    Construct the Hessian of this SPAM vector with respect to its parameters.

    This function returns a tensor whose first axis corresponds to the
    flattened operation matrix and whose 2nd and 3rd axes correspond to the
    parameters that are differentiated with respect to.

    Parameters
    ----------
    wrtFilter1, wrtFilter2 : list
        Lists of indices of the parameters to take first and second
        derivatives with respect to.  If None, then derivatives are taken
        with respect to all of the vector's parameters.

    Returns
    -------
    numpy array
        Hessian with shape (dimension, num_params1, num_params2)
    """
    dmVec = self.state_vec.todense()

    herrgen = self.error_map.hessian_wrt_params(wrtFilter1, wrtFilter2)  # shape (dim*dim, nParams1, nParams2)
    herrgen.shape = (self.dim, self.dim, herrgen.shape[1], herrgen.shape[2])  # => (dim,dim,nParams1,nParams2)

    if self._prep_or_effect == "prep":  # derror map acts on dmVec
        #return _np.einsum("ijkl,j->ikl", herrgen, dmVec)
        return _np.tensordot(herrgen, dmVec, (1, 0))  # return shape = (dim,nParams1,nParams2)
    else:
        # self.error_map acts on the *state* vector before dmVec acts
        # as an effect:  E.dag -> dot(E.dag,errmap) ==> E -> dot(errmap.dag,E)
        #return _np.einsum("jikl,j->ikl", herrgen.conjugate(), dmVec)
        return _np.tensordot(herrgen.conjugate(), dmVec, (0, 0))  # return shape = (dim,nParams1,nParams2)
Example 16
Project: pyGSTi Author: pyGSTio File: germselection.py License: Apache License 2.0

def calc_twirled_DDD(model, germ, eps=1e-6):
    """Calculate the positive squares of the germ Jacobian.

    twirledDerivDaggerDeriv == array J.H*J contributions from `germ`
    (J=Jacobian) indexed by (iModelParam1, iModelParam2);
    size (vec_model_dim, vec_model_dim)
    """
    twirledDeriv = twirled_deriv(model, germ, eps) / len(germ)
    #twirledDerivDaggerDeriv = _np.einsum('jk,jl->kl',
    #                                     _np.conjugate(twirledDeriv),
    #                                     twirledDeriv)
    twirledDerivDaggerDeriv = _np.tensordot(_np.conjugate(twirledDeriv),
                                            twirledDeriv, (0, 0))
    return twirledDerivDaggerDeriv
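The commented-out einsum kept above documents the conversion pattern: contracting the shared first axis of conj(J) with J yields J^H J. A small self-contained check of that equivalence (J here is a random stand-in, not pyGSTi's twirled derivative):

import numpy as np

J = np.random.random((6, 4)) + 1j * np.random.random((6, 4))
ddd = np.tensordot(np.conjugate(J), J, (0, 0))  # shape (4, 4)

# same contraction two other ways
assert np.allclose(ddd, np.einsum('jk,jl->kl', np.conjugate(J), J))
assert np.allclose(ddd, J.conj().T @ J)  # i.e. J^H J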
Example 17
Project: pyGSTi Author: pyGSTio File: germselection.py License: Apache License 2.0

def compute_score(weights, model_num, scoreFunc, derivDaggerDerivList,
                  forceIndices, forceScore, nGaugeParams, opPenalty,
                  germLengths, l1Penalty=1e-2, scoreDict=None):
    """Returns a germ set "score" in which smaller is better.

    Also returns an intentionally bad score (`forceScore`) if `weights` is
    zero on any of the "forced" germs (i.e. at any index in `forceIndices`).
    This function is included for use by :func:`optimize_integer_germs_slack`,
    but is not convenient for just computing the score of a germ set.  For
    that, use :func:`calculate_germset_score`.
    """
    if forceIndices is not None and _np.any(weights[forceIndices] <= 0):
        score = forceScore
    else:
        #combinedDDD = _np.einsum('i,ijk', weights,
        #                         derivDaggerDerivList[model_num])
        combinedDDD = _np.squeeze(
            _np.tensordot(_np.expand_dims(weights, 1),
                          derivDaggerDerivList[model_num], (0, 0)))
        assert len(combinedDDD.shape) == 2

        sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))
        observableEigenvals = sortedEigenvals[nGaugeParams:]
        score = (_scoring.list_score(observableEigenvals, scoreFunc)
                 + l1Penalty * _np.sum(weights)
                 + opPenalty * _np.dot(germLengths, weights))
    if scoreDict is not None:
        # Side effect: calling compute_score caches result in scoreDict
        scoreDict[model_num, tuple(weights)] = score
    return score
Example 18
Project: pyGSTi Author: pyGSTio File: germselection.py License: Apache License 2.0

def sq_sing_vals_from_deriv(deriv, weights=None):
    """Calculate the squared singular values of the Jacobian of the germ set.

    Parameters
    ----------
    deriv : numpy.array
        Array of shape ``(nGerms, flattened_op_dim, vec_model_dim)``.  Each
        sub-array corresponding to an individual germ is the Jacobian of the
        vectorized gate representation of that germ raised to some power with
        respect to the model parameters, normalized by dividing by the length
        of each germ after repetition.

    weights : numpy.array
        Array of length ``nGerms``, giving the relative contributions of each
        individual germ's Jacobian to the combined Jacobian (which is
        calculated as a convex combination of the individual Jacobians).

    Returns
    -------
    numpy.array
        The sorted squared singular values of the combined Jacobian of the
        germ set.
    """
    # shape (nGerms, vec_model_dim, vec_model_dim)
    derivDaggerDeriv = _np.einsum('ijk,ijl->ikl', _np.conjugate(deriv), deriv)
    # awkward to convert to tensordot, so leave as einsum

    # Take the average of the D^dagger*D/L^2 matrices associated with each
    # germ, with optional weights.
    combinedDDD = _np.average(derivDaggerDeriv, weights=weights, axis=0)
    sortedEigenvals = _np.sort(_np.real(_nla.eigvalsh(combinedDDD)))

    return sortedEigenvals
Example 19
Project: neurawkes Author: HMEIatJHU File: sequence_generators.py License: MIT License

def compute_intensity_given_past(self, time_current):
    # compute the intensity of current time
    # given the past events
    #
    time_recent = self.one_seq[-1]['time_since_start']
    #
    delta = self.soft_relu(
        numpy.tensordot(self.hidden_t, self.W_delta, (0, 0)))
    #
    hidden_with_time = numpy.exp(
        -delta * (time_current - time_recent)) * self.hidden_t[:, None]
    # (self.dim_model, self.dim_process)
    # self.W_alpha (self.dim_model, self.dim_process)
    self.intensity_tilde = numpy.sum(
        self.W_alpha * hidden_with_time, axis=0) + self.mu
    #
    self.intensity = self.soft_relu(self.intensity_tilde)
    # intensity computation is finished
    #
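The tensordot call in these generators contracts the hidden state with the first axis of a weight tensor. A minimal sketch of that pattern (the shapes are inferred from the comments above, not taken from the project):

import numpy

dim_model, dim_process = 8, 3
hidden_t = numpy.random.random(dim_model)
# Contracting axis 0 of W_delta with hidden_t must leave a
# (dim_model, dim_process) result, so W_delta is assumed 3-D here.
W_delta = numpy.random.random((dim_model, dim_model, dim_process))

delta = numpy.tensordot(hidden_t, W_delta, (0, 0))  # (dim_model, dim_process)
assert numpy.allclose(delta, numpy.einsum('i,ijk->jk', hidden_t, W_delta))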
Example 20
Project: neurawkes Author: HMEIatJHU File: sequence_generators.py License: MIT License

def compute_intensity_upper_bound(self, time_current):
    # compute the upper bound of intensity
    # at the current time
    time_recent = self.one_seq[-1]['time_since_start']
    #
    delta = self.soft_relu(
        numpy.tensordot(self.hidden_t, self.W_delta, (0, 0)))
    #
    hidden_with_time = numpy.exp(
        -delta * (time_current - time_recent)) * self.hidden_t[:, None]
    # (self.dim_model, self.dim_process)
    # self.W_alpha (self.dim_model, self.dim_process)
    self.intensity_tilde_ub = numpy.sum(
        self.hard_relu(self.W_alpha * hidden_with_time), axis=0
    ) + self.hard_relu(self.mu)
    #
    self.intensity_ub = self.soft_relu(self.intensity_tilde_ub)
    # intensity computation is finished
    #
    #
Example 21
Project: neurawkes Author: HMEIatJHU File: sequence_generators.py License: MIT License

def compute_intensity_given_past(self, time_current):
    # compute the intensity of current time
    # given the past events
    #
    time_recent = self.one_seq[-1]['time_since_start']
    #
    delta = self.soft_relu(
        numpy.tensordot(self.hidden_t, self.W_delta, (0, 0)))
    #
    hidden_with_time = numpy.exp(
        -delta * (time_current - time_recent)) * self.hidden_t[:, None]
    # (self.dim_model, self.dim_process)
    # self.W_alpha (self.dim_model, self.dim_process)
    self.intensity_tilde = numpy.sum(
        self.W_alpha * hidden_with_time, axis=0
    ) + numpy.dot(self.hidden_t, self.W_mu)
    #
    self.intensity = self.soft_relu(self.intensity_tilde)
    # intensity computation is finished
    #
Example 22
Project: neurawkes Author: HMEIatJHU File: sequence_generators.py License: MIT License

def compute_intensity_upper_bound(self, time_current):
    # compute the upper bound of intensity
    # at the current time
    time_recent = self.one_seq[-1]['time_since_start']
    #
    delta = self.soft_relu(
        numpy.tensordot(self.hidden_t, self.W_delta, (0, 0)))
    #
    hidden_with_time = numpy.exp(
        -delta * (time_current - time_recent)) * self.hidden_t[:, None]
    # (self.dim_model, self.dim_process)
    # self.W_alpha (self.dim_model, self.dim_process)
    self.intensity_tilde_ub = numpy.sum(
        self.hard_relu(self.W_alpha * hidden_with_time), axis=0
    ) + self.hard_relu(numpy.dot(self.hidden_t, self.W_mu))
    #
    self.intensity_ub = self.soft_relu(self.intensity_tilde_ub)
    # intensity computation is finished
    #
    #
Example 23
Project: neurawkes Author: HMEIatJHU File: sequence_generators.py License: MIT License

def compute_intensity_given_past(self, time_current):
    # compute the intensity of current time
    # given the past events
    #
    time_recent = self.one_seq[-1]['time_since_start']
    #
    delta = self.soft_relu(
        numpy.tensordot(self.hidden_t, self.W_delta, (0, 0)))
    #
    hidden_with_time = numpy.exp(
        -delta * (time_current - time_recent)) * self.hidden_t[:, None]
    # (self.dim_model, self.dim_process)
    # self.W_alpha (self.dim_model, self.dim_process)
    self.intensity_tilde = numpy.sum(
        self.W_alpha * hidden_with_time, axis=0
    ) + numpy.dot(self.hidden_t, self.W_mu)
    #
    self.intensity = self.soft_relu(self.intensity_tilde)
    # intensity computation is finished
    #
Example 24
Project: neurawkes Author: HMEIatJHU File: sequence_generators.py License: MIT License

def compute_intensity_given_past(self, time_current):
    # compute the intensity of current time
    # given the past events
    #
    time_recent = self.one_seq[-1]['time_since_start']
    #
    delta = self.soft_relu(
        numpy.tensordot(self.hidden_t, self.W_delta, (0, 0)))
    #
    hidden_with_time = numpy.exp(
        -delta * (time_current - time_recent)) * self.hidden_t[:, None]
    # (self.dim_model, self.dim_process)
    # self.W_alpha (self.dim_model, self.dim_process)
    self.intensity_tilde = numpy.sum(
        self.W_alpha * hidden_with_time, axis=0
    ) + numpy.dot(self.hidden_t, self.W_mu)
    #
    self.intensity = self.soft_relu_scale(self.intensity_tilde)
    # intensity computation is finished
    #
Example 25
Project: neurawkes Author: HMEIatJHU File: sequence_generators.py License: MIT License

def compute_intensity_upper_bound(self, time_current):
    # compute the upper bound of intensity
    # at the current time
    time_recent = self.one_seq[-1]['time_since_start']
    #
    delta = self.soft_relu(
        numpy.tensordot(self.hidden_t, self.W_delta, (0, 0)))
    #
    hidden_with_time = numpy.exp(
        -delta * (time_current - time_recent)) * self.hidden_t[:, None]
    # (self.dim_model, self.dim_process)
    # self.W_alpha (self.dim_model, self.dim_process)
    self.intensity_tilde_ub = numpy.sum(
        self.hard_relu(self.W_alpha * hidden_with_time), axis=0
    ) + numpy.dot(self.hidden_t, self.W_mu)
    # this part is time-invariant so
    # we do not need to take its hard_relu
    #self.hard_relu(
    #    numpy.dot(
    #        self.hidden_t, self.W_mu
    #    )
    #)
    #
    self.intensity_ub = self.soft_relu_scale(self.intensity_tilde_ub)
    # intensity computation is finished
    #
    #
Example 26
Project: tensorflow_constrained_optimization Author: google-research File: candidates_test.py License: Apache License 2.0

def test_best_distribution(self):
    """Distribution should match known solution."""
    distribution = candidates.find_best_candidate_distribution(
        self._objective_vector, self._constraints_matrix)

    # Verify that the solution is a probability distribution.
    self.assertTrue(np.all(distribution >= -1e-6))
    self.assertAlmostEqual(np.sum(distribution), 1.0)

    # Verify that the solution satisfies the constraints.
    maximum_constraint_violation = np.amax(
        np.tensordot(self._constraints_matrix, distribution, axes=(0, 0)))
    self.assertLessEqual(maximum_constraint_violation, 1e-6)

    # Verify that the solution matches that which we expect.
    expected_distribution = np.array([0.37872711, 0.62127289, 0, 0])
    self.assertAllClose(expected_distribution, distribution, rtol=0, atol=1e-6)
Example 27
Project: recruit Author: Frank-qlu File: test_linalg.py License: Apache License 2.0

def test_tensorinv_result(self):
    # mimic a docstring example
    a = np.eye(24)
    a.shape = (24, 8, 3)
    ainv = linalg.tensorinv(a, ind=1)
    b = np.ones(24)
    assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
Example 28
Project: recruit Author: Frank-qlu File: test_einsum.py License: Apache License 2.0

def test_einsum_fixedstridebug(self):
    # Issue #4485: obscure einsum bug
    # This case revealed a bug in nditer where it reported a stride
    # as 'fixed' (0) when it was in fact not fixed during processing
    # (0 or 4).  The reason for the bug was that the check for a fixed
    # stride was using the information from the 2D inner loop reuse
    # to restrict the iteration dimensions it had to validate to be
    # the same, but that 2D inner loop reuse logic is only triggered
    # during the buffer copying step, and hence it was invalid to
    # rely on those values.  The fix is to check all the dimensions
    # of the stride in question, which in the test case reveals that
    # the stride is not fixed.
    #
    # NOTE: This test is triggered by the fact that the default buffersize,
    #       used by einsum, is 8192, and 3*2731 = 8193, is larger than that
    #       and results in a mismatch between the buffering and the
    #       striding for operand A.
    A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
    B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
    es = np.einsum('cl, cpx->lpx', A, B)
    tp = np.tensordot(A, B, axes=(0, 0))
    assert_equal(es, tp)
    # The following is the original test case from the bug report,
    # made repeatable by changing random arrays to aranges.
    A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
    B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
    es = np.einsum('cl, cpxy->lpxy', A, B)
    tp = np.tensordot(A, B, axes=(0, 0))
    assert_equal(es, tp)
Example 29
Project: recruit Author: Frank-qlu File: test_numeric.py License: Apache License 2.0

def test_zero_dimension(self):
    # Test resolution to issue #5663
    a = np.ndarray((3, 0))
    b = np.ndarray((0, 4))
    td = np.tensordot(a, b, (1, 0))
    assert_array_equal(td, np.dot(a, b))
    assert_array_equal(td, np.einsum('ij,jk', a, b))
Example 30
Project: pytim Author: Marcello-Sega File: local_frame.py License: GNU General Public License v3.0

def _compute_local_coords(self, box):
    local_coords = []
    for i, patch in enumerate(self.patches):
        p = self.tree.data[patch].copy()
        p = self.remove_pbc(p, box)
        cm = np.mean(p, axis=0)
        try:
            refp = self.reftree.data[self.refpatches[i]].copy()
            refp = self.remove_pbc(refp, box)
            # note that refp includes also surface atoms.
            # here we try to remove their contribution.
            if (len(refp) - len(p)):
                refcm = (np.sum(refp, axis=0) - np.sum(p, axis=0)
                         ) / (len(refp) - len(p))
            else:
                refcm = np.mean(refp)
            dcm = cm - refcm
        except AttributeError:
            dcm = np.zeros(3)

        dr = (p - cm)
        # compute the 'inertia' tensor
        I = -np.tensordot(dr[:], dr[:], axes=(0, 0)) + \
            np.diag([np.sum(dr**2) / len(dr)] * 3)
        U, S, V = np.linalg.svd(I)
        z = np.argmin(S)
        UU = np.roll(U, 2 - z, axis=-1).T
        # flip normal if cm on the same side of normal
        UU[2] *= ((np.dot(UU[2], dcm) <= 0) * 2. - 1.)
        local_coords.append(UU.T)
    return np.array(local_coords)