# Python numpy.einsum() Examples

The following are 30 code examples for showing how to use numpy.einsum(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module, or try the search function.

Example 1
def validate_and_fill_geometry(geom=None, tooclose=0.1, copy=True):
    """Check ``geom`` for overlapping atoms and return it flattened.

    Parameters
    ----------
    geom : array-like
        Cartesian coordinates; reshaped to (natom, 3).
    tooclose : float
        Minimum allowed interatomic distance.
    copy : bool
        Whether to copy the input array.

    Returns
    -------
    dict
        ``{"geom": flat}`` where ``flat`` has shape (natom * 3,).

    Raises
    ------
    ValidationError
        If any pair of atoms is closer than ``tooclose``.
    """
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the equivalent dtype.
    npgeom = np.array(geom, copy=copy, dtype=float).reshape((-1, 3))

    # Compare each atom only against later atoms (upper triangle of the
    # distance matrix) so each pair is tested once.
    metric = tooclose ** 2
    tooclose_inds = []
    for x in range(npgeom.shape[0]):
        diffs = npgeom[x] - npgeom[x + 1 :]
        dists = np.einsum("ij,ij->i", diffs, diffs)  # squared distances

        # Record offending pairs with their true (rooted) distances.
        if np.any(dists < metric):
            indices = np.where(dists < metric)[0]
            tooclose_inds.extend([(x, y, dist) for y, dist in zip(indices + x + 1, dists[indices] ** 0.5)])

    if tooclose_inds:
        raise ValidationError(
            """Following atoms are too close: {}""".format([(i, j, dist) for i, j, dist in tooclose_inds])
        )

    return {"geom": npgeom.reshape((-1))}
Example 2
def generate_inflate(cloth):
    """Blow it up baby!

    Accumulates an offset along each triangle normal into ``cloth.inflate``
    (scaled by the object's inflate setting and the per-vertex tri_mix
    weights), then zeroes the buffer in place.

    NOTE(review): the final ``*= 0`` leaves ``cloth.inflate`` all zeros —
    presumably other references to the array consume the values before this
    runs; confirm against the caller.
    """
    # Non-unit normals computed per triangle by tri_normals_in_place().
    tri_nor = cloth.normals

    original_shape = cloth.inflate.shape
    cloth.inflate += tri_nor[:, nax] * cloth.ob.modeling_cloth_inflate

    # Flatten to per-vertex rows so tri_mix can weight each vertex.
    cloth.inflate.shape = (original_shape[0] * 3, 3)
    cloth.inflate *= cloth.tri_mix
    cloth.inflate.shape = original_shape

    # Zero the accumulator in place.
    cloth.inflate *= 0
Example 3
def tda_denisty_matrix(td, state_id):
    '''
    Taking the TDA amplitudes as CIS coefficients, build the density
    matrix (in the AO basis) of excited state ``state_id``.
    '''
    # Occupied-occupied and virtual-virtual relaxation contributions.
    amplitudes = td.xy[state_id][0]
    dm_oo = -np.einsum('ia,ka->ik', amplitudes.conj(), amplitudes)
    dm_vv = np.einsum('ia,ic->ac', amplitudes, amplitudes.conj())

    # Start from the ground-state density matrix in the MO basis.
    mf = td._scf
    dm = np.diag(mf.mo_occ)

    # The factor 2 accounts for the two spin channels.
    n_occ = amplitudes.shape[0]
    dm[:n_occ, :n_occ] += 2 * dm_oo
    dm[n_occ:, n_occ:] += 2 * dm_vv

    # Back-transform MO -> AO.
    mo = mf.mo_coeff
    return np.einsum('pi,ij,qj->pq', mo, dm, mo.conj())

# Density matrix for the 3rd excited state 
Example 4
def ss(mol):
    '''Two-electron spin-spin integrals contracted with the Pauli matrices.

    Returns an 8-index array: two pairs of spin indices (w, x, y, z)
    followed by four AO indices (i, j, k, l).
    '''
    n = mol.nao_nr()
    mat1 = mol.intor('int2e_ip1ip2_sph').reshape(3,3,n,n,n,n)  # <nabla1 nabla2 | 1 2>
    mat2 =-mat1.transpose(0,1,2,3,5,4)  # <nabla1 2 | 1 nabla2>
    mat3 =-mat2.transpose(1,0,3,2,4,5)  # <1 nabla2 | nabla1 2>
    mat4 = mat1.transpose(0,1,3,2,5,4)  # <1 2 | nabla1 nabla2>
    mat = mat1 - mat2 - mat3 + mat4
    # Fermi contact term.
    # BUG FIX: the original used the undefined name `nao`; the AO dimension
    # is `n` (defined above), so this raised NameError.
    h_fc = mol.intor('int4c1e').reshape(n,n,n,n) * (4*numpy.pi/3)
    mat[0,0] -= h_fc
    mat[1,1] -= h_fc
    mat[2,2] -= h_fc

    s = lib.PauliMatrices * .5
    # wxyz are the spin indices, ijkl are the AO indices
    # NOTE(review): 137.036 is the speed of light in atomic units
    # (1/fine-structure constant) despite being named `alpha`.
    alpha = 137.036
    fac = alpha ** 2 / 2
    mat = numpy.einsum('swx,tyz,stijkl->wxyzijkl', s[:,0,0], s[:,0,0], mat) * fac
    return mat
Example 5
def force(dm):
    """Force on the MM point charges from the QM nuclei and the electron
    density described by ``dm``.

    Relies on module-level ``mol`` (QM molecule), ``coords`` and
    ``charges`` (MM particles) defined elsewhere in the original project.
    Returns an array indexed by MM particle and Cartesian component.
    """
    # The interaction between QM atoms and MM particles
    # \sum_K d/dR (1/|r_K-R|) = \sum_K (r_K-R)/|r_K-R|^3
    qm_coords = mol.atom_coords()
    qm_charges = mol.atom_charges()
    dr = qm_coords[:,None,:] - coords
    r = numpy.linalg.norm(dr, axis=2)
    # 'r' runs over QM atoms, 'R' over MM particles, 'x' over x/y/z.
    g = numpy.einsum('r,R,rRx,rR->Rx', qm_charges, charges, dr, r**-3)

    # The interaction between electron density and MM particles
    # d/dR <i| (1/|r-R|) |j> = <i| d/dR (1/|r-R|) |j> = <i| -d/dr (1/|r-R|) |j>
    #   = <d/dr i| (1/|r-R|) |j> + <i| (1/|r-R|) |d/dr j>
    for i, q in enumerate(charges):
        with mol.with_rinv_origin(coords[i]):
            v = mol.intor('int1e_iprinv')
        # Contract both the bra- and ket-derivative pieces with the density.
        f =(numpy.einsum('ij,xji->x', dm, v) +
            numpy.einsum('ij,xij->x', dm, v.conj())) * -q
        g[i] += f

    # Force = -d/dR
    return -g

# The force from HF electron density
# Be careful with the unit of the MM particle coordinates. The gradients are
# computed in the atomic unit.
Example 6
def myump2(mol, mo_energy, mo_coeff, mo_occ):
    """MP2-style correlation energy from spin-unrestricted orbitals.

    Alpha and beta occupied (virtual) orbitals are stacked side by side,
    and blocks of the (ov|ov) integrals are zeroed so that only allowed
    spin combinations survive.  Relies on module-level ``ao2mo``.
    """
    # Stack alpha and beta occupied / virtual MO coefficients and energies.
    o = numpy.hstack((mo_coeff[0][:,mo_occ[0]>0] ,mo_coeff[1][:,mo_occ[1]>0]))
    v = numpy.hstack((mo_coeff[0][:,mo_occ[0]==0],mo_coeff[1][:,mo_occ[1]==0]))
    eo = numpy.hstack((mo_energy[0][mo_occ[0]>0] ,mo_energy[1][mo_occ[1]>0]))
    ev = numpy.hstack((mo_energy[0][mo_occ[0]==0],mo_energy[1][mo_occ[1]==0]))
    no = o.shape[1]
    nv = v.shape[1]
    noa = sum(mo_occ[0]>0)   # number of occupied alpha orbitals
    nva = sum(mo_occ[0]==0)  # number of virtual alpha orbitals
    # (ov|ov) integrals in the stacked MO basis.
    eri = ao2mo.general(mol, (o,v,o,v)).reshape(no,nv,no,nv)
    # Zero blocks that pair an alpha occupied with a beta virtual (and vice
    # versa) — NOTE(review): presumably removing spin-forbidden blocks;
    # confirm the index convention against the original project.
    eri[:noa,nva:] = eri[noa:,:nva] = eri[:,:,:noa,nva:] = eri[:,:,noa:,:nva] = 0
    # Antisymmetrized integrals and the energy denominators.
    g = eri - eri.transpose(0,3,2,1)
    eov = eo.reshape(-1,1) - ev.reshape(-1)
    de = 1/(eov.reshape(-1,1) + eov.reshape(-1)).reshape(g.shape)
    emp2 = .25 * numpy.einsum('iajb,iajb,iajb->', g, g, de)
    return emp2
Example 7
def hcore_grad_generator(x2cobj, mol=None):
    '''Build a function that returns nuclear gradients of the 1-component
    X2C hcore Hamiltonian (spin-free part only) for a given atom.'''
    if mol is None:
        mol = x2cobj.mol
    xmol, contr_coeff = x2cobj.get_xmol(mol)

    # With a special X2C basis, re-derive the contraction coefficients by
    # projecting the X2C basis onto the original one.
    if x2cobj.basis is not None:
        ovlp_xx = xmol.intor_symmetric('int1e_ovlp')
        ovlp_xm = gto.intor_cross('int1e_ovlp', xmol, mol)
        contr_coeff = lib.cho_solve(ovlp_xx, ovlp_xm)

    get_h1_xmol = gen_sf_hfw(xmol, x2cobj.approx)

    def hcore_deriv(atm_id):
        deriv = get_h1_xmol(atm_id)
        if contr_coeff is not None:
            # Contract both AO indices back to the original basis.
            deriv = lib.einsum('pi,xpq,qj->xij', contr_coeff, deriv, contr_coeff)
        return numpy.asarray(deriv)

    return hcore_deriv
Example 8
def _invsqrt2(a0, a1i, a1j, a2ij):
'''Solving first order derivative of x^2 = a^{-1}'''
w, v = scipy.linalg.eigh(a0)
w = 1./numpy.sqrt(w)
a1i = reduce(numpy.dot, (v.conj().T, a1i, v))
x1i = numpy.einsum('i,ij,j->ij', w**2, a1i, w**2) / (w[:,None] + w)

a1j = reduce(numpy.dot, (v.conj().T, a1j, v))
x1j = numpy.einsum('i,ij,j->ij', w**2, a1j, w**2) / (w[:,None] + w)

a2ij = reduce(numpy.dot, (v.conj().T, a2ij, v))
tmp = (a1i*w**2).dot(a1j)
a2ij -= tmp + tmp.conj().T
a2ij = -numpy.einsum('i,ij,j->ij', w**2, a2ij, w**2)
tmp = x1i.dot(x1j)
a2ij -= tmp + tmp.conj().T
x2 = a2ij / (w[:,None] + w)
x2 = reduce(numpy.dot, (v, x2, v.conj().T))
return x2 
Example 9
def get_x1(mol, ia):
    """First-order response of the X2C decoupling matrix with respect to
    the nuclear coordinates of atom ``ia``.

    Relies on ``get_h0_s0`` / ``get_h1_s1`` defined elsewhere in the file.
    Returns an array of shape (3, nao, nao) — one block per Cartesian
    direction.
    """
    h0, s0 = get_h0_s0(mol)
    h1, s1 = get_h1_s1(mol, ia)
    # Unperturbed generalized eigenproblem; columns nao: select the upper
    # half of the spectrum.
    e0, c0 = scipy.linalg.eigh(h0, s0)
    nao = mol.nao_nr()
    cl0 = c0[:nao,nao:]  # large-component block (presumably — confirm)
    cs0 = c0[nao:,nao:]  # small-component block (presumably — confirm)
    x0 = scipy.linalg.solve(cl0.T, cs0.T).T
    # First-order perturbation theory for the eigenvectors: transform the
    # perturbations into the eigenbasis and divide by energy differences.
    h1 = numpy.einsum('pi,xpq,qj->xij', c0.conj(), h1, c0[:,nao:])
    s1 = numpy.einsum('pi,xpq,qj->xij', c0.conj(), s1, c0[:,nao:])
    epi = e0[:,None] - e0[nao:]
    c1 = (h1 - s1 * e0[nao:]) / -epi
    c1 = numpy.einsum('pq,xqi->xpi', c0, c1)
    cl1 = c1[:,:nao]
    cs1 = c1[:,nao:]
    # Differentiate X = Cs Cl^{-1}: X1 = (Cs1 - X0 Cl1) Cl0^{-1},
    # solved once per Cartesian direction.
    x1 = [scipy.linalg.solve(cl0.T, (cs1[i] - x0.dot(cl1[i])).T).T
          for i in range(3)]
    return numpy.asarray(x1)
Example 10
def hcore_hess_generator(x2cobj, mol=None):
    '''Build a function that returns second nuclear derivatives of the
    1-component X2C hcore Hamiltonian (spin-free part only) for a pair of
    atoms.'''
    if mol is None:
        mol = x2cobj.mol
    xmol, contr_coeff = x2cobj.get_xmol(mol)

    # With a special X2C basis, re-derive the contraction coefficients by
    # projecting the X2C basis onto the original one.
    if x2cobj.basis is not None:
        ovlp_xx = xmol.intor_symmetric('int1e_ovlp')
        ovlp_xm = gto.intor_cross('int1e_ovlp', xmol, mol)
        contr_coeff = lib.cho_solve(ovlp_xx, ovlp_xm)

    get_h1_xmol = gen_sf_hfw(xmol, x2cobj.approx)

    def hcore_deriv(ia, ja):
        deriv = get_h1_xmol(ia, ja)
        if contr_coeff is not None:
            # Contract both AO indices back to the original basis; x and y
            # run over the two atoms' Cartesian components.
            deriv = lib.einsum('pi,xypq,qj->xyij', contr_coeff, deriv, contr_coeff)
        return numpy.asarray(deriv)

    return hcore_deriv
Example 11
def normalize_dm_(mf, dm):
    '''
    Scale density matrix to make it produce the correct number of electrons.
    '''
    cell = mf.cell
    ovlp = mf.get_ovlp(cell)
    # Tr(dm S) counts electrons; the 'x' index sums over a stack of dms.
    single_dm = isinstance(dm, np.ndarray) and dm.ndim == 2
    subscripts = 'ij,ji->' if single_dm else 'xij,ji->'
    ne = np.einsum(subscripts, dm, ovlp).real
    if abs(ne - cell.nelectron).sum() > 1e-7:
        logger.debug(mf, 'Big error detected in the electron number '
                     'of initial guess density matrix (Ne/cell = %g)!\n'
                     '  This can cause huge error in Fock matrix and '
                     'lead to instability in SCF for low-dimensional '
                     'systems.\n  DM is normalized wrt the number '
                     'of electrons %s', ne, cell.nelectron)
        # Rescale in place so Tr(dm S) matches the electron count.
        dm *= cell.nelectron / ne
    return dm
Example 12
def spin_square(self, mo_coeff=None, s=None):
    '''Treating the k-point sampling wfn as a giant Slater determinant,
    the spin_square value is the <S^2> of the giant determinant.
    '''
    nkpts = len(self.kpts)
    if mo_coeff is None:
        mo_a = [self.mo_coeff[0][k][:, self.mo_occ[0][k] > 0] for k in range(nkpts)]
        mo_b = [self.mo_coeff[1][k][:, self.mo_occ[1][k] > 0] for k in range(nkpts)]
    else:
        mo_a, mo_b = mo_coeff
    if s is None:
        s = self.get_ovlp()

    # Electron counts summed over all k-points.
    nelec_a = sum(mo_a[k].shape[1] for k in range(nkpts))
    nelec_b = sum(mo_b[k].shape[1] for k in range(nkpts))

    # <Sx^2 + Sy^2>: start from (Na+Nb)/2 and subtract |<i_a|j_b>|^2.
    ssxy = (nelec_a + nelec_b) * .5
    for k in range(nkpts):
        sij = mo_a[k].T.conj().dot(s[k]).dot(mo_b[k])
        ssxy -= np.einsum('ij,ij->', sij.conj(), sij).real
    # <Sz^2> of a single determinant.
    ssz = (nelec_b - nelec_a)**2 * .25
    ss = ssxy + ssz
    # Effective spin s from s(s+1) = <S^2>; multiplicity is 2s+1.
    spin = np.sqrt(ss + .25) - .5
    return ss, spin * 2 + 1
Example 13
def energy_elec(mf, dm_kpts=None, h1e_kpts=None, vhf_kpts=None):
    '''Following pyscf.scf.hf.energy_elec()

    Returns (total electronic energy, Coulomb part), averaged over the
    sampled k-points.
    '''
    if dm_kpts is None: dm_kpts = mf.make_rdm1()
    if h1e_kpts is None: h1e_kpts = mf.get_hcore()
    if vhf_kpts is None: vhf_kpts = mf.get_veff(mf.cell, dm_kpts)

    nkpts = len(dm_kpts)
    # k-point averaged one-electron and Coulomb energies.
    e1 = 1./nkpts * np.einsum('kij,kji', dm_kpts, h1e_kpts)
    e_coul = 1./nkpts * np.einsum('kij,kji', dm_kpts, vhf_kpts) * 0.5
    mf.scf_summary['e1'] = e1.real
    mf.scf_summary['e2'] = e_coul.real
    logger.debug(mf, 'E1 = %s  E_coul = %s', e1, e_coul)
    # BUG FIX: the original wrote abs(e_coul.imag > threshold), which takes
    # abs() of a boolean and therefore missed large *negative* imaginary
    # parts; the magnitude must be compared against the threshold.
    if CHECK_COULOMB_IMAG and abs(e_coul.imag) > mf.cell.precision*10:
        logger.warn(mf, "Coulomb energy has imaginary part %s. "
                    "Coulomb integrals (e-e, e-N) may not converge !",
                    e_coul.imag)
    return (e1+e_coul).real, e_coul.real
Example 14
def _gamma1_intermediates(mp, t2=None):
    """One-particle density-matrix intermediates from k-point MP2 t2
    amplitudes.

    Returns ``(-dm1occ, dm1vir)``: occupied-occupied and virtual-virtual
    blocks, one matrix per k-point.
    """
    # Memory optimization should be here
    if t2 is None:
        t2 = mp.t2
    if t2 is None:
        raise NotImplementedError("Run kmp2.kernel with with_t2=True")
    nmo = mp.nmo
    nocc = mp.nocc
    nvir = nmo - nocc
    nkpts = mp.nkpts
    dtype = t2.dtype

    dm1occ = np.zeros((nkpts, nocc, nocc), dtype=dtype)
    dm1vir = np.zeros((nkpts, nvir, nvir), dtype=dtype)

    # Loop over k-point triples; kb is fixed by momentum conservation via
    # the kconserv lookup table.
    for ki in range(nkpts):
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = mp.khelper.kconserv[ki, ka, kj]

                # Factor-2 "direct" contraction minus the index-swapped
                # "exchange" contraction, accumulated per k-point.
                dm1vir[kb] += einsum('ijax,ijay->yx', t2[ki][kj][ka].conj(), t2[ki][kj][ka]) * 2 -\
                              einsum('ijax,ijya->yx', t2[ki][kj][ka].conj(), t2[ki][kj][kb])
                dm1occ[kj] += einsum('ixab,iyab->xy', t2[ki][kj][ka].conj(), t2[ki][kj][ka]) * 2 -\
                              einsum('ixab,iyba->xy', t2[ki][kj][ka].conj(), t2[ki][kj][kb])
    return -dm1occ, dm1vir
Example 15
def make_h3_2_5() -> Tuple[RestrictedHartreeFockObjective, of.MolecularData,
                           np.ndarray, np.ndarray, np.ndarray]:
    """Load the H3+ (bond distance 2.5) test molecule and build its
    restricted Hartree-Fock objective.

    NOTE(review): as extracted, ``Hcore``, ``S`` and ``TEI`` are undefined
    in this body (and ``hfvqe`` is not imported here) — a line such as
    ``S, Hcore, TEI = get_ao_integrals(molecule)`` appears to have been
    lost; recover it from the original project before use.
    """
    # load the molecule from molecular data
    h3_2_5_path = os.path.join(
        hfvqe.__path__[0],
        'molecular_data/hydrogen_chains/h_3_p_sto-3g/bond_distance_2.5')

    molfile = os.path.join(h3_2_5_path,
                           'H3_plus_sto-3g_singlet_linear_r-2.5.hdf5')
    molecule = of.MolecularData(filename=molfile)

    _, X = sp.linalg.eigh(Hcore, S)
    obi = of.general_basis_change(Hcore, X, (1, 0))
    # Reorder the two-body integral axes for generate_hamiltonian.
    tbi = np.einsum('psqr', of.general_basis_change(TEI, X, (1, 0, 1, 0)))
    molecular_hamiltonian = generate_hamiltonian(obi, tbi,
                                                 molecule.nuclear_repulsion)

    rhf_objective = RestrictedHartreeFockObjective(molecular_hamiltonian,
                                                   molecule.n_electrons)

    scipy_result = rhf_minimization(rhf_objective)
    return rhf_objective, molecule, scipy_result.x, obi, tbi
Example 16
def make_h6_1_3() -> Tuple[RestrictedHartreeFockObjective,
                           of.MolecularData,
                           np.ndarray,
                           np.ndarray,
                           np.ndarray]:
    """Load the H6 (bond distance 1.3) test molecule and build its
    restricted Hartree-Fock objective.

    NOTE(review): ``Hcore``, ``S`` and ``TEI`` are undefined in this body
    as extracted — an integral-loading line appears to have been lost;
    recover it from the original project before use.
    """
    # load the molecule from molecular data
    import openfermioncirq.experiments.hfvqe as hfvqe
    h6_1_3_path = os.path.join(
        hfvqe.__path__[0],
        'molecular_data/hydrogen_chains/h_6_sto-3g/bond_distance_1.3')

    molfile = os.path.join(h6_1_3_path, 'H6_sto-3g_singlet_linear_r-1.3.hdf5')
    molecule = of.MolecularData(filename=molfile)

    _, X = sp.linalg.eigh(Hcore, S)
    obi = of.general_basis_change(Hcore, X, (1, 0))
    # Reorder the two-body integral axes for generate_hamiltonian.
    tbi = np.einsum('psqr', of.general_basis_change(TEI, X, (1, 0, 1, 0)))
    molecular_hamiltonian = generate_hamiltonian(obi, tbi,
                                                 molecule.nuclear_repulsion)

    rhf_objective = RestrictedHartreeFockObjective(molecular_hamiltonian,
                                                   molecule.n_electrons)

    scipy_result = rhf_minimization(rhf_objective)

    return rhf_objective, molecule, scipy_result.x, obi, tbi
Example 17
def make_rhf_objective(molecule: of.MolecularData):
    """Build the restricted Hartree-Fock objective for ``molecule``,
    returning the objective along with the AO and transformed integrals."""
    ao_ovlp, ao_core, ao_tei = get_ao_integrals(molecule)
    # Orthogonalizing transformation from the generalized eigenproblem.
    _, basis_change = sp.linalg.eigh(ao_core, ao_ovlp)

    one_body = of.general_basis_change(ao_core, basis_change, (1, 0))
    # Reorder the two-body integral axes for generate_hamiltonian.
    two_body = np.einsum(
        'psqr', of.general_basis_change(ao_tei, basis_change, (1, 0, 1, 0)))
    hamiltonian = generate_hamiltonian(one_body, two_body,
                                       molecule.nuclear_repulsion)

    objective = RestrictedHartreeFockObjective(hamiltonian,
                                               molecule.n_electrons)
    return objective, ao_ovlp, ao_core, ao_tei, one_body, two_body
Example 18
def make_rhf_objective(molecule):
    # coverage: ignore
    '''Build the restricted Hartree-Fock objective for ``molecule``.

    Returns the objective together with the raw AO integrals (S, Hcore,
    TEI).
    '''
    S, Hcore, TEI = get_ao_integrals(molecule)
    evals, X = scipy.linalg.eigh(Hcore, S)

    obi = general_basis_change(Hcore, X, (1, 0))
    # BUG FIX: a misplaced parenthesis previously passed
    # molecule.nuclear_repulsion as a third operand to numpy.einsum
    # instead of as the third argument of generate_hamiltonian (compare
    # the sibling make_rhf_objective above).
    tbi = numpy.einsum('psqr', general_basis_change(TEI, X, (1, 0, 1, 0)))
    molecular_hamiltonian = generate_hamiltonian(obi, tbi,
                                                 molecule.nuclear_repulsion)

    rhf_objective = RestrictedHartreeFockObjective(molecular_hamiltonian,
                                                   molecule.n_electrons)
    return rhf_objective, S, Hcore, TEI
Example 19
def _norm(points) -> float:
"""
Return the Frobenius norm across axis=-1, NumPy's internal norm is crazy slow (~4x)
"""

tmp = np.atleast_2d(points)
return np.sqrt(np.einsum("ij,ij->i", tmp, tmp)) 
Example 20
def compute_angle(points1, points2, points3, *, degrees: bool = False) -> np.ndarray:
    """
    Computes the angle (p1, p2 [vertex], p3) between the provided points on
    a per-row basis.

    Parameters
    ----------
    points1 : np.ndarray
        The first list of points, can be 1D or 2D
    points2 : np.ndarray
        The second list of points (the vertex), can be 1D or 2D
    points3 : np.ndarray
        The third list of points, can be 1D or 2D
    degrees : bool, optional
        Returns the angle in degrees rather than radians if True

    Returns
    -------
    angles : np.ndarray
        The angle between the three points in radians (or degrees)

    Notes
    -----
    Units are not considered inside these expressions, please preconvert
    to the same units before using.
    """
    p1 = np.atleast_2d(points1)
    p2 = np.atleast_2d(points2)
    p3 = np.atleast_2d(points3)

    v12 = p1 - p2
    v23 = p2 - p3

    # Both vectors point "forward" along the path, so the interior angle
    # at the vertex is pi minus the angle between them.
    cos_theta = np.einsum("ij,ij->i", v12, v23) / (_norm(v12) * _norm(v23))
    angles = np.pi - np.arccos(cos_theta)

    return np.degrees(angles) if degrees else angles
Example 21
# NOTE(review): this example was garbled during extraction — several
# condition and call lines are missing, leaving orphaned fragments (a
# dangling `and (...)` and unmatched `else:` branches, and `grad_norms`
# assigned nowhere). The text is preserved verbatim below and is NOT
# valid Python; recover the original implementation before use.
def _crit_active(self, X, pred, grad_crit):
for choice in range(self.nchoices):

and ("coef_" not in dir(self._oracles.algos[choice]))
):
self._oracles.get_n_pos(choice),
self._oracles.get_n_neg(choice),
self._oracles.rng_arm[choice])
else:
X, pred[:, choice])
else:
self._oracles.get_n_pos(choice),
self._oracles.get_n_neg(choice),
self._oracles.rng_arm[choice])

pred[:, choice] = grad_norms.min(axis = 1)
pred[:, choice] = grad_norms.max(axis = 1)
pred[:, choice] = np.einsum("i,ij->i", pred[:, choice], grad_norms)
else:
raise ValueError("Something went wrong. Please open an issue in GitHub indicating what you were doing.")
return pred
Example 22
# NOTE(review): this example was garbled during extraction — the bodies of
# several branches are missing (the sparse branch assigns nothing, and the
# `## coefficients` / `## intercept` sections have lost their statements).
# Preserved verbatim below; NOT valid Python as shown — recover the
# original implementation before use.
def _logistic_grad_norm(X, y, pred, base_algorithm):
coef = base_algorithm.coef_.reshape(-1)[:X.shape[1]]
err = pred - y

if issparse(X):
if not isspmatrix_csr(X):
warnings.warn("Sparse matrix will be cast to CSR format.")
X = csr_matrix(X)
else:
grad_norm = X * err.reshape((-1, 1))

### Note: since this is done on a row-by-row basis on two classes only,
### it doesn't matter whether the loss function is summed or averaged over
### data points, or whether there is regularization or not.

## coefficients
else:

## intercept
if base_algorithm.fit_intercept:

return grad_norm
Example 23
def _score_rnd(self, X):
    # Randomized scoring: either score with one randomly chosen posterior
    # sample, pick a random sample independently per row, or average the
    # per-sample predictions under random weights.
    if not self.ts_byrow:
        chosen_sample = self.random_state.integers(self.nsamples)
        return self._get_score(chosen_sample, X)
    else:
        # Matrix of predictions, one column per sample.
        pred = self._pred_by_sample(X)
        if not self.ts_weighted:
            # One independently drawn sample index per row.
            return pred[np.arange(X.shape[0]),
                        self.random_state.integers(self.nsamples, size=X.shape[0])]
        else:
            # Random weights, then a row-wise weighted sum of the samples.
            # NOTE(review): the normalization uses axis=0 (down columns),
            # so a row's weights need not sum to 1 before the row-wise
            # reduction below — confirm whether axis=1 was intended.
            w = self.random_state.random(size = (X.shape[0], self.nsamples))
            w[:] /= w.sum(axis=0, keepdims=True)
            return np.einsum("ij,ij->i", w, pred)
Example 24
def tri_normals_in_place(object, tri_co):
    """Takes N x 3 x 3 set of 3d triangles and
    returns non-unit normals and origins"""
    # Per-triangle origin (first vertex) and the two edge vectors from it.
    object.origins = tri_co[:,0]
    object.cross_vecs = tri_co[:,1:] - object.origins[:, nax]
    object.normals = np.cross(object.cross_vecs[:,0], object.cross_vecs[:,1])
    # Squared normal lengths, cached on the object for reuse.
    object.nor_dots = np.einsum("ij, ij->i", object.normals, object.normals)
    # Normalize in place (note: despite the docstring, normals end up
    # unit-length here).
    object.normals /= np.sqrt(object.nor_dots)[:, nax]
Example 25
def inside_triangles(tri_vecs, v2, co, tri_co_2, cidx, tidx, nor, ori, in_margin, offset=None):
    """Barycentric inside-triangle test; updates ``in_margin`` in place.

    Computes barycentric coordinates (u, v) from the per-triangle edge
    vectors and writes the boolean result back into ``in_margin`` at the
    positions that were True on entry.

    NOTE(review): ``r_co``/``r_tri`` (and the ``nor``/``ori`` parameters)
    are computed/accepted but never used below — presumably kept for
    interface compatibility with the caller; confirm.
    """
    # Positions of the entries currently flagged in the margin mask.
    idxer = np.arange(in_margin.shape[0], dtype=np.int32)[in_margin]

    r_co = co[cidx[in_margin]]
    r_tri = tri_co_2[tidx[in_margin]]

    v0 = tri_vecs[:,0]
    v1 = tri_vecs[:,1]

    # Row-wise dot products for the 2x2 barycentric solve.
    d00_d11 = np.einsum('ijk,ijk->ij', tri_vecs, tri_vecs)
    d00 = d00_d11[:,0]
    d11 = d00_d11[:,1]
    d01 = np.einsum('ij,ij->i', v0, v1)
    d02 = np.einsum('ij,ij->i', v0, v2)
    d12 = np.einsum('ij,ij->i', v1, v2)

    div = 1 / (d00 * d11 - d01 * d01)
    u = (d11 * d02 - d01 * d12) * div
    v = (d00 * d12 - d01 * d02) * div

    # !!! Watch out for this number. It could affect speed !!!
    # NOTE(review): `if offset:` treats offset=0.0 the same as None —
    # confirm whether `offset is not None` was intended.
    if offset:
        check = (u > -offset) & (v > -offset) & (u + v < offset + 1)
    else:
        check = (u > 0) & (v > 0) & (u + v < 1)
    in_margin[idxer] = check
Example 26
def total_length(ed=None, coords=None, ob=None):
    '''Returns the total length of all edge segments'''
    # Fill any missing inputs from the active Blender object.
    if ob is None:
        ob = bpy.context.object
    if coords is None:
        coords = get_coords(ob)
    if ed is None:
        ed = get_edge_idx(ob)
    # Endpoint pairs per edge -> per-edge difference vectors.
    endpoint_pairs = coords[ed]
    deltas = endpoint_pairs[:, 0] - endpoint_pairs[:, 1]
    squared_lengths = np.einsum('ij,ij->i', deltas, deltas)
    return np.sum(np.sqrt(squared_lengths))
Example 27
def barycentric_generate(hits, tris):
    '''Create scalars to be used by points and triangles'''

    def row_dot(a, b):
        # Row-wise dot product.
        return np.einsum('ij, ij->i', a, b)

    # Project the hit points onto the two triangle edge vectors.
    edge1 = tris[:, 1] - tris[:, 0]
    hit_vecs = hits - tris[:, 0]
    scalar1 = np.nan_to_num(row_dot(hit_vecs, edge1) / row_dot(edge1, edge1))

    edge2 = tris[:, 2] - tris[:, 0]
    scalar2 = np.nan_to_num(row_dot(hit_vecs, edge2) / row_dot(edge2, edge2))

    # Closest point on the edge segment between the two projected points.
    cp1 = edge1 * scalar1[:, None]
    cp2 = edge2 * scalar2[:, None]
    cpvec = cp2 - cp1
    cp1_at = tris[:, 0] + cp1
    hcp = hits - cp1_at
    d3b = row_dot(cpvec, cpvec)
    hcp_scalar = np.nan_to_num(row_dot(hcp, cpvec) / d3b)
    hcp_vec = cpvec * hcp_scalar[:, None]  # kept for parity; unused below

    # Base of the triangle on the edge between the first two points.
    scalar3 = np.nan_to_num(row_dot(-cp1, cpvec) / d3b)
    base_cp_vec = cpvec * scalar3[:, None]
    base_on_span = cp1_at + base_cp_vec

    # Where the point occurs along the vector from the triangle origin to
    # that base point.
    base_vec = base_on_span - tris[:, 0]
    scalar_final = np.nan_to_num(row_dot(hit_vecs, base_vec) / row_dot(base_vec, base_vec))
    p_on_bv = base_vec * scalar_final[:, None]
    perp = p_on_bv - (cp1 + base_cp_vec)  # kept for parity; unused below

    return scalar1, scalar2, hcp_scalar, scalar3, scalar_final
Example 28
def barycentric_remap_multi(tris, sc1, sc2, sc3, sc4, sc5, scale):
    '''Uses the scalars generated by barycentric_generate() to remap the points
    on the triangles and off the surface as the surface mesh changes'''

    def row_dot(a, b):
        # Row-wise dot product.
        return np.einsum('ij,ij->i', a, b)

    edge1 = tris[:, 1] - tris[:, 0]
    edge2 = tris[:, 2] - tris[:, 0]

    # Closest point on the edge segment between the two projected points.
    cp1 = edge1 * np.expand_dims(sc1, axis=1)
    cp2 = edge2 * np.expand_dims(sc2, axis=1)
    cpvec = cp2 - cp1
    cp1_at = tris[:, 0] + cp1
    hcp_vec = cpvec * np.expand_dims(sc3, axis=1)

    # Base of the triangle on the edge between the first two points.
    base_cp_vec = cpvec * np.expand_dims(sc4, axis=1)
    base_on_span = cp1_at + base_cp_vec

    # Point along the origin-to-base vector plus its perpendicular rest.
    base_vec = base_on_span - tris[:, 0]
    p_on_bv = base_vec * np.expand_dims(sc5, axis=1)
    perp = p_on_bv - (cp1 + base_cp_vec)

    # Average edge length applied to the unit cross product (times the
    # user scale) so the offset tracks the mesh size.
    cross = np.cross(edge1, edge2)
    sq = np.sqrt(row_dot(cross, cross))
    av_root = np.sqrt((row_dot(edge1, edge1) + row_dot(edge2, edge2)) / 2)
    cr_root = (cross / np.expand_dims(sq, axis=1)) * np.expand_dims(av_root * scale, axis=1)

    return tris[:, 0] + cp1 + hcp_vec + perp + cr_root
Example 29
def project_points(points, tri_coords):
    '''Using this to get the points off the surface.
    Takes the average length of two edge vectors per triangle and applies
    it to the length of the normal, so the normal — and the returned
    scale — track the mesh size and changes to individual triangles.'''
    origin = tri_coords[:, 0]
    edge1 = tri_coords[:, 1] - origin
    edge2 = tri_coords[:, 2] - origin
    normal = np.cross(edge1, edge2)

    # Rescale the normal to the average of the two edge lengths.
    normal_len = np.sqrt(np.einsum('ij,ij->i', normal, normal))
    e1_sq = np.einsum('ij,ij->i', edge1, edge1)
    e2_sq = np.einsum('ij,ij->i', edge2, edge2)
    mean_edge = np.sqrt((e1_sq + e2_sq) / 2)
    scaled_normal = (normal / np.expand_dims(normal_len, axis=1)) * np.expand_dims(mean_edge, axis=1)

    # Signed multiple of the scaled normal from the plane to each point.
    rel = points - origin
    num = np.einsum('ij,ij->i', scaled_normal, rel)
    den = np.einsum('ij,ij->i', scaled_normal, scaled_normal)
    scale = np.nan_to_num(num / den)

    # The drop is used by the barycentric generator as points in the triangles.
    drop = points - scaled_normal * np.expand_dims(scale, axis=1)
    return drop, scale
Example 30
def nearest_triangles(surface_coords, follower_coords, tris):
    # NOTE(review): the body of this example was lost during extraction;
    # `sorts` is undefined as shown. Recover the original implementation
    # before using this function.
    return sorts