Python numpy.tensordot() Examples

The following are 30 code examples of numpy.tensordot(), drawn from open-source projects. The source file, project, and license for each example are noted above it.
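Before the project examples, here is a short, self-contained sketch (not taken from any of the projects below) of how the axes argument of numpy.tensordot() behaves when given as an integer, as a pair of axis lists, and as 0:

import numpy as np

a = np.random.rand(3, 4, 5)
b = np.random.rand(4, 5, 6)

# axes=2 (the default) contracts the last two axes of a with the first two axes of b
c = np.tensordot(a, b, axes=2)                      # shape (3, 6)

# axes given as a pair of sequences names the contracted axes explicitly
c2 = np.tensordot(a, b, axes=([1, 2], [0, 1]))      # same result
assert np.allclose(c, c2)

# axes=1 on a matrix and a vector is an ordinary matrix-vector product
M = np.random.rand(3, 4)
v = np.random.rand(4)
assert np.allclose(np.tensordot(M, v, axes=1), M @ v)

# axes=0 contracts nothing: the result is the outer (tensor) product
assert np.allclose(np.tensordot(v, v, axes=0), np.outer(v, v))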
Example #1
Source File: d_dmrg.py    From tenpy with GNU General Public License v3.0
def update_bond(self, i):
        j = (i + 1) % self.psi.L
        # get effective Hamiltonian
        Heff = SimpleHeff(self.LPs[i], self.RPs[j], self.H_mpo[i], self.H_mpo[j])
        # Diagonalize Heff, find ground state `theta`
        theta0 = np.reshape(self.psi.get_theta2(i), [Heff.shape[0]])  # initial guess
        e, v = arp.eigsh(Heff, k=1, which='SA', return_eigenvectors=True, v0=theta0)
        theta = np.reshape(v[:, 0], Heff.theta_shape)
        # split and truncate
        Ai, Sj, Bj = split_truncate_theta(theta, self.chi_max, self.eps)
        # put back into MPS
        Gi = np.tensordot(np.diag(self.psi.Ss[i]**(-1)), Ai, axes=[1, 0])  # vL [vL*], [vL] i vC
        self.psi.Bs[i] = np.tensordot(Gi, np.diag(Sj), axes=[2, 0])  # vL i [vC], [vC*] vC
        self.psi.Ss[j] = Sj  # vC
        self.psi.Bs[j] = Bj  # vC j vR
        self.update_LP(i)
        self.update_RP(j) 
Example #2
Source File: spamvec.py    From pyGSTi with Apache License 2.0
def deriv_wrt_params(self, wrtFilter=None):
        """
        Construct a matrix whose columns are the derivatives of the SPAM vector
        with respect to a single param.  Thus, each column is of length
        get_dimension and there is one column per SPAM vector parameter.

        Returns
        -------
        numpy array
            Array of derivatives, shape == (dimension, num_params)
        """
        dmVec = self.state_vec.todense()

        derrgen = self.error_map.deriv_wrt_params(wrtFilter)  # shape (dim*dim, nParams)
        derrgen.shape = (self.dim, self.dim, derrgen.shape[1])  # => (dim,dim,nParams)

        if self._prep_or_effect == "prep":
            #derror map acts on dmVec
            #return _np.einsum("ijk,j->ik", derrgen, dmVec) # return shape = (dim,nParams)
            return _np.tensordot(derrgen, dmVec, (1, 0))  # return shape = (dim,nParams)
        else:
            # self.error_map acts on the *state* vector before dmVec acts
            # as an effect:  E.dag -> dot(E.dag,errmap) ==> E -> dot(errmap.dag,E)
            #return _np.einsum("jik,j->ik", derrgen.conjugate(), dmVec) # return shape = (dim,nParams)
            return _np.tensordot(derrgen.conjugate(), dmVec, (0, 0))  # return shape = (dim,nParams) 
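The commented-out einsum calls above document the same contractions that the tensordot calls perform. A minimal standalone sketch with small, hypothetical array shapes confirms the equivalence:

import numpy as np

dim, n_params = 4, 3
derrgen = np.random.rand(dim, dim, n_params)   # stands in for the (dim, dim, nParams) derivative tensor
dm_vec = np.random.rand(dim)                   # stands in for the dense SPAM vector

# "ijk,j->ik": contract the middle axis of derrgen with dm_vec
assert np.allclose(np.tensordot(derrgen, dm_vec, (1, 0)),
                   np.einsum("ijk,j->ik", derrgen, dm_vec))

# "jik,j->ik": contract the first axis instead
assert np.allclose(np.tensordot(derrgen.conjugate(), dm_vec, (0, 0)),
                   np.einsum("jik,j->ik", derrgen.conjugate(), dm_vec))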
Example #3
Source File: test_0004_vna.py    From pyscf with Apache License 2.0
def test_vna_lih(self):
    dname = dirname(abspath(__file__))
    n = nao(label='lih', cd=dname)
    m = 200
    dvec,midv = 2*(n.atom2coord[1] - n.atom2coord[0])/m,  (n.atom2coord[1] + n.atom2coord[0])/2.0
    vgrid = np.tensordot(np.array(range(-m,m+1)), dvec, axes=0) + midv
    sgrid = np.array(range(-m,m+1)) * np.sqrt((dvec*dvec).sum())
    
    
    #vgrid = np.array([[-1.517908564663352e+00, 1.180550033093826e+00,0.000000000000000e+00]])
    vna = n.vna(vgrid)
    
    #for v,r in zip(vna,vgrid):
    #  print("%23.15e %23.15e %23.15e %23.15e"%(r[0], r[1], r[2], v))
    
    #print(vna.shape, sgrid.shape)
    np.savetxt('vna_lih_0004.txt', np.row_stack((sgrid, vna)).T)
    ref = np.loadtxt(dname+'/vna_lih_0004.txt-ref')
    for r,d in zip(ref[:,1],vna): self.assertAlmostEqual(r,d) 
Example #4
Source File: ops.py    From strawberryfields with Apache License 2.0
def tensor(u, v, n, pure, pos=None):
    """
    Returns the tensor product of `u` and `v`, with the modes of `v`
    optionally spliced into those of `u` at location `pos`.
    """

    w = np.tensordot(u, v, axes=0)

    if pos is not None:
        if pure:
            scale = 1
        else:
            scale = 2
        for i in range(v.ndim):
            w = np.rollaxis(w, scale * n + i, scale * pos + i)

    return w 
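For reference, the axes=0 call that tensor() starts from is just an outer product whose shape is the concatenation of the input shapes; a small sketch, independent of strawberryfields:

import numpy as np

u = np.random.rand(2, 2)
v = np.random.rand(3, 3)

w = np.tensordot(u, v, axes=0)   # no contraction: shape is u.shape + v.shape
assert w.shape == (2, 2, 3, 3)
assert np.allclose(w, np.einsum("ij,kl->ijkl", u, v))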
Example #5
Source File: program_functions.py    From strawberryfields with Apache License 2.0
def _interleaved_identities(n: int, cutoff_dim: int):
    r"""Maximally entangled state of `n` modes.

    Returns the tensor :math:`\sum_{abc\ldots} \ket{abc\ldots}\bra{abc\ldots}`
    representing an unnormalized, maximally entangled state of `n` subsystems.

    Args:
        n (int): number of subsystems
        cutoff_dim (int): Fock basis truncation dimension

    Returns:
        array: unnormalized maximally entangled state, shape == (cutoff_dim,) * (2*n)
    """
    I = np.identity(cutoff_dim)
    temp = I
    for _ in range(1, n):
        temp = np.tensordot(temp, I, axes=0)

    # use einsum to permute the indices such that |a><a|*|b><b|*|c><c|*... becomes |abc...><abc...|
    sublist = [int(n) for n in np.arange(2 * n).reshape((2, n)).T.reshape([-1])]
    return np.einsum(temp, sublist) 
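As a sanity check of the index-permutation trick above (a standalone reproduction for checking only, not part of strawberryfields), reshaping the result over the grouped ket and bra indices should give the identity on the n-mode space:

import numpy as np

def interleaved_identities(n, cutoff_dim):
    # standalone copy of the routine above
    I = np.identity(cutoff_dim)
    temp = I
    for _ in range(1, n):
        temp = np.tensordot(temp, I, axes=0)
    sublist = [int(k) for k in np.arange(2 * n).reshape((2, n)).T.reshape([-1])]
    return np.einsum(temp, sublist)

out = interleaved_identities(2, 3)
assert out.shape == (3, 3, 3, 3)
assert np.allclose(out.reshape(9, 9), np.eye(9))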
Example #6
Source File: proxy_lagrangian_optimizer_test.py    From tensorflow_constrained_optimization with Apache License 2.0
def test_maximum_eigenvector_power_method(self):
    """Tests power method routine on some known left-stochastic matrices."""
    matrix1 = np.matrix([[0.6, 0.1, 0.1], [0.0, 0.6, 0.9], [0.4, 0.3, 0.0]])
    matrix2 = np.matrix([[0.4, 0.4, 0.2], [0.2, 0.1, 0.5], [0.4, 0.5, 0.3]])

    with self.wrapped_session() as session:
      eigenvector1 = session.run(
          proxy_lagrangian_optimizer._maximal_eigenvector_power_method(
              tf.constant(matrix1)))
      eigenvector2 = session.run(
          proxy_lagrangian_optimizer._maximal_eigenvector_power_method(
              tf.constant(matrix2)))

    # Check that eigenvector1 and eigenvector2 are eigenvectors of matrix1 and
    # matrix2 (respectively) with associated eigenvalue 1.
    matrix_eigenvector1 = np.tensordot(matrix1, eigenvector1, axes=1)
    matrix_eigenvector2 = np.tensordot(matrix2, eigenvector2, axes=1)
    self.assertAllClose(eigenvector1, matrix_eigenvector1, rtol=0, atol=1e-6)
    self.assertAllClose(eigenvector2, matrix_eigenvector2, rtol=0, atol=1e-6) 
Example #7
Source File: tensor.py    From discopy with BSD 3-Clause "New" or "Revised" License
def __init__(self, *dim):
        """
        >>> Id(1)
        Tensor(dom=Dim(1), cod=Dim(1), array=[1])
        >>> list(Id(2).array.flatten())
        [1.0, 0.0, 0.0, 1.0]
        >>> Id(2).array.shape
        (2, 2)
        >>> list(Id(2, 2).array.flatten())[:8]
        [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
        >>> list(Id(2, 2).array.flatten())[8:]
        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
        """
        dim = dim[0] if isinstance(dim[0], Dim) else Dim(*dim)
        array = functools.reduce(
            lambda a, x: np.tensordot(a, np.identity(x), 0)
            if a.shape else np.identity(x), dim, np.array(1))
        array = np.moveaxis(
            array, [2 * i for i in range(len(dim))], list(range(len(dim))))
        super().__init__(dim, dim, array) 
Example #8
Source File: a_mps.py    From tenpy with GNU General Public License v3.0
def correlation_length(self):
        """Diagonalize transfer matrix to obtain the correlation length."""
        import scipy.sparse.linalg.eigen.arpack as arp
        assert self.bc == 'infinite'  # works only in the infinite case
        B = self.Bs[0]  # vL i vR
        chi = B.shape[0]
        T = np.tensordot(B, np.conj(B), axes=[1, 1])  # vL [i] vR, vL* [i*] vR*
        T = np.transpose(T, [0, 2, 1, 3])  # vL vL* vR vR*
        for i in range(1, self.L):
            B = self.Bs[i]
            T = np.tensordot(T, B, axes=[2, 0])  # vL vL* [vR] vR*, [vL] i vR
            T = np.tensordot(T, np.conj(B), axes=[[2, 3], [0, 1]])
            # vL vL* [vR*] [i] vR, [vL*] [i*] vR*
        T = np.reshape(T, (chi**2, chi**2))
        # Obtain the 2nd largest eigenvalue
        eta = arp.eigs(T, k=2, which='LM', return_eigenvectors=False, ncv=20)
        return -self.L / np.log(np.min(np.abs(eta))) 
Example #9
Source File: tdvp_numpy.py    From tenpy with GNU General Public License v3.0
def middle_bond_hamiltonian(Jx, Jz, hx, hz, L):
    """" Returns the spin operators sigma_x and sigma_z for L sites."""
    sx = np.array([[0., 1.], [1., 0.]])
    sz = np.array([[1., 0.], [0., -1.]])

    H_bond = Jx * np.kron(sx, sx) + Jz * np.kron(sz, sz)
    H_bond = H_bond + hx / 2 * np.kron(sx, np.eye(2)) + hx / 2 * np.kron(np.eye(2), sx)
    H_bond = H_bond + hz / 2 * np.kron(sz, np.eye(2)) + hz / 2 * np.kron(np.eye(2), sz)
    H_bond = H_bond.reshape(2, 2, 2, 2).transpose(0, 2, 1, 3).reshape(4, 4)  #i1 i2 i1' i2' -->
    U, s, V = np.linalg.svd(H_bond)

    M1 = np.dot(U, np.diag(s)).reshape(2, 2, 1, 4).transpose(2, 3, 0, 1)
    M2 = V.reshape(4, 1, 2, 2)
    M0 = np.tensordot(np.tensordot([1], [1], axes=0), np.eye(2), axes=0)
    W = []

    for i in range(L):
        if i == L / 2 - 1:
            W.append(M1)
        elif i == L / 2:
            W.append(M2)
        else:
            W.append(M0)
    return W 
Example #10
Source File: test_np_conserved.py    From tenpy with GNU General Public License v3.0
def test_npc_tensordot_extra():
    # check that the sorting of charges is fine with special test matrices
    # which gave me some headaches at some point :/
    chinfo = npc.ChargeInfo([1], ['Sz'])
    leg = npc.LegCharge.from_qflat(chinfo, [-1, 1])
    legs = [leg, leg, leg.conj(), leg.conj()]
    idx = [(0, 0, 0, 0), (0, 1, 0, 1), (0, 1, 1, 0), (1, 0, 0, 1), (1, 0, 1, 0), (1, 1, 1, 1)]
    Uflat = np.eye(4).reshape([2, 2, 2, 2])  # up to numerical rubbish the identity
    Uflat[0, 1, 1, 0] = Uflat[1, 0, 0, 1] = 1.e-20
    U = npc.Array.from_ndarray(Uflat, legs, cutoff=0.)
    theta_flat = np.zeros([2, 2, 2, 2])
    vals = np.random.random(len(idx))
    vals /= np.linalg.norm(vals)
    for i, val in zip(idx, vals):
        theta_flat[i] = val
    theta = npc.Array.from_ndarray(theta_flat, [leg, leg, leg.conj(), leg.conj()], cutoff=0.)
    assert abs(np.linalg.norm(theta_flat) - npc.norm(theta)) < 1.e-14
    Utheta_flat = np.tensordot(Uflat, theta_flat, axes=2)
    Utheta = npc.tensordot(U, theta, axes=2)
    npt.assert_array_almost_equal_nulp(Utheta.to_ndarray(), Utheta_flat, 10)
    assert abs(np.linalg.norm(theta_flat) - npc.norm(Utheta)) < 1.e-10 
Example #11
Source File: pcanet.py    From MNIST-baselines with MIT License
def binary_to_decimal(X):
    """
    Parameters
    ----------
    X: xp.ndarray
        Feature maps
    """
    # This function expects X of shape (n_images, L2, y, x)
    # as an argument.
    # Let's say that X[k] (0 <= k < n_images) can be represented like
    # X[k] = [map_k[0], map_k[1], ..., map_k[L2-1]]
    # where the shape of each map_k is (y, x).
    # Then we calculate
    # a[0] * map_k[0] + a[1] * map_k[1] + ... + a[L2-1] * map_k[L2-1]
    # for each X[k], where a = [2^(L2-1), 2^(L2-2), ..., 2^0]
    # Therefore, the output shape must be (n_images, y, x)
    a = xp.arange(X.shape[1])[::-1]
    a = xp.power(2, a)
    return xp.tensordot(X, a, axes=([1], [0])) 
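A quick numpy-only sketch (shapes hypothetical) of the contraction documented in the comments: the L2 axis is summed against binary weights, leaving one decimal-coded map per image:

import numpy as np

n_images, L2, h, w = 2, 3, 4, 5
X = np.random.randint(0, 2, size=(n_images, L2, h, w))
a = np.power(2, np.arange(L2)[::-1])            # [2^(L2-1), ..., 2, 1]

out = np.tensordot(X, a, axes=([1], [0]))       # contract the L2 axis
assert out.shape == (n_images, h, w)
assert np.array_equal(out, np.einsum("nlyx,l->nyx", X, a))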
Example #12
Source File: test_np_conserved.py    From tenpy with GNU General Public License v3.0
def test_npc_outer():
    for sort in [True, False]:
        print("sort =", sort)
        a = random_Array((6, 7), chinfo3, sort=sort)
        b = random_Array((5, 5), chinfo3, sort=sort)
        aflat = a.to_ndarray()
        bflat = b.to_ndarray()
        c = npc.outer(a, b)
        c.test_sanity()
        cflat = np.tensordot(aflat, bflat, axes=0)
        npt.assert_equal(c.to_ndarray(), cflat)
        c = npc.tensordot(a, b, axes=0)  # (should as well call npc.outer)
        npt.assert_equal(c.to_ndarray(), cflat)

    print("for trivial charge")
    a = npc.Array.from_func(np.random.random, [lcTr, lcTr.conj()], shape_kw='size')
    aflat = a.to_ndarray()
    b = npc.tensordot(a, a, axes=0)
    bflat = np.tensordot(aflat, aflat, axes=0)
    npt.assert_array_almost_equal_nulp(b.to_ndarray(), bflat, sum(a.shape)) 
Example #13
Source File: lax_numpy_test.py    From trax with Apache License 2.0
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
    rng = rng_factory()
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)
    def onp_fun(a, b):
      a = a if lhs_dtype != lnp.bfloat16 else a.astype(onp.float32)
      b = b if rhs_dtype != lnp.bfloat16 else b.astype(onp.float32)
      dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
      return onp.tensordot(a, b, axes).astype(dtype)
    tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
           onp.complex64: 1e-3, onp.complex128: 1e-12}
    if jtu.device_under_test() == "tpu":
      tol[onp.float32] = tol[onp.complex64] = 2e-1
    self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
                            tol=tol)
    self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True,
                          check_incomplete_shape=True) 
Example #14
Source File: smpl_np.py    From SMPL with MIT License
def update(self):
    """
    Called automatically when parameters are updated.

    """
    # how beta affects body shape
    v_shaped = self.shapedirs.dot(self.beta) + self.v_template
    # joints location
    self.J = self.J_regressor.dot(v_shaped)
    pose_cube = self.pose.reshape((-1, 1, 3))
    # rotation matrix for each joint
    self.R = self.rodrigues(pose_cube)
    I_cube = np.broadcast_to(
      np.expand_dims(np.eye(3), axis=0),
      (self.R.shape[0]-1, 3, 3)
    )
    lrotmin = (self.R[1:] - I_cube).ravel()
    # how pose affects body shape in zero pose
    v_posed = v_shaped + self.posedirs.dot(lrotmin)
    # world transformation of each joint
    G = np.empty((self.kintree_table.shape[1], 4, 4))
    G[0] = self.with_zeros(np.hstack((self.R[0], self.J[0, :].reshape([3, 1]))))
    for i in range(1, self.kintree_table.shape[1]):
      G[i] = G[self.parent[i]].dot(
        self.with_zeros(
          np.hstack(
            [self.R[i],((self.J[i, :]-self.J[self.parent[i],:]).reshape([3,1]))]
          )
        )
      )
    G = G - self.pack(
      np.matmul(
        G,
        np.hstack([self.J, np.zeros([24, 1])]).reshape([24, 4, 1])
        )
      )
    # transformation of each vertex
    T = np.tensordot(self.weights, G, axes=[[1], [0]])
    rest_shape_h = np.hstack((v_posed, np.ones([v_posed.shape[0], 1])))
    v = np.matmul(T, rest_shape_h.reshape([-1, 4, 1])).reshape([-1, 4])[:, :3]
    self.verts = v + self.trans.reshape([1, 3]) 
Example #15
Source File: test_linalg.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_tensorinv_result(self):
        # mimic a docstring example
        a = np.eye(24)
        a.shape = (24, 8, 3)
        ainv = linalg.tensorinv(a, ind=1)
        b = np.ones(24)
        assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) 
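A minimal sketch expanding the docstring example the test mimics: for ind=1, contracting the inverse against a over one axis gives an identity-like tensor, and applying it to b solves the tensor equation.

import numpy as np

a = np.eye(24)
a.shape = (24, 8, 3)
ainv = np.linalg.tensorinv(a, ind=1)            # shape (8, 3, 24)

ident = np.tensordot(ainv, a, axes=1)           # shape (8, 3, 8, 3)
assert np.allclose(ident.reshape(24, 24), np.eye(24))

b = np.ones(24)
assert np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))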
Example #16
Source File: test_numeric.py    From vnpy_crypto with MIT License
def test_zero_dimension(self):
        # Test resolution to issue #5663
        a = np.ndarray((3,0))
        b = np.ndarray((0,4))
        td = np.tensordot(a, b, (1, 0))
        assert_array_equal(td, np.dot(a, b))
        assert_array_equal(td, np.einsum('ij,jk', a, b)) 
Example #17
Source File: test_einsum.py    From vnpy_crypto with MIT License
def test_einsum_fixedstridebug(self):
        # Issue #4485 obscure einsum bug
        # This case revealed a bug in nditer where it reported a stride
        # as 'fixed' (0) when it was in fact not fixed during processing
        # (0 or 4). The reason for the bug was that the check for a fixed
        # stride was using the information from the 2D inner loop reuse
        # to restrict the iteration dimensions it had to validate to be
        # the same, but that 2D inner loop reuse logic is only triggered
        # during the buffer copying step, and hence it was invalid to
        # rely on those values. The fix is to check all the dimensions
        # of the stride in question, which in the test case reveals that
        # the stride is not fixed.
        #
        # NOTE: This test is triggered by the fact that the default buffersize
        #       used by einsum is 8192, while 3*2731 = 8193 is larger than that
        #       and results in a mismatch between the buffering and the
        #       striding for operand A.
        A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
        B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
        es = np.einsum('cl, cpx->lpx',  A,  B)
        tp = np.tensordot(A,  B,  axes=(0,  0))
        assert_equal(es,  tp)
        # The following is the original test case from the bug report,
        # made repeatable by changing random arrays to aranges.
        A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
        B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
        es = np.einsum('cl, cpxy->lpxy',  A, B)
        tp = np.tensordot(A, B,  axes=(0, 0))
        assert_equal(es, tp) 
Example #18
Source File: test_tensor.py    From funsor with Apache License 2.0
def test_tensor_tensordot(x_shape, xy_shape, y_shape):
    x = randn(x_shape + xy_shape)
    y = randn(xy_shape + y_shape)
    dim = len(xy_shape)
    actual = tensordot(Tensor(x), Tensor(y), dim)
    expected = Tensor(_numeric_tensordot(x, y, dim))
    assert_close(actual, expected, atol=1e-5, rtol=None) 
Example #19
Source File: _multivariate.py    From lambda-packs with MIT License
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
                col_prec_rt, log_det_colcov):
        """
        Parameters
        ----------
        dims : tuple
            Dimensions of the matrix variates
        X : ndarray
            Points at which to evaluate the log of the probability
            density function
        mean : ndarray
            Mean of the distribution
        row_prec_rt : ndarray
            A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
            is the inverse of the among-row covariance matrix
        log_det_rowcov : float
            Logarithm of the determinant of the among-row covariance matrix
        col_prec_rt : ndarray
            A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
            is the inverse of the among-column covariance matrix
        log_det_colcov : float
            Logarithm of the determinant of the among-column covariance matrix

        Notes
        -----
        As this function does no argument checking, it should not be
        called directly; use 'logpdf' instead.

        """
        numrows, numcols = dims
        roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
        scale_dev = np.tensordot(col_prec_rt.T,
                                 np.dot(roll_dev, row_prec_rt), 1)
        maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
        return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
                       + numrows*log_det_colcov + maha) 
Example #20
Source File: test_numeric.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_zero_dimension(self):
        # Test resolution to issue #5663
        a = np.ndarray((3,0))
        b = np.ndarray((0,4))
        td = np.tensordot(a, b, (1, 0))
        assert_array_equal(td, np.dot(a, b))
        assert_array_equal(td, np.einsum('ij,jk', a, b)) 
Example #21
Source File: impulses.py    From pyhawkes with MIT License
def impulses(self):
        basis = self.model.basis.basis
        return np.tensordot(basis, self.g, axes=[1,2]) 
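When axes is a plain two-element list of integers, as here, numpy treats it as one axis from each operand. A sketch with hypothetical shapes:

import numpy as np

basis = np.random.rand(10, 6)     # hypothetical: (time points, basis functions)
g = np.random.rand(3, 3, 6)       # hypothetical: (process, process, basis functions)

# axes=[1, 2] contracts axis 1 of basis with axis 2 of g
ir = np.tensordot(basis, g, axes=[1, 2])
assert ir.shape == (10, 3, 3)
assert np.allclose(ir, np.einsum("tb,ijb->tij", basis, g))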
Example #22
Source File: a_mps.py    From tenpy with GNU General Public License v3.0
def site_expectation_value(self, op):
        """Calculate expectation values of a local operator at each site."""
        result = []
        for i in range(self.L):
            theta = self.get_theta1(i)  # vL i vR
            op_theta = np.tensordot(op, theta, axes=[1, 1])  # i [i*], vL [i] vR
            result.append(np.tensordot(theta.conj(), op_theta, [[0, 1, 2], [1, 0, 2]]))
            # [vL*] [i*] [vR*], [i] [vL] [vR]
        return np.real_if_close(result) 
Example #23
Source File: a_mps.py    From tenpy with GNU General Public License v3.0
def get_theta2(self, i):
        """Calculate effective two-site wave function on sites i,j=(i+1) in mixed canonical form.

        The returned array has legs ``vL, i, j, vR``.
        """
        j = (i + 1) % self.L
        return np.tensordot(self.get_theta1(i), self.Bs[j], [2, 0])  # vL i [vR], [vL] j vR 
Example #24
Source File: servoing_policy.py    From visual_dynamics with MIT License
def _get_pi_var(self):
        w_var, lambda_var = self.param_vars
        A_split_var, b_split_var, _ = self._get_A_b_c_split_vars()

        A_var = T.tensordot(A_split_var, w_var / self.repeats, axes=(0, 0)) + T.diag(lambda_var)
        B_var = T.tensordot(b_split_var, w_var / self.repeats, axes=(0, 0))
        pi_var = T.batched_tensordot(T.nlinalg.matrix_inverse(A_var), B_var, axes=(2, 1))  # preprocessed units
        return pi_var 
Example #25
Source File: a_mps.py    From tenpy with GNU General Public License v3.0
def get_theta1(self, i):
        """Calculate effective single-site wave function on sites i in mixed canonical form.

        The returned array has legs ``vL, i, vR`` (as one of the Bs).
        """
        return np.tensordot(np.diag(self.Ss[i]), self.Bs[i], [1, 0])  # vL [vL'], [vL] i vR 
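Because the left factor here is a diagonal matrix, the contraction simply rescales the left virtual leg of B. A small sketch, with shapes assumed for illustration:

import numpy as np

chi, d = 4, 2
S = np.random.rand(chi)             # singular values on the left bond
B = np.random.rand(chi, d, chi)     # legs vL, i, vR

theta = np.tensordot(np.diag(S), B, [1, 0])   # vL [vL'], [vL] i vR
assert np.allclose(theta, S[:, None, None] * B)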
Example #26
Source File: c_tebd.py    From tenpy with GNU General Public License v3.0
def example_TEBD_tf_ising_lightcone(L, g, tmax, dt):
    print("finite TEBD, real time evolution, transverse field Ising")
    print("L={L:d}, g={g:.2f}, tmax={tmax:.2f}, dt={dt:.3f}".format(L=L, g=g, tmax=tmax, dt=dt))
    # find ground state with TEBD or DMRG
    #  E, psi, M = example_TEBD_gs_tf_ising_finite(L, g)
    from d_dmrg import example_DMRG_tf_ising_finite
    E, psi, M = example_DMRG_tf_ising_finite(L, g)
    i0 = L // 2
    # apply sigmaz on site i0
    SzB = np.tensordot(M.sigmaz, psi.Bs[i0], axes=[1, 1])  # i [i*], vL [i] vR
    psi.Bs[i0] = np.transpose(SzB, [1, 0, 2])  # vL i vR
    U_bonds = calc_U_bonds(M.H_bonds, 1.j * dt)  # (imaginary dt -> realtime evolution)
    S = [psi.entanglement_entropy()]
    Nsteps = int(tmax / dt + 0.5)
    for n in range(Nsteps):
        if abs((n * dt + 0.1) % 0.2 - 0.1) < 1.e-10:
            print("t = {t:.2f}, chi =".format(t=n * dt), psi.get_chi())
        run_TEBD(psi, U_bonds, 1, chi_max=50, eps=1.e-10)
        S.append(psi.entanglement_entropy())
    import matplotlib.pyplot as plt
    plt.figure()
    plt.imshow(S[::-1],
               vmin=0.,
               aspect='auto',
               interpolation='nearest',
               extent=(0, L - 1., -0.5 * dt, (Nsteps + 0.5) * dt))
    plt.xlabel('site $i$')
    plt.ylabel('time $t/J$')
    plt.ylim(0., tmax)
    plt.colorbar().set_label('entropy $S$')
    filename = 'c_tebd_lightcone_{g:.2f}.pdf'.format(g=g)
    plt.savefig(filename)
    print("saved " + filename) 
Example #27
Source File: c_tebd.py    From tenpy with GNU General Public License v3.0
def update_bond(psi, i, U_bond, chi_max, eps):
    """Apply `U_bond` acting on i,j=(i+1) to `psi`."""
    j = (i + 1) % psi.L
    # construct theta matrix
    theta = psi.get_theta2(i)  # vL i j vR
    # apply U
    Utheta = np.tensordot(U_bond, theta, axes=([2, 3], [1, 2]))  # i j [i*] [j*], vL [i] [j] vR
    Utheta = np.transpose(Utheta, [2, 0, 1, 3])  # vL i j vR
    # split and truncate
    Ai, Sj, Bj = split_truncate_theta(Utheta, chi_max, eps)
    # put back into MPS
    Gi = np.tensordot(np.diag(psi.Ss[i]**(-1)), Ai, axes=[1, 0])  # vL [vL*], [vL] i vC
    psi.Bs[i] = np.tensordot(Gi, np.diag(Sj), axes=[2, 0])  # vL i [vC], [vC] vC
    psi.Ss[j] = Sj  # vC
    psi.Bs[j] = Bj  # vC j vR 
Example #28
Source File: d_dmrg.py    From tenpy with GNU General Public License v3.0
def update_RP(self, i):
        """Calculate RP right of site `i-1` from RP right of site `i`."""
        j = (i - 1) % self.psi.L
        RP = self.RPs[i]  # vR* wR* vR
        B = self.psi.Bs[i]  # vL i vR
        Bc = B.conj()  # vL* i* vR*
        W = self.H_mpo[i]  # wL wR i i*
        RP = np.tensordot(B, RP, axes=[2, 0])  # vL i [vR], [vR*] wR* vR
        RP = np.tensordot(RP, W, axes=[[1, 2], [3, 1]])  # vL [i] [wR*] vR, wL [wR] i [i*]
        RP = np.tensordot(RP, Bc, axes=[[1, 3], [2, 1]])  # vL [vR] wL [i], vL* [i*] [vR*]
        self.RPs[j] = RP  # vL wL vL* (== vR* wR* vR on site i-1) 
Example #29
Source File: test_0004_vna.py    From pyscf with Apache License 2.0
def test_vna_n2(self):
    dname = dirname(abspath(__file__))
    n = nao(label='n2', cd=dname)
    m = 200
    dvec,midv = 2*(n.atom2coord[1] - n.atom2coord[0])/m,  (n.atom2coord[1] + n.atom2coord[0])/2.0
    vgrid = np.tensordot(np.array(range(-m,m+1)), dvec, axes=0) + midv
    sgrid = np.array(range(-m,m+1)) * np.sqrt((dvec*dvec).sum())
    
    vna = n.vna(vgrid)
    #print(vna.shape, sgrid.shape)
    #np.savetxt('vna_n2_0004.txt', np.row_stack((sgrid, vna)).T)
    ref = np.loadtxt(dname+'/vna_n2_0004.txt-ref')
    for r,d in zip(ref[:,1],vna): self.assertAlmostEqual(r,d) 
Example #30
Source File: _multivariate.py    From lambda-packs with MIT License
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
        """
        Draw random samples from a matrix normal distribution.

        Parameters
        ----------
        %(_matnorm_doc_default_callparams)s
        size : integer, optional
            Number of samples to draw (default 1).
        %(_doc_random_state)s

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of size (`size`, `dims`), where `dims` is the
            dimension of the random matrices.

        Notes
        -----
        %(_matnorm_doc_callparams_note)s

        """
        size = int(size)
        dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
                                                              colcov)
        rowchol = scipy.linalg.cholesky(rowcov, lower=True)
        colchol = scipy.linalg.cholesky(colcov, lower=True)
        random_state = self._get_random_state(random_state)
        std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))
        roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
        out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]
        if size == 1:
            #out = np.squeeze(out, axis=0)
            out = out.reshape(mean.shape)
        return out