Python numpy.identity() Examples

The following are 30 code examples of numpy.identity(), collected from open-source projects. The originating project and source file are listed above each example. You may also want to look at the other available functions and classes of the numpy module.
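
As a quick refresher, numpy.identity(n) returns an n-by-n array with ones on the main diagonal and zeros elsewhere; the dtype defaults to float64 and can be overridden. A minimal sketch:

import numpy as np

I3 = np.identity(3)                      # 3x3 float64 identity
Ic = np.identity(4, dtype=complex)       # dtype can be specified, as several examples below do
assert I3.shape == (3, 3)
assert np.array_equal(I3, np.eye(3))     # np.eye is the more general equivalent
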
Example #1
Source File: test_eom_kgccsd_diag.py    From pyscf with Apache License 2.0
def _test_ip_diag(self,cc):
        eom = kccsd_ghf.EOMIP(cc)
        imds = eom.make_imds()
        nkpts, nocc, nvir = imds.t1.shape
        diag = kccsd_ghf.ipccsd_diag(eom,0,imds=imds)

        I = np.identity(diag.shape[0],dtype=complex)
        indices = np.arange(len(diag))
        H = np.zeros((I.shape[0],len(indices)),dtype=complex)
        for j,idx in enumerate(indices):
            H[:,j] = kccsd_ghf.ipccsd_matvec(eom,I[:,idx],0,imds=imds)

        diag_ref = np.zeros(len(indices),dtype=complex)
        diag_out = np.zeros(len(indices),dtype=complex)
        for j,idx in enumerate(indices):
            diag_ref[j] = H[idx,j]
            diag_out[j] = diag[idx]
        diff = np.linalg.norm(diag_ref - diag_out)
        self.assertTrue(abs(diff) < KGCCSD_TEST_THRESHOLD,"Difference in IP diag: {}".format(diff)) 
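
The pattern above — feeding the columns of np.identity through a matvec routine to reconstruct a dense operator matrix, then comparing its diagonal against a precomputed one — is generic. A minimal sketch with a hypothetical matvec function (dense_from_matvec is an illustrative helper, not part of pyscf):

import numpy as np

def dense_from_matvec(matvec, n, dtype=complex):
    # Apply matvec to each column of the identity to recover the dense matrix.
    I = np.identity(n, dtype=dtype)
    return np.column_stack([matvec(I[:, j]) for j in range(n)])

# e.g. given a dense H and a precomputed diagonal `diag`:
#     diff = np.linalg.norm(np.diag(H) - diag)
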
Example #2
Source File: povm.py    From pyGSTi with Apache License 2.0
def _rebuild_complement(self, identity_for_complement=None):
        """ Rebuild complement vector (in case other vectors have changed) """

        if self.complement_label is not None and self.complement_label in self:
            non_comp_effects = [v for k, v in self.items()
                                if k != self.complement_label]

            if identity_for_complement is None:
                identity_for_complement = self[self.complement_label].identity

            complement_effect = _sv.ComplementSPAMVec(
                identity_for_complement, non_comp_effects)
            complement_effect.set_gpindices(slice(0, self.Np), self)  # all parameters

            #Assign new complement effect without calling our __setitem__
            old_ro = self._readonly; self._readonly = False
            POVM.__setitem__(self, self.complement_label, complement_effect)
            self._readonly = old_ro 
Example #3
Source File: uadc.py    From pyscf with Apache License 2.0
def get_init_guess(self, nroots=1, diag=None, ascending = True):
       if diag is None :
           diag = self.ea_adc_diag()
       idx = None
       if ascending:
           idx = np.argsort(diag)
       else:
           idx = np.argsort(diag)[::-1]
       guess = np.zeros((diag.shape[0], nroots))
       min_shape = min(diag.shape[0], nroots)
       guess[:min_shape,:min_shape] = np.identity(min_shape)
       g = np.zeros((diag.shape[0], nroots))
       g[idx] = guess.copy()
       guess = []
       for p in range(g.shape[1]):
           guess.append(g[:,p])
       return guess 
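
Condensed, the routine above builds unit guess vectors located at the positions of the nroots smallest (or largest) diagonal entries. A shorter sketch of the same idea (identity_guess is an illustrative helper, not part of pyscf, and assumes nroots does not exceed the size of diag):

import numpy as np

def identity_guess(diag, nroots=1, ascending=True):
    order = np.argsort(diag) if ascending else np.argsort(diag)[::-1]
    g = np.zeros((diag.shape[0], nroots))
    g[order[:nroots], np.arange(nroots)] = 1.0   # one unit vector per selected diagonal entry
    return [g[:, p] for p in range(nroots)]
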
Example #4
Source File: optimizers.py    From DOTA_models with Apache License 2.0
def optimize(self, sess, feed_dict):
    reg_input, reg_weight, old_values, targets = sess.run(
        [self.inputs, self.regression_weight, self.values, self.targets],
        feed_dict=feed_dict)

    intended_values = targets * self.mix_frac + old_values * (1 - self.mix_frac)

    # taken from rllab
    reg_coeff = 1e-5
    for _ in range(5):
      best_fit_weight = np.linalg.lstsq(
          reg_input.T.dot(reg_input) +
          reg_coeff * np.identity(reg_input.shape[1]),
          reg_input.T.dot(intended_values))[0]
      if not np.any(np.isnan(best_fit_weight)):
        break
      reg_coeff *= 10

    if len(best_fit_weight.shape) == 1:
      best_fit_weight = np.expand_dims(best_fit_weight, -1)

    sess.run(self.update_regression_weight,
             feed_dict={self.new_regression_weight: best_fit_weight}) 
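
The loop above is a ridge-regularized least-squares solve: it factors (XᵀX + λI) w = Xᵀy and multiplies λ by 10 whenever the solution comes back with NaNs. A self-contained sketch of that pattern (ridge_lstsq is an illustrative helper, not part of the project above):

import numpy as np

def ridge_lstsq(X, y, reg_coeff=1e-5, max_tries=5):
    # Solve (X^T X + reg * I) w = X^T y, increasing reg until the solution is NaN-free.
    for _ in range(max_tries):
        w = np.linalg.lstsq(X.T.dot(X) + reg_coeff * np.identity(X.shape[1]),
                            X.T.dot(y), rcond=None)[0]
        if not np.any(np.isnan(w)):
            break
        reg_coeff *= 10
    return w
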
Example #5
Source File: test_scipy_hungarian.py    From QCElemental with BSD 3-Clause "New" or "Revised" License
def test_linear_sum_assignment_input_validation():
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])

    C = [[1, 2, 3], [4, 5, 6]]
    assert_array_equal(linear_sum_assignment(C), linear_sum_assignment(np.asarray(C)))
    # assert_array_equal(linear_sum_assignment(C),
    #                    linear_sum_assignment(matrix(C)))

    I = np.identity(3)
    assert_array_equal(linear_sum_assignment(I.astype(bool)), linear_sum_assignment(I))
    assert_raises(ValueError, linear_sum_assignment, I.astype(str))

    I[0][0] = np.nan
    assert_raises(ValueError, linear_sum_assignment, I)

    I = np.identity(3)
    I[1][1] = np.inf
    assert_raises(ValueError, linear_sum_assignment, I) 
Example #6
Source File: uadc.py    From pyscf with Apache License 2.0
def get_init_guess(self, nroots=1, diag=None, ascending = True):
        if diag is None :
            diag = self.ip_adc_diag()
        idx = None
        if ascending:
            idx = np.argsort(diag)
        else:
            idx = np.argsort(diag)[::-1]
        guess = np.zeros((diag.shape[0], nroots))
        min_shape = min(diag.shape[0], nroots)
        guess[:min_shape,:min_shape] = np.identity(min_shape)
        g = np.zeros((diag.shape[0], nroots))
        g[idx] = guess.copy()
        guess = []
        for p in range(g.shape[1]):
            guess.append(g[:,p])
        return guess 
Example #7
Source File: povm.py    From pyGSTi with Apache License 2.0
def depolarize(self, amount):
        """
        Depolarize this POVM by the given `amount`.

        Parameters
        ----------
        amount : float or tuple
            The amount to depolarize by.  If a tuple, it must have length
            equal to one less than the dimension of the gate. All but the
            first element of each spam vector (often corresponding to the
            identity element) are multiplied by `amount` (if a float) or
            the corresponding `amount[i]` (if a tuple).

        Returns
        -------
        None
        """
        raise ValueError("Cannot depolarize a %s object" % self.__class__.__name__)
        #self.dirty = True 
Example #8
Source File: transform_utils.py    From robosuite with MIT License
def quat2mat(quaternion):
    """
    Converts given quaternion (x, y, z, w) to matrix.

    Args:
        quaternion: vec4 float angles

    Returns:
        3x3 rotation matrix
    """
    q = np.array(quaternion, dtype=np.float32, copy=True)[[3, 0, 1, 2]]
    n = np.dot(q, q)
    if n < EPS:
        return np.identity(3)
    q *= math.sqrt(2.0 / n)
    q = np.outer(q, q)
    return np.array(
        [
            [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],
            [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],
            [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]],
        ]
    ) 
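
A quick sanity check of the function above: the identity quaternion (x, y, z, w) = (0, 0, 0, 1) should map to np.identity(3), and any returned matrix should be orthogonal (this assumes the module-level EPS constant and imports referenced in the code):

import numpy as np

R = quat2mat([0.0, 0.0, 0.0, 1.0])
assert np.allclose(R, np.identity(3))
assert np.allclose(R @ R.T, np.identity(3))   # rotation matrices satisfy R R^T = I
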
Example #9
Source File: __init__.py    From razzy-spinner with GNU General Public License v3.0
def cluster(self, vectors, assign_clusters=False, trace=False):
        assert len(vectors) > 0

        # normalise the vectors
        if self._should_normalise:
            vectors = map(self._normalise, vectors)

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = linalg.svd(numpy.transpose(array(vectors)))
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:,:self._svd_dimensions]
            Dt = vt[:self._svd_dimensions,:]
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)
            
        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the vectors to clusters
        if assign_clusters:
            print(self._Tt, vectors)
            return [self.classify(vector) for vector in vectors] 
Example #10
Source File: util.py    From razzy-spinner with GNU General Public License v3.0
def cluster(self, vectors, assign_clusters=False, trace=False):
        assert len(vectors) > 0

        # normalise the vectors
        if self._should_normalise:
            vectors = list(map(self._normalise, vectors))

        # use SVD to reduce the dimensionality
        if self._svd_dimensions and self._svd_dimensions < len(vectors[0]):
            [u, d, vt] = numpy.linalg.svd(numpy.transpose(numpy.array(vectors)))
            S = d[:self._svd_dimensions] * \
                numpy.identity(self._svd_dimensions, numpy.float64)
            T = u[:,:self._svd_dimensions]
            Dt = vt[:self._svd_dimensions,:]
            vectors = numpy.transpose(numpy.dot(S, Dt))
            self._Tt = numpy.transpose(T)

        # call abstract method to cluster the vectors
        self.cluster_vectorspace(vectors, trace)

        # assign the vectors to clusters
        if assign_clusters:
            return [self.classify(vector) for vector in vectors] 
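
In the SVD branch above, multiplying the 1-D array of singular values by numpy.identity is just one way of building the diagonal matrix S; numpy.diag produces the same result. A small check:

import numpy

d = numpy.array([3.0, 2.0, 0.5])
k = 2
S = d[:k] * numpy.identity(k)               # as written in the example
assert numpy.array_equal(S, numpy.diag(d[:k]))
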
Example #11
Source File: ap.py    From padasip with MIT License
def __init__(self, n, order=5, mu=0.1, eps=0.001, w="random"):
        self.kind = "AP filter"
        self.n = self.check_int(
            n,'The size of filter must be an integer')
        self.order = self.check_int(
            order, 'The order of projection must be an integer')
        self.mu = self.check_float_param(mu, 0, 1000, "mu")
        self.eps = self.check_float_param(eps, 0, 1000, "eps")
        self.init_weights(w, self.n)
        self.w_history = False
        self.x_mem = np.zeros((self.n, self.order))
        self.d_mem = np.zeros(order)
        self.ide_eps = self.eps * np.identity(self.order)
        self.ide = np.identity(self.order)
        self.y_mem = False
        self.e_mem = False 
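
Pre-computing eps * np.identity(order) serves as a Tikhonov regularizer: adding it to the positive semi-definite data correlation matrix of the affine-projection update keeps that matrix invertible. A minimal sketch of the pattern (X is hypothetical data, not padasip's API):

import numpy as np

order, eps = 5, 1e-3
X = np.random.randn(order, 20)                # hypothetical block of input samples
R = X.dot(X.T)                                # positive semi-definite correlation matrix
R_reg = R + eps * np.identity(order)          # strictly positive definite for eps > 0
w = np.linalg.solve(R_reg, np.ones(order))    # safe to solve against
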
Example #12
Source File: nn.py    From kvae with MIT License
def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype

        if len(shape) == 1:
            return constant_op.constant(0., dtype=dtype, shape=shape)
        elif len(shape) == 2 and shape[0] == shape[1]:
            return constant_op.constant(np.identity(shape[0], dtype))
        elif len(shape) == 4 and shape[2] == shape[3]:
            array = np.zeros(shape, dtype=float)
            cx, cy = shape[0] // 2, shape[1] // 2  # integer indices for the spatial centre
            for i in range(shape[2]):
                array[cx, cy, i, i] = 1
            return constant_op.constant(array, dtype=dtype)
        else:
            return constant_op.constant(0., dtype=dtype, shape=shape)
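
For a 4-D convolution kernel of shape (kh, kw, c, c), the branch above places a 1 at the spatial centre for every matching input/output channel, so convolving with it approximately passes each channel through unchanged. A numpy-only sketch of that kernel:

import numpy as np

kh, kw, c = 3, 3, 4
kernel = np.zeros((kh, kw, c, c))
kernel[kh // 2, kw // 2] = np.identity(c)   # centre tap copies every channel to itself
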
Example #13
Source File: dmrgci.py    From pyscf with Apache License 2.0
def _make_dm123(self, state, norb, nelec, link_index=None, **kwargs):
        r'''Note this function does NOT compute the standard density matrix.
        The density matrices are reordered to match the fci.rdm.make_dm123
        function (used by NEVPT code).
        The returned "2pdm" is :math:`\langle p^\dagger q r^\dagger s\rangle`;
        The returned "3pdm" is :math:`\langle p^\dagger q r^\dagger s t^\dagger u\rangle`.
        '''
        onepdm, twopdm, threepdm = self.make_rdm123(state, norb, nelec, None, **kwargs)
        threepdm = numpy.einsum('mkijln->ijklmn',threepdm).copy()
        threepdm += numpy.einsum('jk,lm,in->ijklmn',numpy.identity(norb),numpy.identity(norb),onepdm)
        threepdm += numpy.einsum('jk,miln->ijklmn',numpy.identity(norb),twopdm)
        threepdm += numpy.einsum('lm,kijn->ijklmn',numpy.identity(norb),twopdm)
        threepdm += numpy.einsum('jm,kinl->ijklmn',numpy.identity(norb),twopdm)

        twopdm =(numpy.einsum('iklj->ijkl',twopdm)
               + numpy.einsum('li,jk->ijkl',onepdm,numpy.identity(norb)))

        return onepdm, twopdm, threepdm 
Example #14
Source File: gaugegroup.py    From pyGSTi with Apache License 2.0
def __init__(self, dim):
        """
        Create a new gauge group with gauge-transform dimension `dim`, which
        should be the same as `mdl.dim` where `mdl` is a :class:`Model` you
        might gauge-transform.
        """
        from . import operation as _op  # b/c operation.py imports gaugegroup
        ltrans = _np.identity(dim, 'd')
        rtrans = _np.identity(dim, 'd')
        baseMx = _np.identity(dim, 'd')
        parameterArray = _np.zeros(1, 'd')
        parameterToBaseIndicesMap = {0: [(i, i) for i in range(1, dim)]}
        gate = _op.LinearlyParamDenseOp(baseMx, parameterArray,
                                        parameterToBaseIndicesMap,
                                        ltrans, rtrans, real=True)
        OpGaugeGroup.__init__(self, gate, TPSpamGaugeGroupElement, "TP Spam") 
Example #15
Source File: dmrgci.py    From pyscf with Apache License 2.0
def _make_dm123(self, state, norb, nelec, link_index=None, **kwargs):
        r'''Note this function does NOT compute the standard density matrix.
        The density matrices are reordered to match the fci.rdm.make_dm123
        function (used by NEVPT code).
        The returned "2pdm" is :math:`\langle p^\dagger q r^\dagger s\rangle`;
        The returned "3pdm" is :math:`\langle p^\dagger q r^\dagger s t^\dagger u\rangle`.
        '''
        onepdm, twopdm, threepdm = self.make_rdm123(state, norb, nelec, None, **kwargs)
        threepdm = numpy.einsum('mkijln->ijklmn',threepdm).copy()
        threepdm += numpy.einsum('jk,lm,in->ijklmn',numpy.identity(norb),numpy.identity(norb),onepdm)
        threepdm += numpy.einsum('jk,miln->ijklmn',numpy.identity(norb),twopdm)
        threepdm += numpy.einsum('lm,kijn->ijklmn',numpy.identity(norb),twopdm)
        threepdm += numpy.einsum('jm,kinl->ijklmn',numpy.identity(norb),twopdm)

        twopdm =(numpy.einsum('iklj->ijkl',twopdm)
               + numpy.einsum('li,jk->ijkl',onepdm,numpy.identity(norb)))

        return onepdm, twopdm, threepdm 
Example #16
Source File: test_eom_kgccsd_diag.py    From pyscf with Apache License 2.0
def _test_ea_diag(self,cc):
       eom = kccsd_ghf.EOMEA(cc)
       imds = eom.make_imds()
       nkpts, nocc, nvir = imds.t1.shape
       diag = kccsd_ghf.eaccsd_diag(eom,0,imds=imds)

       I = np.identity(diag.shape[0],dtype=complex)
       indices = np.arange(len(diag))
       H = np.zeros((I.shape[0],len(indices)),dtype=complex)
       for j,idx in enumerate(indices):
           H[:,j] = kccsd_ghf.eaccsd_matvec(eom,I[:,idx],0,imds=imds)

       diag_ref = np.zeros(len(indices),dtype=complex)
       diag_out = np.zeros(len(indices),dtype=complex)
       for j,idx in enumerate(indices):
           diag_ref[j] = H[idx,j]
           diag_out[j] = diag[idx]
       diff = np.linalg.norm(diag_ref - diag_out)
       self.assertTrue(abs(diff) < KGCCSD_TEST_THRESHOLD,"Difference in EA diag: {}".format(diff)) 
Example #17
Source File: test_eom_kuccsd_diag.py    From pyscf with Apache License 2.0
def _test_ip_diag(self,kmf,kshift=0):
        cc = kccsd.KUCCSD(kmf)
        Ecc = cc.kernel()[0]

        eom = kccsd_uhf.EOMIP(cc)
        imds = eom.make_imds()
        t1a,t1b = imds.t1
        nkpts, nocc_a, nvir_a = t1a.shape
        nkpts, nocc_b, nvir_b = t1b.shape
        nocc = nocc_a + nocc_b
        diag = kccsd_uhf.ipccsd_diag(eom,kshift,imds=imds)

        I = np.identity(diag.shape[0],dtype=complex)
        indices = np.arange(diag.shape[0])
        H = np.zeros((I.shape[0],len(indices)),dtype=complex)
        for j,idx in enumerate(indices):
            H[:,j] = kccsd_uhf.ipccsd_matvec(eom,I[:,idx],kshift,imds=imds)

        diag_ref = np.zeros(len(indices),dtype=complex)
        diag_out = np.zeros(len(indices),dtype=complex)
        for j,idx in enumerate(indices):
            diag_ref[j] = H[idx,j]
            diag_out[j] = diag[idx]
        diff = np.linalg.norm(diag_ref - diag_out)
        self.assertTrue(abs(diff) < KGCCSD_TEST_THRESHOLD,"Difference in IP diag: {}".format(diff)) 
Example #18
Source File: test_linucb.py    From striatum with BSD 2-Clause "Simplified" License
def test_add_action(self):
        policy = self.policy
        context1 = {1: [1, 1], 2: [2, 2], 3: [3, 3]}
        history_id, _ = policy.get_action(context1, 2)
        new_actions = [Action() for i in range(2)]
        policy.add_action(new_actions)
        self.assertEqual(len(new_actions) + len(self.actions),
                         policy._action_storage.count())
        policy.reward(history_id, {3: 1})
        model = policy._model_storage.get_model()
        for action in new_actions:
            self.assertTrue((model['A'][action.id]
                             == np.identity(self.context_dimension)).all())

        context2 = {1: [1, 1], 2: [2, 2], 3: [3, 3], 4: [4, 4], 5: [5, 5]}
        history_id2, recommendations = policy.get_action(context2, 4)
        self.assertEqual(len(recommendations), 4)
        policy.reward(history_id2, {new_actions[0].id: 4, new_actions[1].id: 5})
        model = policy._model_storage.get_model()
        for action in new_actions:
            self.assertFalse((model['A'][action.id] == np.identity(2)).all()) 
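
The assertions above rely on LinUCB initializing each action's design matrix A to the d-by-d identity (a ridge prior); the matrix only departs from np.identity(d) once the action receives a reward. A minimal numpy sketch of one such update (not striatum's API, just the underlying arithmetic):

import numpy as np

d = 2
A = np.identity(d)                  # design matrix for one action, identity before any data
b = np.zeros((d, 1))
x = np.array([[1.0], [2.0]])        # an observed context for this action
r = 1.0                             # its reward
A = A + x.dot(x.T)                  # A is no longer the identity after this observation
b = b + r * x
theta = np.linalg.solve(A, b)       # ridge-regression estimate of the action's weights
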
Example #19
Source File: ulocal.py    From pyscf with Apache License 2.0
def lowdinPop(mol,coeff,ova,enorb,occ):
   print('\nLowdin population for LMOs:')
   nb,nc = coeff.shape
   s12 = sqrtm(ova)
   lcoeff = s12.dot(coeff)
   diff = reduce(numpy.dot,(lcoeff.T,lcoeff)) - numpy.identity(nc)
   print('diff=',numpy.linalg.norm(diff))
   pthresh = 0.05
   labels = mol.ao_labels(None)
   nelec = 0.0
   for iorb in range(nc):
      vec = lcoeff[:,iorb]**2
      idx = list(numpy.argwhere(vec>pthresh))
      print(' iorb=',iorb,' occ=',occ[iorb],' <i|F|i>=',enorb[iorb])
      for iao in idx:
         print('    iao=',labels[iao],' pop=',vec[iao])
      nelec += occ[iorb]
   print('nelec=',nelec)
   return 0 
Example #20
Source File: pmloc.py    From pyscf with Apache License 2.0
def lowdin(s):
   e, v = numpy.linalg.eigh(s)
   return numpy.dot(v/numpy.sqrt(e), v.T.conj())

#def scdmU(coeff,ova):
#   aux = numpy.identity(ova.shape[0])
#   #aux = lowdin(ova)
#   no = coeff.shape[1]	
#   ova = reduce(numpy.dot,(coeff.T,ova,aux))
#   # ova = no*nb
#   q,r,piv = scipy.linalg.qr(ova, pivoting=True)
#   # In fact, it is just "Lowdin-orthonormalized PAO".
#   bc = ova[:,piv[:no]]
#   ova = numpy.dot(bc.T,bc)
#   s12inv = lowdin(ova)
#   u = numpy.dot(bc,s12inv)
#   return u

#------------------------------------------------
# Boys/PM-Localization
#------------------------------------------------ 
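
The lowdin(s) helper at the top of this example returns S^(-1/2) built from the eigendecomposition of S, so sandwiching S between two copies of the result should recover the identity. A quick check with a synthetic positive-definite matrix (assuming the function and its module-level numpy import above):

import numpy

A = numpy.random.randn(4, 4)
S = A.T.dot(A) + 4 * numpy.identity(4)     # symmetric positive-definite, overlap-like
X = lowdin(S)                              # S^{-1/2}
assert numpy.allclose(X.dot(S).dot(X), numpy.identity(4))
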
Example #21
Source File: make_test_cell.py    From pyscf with Apache License 2.0
def test_cell_n2(L=5, mesh=[9]*3):
    cell = pbcgto.Cell()
    cell.unit = 'B'
    cell.atom.extend([['O', (L/2., L/2., L/2.)],
                      ['H', (L/2.-0.689440, L/2.+0.578509, L/2.)],
                      ['H', (L/2.+0.689440, L/2.-0.578509, L/2.)],
        ])
    cell.a = L * np.identity(3)

    cell.basis = 'sto-3g'
    cell.pseudo = 'gth-pade'
    cell.mesh = mesh

    cell.output = '/dev/null'
    cell.build()
    return cell 
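
Here L * np.identity(3) is simply a cubic simulation cell: three orthogonal lattice vectors of length L (in Bohr, given cell.unit = 'B'). Written out:

import numpy as np

L = 5.0
lattice = L * np.identity(3)
# [[5. 0. 0.]
#  [0. 5. 0.]
#  [0. 0. 5.]]
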
Example #22
Source File: shci.py    From pyscf with Apache License 2.0
def _make_dm123(self, state, norb, nelec, link_index=None, **kwargs):
        r"""Note this function does NOT compute the standard density matrix.
        The density matrices are reordered to match the fci.rdm.make_dm123
        function (used by NEVPT code).
        The returned "2pdm" is :math:`\langle p^\dagger q r^\dagger s\rangle`;
        The returned "3pdm" is :math:`\langle p^\dagger q r^\dagger s t^\dagger u\rangle`.
        """
        onepdm, twopdm, threepdm = self.make_rdm123(state, norb, nelec, None, **kwargs)
        threepdm = numpy.einsum("mkijln->ijklmn", threepdm).copy()
        threepdm += numpy.einsum("jk,lm,in->ijklmn", numpy.identity(norb), numpy.identity(norb), onepdm)
        threepdm += numpy.einsum("jk,miln->ijklmn", numpy.identity(norb), twopdm)
        threepdm += numpy.einsum("lm,kijn->ijklmn", numpy.identity(norb), twopdm)
        threepdm += numpy.einsum("jm,kinl->ijklmn", numpy.identity(norb), twopdm)

        twopdm = numpy.einsum("iklj->ijkl", twopdm) + numpy.einsum("li,jk->ijkl", onepdm, numpy.identity(norb))

        return onepdm, twopdm, threepdm 
Example #23
Source File: gaugegroup.py    From pyGSTi with Apache License 2.0
def __init__(self, dim):
        """
        Create a new gauge group with gauge-transform dimension `dim`, which
        should be the same as `mdl.dim` where `mdl` is a :class:`Model` you
        might gauge-transform.
        """
        from . import operation as _op  # b/c operation.py imports gaugegroup
        ltrans = _np.identity(dim, 'd')
        rtrans = _np.identity(dim, 'd')
        baseMx = _np.identity(dim, 'd')
        parameterArray = _np.zeros(dim, 'd')
        parameterToBaseIndicesMap = {i: [(i, i)] for i in range(dim)}
        gate = _op.LinearlyParamDenseOp(baseMx, parameterArray,
                                        parameterToBaseIndicesMap,
                                        ltrans, rtrans, real=True)
        OpGaugeGroup.__init__(self, gate, DiagGaugeGroupElement, "Diagonal") 
Example #24
Source File: gaugegroup.py    From pyGSTi with Apache License 2.0
def __init__(self, dim):
        """
        Create a new gauge group with gauge-transform dimension `dim`, which
        should be the same as `mdl.dim` where `mdl` is a :class:`Model` you
        might gauge-transform.
        """
        from . import operation as _op  # b/c operation.py imports gaugegroup
        ltrans = _np.identity(dim, 'd')
        rtrans = _np.identity(dim, 'd')
        baseMx = _np.identity(dim, 'd')
        parameterArray = _np.zeros(dim - 1, 'd')
        parameterToBaseIndicesMap = {i: [(i + 1, i + 1)] for i in range(dim - 1)}
        gate = _op.LinearlyParamDenseOp(baseMx, parameterArray,
                                        parameterToBaseIndicesMap,
                                        ltrans, rtrans, real=True)
        OpGaugeGroup.__init__(self, gate, TPDiagGaugeGroupElement, "TP Diagonal") 
Example #25
Source File: test_distribution.py    From chainerrl with MIT License
def test_log_prob(self):
        sample = self.distrib.sample()
        sample_log_prob = self.distrib.log_prob(sample)
        for b in range(self.batch_size):
            cov = (np.identity(self.ndim, dtype=np.float32) *
                   self.var[b])
            desired_pdf = scipy.stats.multivariate_normal(
                self.mean[b], cov).pdf(sample.array[b])
            np.testing.assert_allclose(
                sample_log_prob.array[b],
                np.log(desired_pdf), rtol=1e-4) 
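
np.identity(ndim) * var is the isotropic covariance matrix var * I used to build the reference distribution in these tests. A minimal stand-alone version of the same comparison for a single Gaussian (the values are illustrative):

import numpy as np
import scipy.stats

ndim, var = 3, 0.5
mean = np.zeros(ndim)
cov = np.identity(ndim, dtype=np.float32) * var   # var on the diagonal, zeros elsewhere
x = np.random.randn(ndim)
log_p = scipy.stats.multivariate_normal(mean, cov).logpdf(x)
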
Example #26
Source File: test_distribution.py    From chainerrl with MIT License
def test_prob(self):
        sample = self.distrib.sample()
        sample_prob = self.distrib.prob(sample)
        for b in range(self.batch_size):
            cov = (np.identity(self.ndim, dtype=np.float32) *
                   self.var[b])
            desired_pdf = scipy.stats.multivariate_normal(
                self.mean[b], cov).pdf(sample.array[b])
            np.testing.assert_allclose(
                sample_prob.array[b],
                desired_pdf, rtol=1e-5) 
Example #27
Source File: gifti.py    From me-ica with GNU Lesser General Public License v2.1
def __init__(self, dataspace = 0, xformspace = 0, xform = None):
        self.dataspace = dataspace
        self.xformspace = xformspace
        if xform is None:
            # create identity matrix
            self.xform = np.identity(4)
        else:
            self.xform = xform 
Example #28
Source File: statefbk_test.py    From python-control with BSD 3-Clause "New" or "Revised" License
def test_dare(self):
        #discrete-time
        A = np.diag([0.5,2])
        B = np.identity(2)
        Q = np.identity(2)
        R = np.identity(2)
        S = 0 * B
        E = np.identity(2)
        X, L , G = dare(A, B, Q, R, S, E, stabilizing=True)
        assert np.all(np.abs(L) < 1)
        X, L , G = dare(A, B, Q, R, S, E, stabilizing=False)
        assert np.all(np.abs(L) > 1) 
Example #29
Source File: linthompsamp.py    From striatum with BSD 2-Clause "Simplified" License
def __init__(self, history_storage, model_storage, action_storage,
                 recommendation_cls=None, context_dimension=128, delta=0.5,
                 R=0.01, epsilon=0.5, random_state=None):
        super(LinThompSamp, self).__init__(history_storage, model_storage,
                                           action_storage, recommendation_cls)
        self.random_state = get_random_state(random_state)
        self.context_dimension = context_dimension

        # 0 < delta < 1
        if not isinstance(delta, float):
            raise ValueError("delta should be float")
        elif (delta < 0) or (delta >= 1):
            raise ValueError("delta should be in (0, 1]")
        else:
            self.delta = delta

        # R > 0
        if not isinstance(R, float):
            raise ValueError("R should be float")
        elif R <= 0:
            raise ValueError("R should be positive")
        else:
            self.R = R  # pylint: disable=invalid-name

        # 0 < epsilon < 1
        if not isinstance(epsilon, float):
            raise ValueError("epsilon should be float")
        elif (epsilon < 0) or (epsilon > 1):
            raise ValueError("epsilon should be in (0, 1)")
        else:
            self.epsilon = epsilon

        # model initialization
        B = np.identity(self.context_dimension)  # pylint: disable=invalid-name
        mu_hat = np.zeros(shape=(self.context_dimension, 1))
        f = np.zeros(shape=(self.context_dimension, 1))
        self._model_storage.save_model({'B': B, 'mu_hat': mu_hat, 'f': f}) 
Example #30
Source File: test_distribution.py    From chainerrl with MIT License
def test_entropy(self):
        entropy = self.distrib.entropy
        for b in range(self.batch_size):
            cov = (np.identity(self.ndim, dtype=np.float32) *
                   self.var[b])
            desired_entropy = scipy.stats.multivariate_normal(
                self.mean[b], cov).entropy()
            np.testing.assert_allclose(
                entropy.array[b], desired_entropy, rtol=1e-4)