Python functools.reduce() Examples
The following are 30 code examples of functools.reduce().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module functools, or try the search function.
Example #1
Source File: afl.py From firmanal with MIT License | 6 votes |
def extract(iid, bindir):
    """Extract executable binaries, then library links, from the firmware
    image tarball ``<bindir>/../../../../images/<iid>.tar.gz`` into ``bindir``.

    NOTE(review): ``iid`` is interpolated directly into SQL text and into a
    shell command string (``subprocess.run(..., shell=True)``). If ``iid`` can
    ever come from untrusted input this is SQL and shell injection — confirm
    upstream sanitization, or switch to parameterized queries and
    ``shell=False`` argument lists.
    """
    print('Extracting binaries......')
    query = '''select filename from object_to_image where iid=''' + iid + ''' and score>0 and (mime='application/x-executable; charset=binary' or mime='application/x-object; charset=binary' or mime='application/x-sharedlib; charset=binary') order by score DESC;'''
    rows = dbquery(query)
    # dbquery returns a list of rows; flatten, prefix each path with '.' so it
    # is relative inside the tarball, and join with spaces for tar's arg list.
    # join() also handles an empty result (the old reduce() raised TypeError).
    wanted = ' '.join('.' + name for row in rows for name in row)
    cmd = 'tar xf ' + bindir + '/../../../../images/' + iid + '.tar.gz -C ' + bindir + ' ' + wanted
    subprocess.run([cmd], shell=True)

    print('Extracting library links......')
    query = '''select filename from object_to_image where iid=''' + iid + ''' and regular_file='f';'''
    rows = dbquery(query)
    # Same flattening, but keep only paths that mention 'lib'.
    wanted = ' '.join('.' + name for row in rows for name in row if 'lib' in name)
    cmd = 'tar xf ' + bindir + '/../../../../images/' + iid + '.tar.gz -C ' + bindir + ' ' + wanted
    subprocess.run([cmd], shell=True)
Example #2
Source File: cdft.py From pyscf with Apache License 2.0 | 6 votes |
def fast_iao_mullikan_pop(mf,cell,a=None): ''' Input: mf -- a preconverged mean fild object Returns: mullikan populaion analysis in the basisIAO a ''' # # here we convert the density matrix to the IAO basis # if a is None: a = numpy.eye(mf.make_rdm1().shape[1]) #converts the occupied MOs to the IAO basis ovlpS = mf.get_ovlp() CIb = reduce(numpy.dot, (a.T, ovlpS , mf.make_rdm1())) # # This is the mullikan population below here # mo_occ = mf.mo_coeff[:,mf.mo_occ>0] mo_occ = reduce(numpy.dot, (a.T, mf.get_ovlp(), mo_occ)) dm = numpy.dot(mo_occ, mo_occ.T) * 2 pmol = cell.copy() pmol.build(False, False, basis='minao') return mf.mulliken_pop(pmol, dm, s=numpy.eye(pmol.nao_nr()))
Example #3
Source File: xrft.py From xrft with MIT License | 6 votes |
def _apply_window(da, dims, window_type='hanning'):
    """Creating windows in dimensions dims."""
    if window_type not in ['hanning']:
        raise NotImplementedError("Only hanning window is supported for now.")

    numpy_win_func = getattr(np, window_type)
    if da.chunks:
        # dask-backed array: build each window lazily as a delayed 1-d task.
        def dask_win_func(n):
            return dsar.from_delayed(
                delayed(numpy_win_func, pure=True)(n), (n,), float)
        win_func = dask_win_func
    else:
        win_func = numpy_win_func

    # One taper DataArray per requested dimension, aligned on that dim.
    windows = []
    for d in dims:
        axis = da[d]
        windows.append(xr.DataArray(win_func(len(axis)),
                                    dims=axis.dims, coords=axis.coords))
    # Multiply the tapers together (last dimension first) and apply to da.
    return da * reduce(operator.mul, windows[::-1])
Example #4
Source File: convert_Basset_to_pytorch.py From models with MIT License | 6 votes |
def simplify_source(s):
    """Collapse verbose generated-pytorch layer reprs into compact source.

    s: iterable of source-line strings. Each line has redundant
    default-argument tails stripped, is suffixed with ',\n', has its first
    character dropped, and all lines are concatenated into one string.

    Returns '' for empty input (the old reduce()-based concatenation raised
    TypeError); joining is also linear instead of quadratic.
    """
    # Ordered (old, new) pairs; longer, more specific patterns come first so
    # the generic '),#...' fallbacks don't fire too early.
    replacements = [
        (',(1, 1),(0, 0),1,1,bias=True),#Conv2d', ')'),
        (',(0, 0),1,1,bias=True),#Conv2d', ')'),
        (',1,1,bias=True),#Conv2d', ')'),
        (',bias=True),#Conv2d', ')'),
        ('),#Conv2d', ')'),
        (',1e-05,0.1,True),#BatchNorm2d', ')'),
        ('),#BatchNorm2d', ')'),
        (',(0, 0),ceil_mode=False),#MaxPool2d', ')'),
        (',ceil_mode=False),#MaxPool2d', ')'),
        ('),#MaxPool2d', ')'),
        (',(0, 0),ceil_mode=False),#AvgPool2d', ')'),
        (',ceil_mode=False),#AvgPool2d', ')'),
        (',bias=True)),#Linear', ')), # Linear'),
        (')),#Linear', ')), # Linear'),
    ]
    out = []
    for line in s:
        for old, new in replacements:
            line = line.replace(old, new)
        # Same transform as the original: append ',\n' then drop char 0.
        out.append('{},\n'.format(line)[1:])
    return ''.join(out)
Example #5
Source File: khf.py From pyscf with Apache License 2.0 | 6 votes |
def canonicalize(mf, mo_coeff_kpts, mo_occ_kpts, fock=None):
    '''Canonicalize the given k-point MO coefficients.

    For each k-point the Fock matrix is diagonalized separately within the
    occupied (mo_occ == 2) and virtual subspaces, so the occupied/virtual
    partition is preserved while the orbitals become Fock eigenvectors.

    Returns:
        (mo_energy, mo_coeff): lists over k-points of orbital energies and
        canonicalized coefficient matrices.
    '''
    if fock is None:
        # Build the Fock matrix from the density of the supplied orbitals.
        dm = mf.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
        fock = mf.get_fock(dm=dm)
    mo_coeff = []
    mo_energy = []
    for k, mo in enumerate(mo_coeff_kpts):
        mo1 = np.empty_like(mo)
        mo_e = np.empty_like(mo_occ_kpts[k])
        occidx = mo_occ_kpts[k] == 2  # doubly-occupied orbitals (closed shell)
        viridx = ~occidx
        for idx in (occidx, viridx):
            if np.count_nonzero(idx) > 0:
                orb = mo[:,idx]
                # Project the Fock matrix into this subspace and diagonalize.
                f1 = reduce(np.dot, (orb.T.conj(), fock[k], orb))
                e, c = scipy.linalg.eigh(f1)
                mo1[:,idx] = np.dot(orb, c)
                mo_e[idx] = e
        mo_coeff.append(mo1)
        mo_energy.append(mo_e)
    return mo_energy, mo_coeff
Example #6
Source File: kuhf.py From pyscf with Apache License 2.0 | 6 votes |
def spin_square(self, mo_coeff=None, s=None):
    '''Treating the k-point sampling wfn as a giant Slater determinant,
    the spin_square value is the <S^2> of the giant determinant.

    Returns:
        (ss, multiplicity): the <S^2> expectation value and 2S+1.
    '''
    nkpts = len(self.kpts)
    if mo_coeff is None:
        # Occupied alpha and beta orbitals at each k-point.
        mo_a = [self.mo_coeff[0][k][:,self.mo_occ[0][k]>0] for k in range(nkpts)]
        mo_b = [self.mo_coeff[1][k][:,self.mo_occ[1][k]>0] for k in range(nkpts)]
    else:
        mo_a, mo_b = mo_coeff
    if s is None:
        s = self.get_ovlp()

    nelec_a = sum([mo_a[k].shape[1] for k in range(nkpts)])
    nelec_b = sum([mo_b[k].shape[1] for k in range(nkpts)])
    # Transverse part <Sx^2 + Sy^2>: starts at (Na+Nb)/2 and is reduced by
    # the squared alpha-beta orbital overlaps, accumulated over k-points.
    ssxy = (nelec_a + nelec_b) * .5
    for k in range(nkpts):
        sij = reduce(np.dot, (mo_a[k].T.conj(), s[k], mo_b[k]))
        ssxy -= np.einsum('ij,ij->', sij.conj(), sij).real
    # Longitudinal part <Sz^2> for a single determinant.
    ssz = (nelec_b-nelec_a)**2 * .25
    ss = ssxy + ssz
    # Recover S from S(S+1) = <S^2>; note this rebinds the local name s.
    s = np.sqrt(ss+.25) - .5
    return ss, s*2+1
Example #7
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 6 votes |
def row_norms(ii, f=Ellipsis, squared=False):
    '''
    row_norms(ii) yields a potential function h(x) that calculates the vector norms of the rows of
      the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    row_norms(ii, f) yield a potential function h(x) equivalent to compose(row_norms(ii), f).

    If squared is True, the squared norms are produced (skipping the sqrt).
    '''
    try:
        # ii given as a shape tuple (n,m): build the index matrix 0..n*m-1.
        (n,m) = ii
        ii = np.reshape(np.arange(n*m), (n,m))
    except Exception: ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        # Constant potential: evaluate the norms eagerly, wrap as a constant.
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii.T], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    # Sum of squared column-slices over ii's columns = squared row norms,
    # expressed symbolically in the potential-function algebra.
    F = reduce(lambda a,b: a + b, [part(Ellipsis, col)**2 for col in ii.T])
    F = compose(F, f)
    if not squared: F = sqrt(F)
    return F
Example #8
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 6 votes |
def col_norms(ii, f=Ellipsis, squared=False):
    '''
    col_norms(ii) yields a potential function h(x) that calculates the vector norms of the columns
      of the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    col_norms(ii, f) yield a potential function h(x) equivalent to compose(col_norms(ii), f).

    If squared is True, the squared norms are produced (skipping the sqrt).
    '''
    try:
        # ii given as a shape tuple (n,m): build the index matrix 0..n*m-1.
        (n,m) = ii
        ii = np.reshape(np.arange(n*m), (n,m))
    except Exception: ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        # Constant potential: evaluate the norms eagerly, wrap as a constant.
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    # Sum of squared row-slices over ii's rows = squared column norms
    # (cf. row_norms, which iterates ii.T instead).
    F = reduce(lambda a,b: a + b, [part(Ellipsis, col)**2 for col in ii])
    F = compose(F, f)
    if not squared: F = sqrt(F)
    return F
Example #9
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 6 votes |
def cplus(*args):
    '''
    cplus(a, b...) returns the sum of all the values as a numpy array object. Like numpy's add
      function or a+b syntax, plus will thread over the latest dimension possible. Additionally,
      cplus works correctly with sparse arrays.
    '''
    count = len(args)
    # Trivial arities first; >2 args fold pairwise via plus.
    if count == 0: return np.asarray(0)
    if count == 1: return np.asarray(args[0])
    if count > 2: return reduce(plus, args)
    (a, b) = args
    if sps.issparse(a):
        # Sparse + dense: promote a 0-d dense operand to a 1x1 matrix.
        if not sps.issparse(b):
            b = np.asarray(b)
            if len(b.shape) == 0:
                b = np.reshape(b, (1, 1))
    elif sps.issparse(b):
        a = np.asarray(a)
        if len(a.shape) == 0:
            a = np.reshape(a, (1, 1))
    else:
        # Both dense: coerce to arrays so + broadcasts numpy-style.
        a = np.asarray(a)
        b = np.asarray(b)
    return a + b
Example #10
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 6 votes |
def ctimes(*args):
    '''
    ctimes(a, b...) returns the product of all the values as a numpy array object. Like numpy's
      multiply function or a*b syntax, times will thread over the latest dimension possible; thus
      if a.shape is (4,2) and b.shape is 2, times(a,b) is a equivalent to a * b.

    Unlike numpy's multiply function, ctimes works with sparse matrices and will reify them.
    '''
    n = len(args)
    if n == 0: return np.asarray(0)
    elif n == 1: return np.asarray(args[0])
    # BUG FIX: this previously returned reduce(plus, args), i.e. it SUMMED
    # when given more than two arguments (copy-paste from cplus). Fold with
    # ctimes itself so each pairwise step keeps the sparse-aware handling.
    elif n > 2: return reduce(ctimes, args)
    (a,b) = args
    # Sparse operands use .multiply (elementwise) to avoid densifying.
    if   sps.issparse(a): return a.multiply(b)
    elif sps.issparse(b): return b.multiply(a)
    else: return np.asarray(a) * b
Example #11
Source File: tabulate.py From cs294-112_hws with MIT License | 6 votes |
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    # Classify each value, then widen from int (the most specific seed)
    # via _more_generic until one type fits every value.
    types = [_type(s, has_invisible) for s in strings ]
    return reduce(_more_generic, types, int)
Example #12
Source File: rl.py From fine-lm with MIT License | 6 votes |
def feed_forward_categorical_fun(action_space, config, observations):
    """Feed-forward categorical policy/value network.

    Builds two MLP heads over flattened observations: a categorical policy
    over action_space.n actions and a scalar value estimate.

    Returns a NetworkOutput(policy, value, identity_action_postprocess).
    """
    if not isinstance(action_space, gym.spaces.Discrete):
        raise ValueError("Expecting discrete action space.")
    # Flatten each observation: [batch, time, prod(remaining static dims)].
    flat_observations = tf.reshape(observations, [
        tf.shape(observations)[0], tf.shape(observations)[1],
        functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
    with tf.variable_scope("network_parameters"):
        with tf.variable_scope("policy"):
            x = flat_observations
            for size in config.policy_layers:
                x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
            # Raw logits; no activation so Categorical normalizes them.
            logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                       activation_fn=None)
        with tf.variable_scope("value"):
            # Separate (unshared) MLP for the value head.
            x = flat_observations
            for size in config.value_layers:
                x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
            value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
    policy = tf.contrib.distributions.Categorical(logits=logits)
    return NetworkOutput(policy, value, lambda a: a)
Example #13
Source File: x2c.py From pyscf with Apache License 2.0 | 6 votes |
def _get_r(s, snesc): # R^dag \tilde{S} R = S # R = S^{-1/2} [S^{-1/2}\tilde{S}S^{-1/2}]^{-1/2} S^{1/2} w, v = numpy.linalg.eigh(s) idx = w > 1e-14 v = v[:,idx] w_sqrt = numpy.sqrt(w[idx]) w_invsqrt = 1 / w_sqrt # eigenvectors of S as the new basis snesc = reduce(numpy.dot, (v.conj().T, snesc, v)) r_mid = numpy.einsum('i,ij,j->ij', w_invsqrt, snesc, w_invsqrt) w1, v1 = numpy.linalg.eigh(r_mid) idx1 = w1 > 1e-14 v1 = v1[:,idx1] r_mid = numpy.dot(v1/numpy.sqrt(w1[idx1]), v1.conj().T) r = numpy.einsum('i,ij,j->ij', w_invsqrt, r_mid, w_sqrt) # Back transform to AO basis r = reduce(numpy.dot, (v, r, v.conj().T)) return r
Example #14
Source File: rl.py From fine-lm with MIT License | 6 votes |
def dense_bitwise_categorical_fun(action_space, config, observations):
    """Dense network with bitwise input and categorical output.

    Observations are embedded bitwise (8 bits -> 32-dim embedding),
    flattened per timestep, and passed through two dense layers feeding a
    categorical policy head and a scalar value head.
    """
    del config
    obs_shape = common_layers.shape_list(observations)
    x = tf.reshape(observations, [-1] + obs_shape[2:])
    with tf.variable_scope("network_parameters"):
        with tf.variable_scope("dense_bitwise"):
            x = discretization.int_to_bit_embed(x, 8, 32)
            flat_x = tf.reshape(
                x, [obs_shape[0], obs_shape[1],
                    functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])
            x = tf.contrib.layers.fully_connected(flat_x, 256, tf.nn.relu)
            # BUG FIX: the 128-unit layer previously read from flat_x,
            # silently discarding the 256-unit layer above; feed it the
            # previous layer's output instead.
            x = tf.contrib.layers.fully_connected(x, 128, tf.nn.relu)
            logits = tf.contrib.layers.fully_connected(
                x, action_space.n, activation_fn=None)
            value = tf.contrib.layers.fully_connected(
                x, 1, activation_fn=None)[..., 0]
    policy = tf.contrib.distributions.Categorical(logits=logits)
    return NetworkOutput(policy, value, lambda a: a)
Example #15
Source File: common_attention.py From fine-lm with MIT License | 6 votes |
def gather_indices_2d(x, block_shape, block_stride): """Getting gather indices.""" # making an identity matrix kernel kernel = tf.eye(block_shape[0] * block_shape[1]) kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1]) # making indices [1, h, w, 1] to appy convs x_shape = common_layers.shape_list(x) indices = tf.range(x_shape[2] * x_shape[3]) indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1]) indices = tf.nn.conv2d( tf.cast(indices, tf.float32), kernel, strides=[1, block_stride[0], block_stride[1], 1], padding="VALID") # making indices [num_blocks, dim] to gather dims = common_layers.shape_list(indices)[:3] if all([isinstance(dim, int) for dim in dims]): num_blocks = functools.reduce(operator.mul, dims, 1) else: num_blocks = tf.reduce_prod(dims) indices = tf.reshape(indices, [num_blocks, -1]) return tf.cast(indices, tf.int32)
Example #16
Source File: ddpg.py From lirpg with MIT License | 6 votes |
def setup_critic_optimizer(self):
    """Build the critic training graph: clipped-target MSE loss, optional L2
    regularization, flattened gradients, and an MpiAdam optimizer."""
    logger.info('setting up critic optimizer')
    # Normalize target returns with running stats, clip to the valid range.
    normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        # Regularize only hidden-layer kernels (skip biases and the output layer).
        critic_reg_vars = [var for var in self.critic.trainable_vars if 'kernel' in var.name and 'output' not in var.name]
        for var in critic_reg_vars:
            logger.info(' regularizing: {}'.format(var.name))
        logger.info(' applying l2 regularization with {}'.format(self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars
        )
        self.critic_loss += critic_reg
    critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
    # Parameter count of each variable = product of its shape dimensions.
    critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info(' critic shapes: {}'.format(critic_shapes))
    logger.info(' critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08)
Example #17
Source File: to_wiki.py From mindustry-modding with GNU General Public License v3.0 | 6 votes |
def normalize(md):
    '''Normalize anchors.

    Rewrites every markdown link of the form [desc](#anchor) so the anchor is
    lower-cased with spaces/%20 turned into '-' and '~'/'.' stripped, matching
    wiki anchor conventions. Returns the rewritten markdown.
    '''
    def on_match(link):
        # Returns the (old, new) link text pair for one regex match.
        desc = link.group(1)
        old = link.group(2)
        href = (link.group(2)
                .lower()
                .replace('%20', '-')
                .replace(" ", "-")
                .replace("~", "")
                .replace(".", ""))
        # BUG FIX: removed leftover debug print(old, new) that wrote every
        # replacement pair to stdout on each call.
        return f'[{desc}]({old})', f'[{desc}]({href})'

    replacers = set((on_match(x)
                     for x in re.finditer(r'\[([^\]\[]*)\]\((#[^\)]*)\)', md)))
    # Apply every (old, new) substitution to the document in turn.
    return ft.reduce(lambda md, x: md.replace(x[0], x[1]), replacers, md)
Example #18
Source File: ddpg.py From HardRLWithYoutube with MIT License | 6 votes |
def setup_critic_optimizer(self):
    """Build the critic training graph: clipped-target MSE loss, optional L2
    regularization, flattened gradients, and an MpiAdam optimizer."""
    logger.info('setting up critic optimizer')
    # Normalize target returns with running stats, clip to the valid range.
    normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        # Regularize only hidden-layer kernels (skip biases and the output layer).
        critic_reg_vars = [var for var in self.critic.trainable_vars if 'kernel' in var.name and 'output' not in var.name]
        for var in critic_reg_vars:
            logger.info(' regularizing: {}'.format(var.name))
        logger.info(' applying l2 regularization with {}'.format(self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars
        )
        self.critic_loss += critic_reg
    critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
    # Parameter count of each variable = product of its shape dimensions.
    critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info(' critic shapes: {}'.format(critic_shapes))
    logger.info(' critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars, beta1=0.9, beta2=0.999, epsilon=1e-08)
Example #19
Source File: loader.py From vergeml with MIT License | 6 votes |
def begin_read_samples(self):
    """Open the input source, compute per-split sample multipliers and
    cached sample counts, and pre-allocate RNG slots per split."""
    # Cache already populated: nothing to recompute.
    if self.cache:
        return

    self.input.begin_read_samples()

    # copy meta
    if self.output:
        self.output.meta = self.input.meta

    self.multipliers = {}
    self.rngs = {}

    def _mul(split):
        # Product of each op's sample multiplier for this split (1 if none).
        return reduce(operator.mul, map(lambda op: _get_multiplier(split, op), self.ops), 1)

    for split in SPLITS:
        self.multipliers[split] = _mul(split)
        self.cache[split] = self._calculate_num_samples(split)
        # One RNG slot per cached sample, filled lazily later.
        self.rngs[split] = self.cache[split] * [None]

    self.input.end_read_samples()
Example #20
Source File: test_x2c_hess.py From pyscf with Apache License 2.0 | 6 votes |
def _sqrt2(a0, a1i, a1j, a2ij): '''Solving second order derivative of x^2 = a''' w, v = scipy.linalg.eigh(a0) w = numpy.sqrt(w) a1i = reduce(numpy.dot, (v.conj().T, a1i, v)) x1i = a1i / (w[:,None] + w) a1j = reduce(numpy.dot, (v.conj().T, a1j, v)) x1j = a1j / (w[:,None] + w) a2ij = reduce(numpy.dot, (v.conj().T, a2ij, v)) tmp = x1i.dot(x1j) a2ij -= tmp + tmp.conj().T x2 = a2ij / (w[:,None] + w) x2 = reduce(numpy.dot, (v, x2, v.conj().T)) return x2
Example #21
Source File: 020-dmrg_casscf_nevpt2_for_FeS.py From pyscf with Apache License 2.0 | 5 votes |
def psort(ova, fav, coeff):
    '''Sort orbitals by descending occupation number.

    NOTE(review): this relies on script-level globals `s12` and `pT` (the
    Lowdin S^1/2 matrix and the density matrix) that are not parameters — it
    only works inside the script that defines them; confirm before reuse.
    '''
    # pT is density matrix, fav is Fock matrix
    # OCC-SORT
    pTnew = 2.0*reduce(numpy.dot,(coeff.T,s12,pT,s12,coeff))
    nocc = numpy.diag(pTnew)
    # argsort of the negated occupations gives descending order.
    index = numpy.argsort(-nocc)
    ncoeff = coeff[:,index]
    nocc = nocc[index]
    # Diagonal of the (ova-weighted) Fock matrix in the sorted orbital basis.
    enorb = numpy.diag(reduce(numpy.dot,(ncoeff.T,ova,fav,ova,ncoeff)))
    return ncoeff, nocc, enorb
    # E-SORT
Example #22
Source File: test_concat.py From myhdl with GNU Lesser General Public License v2.1 | 5 votes |
def ConcatToSizedBase(self, bases, extslist):
    # For every (base, extension-list) pair, concat() must equal the integer
    # value and bit length of the concatenated reference bit strings.
    for base, basestr in zip(bases, self.bases):
        for exts, extstr in zip(extslist, self.extslist):
            bv = concat(base, *exts)
            # Reference: base bit string followed by all extension strings.
            refstr = basestr + reduce(operator.add, extstr)
            reflen = len(refstr)
            ref = int(refstr, 2)
            assert bv == ref
            assert len(bv) == reflen
Example #23
Source File: 32-dmrg_casscf_nevpt2_for_FeS.py From pyscf with Apache License 2.0 | 5 votes |
def scdm(coeff, overlap, aux):
    #
    # Argument coeff is a set of orthogonal orbitals |O> (eg occupied HF
    # orbitals); aux is a set of localized orbitals. One can define a subset |B>
    # of aux, which has the closest overlap to the coeff space.
    # The (orthogonalized) resultant local orbitals |B> can be considered as the
    # localized coeff |O>
    #
    # |B> = |O><O|aux>, in which det(<O|aux>) is maximized;
    # return lowdin(|B>)
    #
    no = coeff.shape[1]
    ova = reduce(numpy.dot,(coeff.T, overlap, aux))  # ova = no*nb
    # Column-pivoted QR selects the aux columns with the largest overlap.
    q,r,piv = scipy.linalg.qr(ova, pivoting=True)
    # piv[:no] defines the subset of aux which has the largest overlap to coeff space
    bc = ova[:,piv[:no]]
    ova = numpy.dot(bc.T,bc)
    # Symmetric (Lowdin) orthogonalization of the selected subset.
    s12inv = lowdin(ova)
    cnew = reduce(numpy.dot,(coeff,bc,s12inv))
    return cnew
    #
    # Various choices for the localized orbitals
    # * the non-orthogonal AOs
    #     aux=numpy.identity(nb)
    # * Lowdin orthogonalized AOs
Example #24
Source File: 020-dmrg_casscf_nevpt2_for_FeS.py From pyscf with Apache License 2.0 | 5 votes |
def scdm(coeff, overlap, aux):
    #
    # Argument coeff is a set of orthogonal orbitals |O> (eg occupied HF
    # orbitals); aux is a set of localized orbitals. One can define a subset |B>
    # of aux, which has the closest overlap to the coeff space.
    # The (orthogonalized) resultant local orbitals |B> can be considered as the
    # localized coeff |O>
    #
    # |B> = |O><O|aux>, in which det(<O|aux>) is maximized;
    # return lowdin(|B>)
    #
    no = coeff.shape[1]
    ova = reduce(numpy.dot,(coeff.T, overlap, aux))  # ova = no*nb
    # Column-pivoted QR selects the aux columns with the largest overlap.
    q,r,piv = scipy.linalg.qr(ova, pivoting=True)
    # piv[:no] defines the subset of aux which has the largest overlap to coeff space
    bc = ova[:,piv[:no]]
    ova = numpy.dot(bc.T,bc)
    # Symmetric (Lowdin) orthogonalization of the selected subset.
    s12inv = lowdin(ova)
    cnew = reduce(numpy.dot,(coeff,bc,s12inv))
    return cnew
    #
    # Various choices for the localized orbitals
    # * the non-orthogonal AOs
    #     aux=numpy.identity(nb)
    # * Lowdin orthogonalized AOs
Example #25
Source File: x2c.py From pyscf with Apache License 2.0 | 5 votes |
def _get_rmat(self, x=None):
    '''The matrix (in AO basis) that changes metric from NESC metric to NR metric'''
    xmol = self.get_xmol()[0]
    if x is None:
        x = self.get_xmat(xmol)
    c = lib.param.LIGHT_SPEED
    s = xmol.intor_symmetric('int1e_ovlp_spinor')        # overlap S
    t = xmol.intor_symmetric('int1e_spsp_spinor') * .5   # (sigma.p)(sigma.p)/2
    # NESC metric: s1 = S + X^dag T X / (2 c^2)
    s1 = s + reduce(numpy.dot, (x.conj().T, t, x)) * (.5/c**2)
    return _get_r(s, s1)
Example #26
Source File: node.py From tensortrade with Apache License 2.0 | 5 votes |
def forward(self):
    """Fold this node's binary func over the values of its input nodes."""
    values = [node.value for node in self.inputs]
    return functools.reduce(self.func, values)
Example #27
Source File: node.py From tensortrade with Apache License 2.0 | 5 votes |
def generic_name(self) -> str:
    """Generic label for this node type."""
    label = "reduce"
    return label
Example #28
Source File: functions.py From sopt with MIT License | 5 votes |
def _shubert(x,n): ''' :param x: :param n: :return: ''' tmps = [] for i in range(n): tmp = 0 for j in range(1,6): tmp += j*math.cos((j+1)*x[i]+j) tmps.append(tmp) res = reduce(lambda x,y:x*y,tmps) + 1000 return res
Example #29
Source File: khf.py From pyscf with Apache License 2.0 | 5 votes |
def mulliken_meta(cell, dm_ao_kpts, verbose=logger.DEBUG,
                  pre_orth_method=PRE_ORTH_METHOD, s=None):
    '''A modified Mulliken population analysis, based on meta-Lowdin AOs.

    Note this function only computes the Mulliken population for the gamma
    point density matrix.
    '''
    from pyscf.lo import orth
    if s is None:
        s = get_ovlp(cell)
    log = logger.new_logger(cell, verbose)
    log.note('Analyze output for *gamma point*')
    log.info(' To include the contributions from k-points, transform to a '
             'supercell then run the population analysis on the supercell\n'
             ' from pyscf.pbc.tools import k2gamma\n'
             ' k2gamma.k2gamma(mf).mulliken_meta()')
    log.note("KRHF mulliken_meta")
    # Only the k=0 (gamma-point) density and overlap are analyzed.
    dm_ao_gamma = dm_ao_kpts[0,:,:].real
    s_gamma = s[0,:,:].real
    # Build the meta-Lowdin orthogonal AO basis and project the density.
    c = orth.restore_ao_character(cell, pre_orth_method)
    orth_coeff = orth.orth_ao(cell, 'meta_lowdin', pre_orth_ao=c, s=s_gamma)
    c_inv = np.dot(orth_coeff.T, s_gamma)
    dm = reduce(np.dot, (c_inv, dm_ao_gamma, c_inv.T.conj()))
    log.note(' ** Mulliken pop on meta-lowdin orthogonal AOs **')
    # The orthogonal basis has unit overlap, hence the identity matrix here.
    return mol_hf.mulliken_pop(cell, dm, np.eye(orth_coeff.shape[0]), log)
Example #30
Source File: op_counter.py From MSDNet-PyTorch with MIT License | 5 votes |
def get_layer_param(model):
    """Total number of scalar parameters in ``model`` (sum over all
    parameter tensors of the product of their dimensions)."""
    total = 0
    for param in model.parameters():
        count = 1
        for dim in param.size():
            count *= dim
        total += count
    return total
### The input batch size should be 1 to call this function