Python functools.reduce() Examples
The following are 30 code examples of functools.reduce(), drawn from open-source projects; each example lists its source project, author, file, and license.
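For orientation, here is a minimal, self-contained sketch of what functools.reduce() does (illustrative only, not drawn from any of the projects below):

from functools import reduce
import operator

# reduce(f, seq) folds seq left to right: f(f(f(s0, s1), s2), s3) ...
total = reduce(operator.add, [1, 2, 3, 4])        # ((1 + 2) + 3) + 4 == 10

# The optional third argument seeds the fold and is returned for empty input;
# without it, reduce() raises TypeError on an empty sequence.
product = reduce(operator.mul, [1, 2, 3, 4], 1)   # 24
empty_product = reduce(operator.mul, [], 1)       # 1

Most of the examples below are variations on these two folds: summing or concatenating with operator.add or a lambda, and taking products of shape dimensions with operator.mul.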

Example #1
Source Project: vergeml Author: mme File: loader.py License: MIT License
def begin_read_samples(self):
    if self.cache:
        return

    self.input.begin_read_samples()

    # copy meta
    if self.output:
        self.output.meta = self.input.meta

    self.multipliers = {}
    self.rngs = {}

    def _mul(split):
        return reduce(operator.mul,
                      map(lambda op: _get_multiplier(split, op), self.ops), 1)

    for split in SPLITS:
        self.multipliers[split] = _mul(split)
        self.cache[split] = self._calculate_num_samples(split)
        self.rngs[split] = self.cache[split] * [None]

    self.input.end_read_samples()
Example #2
Source Project: xrft Author: xgcm File: xrft.py License: MIT License
def _apply_window(da, dims, window_type='hanning'):
    """Creating windows in dimensions dims."""

    if window_type not in ['hanning']:
        raise NotImplementedError("Only hanning window is supported for now.")

    numpy_win_func = getattr(np, window_type)

    if da.chunks:
        def dask_win_func(n):
            return dsar.from_delayed(
                delayed(numpy_win_func, pure=True)(n),
                (n,), float)
        win_func = dask_win_func
    else:
        win_func = numpy_win_func

    windows = [xr.DataArray(win_func(len(da[d])),
                            dims=da[d].dims, coords=da[d].coords)
               for d in dims]

    return da * reduce(operator.mul, windows[::-1])
Example #3
Source Project: models Author: kipoi File: convert_Basset_to_pytorch.py License: MIT License
def simplify_source(s):
    s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace('),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d',')'), s)
    s = map(lambda x: x.replace('),#BatchNorm2d',')'), s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d',')'), s)
    s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d',')'), s)
    s = map(lambda x: x.replace('),#MaxPool2d',')'), s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d',')'), s)
    s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d',')'), s)
    s = map(lambda x: x.replace(',bias=True)),#Linear',')), # Linear'), s)
    s = map(lambda x: x.replace(')),#Linear',')), # Linear'), s)
    s = map(lambda x: '{},\n'.format(x), s)
    s = map(lambda x: x[1:], s)
    s = reduce(lambda x, y: x + y, s)
    return s
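Note that in Python 3 each map() call above returns a lazy iterator, so none of the string replacements actually run until the final reduce() consumes the chain; reduce(lambda x, y: x + y, s) is equivalent in result to ''.join(s).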
Example #4
Source Project: neuropythy Author: noahbenson File: core.py License: GNU Affero General Public License v3.0
def row_norms(ii, f=Ellipsis, squared=False):
    '''
    row_norms(ii) yields a potential function h(x) that calculates the vector norms of the
      rows of the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    row_norms(ii, f) yields a potential function h(x) equivalent to compose(row_norms(ii), f).
    '''
    try:
        (n, m) = ii  # matrix shape given
        ii = np.reshape(np.arange(n*m), (n, m))
    except Exception:
        ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii.T], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    F = reduce(lambda a, b: a + b, [part(Ellipsis, col)**2 for col in ii.T])
    F = compose(F, f)
    if not squared:
        F = sqrt(F)
    return F
Example #5
Source Project: neuropythy Author: noahbenson File: core.py License: GNU Affero General Public License v3.0
def col_norms(ii, f=Ellipsis, squared=False):
    '''
    col_norms(ii) yields a potential function h(x) that calculates the vector norms of the
      columns of the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    col_norms(ii, f) yields a potential function h(x) equivalent to compose(col_norms(ii), f).
    '''
    try:
        (n, m) = ii  # matrix shape given
        ii = np.reshape(np.arange(n*m), (n, m))
    except Exception:
        ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    F = reduce(lambda a, b: a + b, [part(Ellipsis, col)**2 for col in ii])
    F = compose(F, f)
    if not squared:
        F = sqrt(F)
    return F
Example #6
Source Project: neuropythy Author: noahbenson File: core.py License: GNU Affero General Public License v3.0
def cplus(*args):
    '''
    cplus(a, b...) returns the sum of all the values as a numpy array object. Like numpy's
      add function or a+b syntax, plus will thread over the latest dimension possible.
      Additionally, cplus works correctly with sparse arrays.
    '''
    n = len(args)
    if n == 0:
        return np.asarray(0)
    elif n == 1:
        return np.asarray(args[0])
    elif n > 2:
        return reduce(plus, args)
    (a, b) = args
    if sps.issparse(a):
        if not sps.issparse(b):
            b = np.asarray(b)
            if len(b.shape) == 0:
                b = np.reshape(b, (1, 1))
    elif sps.issparse(b):
        a = np.asarray(a)
        if len(a.shape) == 0:
            a = np.reshape(a, (1, 1))
    else:
        a = np.asarray(a)
        b = np.asarray(b)
    return a + b
Example #7
Source Project: neuropythy Author: noahbenson File: core.py License: GNU Affero General Public License v3.0
def ctimes(*args):
    '''
    ctimes(a, b...) returns the product of all the values as a numpy array object. Like
      numpy's multiply function or a*b syntax, times will thread over the latest dimension
      possible; thus if a.shape is (4,2) and b.shape is 2, times(a,b) is equivalent to a * b.
      Unlike numpy's multiply function, ctimes works with sparse matrices and will reify them.
    '''
    n = len(args)
    if n == 0:
        return np.asarray(0)
    elif n == 1:
        return np.asarray(args[0])
    elif n > 2:
        return reduce(times, args)
    (a, b) = args
    if sps.issparse(a):
        return a.multiply(b)
    elif sps.issparse(b):
        return b.multiply(a)
    else:
        return np.asarray(a) * b
Example #8
Source Project: cs294-112_hws Author: xuwd11 File: tabulate.py License: MIT License
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    types = [_type(s, has_invisible) for s in strings]
    return reduce(_more_generic, types, int)
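Here the third argument to reduce() seeds the fold: the scan starts from int, the most specific type, and _more_generic widens the running type only as less-convertible values appear (see the doctests above).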
Example #9
Source Project: fine-lm Author: akzaidi File: rl.py License: MIT License
def feed_forward_categorical_fun(action_space, config, observations):
  """Feed-forward categorical."""
  if not isinstance(action_space, gym.spaces.Discrete):
    raise ValueError("Expecting discrete action space.")
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in config.policy_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                 activation_fn=None)
    with tf.variable_scope("value"):
      x = flat_observations
      for size in config.value_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  policy = tf.contrib.distributions.Categorical(logits=logits)
  return NetworkOutput(policy, value, lambda a: a)
Example #10
Source Project: fine-lm Author: akzaidi File: rl.py License: MIT License
def dense_bitwise_categorical_fun(action_space, config, observations):
  """Dense network with bitwise input and categorical output."""
  del config
  obs_shape = common_layers.shape_list(observations)
  x = tf.reshape(observations, [-1] + obs_shape[2:])
  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("dense_bitwise"):
      x = discretization.int_to_bit_embed(x, 8, 32)
      flat_x = tf.reshape(
          x, [obs_shape[0], obs_shape[1],
              functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])
      x = tf.contrib.layers.fully_connected(flat_x, 256, tf.nn.relu)
      x = tf.contrib.layers.fully_connected(x, 128, tf.nn.relu)
      logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                 activation_fn=None)
      value = tf.contrib.layers.fully_connected(
          x, 1, activation_fn=None)[..., 0]
  policy = tf.contrib.distributions.Categorical(logits=logits)
  return NetworkOutput(policy, value, lambda a: a)
Example #11
Source Project: fine-lm Author: akzaidi File: common_attention.py License: MIT License
def gather_indices_2d(x, block_shape, block_stride):
  """Getting gather indices."""
  # making an identity matrix kernel
  kernel = tf.eye(block_shape[0] * block_shape[1])
  kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
  # making indices [1, h, w, 1] to apply convs
  x_shape = common_layers.shape_list(x)
  indices = tf.range(x_shape[2] * x_shape[3])
  indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
  indices = tf.nn.conv2d(
      tf.cast(indices, tf.float32),
      kernel,
      strides=[1, block_stride[0], block_stride[1], 1],
      padding="VALID")
  # making indices [num_blocks, dim] to gather
  dims = common_layers.shape_list(indices)[:3]
  if all([isinstance(dim, int) for dim in dims]):
    num_blocks = functools.reduce(operator.mul, dims, 1)
  else:
    num_blocks = tf.reduce_prod(dims)
  indices = tf.reshape(indices, [num_blocks, -1])
  return tf.cast(indices, tf.int32)
Example #12
Source Project: lirpg Author: Hwhitetooth File: ddpg.py License: MIT License
def setup_critic_optimizer(self):
    logger.info('setting up critic optimizer')
    normalized_critic_target_tf = tf.clip_by_value(
        normalize(self.critic_target, self.ret_rms),
        self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(
        tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        critic_reg_vars = [var for var in self.critic.trainable_vars
                           if 'kernel' in var.name and 'output' not in var.name]
        for var in critic_reg_vars:
            logger.info('  regularizing: {}'.format(var.name))
        logger.info('  applying l2 regularization with {}'.format(self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars
        )
        self.critic_loss += critic_reg
    critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
    critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info('  critic shapes: {}'.format(critic_shapes))
    logger.info('  critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars,
                                   clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars,
                                    beta1=0.9, beta2=0.999, epsilon=1e-08)
Example #13
Source Project: mindustry-modding Author: SimonWoodburyForget File: to_wiki.py License: GNU General Public License v3.0
def normalize(md):
    '''Normalize anchors.'''
    def on_match(link):
        desc = link.group(1)
        old = link.group(2)
        href = (link.group(2)
                .lower()
                .replace('%20', '-')
                .replace(" ", "-")
                .replace("~", "")
                .replace(".", ""))
        old, new = f'[{desc}]({old})', f'[{desc}]({href})'
        print(old, new)
        return old, new

    replacers = set((on_match(x) for x in
                     re.finditer(r'\[([^\]\[]*)\]\((#[^\)]*)\)', md)))
    return ft.reduce(lambda md, x: md.replace(x[0], x[1]), replacers, md)
Example #14
Source Project: HardRLWithYoutube Author: MaxSobolMark File: ddpg.py License: MIT License
def setup_critic_optimizer(self):
    logger.info('setting up critic optimizer')
    normalized_critic_target_tf = tf.clip_by_value(
        normalize(self.critic_target, self.ret_rms),
        self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(
        tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        critic_reg_vars = [var for var in self.critic.trainable_vars
                           if 'kernel' in var.name and 'output' not in var.name]
        for var in critic_reg_vars:
            logger.info('  regularizing: {}'.format(var.name))
        logger.info('  applying l2 regularization with {}'.format(self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars
        )
        self.critic_loss += critic_reg
    critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
    critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info('  critic shapes: {}'.format(critic_shapes))
    logger.info('  critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars,
                                   clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars,
                                    beta1=0.9, beta2=0.999, epsilon=1e-08)
Example #15
Source Project: firmanal Author: kyechou File: afl.py License: MIT License
def extract(iid, bindir):
    print('Extracting binaries......')
    query = '''select filename from object_to_image where iid=''' + iid + \
            ''' and score>0 and (mime='application/x-executable; charset=binary' or mime='application/x-object; charset=binary' or mime='application/x-sharedlib; charset=binary') order by score DESC;'''
    wanted = dbquery(query)
    wanted = reduce((lambda a, b: a + b), wanted)
    wanted = map((lambda a: '.' + a), wanted)
    wanted = reduce((lambda a, b: a + ' ' + b), wanted)
    cmd = 'tar xf ' + bindir + '/../../../../images/' + iid + '.tar.gz -C ' + bindir + ' ' + wanted
    subprocess.run([cmd], shell=True)

    print('Extracting library links......')
    query = '''select filename from object_to_image where iid=''' + iid + \
            ''' and regular_file='f';'''
    wanted = dbquery(query)
    wanted = reduce((lambda a, b: a + b), wanted)
    wanted = filter((lambda a: 'lib' in a), wanted)
    wanted = map((lambda a: '.' + a), wanted)
    wanted = reduce((lambda a, b: a + ' ' + b), wanted)
    cmd = 'tar xf ' + bindir + '/../../../../images/' + iid + '.tar.gz -C ' + bindir + ' ' + wanted
    subprocess.run([cmd], shell=True)
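The map/filter/reduce pipeline here builds a space-separated argument string for tar; ' '.join('.' + a for a in wanted) would arguably be the more idiomatic spelling, but the version above keeps every step as an explicit fold.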
Example #16
Source Project: pyscf Author: pyscf File: x2c.py License: Apache License 2.0
def _get_r(s, snesc):
    # R^dag \tilde{S} R = S
    # R = S^{-1/2} [S^{-1/2}\tilde{S}S^{-1/2}]^{-1/2} S^{1/2}
    w, v = numpy.linalg.eigh(s)
    idx = w > 1e-14
    v = v[:, idx]
    w_sqrt = numpy.sqrt(w[idx])
    w_invsqrt = 1 / w_sqrt

    # eigenvectors of S as the new basis
    snesc = reduce(numpy.dot, (v.conj().T, snesc, v))
    r_mid = numpy.einsum('i,ij,j->ij', w_invsqrt, snesc, w_invsqrt)
    w1, v1 = numpy.linalg.eigh(r_mid)
    idx1 = w1 > 1e-14
    v1 = v1[:, idx1]
    r_mid = numpy.dot(v1 / numpy.sqrt(w1[idx1]), v1.conj().T)
    r = numpy.einsum('i,ij,j->ij', w_invsqrt, r_mid, w_sqrt)
    # Back transform to AO basis
    r = reduce(numpy.dot, (v, r, v.conj().T))
    return r
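reduce(numpy.dot, (a, b, c)) is a common idiom for a chained matrix product, equivalent to numpy.dot(numpy.dot(a, b), c); the pyscf examples below use it throughout for basis transformations.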
Example #17
Source Project: pyscf Author: pyscf File: test_x2c_hess.py License: Apache License 2.0
def _sqrt2(a0, a1i, a1j, a2ij):
    '''Solving second order derivative of x^2 = a'''
    w, v = scipy.linalg.eigh(a0)
    w = numpy.sqrt(w)
    a1i = reduce(numpy.dot, (v.conj().T, a1i, v))
    x1i = a1i / (w[:, None] + w)
    a1j = reduce(numpy.dot, (v.conj().T, a1j, v))
    x1j = a1j / (w[:, None] + w)

    a2ij = reduce(numpy.dot, (v.conj().T, a2ij, v))
    tmp = x1i.dot(x1j)
    a2ij -= tmp + tmp.conj().T
    x2 = a2ij / (w[:, None] + w)
    x2 = reduce(numpy.dot, (v, x2, v.conj().T))
    return x2
Example #18
Source Project: pyscf Author: pyscf File: cdft.py License: Apache License 2.0
def fast_iao_mullikan_pop(mf, cell, a=None):
    '''
    Input: mf -- a preconverged mean-field object
    Returns: Mulliken population analysis in the IAO basis a
    '''
    #
    # here we convert the density matrix to the IAO basis
    #
    if a is None:
        a = numpy.eye(mf.make_rdm1().shape[1])
    # converts the occupied MOs to the IAO basis
    ovlpS = mf.get_ovlp()
    CIb = reduce(numpy.dot, (a.T, ovlpS, mf.make_rdm1()))
    #
    # This is the Mulliken population below here
    #
    mo_occ = mf.mo_coeff[:, mf.mo_occ > 0]
    mo_occ = reduce(numpy.dot, (a.T, mf.get_ovlp(), mo_occ))
    dm = numpy.dot(mo_occ, mo_occ.T) * 2
    pmol = cell.copy()
    pmol.build(False, False, basis='minao')
    return mf.mulliken_pop(pmol, dm, s=numpy.eye(pmol.nao_nr()))
Example #19
Source Project: pyscf Author: pyscf File: kuhf.py License: Apache License 2.0
def spin_square(self, mo_coeff=None, s=None):
    '''Treating the k-point sampling wfn as a giant Slater determinant,
    the spin_square value is the <S^2> of the giant determinant.
    '''
    nkpts = len(self.kpts)
    if mo_coeff is None:
        mo_a = [self.mo_coeff[0][k][:, self.mo_occ[0][k] > 0] for k in range(nkpts)]
        mo_b = [self.mo_coeff[1][k][:, self.mo_occ[1][k] > 0] for k in range(nkpts)]
    else:
        mo_a, mo_b = mo_coeff
    if s is None:
        s = self.get_ovlp()

    nelec_a = sum([mo_a[k].shape[1] for k in range(nkpts)])
    nelec_b = sum([mo_b[k].shape[1] for k in range(nkpts)])
    ssxy = (nelec_a + nelec_b) * .5
    for k in range(nkpts):
        sij = reduce(np.dot, (mo_a[k].T.conj(), s[k], mo_b[k]))
        ssxy -= np.einsum('ij,ij->', sij.conj(), sij).real
    ssz = (nelec_b - nelec_a)**2 * .25
    ss = ssxy + ssz
    s = np.sqrt(ss + .25) - .5
    return ss, s * 2 + 1
Example #20
Source Project: pyscf Author: pyscf File: khf.py License: Apache License 2.0
def canonicalize(mf, mo_coeff_kpts, mo_occ_kpts, fock=None):
    if fock is None:
        dm = mf.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
        fock = mf.get_fock(dm=dm)
    mo_coeff = []
    mo_energy = []
    for k, mo in enumerate(mo_coeff_kpts):
        mo1 = np.empty_like(mo)
        mo_e = np.empty_like(mo_occ_kpts[k])
        occidx = mo_occ_kpts[k] == 2
        viridx = ~occidx
        for idx in (occidx, viridx):
            if np.count_nonzero(idx) > 0:
                orb = mo[:, idx]
                f1 = reduce(np.dot, (orb.T.conj(), fock[k], orb))
                e, c = scipy.linalg.eigh(f1)
                mo1[:, idx] = np.dot(orb, c)
                mo_e[idx] = e
        mo_coeff.append(mo1)
        mo_energy.append(mo_e)
    return mo_energy, mo_coeff
Example #21
Source Project: vergeml Author: mme File: loader.py License: MIT License
def _calculate_num_samples(self, split):
    """Calculate the total number of samples after applying ops."""
    num_samples = self.input.num_samples(split)
    # calculate how much the samples will be augmented after going through ops
    multiplier = reduce(operator.mul,
                        map(lambda op: _get_multiplier(split, op), self.ops), 1)
    return int(num_samples * multiplier)
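Since Python 3.8, math.prod() computes the same product; the reduce(operator.mul, ..., 1) spelling is portable to older versions, and the initializer 1 means an empty ops list yields a multiplier of 1.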
Example #22
Source Project: neural-fingerprinting Author: StephanZheng File: utils_mnist.py License: BSD 3-Clause "New" or "Revised" License
def download_and_parse_mnist_file(file_name, datadir=None, force=False):
    file_name = maybe_download_mnist_file(file_name, datadir=datadir,
                                          force=force)

    # Open the file and unzip it if necessary
    if os.path.splitext(file_name)[1] == '.gz':
        open_fn = gzip.open
    else:
        open_fn = open

    # Parse the file
    with open_fn(file_name, 'rb') as file_descriptor:
        header = file_descriptor.read(4)
        assert len(header) == 4

        zeros, data_type, n_dims = struct.unpack('>HBB', header)
        assert zeros == 0

        hex_to_data_type = {
            0x08: 'B',
            0x09: 'b',
            0x0b: 'h',
            0x0c: 'i',
            0x0d: 'f',
            0x0e: 'd'}
        data_type = hex_to_data_type[data_type]

        dim_sizes = struct.unpack(
            '>' + 'I' * n_dims,
            file_descriptor.read(4 * n_dims))

        data = array.array(data_type, file_descriptor.read())
        data.byteswap()

        desired_items = functools.reduce(operator.mul, dim_sizes)
        assert len(data) == desired_items
        return np.array(data).reshape(dim_sizes)
Example #23
Source Project: multibootusb Author: mbusb File: test-grub.py License: GNU General Public License v2.0
def os_path_exists(f):
    chunks = reduce(lambda accum, x: accum + x.split('/'), f.split('\\'), [])
    if chunks[-1] in ['multibootusb.log', 'loopback.cfg']:
        return True
    if 'arch' in chunks:
        return True
    if chunks[1] in ['rootfs1.gz', 'rootfs2.gz']:
        return True
    if chunks == ['multibootusb', 'debian-sid', 'boot', 'rootfs4.gz']:
        return True
    return False
Example #24
Source Project: models Author: kipoi File: pretrained_model_reloaded_th.py License: MIT License
def forward(self, input):
    return reduce(self.lambda_func, self.forward_prepare(input))
Example #25
Source Project: models Author: kipoi File: convert_Basset_to_pytorch.py License: MIT License
def forward(self, input):
    # result is a Variable
    return reduce(self.lambda_func, self.forward_prepare(input))
Example #26
Source Project: models Author: kipoi File: model_architecture.py License: MIT License
def forward(self, input):
    return reduce(self.lambda_func, self.forward_prepare(input))
Example #27
Source Project: python-clean-architecture Author: pcah File: abstract.py License: MIT License
def _reduced_filter(self) -> t.Optional[Predicate]:
    """Before evaluation, sum up all filter predicates into a single one"""
    return None if self._is_trivial else reduce(and_, self._filters)

# lazy queries
Example #28
Source Project: VSE-C Author: ExplorerFreda File: numeral.py License: MIT License
def valid(word):
    # check number
    try:
        numbers = [w2n.word_to_num(it) for it in word.split()]
        if len(numbers) > 1:
            return False
    except ValueError:
        return False
    # check English
    chars = map(lambda c: 'a' <= c <= 'z', word)
    return reduce(lambda a, b: a or b, chars, False)
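The final fold is equivalent in result to any(chars), returning True when at least one character is a lowercase ASCII letter, though unlike any() the reduce() call always consumes the whole iterable rather than short-circuiting.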
Example #29
Source Project: neuropythy Author: noahbenson File: core.py License: GNU Affero General Public License v3.0
def compose(*args):
    '''
    compose(g, h...) yields a potential function f that is the result of composing together
      all the arguments g, h, etc. after calling to_potential() on each. The result is defined
      such that f(x) is equivalent to g(h(...(x))).
    '''
    return reduce(lambda h, g: PotentialComposition(g, h),
                  reversed(list(map(to_potential, args))))
Example #30
Source Project: neuropythy Author: noahbenson File: retinotopy.py License: GNU Affero General Public License v3.0
def fit_pRF_radius(ctx, retinotopy=Ellipsis, mask=None, weight=Ellipsis, slope_only=False):
    '''
    fit_pRF_radius(ctx) fits a line, m*eccen + b, to the pRF radius and yields the tuple
      (m, b).

    The following options may be given:
      * retinotopy (default: Ellipsis) specifies the prefix for the retinotopy (passed to
        retinotopy_data() to find the retinotopic dataset).
      * mask (default: None) specifies the mask over which to perform the calculation. This
        is passed to the to_mask() function. In the case that mask is a set or frozenset,
        then it is treated as a conjunction (intersection) of masks.
      * weight (default: None) specifies that a weight should be used; if this is True or
        Ellipsis, will use the variance_explained if it is part of the retinotopy dataset;
        if this is False or None, uses no weight; otherwise, this must be a weight property
        or property name.
      * slope_only (default: False) may be set to True to instead fit radius = m*eccen and
        return only m.
    '''
    rdat = retinotopy_data(ctx, retinotopy)
    if 'radius' not in rdat:
        raise ValueError('No pRF radius found in dataset %s' % retinotopy)
    rad = rdat['radius']
    (ang, ecc) = as_retinotopy(rdat, 'visual')
    if isinstance(mask, (set, frozenset)):
        mask = reduce(np.intersect1d, [ctx.mask(m, indices=True) for m in mask])
    else:
        mask = ctx.mask(mask, indices=True)
    # get a weight if provided:
    if weight in [False, None]:
        wgt = np.ones(rad.shape)
    elif weight in [True, Ellipsis]:
        if 'variance_explained' in rdat:
            wgt = rdat['variance_explained']
        else:
            wgt = np.ones(rad.shape)
    else:
        wgt = ctx.property(weight)
    # get the relevant eccen and radius values
    (ecc, rad, wgt) = [x[mask] for x in (ecc, rad, wgt)]
    # fit a line...
    if slope_only:
        ecc = np.reshape(ecc * wgt, (len(ecc), 1))
        rad = np.reshape(rad * wgt, (len(rad), 1))
        return np.linalg.lstsq(ecc, rad)[0]
    else:
        return tuple(np.polyfit(ecc, rad, 1, w=wgt))