Python numpy.full() Examples

The following are 30 code examples showing how to use numpy.full(). These examples are extracted from open-source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module numpy, or try the search function.

Example 1
Project: dustmaps   Author: gregreen   File: sfd.py    License: GNU General Public License v2.0 6 votes vote down vote up
def query(self, coords, order=1):
    """
    Look up the map value for each of the given sky coordinates.

    Args:
        coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
        order (Optional[int]): Interpolation order passed to the map
            interpolation. Defaults to `1` (linear interpolation).

    Returns:
        A float array containing the map value at every input coordinate,
        with the same shape as the coordinates stored by `coords`.
    """
    gal_l = coords.l.deg
    gal_b = coords.b.deg
    result = np.full(len(gal_l), np.nan, dtype='f4')

    # Handle the northern ('ngp') and southern polar caps separately,
    # selecting coordinates by the sign of the Galactic latitude.
    for pole in self.poles:
        mask = (gal_b >= 0) if pole == 'ngp' else (gal_b < 0)
        if not np.any(mask):
            continue
        data, w = self._data[pole]
        x, y = w.wcs_world2pix(gal_l[mask], gal_b[mask], 0)
        result[mask] = map_coordinates(data, [y, x], order=order, mode='nearest')

    return result
Example 2
Project: neuropythy   Author: noahbenson   File: files.py    License: GNU Affero General Public License v3.0 6 votes vote down vote up
def find_subject_path(sid, check_path=True):
    '''
    find_subject_path(sub) yields the full path of a HCP subject with the name given by the string
      sub, if such a subject can be found in the HCP search paths. See also add_subject_path.

    If no subject is found, then None is returned.
    '''
    # check_path appears to be tri-state: True requires both a valid HCP
    # directory layout and an existing directory; False only requires that
    # the directory exist; None skips all checks -- TODO confirm intended use.
    # if it's a full/relative path already, use it:
    sub = str(sid)
    if ((not check_path or is_hcp_subject_path(sub)) and
        (check_path is None or os.path.isdir(sub))):
        return sub
    # check the subject directories:
    sdirs = config['hcp_subject_paths']
    # yield the first search-path candidate that passes the same checks
    # as above, or None if no candidate passes
    return next((os.path.abspath(p) for sd in sdirs
                 for p in [os.path.join(sd, sub)]
                 if ((not check_path or is_hcp_subject_path(p)) and
                     (check_path is None or os.path.isdir(p)))),
                None)
Example 3
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0 6 votes vote down vote up
def cos_edge(f=Ellipsis, width=np.pi, offset=0, scale=1):
    '''
    cos_edge() yields a potential function g(x) that calculates 0 for x < pi/2, 1 for x > pi/2, and
      0.5*(1 + cos(pi/2*(1 - x))) for x between -pi/2 and pi/2.

    The full formula of the cosine edge is, including optional arguments:
      scale/2 * (1 + cos(pi*(0.5 - (x - offset)/width)

    The following optional arguments may be given:
      * width (default: pi) specifies that the frequency of the cos-curve should be pi/width; the
        width is the distance between the points on the cos-curve with the value of 1.
      * offset (default: 0) specifies the offset of the minimum value of the cosine curve on the
        x-axis.
      * scale (default: 1) specifies the height of the cosine edge.
    '''
    f = to_potential(f)
    # the edge ramps from 0 up to scale over the interval [xmn, xmx]
    # (removed an unused local `freq = np.pi/2` that was dead code)
    (xmn,xmx) = (offset - width/2, offset + width/2)
    F = piecewise(scale,
                  ((-np.inf, xmn), 0),
                  ((xmn,xmx), scale/2 * (1 + cos(np.pi*(0.5 - (identity - offset)/width)))))
    # constant inputs can be evaluated immediately; the identity needs no
    # composition; everything else composes F after f
    if   is_const_potential(f):    return const_potential(F.value(f.c))
    elif is_identity_potential(f): return F
    else:                          return compose(F, f)
Example 4
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0 6 votes vote down vote up
def signed_face_areas(faces, axis=1):
    '''
    signed_face_areas(faces) yields a potential function f(x) that calculates the signed area of
      each face represented by the simplices matrix faces.

    If faces is None, then the parameters must arrive in the form of a flattened (n x 3 x 2) matrix
    where n is the number of triangles. Otherwise, the faces matrix must be either (n x 3) or (n x 3
    x s); if the former, each row must list the vertex indices for the faces where the vertex matrix
    is presumed to be shaped (V x 2). Alternately, faces may be a full (n x 3 x 2) simplex array of
    the indices into the parameters.

    The optional argument axis (default: 1) may be set to 0 if the faces argument is a matrix but
    the coordinate matrix will be (2 x V) instead of (V x 2).
    '''
    faces = np.asarray(faces)
    if len(faces.shape) == 2:
        # an (n x 3) vertex-index matrix: translate vertex indices into the
        # parameter indices of the flattened coordinate matrix
        if faces.shape[1] != 3:
            faces = faces.T
        n_params = 2 * (np.max(faces) + 1)
        order = np.arange(n_params)
        # parameter layout depends on whether coordinates are (V x 2) or (2 x V)
        lut = order.reshape(2, -1).T if axis == 0 else order.reshape(-1, 2)
        faces = lut[faces.flat].reshape(-1, 3, 2)
    # the signed-area potential consumes the flattened simplex indices
    return compose(TriangleSignedArea2DPotential(), part(Ellipsis, faces.flatten()))
Example 5
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0 6 votes vote down vote up
def face_areas(faces, axis=1):
    '''
    face_areas(faces) yields a potential function f(x) that calculates the unsigned area of each
      faces represented by the simplices matrix faces.

    If faces is None, then the parameters must arrive in the form of a flattened (n x 3 x 2) matrix
    where n is the number of triangles. Otherwise, the faces matrix must be either (n x 3) or (n x 3
    x s); if the former, each row must list the vertex indices for the faces where the vertex matrix
    is presumed to be shaped (V x 2). Alternately, faces may be a full (n x 3 x 2) simplex array of
    the indices into the parameters.

    The optional argument axis (default: 1) may be set to 0 if the faces argument is a matrix but
    the coordinate matrix will be (2 x V) instead of (V x 2).
    '''
    faces = np.asarray(faces)
    if len(faces.shape) == 2:
        # an (n x 3) vertex-index matrix: convert vertex indices into indices
        # of the flattened coordinate parameters
        if faces.shape[1] != 3:
            faces = faces.T
        n_params = 2 * (np.max(faces) + 1)
        order = np.arange(n_params)
        # axis selects whether the coordinate matrix is (2 x V) or (V x 2)
        lut = order.reshape(2, -1).T if axis == 0 else order.reshape(-1, 2)
        faces = lut[faces.flat].reshape(-1, 3, 2)
    # the unsigned-area potential consumes the flattened simplex indices
    return compose(TriangleArea2DPotential(), part(Ellipsis, faces.flatten()))
Example 6
Project: neuropythy   Author: noahbenson   File: retinotopy.py    License: GNU Affero General Public License v3.0 6 votes vote down vote up
def _retinotopic_field_sign_triangles(m, retinotopy):
    # Compute, for each face of the mesh/topology m, the field sign
    # (+1 / -1 / 0): the sign of the z-component of the cross product of the
    # face's edge vectors in visual-field ('geographical') coordinates.
    t = m.tess if isinstance(m, geo.Mesh) or isinstance(m, geo.Topology) else m
    # get the polar angle and eccen data as a complex number in degrees
    if pimms.is_str(retinotopy):
        (x,y) = as_retinotopy(retinotopy_data(m, retinotopy), 'geographical')
    elif retinotopy is Ellipsis:
        (x,y) = as_retinotopy(retinotopy_data(m, 'any'),      'geographical')
    else:
        (x,y) = as_retinotopy(retinotopy,                     'geographical')
    # Okay, now we want to make some coordinates...
    coords = np.asarray([x, y])
    # edge vectors of each face relative to its first vertex
    us = coords[:, t.indexed_faces[1]] - coords[:, t.indexed_faces[0]]
    vs = coords[:, t.indexed_faces[2]] - coords[:, t.indexed_faces[0]]
    # append a zero z-component so np.cross yields a 3-vector per face
    (us,vs) = [np.concatenate((xs, np.full((1, t.face_count), 0.0))) for xs in [us,vs]]
    xs = np.cross(us, vs, axis=0)[2]
    # snap near-zero areas to exactly zero so their sign becomes 0
    xs[np.isclose(xs, 0)] = 0
    return np.sign(xs)
Example 7
def test_gluon_trainer_step():
    # Distributed-kvstore test: every worker contributes a gradient of
    # (my_rank + 1) * ones, so after one SGD step with lr=1.0 the parameter
    # should equal 1 - sum(1..nworker) on all workers.
    # NOTE(review): relies on module-level globals kv, my_rank, nworker.
    def check_trainer_step():
        ctx = mx.cpu(0)
        shape = (10, 1)
        x = mx.gluon.Parameter('x', shape=shape)
        x.initialize(ctx=ctx, init='ones')
        trainer = mx.gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'multi_precision': False}, kvstore=kv)
        with mx.autograd.record():
            w = x.data(ctx)
            y = (my_rank + 1) * w
            y.backward()
        trainer.step(1)
        # 1 + 2 + ... + nworker == (1 + nworker) * nworker / 2
        expected = 1 - (1 + nworker) * nworker / 2
        assert_almost_equal(x.data(ctx).asnumpy(), np.full(shape, expected))
    check_trainer_step()
    print('worker ' + str(my_rank) + ' passed test_gluon_trainer_step') 
Example 8
def test_gluon_trainer_sparse_step():
    # Row-sparse variant of the distributed trainer test: same math as the
    # dense case (each worker pushes (my_rank + 1) * ones), but the
    # parameter and gradient use the 'row_sparse' storage type.
    # NOTE(review): relies on module-level globals kv, my_rank, nworker.
    def check_trainer_sparse_step():
        ctx = mx.cpu(0)
        shape = (2, 10)
        # all row ids, needed to materialize the sparse parameter's data
        all_rows = mx.nd.arange(0, shape[0], ctx=ctx)
        x = mx.gluon.Parameter('x', shape=shape, stype='row_sparse', grad_stype='row_sparse')
        x.initialize(ctx=ctx, init='ones')
        trainer = mx.gluon.Trainer([x], 'sgd', {'learning_rate': 1.0}, kvstore=kv)
        with mx.autograd.record():
            w = x.row_sparse_data(all_rows)
            y = (my_rank + 1) * w
            y.backward()
        trainer.step(1)
        # 1 + 2 + ... + nworker == (1 + nworker) * nworker / 2
        expected = 1 - (1 + nworker) * nworker / 2
        assert_almost_equal(x.row_sparse_data(all_rows).asnumpy(), np.full(shape, expected))
    check_trainer_sparse_step()
    print('worker ' + str(my_rank) + ' passed test_gluon_trainer_sparse_step') 
Example 9
Project: DOTA_models   Author: ringringyi   File: visualization_utils_test.py    License: Apache License 2.0 6 votes vote down vote up
def create_colorful_test_image(self):
    """Create a 200x400x3 uint8 test image for the visualization functions.

    The image consists of four solid-color 100x200 rectangles arranged in a
    2x2 grid.

    Returns:
      colorful test numpy array image.
    """
    # constant single-channel planes of shape (100, 200, 1)
    plane255 = np.full([100, 200, 1], 255, dtype=np.uint8)
    plane128 = np.full([100, 200, 1], 128, dtype=np.uint8)
    plane0 = np.full([100, 200, 1], 0, dtype=np.uint8)
    # the four solid-color quadrants (stacked along the channel axis)
    quad_a = np.concatenate((plane255, plane128, plane128), axis=2)
    quad_b = np.concatenate((plane255, plane255, plane0), axis=2)
    quad_c = np.concatenate((plane255, plane0, plane255), axis=2)
    quad_d = np.concatenate((plane128, plane128, plane128), axis=2)
    # assemble: a|b on top, c|d below
    top = np.concatenate((quad_a, quad_b), axis=1)
    bottom = np.concatenate((quad_c, quad_d), axis=1)
    return np.concatenate((top, bottom), axis=0)
Example 10
Project: post--memorization-in-rnns   Author: distillpub   File: auto_complete_fixed.py    License: MIT License 6 votes vote down vote up
def make_source_target_alignment(self, sequence):
    """Encode a sentence into position-aligned source and target arrays.

    For each word of the space-separated `sequence`, the source piece is
    the space character code followed by the word's character codes, and
    the target piece repeats the word's code once per source position, so
    the concatenated arrays are aligned element-for-element.

    Returns:
        (length, source, target): the total number of source positions and
        the two concatenated int32 numpy arrays.
    """
    # (removed the unused local `unknown_word_code`, which was dead code
    # performing a pointless dict lookup)
    space_char_code = self._char_map_inverse[' ']

    source = []
    target = []
    length = 0

    for word in sequence.split(' '):
        # source: leading space marker followed by the encoded characters
        source.append(
            np.array([space_char_code] + self.encode_source(word),
                     dtype='int32')
        )
        # target: the word's own code repeated for every source position
        target.append(
            np.full(len(word) + 1, self.encode_target([word])[0],
                    dtype='int32')
        )
        length += 1 + len(word)

    # concatenate data
    return (
        length,
        np.concatenate(source),
        np.concatenate(target)
    )
Example 11
Project: HardRLWithYoutube   Author: MaxSobolMark   File: running_mean_std.py    License: MIT License 6 votes vote down vote up
def __init__(self, epsilon=1e-4, shape=(), scope=''):
        """Create TF variables holding a running mean, variance, and count.

        Args:
            epsilon: initial pseudo-count; keeps the first updates stable.
            shape: shape of the tracked mean/variance statistics.
            scope: TF variable scope name (AUTO_REUSE, so it may be shared).
        """
        sess = get_session()

        # placeholders through which freshly computed statistics are pushed
        self._new_mean = tf.placeholder(shape=shape, dtype=tf.float64)
        self._new_var = tf.placeholder(shape=shape, dtype=tf.float64)
        self._new_count = tf.placeholder(shape=(), dtype=tf.float64)

        
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            # NOTE(review): the TF variable is named 'std' but is stored as
            # self._var -- presumably it holds the variance; confirm before
            # renaming anything.
            self._mean  = tf.get_variable('mean',  initializer=np.zeros(shape, 'float64'),      dtype=tf.float64)
            self._var   = tf.get_variable('std',   initializer=np.ones(shape, 'float64'),       dtype=tf.float64)    
            self._count = tf.get_variable('count', initializer=np.full((), epsilon, 'float64'), dtype=tf.float64)

        # single grouped op that assigns all three statistics at once
        self.update_ops = tf.group([
            self._var.assign(self._new_var),
            self._mean.assign(self._new_mean),
            self._count.assign(self._new_count)
        ])

        sess.run(tf.variables_initializer([self._mean, self._var, self._count]))
        self.sess = sess
        self._set_mean_var_count() 
Example 12
Project: object_detector_app   Author: datitran   File: visualization_utils_test.py    License: MIT License 6 votes vote down vote up
def create_colorful_test_image(self):
    """This function creates an image that can be used to test vis functions.

    It makes an image composed of four colored rectangles.

    Returns:
      colorful test numpy array image.
    """
    # build each constant single-channel plane once, keyed by its value
    planes = {v: np.full([100, 200, 1], v, dtype=np.uint8) for v in (255, 128, 0)}

    def quadrant(r, g, b):
        # a solid (100 x 200 x 3) rectangle with the given channel values
        return np.concatenate((planes[r], planes[g], planes[b]), axis=2)

    upper = np.concatenate((quadrant(255, 128, 128), quadrant(255, 255, 0)), axis=1)
    lower = np.concatenate((quadrant(255, 0, 255), quadrant(128, 128, 128)), axis=1)
    return np.concatenate((upper, lower), axis=0)
Example 13
Project: pymoo   Author: msu-coinlab   File: wfg.py    License: Apache License 2.0 6 votes vote down vote up
def _positional_to_optimal(self, K):
        k, l = self.k, self.l

        suffix = np.full((len(K), self.l), 0.0)
        X = np.column_stack([K, suffix])
        X[:, self.k + self.l - 1] = 0.35

        for i in range(self.k + self.l - 2, self.k - 1, -1):
            m = X[:, i + 1:k + l]
            val = m.sum(axis=1) / m.shape[1]
            X[:, i] = 0.35 ** ((0.02 + 1.96 * val) ** -1)

        ret = X * (2 * (np.arange(self.n_var) + 1))
        return ret


# ---------------------------------------------------------------------------------------------------------
# TRANSFORMATIONS
# --------------------------------------------------------------------------------------------------------- 
Example 14
Project: pymoo   Author: msu-coinlab   File: replacement.py    License: Apache License 2.0 6 votes vote down vote up
def _do(self, problem, pop, off, **kwargs):
        ret = np.full((len(pop), 1), False)

        pop_F, pop_CV, pop_feasible = pop.get("F", "CV", "feasible")
        off_F, off_CV, off_feasible = off.get("F", "CV", "feasible")

        if problem.n_constr > 0:

            # 1) Both infeasible and constraints have been improved
            ret[(~pop_feasible & ~off_feasible) & (off_CV < pop_CV)] = True

            # 2) A solution became feasible
            ret[~pop_feasible & off_feasible] = True

            # 3) Both feasible but objective space value has improved
            ret[(pop_feasible & off_feasible) & (off_F < pop_F)] = True

        else:
            ret[off_F < pop_F] = True

        return ret[:, 0] 
Example 15
Project: pymoo   Author: msu-coinlab   File: usage_ga_custom.py    License: Apache License 2.0 6 votes vote down vote up
def _do(self, problem, X, **kwargs):
        # Custom binary crossover: each offspring keeps every gene on which
        # both parents agree (True & True), then fills up to problem.n_max
        # True genes by sampling randomly from the positions where the
        # parents disagree.
        n_parents, n_matings, n_var = X.shape

        _X = np.full((self.n_offsprings, n_matings, problem.n_var), False)

        for k in range(n_matings):
            p1, p2 = X[0, k], X[1, k]

            # genes set in both parents are always inherited
            both_are_true = np.logical_and(p1, p2)
            _X[0, k, both_are_true] = True

            n_remaining = problem.n_max - np.sum(both_are_true)

            # candidate positions: genes the parents disagree on
            I = np.where(np.logical_xor(p1, p2))[0]

            S = I[np.random.permutation(len(I))][:n_remaining]
            _X[0, k, S] = True

        # NOTE(review): only offspring row 0 is ever filled; presumably
        # n_offsprings is 1 for this operator -- confirm with its config.
        return _X 
Example 16
Project: pymoo   Author: msu-coinlab   File: mixed_variable_operator.py    License: Apache License 2.0 6 votes vote down vote up
def _do(self, problem, X, **kwargs):
        # Apply one type-specific crossover operator per variable mask in
        # self.process, then merge the per-operator offspring back into a
        # single (n_offsprings x n_matings x n_var) array.
        _, n_matings, n_var = X.shape

        def fun(mask, operator):
            # run the sub-operator on just the columns selected by its mask
            return operator._do(problem, X[..., mask], **kwargs)

        ret = apply_mixed_variable_operation(problem, self.process, fun)

        # for the crossover the concatenation is different through the 3d arrays.
        # BUGFIX: use the builtin `object` dtype -- the `np.object` alias was
        # deprecated in NumPy 1.20 and removed in 1.24; object dtype keeps
        # mixed variable types (floats, ints, bools) in one array.
        X = np.full((self.n_offsprings, n_matings, n_var), np.nan, dtype=object)
        for i in range(len(self.process)):
            mask, _X = self.process[i]["mask"], ret[i]
            X[..., mask] = _X

        return X 
Example 17
Project: pymoo   Author: msu-coinlab   File: point_crossover.py    License: Apache License 2.0 6 votes vote down vote up
def _do(self, problem, X, **kwargs):
        # n-point crossover: choose self.n_points random cut positions per
        # mating and exchange every other segment between the two parents.

        # get the X of parents and count the matings
        _, n_matings, n_var = X.shape

        # start point of crossover
        r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :self.n_points]
        r.sort(axis=1)
        # append n_var so the final segment always has a closing boundary
        r = np.column_stack([r, np.full(n_matings, n_var)])

        # the mask do to the crossover
        M = np.full((n_matings, n_var), False)

        # create for each individual the crossover range
        for i in range(n_matings):

            # mark every other segment [a, b) for exchange
            j = 0
            while j < r.shape[1] - 1:
                a, b = r[i, j], r[i, j + 1]
                M[i, a:b] = True
                j += 2

        _X = crossover_mask(X, M)

        return _X 
Example 18
Project: pymoo   Author: msu-coinlab   File: half_uniform_crossover.py    License: Apache License 2.0 6 votes vote down vote up
def _do(self, problem, X, **kwargs):
        # Half-uniform crossover (HUX): swap exactly half (rounded up) of the
        # genes in which the two parents differ, chosen at random.
        _, n_matings, n_var = X.shape

        # the mask do to the crossover
        M = np.full((n_matings, n_var), False)

        # positions where the parents disagree
        not_equal = X[0] != X[1]

        # create for each individual the crossover range
        for i in range(n_matings):
            I = np.where(not_equal[i])[0]

            # swap ceil(half) of the differing positions
            n = math.ceil(len(I) / 2)
            if n > 0:
                _I = I[np.random.permutation(len(I))[:n]]
                M[i, _I] = True

        _X = crossover_mask(X, M)
        return _X 
Example 19
Project: pymoo   Author: msu-coinlab   File: ctaea.py    License: Apache License 2.0 6 votes vote down vote up
def comp_by_cv_dom_then_random(pop, P, **kwargs):
    """Binary tournament: feasibility first, then dominance, then random.

    For each row (a, b) of the pairing matrix P: if both candidates are
    feasible (CV <= 0), the Pareto-dominant one wins with random
    tie-breaking; if only one is feasible it wins; if neither is feasible,
    the winner is chosen at random.

    Returns an (n x 1) integer array of the winning population indices.
    """
    S = np.full(P.shape[0], np.nan)

    for i in range(P.shape[0]):
        a, b = P[i, 0], P[i, 1]

        if pop[a].CV <= 0.0 and pop[b].CV <= 0.0:
            # both feasible -> decide by Pareto dominance
            rel = Dominator.get_relation(pop[a].F, pop[b].F)
            if rel == 1:
                S[i] = a
            elif rel == -1:
                S[i] = b
            else:
                S[i] = np.random.choice([a, b])
        elif pop[a].CV <= 0.0:
            S[i] = a
        elif pop[b].CV <= 0.0:
            S[i] = b
        else:
            # both infeasible -> random winner
            S[i] = np.random.choice([a, b])

    # BUGFIX: cast with the builtin `int` -- the `np.int` alias was
    # deprecated in NumPy 1.20 and removed in 1.24.
    return S[:, None].astype(int)
Example 20
Project: pymoo   Author: msu-coinlab   File: rnsga2.py    License: Apache License 2.0 6 votes vote down vote up
def __init__(self, ref_points,
             epsilon,
             weights,
             normalization,
             extreme_points_as_reference_points
             ) -> None:
    """Set up R-NSGA-II reference-point survival bookkeeping.

    Stores the reference points and their configuration; when no weights
    are supplied, each of the n_obj objectives receives a uniform weight
    of 1/n_obj. The ideal/nadir points start at +/- infinity and are
    tightened as solutions are observed.
    """
    super().__init__(True)

    # reference-point configuration
    self.ref_points = ref_points
    self.n_obj = ref_points.shape[1]
    self.epsilon = epsilon
    self.extreme_points_as_reference_points = extreme_points_as_reference_points

    # default to uniform objective weights when none are provided
    self.weights = np.full(self.n_obj, 1 / self.n_obj) if weights is None else weights

    # normalization bookkeeping; ideal/nadir start at +/- infinity
    self.normalization = normalization
    self.ideal_point = np.full(self.n_obj, np.inf)
    self.nadir_point = np.full(self.n_obj, -np.inf)
Example 21
Project: pymoo   Author: msu-coinlab   File: so_cmaes.py    License: Apache License 2.0 6 votes vote down vote up
def initialize(self, problem, seed=None, **kwargs):
        # Translate the generic algorithm setup into CMA-ES specific options.
        super().initialize(problem, **kwargs)
        self.n_gen = 0

        # variable bounds (None on a side when the problem is unbounded there)
        xl = problem.xl.tolist() if problem.xl is not None else None
        xu = problem.xu.tolist() if problem.xu is not None else None

        self.options['bounds'] = [xl, xu]
        self.options['seed'] = seed

        # map pymoo termination criteria onto cma's option names
        if isinstance(self.termination, MaximumGenerationTermination):
            self.options['maxiter'] = self.termination.n_max_gen
        elif isinstance(self.termination, MaximumFunctionCallTermination):
            self.options['maxfevals'] = self.termination.n_max_evals

        # if self.problem.n_constr > 0:
        #     _al = AugmentedLagrangian(problem.n_var)
        #     _al.set_m(problem.n_constr)
        #     _al._equality = np.full(problem.n_constr, False)
        #     self.al = _al
        #     kwargs.setdefault('options', {}).setdefault('tolstagnation', 0) 
Example 22
Project: libTLDA   Author: wmkouw   File: suba.py    License: MIT License 5 votes vote down vote up
def find_medioid(self, X, Y):
    """
    Find point with minimal distance to all other points.

    Parameters
    ----------
    X : array
        data set, with N samples x D features.
    Y : array
        labels to select for which samples to compute distances.

    Returns
    -------
    x : array
        medioid
    ix : int
        index of medioid

    """
    # Every distance-sum starts at infinity so that samples outside the
    # selection Y can never win the argmin below.
    dist_sums = np.full((X.shape[0],), np.inf)

    # For each selected sample, sum its distances to the other selections.
    dist_sums[Y] = squareform(pdist(X[Y, :])).sum(axis=1)

    # The medioid is the selected sample with the smallest total distance.
    ix = np.argmin(dist_sums)
    return X[ix, :], ix
Example 23
Project: dustmaps   Author: gregreen   File: bayestar.py    License: GNU General Public License v2.0 5 votes vote down vote up
def distances(self):
    """
    Returns the distance bin edges that the map uses. The return type is
    :obj:`astropy.units.Quantity`, which stores unit-full quantities.
    """
    # convert distance-modulus bin edges to distances: d/kpc = 10^(DM/5 - 2)
    dist_kpc = 10.0 ** (0.2 * self._DM_bin_edges - 2.0)
    return dist_kpc * units.kpc
Example 24
Project: neuropythy   Author: noahbenson   File: files.py    License: GNU Affero General Public License v3.0 5 votes vote down vote up
def cifti_split(cii, label=('lh', 'rh', 'rest'), subject=None, hemi=None, null=np.nan):
    '''
    cifti_split(cii, label) yields the rows or columns of the given cifti file that correspond to
      the given label (see below).
    cifti_split(cii) is equivalent to cifti_split(cii, ('lh', 'rh', 'rest')).

    The label argument may be any of the following:
      * a valid CIFTI label name such as 'CIFTI_STRUCTURE_CEREBELLUM' or
        'CIFTI_STRUCTURE_CORTEX_LEFT';
      * an abbreviated name such as 'cerebellum' for 'CIFTI_STRUCTURE_CEREBELLUM'.
      * the abbreviations 'lh' and 'rh' which stand for 'CIFTI_STRUCTURE_CORTEX_LEFT' and 
        'CIFTI_STRUCTURE_CORTEX_RIGHT';
      * the special keyword 'rest', which represents all the rows/columns not collected by any other
        instruction ('rest', by itself, results in the whole matrix being returned); or
      * A tuple of the above, indicating that each of the items listed should be returned
        sequentially in a tuple.

    The following optional arguments may be given:
      * subject (default: None) may specify the subject
      * hemi (default: None) can specify the hemisphere object that 
    '''
    # NOTE(review): the label/subject/hemi parameters are never referenced in
    # the body below -- the docstring may describe a richer planned API.
    dat = np.asanyarray(cii.dataobj if is_image(cii) else cii)
    n = dat.shape[-1]
    # look up which atlas resolution matches this trailing-axis size
    atlas = cifti_split._size_data.get(n, None)
    if atlas is None: raise ValueError('cannot split cifti with size %d' % n)
    # lazily load and cache the lh/rh atlas ROI masks for this resolution
    if atlas not in cifti_split._atlas_cache:
        patt = os.path.join('data', 'fs_LR', '%s.atlasroi.%dk_fs_LR.shape.gii')
        lgii = nib.load(os.path.join(library_path(), patt % ('lh', atlas)))
        rgii = nib.load(os.path.join(library_path(), patt % ('rh', atlas)))
        cifti_split._atlas_cache[atlas] = tuple([pimms.imm_array(gii.darrays[0].data.astype('bool'))
                                                 for gii in (lgii, rgii)])
    (lroi,rroi) = cifti_split._atlas_cache[atlas]
    # ln/rn: in-ROI vertex counts; lN/rN: full hemisphere vertex counts
    (ln,lN) = (np.sum(lroi), len(lroi))
    (rn,rN) = (np.sum(rroi), len(rroi))
    # allocate full-size, null-filled outputs for lh, rh, and the remainder
    (ldat,rdat,sdat) = [np.full(dat.shape[:-1] + (k,), null) for k in [lN, rN, n - ln - rn]]
    # scatter the packed cifti columns into the unpacked ROI positions
    ldat[..., lroi] = dat[..., :ln]
    rdat[..., rroi] = dat[..., ln:(ln+rn)]
    sdat[...] = dat[..., (ln+rn):]
    # no subcortical/rest data present: yield None for that slot
    if ln + rn >= n: sdat = None
    return (ldat, rdat, sdat) 
Example 25
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0 5 votes vote down vote up
def unaddress(self, data, surface=0.5):
    '''
    cortex.unaddress(address) yields the (3 x n) coordinate matrix of the given addresses (or,
      if address is singular, the 3D vector) in the given cortex. If the address is a 2D instead
      of a 3D address, then the mid-gray position is returned by default.

    The following options may be given:
      * surface (default: 0.5) specifies the surface to use for 2D addresses; this should be
        either 'white', 'pial', 'midgray', or a real number in the range [0,1] where 0 is the
        white surface and 1 is the pial surface.
    '''
    (faces, coords) = address_data(data, 3, surface=surface)
    # bc: barycentric coordinates; ds: cortical-depth fraction
    (bc, ds) = (coords[:2], coords[2])
    faces = self.tess.index(faces)
    (wx, px) = (self.white_surface.coordinates, self.pial_surface.coordinates)
    if all(len(np.shape(x)) > 1 for x in (faces, coords)):
        # matrix case: gather per-face vertex coordinates on both surfaces,
        # substituting NaNs for invalid (negative-index) faces
        (wtx, ptx) = [
            np.transpose([sx[:,ff] if ff[0] >= 0 else null for ff in faces.T], (2,1,0))
            for null in [np.full((3, wx.shape[0]), np.nan)]
            for sx   in (wx, px)]
    elif faces == -1:
        # singular invalid address: return a NaN coordinate vector.
        # BUGFIX: the original referenced an undefined name `selfx` here
        # (a NameError whenever this branch ran); `wx` -- the white-surface
        # coordinate matrix -- appears to be the intended array. TODO confirm.
        return np.full(wx.shape[0], np.nan)
    else:
        # singular valid address: one face's vertex coordinates per surface
        (wtx, ptx) = [sx[:,faces].T for sx in (wx, px)]
    # interpolate between white and pial positions by the depth fraction ds
    (wu, pu) = [geo.barycentric_to_cartesian(tx, bc) for tx in (wtx, ptx)]
    return wu*ds + pu*(1 - ds)
Example 26
Project: neuropythy   Author: noahbenson   File: images.py    License: GNU Affero General Public License v3.0 5 votes vote down vote up
def image_spec_to_image(imspec, image_type=None, fill=0):
    '''
    image_spec_to_image(imspec) yields an empty with the given image-spec.
    image_spec_to_image(imspec, image_type) creates an image with the given type
    image_spec_to_image(imspec, image_type, fill) fills the resulting image with the given
      fill-value.
    '''
    shape = imspec_lookup(imspec, 'image_shape')
    dtype = imspec_lookup(imspec, 'voxel_type')
    # zeros() is the cheap path for the default fill value
    if fill == 0:
        voxels = np.zeros(shape, dtype=dtype)
    else:
        voxels = np.full(shape, fill, dtype=dtype)
    # okay, we have the image array; resolve the image type (nifti1 default)
    itype = to_image_type('nifti1' if image_type is None else image_type)
    return itype.create(voxels, meta_data=imspec)
Example 27
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0 5 votes vote down vote up
def finto(x, ii, n, null=0):
    '''
    finto(x,ii,n) yields a vector u of length n such that u[ii] = x.

    Notes:
      * The ii index may be a tuple (as can be passed to numpy arrays' getitem method) in order to
        specify that the specific elements of a multidimensional output be set. In this case, the
        argument n should be a tuple of sizes (a single integer is taken to be a square/cube/etc).
      * x may be a sparse-array, but in it will be reified by this function.

    The following optional arguments are allowed:
      * null (default: 0) specifies the value that should appear in the elements of u that are not
        set.
    '''
    # densify sparse inputs; everything below assumes a plain ndarray
    x  = x.toarray() if sps.issparse(x) else np.asarray(x)
    shx = x.shape
    if isinstance(ii, tuple):
        # multidimensional target: n gives (or is broadcast to) one size per index axis
        if not pimms.is_vector(n): n = tuple([n for _ in ii])
        if len(n) != len(ii): raise ValueError('%d-dim index but %d-dim output' % (len(ii),len(n)))
        sh = n + shx[1:]
    elif pimms.is_int(ii): sh = (n,) + shx
    else:                  sh = (n,) + shx[1:]
    # zeros() is the cheap path when the fill value is 0; otherwise use full()
    u = np.zeros(sh, dtype=x.dtype) if null == 0 else np.full(sh, null, dtype=x.dtype)
    u[ii] = x
    return u

# Potential Functions ############################################################################## 
Example 28
Project: neuropythy   Author: noahbenson   File: hcp.py    License: GNU Affero General Public License v3.0 5 votes vote down vote up
def cifti_data(pseudo_path):
        '''
        ny.data['hcp_retinotopy'].cifti_data is a tuple of lazy maps of the 32k and 59k data arrays,
        reorganized into 'visual' retinotopic coordinates. The tuple elements represent the
        (full, split1, split2) solutions.
        '''
        # our loader function:
        def _load(res, split):
            import h5py
            flnm = HCPRetinotopyDataset.retinotopy_files[res]
            logging.info('HCPRetinotopyDataset: Loading split %d from file %s...' % (split, flnm))
            # resolve the file through the pseudo-path (may fetch/cache it)
            flnm = pseudo_path.local_path(flnm)
            with h5py.File(flnm, 'r') as f:
                sids = np.array(f['subjectids'][0], dtype='int')
                data = np.array(f['allresults'][split])
            sids.setflags(write=False)
            # convert these into something more coherent
            tmp = hcp.cifti_split(data)
            for q in tmp: q.setflags(write=False)
            # raw result columns: 0=angle, 1=eccentricity, 2=gain,
            # 3=mean signal, 4=variance explained (%), 5=prf radius
            # -- inferred from the mapping below; confirm against the dataset.
            return pyr.pmap(
                {h: pyr.m(prf_polar_angle        = np.mod(90 - dat[:,0] + 180, 360) - 180,
                          prf_eccentricity       = dat[:,1],
                          prf_radius             = dat[:,5],
                          prf_variance_explained = dat[:,4]/100.0,
                          prf_mean_signal        = dat[:,3],
                          prf_gain               = dat[:,2],
                          prf_x                  = dat[:,1]*np.cos(np.pi/180*dat[:,0]),
                          prf_y                  = dat[:,1]*np.sin(np.pi/180*dat[:,0]))
                 for (h,dat) in zip(['lh','rh','subcortical'], tmp)})
        # one lazy map per split (0 = full fit, 1/2 = the split-half fits)
        splits = [pimms.lazy_map({res: curry(_load, res, split)
                                  for res in six.iterkeys(HCPRetinotopyDataset.retinotopy_files)})
                  for split in [0,1,2]]
        return tuple(splits) 
Example 29
Project: neuropythy   Author: noahbenson   File: hcp.py    License: GNU Affero General Public License v3.0 5 votes vote down vote up
def _siblings_to_pairs(rs):
        subject_list = [u for v in six.itervalues(rs)
                        for uuu in [[six.iterkeys(v)], six.itervalues(v)]
                        for uu in uuu for u in ([uu] if pimms.is_int(uu) else uu)]
        subject_list = np.unique(subject_list)
        # setup twin numbers so that we can export anonymized twin data (i.e.,
        # files containing twin data but not the subject IDs)
        twin_pairs = {tw: pimms.imm_array(list(sorted(dat)))
                      for tw  in ['MZ','DZ']
                      for dat in [set([tuple(sorted([k,v])) for (k,v) in six.iteritems(rs[tw])])]}
        # also get a list of all siblings so we can track who is/isn't related
        siblings = {}
        for s1 in subject_list:
            q = []
            for sibs in six.itervalues(rs):
                if s1 not in sibs: continue
                ss = sibs[s1]
                if pimms.is_int(ss): ss = [ss]
                for s2 in ss: q.append(s2)
            if len(q) > 0: siblings[s1] = q
        # Make up a list of all possible unrelated pairs
        unrelated_pairs = []
        for sid in subject_list:
            # find a random subject to pair them with
            urs = np.setdiff1d(subject_list, [sid] + siblings.get(sid,[]))
            unrelated_pairs.append([urs, np.full(len(urs), sid)])
        unrelated_pairs = np.unique(np.sort(np.hstack(unrelated_pairs), axis=0), axis=1).T
        unrelated_pairs.setflags(write=False)
        # Having made those unrelated pairs, we can add them to the twin pairs
        twin_pairs['UR'] = unrelated_pairs
        # finally, let's figure out the non-twin siblings:
        sibs = [(k,v) for (k,vv) in six.iteritems(rs['']) for v in vv]
        twin_pairs['SB'] = np.unique(np.sort(sibs, axis=1), axis=0)
        twin_pairs['SB'].setflags(write=False)
        return pyr.pmap({'monozygotic_twins': twin_pairs['MZ'],
                         'dizygotic_twins':   twin_pairs['DZ'],
                         'nontwin_siblings':  twin_pairs['SB'],
                         'unrelated_pairs':   twin_pairs['UR']}) 
Example 30
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: stt_metric.py    License: Apache License 2.0 5 votes vote down vote up
def ctc_loss(label, prob, remainder, seq_length, batch_size, num_gpu=1, big_num=1e10):
    # Forward-algorithm CTC loss for a single utterance.
    # prob is laid out so that row i * (batch_size / num_gpu) + remainder
    # holds frame i of this utterance -- assumption inferred from the
    # indexing below; confirm against the caller.
    label_ = [0, 0]
    # NOTE: clamps prob in place (mutates the caller's array) to avoid log(0)
    prob[prob < 1 / big_num] = 1 / big_num
    log_prob = np.log(prob)

    l = len(label)
    # build the extended label: blanks (index 0) interleaved around every
    # label symbol -> [0, 0, l1, 0, l2, 0, ...]
    for i in range(l):
        label_.append(int(label[i]))
        label_.append(0)

    l_ = 2 * l + 1
    # a[i][j]: log-probability of all alignments of the first i+1 frames
    # ending at extended-label position j; -big_num stands in for -inf
    a = np.full((seq_length, l_ + 1), -big_num)
    a[0][1] = log_prob[remainder][0]
    a[0][2] = log_prob[remainder][label_[2]]
    for i in range(1, seq_length):
        row = i * int(batch_size / num_gpu) + remainder
        a[i][1] = a[i - 1][1] + log_prob[row][0]
        a[i][2] = np.logaddexp(a[i - 1][2], a[i - 1][1]) + log_prob[row][label_[2]]
        for j in range(3, l_ + 1):
            # standard CTC recursion: stay at j or advance from j-1 ...
            a[i][j] = np.logaddexp(a[i - 1][j], a[i - 1][j - 1])
            if label_[j] != 0 and label_[j] != label_[j - 2]:
                # ... and skip the intermediate blank when symbols differ
                a[i][j] = np.logaddexp(a[i][j], a[i - 1][j - 2])
            a[i][j] += log_prob[row][label_[j]]

    # total probability: alignments ending on the final symbol or final blank
    return -np.logaddexp(a[seq_length - 1][l_], a[seq_length - 1][l_ - 1])


# label is done with remove_blank
# pred is got from pred_best