Python numpy.ones() Examples

The following are 30 code examples of numpy.ones(), drawn from open-source projects. The source file and originating project are noted above each example. You may also want to check out all of the available functions and classes of the numpy module.
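As a quick refresher before the examples: numpy.ones(shape, dtype) returns a new array of the given shape filled with ones (float64 unless a dtype is given). A minimal illustration:

import numpy as np

a = np.ones(3)                   # 1-D array of three 1.0 values (float64 by default)
b = np.ones((2, 4), dtype=int)   # 2x4 integer array of ones
c = np.ones_like(b)              # ones with the same shape and dtype as b

print(a)         # [1. 1. 1.]
print(b.shape)   # (2, 4)
print(c.dtype)   # int64 (int32 on some platforms)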
Example #1
Source File: tcpr.py    From libTLDA with MIT License
def add_intercept(self, X):
        """Add 1's to data as last features."""
        # Data shape
        N, D = X.shape

        # Check whether an intercept column (one that sums to N) already exists
        if np.any(np.sum(X, axis=0) == N):

            # Report
            print('Intercept is not the last feature. Swapping..')

            # Find which column contains the intercept
            intercept_index = np.argwhere(np.sum(X, axis=0) == N)

            # Remove the existing intercept column (a fresh one is appended below)
            X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

        # Add intercept as last column
        X = np.hstack((X, np.ones((N, 1))))

        # Return the augmented data and the incremented dimensionality
        return X, D+1 
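As a rough usage sketch of the pattern above (the matrix X below is toy data, not from libTLDA), appending an intercept is just an np.hstack with a column of ones:

import numpy as np

X = np.arange(6.0).reshape(3, 2)               # toy 3 x 2 design matrix
Xi = np.hstack((X, np.ones((X.shape[0], 1))))  # append an intercept column of 1's
print(Xi.shape)                                # (3, 3); the last column is all ones
print(Xi[:, -1])                               # [1. 1. 1.]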
Example #2
Source File: images.py    From neuropythy with GNU Affero General Public License v3.0
def image_reslice(image, spec, method=None, fill=0, dtype=None, weights=None, image_type=None):
    '''
    image_reslice(image, spec) yields a duplicate of the given image resliced to have the voxels
      indicated by the given image spec. Note that spec may be an image itself.

    Optional arguments that can be passed to image_interpolate() (aside from affine) are allowed
    here and are passed through.
    '''
    if image_type is None and is_image(image): image_type = to_image_type(image)
    spec = to_image_spec(spec)
    image = to_image(image)
    # we make a big mesh and interpolate at these points...
    imsh = spec['image_shape']
    (args, kw) = ([np.arange(n) for n in imsh[:3]], {'indexing': 'ij'})
    ijk = np.asarray([u.flatten() for u in np.meshgrid(*args, **kw)])
    ijk = np.dot(spec['affine'], np.vstack([ijk, np.ones([1,ijk.shape[1]])]))[:3]
    # interpolate here...
    u = image_interpolate(image, ijk, method=method, fill=fill, dtype=dtype, weights=weights)
    return to_image((np.reshape(u, imsh), spec), image_type=image_type) 
Example #3
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def apply_affine(aff, coords):
    '''
    apply_affine(affine, coords) yields the result of applying the given affine transformation to
      the given coordinate or coordinates.

    This function expects coords to be a (dims X n) matrix but if the first dimension is neither 2
    nor 3, coords.T is used; i.e.:
      apply_affine(affine3x3, coords2xN) ==> newcoords2xN
      apply_affine(affine4x4, coords3xN) ==> newcoords3xN
      apply_affine(affine3x3, coordsNx2) ==> newcoordsNx2 (for N != 2)
      apply_affine(affine4x4, coordsNx3) ==> newcoordsNx3 (for N != 3)
    '''
    if aff is None: return coords
    (coords,tr) = (np.asanyarray(coords), False)
    if len(coords.shape) == 1: return np.squeeze(apply_affine(np.reshape(coords, (-1,1)), aff))
    elif len(coords.shape) > 2: raise ValueError('cannot apply affine to ND-array for N > 2')
    if   len(coords) == 2: aff = to_affine(aff, 2)
    elif len(coords) == 3: aff = to_affine(aff, 3)
    else: (coords,aff,tr) = (coords.T, to_affine(aff, coords.shape[1]), True)
    r = np.dot(aff, np.vstack([coords, np.ones([1,coords.shape[1]])]))[:-1]
    return r.T if tr else r 
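The np.vstack([coords, np.ones(...)]) step above lifts the points into homogeneous coordinates so that a single matrix product applies both the linear part and the translation of the affine. A standalone sketch with a made-up 2-D affine (not a neuropythy call):

import numpy as np

aff = np.array([[1., 0., 5.],      # translate x by 5
                [0., 2., 0.],      # scale y by 2
                [0., 0., 1.]])
coords = np.array([[0., 1., 2.],   # 2 x N matrix of points
                   [0., 1., 2.]])
homog = np.vstack([coords, np.ones((1, coords.shape[1]))])
out = np.dot(aff, homog)[:-1]      # drop the homogeneous row
print(out)                         # [[5. 6. 7.]  [0. 2. 4.]]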
Example #4
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def dataframe_select(df, *cols, **filters):
    '''
    dataframe_select(df, k1=v1, k2=v2...) yields df after filtering its rows so that, for each of
      the given keys (k1, k2, etc.), the associated column contains only cells that match the
      corresponding value.
    dataframe_select(df, col1, col2...) selects the given columns.
    dataframe_select(df, col1, col2..., k1=v1, k2=v2...) selects both.
    
    If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
    between the values. If value is a tuple/list of more than 2 elements or is a set of any length
    then it is a list of values, any one of which can match the cell.
    '''
    ii = np.ones(len(df), dtype='bool')
    for (k,v) in six.iteritems(filters):
        vals = df[k].values
        if   pimms.is_set(v):                    jj = np.isin(vals, list(v))
        elif pimms.is_vector(v) and len(v) == 2: jj = (v[0] <= vals) & (vals < v[1])
        elif pimms.is_vector(v):                 jj = np.isin(vals, list(v))
        else:                                    jj = (vals == v)
        ii = np.logical_and(ii, jj)
    if len(ii) != np.sum(ii): df = df.loc[ii]
    if len(cols) > 0: df = df[list(cols)]
    return df 
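The np.ones(len(df), dtype='bool') call above starts the row mask as all-True so that each filter can only narrow it. A minimal sketch of the same pattern on a toy pandas DataFrame (the column names here are invented for illustration):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': ['x', 'y', 'x', 'y']})
ii = np.ones(len(df), dtype='bool')              # start with every row selected
ii = np.logical_and(ii, df['a'].values >= 2)     # narrow by one condition
ii = np.logical_and(ii, df['b'].values == 'x')   # narrow by another
print(df.loc[ii])                                # only the row with a=3, b='x' remains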
Example #5
Source File: test_util.py    From libTLDA with MIT License
def test_one_hot():
    """Check if one_hot returns correct label matrices."""
    # Generate label vector
    y = np.hstack((np.ones((10,))*0,
                   np.ones((10,))*1,
                   np.ones((10,))*2))

    # Map to matrix
    Y, labels = one_hot(y)

    # Check for only 0's and 1's
    assert len(np.setdiff1d(np.unique(Y), [0, 1])) == 0

    # Check for correct labels
    assert np.all(labels == np.unique(y))

    # Check correct shape of matrix
    assert Y.shape[0] == y.shape[0]
    assert Y.shape[1] == len(labels) 
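For reference, np.ones((10,))*k above simply builds ten copies of the class label k. One common way to one-hot encode such a vector (a sketch, not libTLDA's one_hot implementation) indexes rows of an identity matrix:

import numpy as np

y = np.hstack((np.ones(3) * 0, np.ones(3) * 1, np.ones(3) * 2))   # [0 0 0 1 1 1 2 2 2]
labels = np.unique(y)
# Rows of the identity matrix act as one-hot codes; this shortcut works here only
# because the labels already happen to be the integers 0..K-1.
Y = np.eye(len(labels))[y.astype(int)]
print(Y.shape)         # (9, 3)
print(Y[0], Y[-1])     # [1. 0. 0.] [0. 0. 1.]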
Example #6
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def color_overlap(color1, *args):
    '''
    color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
      of color1 followed by any additional colors (overlaid left to right). This respects alpha
      values when calculating the results.
    Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
    '''
    args = list(args)
    args.insert(0, color1)
    rgba = np.asarray([0.5,0.5,0.5,0])
    for c in args:
        c = to_rgba(c)
        a = c[...,3]
        a0 = rgba[...,3]
        if   np.isclose(a0, 0).all(): rgba = np.ones(rgba.shape) * c
        elif np.isclose(a,  0).all(): continue
        else:                         rgba = times(a, c) + times(1-a, rgba)
    return rgba 
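The update rgba = times(a, c) + times(1-a, rgba) in the loop weights the new color by its alpha and whatever shows through by 1 - alpha. A plain-numpy sketch of one such step (without neuropythy's to_rgba/times helpers):

import numpy as np

background = np.array([1.0, 0.0, 0.0, 1.0])    # opaque red
overlay    = np.array([0.0, 0.0, 1.0, 0.5])    # half-transparent blue
a = overlay[3]
composited = a * overlay + (1 - a) * background
print(composited)                              # [0.5  0.   0.5  0.75]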
Example #7
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def jacobian(self, p, into=None):
        # transpose to be 3 x 2 x n
        p = np.transpose(np.reshape(p, (-1, 3, 2)), (1,2,0))
        # First, get the two legs...
        (dx_ab, dy_ab) = p[1] - p[0]
        (dx_ac, dy_ac) = p[2] - p[0]
        (dx_bc, dy_bc) = p[2] - p[1]
        # now, the area is half the z-value of the cross-product...
        sarea0 = 0.5 * (dx_ab*dy_ac - dx_ac*dy_ab)
        # but we want the absolute area, so keep track of the sign of sarea0
        dsarea0 = np.sign(sarea0)
        z = np.transpose([[-dy_bc,dx_bc], [dy_ac,-dx_ac], [-dy_ab,dx_ab]], (2,0,1))
        z = times(0.5*dsarea0, z)
        m = numel(p)
        n = p.shape[2]
        ii = (np.arange(n) * np.ones([6, n])).T.flatten()
        z = sps.csr_matrix((z.flatten(), (ii, np.arange(len(ii)))), shape=(n, m))
        return safe_into(into, z) 
Example #8
Source File: mnist_projector_generate.py    From deep-learning-note with MIT License
def create_sprite_image(images):
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # The sprite can be thought of as one large square image tiled from all of the small images
    m = int(np.ceil(np.sqrt(images.shape[0])))

    # Initialize the final large image with all 1's
    sprite_image = np.ones((img_h*m, img_w*m))

    for i in range(m):
        for j in range(m):
            # Compute the index of the current small image
            cur = i * m + j
            if cur < images.shape[0]:
                # Copy this small image into the final sprite image
                sprite_image[i*img_h:(i+1)*img_h,
                             j*img_w:(j+1)*img_w] = images[cur]
    return sprite_image

# Load the MNIST data with one_hot=False so that each label is a single digit rather than a vector
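A rough usage sketch of create_sprite_image with random stand-in images (the MNIST loading mentioned above is omitted here):

import numpy as np

images = np.random.rand(10, 28, 28)   # 10 fake 28x28 grayscale images
sprite = create_sprite_image(images)
# 10 images need a 4x4 grid, so the sprite is (4*28) x (4*28); unused cells stay 1 (white)
print(sprite.shape)                   # (112, 112)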
Example #9
Source File: 6_bias_variance.py    From deep-learning-note with MIT License
def prepare_poly_data(*args, power):
    """
    args: any of X, Xval, or Xtest;
        the prepared arrays are returned in the same order
    """
    def prepare(x):
        # expand feature
        df = poly_features(x, power=power)

        # normalization (note: DataFrame.as_matrix() was removed in pandas 1.0; .to_numpy() is the modern equivalent)
        ndarr = normalize_feature(df).as_matrix()

        # add intercept term
        return np.insert(ndarr, 0, np.ones(ndarr.shape[0]), axis=1)

    return [prepare(x) for x in args] 
Example #10
Source File: 4_multi_classification.py    From deep-learning-note with MIT License
def predict_all(X, all_theta):
    rows = X.shape[0]
    params = X.shape[1]
    num_labels = all_theta.shape[0]
    
    # same as before, insert ones to match the shape
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # convert to matrices
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)
    
    # compute the class probability for each class on each training instance
    h = sigmoid(X * all_theta.T)
    
    # create array of the index with the maximum probability
    h_argmax = np.argmax(h, axis=1)
    
    # because our array was zero-indexed we need to add one for the true label prediction
    h_argmax = h_argmax + 1
    
    return h_argmax 
Example #11
Source File: gla_util.py    From Deep_VoiceChanger with MIT License
def __init__(self, wave_len=254, wave_dif=64, buffer_size=5, loop_num=5, window=np.hanning(254)):
        self.wave_len = wave_len
        self.wave_dif = wave_dif
        self.buffer_size = buffer_size
        self.loop_num = loop_num
        self.window = window

        self.wave_buf = np.zeros(wave_len+wave_dif, dtype=float)
        self.overwrap_buf = np.zeros(wave_dif*buffer_size+(wave_len-wave_dif), dtype=float)
        self.spectrum_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        self.absolute_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        
        self.phase = np.zeros(self.wave_len, dtype=complex)
        self.phase += np.random.random(self.wave_len)-0.5 + np.random.random(self.wave_len)*1j - 0.5j
        self.phase[self.phase == 0] = 1
        self.phase /= np.abs(self.phase) 
Example #12
Source File: formating.py    From mmdetection with Apache License 2.0
def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            results (dict): Updated result dict contains the data to convert.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results.setdefault(
            'img_norm_cfg',
            dict(
                mean=np.zeros(num_channels, dtype=np.float32),
                std=np.ones(num_channels, dtype=np.float32),
                to_rgb=False))
        return results 
Example #13
Source File: baseop.py    From Traffic_sign_detection_YOLO with MIT License
def wrap_variable(self, var):
        """wrap layer.w into variables"""
        val = self.lay.w.get(var, None)
        if val is None:
            shape = self.lay.wshape[var]
            args = [0., 1e-2, shape]
            if 'moving_mean' in var:
                val = np.zeros(shape)
            elif 'moving_variance' in var:
                val = np.ones(shape)
            else:
                val = np.random.normal(*args)
            self.lay.w[var] = val.astype(np.float32)
            self.act = 'Init '
        if not self.var: return

        val = self.lay.w[var]
        self.lay.w[var] = tf.constant_initializer(val)
        if var in self._SLIM: return
        with tf.variable_scope(self.scope):
            self.lay.w[var] = tf.get_variable(var,
                shape = self.lay.wshape[var],
                dtype = tf.float32,
                initializer = self.lay.w[var]) 
Example #14
Source File: point_cloud.py    From FRIDA with MIT License
def classical_mds(self, D):
        ''' 
        Classical multidimensional scaling

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing the squared distances between points)
        '''

        # Apply MDS algorithm for denoising
        n = D.shape[0]
        J = np.eye(n) - np.ones((n,n))/float(n)
        G = -0.5*np.dot(J, np.dot(D, J))

        s, U = np.linalg.eig(G)

        # we need to sort the eigenvalues in decreasing order
        s = np.real(s)
        o = np.argsort(s)
        s = s[o[::-1]]
        U = U[:,o[::-1]]

        S = np.diag(s)[0:self.dim,:]
        self.X = np.dot(np.sqrt(S),U.T) 
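The centering matrix J = I - ones((n,n))/n above turns squared distances into a Gram matrix (G = -J D J / 2) whose leading eigenvectors recover the point coordinates up to rotation and translation. A self-contained sketch of the same idea on made-up 2-D points (using eigh rather than the method above, since G is symmetric):

import numpy as np

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0], [3.0, 1.0]])   # four points in 2-D
D = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)               # squared-distance matrix
n = D.shape[0]
J = np.eye(n) - np.ones((n, n)) / n                              # centering matrix
G = -0.5 * J @ D @ J                                             # Gram matrix of centered points
s, U = np.linalg.eigh(G)                                         # ascending eigenvalues
s, U = s[::-1], U[:, ::-1]                                       # sort in decreasing order
Y = U[:, :2] * np.sqrt(np.clip(s[:2], 0, None))                  # recovered coordinates
D2 = ((Y[:, None, :] - Y[None, :, :]) ** 2).sum(-1)
print(np.allclose(D, D2))                                        # True: distances are preserved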
Example #15
Source File: test_xrft.py    From xrft with MIT License
def synthetic_field_xr(N, dL, amp, s,
                    other_dim_sizes=None, dim_order=True,
                    chunks=None):

    theta = xr.DataArray(synthetic_field(N, dL, amp, s),
                        dims=['y', 'x'],
                        coords={'y':range(N), 'x':range(N)}
                        )

    if other_dim_sizes:
        _da = xr.DataArray(np.ones(other_dim_sizes),
                           dims=['d%d'%i for i in range(len(other_dim_sizes))])
        if dim_order:
            theta = theta + _da
        else:
            theta = _da + theta

    if chunks:
        theta = theta.chunk(chunks)

    return theta 
Example #16
Source File: test_xrft.py    From xrft with MIT License
def test_cross_phase_2d(self, dask):
        Ny, Nx = (32, 16)
        x = np.linspace(0, 1, num=Nx, endpoint=False)
        y = np.ones(Ny)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # spatial frequency = f
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1*y[:,np.newaxis], name='a',
                          dims=['y','x'], coords={'y':y, 'x':x})
        da2 = xr.DataArray(data=signal2*y[:,np.newaxis], name='b',
                          dims=['y','x'], coords={'y':y, 'x':x})
        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2, dim=['y','x'])

        if dask:
            da1 = da1.chunk({'x': 16})
            da2 = da2.chunk({'x': 16})
        cp = xrft.cross_phase(da1, da2, dim=['x'])
        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset) 
Example #17
Source File: models.py    From neuropythy with GNU Affero General Public License v3.0
def angle_to_cortex(self, theta, rho):
        'See help(neuropythy.registration.RetinotopyModel.angle_to_cortex).'
        #TODO: This should be made to work correctly with visual area boundaries: this could be done
        # by, for each area (e.g., V2) looking at its boundaries (with V1 and V3) and flipping the
        # adjacent triangles so that there is complete coverage of each hemifield, guaranteed.
        if not pimms.is_vector(theta): return self.angle_to_cortex([theta], [rho])[0]
        theta = np.asarray(theta)
        rho = np.asarray(rho)
        # (np.complex was removed in NumPy 1.24; the builtin complex is equivalent)
        zs = np.asarray(
            rho * np.exp([complex(z) for z in 1j * ((90.0 - theta)/180.0*np.pi)]),
            dtype=complex)
        coords = np.asarray([zs.real, zs.imag]).T
        if coords.shape[0] == 0: return np.zeros((0, len(self.visual_meshes), 2))
        # we step through each area in the forward model and return the appropriate values
        tx = self.transform
        res = np.transpose(
            [self.visual_meshes[area].interpolate(coords, 'cortical_coordinates', method='linear')
             for area in sorted(self.visual_meshes.keys())],
            (1,0,2))
        if tx is not None:
            res = np.asarray(
                [np.dot(tx, np.vstack((area_xy.T, np.ones(len(area_xy)))))[0:2].T
                 for area_xy in res])
        return res 
Example #18
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def colors_to_cmap(colors):
    colors = np.asarray(colors)
    if len(colors.shape) == 1: return colors_to_cmap([colors])[0]
    if colors.shape[1] == 3:
        colors = np.hstack((colors, np.ones((len(colors),1))))
    # (dtype=float: the np.float alias was removed in NumPy 1.24)
    steps = (0.5 + np.asarray(range(len(colors)-1), dtype=float))/(len(colors) - 1)
    return matplotlib.colors.LinearSegmentedColormap(
        'auto_cmap',
        {clrname: ([(0, col[0], col[0])] +
                   [(step, c0, c1) for (step,c0,c1) in zip(steps, col[:-1], col[1:])] +
                   [(1, col[-1], col[-1])])
         for (clridx,clrname) in enumerate(['red', 'green', 'blue', 'alpha'])
         for col in [colors[:,clridx]]},
        N=(len(colors))) 
Example #19
Source File: point_cloud.py    From FRIDA with MIT License
def EDM(self):
        ''' Computes the EDM corresponding to the marker set '''
        if self.X is None:
            raise ValueError('No marker set')

        G = np.dot(self.X.T, self.X)
        return np.outer(np.ones(self.m), np.diag(G)) \
            - 2*G + np.outer(np.diag(G), np.ones(self.m)) 
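The two np.outer(np.ones(self.m), ...) terms above implement the identity D_ij = ||x_i||^2 + ||x_j||^2 - 2 x_i^T x_j. A quick standalone check on toy points (X as a 2 x m array of column vectors, as in the class):

import numpy as np

X = np.array([[0.0, 1.0, 0.0],    # three points in the plane, one per column
              [0.0, 0.0, 2.0]])
m = X.shape[1]
G = np.dot(X.T, X)
D = np.outer(np.ones(m), np.diag(G)) - 2*G + np.outer(np.diag(G), np.ones(m))
print(D)   # [[0. 1. 4.]  [1. 0. 5.]  [4. 5. 0.]] -- pairwise squared distances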
Example #20
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def _vertex_to_voxel_nearest_interpolation(hemi, gray_indices, voxel_to_vertex_matrix):
    if isinstance(hemi, Subject):       hemi   = (hemi.lh, hemi.rh)
    if isinstance(hemi, geo.VertexSet): vcount = hemi.vertex_count
    else:                               vcount = hemi[0].vertex_count + hemi[1].vertex_count
    n   = len(gray_indices[0])
    xyz = voxel_to_vertex_matrix.dot(np.vstack((np.asarray(gray_indices), np.ones(n))))[0:3].T
    if not xyz.flags['WRITEABLE']: xyz = np.array(xyz)
    # find the nearest to the white and pial layers
    if isinstance(hemi, Cortex):
        (wd, wn) = hemi.white_surface.vertex_hash.query(xyz, 1)
        (pd, pn) = hemi.pial_surface.vertex_hash.query(xyz, 1)
        oth = pd < wd
        wn[oth] = pn[oth]
    else:
        (lwd, lwn) = hemi[0].white_surface.vertex_hash.query(xyz, 1)
        (lpd, lpn) = hemi[0].pial_surface.vertex_hash.query(xyz, 1)
        (rwd, rwn) = hemi[1].white_surface.vertex_hash.query(xyz, 1)
        (rpd, rpn) = hemi[1].pial_surface.vertex_hash.query(xyz, 1)
        nn = hemi[0].vertex_count
        oth = lpd < lwd
        lwn[oth] = lpn[oth]
        lwd[oth] = lpd[oth]
        oth = rpd < lwd
        lwn[oth] = rpn[oth] + nn 
        lwd[oth] = rpd[oth]
        oth = rwd < lwd
        lwn[oth] = rwn[oth] + nn
        wn = lwn
    return sps.csr_matrix((np.ones(len(wn)), (range(len(wn)), wn)),
                          shape=(len(gray_indices[0]), vcount),
                          dtype=float)  # (the np.float alias was removed in NumPy 1.24)
Example #21
Source File: picklable_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, shape):
        self.input_shape = shape
        self.output_shape = shape
        channels = shape[-1]
        self.channels = channels
        self.actual_num_groups = min(self.channels, self.num_groups)
        extra_dims = (self.channels // self.actual_num_groups,
                      self.actual_num_groups)
        self.expanded_shape = tuple(shape[1:3]) + tuple(extra_dims)
        init_value = np.ones((channels,), dtype='float32') * self.init_gamma
        self.gamma = PV(init_value, name=self.name + "_gamma")
        self.beta = PV(np.zeros((self.channels,), dtype='float32'),
                       name=self.name + "_beta") 
Example #22
Source File: test_utils.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_get_logits_over_interval(self):
        import tensorflow as tf
        model = cnn_model()
        wrap = KerasModelWrapper(model)
        fgsm_params = {'eps': .5}
        img = np.ones(shape=(28, 28, 1))
        num_points = 21
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            logits = utils.get_logits_over_interval(sess, wrap,
                                                    img, fgsm_params,
                                                    min_epsilon=-10,
                                                    max_epsilon=10,
                                                    num_points=num_points)
            self.assertEqual(logits.shape[0], num_points) 
Example #23
Source File: coco_error_analysis.py    From mmdetection with Apache License 2.0
def makeplot(rs, ps, outDir, class_name, iou_type):
    cs = np.vstack([
        np.ones((2, 3)),
        np.array([.31, .51, .74]),
        np.array([.75, .31, .30]),
        np.array([.36, .90, .38]),
        np.array([.50, .39, .64]),
        np.array([1, .6, 0])
    ])
    areaNames = ['allarea', 'small', 'medium', 'large']
    types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
    for i in range(len(areaNames)):
        area_ps = ps[..., i, 0]
        figure_tile = iou_type + '-' + class_name + '-' + areaNames[i]
        aps = [ps_.mean() for ps_ in area_ps]
        ps_curve = [
            ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps
        ]
        ps_curve.insert(0, np.zeros(ps_curve[0].shape))
        fig = plt.figure()
        ax = plt.subplot(111)
        for k in range(len(types)):
            ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
            ax.fill_between(
                rs,
                ps_curve[k],
                ps_curve[k + 1],
                color=cs[k],
                label=str(f'[{aps[k]:.3f}]' + types[k]))
        plt.xlabel('recall')
        plt.ylabel('precision')
        plt.xlim(0, 1.)
        plt.ylim(0, 1.)
        plt.title(figure_tile)
        plt.legend()
        # plt.show()
        fig.savefig(outDir + f'/{figure_tile}.png')
        plt.close(fig) 
Example #24
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def dataframe_except(df, *cols, **filters):
    '''
    dataframe_except(df, k1=v1, k2=v2...) yields df after filtering its rows so that, for each of
      the given keys (k1, k2, etc.), the associated column contains only cells that match the
      corresponding value.
    dataframe_except(df, col1, col2...) selects all columns except for the given columns.
    dataframe_except(df, col1, col2..., k1=v1, k2=v2...) selects on both conditions.
    
    The dataframe_except() function is identical to the dataframe_select() function with the single
    difference being that the column names provided to dataframe_except() are dropped from the
    result while column names passed to dataframe_select() are kept.

    If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
    between the values. If value is a tuple/list of more than 2 elements or is a set of any length
    then it is a list of values, any one of which can match the cell.
    '''
    ii = np.ones(len(df), dtype='bool')
    for (k,v) in six.iteritems(filters):
        vals = df[k].values
        if   pimms.is_set(v):                    jj = np.isin(vals, list(v))
        elif pimms.is_vector(v) and len(v) == 2: jj = (v[0] <= vals) & (vals < v[1])
        elif pimms.is_vector(v):                 jj = np.isin(vals, list(v))
        else:                                    jj = (vals == v)
        ii = np.logical_and(ii, jj)
    if len(ii) != np.sum(ii): df = df.loc[ii]
    if len(cols) > 0: df = df.drop(list(cols), axis=1, inplace=False)
    return df 
Example #25
Source File: doa.py    From FRIDA with MIT License
def build_lookup(self, r=None, theta=None, phi=None):
        """
        Construct lookup table for given candidate locations (in spherical 
        coordinates). Each column is a location in cartesian coordinates.

        :param r: Candidate distances from the origin.
        :type r: numpy array
        :param theta: Candidate azimuth angles with respect to x-axis.
        :type theta: numpy array
        :param phi: Candidate elevation angles with respect to z-axis.
        :type phi: numpy array
        """
        if theta is not None:
            self.theta = theta
        if phi is not None:
            self.phi = phi
        if r is not None:
            self.r = r
            if self.r == np.ones(1):
                self.mode = 'far'
            else:
                self.mode = 'near'
        self.loc = np.zeros([self.D, len(self.r) * len(self.theta) * 
            len(self.phi)])
        self.num_loc = self.loc.shape[1]
        # convert to cartesian
        for i in range(len(self.r)):
            r_s = self.r[i]
            for j in range(len(self.theta)):
                theta_s = self.theta[j]
                for k in range(len(self.phi)):
                    # spher = np.array([r_s,theta_s,self.phi[k]])
                    self.loc[:, i * len(self.theta) + j * len(self.phi) + k] = \
                        spher2cart(r_s, theta_s, self.phi[k])[0:self.D] 
Example #26
Source File: dotplots.py    From svviz with MIT License
def dotplot2(s1, s2, wordsize=5, overlap=5, verbose=1):
        """ verbose = 0 (no progress), 1 (progress if s1 and s2 are long) or
        2 (progress in any case) """
        doProgress = False
        if verbose > 1 or len(s1)*len(s2) > 1e6:
            doProgress = True

        mat = numpy.ones(((len(s1)-wordsize)//overlap+2, (len(s2)-wordsize)//overlap+2))  # floor division keeps the shape integral under Python 3

        for i in range(0, len(s1)-wordsize, overlap):
            if i % 1000 == 0 and doProgress:
                logging.info("  dotplot progress: {} of {} rows done".format(i, len(s1)-wordsize))
            word1 = s1[i:i+wordsize]

            for j in range(0, len(s2)-wordsize, overlap):
                word2 = s2[j:j+wordsize]

                if word1 == word2 or word1 == word2[::-1]:
                    mat[i//overlap, j//overlap] = 0  # floor division keeps the indices integral under Python 3
        
        imgData = None
        tempDir = tempfile.mkdtemp()
        try:
            path = os.path.join(tempDir, "dotplot.png")
            misc.imsave(path, mat)  # (scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is a common replacement)
            imgData = open(path, "rb").read()  # read the PNG back as binary data
        except Exception as e:
            logging.error("Error generating dotplots:'{}'".format(e))
        finally:
            shutil.rmtree(tempDir)
        return imgData 
Example #27
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0 5 votes vote down vote up
def unbroadcast(a, b):
    '''
    unbroadcast(a, b) yields a tuple (aa, bb) that is equivalent to (a, b) except that aa and bb
      have been reshaped such that arithmetic numpy operations such as aa * bb will result in
      row-wise operation instead of column-wise broadcasting.
    '''
    # they could be sparse:
    spa = sps.issparse(a)
    spb = sps.issparse(b)
    if   spa and spb: return (a,b)
    elif spa or  spb:
        def fix(sp,nm):
            nm = np.asarray(nm)
            dnm = len(nm.shape)
            nnm = np.prod(nm.shape)
            # if we have (sparse matrix) * (high-dim array), unbroadcast the dense array
            if   dnm == 0: return (sp, np.reshape(nm, (1,   1)))
            elif dnm == 1: return (sp, np.reshape(nm, (nnm, 1)))
            elif dnm == 2: return (sp, nm)
            else:          return unbroadcast(sp.toarray(), nm)
        return fix(a, b) if spa else tuple(reversed(fix(b, a)))
    # okay, no sparse matrices found:
    a = np.asarray(a)
    b = np.asarray(b)
    da = len(a.shape)
    db = len(b.shape)
    # (dtype=int: the np.int alias was removed in NumPy 1.24)
    if   da > db: return (a, np.reshape(b, b.shape + tuple(np.ones(da-db, dtype=int))))
    elif da < db: return (np.reshape(a, a.shape + tuple(np.ones(db-da, dtype=int))), b)
    else:         return (a, b) 
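A short sketch of what the da > db branch above does for dense arrays: trailing singleton axes (built with np.ones(..., dtype=int)) make the smaller operand broadcast row-wise instead of raising an error (toy arrays, not neuropythy data):

import numpy as np

a = np.arange(6.0).reshape(2, 3)   # shape (2, 3)
b = np.array([10.0, 20.0])         # shape (2,); plain a * b would raise a broadcast error
bb = np.reshape(b, b.shape + tuple(np.ones(a.ndim - b.ndim, dtype=int)))   # shape (2, 1)
print(a * bb)                      # [[  0.  10.  20.]  [ 60.  80. 100.]] -- row-wise scaling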
Example #28
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def czdivide(a, b, null=0):
    '''
    czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
      or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
      czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
      divide-by-zero entries are replaced with 0 in the result.

    The optional argument null (default: 0) may be given to specify that zeros in the array b should
    instead be replaced with the given value in the result. Note that if this value is not equal to
    0, then any sparse array passed as argument b must be reified.

    The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
    use the cdivide function instead.
    '''
    if null == 0:         return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b)
    elif sps.issparse(b): b = b.toarray()
    else:                 b = np.asarray(b)
    z = np.isclose(b, 0)
    q = np.logical_not(z)
    zi = q / (b + z)
    if sps.issparse(a):
        r = a.multiply(zi).tocsr()
    else:
        r = np.asarray(a) * zi
    r[np.ones(a.shape, dtype=bool)*z] = null  # (the np.bool alias was removed in NumPy 1.24)
    return r 
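For dense inputs, the effect of czdivide can be sketched with plain numpy: divide where the denominator is nonzero and substitute the null value elsewhere (np.divide's out/where arguments do the masking; this is an illustration, not the neuropythy implementation):

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 0.0, 4.0])
null = 0.0
out = np.full_like(a, null)            # pre-fill with the null value
np.divide(a, b, out=out, where=(b != 0))
print(out)                             # [0.5  0.   0.75]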
Example #29
Source File: retinotopy.py    From neuropythy with GNU Affero General Public License v3.0
def fit_pRF_radius(ctx, retinotopy=Ellipsis, mask=None, weight=Ellipsis, slope_only=False):
    '''
    fit_pRF_radius(ctx) fits a line, m*eccen + b, to the pRF radius and yields the tuple (m, b).

    The following options may be given:
      * retinotopy (default: Ellipsis) specifies the prefix for the retinotopy (passed to
        retinotopy_data() to find the retinotopic dataset).
      * mask (default: None) specifies the mask over which to perform the calculation. This is
        passed to the to_mask() function. In the case that mask is a set or frozenset, then it is
        treated as a conjunction (intersection) of masks.
      * weight (default: Ellipsis) specifies that a weight should be used; if this is True or Ellipsis,
        will use the variance_explained if it is part of the retinotopy dataset; if this is False or
        None, uses no weight; otherwise, this must be a weight property or property name.
      * slope_only (default: False) may be set to True to instead fit radius = m*eccen and return
        only m.
    '''
    rdat = retinotopy_data(ctx, retinotopy)
    if 'radius' not in rdat: raise ValueError('No pRF radius found in dataset %s' % retinotopy)
    rad = rdat['radius']
    (ang,ecc) = as_retinotopy(rdat, 'visual')
    if isinstance(mask, (set, frozenset)):
        mask = reduce(np.intersect1d, [ctx.mask(m, indices=True) for m in mask])
    else: mask = ctx.mask(mask, indices=True)
    # get a weight if provided:
    if weight in [False, None]: wgt = np.ones(rad.shape)
    elif weight in [True, Ellipsis]:
        if 'variance_explained' in rdat: wgt = rdat['variance_explained']
        else: wgt = np.ones(rad.shape)
    else: wgt = ctx.property(weight)
    # get the relevant eccen and radius values
    (ecc,rad,wgt) = [x[mask] for x in (ecc,rad,wgt)]
    # fit a line...
    if slope_only:
        ecc = np.reshape(ecc * wgt, (len(ecc), 1))
        rad = np.reshape(rad * wgt, (len(rad), 1))
        return np.linalg.lstsq(ecc, rad)[0]
    else:
        return tuple(np.polyfit(ecc, rad, 1, w=wgt)) 
Example #30
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def t(distances,coordinates):
        n = coordinates.shape[1]
        if distances is None: distances = np.ones(n - 1)
        t = np.cumsum(np.pad(distances, (1,0), 'constant'))
        t.setflags(write=False)
        return t
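A quick sketch of the default branch above: with no distances given, np.ones(n - 1) assumes unit spacing, so the cumulative parameter is simply 0, 1, 2, ... along the curve (the function body is inlined here on toy coordinates):

import numpy as np

coordinates = np.zeros((2, 4))    # four 2-D points (values irrelevant for this sketch)
n = coordinates.shape[1]
distances = np.ones(n - 1)        # default: unit distance between consecutive points
t = np.cumsum(np.pad(distances, (1, 0), 'constant'))
print(t)                          # [0. 1. 2. 3.]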