Python numpy.reshape() Examples

The following are 30 code examples showing how to use numpy.reshape(). They are extracted from open source projects; each example lists the project, author, file, and license it was taken from.

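Before the project examples, here is a minimal, self-contained sketch of the numpy.reshape() behaviors that recur throughout them: reshaping to an explicit shape, letting -1 infer one dimension, and choosing between row-major ('C') and column-major ('F') element order. The array names below are illustrative only.

import numpy as np

a = np.arange(12)                      # shape (12,)
m = np.reshape(a, (3, 4))              # explicit target shape
r = np.reshape(a, (3, -1))             # -1 lets numpy infer the remaining dimension (4)
f = np.reshape(a, (3, 4), order='F')   # fill column by column instead of row by row
v = a.reshape(-1, 1)                   # method form; a column vector, handy for broadcasting

print(m.shape, r.shape, v.shape)       # (3, 4) (3, 4) (12, 1)
print(f[0])                            # [0 3 6 9]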

Example 1
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    License: Apache License 2.0
def set_values(name, param, pretrained):
#{{{
    """
    Initialize a network parameter with pretrained values.
    We check that sizes are compatible.
    """
    param_value = param.get_value()
    if pretrained.size != param_value.size:
        raise Exception(
            "Size mismatch for parameter %s. Expected %i, found %i."
            % (name, param_value.size, pretrained.size)
        )
    param.set_value(np.reshape(
        pretrained, param_value.shape
    ).astype(np.float32))
#}}} 
Example 2
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    License: MIT License
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    build the matrix that maps the Fourier series to the visibility
    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that contains the microphones' x coordinates
    :param p_mic_y: a vector that contains the microphones' y coordinates
    :return:
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    count_G = 0
    for q in range(num_mic):
        p_x_outer = p_mic_x[q]
        p_y_outer = p_mic_y[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - p_mic_x[qp]
                p_y_qqp = p_y_outer - p_mic_y[qp]
                norm_p_qqp = np.sqrt(p_x_qqp ** 2 + p_y_qqp ** 2)
                phi_qqp = np.arctan2(p_y_qqp, p_x_qqp)
                G[count_G, :] = (-1j) ** ms * sp.special.jv(ms, norm_p_qqp) * \
                                np.exp(1j * ms * phi_qqp)
                count_G += 1
    return G 
Example 3
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    License: MIT License
def mtx_updated_G(phi_recon, M, mtx_amp2visi_ri, mtx_fri2visi_ri):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations.
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is limited from -M to M
    :param mtx_amp2visi_ri: the linear mapping from the Dirac amplitudes to the
        visibilities (real-valued representation)
    :param mtx_fri2visi_ri: the linear mapping from the FRI sequence to the
        visibilities (real-valued representation)
    :return:
    """
    L = 2 * M + 1
    ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    mtx_amp2freq = np.exp(-1j * ms_half * phi_recon)  # size: (M + 1) x K
    mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :]))  # size: (2M + 1) x K
    mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # project mtx_freq2visi onto the null space of mtx_fri2amp
    mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
                                       linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
    G_updated = np.dot(mtx_amp2visi_ri, mtx_fri2amp_ri) + \
                np.dot(mtx_fri2visi_ri, mtx_null_proj)
    return G_updated 
Example 4
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    License: BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):

        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target 
Example 5
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    License: BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):

        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(3,224,224)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = self.adv_dict["adv_labels"][self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target 
Example 6
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    License: BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):

        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target 
Example 7
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    License: BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):
        img=self.adv_flat[self.sample_num,:]
        if(self.transp == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(28,28)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        self.sample_num = self.sample_num + 1
        return img, target 
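
The four __getitem__ implementations above share one pattern: a flattened adversarial sample is reshaped back into an image layout before being handed to torch. Below is a minimal sketch of that pattern, with the array size assumed to be a CIFAR-like 3x32x32 sample purely for illustration.

import numpy as np
import torch

flat = np.random.rand(3 * 32 * 32).astype(np.float32)  # one flattened sample (assumed size)
img = np.reshape(flat, (3, 32, 32))                     # back to (channels, height, width)
tensor = torch.from_numpy(img)                          # shares memory with the numpy array
print(tensor.shape)                                     # torch.Size([3, 32, 32])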
Example 8
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Example 9
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_gpu.py    License: MIT License
def auto_inverse(self, whole_spectrum):
        whole_spectrum = np.copy(whole_spectrum).astype(complex)
        whole_spectrum[whole_spectrum < 1] = 1
        overwrap = self.buffer_size * 2
        height = whole_spectrum.shape[0]
        parallel_dif = (height-overwrap) // self.parallel
        if height < self.parallel*overwrap:
            raise Exception('voice length is too small to use gpu, or parallel number is too big')

        spec = [self.inverse(whole_spectrum[range(i, i+parallel_dif*self.parallel, parallel_dif), :]) for i in tqdm.tqdm(range(parallel_dif+overwrap))]
        spec = spec[overwrap:]
        spec = np.concatenate(spec, axis=1)
        spec = spec.reshape(-1, self.wave_len)

        # The code below doesn't take wave_len and wave_dif into account yet; to be fixed.
        wave = np.fft.ifft(spec, axis=1).real
        pad = np.zeros((wave.shape[0], 2), dtype=float)
        wave = np.concatenate([wave, pad], axis=1)

        dst = np.zeros((wave.shape[0]+3)*self.wave_dif, dtype=float)
        for i in range(4):
            w = wave[range(i, wave.shape[0], 4),:]
            w = w.reshape(-1)
            dst[i*self.wave_dif:i*self.wave_dif+len(w)] += w
        return dst*0.5 
Example 10
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    License: MIT License
def wave2input_image(wave, window, pos=0, pad=0):
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image 
Example 11
Project: deep-learning-note   Author: wdxtub   File: 8_kmeans_pca.py    License: MIT License
def plot_n_image(X, n):
    """ plot first n images
    n has to be a square number
    """
    pic_size = int(np.sqrt(X.shape[1]))
    grid_size = int(np.sqrt(n))

    first_n_images = X[:n, :]

    fig, ax_array = plt.subplots(nrows=grid_size, ncols=grid_size,
                                    sharey=True, sharex=True, figsize=(8, 8))

    for r in range(grid_size):
        for c in range(grid_size):
            ax_array[r, c].imshow(first_n_images[grid_size * r + c].reshape((pic_size, pic_size)))
            plt.xticks(np.array([]))
            plt.yticks(np.array([])) 
Example 12
Project: neuropythy   Author: noahbenson   File: images.py    License: GNU Affero General Public License v3.0
def parse_dataobj(self, dataobj, hdat={}):
        # first, see if we have a specified shape/size
        ish = next((hdat[k] for k in ('image_size', 'image_shape', 'shape') if k in hdat), None)
        if ish is Ellipsis: ish = None
        # make a numpy array of the appropriate dtype
        dtype = self.parse_type(hdat, dataobj=dataobj)
        try:    dataobj = dataobj.dataobj
        except Exception: pass
        if   dataobj is not None: arr = np.asarray(dataobj).astype(dtype)
        elif ish:                 arr = np.zeros(ish,       dtype=dtype)
        else:                     arr = np.zeros([1,1,1,0], dtype=dtype)
        # reshape to the requested shape if need be
        if ish and ish != arr.shape: arr = np.reshape(arr, ish)
        # then reshape to a valid (4D) shape
        sh = arr.shape
        if   len(sh) == 2: arr = np.reshape(arr, (sh[0], 1, 1, sh[1]))
        elif len(sh) == 1: arr = np.reshape(arr, (sh[0], 1, 1))
        elif len(sh) == 3: arr = np.reshape(arr, sh)
        elif len(sh) != 4: raise ValueError('Cannot convert n-dimensional array to image if n > 4')
        # and return
        return arr 
Example 13
Project: neuropythy   Author: noahbenson   File: images.py    License: GNU Affero General Public License v3.0
def image_reslice(image, spec, method=None, fill=0, dtype=None, weights=None, image_type=None):
    '''
    image_reslice(image, spec) yields a duplicate of the given image resliced to have the voxels
      indicated by the given image spec. Note that spec may be an image itself.

    Optional arguments that can be passed to image_interpolate() (aside from affine) are allowed
    here and are passed through.
    '''
    if image_type is None and is_image(image): image_type = to_image_type(image)
    spec = to_image_spec(spec)
    image = to_image(image)
    # we make a big mesh and interpolate at these points...
    imsh = spec['image_shape']
    (args, kw) = ([np.arange(n) for n in imsh[:3]], {'indexing': 'ij'})
    ijk = np.asarray([u.flatten() for u in np.meshgrid(*args, **kw)])
    ijk = np.dot(spec['affine'], np.vstack([ijk, np.ones([1,ijk.shape[1]])]))[:3]
    # interpolate here...
    u = image_interpolate(image, ijk, method=method, fill=fill, dtype=dtype, weights=weights)
    return to_image((np.reshape(u, imsh), spec), image_type=image_type) 
Example 14
Project: neuropythy   Author: noahbenson   File: util.py    License: GNU Affero General Public License v3.0
def point_on_segment(ac, b, atol=1e-8):
    '''
    point_on_segment((a,b), c) yields True if point c is on segment (a,b) and False otherwise. Note
    that this differs from point_in_segment in that if c is equal to a or b it is considered 'on'
    but not 'in' the segment.
    The option atol can be given and is used only to test for difference from 0; by default it is
    1e-8.
    '''
    (a,c) = ac
    abc = [np.asarray(u) for u in (a,b,c)]
    if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else:                                  (a,b,c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return np.isclose(dab + dbc - dac, 0, atol=atol) 
Example 15
Project: neuropythy   Author: noahbenson   File: util.py    License: GNU Affero General Public License v3.0
def point_in_segment(ac, b, atol=1e-8):
    '''
    point_in_segment((a,b), c) yields True if point c is in segment (a,b) and False otherwise. Note
    that this differs from point_on_segment in that if c is equal to a or b it is considered 'on'
    but not 'in' the segment.
    The option atol can be given and is used only to test for difference from 0; by default it is
    1e-8.
    '''
    (a,c) = ac
    abc = [np.asarray(u) for u in (a,b,c)]
    if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else:                                  (a,b,c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return (np.isclose(dab + dbc - dac, 0, atol=atol) &
            ~np.isclose(dac - dab, 0, atol=atol) &
            ~np.isclose(dac - dbc, 0, atol=atol)) 
Example 16
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def row_norms(ii, f=Ellipsis, squared=False):
    '''
    row_norms(ii) yields a potential function h(x) that calculates the vector norms of the rows of
      the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    row_norms(ii, f) yields a potential function h(x) equivalent to compose(row_norms(ii), f).
    '''
    try:
        (n,m) = ii
        # matrix shape given
        ii = np.reshape(np.arange(n*m), (n,m))
    except Exception: ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii.T], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    F = reduce(lambda a,b: a + b, [part(Ellipsis, col)**2 for col in ii.T])
    F = compose(F, f)
    if not squared: F = sqrt(F)
    return F 
Example 17
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def col_norms(ii, f=Ellipsis, squared=False):
    '''
    col_norms(ii) yields a potential function h(x) that calculates the vector norms of the columns
      of the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    col_norms(ii, f) yields a potential function h(x) equivalent to compose(col_norms(ii), f).
    '''
    try:
        (n,m) = ii
        # matrix shape given
        ii = np.reshape(np.arange(n*m), (n,m))
    except Exception: ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    F = reduce(lambda a,b: a + b, [part(Ellipsis, col)**2 for col in ii])
    F = compose(F, f)
    if not squared: F = sqrt(F)
    return F 
Example 18
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def distances(a, b, shape, squared=False, axis=1):
    '''
    distances(a, b, (n,d)) yields a potential function whose output is equivalent to the row-norms
      of reshape(a(x), (n,d)) - reshape(b(x), (n,d)).
    
    The shape argument (n,m) may alternately be a matrix of parameter indices, as can be passed to
    row_norms and col_norms.

    The following optional arguments are accepted:
      * squared (default: False) specifies whether the output should be the square distance or the
        distance.
      * axis (default: 1) specifies whether the rows (axis = 1) or columns (axis = 0) are treated
        as the vectors between which the distances should be calculated.
    '''
    a = to_potential(a)
    b = to_potential(b)
    if axis == 1: return row_norms(shape, a - b, squared=squared)
    else:         return col_norms(shape, a - b, squared=squared) 
Example 19
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def signed_face_areas(faces, axis=1):
    '''
    signed_face_areas(faces) yields a potential function f(x) that calculates the signed area of
      each face represented by the simplices matrix faces.

    If faces is None, then the parameters must arrive in the form of a flattened (n x 3 x 2) matrix
    where n is the number of triangles. Otherwise, the faces matrix must be either (n x 3) or (n x 3
    x s); if the former, each row must list the vertex indices for the faces where the vertex matrix
    is presumed to be shaped (V x 2). Alternately, faces may be a full (n x 3 x 2) simplex array of
    the indices into the parameters.

    The optional argument axis (default: 1) may be set to 0 if the faces argument is a matrix but
    the coordinate matrix will be (2 x V) instead of (V x 2).
    '''
    faces = np.asarray(faces)
    if len(faces.shape) == 2:
        if faces.shape[1] != 3: faces = faces.T
        n = 2 * (np.max(faces) + 1)
        if axis == 0: tmp = np.reshape(np.arange(n), (2,-1)).T
        else:         tmp = np.reshape(np.arange(n), (-1,2))
        faces = np.reshape(tmp[faces.flat], (-1,3,2))
    faces = faces.flatten()
    return compose(TriangleSignedArea2DPotential(), part(Ellipsis, faces)) 
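
The index bookkeeping in signed_face_areas hinges on two ways of reshaping np.arange(n): (2, -1) followed by a transpose pairs indices column-wise (all x indices first, then all y indices), while (-1, 2) pairs them as interleaved (x, y). A small sketch of the difference, independent of the potential-function machinery:

import numpy as np

n = 8  # 4 vertices with 2 coordinates each
axis0 = np.reshape(np.arange(n), (2, -1)).T  # for a (2 x V) coordinate matrix
axis1 = np.reshape(np.arange(n), (-1, 2))    # for a (V x 2) coordinate matrix
print(axis0)  # rows: [0 4], [1 5], [2 6], [3 7]
print(axis1)  # rows: [0 1], [2 3], [4 5], [6 7]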
Example 20
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def jacobian(self, p, into=None):
        # transpose to be 3 x 2 x n
        p = np.transpose(np.reshape(p, (-1, 3, 2)), (1,2,0))
        # First, get the two legs...
        (dx_ab, dy_ab) = p[1] - p[0]
        (dx_ac, dy_ac) = p[2] - p[0]
        (dx_bc, dy_bc) = p[2] - p[1]
        # now, the area is half the z-value of the cross-product...
        sarea0 = 0.5 * (dx_ab*dy_ac - dx_ac*dy_ab)
        # but we want to abs it
        dsarea0 = np.sign(sarea0)
        z = np.transpose([[-dy_bc,dx_bc], [dy_ac,-dx_ac], [-dy_ab,dx_ab]], (2,0,1))
        z = times(0.5*dsarea0, z)
        m = numel(p)
        n = p.shape[2]
        ii = (np.arange(n) * np.ones([6, n])).T.flatten()
        z = sps.csr_matrix((z.flatten(), (ii, np.arange(len(ii)))), shape=(n, m))
        return safe_into(into, z) 
Example 21
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def face_areas(faces, axis=1):
    '''
    face_areas(faces) yields a potential function f(x) that calculates the unsigned area of each
      face represented by the simplices matrix faces.

    If faces is None, then the parameters must arrive in the form of a flattened (n x 3 x 2) matrix
    where n is the number of triangles. Otherwise, the faces matrix must be either (n x 3) or (n x 3
    x s); if the former, each row must list the vertex indices for the faces where the vertex matrix
    is presumed to be shaped (V x 2). Alternately, faces may be a full (n x 3 x 2) simplex array of
    the indices into the parameters.

    The optional argument axis (default: 1) may be set to 0 if the faces argument is a matrix but
    the coordinate matrix will be (2 x V) instead of (V x 2).
    '''
    faces = np.asarray(faces)
    if len(faces.shape) == 2:
        if faces.shape[1] != 3: faces = faces.T
        n = 2 * (np.max(faces) + 1)
        if axis == 0: tmp = np.reshape(np.arange(n), (2,-1)).T
        else:         tmp = np.reshape(np.arange(n), (-1,2))
        faces = np.reshape(tmp[faces.flat], (-1,3,2))
    faces = faces.flatten()
    return compose(TriangleArea2DPotential(), part(Ellipsis, faces)) 
Example 22
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def apply_affine(aff, coords):
    '''
    apply_affine(affine, coords) yields the result of applying the given affine transformation to
      the given coordinate or coordinates.

    This function expects coords to be a (dims X n) matrix but if the first dimension is neither 2
    nor 3, coords.T is used; i.e.:
      apply_affine(affine3x3, coords2xN) ==> newcoords2xN
      apply_affine(affine4x4, coords3xN) ==> newcoords3xN
      apply_affine(affine3x3, coordsNx2) ==> newcoordsNx2 (for N != 2)
      apply_affine(affine4x4, coordsNx3) ==> newcoordsNx3 (for N != 3)
    '''
    if aff is None: return coords
    (coords,tr) = (np.asanyarray(coords), False)
    if len(coords.shape) == 1: return np.squeeze(apply_affine(aff, np.reshape(coords, (-1,1))))
    elif len(coords.shape) > 2: raise ValueError('cannot apply affine to ND-array for N > 2')
    if   len(coords) == 2: aff = to_affine(aff, 2)
    elif len(coords) == 3: aff = to_affine(aff, 3)
    else: (coords,aff,tr) = (coords.T, to_affine(aff, coords.shape[1]), True)
    r = np.dot(aff, np.vstack([coords, np.ones([1,coords.shape[1]])]))[:-1]
    return r.T if tr else r 
Example 23
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def cplus(*args):
    '''
    cplus(a, b...) returns the sum of all the values as a numpy array object. Like numpy's add
      function or a+b syntax, plus will thread over the latest dimension possible.

    Additionally, cplus works correctly with sparse arrays.
    '''
    n = len(args)
    if   n == 0: return np.asarray(0)
    elif n == 1: return np.asarray(args[0])
    elif n >  2: return reduce(plus, args)
    (a,b) = args
    if sps.issparse(a):
        if not sps.issparse(b):
            b = np.asarray(b)
            if len(b.shape) == 0: b = np.reshape(b, (1,1))
    elif sps.issparse(b):
        a = np.asarray(a)
        if len(a.shape) == 0: a = np.reshape(a, (1,1))
    else:
        a = np.asarray(a)
        b = np.asarray(b)
    return a + b 
Example 24
Project: svviz   Author: svviz   File: kde.py    License: MIT License
def evaluate(self, points):
        points = atleast_2d(points)

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)

        result = zeros((m,), dtype=float)

        if m >= self.n:
            # there are more points than data, so loop over data
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff*tdiff,axis=0) / 2.0
                result = result + exp(-energy)
        else:
            # loop over points
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result[i] = sum(exp(-energy), axis=0)

        result = result / self._norm_factor

        return result 
Example 25
Project: libTLDA   Author: wmkouw   File: viz.py    License: MIT License
def plotc(parameters, ax=None, color='k', gridsize=(101, 101)):
    """
    Plot a linear classifier in a 2D scatterplot.

    INPUT   (1) tuple 'parameters': consists of a list of class proportions
                (1 by K classes), an array of class means (K classes by
                D features), an array of class-covariance matrices (D features
                by D features by K classes)
            (2) object 'ax': axes of a pyplot figure or subplot (def: None)
            (3) str 'color': color of the contours in the plot (def: 'k')
            (4) tuple 'gridsize': number of points in the grid
                (def: (101, 101))
    OUTPUT  None
    """
    # Check for axes object; create a new figure if none was given
    if ax is None:
        _, ax = plt.subplots()

    # Get axes limits
    xl = ax.get_xlim()
    yl = ax.get_ylim()

    # Define grid
    gx = np.linspace(xl[0], xl[1], gridsize[0])
    gy = np.linspace(yl[0], yl[1], gridsize[1])
    x, y = np.meshgrid(gx, gy)
    xy = np.vstack((x.ravel(), y.ravel())).T

    # Values of grid
    z = np.dot(xy, parameters[:-1, :]) + parameters[-1, :]
    z = np.reshape(z[:, 0] - z[:, 1], gridsize)

    # Plot grid
    ax.contour(x, y, z, levels=[0], colors=color) 
Example 26
def _shuffle_roidb_inds(self):
    """Randomly permute the training roidb."""
    # If the random flag is set, 
    # then the database is shuffled according to system time
    # Useful for the validation set
    if self._random:
      st0 = np.random.get_state()
      millis = int(round(time.time() * 1000)) % 4294967295
      np.random.seed(millis)
    
    if cfg.TRAIN.ASPECT_GROUPING:
      raise NotImplementedError
      '''
      widths = np.array([r['width'] for r in self._roidb])
      heights = np.array([r['height'] for r in self._roidb])
      horz = (widths >= heights)
      vert = np.logical_not(horz)
      horz_inds = np.where(horz)[0]
      vert_inds = np.where(vert)[0]
      inds = np.hstack((
          np.random.permutation(horz_inds),
          np.random.permutation(vert_inds)))
      inds = np.reshape(inds, (-1, 2))
      row_perm = np.random.permutation(np.arange(inds.shape[0]))
      inds = np.reshape(inds[row_perm, :], (-1,))
      self._perm = inds
      '''
    else:
      self._perm = np.random.permutation(np.arange(len(self._roidb)))
    # Restore the random state
    if self._random:
      np.random.set_state(st0)
      
    self._cur = 0 
Example 27
Project: xrft   Author: xgcm   File: xrft.py    License: MIT License
def _stack_chunks(da, dim, suffix='_segment'):
    """Reshape a DataArray so there is only one chunk along dimension `dim`"""
    data = da.data
    attr = da.attrs
    newdims = []
    newcoords = {}
    newshape = []
    for d in da.dims:
        if d in dim:
            axis_num = da.get_axis_num(d)
            if np.diff(da.chunks[axis_num]).sum() != 0:
                raise ValueError("Chunk lengths need to be the same.")
            n = len(da[d])
            chunklen = da.chunks[axis_num][0]
            coord_rs = da[d].data.reshape((int(n/chunklen),int(chunklen)))
            newdims.append(d + suffix)
            newdims.append(d)
            newshape.append(int(n/chunklen))
            newshape.append(int(chunklen))
            newcoords[d+suffix] = range(int(n/chunklen))
            newcoords[d] = coord_rs[0]
        else:
            newdims.append(d)
            newshape.append(len(da[d]))
            newcoords[d] = da[d].data

    da = xr.DataArray(data.reshape(newshape), dims=newdims, coords=newcoords,
                     attrs=attr)

    return da 
Example 28
Project: xrft   Author: xgcm   File: test_xrft.py    License: MIT License
def numpy_detrend(da):
    """
    Detrend a 2D field by subtracting out the least-square plane fit.

    Parameters
    ----------
    da : `numpy.array`
        The data to be detrended

    Returns
    -------
    da : `numpy.array`
        The detrended input data
    """
    N = da.shape

    G = np.ones((N[0]*N[1],3))
    for i in range(N[0]):
        G[N[1]*i:N[1]*i+N[1], 1] = i+1
        G[N[1]*i:N[1]*i+N[1], 2] = np.arange(1, N[1]+1)

    d_obs = np.reshape(da.copy(), (N[0]*N[1],1))
    m_est = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), d_obs)
    d_est = np.dot(G, m_est)

    lin_trend = np.reshape(d_est, N)

    return da - lin_trend 
Example 29
Project: FRIDA   Author: LCAV   File: generators.py    License: MIT License
def gen_sig_at_mic(sigmak2_k, phi_k, pos_mic_x,
                   pos_mic_y, omega_band, sound_speed,
                   SNR, Ns=256):
    """
    generate complex base-band signal received at microphones
    :param sigmak2_k: the variance of the circulant complex Gaussian signal
                emitted by the K sources
    :param phi_k: source locations (azimuths)
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param SNR: SNR for the received signal at microphones
    :param Ns: number of snapshots used to estimate the covariance matrix
    :return: y_mic: received (complex) signal at microphones
    """
    num_mic = pos_mic_x.size
    xk, yk = polar2cart(1, phi_k)  # source locations in cartesian coordinates
    # reshape to use broadcasting
    xk = np.reshape(xk, (1, -1), order='F')
    yk = np.reshape(yk, (1, -1), order='F')
    pos_mic_x = np.reshape(pos_mic_x, (-1, 1), order='F')
    pos_mic_y = np.reshape(pos_mic_y, (-1, 1), order='F')

    t = np.reshape(np.linspace(0, 10 * np.pi, num=Ns), (1, -1), order='F')
    K = sigmak2_k.size
    sigmak2_k = np.reshape(sigmak2_k, (-1, 1), order='F')

    # x_tilde_k size: K x length_of_t
    # circular complex Gaussian process
    x_tilde_k = np.sqrt(sigmak2_k / 2.) * (np.random.randn(K, Ns) + 1j *
                                           np.random.randn(K, Ns))
    y_mic = np.dot(np.exp(-1j * (xk * pos_mic_x + yk * pos_mic_y) / (sound_speed / omega_band)),
                   x_tilde_k * np.exp(1j * omega_band * t))
    signal_energy = linalg.norm(y_mic, 'fro') ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / (Ns * num_mic)
    noise = np.sqrt(sigma2_noise / 2.) * (np.random.randn(*y_mic.shape) + 1j *
                                          np.random.randn(*y_mic.shape))
    y_mic_noisy = y_mic + noise
    return y_mic_noisy, y_mic 
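
gen_sig_at_mic reshapes the source coordinates into row vectors and the microphone coordinates into column vectors so that their product broadcasts into a (num_mic x K) matrix. A minimal sketch of that broadcasting idiom, with made-up values:

import numpy as np

xk = np.reshape(np.array([0.1, 0.7, -0.3]), (1, -1))        # K = 3 sources as a row vector, shape (1, 3)
pos_mic_x = np.reshape(np.arange(5, dtype=float), (-1, 1))  # num_mic = 5 microphones as a column, shape (5, 1)
phase = xk * pos_mic_x                                      # broadcasts to shape (num_mic, K) = (5, 3)
print(phase.shape)                                          # (5, 3)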
Example 30
Project: FRIDA   Author: LCAV   File: doa.py    License: MIT License
def polar_distance(x1, x2):
    """
    Given two arrays of numbers x1 and x2, pairs the entries that are the
    closest and provides the pairing index: x1[index[:, 0]] should be as close
    as possible to x2[index[:, 1]]. The function outputs the average of the
    absolute value of the differences abs(x1[index[:, 0]] - x2[index[:, 1]]).
    :param x1: vector 1
    :param x2: vector 2
    :return: d: the average distance between the matched entries
             index: the index pairs that realize the matching
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        for k in range(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float('inf')
            diffmat[:, index1] = float('inf')
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            index = np.array([1, index])
        else:
            index = np.array([index, 1])
    return d, index