Python numpy.ones() Examples

The following are 30 code examples showing how to use numpy.ones(). They are extracted from open source projects; you can go to the original project or source file by following the links above each example.


You may also want to check out all available functions and classes of the module numpy.
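
For reference, here is a minimal standalone sketch of the call itself (shape, dtype, and the common fill-with-a-constant idiom); it is independent of the projects below:

import numpy as np

v = np.ones(5)                       # 1-D array of float64 ones
m = np.ones((2, 3), dtype=np.int32)  # 2-D array with an explicit integer dtype
c = np.ones((4,)) * 7.0              # scale the ones to fill an array with any constant
print(v, m.shape, c)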

Example 1
Project: libTLDA   Author: wmkouw   File: tcpr.py    License: MIT License
def add_intercept(self, X):
        """Add 1's to data as last features."""
        # Data shape
        N, D = X.shape

        # Check whether an intercept column already exists
        if np.any(np.sum(X, axis=0) == N):

            # Report
            print('Intercept is not the last feature. Swapping..')

            # Find which column contains the intercept
            intercept_index = np.argwhere(np.sum(X, axis=0) == N)

            # Swap intercept to last
            X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

        # Add intercept as last column
        X = np.hstack((X, np.ones((N, 1))))

        # Append column of 1's to data, and increment dimensionality
        return X, D+1 
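
For context, the same intercept-appending idea reduces to a one-liner on a plain NumPy array; a minimal sketch with made-up data (not part of libTLDA):

import numpy as np

X = np.random.randn(4, 3)                          # 4 samples, 3 features
X_aug = np.hstack((X, np.ones((X.shape[0], 1))))   # append a column of 1's
print(X_aug.shape)                                 # (4, 4); the last column is the intercept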
Example 2
Project: libTLDA   Author: wmkouw   File: test_util.py    License: MIT License
def test_one_hot():
    """Check if one_hot returns correct label matrices."""
    # Generate label vector
    y = np.hstack((np.ones((10,))*0,
                   np.ones((10,))*1,
                   np.ones((10,))*2))

    # Map to matrix
    Y, labels = one_hot(y)

    # Check for only 0's and 1's
    assert len(np.setdiff1d(np.unique(Y), [0, 1])) == 0

    # Check for correct labels
    assert np.all(labels == np.unique(y))

    # Check correct shape of matrix
    assert Y.shape[0] == y.shape[0]
    assert Y.shape[1] == len(labels) 
Example 3
Project: xrft   Author: xgcm   File: test_xrft.py    License: MIT License
def test_cross_phase_2d(self, dask):
        Ny, Nx = (32, 16)
        x = np.linspace(0, 1, num=Nx, endpoint=False)
        y = np.ones(Ny)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # f cycles per unit length in x
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1*y[:,np.newaxis], name='a',
                          dims=['y','x'], coords={'y':y, 'x':x})
        da2 = xr.DataArray(data=signal2*y[:,np.newaxis], name='b',
                          dims=['y','x'], coords={'y':y, 'x':x})
        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2, dim=['y','x'])

        if dask:
            da1 = da1.chunk({'x': 16})
            da2 = da2.chunk({'x': 16})
        cp = xrft.cross_phase(da1, da2, dim=['x'])
        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset) 
Example 4
Project: xrft   Author: xgcm   File: test_xrft.py    License: MIT License
def synthetic_field_xr(N, dL, amp, s,
                    other_dim_sizes=None, dim_order=True,
                    chunks=None):

    theta = xr.DataArray(synthetic_field(N, dL, amp, s),
                        dims=['y', 'x'],
                        coords={'y':range(N), 'x':range(N)}
                        )

    if other_dim_sizes:
        _da = xr.DataArray(np.ones(other_dim_sizes),
                           dims=['d%d'%i for i in range(len(other_dim_sizes))])
        if dim_order:
            theta = theta + _da
        else:
            theta = _da + theta

    if chunks:
        theta = theta.chunk(chunks)

    return theta 
Example 5
Project: FRIDA   Author: LCAV   File: point_cloud.py    License: MIT License
def classical_mds(self, D):
        ''' 
        Classical multidimensional scaling

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing squared distances between points)
        '''

        # Apply MDS algorithm for denoising
        n = D.shape[0]
        J = np.eye(n) - np.ones((n,n))/float(n)
        G = -0.5*np.dot(J, np.dot(D, J))

        s, U = np.linalg.eig(G)

        # we need to sort the eigenvalues in decreasing order
        s = np.real(s)
        o = np.argsort(s)
        s = s[o[::-1]]
        U = U[:,o[::-1]]

        S = np.diag(s)[0:self.dim,:]
        self.X = np.dot(np.sqrt(S),U.T) 
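
The np.ones((n, n))/n term above builds the centering matrix J = I - (1/n)·1·1ᵀ; a small self-contained check of that double-centering step, using a made-up symmetric matrix rather than FRIDA's data:

import numpy as np

n = 4
D = np.random.rand(n, n)
D = 0.5 * (D + D.T)                    # symmetric, distance-like matrix
np.fill_diagonal(D, 0.0)

J = np.eye(n) - np.ones((n, n)) / n    # centering matrix
G = -0.5 * np.dot(J, np.dot(D, J))     # double-centered Gram matrix

# After centering, every row and column of G sums to (numerically) zero
print(np.allclose(G.sum(axis=0), 0.0), np.allclose(G.sum(axis=1), 0.0))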
Example 6
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: baseop.py    License: MIT License
def wrap_variable(self, var):
        """wrap layer.w into variables"""
        val = self.lay.w.get(var, None)
        if val is None:
            shape = self.lay.wshape[var]
            args = [0., 1e-2, shape]
            if 'moving_mean' in var:
                val = np.zeros(shape)
            elif 'moving_variance' in var:
                val = np.ones(shape)
            else:
                val = np.random.normal(*args)
            self.lay.w[var] = val.astype(np.float32)
            self.act = 'Init '
        if not self.var: return

        val = self.lay.w[var]
        self.lay.w[var] = tf.constant_initializer(val)
        if var in self._SLIM: return
        with tf.variable_scope(self.scope):
            self.lay.w[var] = tf.get_variable(var,
                shape = self.lay.wshape[var],
                dtype = tf.float32,
                initializer = self.lay.w[var]) 
Example 7
Project: mmdetection   Author: open-mmlab   File: formating.py    License: Apache License 2.0
def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict contains the data to convert.

        Returns:
            results (dict): Updated result dict contains the data to convert.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results.setdefault(
            'img_norm_cfg',
            dict(
                mean=np.zeros(num_channels, dtype=np.float32),
                std=np.ones(num_channels, dtype=np.float32),
                to_rgb=False))
        return results 
Example 8
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_util.py    License: MIT License
def __init__(self, wave_len=254, wave_dif=64, buffer_size=5, loop_num=5, window=np.hanning(254)):
        self.wave_len = wave_len
        self.wave_dif = wave_dif
        self.buffer_size = buffer_size
        self.loop_num = loop_num
        self.window = window

        self.wave_buf = np.zeros(wave_len+wave_dif, dtype=float)
        self.overwrap_buf = np.zeros(wave_dif*buffer_size+(wave_len-wave_dif), dtype=float)
        self.spectrum_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        self.absolute_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        
        self.phase = np.zeros(self.wave_len, dtype=complex)
        self.phase += np.random.random(self.wave_len)-0.5 + np.random.random(self.wave_len)*1j - 0.5j
        self.phase[self.phase == 0] = 1
        self.phase /= np.abs(self.phase) 
Example 9
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    License: MIT License
def predict_all(X, all_theta):
    rows = X.shape[0]
    params = X.shape[1]
    num_labels = all_theta.shape[0]
    
    # same as before, insert ones to match the shape
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # convert to matrices
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)
    
    # compute the class probability for each class on each training instance
    h = sigmoid(X * all_theta.T)
    
    # create array of the index with the maximum probability
    h_argmax = np.argmax(h, axis=1)
    
    # because our array was zero-indexed we need to add one for the true label prediction
    h_argmax = h_argmax + 1
    
    return h_argmax 
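
The np.insert call above prepends a bias column of ones; a minimal illustration with a tiny made-up matrix (outside the project):

import numpy as np

X = np.arange(6).reshape(3, 2)                    # 3 samples, 2 features
Xb = np.insert(X, 0, values=np.ones(3), axis=1)   # insert a column of 1's at index 0
print(Xb)
# [[1 0 1]
#  [1 2 3]
#  [1 4 5]]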
Example 10
Project: deep-learning-note   Author: wdxtub   File: 6_bias_variance.py    License: MIT License
def prepare_poly_data(*args, power):
    """
    args: keep feeding in X, Xval, or Xtest
        will return in the same order
    """
    def prepare(x):
        # expand feature
        df = poly_features(x, power=power)

        # normalization
        ndarr = normalize_feature(df).to_numpy()  # .as_matrix() was removed in newer pandas

        # add intercept term
        return np.insert(ndarr, 0, np.ones(ndarr.shape[0]), axis=1)

    return [prepare(x) for x in args] 
Example 11
Project: deep-learning-note   Author: wdxtub   File: mnist_projector_generate.py    License: MIT License
def create_sprite_image(images):
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # The sprite can be thought of as one large square image tiled from all the small images
    m = int(np.ceil(np.sqrt(images.shape[0])))

    # Initialize the final large image with all 1's
    sprite_image = np.ones((img_h*m, img_w*m))

    for i in range(m):
        for j in range(m):
            # Compute the index of the current small image
            cur = i * m + j
            if cur < images.shape[0]:
                # Copy the contents of the small image into the final sprite image
                sprite_image[i*img_h:(i+1)*img_h,
                             j*img_w:(j+1)*img_w] = images[cur]
    return sprite_image

# Load the MNIST data; with one_hot=False each label is a single number rather than a vector
Example 12
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def color_overlap(color1, *args):
    '''
    color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
      of color1 followed by any additional colors (overlaid left to right). This respects alpha
      values when calculating the results.
    Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
    '''
    args = list(args)
    args.insert(0, color1)
    rgba = np.asarray([0.5,0.5,0.5,0])
    for c in args:
        c = to_rgba(c)
        a = c[...,3]
        a0 = rgba[...,3]
        if   np.isclose(a0, 0).all(): rgba = np.ones(rgba.shape) * c
        elif np.isclose(a,  0).all(): continue
        else:                         rgba = times(a, c) + times(1-a, rgba)
    return rgba 
Example 13
Project: neuropythy   Author: noahbenson   File: images.py    License: GNU Affero General Public License v3.0
def image_reslice(image, spec, method=None, fill=0, dtype=None, weights=None, image_type=None):
    '''
    image_reslice(image, spec) yields a duplicate of the given image resliced to have the voxels
      indicated by the given image spec. Note that spec may be an image itself.

    Optional arguments that can be passed to image_interpolate() (aside from affine) are allowed
    here and are passed through.
    '''
    if image_type is None and is_image(image): image_type = to_image_type(image)
    spec = to_image_spec(spec)
    image = to_image(image)
    # we make a big mesh and interpolate at these points...
    imsh = spec['image_shape']
    (args, kw) = ([np.arange(n) for n in imsh[:3]], {'indexing': 'ij'})
    ijk = np.asarray([u.flatten() for u in np.meshgrid(*args, **kw)])
    ijk = np.dot(spec['affine'], np.vstack([ijk, np.ones([1,ijk.shape[1]])]))[:3]
    # interpolate here...
    u = image_interpolate(image, ijk, method=method, fill=fill, dtype=dtype, weights=weights)
    return to_image((np.reshape(u, imsh), spec), image_type=image_type) 
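
The row appended by np.ones([1, ijk.shape[1]]) turns the voxel indices into homogeneous coordinates, so the 4x4 affine's translation column takes effect; a tiny standalone illustration of that trick (made-up affine and points):

import numpy as np

affine = np.array([[1., 0., 0., 10.],   # translate x by 10
                   [0., 1., 0.,  0.],
                   [0., 0., 1.,  0.],
                   [0., 0., 0.,  1.]])

pts = np.array([[0., 1.],               # two 3-D points stored as columns (3 x 2)
                [0., 2.],
                [0., 3.]])

hom = np.vstack([pts, np.ones([1, pts.shape[1]])])  # append a row of 1's
out = np.dot(affine, hom)[:3]                       # drop the homogeneous row
print(out[:, 0])                                    # [10.  0.  0.]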
Example 14
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def jacobian(self, p, into=None):
        # transpose to be 3 x 2 x n
        p = np.transpose(np.reshape(p, (-1, 3, 2)), (1,2,0))
        # First, get the two legs...
        (dx_ab, dy_ab) = p[1] - p[0]
        (dx_ac, dy_ac) = p[2] - p[0]
        (dx_bc, dy_bc) = p[2] - p[1]
        # now, the area is half the z-value of the cross-product...
        sarea0 = 0.5 * (dx_ab*dy_ac - dx_ac*dy_ab)
        # but we want to abs it
        dsarea0 = np.sign(sarea0)
        z = np.transpose([[-dy_bc,dx_bc], [dy_ac,-dx_ac], [-dy_ab,dx_ab]], (2,0,1))
        z = times(0.5*dsarea0, z)
        m = numel(p)
        n = p.shape[2]
        ii = (np.arange(n) * np.ones([6, n])).T.flatten()
        z = sps.csr_matrix((z.flatten(), (ii, np.arange(len(ii)))), shape=(n, m))
        return safe_into(into, z) 
Example 15
Project: neuropythy   Author: noahbenson   File: models.py    License: GNU Affero General Public License v3.0
def angle_to_cortex(self, theta, rho):
        'See help(neuropythy.registration.RetinotopyModel.angle_to_cortex).'
        #TODO: This should be made to work correctly with visual area boundaries: this could be done
        # by, for each area (e.g., V2) looking at its boundaries (with V1 and V3) and flipping the
        # adjacent triangles so that there is complete coverage of each hemifield, guaranteed.
        if not pimms.is_vector(theta): return self.angle_to_cortex([theta], [rho])[0]
        theta = np.asarray(theta)
        rho = np.asarray(rho)
        zs = np.asarray(
            rho * np.exp([complex(z) for z in 1j * ((90.0 - theta)/180.0*np.pi)]),
            dtype=complex)  # np.complex was removed in newer NumPy; use the builtin complex
        coords = np.asarray([zs.real, zs.imag]).T
        if coords.shape[0] == 0: return np.zeros((0, len(self.visual_meshes), 2))
        # we step through each area in the forward model and return the appropriate values
        tx = self.transform
        res = np.transpose(
            [self.visual_meshes[area].interpolate(coords, 'cortical_coordinates', method='linear')
             for area in sorted(self.visual_meshes.keys())],
            (1,0,2))
        if tx is not None:
            res = np.asarray(
                [np.dot(tx, np.vstack((area_xy.T, np.ones(len(area_xy)))))[0:2].T
                 for area_xy in res])
        return res 
Example 16
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def apply_affine(aff, coords):
    '''
    apply_affine(affine, coords) yields the result of applying the given affine transformation to
      the given coordinate or coordinates.

    This function expects coords to be a (dims X n) matrix but if the first dimension is neither 2
    nor 3, coords.T is used; i.e.:
      apply_affine(affine3x3, coords2xN) ==> newcoords2xN
      apply_affine(affine4x4, coords3xN) ==> newcoords3xN
      apply_affine(affine3x3, coordsNx2) ==> newcoordsNx2 (for N != 2)
      apply_affine(affine4x4, coordsNx3) ==> newcoordsNx3 (for N != 3)
    '''
    if aff is None: return coords
    (coords,tr) = (np.asanyarray(coords), False)
    if len(coords.shape) == 1: return np.squeeze(apply_affine(aff, np.reshape(coords, (-1,1))))
    elif len(coords.shape) > 2: raise ValueError('cannot apply affine to ND-array for N > 2')
    if   len(coords) == 2: aff = to_affine(aff, 2)
    elif len(coords) == 3: aff = to_affine(aff, 3)
    else: (coords,aff,tr) = (coords.T, to_affine(aff, coords.shape[1]), True)
    r = np.dot(aff, np.vstack([coords, np.ones([1,coords.shape[1]])]))[:-1]
    return r.T if tr else r 
Example 17
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def dataframe_select(df, *cols, **filters):
    '''
    dataframe_select(df, k1=v1, k2=v2...) yields df filtered down to the rows whose cells, in each
      column named by a keyword argument (k1, k2, etc.), match the corresponding value.
    dataframe_select(df, col1, col2...) selects the given columns.
    dataframe_select(df, col1, col2..., k1=v1, k2=v2...) selects both.
    
    If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
    between the values. If value is a tuple/list of more than 2 elements or is a set of any length
    then it is a list of values, any one of which can match the cell.
    '''
    ii = np.ones(len(df), dtype='bool')
    for (k,v) in six.iteritems(filters):
        vals = df[k].values
        if   pimms.is_set(v):                    jj = np.isin(vals, list(v))
        elif pimms.is_vector(v) and len(v) == 2: jj = (v[0] <= vals) & (vals < v[1])
        elif pimms.is_vector(v):                 jj = np.isin(vals, list(v))
        else:                                    jj = (vals == v)
        ii = np.logical_and(ii, jj)
    if len(ii) != np.sum(ii): df = df.loc[ii]
    if len(cols) > 0: df = df[list(cols)]
    return df 
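
The np.ones(len(df), dtype='bool') line simply seeds an all-True row mask that each filter then narrows; a minimal version of that pattern with a small, hypothetical pandas frame:

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': ['x', 'y', 'x', 'y']})

ii = np.ones(len(df), dtype=bool)                # start with every row selected
ii = np.logical_and(ii, df['a'].values > 1)      # narrow by one condition
ii = np.logical_and(ii, df['b'].values == 'x')   # and another
print(df.loc[ii])                                # the single row with a == 3 and b == 'x'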
Example 18
Project: svviz   Author: svviz   File: dotplots.py    License: MIT License
def dotplot2(s1, s2, wordsize=5, overlap=5, verbose=1):
        """ verbose = 0 (no progress), 1 (progress if s1 and s2 are long) or
        2 (progress in any case) """
        doProgress = False
        if verbose > 1 or len(s1)*len(s2) > 1e6:
            doProgress = True

        mat = numpy.ones(((len(s1)-wordsize)//overlap+2, (len(s2)-wordsize)//overlap+2))

        for i in range(0, len(s1)-wordsize, overlap):
            if i % 1000 == 0 and doProgress:
                logging.info("  dotplot progress: {} of {} rows done".format(i, len(s1)-wordsize))
            word1 = s1[i:i+wordsize]

            for j in range(0, len(s2)-wordsize, overlap):
                word2 = s2[j:j+wordsize]

                if word1 == word2 or word1 == word2[::-1]:
                    mat[i//overlap, j//overlap] = 0
        
        imgData = None
        tempDir = tempfile.mkdtemp()
        try:
            path = os.path.join(tempDir, "dotplot.png")
            misc.imsave(path, mat)
            imgData = open(path, "rb").read()
        except Exception as e:
            logging.error("Error generating dotplots:'{}'".format(e))
        finally:
            shutil.rmtree(tempDir)
        return imgData 
Example 19
Project: libTLDA   Author: wmkouw   File: util.py    License: MIT License
def one_hot(y, fill_k=False, one_not=False):
    """Map to one-hot encoding."""
    # Check labels
    labels = np.unique(y)

    # Number of classes
    K = len(labels)

    # Number of samples
    N = y.shape[0]

    # Preallocate array
    if one_not:
        Y = -np.ones((N, K))
    else:
        Y = np.zeros((N, K))

    # Set k-th column to 1 for n-th sample
    for n in range(N):

        # Map current class to index label
        y_n = (y[n] == labels)

        if fill_k:
            Y[n, y_n] = y_n
        else:
            Y[n, y_n] = 1

    return Y, labels 
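
A quick usage sketch of the encoder above, assuming the one_hot definition from this example is in scope and using a made-up label vector:

import numpy as np

y = np.array([0, 2, 1, 2])
Y, labels = one_hot(y)
print(labels)    # [0 1 2]
print(Y)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]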
Example 20
Project: libTLDA   Author: wmkouw   File: rba.py    License: MIT License
def psi(self, X, theta, w, K=2):
        """
        Compute psi function.

        Parameters
        ----------
        X : array
            data set (N samples by D features)
        theta : array
            classifier parameters (D features by 1)
        w : array
            importance-weights (N samples by 1)
        K : int
            number of classes (def: 2)

        Returns
        -------
        psi : array
            array with psi function values (N samples by K classes)

        """
        # Number of samples
        N = X.shape[0]

        # Preallocate psi array
        psi = np.zeros((N, K))

        # Loop over classes
        for k in range(K):
            # Compute feature statistics
            Xk = self.feature_stats(X, k*np.ones((N, 1)))

            # Compute psi function
            psi[:, k] = (w*np.dot(Xk, theta))[:, 0]

        return psi 
Example 21
Project: libTLDA   Author: wmkouw   File: rba.py    License: MIT License
def predict_proba(self, Z):
        """
        Compute posteriors on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples by D features)

        Returns
        -------
        preds : array
            label predictions (M samples by 1)

        """
        # Data shape
        M, D = Z.shape

        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == D:
                raise ValueError('''Test data is of different dimensionality
                                 than training data.''')

        # Calculate psi function for target samples
        psi = self.psi(Z, self.theta.T, np.ones((M, 1)), K=self.K)

        # Compute posteriors for target samples
        return self.posterior(psi) 
Example 22
Project: libTLDA   Author: wmkouw   File: test_iw.py    License: MIT License
def test_regularization():
    """Test for fitting the model."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = ImportanceWeightedClassifier(loss_function='lr',
                                       l2_regularization=None)
    assert isinstance(clf.clf, LogisticRegressionCV)
    clf = ImportanceWeightedClassifier(loss_function='lr',
                                       l2_regularization=1.0)
    assert isinstance(clf.clf, LogisticRegression) 
Example 23
Project: libTLDA   Author: wmkouw   File: test_iw.py    License: MIT License
def test_fit():
    """Test for fitting the model."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = ImportanceWeightedClassifier(loss_function='lr')
    clf.fit(X, y, Z)
    assert clf.is_trained
    clf = ImportanceWeightedClassifier(loss_function='qd')
    clf.fit(X, y, Z)
    assert clf.is_trained
    clf = ImportanceWeightedClassifier(loss_function='hinge')
    clf.fit(X, y, Z)
    assert clf.is_trained 
Example 24
Project: libTLDA   Author: wmkouw   File: test_iw.py    License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = ImportanceWeightedClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0 
Example 25
Project: libTLDA   Author: wmkouw   File: test_tca.py    License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = TransferComponentClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0 
Example 26
Project: libTLDA   Author: wmkouw   File: test_suba.py    License: MIT License
def test_fit():
    """Test for fitting the model."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = SubspaceAlignedClassifier()
    clf.fit(X, y, Z)
    assert clf.is_trained 
Example 27
Project: libTLDA   Author: wmkouw   File: test_suba.py    License: MIT License
def test_fit_semi():
    """Test for fitting the model."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    u = np.array([[0, 0], [9, 1]])
    clf = SemiSubspaceAlignedClassifier()
    clf.fit(X, y, Z, u)
    assert clf.is_trained 
Example 28
Project: libTLDA   Author: wmkouw   File: test_suba.py    License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((-np.ones((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = SubspaceAlignedClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0 
Example 29
Project: libTLDA   Author: wmkouw   File: test_suba.py    License: MIT License
def test_predict_semi():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    u = np.array([[0, 0], [9, 1]])
    clf = SemiSubspaceAlignedClassifier()
    clf.fit(X, y, Z, u)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0 
Example 30
Project: libTLDA   Author: wmkouw   File: test_rba.py    License: MIT License
def test_predict():
    """Test for making predictions."""
    X = rnd.randn(10, 2)
    y = np.hstack((np.zeros((5,)), np.ones((5,))))
    Z = rnd.randn(10, 2) + 1
    clf = RobustBiasAwareClassifier()
    clf.fit(X, y, Z)
    u_pred = clf.predict(Z)
    labels = np.unique(y)
    assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0