Python numpy.alltrue() Examples

The following are 30 code examples of numpy.alltrue(), collected from open-source projects. You can go to the original project or source file by following the links above each example, or check out all of the available functions and classes of the numpy module. Note that numpy.alltrue is simply an alias of numpy.all; the alias was deprecated in NumPy 1.25 and removed in NumPy 2.0, so new code should call numpy.all directly.
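Before the project examples, here is a minimal standalone sketch of how numpy.alltrue behaves. It is not taken from any of the projects listed below and assumes NumPy < 2.0, where the alias still exists.

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 0]])

# alltrue returns True only when every element is truthy; the reduction can be per axis.
print(np.alltrue(a > 0))          # False, because of the trailing 0
print(np.alltrue(a > 0, axis=1))  # [ True False]

# Forward-compatible equivalent, valid on every NumPy version:
print(np.all(a > 0))              # False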
Example #1
Source File: test_missing_values_cleaner.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_missing_values_cleaner(test_path):

    test_file = os.path.join(test_path, 'data_nan.npy')
    X_nan = np.load(test_file)
    X = copy(X_nan)

    cleaner = MissingValuesCleaner(missing_value=np.nan, strategy='zero')

    X_complete = cleaner.transform(X)

    test_file = os.path.join(test_path, 'data_complete.npy')
    X_expected = np.load(test_file)
    assert np.alltrue(X_complete == X_expected)

    expected_info = "MissingValuesCleaner(missing_value=[nan], new_value=1, strategy='zero',\n" \
                    "                     window_size=200)"
    assert cleaner.get_info() == expected_info

    assert cleaner._estimator_type == 'transform' 
Example #2
Source File: test_windowed_standard_scaler.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_windowed_standard_scaler(test_path):
    # The original test builds X_orig by repeating the same 7-row block 11 times;
    # np.tile constructs the identical (77, 4) array.
    base_rows = np.array([[1., 2., 3., 4.], [2., 3., 4., 5.], [3., 4., 5., 6.], [5., 4., 3., 2.],
                          [4., 3., 2., 1.], [0., 1., 0., 1.], [3., 2., 3., 4.]])
    X_orig = np.tile(base_rows, (11, 1))
    X = copy(X_orig)

    cleaner = WindowedStandardScaler(window_size=20)

    X_complete = cleaner.transform(X)

    test_file = os.path.join(test_path, 'std_scaler.npy')
    X_expected = np.load(test_file)

    assert np.alltrue(X_complete == X_expected) 
Example #3
Source File: test_leverage_bagging.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def run_prequential_supervised(stream, learner, max_samples, n_wait, y_expected=None):
    stream.restart()

    y_pred = np.zeros(max_samples // n_wait, dtype=int)  # np.int was removed in NumPy 1.24; the builtin int works as the dtype
    y_true = np.zeros(max_samples // n_wait, dtype=int)
    j = 0

    for i in range(max_samples):
        X, y = stream.next_sample()
        # Test every n samples
        if i % n_wait == 0:
            y_pred[j] = int(learner.predict(X)[0])
            y_true[j] = (y[0])
            j += 1
        learner.partial_fit(X, y, classes=stream.target_values)

    assert type(learner.predict(X)) == np.ndarray

    if y_expected is not None:
        assert np.alltrue(y_pred == y_expected) 
Example #4
Source File: test_windowed_minmax_scaler.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_windowed_minmax_scaler(test_path):
    # As in the previous example, the same 7-row block is repeated 11 times;
    # np.tile constructs the identical (77, 4) array.
    base_rows = np.array([[1., 2., 3., 4.], [2., 3., 4., 5.], [3., 4., 5., 6.], [5., 4., 3., 2.],
                          [4., 3., 2., 1.], [0., 1., 0., 1.], [3., 2., 3., 4.]])
    X_orig = np.tile(base_rows, (11, 1))
    X = copy(X_orig)

    cleaner = WindowedMinmaxScaler(window_size=20)

    X_complete = cleaner.transform(X)

    test_file = os.path.join(test_path, 'minmax_scaler.npy')
    X_expected = np.load(test_file)

    assert np.alltrue(X_complete == X_expected) 
Example #5
Source File: test_measure_collection.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_regression_measurements():
    y_true = np.sin(range(100))
    y_pred = np.sin(range(100)) + .05

    measurements = RegressionMeasurements()
    for i in range(len(y_true)):
        measurements.add_result(y_true[i], y_pred[i])

    expected_mse = 0.0025000000000000022
    assert np.isclose(expected_mse, measurements.get_mean_square_error())

    expected_ae = 0.049999999999999906
    assert np.isclose(expected_ae, measurements.get_average_error())

    expected_info = 'RegressionMeasurements: - sample_count: 100 - mean_square_error: 0.002500 ' \
                    '- mean_absolute_error: 0.050000'
    assert expected_info == measurements.get_info()

    expected_last = (-0.9992068341863537, -0.9492068341863537)
    assert np.alltrue(expected_last == measurements.get_last())

    measurements.reset()
    assert measurements.sample_count == 0 
Example #6
Source File: test_agrawal_generator.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_agrawal_drift(test_path):
    stream = AGRAWALGenerator(random_state=1)
    X, y = stream.next_sample(10)
    stream.generate_drift()
    X_drift, y_drift = stream.next_sample(10)

    # Load test data corresponding to first 10 instances
    test_file = os.path.join(test_path, 'agrawal_stream_drift.npz')
    data = np.load(test_file)
    X_expected = data['X']
    y_expected = data['y']

    X = np.concatenate((X, X_drift))
    y = np.concatenate((y, y_drift))
    assert np.alltrue(X == X_expected)
    assert np.alltrue(y == y_expected) 
Example #7
Source File: tests_rsp.py    From NeuroKit with MIT License
def test_rsp_eventrelated():

    rsp, info = nk.rsp_process(nk.rsp_simulate(duration=30, random_state=42))
    epochs = nk.epochs_create(rsp, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9)
    rsp_eventrelated = nk.rsp_eventrelated(epochs)

    # Test rate features
    assert np.alltrue(np.array(rsp_eventrelated["RSP_Rate_Min"]) < np.array(rsp_eventrelated["RSP_Rate_Mean"]))

    assert np.alltrue(np.array(rsp_eventrelated["RSP_Rate_Mean"]) < np.array(rsp_eventrelated["RSP_Rate_Max"]))

    # Test amplitude features
    assert np.alltrue(
        np.array(rsp_eventrelated["RSP_Amplitude_Min"]) < np.array(rsp_eventrelated["RSP_Amplitude_Mean"])
    )

    assert np.alltrue(
        np.array(rsp_eventrelated["RSP_Amplitude_Mean"]) < np.array(rsp_eventrelated["RSP_Amplitude_Max"])
    )

    assert len(rsp_eventrelated["Label"]) == 3 
Example #8
Source File: tests_emg.py    From NeuroKit with MIT License
def test_emg_eventrelated():

    emg = nk.emg_simulate(duration=20, sampling_rate=1000, burst_number=3)
    emg_signals, info = nk.emg_process(emg, sampling_rate=1000)
    epochs = nk.epochs_create(
        emg_signals, events=[3000, 6000, 9000], sampling_rate=1000, epochs_start=-0.1, epochs_end=1.9
    )
    emg_eventrelated = nk.emg_eventrelated(epochs)

    # Test amplitude features
    no_activation = np.where(emg_eventrelated["EMG_Activation"] == 0)[0][0]
    assert int(pd.DataFrame(emg_eventrelated.values[no_activation]).isna().sum()) == 4

    assert np.alltrue(
        np.nansum(np.array(emg_eventrelated["EMG_Amplitude_Mean"]))
        < np.nansum(np.array(emg_eventrelated["EMG_Amplitude_Max"]))
    )

    assert len(emg_eventrelated["Label"]) == 3 
Example #9
Source File: test_one_hot_to_categorical.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_one_hot_to_categorical(test_path):
    n_categories = 5

    # Load test data generated using:
    # RandomTreeGenerator(tree_random_state=1, sample_random_state=1,
    #                     n_cat_features=n_categories, n_num_features=0)
    test_file = os.path.join(test_path, 'data-one-hot.npz')
    data = np.load(test_file)
    X = data['X']
    y = data['y']

    cat_att_idx = [[i+j for i in range(n_categories)] for j in range(0, n_categories * n_categories, n_categories)]
    transformer = OneHotToCategorical(categorical_list=cat_att_idx)

    X_decoded = transformer.transform(X)

    test_file = os.path.join(test_path, 'data-categorical.npy')
    X_expected = np.load(test_file)
    assert np.alltrue(X_decoded == X_expected)

    X_decoded = transformer.transform(X)
    assert np.alltrue(X_decoded == X_expected)

    expected_info = "OneHotToCategorical(categorical_list=[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9],\n" \
                    "                                      [10, 11, 12, 13, 14],\n" \
                    "                                      [15, 16, 17, 18, 19],\n" \
                    "                                      [20, 21, 22, 23, 24]])"
    assert transformer.get_info() == expected_info

    assert transformer._estimator_type == 'transform'

    transformer.fit(X=X, y=y)

    transformer.partial_fit_transform(X=X) 
Example #10
Source File: test_function_base.py    From vnpy_crypto with MIT License
def test_nd(self):
        y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
        assert_(not np.all(y1))
        assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
        assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) 
Example #11
Source File: tests_ecg.py    From NeuroKit with MIT License
def test_ecg_eventrelated():

    ecg, info = nk.ecg_process(nk.ecg_simulate(duration=20))
    epochs = nk.epochs_create(ecg, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9)
    ecg_eventrelated = nk.ecg_eventrelated(epochs)

    # Test rate features
    assert np.alltrue(np.array(ecg_eventrelated["ECG_Rate_Min"]) < np.array(ecg_eventrelated["ECG_Rate_Mean"]))

    assert np.alltrue(np.array(ecg_eventrelated["ECG_Rate_Mean"]) < np.array(ecg_eventrelated["ECG_Rate_Max"]))

    assert len(ecg_eventrelated["Label"]) == 3 
Example #12
Source File: testutils.py    From recruit with Apache License 2.0
def fail_if_array_equal(x, y, err_msg='', verbose=True):
    """
    Raises an assertion error if two masked arrays are not equal elementwise.

    """
    def compare(x, y):
        return (not np.alltrue(approx(x, y)))
    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not equal') 
Example #13
Source File: test_regression.py    From vnpy_crypto with MIT License
def test_fromiter_bytes(self):
        # Ticket #1058
        a = np.fromiter(list(range(10)), dtype='b')
        b = np.fromiter(list(range(10)), dtype='B')
        assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
        assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))) 
Example #14
Source File: test_regression.py    From vnpy_crypto with MIT License
def test_method_args(self):
        # Make sure methods and functions have same default axis
        # keyword and arguments
        funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
                 ('sometrue', 'any'),
                 ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
                 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
                 'round', 'min', 'max', 'argsort', 'sort']
        funcs2 = ['compress', 'take', 'repeat']

        for func in funcs1:
            arr = np.random.rand(8, 7)
            arr2 = arr.copy()
            if isinstance(func, tuple):
                func_meth = func[1]
                func = func[0]
            else:
                func_meth = func
            res1 = getattr(arr, func_meth)()
            res2 = getattr(np, func)(arr2)
            if res1 is None:
                res1 = arr

            if res1.dtype.kind in 'uib':
                assert_((res1 == res2).all(), func)
            else:
                assert_(abs(res1-res2).max() < 1e-8, func)

        for func in funcs2:
            arr1 = np.random.rand(8, 7)
            arr2 = np.random.rand(8, 7)
            res1 = None
            if func == 'compress':
                arr1 = arr1.ravel()
                res1 = getattr(arr2, func)(arr1)
            else:
                arr2 = (15*arr2).astype(int).ravel()
            if res1 is None:
                res1 = getattr(arr1, func)(arr2)
            res2 = getattr(np, func)(arr1, arr2)
            assert_(abs(res1-res2).max() < 1e-8, func) 
Example #15
Source File: test_numeric.py    From vnpy_crypto with MIT License
def test_values(self):
        expected = np.array(list(self.makegen()))
        a = np.fromiter(self.makegen(), int)
        a20 = np.fromiter(self.makegen(), int, 20)
        assert_(np.alltrue(a == expected, axis=0))
        assert_(np.alltrue(a20 == expected[:20], axis=0)) 
Example #16
Source File: optimize.py    From lambda-packs with MIT License
def derivative(self, x, *args):
        # If the Jacobian was already computed at this exact point x during the last
        # function evaluation, reuse the cached value; otherwise evaluate at x first.
        if self.jac is not None and numpy.alltrue(x == self.x):
            return self.jac
        else:
            self(x, *args)
            return self.jac
Example #17
Source File: testutils.py    From lambda-packs with MIT License
def fail_if_array_equal(x, y, err_msg='', verbose=True):
    """
    Raises an assertion error if two masked arrays are not equal elementwise.

    """
    def compare(x, y):
        return (not np.alltrue(approx(x, y)))
    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not equal') 
Example #18
Source File: fitpack2.py    From lambda-packs with MIT License
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
                 ext=0, check_finite=False):

        if check_finite:
            w_finite = np.isfinite(w).all() if w is not None else True
            if (not np.isfinite(x).all() or not np.isfinite(y).all() or
                    not w_finite or not np.isfinite(t).all()):
                raise ValueError("Input(s) must not contain NaNs or infs.")
        if not all(diff(x) > 0.0):
            raise ValueError('x must be strictly increasing')

        # _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
        xb = bbox[0]
        xe = bbox[1]
        if xb is None:
            xb = x[0]
        if xe is None:
            xe = x[-1]
        t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
        n = len(t)
        if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
            raise ValueError('Interior knots t must satisfy '
                             'Schoenberg-Whitney conditions')
        if not dfitpack.fpchec(x, t, k) == 0:
            raise ValueError(_fpchec_error_string)
        data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
        self._data = data[:-3] + (None, None, data[-1])
        self._reset_class()

        try:
            self.ext = _extrap_modes[ext]
        except KeyError:
            raise ValueError("Unknown extrapolation mode %s." % ext)


Example #19
Source File: utils_sisr.py    From KAIR with MIT License
def zero_pad(image, shape, position='corner'):
    """
    Extends image to a certain size with zeros
    Parameters
    ----------
    image: real 2d `numpy.ndarray`
        Input image
    shape: tuple of int
        Desired output shape of the image
    position : str, optional
        The position of the input image in the output one:
            * 'corner'
                top-left corner (default)
            * 'center'
                centered
    Returns
    -------
    padded_img: real `numpy.ndarray`
        The zero-padded image
    """
    shape = np.asarray(shape, dtype=int)
    imshape = np.asarray(image.shape, dtype=int)
    if np.alltrue(imshape == shape):
        return image
    if np.any(shape <= 0):
        raise ValueError("ZERO_PAD: null or negative shape given")
    dshape = shape - imshape
    if np.any(dshape < 0):
        raise ValueError("ZERO_PAD: target size smaller than source one")
    pad_img = np.zeros(shape, dtype=image.dtype)
    idx, idy = np.indices(imshape)
    if position == 'center':
        if np.any(dshape % 2 != 0):
            raise ValueError("ZERO_PAD: source and target shapes "
                             "have different parity.")
        offx, offy = dshape // 2
    else:
        offx, offy = (0, 0)
    pad_img[idx + offx, idy + offy] = image
    return pad_img 
Example #20
Source File: utils_old.py    From vnpy_crypto with MIT License
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.
    """
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = scipy.linalg.svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.alltrue(np.equal(X, 0.))) 
Example #21
Source File: test_nonparametric.py    From pingouin with GNU General Public License v3.0
def test_madmedianrule(self):
        """Test function madmedianrule."""
        a = [1.2, 3, 4.5, 2.4, 5, 12.7, 0.4]
        assert np.alltrue(madmedianrule(a) == [False, False, False,
                                               False, False, True, False]) 
Example #22
Source File: mat_to_hdf5.py    From seizure-prediction with MIT License
def add_channels(self, channels):
        if self.channels is None:
            self.channels = channels
        else:
            assert np.alltrue(channels == self.channels) 
Example #23
Source File: test_sam_knn.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_sam_knn():

    stream = SEAGenerator(random_state=1)

    hyperParams = {'maxSize': 1000, 'nNeighbours': 5, 'knnWeights': 'distance', 'STMSizeAdaption': 'maxACCApprox',
                   'use_ltm': False}

    learner = SAMKNNClassifier(n_neighbors=hyperParams['nNeighbours'], max_window_size=hyperParams['maxSize'],
                               weighting=hyperParams['knnWeights'],
                               stm_size_option=hyperParams['STMSizeAdaption'], use_ltm=hyperParams['use_ltm'])

    cnt = 0
    max_samples = 5000
    predictions = array('d')

    wait_samples = 100

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            predictions.append(learner.predict(X)[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('i', [1, 1, 1, 0, 1, 1, 0, 0, 0, 1,
                                       1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
                                       1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
                                       0, 0, 1, 1, 0, 0, 0, 0, 1, 1,
                                       1, 1, 0, 1, 0, 0, 1, 0, 1])

    assert np.alltrue(predictions == expected_predictions)

    assert type(learner.predict(X)) == np.ndarray

    with pytest.raises(NotImplementedError):
        learner.predict_proba(X) 
Example #24
Source File: test_half_space_trees.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_half_space_trees(test_path):
    stream = SEAGenerator(classification_function=0,
                          noise_percentage=0.1,
                          random_state=1)

    learner = HalfSpaceTrees(n_estimators=13,
                             size_limit=75,
                             anomaly_threshold=0.90,
                             depth=10,
                             random_state=5)

    cnt = 0
    max_samples = 5000
    y_pred = array('i')
    y_proba = []
    wait_samples = 500

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Scale inputs between 0 and 1
        X = X / 10
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_proba.append(learner.predict_proba(X)[0])
        learner.partial_fit(X)
        cnt += 1

    expected_predictions = array('i', [1, 0, 0, 0, 1, 0, 0, 1, 0])
    assert np.alltrue(y_pred == expected_predictions)
    test_file = os.path.join(test_path, 'test_half_space_trees.npy')
    expected_proba = np.load(test_file)
    assert np.allclose(y_proba, expected_proba) 
Example #25
Source File: test_classification_performance_evaluator.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_window_multi_label_classification_measurements():
    y_0 = np.ones(100)
    y_1 = np.concatenate((np.ones(90), np.zeros(10)))
    y_2 = np.concatenate((np.ones(85), np.zeros(10), np.ones(5)))
    y_true = np.ones((100, 3))
    y_pred = np.vstack((y_0, y_1, y_2)).T

    performance_evaluator = WindowMultiLabelClassificationPerformanceEvaluator(window_size=20)
    for i in range(len(y_true)):
        performance_evaluator.add_result(y_true[i], y_pred[i])

    expected_exact_match_score = 0.25
    assert np.isclose(expected_exact_match_score, performance_evaluator.exact_match_score())

    expected_hamming_score = 1 - 0.33333333333333337
    assert np.isclose(expected_hamming_score, performance_evaluator.hamming_score())

    expected_hamming_loss_score = 0.33333333333333337
    assert np.isclose(expected_hamming_loss_score, performance_evaluator.hamming_loss_score())

    expected_jaccard_score = 0.6666666666666667
    assert np.isclose(expected_jaccard_score, performance_evaluator.jaccard_score())

    expected_info = 'WindowMultiLabelClassificationPerformanceEvaluator(n_labels=3, window_size=20, n_samples=20, ' \
                    'hamming_score=0.666667, hamming_loss_score=0.333333, exact_match_score=0.250000, ' \
                    'jaccard_score=0.666667)'
    assert expected_info == performance_evaluator.get_info()

    expected_last_true = (1, 1, 1)
    expected_last_pred = (1, 0, 1)
    assert np.alltrue(expected_last_true == performance_evaluator.get_last()[0])
    assert np.alltrue(expected_last_pred == performance_evaluator.get_last()[1])

    performance_evaluator.reset()
    assert performance_evaluator.n_samples == 0 
Example #26
Source File: test_classification_performance_evaluator.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_multi_label_classification_measurements():
    y_0 = np.ones(100)
    y_1 = np.concatenate((np.ones(90), np.zeros(10)))
    y_2 = np.concatenate((np.ones(85), np.zeros(10), np.ones(5)))
    y_true = np.ones((100, 3))
    y_pred = np.vstack((y_0, y_1, y_2)).T

    performance_evaluator = MultiLabelClassificationPerformanceEvaluator()
    for i in range(len(y_true)):
        performance_evaluator.add_result(y_true[i], y_pred[i])

    expected_exact_match_score = 0.85
    assert np.isclose(expected_exact_match_score, performance_evaluator.exact_match_score())

    expected_hamming_score = 1 - 0.06666666666666667
    assert np.isclose(expected_hamming_score, performance_evaluator.hamming_score())

    expected_hamming_loss_score = 0.06666666666666667
    assert np.isclose(expected_hamming_loss_score, performance_evaluator.hamming_loss_score())

    expected_jaccard_score = 0.9333333333333332
    assert np.isclose(expected_jaccard_score, performance_evaluator.jaccard_score())

    expected_info = 'MultiLabelClassificationPerformanceEvaluator(n_labels=3, n_samples=100, ' \
                    'hamming_score=0.933333, hamming_loss_score=0.066667, exact_match_score=0.850000, ' \
                    'jaccard_score=0.933333)'
    assert expected_info == performance_evaluator.get_info()

    expected_last_true = (1, 1, 1)
    expected_last_pred = (1, 0, 1)
    assert np.alltrue(expected_last_true == performance_evaluator.get_last()[0])
    assert np.alltrue(expected_last_pred == performance_evaluator.get_last()[1])

    performance_evaluator.reset()
    assert performance_evaluator.n_samples == 0 
Example #27
Source File: test_measure_collection.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_multi_target_classification_measurements():
    y_0 = np.ones(100)
    y_1 = np.concatenate((np.ones(90), np.zeros(10)))
    y_2 = np.concatenate((np.ones(85), np.zeros(10), np.ones(5)))
    y_true = np.ones((100, 3))
    y_pred = np.vstack((y_0, y_1, y_2)).T

    measurements = MultiTargetClassificationMeasurements()
    for i in range(len(y_true)):
        measurements.add_result(y_true[i], y_pred[i])

    expected_acc = 0.85
    assert np.isclose(expected_acc, measurements.get_exact_match())

    expected_hamming_score = 1 - 0.06666666666666667
    assert np.isclose(expected_hamming_score, measurements.get_hamming_score())

    expected_hamming_loss = 0.06666666666666667
    assert np.isclose(expected_hamming_loss, measurements.get_hamming_loss())

    expected_jaccard_index = 0.9333333333333332
    assert np.isclose(expected_jaccard_index, measurements.get_j_index())

    expected_total_sum = 300
    assert expected_total_sum == measurements.get_total_sum()

    expected_info = 'MultiTargetClassificationMeasurements: - sample_count: 100 - hamming_loss: 0.066667 - ' \
                    'hamming_score: 0.933333 - exact_match: 0.850000 - j_index: 0.933333'
    assert expected_info == measurements.get_info()

    expected_last_true = (1.0, 1.0, 1.0)
    expected_last_pred = (1.0, 0.0, 1.0)
    assert np.alltrue(expected_last_true == measurements.get_last()[0])
    assert np.alltrue(expected_last_pred == measurements.get_last()[1])

    measurements.reset()
    assert measurements.sample_count == 0 
Example #28
Source File: test_measure_collection.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_window_multi_target_classification_measurements():
    y_0 = np.ones(100)
    y_1 = np.concatenate((np.ones(90), np.zeros(10)))
    y_2 = np.concatenate((np.ones(85), np.zeros(10), np.ones(5)))
    y_true = np.ones((100, 3))
    y_pred = np.vstack((y_0, y_1, y_2)).T

    measurements = WindowMultiTargetClassificationMeasurements(window_size=20)
    for i in range(len(y_true)):
        measurements.add_result(y_true[i], y_pred[i])

    expected_acc = 0.25
    assert np.isclose(expected_acc, measurements.get_exact_match())

    expected_hamming_score = 1 - 0.33333333333333337
    assert np.isclose(expected_hamming_score, measurements.get_hamming_score())

    expected_hamming_loss = 0.33333333333333337
    assert np.isclose(expected_hamming_loss, measurements.get_hamming_loss())

    expected_jaccard_index = 0.6666666666666667
    assert np.isclose(expected_jaccard_index, measurements.get_j_index())

    expected_total_sum = 300
    assert expected_total_sum == measurements.get_total_sum()

    expected_info = 'WindowMultiTargetClassificationMeasurements: - sample_count: 20 - hamming_loss: 0.333333 ' \
                    '- hamming_score: 0.666667 - exact_match: 0.250000 - j_index: 0.666667'
    assert expected_info == measurements.get_info()

    expected_last_true = (1.0, 1.0, 1.0)
    expected_last_pred = (1.0, 0.0, 1.0)
    assert np.alltrue(expected_last_true == measurements.get_last()[0])
    assert np.alltrue(expected_last_pred == measurements.get_last()[1])

    measurements.reset()
    assert measurements.sample_count == 0 
Example #29
Source File: test_classifier_chains.py    From scikit-multiflow with BSD 3-Clause "New" or "Revised" License
def test_classifier_chains_all():
    seed = 1
    X, Y = make_logical(random_state=seed)

    # CC
    cc = ClassifierChain(SGDClassifier(max_iter=100, tol=1e-3, loss='log', random_state=seed))
    cc.partial_fit(X, Y)
    y_predicted = cc.predict(X)
    y_expected = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 0]]
    assert np.alltrue(y_predicted == y_expected)
    assert type(cc.predict_proba(X)) == np.ndarray

    # RCC
    rcc = ClassifierChain(SGDClassifier(max_iter=100, tol=1e-3, loss='log', random_state=seed),
                          order='random', random_state=seed)
    rcc.partial_fit(X, Y)
    y_predicted = rcc.predict(X)
    y_expected = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 0]]
    assert np.alltrue(y_predicted == y_expected)

    # MCC
    mcc = MonteCarloClassifierChain(SGDClassifier(max_iter=100, tol=1e-3, loss='log',
                                                  random_state=seed),
                                    M=1000)
    mcc.partial_fit(X, Y)
    y_predicted = mcc.predict(X)
    y_expected = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 0]]
    assert np.alltrue(y_predicted == y_expected)

    # PCC
    pcc = ProbabilisticClassifierChain(SGDClassifier(max_iter=100, tol=1e-3, loss='log',
                                                     random_state=seed))
    pcc.partial_fit(X, Y)
    y_predicted = pcc.predict(X)
    y_expected = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [1, 1, 0]]
    assert np.alltrue(y_predicted == y_expected) 
Example #30
Source File: testutils.py    From vnpy_crypto with MIT License
def fail_if_array_equal(x, y, err_msg='', verbose=True):
    """
    Raises an assertion error if two masked arrays are not equal elementwise.

    """
    def compare(x, y):
        return (not np.alltrue(approx(x, y)))
    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not equal')