Python numpy.NINF Examples

The following are 30 code examples showing how to use numpy.NINF, NumPy's constant for negative infinity (the same value as -numpy.inf). The examples are extracted from open source projects; the originating project, author, source file, and license are listed above each one.

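As a quick orientation before the examples: numpy.NINF behaves like any other float equal to negative infinity, and newer NumPy releases (2.0 and later) removed the alias in favour of -numpy.inf. A minimal standalone sketch, assuming a NumPy version where the alias still exists:

import numpy as np

# np.NINF is just negative infinity
assert np.NINF == -np.inf == float("-inf")
assert np.isinf(np.NINF) and np.isneginf(np.NINF) and not np.isposinf(np.NINF)

# typical use: a sentinel that any finite value beats
best = np.NINF
for score in [0.2, -1.5, 3.7]:
    best = max(best, score)
print(best)  # 3.7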

Example 1
Project: onnx-tensorflow   Author: onnx   File: test_node.py    License: Apache License 2.0
def test_is_inf(self):
    if legacy_opset_pre_ver(10):
      raise unittest.SkipTest("ONNX version {} doesn't support IsInf.".format(
          defs.onnx_opset_version()))
    input = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf],
                     dtype=np.float32)
    expected_output = {
        "node_def": np.isinf(input),
        "node_def_neg_false": np.isposinf(input),
        "node_def_pos_false": np.isneginf(input)
    }
    node_defs = {
        "node_def":
            helper.make_node("IsInf", ["X"], ["Y"]),
        "node_def_neg_false":
            helper.make_node("IsInf", ["X"], ["Y"], detect_negative=0),
        "node_def_pos_false":
            helper.make_node("IsInf", ["X"], ["Y"], detect_positive=0)
    }
    for key in node_defs:
      output = run_node(node_defs[key], [input])
      np.testing.assert_equal(output["Y"], expected_output[key]) 
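The expected outputs in this test come straight from NumPy's own predicates; as a quick sanity check independent of ONNX/TensorFlow, they evaluate as follows on the same sample array:

import numpy as np

x = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf], dtype=np.float32)
print(np.isinf(x))     # [False False  True False  True  True]
print(np.isposinf(x))  # [False False  True False False  True]
print(np.isneginf(x))  # [False False False False  True False]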
Example 2
Project: onnx-tensorflow   Author: onnx   File: test_dynamic_shape.py    License: Apache License 2.0
def test_is_inf(self):
    if legacy_opset_pre_ver(10):
      raise unittest.SkipTest("ONNX version {} doesn't support IsInf.".format(
          defs.onnx_opset_version()))
    inp = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf],
                   dtype=np.float32)
    expected_output = np.isinf(inp)
    node_def = helper.make_node("IsInf", ["X"], ["Y"])
    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("X", TensorProto.FLOAT, [None]),
        ],
        outputs=[helper.make_tensor_value_info("Y", TensorProto.BOOL, [None])])
    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"X": inp})
    np.testing.assert_equal(output["Y"], expected_output) 
Example 3
Project: prpy   Author: personalrobotics   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def ComputeEnabledAABB(kinbody):
    """
    Returns the AABB of the enabled links of a KinBody.

    @param kinbody: an OpenRAVE KinBody
    @returns: AABB of the enabled links of the KinBody
    """
    from numpy import NINF, PINF
    from openravepy import AABB

    min_corner = numpy.array([PINF] * 3)
    max_corner = numpy.array([NINF] * 3)

    for link in kinbody.GetLinks():
        if link.IsEnabled():
            link_aabb = link.ComputeAABB()
            center = link_aabb.pos()
            half_extents = link_aabb.extents()
            min_corner = numpy.minimum(center - half_extents, min_corner)
            max_corner = numpy.maximum(center + half_extents, max_corner)

    center = (min_corner + max_corner) / 2.
    half_extents = (max_corner - min_corner) / 2.
    return AABB(center, half_extents) 
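The PINF/NINF seeds are the standard bounding-box trick: starting the minimum corner at +inf and the maximum corner at -inf means the first enabled link always tightens both corners. A NumPy-only sketch of the same accumulation, with made-up (center, half_extents) pairs standing in for OpenRAVE link AABBs:

import numpy as np

boxes = [(np.array([0., 0., 0.]), np.array([1., 1., 1.])),
         (np.array([2., 0., 0.]), np.array([.5, .5, .5]))]

min_corner = np.full(3, np.PINF)
max_corner = np.full(3, np.NINF)
for center, half in boxes:
    min_corner = np.minimum(min_corner, center - half)
    max_corner = np.maximum(max_corner, center + half)

print((min_corner + max_corner) / 2.)   # [0.75 0. 0.]   combined center
print((max_corner - min_corner) / 2.)   # [1.75 1. 1.]   combined half-extents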
Example 4
Project: chainer   Author: chainer   File: test_constants.py    License: MIT License
def test_constants():
    assert chainerx.Inf is numpy.Inf
    assert chainerx.Infinity is numpy.Infinity
    assert chainerx.NAN is numpy.NAN
    assert chainerx.NINF is numpy.NINF
    assert chainerx.NZERO is numpy.NZERO
    assert chainerx.NaN is numpy.NaN
    assert chainerx.PINF is numpy.PINF
    assert chainerx.PZERO is numpy.PZERO
    assert chainerx.e is numpy.e
    assert chainerx.euler_gamma is numpy.euler_gamma
    assert chainerx.inf is numpy.inf
    assert chainerx.infty is numpy.infty
    assert chainerx.nan is numpy.nan
    assert chainerx.newaxis is numpy.newaxis
    assert chainerx.pi is numpy.pi 
Example 5
Project: revscoring   Author: wikimedia   File: util.py    License: MIT License
def normalize(v):
    if isinstance(v, numpy.bool_):
        return bool(v)
    elif isinstance(v, numpy.ndarray):
        return [normalize(item) for item in v]
    elif v == numpy.NaN:
        return "NaN"
    elif v == numpy.NINF:
        return "-Infinity"
    elif v == numpy.PINF:
        return "Infinity"
    elif isinstance(v, numpy.float):
        return float(v)
    elif isinstance(v, tuple):
        return list(v)
    else:
        return v 
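One caveat about the snippet above: the v == numpy.NaN branch can never fire, because NaN compares unequal to everything (including itself), so NaN detection has to go through numpy.isnan. The NINF and PINF comparisons are fine, since infinities do compare equal to themselves. A minimal sketch of the safer ordering:

import numpy as np

def normalize_float(v):
    if np.isnan(v):          # np.nan == np.nan is False, so use isnan
        return "NaN"
    elif v == np.NINF:
        return "-Infinity"
    elif v == np.PINF:
        return "Infinity"
    return float(v)

print(normalize_float(np.nan), normalize_float(np.NINF), normalize_float(1.5))
# NaN -Infinity 1.5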
Example 6
Project: stocknet-code   Author: yumoxu   File: Model.py    License: MIT License
def _create_corpus_embed(self):
        """
            msg_embed: batch_size * max_n_days * max_n_msgs * msg_embed_size

            => corpus_embed: batch_size * max_n_days * corpus_embed_size
        """
        with tf.name_scope('corpus_embed'):
            with tf.variable_scope('u_t'):
                proj_u = self._linear(self.msg_embed, self.msg_embed_size, 'tanh', use_bias=False)
                w_u = tf.get_variable('w_u', shape=(self.msg_embed_size, 1), initializer=self.initializer)
            u = tf.reduce_mean(tf.tensordot(proj_u, w_u, axes=1), axis=-1)  # batch_size * max_n_days * max_n_msgs

            mask_msgs = tf.sequence_mask(self.n_msgs_ph, maxlen=self.max_n_msgs, dtype=tf.bool, name='mask_msgs')
            ninf = tf.fill(tf.shape(mask_msgs), np.NINF)
            masked_score = tf.where(mask_msgs, u, ninf)
            u = neural.softmax(masked_score)  # batch_size * max_n_days * max_n_msgs
            u = tf.where(tf.is_nan(u), tf.zeros_like(u), u)  # replace nan with 0.0

            u = tf.expand_dims(u, axis=-2)  # batch_size * max_n_days * 1 * max_n_msgs
            corpus_embed = tf.matmul(u, self.msg_embed)  # batch_size * max_n_days * 1 * msg_embed_size
            corpus_embed = tf.reduce_mean(corpus_embed, axis=-2)  # batch_size * max_n_days * msg_embed_size
            self.corpus_embed = tf.nn.dropout(corpus_embed, keep_prob=1-self.dropout_ce, name='corpus_embed') 
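np.NINF is the right fill value here because exp(-inf) == 0, so padded message slots get exactly zero weight after the softmax; the later tf.is_nan guard handles the degenerate case where every slot is masked. A NumPy-only sketch of the masking trick:

import numpy as np

scores = np.array([2.0, 1.0, 0.5, -1.0])
mask = np.array([True, True, False, False])   # False = padding

masked = np.where(mask, scores, np.NINF)      # padded scores -> -inf
weights = np.exp(masked - masked.max())       # exp(-inf) == 0
weights /= weights.sum()
print(weights)                                # ~[0.731 0.269 0. 0.]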
Example 7
Project: ALiPy   Author: NUAA-AL   File: multi_label.py    License: BSD 3-Clause "New" or "Revised" License
def lr_predict(self, BV, data, num_sub):
        BV = np.asarray(BV)
        data = np.asarray(data)

        fs = data.dot(BV)
        n = data.shape[0]
        n_class = int(fs.shape[1] / num_sub)
        pres = np.ones((n, n_class)) * np.NINF
        for j in range(num_sub):
            f = fs[:, j: fs.shape[1]: num_sub]
            assert (np.all(f.shape == pres.shape))
            pres = np.fmax(pres, f)
        labels = -np.ones((n, n_class - 1))
        for line in range(n_class - 1):
            gt = np.nonzero(pres[:, line] > pres[:, n_class - 1])[0]
            labels[gt, line] = 1
        return pres, labels 
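Seeding pres with np.NINF guarantees the first real score wins every element-wise comparison, and np.fmax (unlike np.maximum) lets numbers beat NaNs. A small illustrative sketch of that accumulation:

import numpy as np

pres = np.full((2, 3), np.NINF)               # running element-wise maximum
for f in [np.array([[1., np.nan, 0.], [2., 1., -1.]]),
          np.array([[0., 5., -3.], [1., np.nan, 4.]])]:
    pres = np.fmax(pres, f)                   # fmax ignores NaNs
print(pres)
# [[1. 5. 0.]
#  [2. 1. 4.]]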
Example 8
Project: emukit   Author: amzn   File: continuous_fidelity_entropy_search.py    License: Apache License 2.0
def _get_proposal_function(self, model, space):

        # Define proposal function for multi-fidelity
        ei = ExpectedImprovement(model)

        def proposal_func(x):
            x_ = x[None, :]
            # Map to highest fidelity
            idx = np.ones((x_.shape[0], 1)) * self.high_fidelity

            x_ = np.insert(x_, self.target_fidelity_index, idx, axis=1)

            if space.check_points_in_domain(x_):
                val = np.log(np.clip(ei.evaluate(x_)[0], 0., np.PINF))
                if np.any(np.isnan(val)):
                    return np.array([np.NINF])
                else:
                    return val
            else:
                return np.array([np.NINF])

        return proposal_func 
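Returning np.NINF for out-of-domain (or NaN-producing) points works because the proposal is treated as a log-value: a log-probability of -inf means probability zero, so such points are never accepted. A toy sketch of the same guard, using a stand-in acquisition value in place of ei.evaluate:

import numpy as np

def log_proposal(acq_value, in_domain):
    if not in_domain:
        return np.array([np.NINF])
    val = np.log(np.clip(acq_value, 0., np.PINF))
    return np.array([np.NINF]) if np.any(np.isnan(val)) else val

print(log_proposal(np.array([0.3]), True))    # [-1.2039728]
print(log_proposal(np.array([0.3]), False))   # [-inf]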
Example 9
Project: emukit   Author: amzn   File: entropy_search.py    License: Apache License 2.0
def _get_proposal_function(self, model, space):

        # Define proposal function for multi-fidelity
        ei = ExpectedImprovement(model)

        def proposal_func(x):
            x_ = x[None, :]

            # Add information source parameter into array
            idx = np.ones((x_.shape[0], 1)) * self.target_information_source_index
            x_ = np.insert(x_, self.source_idx, idx, axis=1)

            if space.check_points_in_domain(x_):
                val = np.log(np.clip(ei.evaluate(x_)[0], 0., np.PINF))
                if np.any(np.isnan(val)):
                    return np.array([np.NINF])
                else:
                    return val
            else:
                return np.array([np.NINF])

        return proposal_func 
Example 10
Project: sdc   Author: IntelPython   File: test_rolling.py    License: BSD 2-Clause "Simplified" License
def test_df_rolling_corr(self):
        all_data = [
            list(range(10)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        length = min(len(d) for d in all_data)
        data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
        df = pd.DataFrame(data)
        for d in all_data:
            other = pd.Series(d)
            self._test_rolling_corr(df, other)

        other_all_data = deepcopy(all_data) + [list(range(10))[::-1]]
        other_all_data[1] = [-1., 1., 0., -0.1, 0.1, 0.]
        other_length = min(len(d) for d in other_all_data)
        other_data = {n: d[:other_length] for n, d in zip(string.ascii_uppercase, other_all_data)}
        other = pd.DataFrame(other_data)

        self._test_rolling_corr(df, other) 
Example 11
Project: sdc   Author: IntelPython   File: test_rolling.py    License: BSD 2-Clause "Simplified" License
def test_df_rolling_cov(self):
        all_data = [
            list(range(10)), [1., -1., 0., 0.1, -0.1],
            [1., np.inf, np.inf, -1., 0., np.inf, np.NINF, np.NINF],
            [np.nan, np.inf, np.inf, np.nan, np.nan, np.nan, np.NINF, np.NZERO]
        ]
        length = min(len(d) for d in all_data)
        data = {n: d[:length] for n, d in zip(string.ascii_uppercase, all_data)}
        df = pd.DataFrame(data)
        for d in all_data:
            other = pd.Series(d)
            self._test_rolling_cov(df, other)

        other_all_data = deepcopy(all_data) + [list(range(10))[::-1]]
        other_all_data[1] = [-1., 1., 0., -0.1, 0.1]
        other_length = min(len(d) for d in other_all_data)
        other_data = {n: d[:other_length] for n, d in zip(string.ascii_uppercase, other_all_data)}
        other = pd.DataFrame(other_data)

        self._test_rolling_cov(df, other) 
Example 12
Project: hiscore   Author: aothman   File: engine.py    License: BSD 3-Clause "New" or "Revised" License
def value_bounds(self, point):
    """
    Returns the (lower_bound, upper_bound) tuple of a point implied by the reference set and the monotone relationship vector.
    Use it to improve and understand the reference set without triggering a MonotoneError.
    Returns np.inf as the second argument if there is no upper bound and np.NINF as the first argument if there is no lower bound.

    Required argument:
    point -- Point at which to assess upper and lower bounds.
    """
    padj = point/self.scale
    points_greater_than = filter(lambda x: np.allclose(x,padj) or self.__monotone_rel__(x,padj)==1, self.points.keys())
    points_less_than = filter(lambda x: np.allclose(x,padj) or self.__monotone_rel__(padj,x)==1, self.points.keys())
    gtbound = np.inf if self.maxval is None else self.maxval
    ltbound = np.NINF if self.minval is None else self.minval
    for p in points_greater_than:
      gtbound = min(self.points[p],gtbound)
    for p in points_less_than:
      ltbound = max(self.points[p],ltbound)
    return ltbound, gtbound 
Example 13
Project: incubator-tvm   Author: apache   File: pytorch.py    License: Apache License 2.0
def _norm():
    def _impl(inputs, input_types):
        data = inputs[0]
        dtype = input_types[0]
        axis = None
        keepdims = False
        if len(inputs) > 3:
            axis = list(_infer_shape(inputs[2]))
            keepdims = bool(inputs[3])

        order = inputs[1]
        if order == np.inf:
            return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims)
        elif order == np.NINF:
            return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims)
        else:
            reci_order = _expr.const(1.0 / order, dtype=dtype)
            order = _expr.const(order)
            return _op.power(_op.reduce.sum(_op.power(_op.abs(data), order),
                                            axis=axis,
                                            keepdims=keepdims),
                             reci_order)
    return _impl 
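The np.NINF branch implements the negative-infinity norm, which for a vector is the minimum absolute value (mirroring the +inf norm, the maximum absolute value). A quick NumPy check of the equivalence this converter relies on:

import numpy as np

x = np.array([3.0, -1.0, 4.0, -0.5])
print(np.linalg.norm(x, ord=np.inf))     # 4.0  == np.abs(x).max()
print(np.linalg.norm(x, ord=np.NINF))    # 0.5  == np.abs(x).min()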
Example 14
Project: GroundedTranslation   Author: elliottd   File: Callbacks.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self, word2index, index2word, argsDict, dataset,
                 data_generator, use_sourcelang=False, use_image=True):
        super(Callback, self).__init__()

        self.verbose = True
        self.filename = "weights.hdf5"
        self.save_best_only = True

        self.val_loss = []
        self.best_val_loss = np.inf

        self.val_metric = []
        self.best_val_metric = np.NINF

        self.word2index = word2index
        self.index2word = index2word
        self.args = argsDict

        # used to control early stopping on the validation data
        self.wait = 0
        self.patience = self.args.patience

        # needed by model.predict in generate_sentences
        self.use_sourcelang = use_sourcelang
        self.use_image = use_image

        # controversial assignment but it makes it much easier to
        # do early stopping based on metrics
        self.data_generator = data_generator

        # this results in two file handlers for dataset (here and
        # data_generator)
        if not dataset:
            logger.warn("No dataset given, using flickr8k")
            self.dataset = h5py.File("flickr8k/dataset.h5", "r")
        else:
            self.dataset = h5py.File("%s/dataset.h5" % dataset, "r")
        if self.args.source_vectors is not None:
            self.source_dataset = h5py.File("%s/dataset.h5" % self.args.source_vectors, "r") 
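np.inf and np.NINF are the natural starting points for "best loss so far" (to be minimized) and "best metric so far" (to be maximized): any real first epoch improves on both. A bare-bones sketch of that early-stopping bookkeeping, with hypothetical epoch values and no Keras involved:

import numpy as np

best_val_loss, best_val_metric = np.inf, np.NINF
wait, patience = 0, 2

for val_loss, val_metric in [(1.2, 0.31), (0.9, 0.35), (0.95, 0.34), (0.97, 0.33)]:
    improved = val_loss < best_val_loss or val_metric > best_val_metric
    best_val_loss = min(best_val_loss, val_loss)
    best_val_metric = max(best_val_metric, val_metric)
    wait = 0 if improved else wait + 1
    if wait > patience:
        break
print(best_val_loss, best_val_metric)   # 0.9 0.35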
Example 15
Project: recruit   Author: Frank-qlu   File: test_umath.py    License: Apache License 2.0
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) 
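This pins down the IEEE 754 / C99 convention for atan2 at infinite arguments: any finite positive y over -infinity gives pi, and any finite negative y gives -pi. The same check outside the test harness:

import numpy as np

print(np.arctan2(1.0, np.NINF))     #  3.141592653589793
print(np.arctan2(-1.0, np.NINF))    # -3.141592653589793
print(np.arctan2(np.inf, np.NINF))  #  2.356194490192345  (3*pi/4)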
Example 16
Project: recruit   Author: Frank-qlu   File: test_pandas.py    License: Apache License 2.0
def test_frame_from_json_nones(self):
        df = DataFrame([[1, 2], [4, 5, 6]])
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])

        df = DataFrame([['1', '2'], ['4', '5', '6']])
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert unser[2][0] is None
        unser = read_json(df.to_json(), convert_axes=False, dtype=False)
        assert unser['2']['0'] is None

        unser = read_json(df.to_json(), numpy=False)
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), numpy=False, dtype=False)
        assert unser[2][0] is None
        unser = read_json(df.to_json(), numpy=False,
                          convert_axes=False, dtype=False)
        assert unser['2']['0'] is None

        # infinities get mapped to nulls which get mapped to NaNs during
        # deserialisation
        df = DataFrame([[1, 2], [4, 5, 6]])
        df.loc[0, 2] = np.inf
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert np.isnan(unser[2][0])

        df.loc[0, 2] = np.NINF
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert np.isnan(unser[2][0]) 
Example 17
Project: lambda-packs   Author: ryfeus   File: _multivariate.py    License: MIT License
def logpmf(self, x, n, p):
        """
        Log of the Multinomial probability mass function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        logpmf : ndarray or scalar
            Log of the probability mass function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)
        x, xcond = self._process_quantiles(x, n, p)

        result = self._logpmf(x, n, p)

        # replace values for which x was out of the domain; broadcast
        # xcond to the right shape
        xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
        result = self._checkresult(result, xcond_, np.NINF)

        # replace values bad for n or p; broadcast npcond to the right shape
        npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
        return self._checkresult(result, npcond_, np.NAN) 
Example 18
Project: auto-alt-text-lambda-api   Author: abhisuri97   File: test_umath.py    License: MIT License
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) 
Example 19
Project: vnpy_crypto   Author: birforce   File: test_umath.py    License: MIT License
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) 
Example 20
Project: vnpy_crypto   Author: birforce   File: test_pandas.py    License: MIT License
def test_frame_from_json_nones(self):
        df = DataFrame([[1, 2], [4, 5, 6]])
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])

        df = DataFrame([['1', '2'], ['4', '5', '6']])
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert unser[2][0] is None
        unser = read_json(df.to_json(), convert_axes=False, dtype=False)
        assert unser['2']['0'] is None

        unser = read_json(df.to_json(), numpy=False)
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), numpy=False, dtype=False)
        assert unser[2][0] is None
        unser = read_json(df.to_json(), numpy=False,
                          convert_axes=False, dtype=False)
        assert unser['2']['0'] is None

        # infinities get mapped to nulls which get mapped to NaNs during
        # deserialisation
        df = DataFrame([[1, 2], [4, 5, 6]])
        df.loc[0, 2] = np.inf
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert np.isnan(unser[2][0])

        df.loc[0, 2] = np.NINF
        unser = read_json(df.to_json())
        assert np.isnan(unser[2][0])
        unser = read_json(df.to_json(), dtype=False)
        assert np.isnan(unser[2][0]) 
Example 21
Project: Computable   Author: ktraunmueller   File: test_umath.py    License: MIT License
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) 
Example 22
Project: Computable   Author: ktraunmueller   File: test_pandas.py    License: MIT License
def test_frame_from_json_nones(self):
        df = DataFrame([[1, 2], [4, 5, 6]])
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))

        df = DataFrame([['1', '2'], ['4', '5', '6']])
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(),dtype=False)
        self.assertTrue(unser[2][0] is None)
        unser = read_json(df.to_json(),convert_axes=False,dtype=False)
        self.assertTrue(unser['2']['0'] is None)

        unser = read_json(df.to_json(), numpy=False)
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(), numpy=False, dtype=False)
        self.assertTrue(unser[2][0] is None)
        unser = read_json(df.to_json(), numpy=False, convert_axes=False, dtype=False)
        self.assertTrue(unser['2']['0'] is None)

        # infinities get mapped to nulls which get mapped to NaNs during
        # deserialisation
        df = DataFrame([[1, 2], [4, 5, 6]])
        df[2][0] = np.inf
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(), dtype=False)
        self.assertTrue(np.isnan(unser[2][0]))

        df[2][0] = np.NINF
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(),dtype=False)
        self.assertTrue(np.isnan(unser[2][0])) 
Example 23
Project: Computable   Author: ktraunmueller   File: anneal.py    License: MIT License
def init(self, **options):
        self.__dict__.update(options)
        self.lower = asarray(self.lower)
        self.lower = where(self.lower == numpy.NINF, -_double_max, self.lower)
        self.upper = asarray(self.upper)
        self.upper = where(self.upper == numpy.PINF, _double_max, self.upper)
        self.k = 0
        self.accepted = 0
        self.feval = 0
        self.tests = 0 
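The where calls swap infinite bounds for a large finite number so the annealer's later arithmetic stays finite. A standalone sketch of that substitution, with an assumed finite cap standing in for the module's _double_max:

import numpy as np

double_max = np.finfo(np.float64).max / 2   # assumed stand-in for _double_max

lower = np.array([np.NINF, -1.0, 0.0])
upper = np.array([np.PINF,  1.0, 5.0])

lower = np.where(lower == np.NINF, -double_max, lower)
upper = np.where(upper == np.PINF,  double_max, upper)
print(lower, upper)   # infinite bounds replaced by -/+ double_max, finite ones untouched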
Example 24
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_umath.py    License: MIT License
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) 
Example 25
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_bagging.py    License: MIT License
def test_bagging_regressor_with_missing_inputs():
    # Check that BaggingRegressor can accept X with missing/infinite data
    X = np.array([
        [1, 3, 5],
        [2, None, 6],
        [2, np.nan, 6],
        [2, np.inf, 6],
        [2, np.NINF, 6],
    ])
    y_values = [
        np.array([2, 3, 3, 3, 3]),
        np.array([
            [2, 1, 9],
            [3, 6, 8],
            [3, 6, 8],
            [3, 6, 8],
            [3, 6, 8],
        ])
    ]
    for y in y_values:
        regressor = DecisionTreeRegressor()
        pipeline = make_pipeline(
            FunctionTransformer(replace, validate=False),
            regressor
        )
        pipeline.fit(X, y).predict(X)
        bagging_regressor = BaggingRegressor(pipeline)
        y_hat = bagging_regressor.fit(X, y).predict(X)
        assert_equal(y.shape, y_hat.shape)

        # Verify that exceptions can be raised by wrapper regressor
        regressor = DecisionTreeRegressor()
        pipeline = make_pipeline(regressor)
        assert_raises(ValueError, pipeline.fit, X, y)
        bagging_regressor = BaggingRegressor(pipeline)
        assert_raises(ValueError, bagging_regressor.fit, X, y) 
Example 26
Project: Mastering-Elasticsearch-7.0   Author: PacktPublishing   File: test_bagging.py    License: MIT License
def test_bagging_classifier_with_missing_inputs():
    # Check that BaggingClassifier can accept X with missing/infinite data
    X = np.array([
        [1, 3, 5],
        [2, None, 6],
        [2, np.nan, 6],
        [2, np.inf, 6],
        [2, np.NINF, 6],
    ])
    y = np.array([3, 6, 6, 6, 6])
    classifier = DecisionTreeClassifier()
    pipeline = make_pipeline(
        FunctionTransformer(replace, validate=False),
        classifier
    )
    pipeline.fit(X, y).predict(X)
    bagging_classifier = BaggingClassifier(pipeline)
    bagging_classifier.fit(X, y)
    y_hat = bagging_classifier.predict(X)
    assert_equal(y.shape, y_hat.shape)
    bagging_classifier.predict_log_proba(X)
    bagging_classifier.predict_proba(X)

    # Verify that exceptions can be raised by wrapper classifier
    classifier = DecisionTreeClassifier()
    pipeline = make_pipeline(classifier)
    assert_raises(ValueError, pipeline.fit, X, y)
    bagging_classifier = BaggingClassifier(pipeline)
    assert_raises(ValueError, bagging_classifier.fit, X, y) 
Example 27
Project: pixelworld   Author: vicariousinc   File: csp.py    License: MIT License
def __init__(self, min_int=None, max_int=None):
        self.min_int = min_int or np.NINF
        self.max_int = max_int or np.inf 
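A subtlety in this constructor: min_int or np.NINF treats 0 as "not given" because 0 is falsy in Python, so an explicit minimum of zero silently becomes -inf. If zero is a legal bound, an explicit None check avoids that, as in this hypothetical variant:

import numpy as np

def int_bounds(min_int=None, max_int=None):
    lo = np.NINF if min_int is None else min_int
    hi = np.inf if max_int is None else max_int
    return lo, hi

print(int_bounds())       # (-inf, inf)
print(int_bounds(0, 10))  # (0, 10)  -- with `or`, the 0 would have become -inf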
Example 28
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_umath.py    License: MIT License
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi) 
Example 29
Project: GraphicDesignPatternByPython   Author: Relph1119   File: _multivariate.py    License: MIT License
def logpmf(self, x, n, p):
        """
        Log of the Multinomial probability mass function.

        Parameters
        ----------
        x : array_like
            Quantiles, with the last axis of `x` denoting the components.
            Each quantile must be a symmetric positive definite matrix.
        %(_doc_default_callparams)s

        Returns
        -------
        logpmf : ndarray or scalar
            Log of the probability mass function evaluated at `x`

        Notes
        -----
        %(_doc_callparams_note)s
        """
        n, p, npcond = self._process_parameters(n, p)
        x, xcond = self._process_quantiles(x, n, p)

        result = self._logpmf(x, n, p)

        # replace values for which x was out of the domain; broadcast
        # xcond to the right shape
        xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
        result = self._checkresult(result, xcond_, np.NINF)

        # replace values bad for n or p; broadcast npcond to the right shape
        npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
        return self._checkresult(result, npcond_, np.NAN) 
Example 30
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)