Python scipy.stats.expon() Examples

The following are 21 code examples of scipy.stats.expon(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.stats, or try the search function.
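Before the examples, a quick orientation: scipy.stats.expon is parameterized by scale, where scale = 1/lambda for rate parameter lambda (there is no rate argument), and loc shifts the support. A minimal sketch of the common calls (the numbers are illustrative and not taken from any example below):

import numpy as np
from scipy import stats

# Frozen exponential with rate lambda = 2, i.e. scale = 1/2.
rate = 2.0
exp_dist = stats.expon(scale=1.0 / rate)

exp_dist.pdf(1.0)    # density at x=1: rate * exp(-rate * x)
exp_dist.cdf(1.0)    # 1 - exp(-rate * x)
exp_dist.ppf(0.5)    # median: log(2) / rate
exp_dist.mean()      # 1 / rate
exp_dist.var()       # 1 / rate ** 2
samples = exp_dist.rvs(size=1000, random_state=0)  # reproducible draws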
Example #1
Source File: test_exponential.py    From chainer with MIT License
def setUp_configure(self):
        from scipy import stats
        self.dist = distributions.Exponential
        self.scipy_dist = stats.expon

        self.test_targets = set([
            'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf', 'log_prob',
            'mean', 'sample', 'support', 'variance'])

        lam = numpy.exp(numpy.random.uniform(
            -1, 1, self.shape)).astype(numpy.float32)
        lam = numpy.asarray(lam)
        self.params = {'lam': lam}
        self.scipy_params = {'scale': 1 / lam}

        self.support = 'positive' 
Example #2
Source File: continuous_distributions.py    From machine-learning-note with MIT License
def diff_exp_dis():
    """
    不同参数下的指数分布
    :return:
    """
    exp_dis_0_5 = stats.expon(scale=0.5)
    exp_dis_1 = stats.expon(scale=1)
    exp_dis_2 = stats.expon(scale=2)

    x1 = np.linspace(exp_dis_0_5.ppf(0.001), exp_dis_0_5.ppf(0.9999), 100)
    x2 = np.linspace(exp_dis_1.ppf(0.001), exp_dis_1.ppf(0.999), 100)
    x3 = np.linspace(exp_dis_2.ppf(0.001), exp_dis_2.ppf(0.99), 100)
    fig, ax = plt.subplots(1, 1)
    ax.plot(x1, exp_dis_0_5.pdf(x1), 'b-', lw=2, label=r'lambda = 2')
    ax.plot(x2, exp_dis_1.pdf(x2), 'g-', lw=2, label='lambda = 1')
    ax.plot(x3, exp_dis_2.pdf(x3), 'r-', lw=2, label='lambda = 0.5')
    plt.ylabel('Probability')
    plt.title(r'PDF of Exponential Distribution')
    ax.legend(loc='best', frameon=False)
    plt.show()

# diff_exp_dis() 
Example #3
Source File: test_mixture.py    From carl with BSD 3-Clause "New" or "Revised" License
def test_fit():
    p1 = Normal(mu=T.constant(0.0), sigma=T.constant(2.0))
    p2 = Normal(mu=T.constant(3.0), sigma=T.constant(2.0))
    p3 = Exponential(inverse_scale=T.constant(0.5))
    g = theano.shared(0.5)
    m = Mixture(components=[p1, p2, p3], weights=[g, g*g])

    X = np.concatenate([st.norm(loc=0.0, scale=2.0).rvs(300, random_state=0),
                        st.norm(loc=3.0, scale=2.0).rvs(100, random_state=1),
                        st.expon(scale=1. / 0.5).rvs(500, random_state=2)])
    X = X.reshape(-1, 1)
    s0 = m.score(X)

    m.fit(X)
    assert np.abs(g.eval() - 1. / 3.) < 0.05
    assert m.score(X) >= s0 
Example #4
Source File: exponential_test.py    From deep_image_model with Apache License 2.0
def testExponentialSampleMultiDimensional(self):
    with self.test_session():
      batch_size = 2
      lam_v = [3.0, 22.0]
      lam = tf.constant([lam_v] * batch_size)

      exponential = tf.contrib.distributions.Exponential(lam=lam)

      n = 100000
      samples = exponential.sample(n, seed=138)
      self.assertEqual(samples.get_shape(), (n, batch_size, 2))

      sample_values = samples.eval()

      self.assertFalse(np.any(sample_values < 0.0))
      for i in range(2):
        self.assertLess(
            stats.kstest(
                sample_values[:, 0, i], stats.expon(scale=1.0/lam_v[i]).cdf)[0],
            0.01)
        self.assertLess(
            stats.kstest(
                sample_values[:, 1, i], stats.expon(scale=1.0/lam_v[i]).cdf)[0],
            0.01) 
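The Kolmogorov-Smirnov check above can be reproduced without TensorFlow. A minimal sketch using only numpy and scipy, with the same 0.01 threshold (the numpy sampling route is an assumption, not part of the original test):

import numpy as np
from scipy import stats

# Draw exponential samples and compare them against the matching
# scipy.stats.expon CDF with a one-sample KS test.
rate = 3.0
rng = np.random.RandomState(138)
samples = rng.exponential(scale=1.0 / rate, size=100000)

ks_stat, p_value = stats.kstest(samples, stats.expon(scale=1.0 / rate).cdf)
assert ks_stat < 0.01  # a small KS statistic means the samples fit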
Example #5
Source File: exponential_test.py    From deep_image_model with Apache License 2.0
def testExponentialLogPDF(self):
    with tf.Session():
      batch_size = 6
      lam = tf.constant([2.0] * batch_size)
      lam_v = 2.0
      x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
      exponential = tf.contrib.distributions.Exponential(lam=lam)
      expected_log_pdf = stats.expon.logpdf(x, scale=1 / lam_v)

      log_pdf = exponential.log_pdf(x)
      self.assertEqual(log_pdf.get_shape(), (6,))
      self.assertAllClose(log_pdf.eval(), expected_log_pdf)

      pdf = exponential.pdf(x)
      self.assertEqual(pdf.get_shape(), (6,))
      self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf)) 
Example #6
Source File: test_randomizedsearch.py    From dislib with Apache License 2.0
def test_fit(self):
        """Tests RandomizedSearchCV fit()."""
        x_np, y_np = datasets.load_iris(return_X_y=True)
        p = np.random.permutation(len(x_np))  # Pre-shuffling required for CSVM
        x = ds.array(x_np[p], (30, 4))
        y = ds.array((y_np[p] == 0)[:, np.newaxis], (30, 1))
        param_distributions = {'c': stats.expon(scale=0.5),
                               'gamma': stats.expon(scale=1)}
        csvm = CascadeSVM()
        n_iter = 12
        k = 3
        searcher = RandomizedSearchCV(estimator=csvm,
                                      param_distributions=param_distributions,
                                      n_iter=n_iter, cv=k, random_state=0)
        searcher.fit(x, y)

        expected_keys = {'param_c', 'param_gamma', 'params', 'mean_test_score',
                         'std_test_score', 'rank_test_score'}
        split_keys = {'split%d_test_score' % i for i in range(k)}
        expected_keys.update(split_keys)

        self.assertSetEqual(set(searcher.cv_results_.keys()), expected_keys)
        self.assertEqual(len(searcher.cv_results_['param_c']), n_iter)
        self.assertTrue(hasattr(searcher, 'best_estimator_'))
        self.assertTrue(hasattr(searcher, 'best_score_'))
        self.assertTrue(hasattr(searcher, 'best_params_'))
        self.assertTrue(hasattr(searcher, 'best_index_'))
        self.assertTrue(hasattr(searcher, 'scorer_'))
        self.assertEqual(searcher.n_splits_, k) 
Example #7
Source File: test_grid_search.py    From twitter-stock-recommendation with MIT License
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kind of prediction
    # errors across CV folds and parameter settings
    X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
                               random_state=0)

    # XXX: as of today (scipy 0.12) it's not possible to set the random seed
    # of scipy.stats distributions: the assertions in this test should thus
    # not depend on the randomization
    params = dict(C=expon(scale=10),
                  gamma=expon(scale=0.1))
    n_cv_iter = 3
    n_search_iter = 30
    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
                                param_distributions=params, iid=False)
    search.fit(X, y)
    assert_equal(len(search.grid_scores_), n_search_iter)

    # Check consistency of the structure of each cv_score item
    for cv_score in search.grid_scores_:
        assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # Because we set iid to False, the mean_validation score is the
        # mean of the fold mean scores instead of the aggregate sample-wise
        # mean score
        assert_almost_equal(np.mean(cv_score.cv_validation_scores),
                            cv_score.mean_validation_score)
        assert_equal(list(sorted(cv_score.parameters.keys())),
                     list(sorted(params.keys())))

    # Check the consistency with the best_score_ and best_params_ attributes
    sorted_grid_scores = list(sorted(search.grid_scores_,
                              key=lambda x: x.mean_validation_score))
    best_score = sorted_grid_scores[-1].mean_validation_score
    assert_equal(search.best_score_, best_score)

    tied_best_params = [s.parameters for s in sorted_grid_scores
                        if s.mean_validation_score == best_score]
    assert_true(search.best_params_ in tied_best_params,
                "best_params_={0} is not part of the"
                " tied best models: {1}".format(
                    search.best_params_, tied_best_params)) 
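The XXX note in the test above is dated: newer SciPy releases accept a random_state argument in a frozen distribution's rvs(), and newer scikit-learn versions pass their own seed through rvs() when sampling from param_distributions, so these draws can be made reproducible. A quick sketch:

from scipy import stats

# Frozen distributions take random_state directly in rvs().
draws_a = stats.expon(scale=10).rvs(size=5, random_state=0)
draws_b = stats.expon(scale=10).rvs(size=5, random_state=0)
assert (draws_a == draws_b).all()  # identical draws for the same seed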
Example #8
Source File: test_search.py    From twitter-stock-recommendation with MIT License
def test_random_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4, random_state=42)

    n_splits = 3
    n_search_iter = 30

    params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
    param_keys = ('param_C', 'param_gamma')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_cand = n_search_iter

    for iid in (False, True):
        search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits,
                                    iid=iid, param_distributions=params)
        search.fit(X, y)
        assert_equal(iid, search.iid)
        cv_results = search.cv_results_
        # Check results structure
        check_cv_results_array_types(search, param_keys, score_keys)
        check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
        # For random_search, all the param array vals should be unmasked
        assert_false(any(cv_results['param_C'].mask) or
                     any(cv_results['param_gamma'].mask))
        check_cv_results_grid_scores_consistency(search) 
Example #9
Source File: test_search.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_random_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4, random_state=42)

    n_splits = 3
    n_search_iter = 30

    params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
    param_keys = ('param_C', 'param_gamma')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_cand = n_search_iter

    for iid in (False, True):
        search = RandomizedSearchCV(SVC(gamma='scale'), n_iter=n_search_iter,
                                    cv=n_splits, iid=iid,
                                    param_distributions=params,
                                    return_train_score=True)
        search.fit(X, y)
        assert_equal(iid, search.iid)
        cv_results = search.cv_results_
        # Check results structure
        check_cv_results_array_types(search, param_keys, score_keys)
        check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
        # For random_search, all the param array vals should be unmasked
        assert not (any(np.ma.getmaskarray(cv_results['param_C'])) or
                    any(np.ma.getmaskarray(cv_results['param_gamma'])))
Example #10
Source File: continuous_distributions.py    From machine-learning-note with MIT License
def exponential_dis(loc=0, scale=1.0):
    """
    指数分布,exponential continuous random variable
    按照定义,指数分布只有一个参数lambda,这里的scale = 1/lambda
    :param loc: 定义域的左端点,相当于将整体分布沿x轴平移loc
    :param scale: lambda的倒数,loc + scale表示该分布的均值,scale^2表示该分布的方差
    :return:
    """
    exp_dis = stats.expon(loc=loc, scale=scale)
    x = np.linspace(exp_dis.ppf(0.000001),
                    exp_dis.ppf(0.999999), 100)
    fig, ax = plt.subplots(1, 1)

    # Pass the parameters directly
    ax.plot(x, stats.expon.pdf(x, loc=loc, scale=scale), 'r-',
            lw=5, alpha=0.6, label='exponential pdf')

    # Evaluate the frozen distribution
    ax.plot(x, exp_dis.pdf(x), 'k-',
            lw=2, label='frozen pdf')

    # x values at which the CDF equals 0.001, 0.5 and 0.999
    vals = exp_dis.ppf([0.001, 0.5, 0.999])
    print(vals)  # approx. [0.002 1.386 13.816] for loc=0, scale=2

    # Check accuracy of cdf and ppf
    print(np.allclose([0.001, 0.5, 0.999], exp_dis.cdf(vals)))

    r = exp_dis.rvs(size=10000)
    ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)  # `normed` was removed in Matplotlib 3
    plt.ylabel('Probability')
    plt.title(r'PDF of Exp(0.5)')
    ax.legend(loc='best', frameon=False)
    plt.show()

# exponential_dis(loc=0, scale=2) 
Example #11
Source File: mapmatcher.py    From pgMapMatch with MIT License
def temporalLL(travelcostratio):
    """Log likelihood function for the transition between different edges
    Input is ratio of implied speed to speed limit"""
    if isinstance(travelcostratio, list):
        travelcostratio = np.array(travelcostratio)
    if isinstance(travelcostratio, np.ndarray):
        retvals = stats.expon(scale=temporal_scale).logpdf(travelcostratio)
        retvals[travelcostratio > 1] = (stats.norm(1, scale=sigma_t).logpdf(travelcostratio[travelcostratio > 1])+temporalLL_ratio)
        return retvals*temporal_weight
    else:  # scalar
        if travelcostratio <= 1:
            return stats.expon(scale=temporal_scale).logpdf(travelcostratio)*temporal_weight
        else:
            return (stats.norm(1, scale=sigma_t).logpdf(travelcostratio)+temporalLL_ratio)*temporal_weight 
Example #12
Source File: exponential.py    From ngboost with Apache License 2.0
def __getattr__(self, name):
        if name in dir(self.dist):
            return getattr(self.dist, name)
        return None

    # should implement a `sample()` method 
Example #13
Source File: exponential.py    From ngboost with Apache License 2.0
def __init__(self, params):
        self._params = params
        self.scale = np.exp(params[0])
        self.dist = dist(scale=self.scale) 
Example #14
Source File: exponential.py    From ngboost with Apache License 2.0
def score(self, Y):
        E, T = Y["Event"], Y["Time"]
        cens = (1 - E) * np.log(1 - self.dist.cdf(T) + eps)
        uncens = E * self.dist.logpdf(T)
        return -(cens + uncens) 
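Examples #12 through #14 are fragments of a single class. A self-contained sketch of how they plausibly fit together, assuming dist is scipy.stats.expon and eps is a small constant that keeps the censored log-likelihood finite (both assumptions, following the fragments above):

import numpy as np
from scipy.stats import expon as dist

eps = 1e-10  # guards log(0) for fully censored observations

class Exponential:
    def __init__(self, params):
        self._params = params
        self.scale = np.exp(params[0])      # scale is log-parameterized
        self.dist = dist(scale=self.scale)

    def __getattr__(self, name):
        # Delegate pdf, cdf, mean, ... to the frozen scipy distribution.
        if name in dir(self.dist):
            return getattr(self.dist, name)
        return None

    # should implement a `sample()` method

    def score(self, Y):
        # Negative log-likelihood for right-censored survival data:
        # events (E=1) contribute log f(T); censored points log S(T).
        E, T = Y["Event"], Y["Time"]
        cens = (1 - E) * np.log(1 - self.dist.cdf(T) + eps)
        uncens = E * self.dist.logpdf(T)
        return -(cens + uncens)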
Example #15
Source File: exponential_test.py    From deep_image_model with Apache License 2.0
def testExponentialMean(self):
    with tf.Session():
      lam_v = np.array([1.0, 4.0, 2.5])
      expected_mean = stats.expon.mean(scale=1 / lam_v)
      exponential = tf.contrib.distributions.Exponential(lam=lam_v)
      self.assertEqual(exponential.mean().get_shape(), (3,))
      self.assertAllClose(exponential.mean().eval(), expected_mean) 
Example #16
Source File: exponential.py    From Effective-Quadratures with GNU Lesser General Public License v2.1
def __init__(self, rate=None):
        self.rate = rate
        if (self.rate is not None) and (self.rate > 0.0):
            #self.mean = 1. / self.rate
            #self.variance = 1./(self.rate)**2
            self.skewness = 2.0
            self.kurtosis = 6.0
            self.bounds = np.array([0.0, np.inf])
            self.x_range_for_pdf = np.linspace(0.0, 20*self.rate, RECURRENCE_PDF_SAMPLES)
            self.parent = expon(scale=1.0/rate)
            self.mean = self.parent.mean()
            self.variance = self.parent.var() 
Example #17
Source File: test_exponential.py    From carl with BSD 3-Clause "New" or "Revised" License
def check_fit(inverse_scale):
    p = Exponential()
    X = st.expon(scale=1. / inverse_scale).rvs(5000,
                                               random_state=0).reshape(-1, 1)
    p.fit(X)
    assert np.abs(p.inverse_scale.get_value() - inverse_scale) <= 0.1 
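scipy can recover the same parameter directly: stats.expon.fit with the location pinned at zero returns the maximum-likelihood scale, which for the exponential is just the sample mean. A small sketch independent of carl:

import numpy as np
from scipy import stats

inverse_scale = 2.0
X = stats.expon(scale=1.0 / inverse_scale).rvs(5000, random_state=0)

# With floc=0, the MLE of scale equals the sample mean of X.
loc, scale = stats.expon.fit(X, floc=0)
assert abs(1.0 / scale - inverse_scale) < 0.1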
Example #18
Source File: test_exponential.py    From carl with BSD 3-Clause "New" or "Revised" License
def check_exponential(inverse_scale):
    rng = check_random_state(1)

    p_carl = Exponential(inverse_scale=inverse_scale)
    p_scipy = st.expon(scale=1. / inverse_scale)
    X = rng.rand(50, 1)

    assert_array_almost_equal(p_carl.pdf(X),
                              p_scipy.pdf(X.ravel()))
    assert_array_almost_equal(p_carl.cdf(X),
                              p_scipy.cdf(X.ravel()))
    assert_array_almost_equal(-np.log(p_carl.pdf(X)),
                              p_carl.nll(X)) 
Example #19
Source File: exponential_test.py    From deep_image_model with Apache License 2.0
def testExponentialCDF(self):
    with tf.Session():
      batch_size = 6
      lam = tf.constant([2.0] * batch_size)
      lam_v = 2.0
      x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)

      exponential = tf.contrib.distributions.Exponential(lam=lam)
      expected_cdf = stats.expon.cdf(x, scale=1 / lam_v)

      cdf = exponential.cdf(x)
      self.assertEqual(cdf.get_shape(), (6,))
      self.assertAllClose(cdf.eval(), expected_cdf) 
Example #20
Source File: exponential_test.py    From deep_image_model with Apache License 2.0
def testExponentialEntropy(self):
    with tf.Session():
      lam_v = np.array([1.0, 4.0, 2.5])
      expected_entropy = stats.expon.entropy(scale=1 / lam_v)
      exponential = tf.contrib.distributions.Exponential(lam=lam_v)
      self.assertEqual(exponential.entropy().get_shape(), (3,))
      self.assertAllClose(exponential.entropy().eval(), expected_entropy) 
Example #21
Source File: exponential_test.py    From deep_image_model with Apache License 2.0
def testExponentialVariance(self):
    with tf.Session():
      lam_v = np.array([1.0, 4.0, 2.5])
      expected_variance = stats.expon.var(scale=1 / lam_v)
      exponential = tf.contrib.distributions.Exponential(lam=lam_v)
      self.assertEqual(exponential.variance().get_shape(), (3,))
      self.assertAllClose(exponential.variance().eval(), expected_variance)
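The closed forms behind the last three tests are mean 1/lambda, variance 1/lambda^2, and entropy 1 - ln(lambda). A quick check against scipy, reusing the lam_v array from the tests above:

import numpy as np
from scipy import stats

lam_v = np.array([1.0, 4.0, 2.5])
scale = 1.0 / lam_v

assert np.allclose(stats.expon.mean(scale=scale), 1.0 / lam_v)
assert np.allclose(stats.expon.var(scale=scale), 1.0 / lam_v ** 2)
assert np.allclose(stats.expon.entropy(scale=scale), 1.0 - np.log(lam_v))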