Python scipy.stats.uniform() Examples

The following are code examples for showing how to use scipy.stats.uniform(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: pymach   Author: gusseppe   File: improve2.py    MIT License 7 votes vote down vote up
def knn_paramC(self, method='GridSearchCV'):
        """Return the KNeighborsClassifier hyper-parameter search space.

        Parameters
        ----------
        method : str
            'GridSearchCV', 'GeneticSearchCV' or 'EdasSearch' yield discrete
            grids; 'RandomizedSearchCV' yields sampling distributions.

        Returns
        -------
        dict
            Pipeline-prefixed parameter grid / distributions.

        Raises
        ------
        ValueError
            If ``method`` is not a supported search strategy (the original
            ``else: pass`` left ``parameters`` unbound, so the final return
            raised ``UnboundLocalError`` instead).
        """
        if method in ('GridSearchCV', 'GeneticSearchCV', 'EdasSearch'):
            parameters = {
                'selector__pca__svd_solver': ['full', 'arpack', 'randomized'],
                'selector__pca__whiten': [True, False],
                'KNeighborsClassifier__n_neighbors': [5, 10, 15],
                'KNeighborsClassifier__weights': ['uniform', 'distance'],
                'KNeighborsClassifier__algorithm': ['ball_tree', 'kd_tree', 'brute'],
            }
        elif method == 'RandomizedSearchCV':
            parameters = {
                'selector__pca__svd_solver': ['full', 'arpack', 'randomized'],
                'selector__pca__whiten': [True, False],
                # Sample n_neighbors from the discrete uniform [5, 15).
                'KNeighborsClassifier__n_neighbors': randint(5, 15),
                'KNeighborsClassifier__weights': ['uniform', 'distance'],
                'KNeighborsClassifier__algorithm': ['ball_tree', 'kd_tree', 'brute'],
            }
        else:
            # Bug fix: fail loudly on an unknown method instead of returning
            # an unbound local.
            raise ValueError('Unknown search method: {}'.format(method))
        return parameters
Example 2
Project: pymach   Author: gusseppe   File: improve2.py    MIT License 7 votes vote down vote up
def knn_paramR(self,method='GridSearchCV'):
        """Return the KNeighborsRegressor hyper-parameter search space.

        Parameters
        ----------
        method : str
            'GridSearchCV', 'GeneticSearchCV' or 'EdasSearch' yield discrete
            grids; 'RandomizedSearchCV' yields sampling distributions.

        Returns
        -------
        dict
            Pipeline-prefixed parameter grid / distributions.

        Raises
        ------
        ValueError
            If ``method`` is not a supported search strategy (the original
            ``else: pass`` left ``parameters`` unbound, so the final return
            raised ``UnboundLocalError`` instead).
        """
        if method in ('GridSearchCV', 'GeneticSearchCV', 'EdasSearch'):
            parameters = {
                'selector__pca__svd_solver': ['full', 'arpack', 'randomized'],
                'selector__pca__whiten': [True, False],
                'KNeighborsRegressor__n_neighbors': [5, 10, 15],
                'KNeighborsRegressor__weights': ['uniform', 'distance'],
                'KNeighborsRegressor__algorithm': ['ball_tree', 'kd_tree', 'brute'],
            }
        elif method == 'RandomizedSearchCV':
            parameters = {
                'selector__pca__svd_solver': ['full', 'arpack', 'randomized'],
                'selector__pca__whiten': [True, False],
                # Sample n_neighbors from the discrete uniform [5, 15).
                'KNeighborsRegressor__n_neighbors': randint(5, 15),
                'KNeighborsRegressor__weights': ['uniform', 'distance'],
                'KNeighborsRegressor__algorithm': ['ball_tree', 'kd_tree', 'brute'],
            }
        else:
            # Bug fix: fail loudly on an unknown method instead of returning
            # an unbound local.
            raise ValueError('Unknown search method: {}'.format(method))
        return parameters
Example 3
Project: ble5-nrf52-mac   Author: tomasero   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_frozen_dirichlet(self):
        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)

        d = dirichlet(alpha)

        assert_equal(d.var(), dirichlet.var(alpha))
        assert_equal(d.mean(), dirichlet.mean(alpha))
        assert_equal(d.entropy(), dirichlet.entropy(alpha))
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha)) 
Example 4
Project: ble5-nrf52-mac   Author: tomasero   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_pairwise_distances(self):
        # Test that the distribution of pairwise distances is close to correct.
        np.random.seed(514)

        def random_ortho(dim):
            # Reference sampler: the U @ V factors of a Gaussian matrix's SVD
            # are Haar-distributed orthogonal matrices.
            u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim)))
            return np.dot(u, v)

        for dim in range(2, 6):
            def generate_test_statistics(rvs, N=1000, eps=1e-10):
                # Squared Frobenius distance between two independent draws.
                stats = np.array([
                    np.sum((rvs(dim=dim) - rvs(dim=dim))**2)
                    for _ in range(N)
                ])
                # Add a bit of noise to account for numeric accuracy.
                stats += np.random.uniform(-eps, eps, size=stats.shape)
                return stats

            expected = generate_test_statistics(random_ortho)
            actual = generate_test_statistics(scipy.stats.ortho_group.rvs)

            # Two-sample KS test: the two samples should be statistically
            # indistinguishable, so the p-value must exceed 0.05.
            _D, p = scipy.stats.ks_2samp(expected, actual)

            assert_array_less(.05, p)
Example 5
Project: ble5-nrf52-mac   Author: tomasero   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_haar(self):
        # Test that the eigenvalues, which lie on the unit circle in
        # the complex plane, are uncorrelated.

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        np.random.seed(514)  # Note that the test is sensitive to seed too
        xs = unitary_group.rvs(dim, size=samples)

        # The angles "x" of the eigenvalues should be uniformly distributed
        # Overall this seems to be a necessary but weak test of the distribution.
        eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
        x = np.arctan2(eigs.imag, eigs.real)
        # One-sample KS test against uniform(loc, scale), i.e. the uniform
        # law on [-pi, -pi + 2*pi] = [-pi, pi].
        res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
        assert_(res.pvalue > 0.05)
Example 6
Project: PyDREAM   Author: LoLab-VU   File: example_sample_robertson_nopysb_with_dream.py    GNU General Public License v3.0 6 votes vote down vote up
def likelihood(parameter_vector):
    """Log-likelihood of a parameter vector sampled in log10 space."""
    # Parameters are proposed in log10 space; convert back to linear scale.
    params = 10 ** np.array(parameter_vector)

    # Integrate the ODE system for this parameter set.
    trajectory = odeint(odefunc, y0, tspan, args=(params,))

    # The third state variable is the simulated C total.
    simulated_ctotal = trajectory[:, 2]

    # Sum the pointwise log-densities under the experimental likelihood.
    logp_ctotal = np.sum(like_ctot.logpdf(simulated_ctotal))

    # Integrator failures surface as NaN; map them to an impossible proposal.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf

    return logp_ctotal


# Add vector of rate parameters to be sampled as unobserved random variables in DREAM with uniform priors. 
Example 7
Project: P3_image_processing   Author: latedude2   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_frozen_dirichlet(self):
        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)

        d = dirichlet(alpha)

        assert_equal(d.var(), dirichlet.var(alpha))
        assert_equal(d.mean(), dirichlet.mean(alpha))
        assert_equal(d.entropy(), dirichlet.entropy(alpha))
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha)) 
Example 8
Project: P3_image_processing   Author: latedude2   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_pairwise_distances(self):
        # Test that the distribution of pairwise distances is close to correct.
        np.random.seed(514)

        def random_ortho(dim):
            # Reference sampler: the U @ V factors of a Gaussian matrix's SVD
            # are Haar-distributed orthogonal matrices.
            u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim)))
            return np.dot(u, v)

        for dim in range(2, 6):
            def generate_test_statistics(rvs, N=1000, eps=1e-10):
                # Squared Frobenius distance between two independent draws.
                stats = np.array([
                    np.sum((rvs(dim=dim) - rvs(dim=dim))**2)
                    for _ in range(N)
                ])
                # Add a bit of noise to account for numeric accuracy.
                stats += np.random.uniform(-eps, eps, size=stats.shape)
                return stats

            expected = generate_test_statistics(random_ortho)
            actual = generate_test_statistics(scipy.stats.ortho_group.rvs)

            # Two-sample KS test: the two samples should be statistically
            # indistinguishable, so the p-value must exceed 0.05.
            _D, p = scipy.stats.ks_2samp(expected, actual)

            assert_array_less(.05, p)
Example 9
Project: P3_image_processing   Author: latedude2   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_haar(self):
        # Test that the eigenvalues, which lie on the unit circle in
        # the complex plane, are uncorrelated.

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        np.random.seed(514)  # Note that the test is sensitive to seed too
        xs = unitary_group.rvs(dim, size=samples)

        # The angles "x" of the eigenvalues should be uniformly distributed
        # Overall this seems to be a necessary but weak test of the distribution.
        eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
        x = np.arctan2(eigs.imag, eigs.real)
        # One-sample KS test against uniform(loc, scale), i.e. the uniform
        # law on [-pi, -pi + 2*pi] = [-pi, pi].
        res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
        assert_(res.pvalue > 0.05)
Example 10
Project: chainer   Author: chainer   File: test_uniform.py    MIT License 6 votes vote down vote up
def setUp_configure(self):
        """Configure the chainer Uniform distribution against scipy's uniform."""
        from scipy import stats
        self.dist = distributions.Uniform
        self.scipy_dist = stats.uniform

        # Every method exercised by the shared distribution test harness.
        self.test_targets = {
            'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf',
            'log_prob', 'mean', 'sample', 'stddev', 'support', 'variance'}

        if self.use_loc_scale:
            # loc/scale parameterisation: support is [loc, loc + scale].
            loc = numpy.random.uniform(
                -10, 0, self.shape).astype(numpy.float32)
            scale = numpy.random.uniform(
                0, 10, self.shape).astype(numpy.float32)
            self.params = {'loc': loc, 'scale': scale}
            self.scipy_params = {'loc': loc, 'scale': scale}
        else:
            # low/high parameterisation; scipy still expects loc/scale, so
            # translate with loc = low and scale = high - low.
            low = numpy.random.uniform(
                -10, 0, self.shape).astype(numpy.float32)
            high = numpy.random.uniform(
                low, low + 10, self.shape).astype(numpy.float32)
            self.params = {'low': low, 'high': high}
            self.scipy_params = {'loc': low, 'scale': high - low}

        self.support = '[low, high]'
Example 11
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_frozen_dirichlet(self):
        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)

        d = dirichlet(alpha)

        assert_equal(d.var(), dirichlet.var(alpha))
        assert_equal(d.mean(), dirichlet.mean(alpha))
        assert_equal(d.entropy(), dirichlet.entropy(alpha))
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha)) 
Example 12
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_pairwise_distances(self):
        # Test that the distribution of pairwise distances is close to correct.
        np.random.seed(514)

        def random_ortho(dim):
            # Reference sampler: the U @ V factors of a Gaussian matrix's SVD
            # are Haar-distributed orthogonal matrices.
            u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim)))
            return np.dot(u, v)

        for dim in range(2, 6):
            def generate_test_statistics(rvs, N=1000, eps=1e-10):
                # Squared Frobenius distance between two independent draws.
                stats = np.array([
                    np.sum((rvs(dim=dim) - rvs(dim=dim))**2)
                    for _ in range(N)
                ])
                # Add a bit of noise to account for numeric accuracy.
                stats += np.random.uniform(-eps, eps, size=stats.shape)
                return stats

            expected = generate_test_statistics(random_ortho)
            actual = generate_test_statistics(scipy.stats.ortho_group.rvs)

            # Two-sample KS test: the two samples should be statistically
            # indistinguishable, so the p-value must exceed 0.05.
            _D, p = scipy.stats.ks_2samp(expected, actual)

            assert_array_less(.05, p)
Example 13
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_multivariate.py    MIT License 6 votes vote down vote up
def test_haar(self):
        # Test that the eigenvalues, which lie on the unit circle in
        # the complex plane, are uncorrelated.

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        np.random.seed(514)  # Note that the test is sensitive to seed too
        xs = unitary_group.rvs(dim, size=samples)

        # The angles "x" of the eigenvalues should be uniformly distributed
        # Overall this seems to be a necessary but weak test of the distribution.
        eigs = np.vstack(scipy.linalg.eigvals(x) for x in xs)
        x = np.arctan2(eigs.imag, eigs.real)
        res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
        assert_(res.pvalue > 0.05) 
Example 14
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 6 votes vote down vote up
def testUniformPDF(self):
    """pdf/log_pdf of a batched Uniform match the closed-form density."""
    with self.test_session():
      # Five distributions on [-3, 11] plus a final one on [15, 20].
      a = tf.constant([-3.0] * 5 + [15.0])
      b = tf.constant([11.0] * 5 + [20.0])
      uniform = tf.contrib.distributions.Uniform(a=a, b=b)

      a_v = -3.0
      b_v = 11.0
      x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)

      # Closed-form density: 1/(b-a) on the support, 0 outside; the last
      # entry belongs to the [15, 20] member of the batch.
      expected_pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
      expected_pdf[x > b_v] = 0.0
      expected_pdf[x < a_v] = 0.0
      expected_pdf[5] = 1.0 / (20.0 - 15.0)

      pdf = uniform.pdf(x)
      self.assertAllClose(expected_pdf, pdf.eval())

      log_pdf = uniform.log_pdf(x)
      self.assertAllClose(np.log(expected_pdf), log_pdf.eval())
Example 15
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 6 votes vote down vote up
def testUniformCDF(self):
    """cdf/log_cdf of Uniform([1, 11]) match the closed form."""
    with self.test_session():
      batch_size = 6
      a = tf.constant([1.0] * batch_size)
      b = tf.constant([11.0] * batch_size)
      a_v = 1.0
      b_v = 11.0
      x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)

      uniform = tf.contrib.distributions.Uniform(a=a, b=b)

      # Closed-form CDF: linear ramp on [a, b], clamped to 0 below a and
      # to 1 at/above b.
      expected_cdf = (x - a_v) / (b_v - a_v)
      expected_cdf[x >= b_v] = 1
      expected_cdf[x < a_v] = 0

      cdf = uniform.cdf(x)
      self.assertAllClose(expected_cdf, cdf.eval())

      log_cdf = uniform.log_cdf(x)
      self.assertAllClose(np.log(expected_cdf), log_cdf.eval())
Example 16
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 6 votes vote down vote up
def testUniformSample(self):
    """Samples stay inside the support and match the analytic means."""
    with self.test_session():
      # Two lower bounds broadcast against a single scalar upper bound.
      a = tf.constant([3.0, 4.0])
      b = tf.constant(13.0)
      a1_v = 3.0
      a2_v = 4.0
      b_v = 13.0
      n = tf.constant(100000)
      uniform = tf.contrib.distributions.Uniform(a=a, b=b)

      samples = uniform.sample(n, seed=137)
      sample_values = samples.eval()
      self.assertEqual(sample_values.shape, (100000, 2))
      # Empirical means should approach the analytic mean (a + b) / 2.
      self.assertAllClose(sample_values[::, 0].mean(), (b_v + a1_v) / 2,
                          atol=1e-2)
      self.assertAllClose(sample_values[::, 1].mean(), (b_v + a2_v) / 2,
                          atol=1e-2)
      # NOTE(review): the upper-bound check tests the WHOLE array against
      # b_v rather than a single column; this is equivalent here only
      # because both batch members share the same upper bound.
      self.assertFalse(np.any(sample_values[::, 0] < a1_v) or np.any(
          sample_values >= b_v))
      self.assertFalse(np.any(sample_values[::, 1] < a2_v) or np.any(
          sample_values >= b_v))
Example 17
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 6 votes vote down vote up
def testUniformNans(self):
    """NaN inputs must yield NaN densities; finite inputs must not."""
    with self.test_session():
      a = 10.0
      b = [11.0, 100.0]
      uniform = tf.contrib.distributions.Uniform(a=a, b=b)

      clean = tf.constant(1.0)
      # 0/0 produces NaN; sanity-check that before using it.
      broken = tf.constant(0.0) / tf.constant(0.0)
      self.assertTrue(tf.is_nan(broken).eval())
      mixed_input = tf.stack([clean, broken])

      pdf = uniform.pdf(mixed_input)

      nan_mask = tf.is_nan(pdf).eval()
      self.assertFalse(nan_mask[0])
      self.assertTrue(nan_mask[1])
Example 18
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 6 votes vote down vote up
def testUniformSampleWithShape(self):
    """pdf of samples has the broadcast batch density for any sample shape."""
    with self.test_session():
      a = 10.0
      b = [11.0, 20.0]
      uniform = tf.contrib.distributions.Uniform(a, b)

      # Density is 1.0 for the [10, 11] member and 0.1 for [10, 20];
      # samples always fall inside the support, so the pdf is constant.
      pdf = uniform.pdf(uniform.sample((2, 3)))
      expected_pdf = [[[1.0, 0.1]] * 3] * 2
      self.assertAllClose(expected_pdf, pdf.eval())

      # A scalar sample shape yields one density per batch member.
      pdf = uniform.pdf(uniform.sample())
      self.assertAllClose([1.0, 0.1], pdf.eval())
Example 19
Project: Effective-Quadratures   Author: Effective-Quadratures   File: uniform.py    GNU Lesser General Public License v2.1 6 votes vote down vote up
def get_cdf(self, points=None):
        """
        A uniform cumulative density function.

        :param points:
            Matrix of points at which the cumulative density is evaluated.
        :return:
            Cumulative density values along the support of the uniform distribution.
        :raises ValueError:
            If ``points`` is None.
        """
        if points is not None:
            # Delegate to the underlying (frozen) distribution object.
            return self.parent.cdf(points)
        # Bug fix: the original `raise(ValueError, msg)` raised a tuple,
        # which is a TypeError in Python 3 — raise a real ValueError.
        raise ValueError('Please provide an input for the get_cdf method.')
Example 20
Project: Effective-Quadratures   Author: Effective-Quadratures   File: uniform.py    GNU Lesser General Public License v2.1 6 votes vote down vote up
def get_pdf(self, points=None):
        """
        A uniform probability density function.

        :param points:
            Matrix of points at which the probability density is evaluated.
        :return:
            Probability density values along the support of the uniform distribution.
        :raises ValueError:
            If ``points`` is None.
        """
        if points is not None:
            # Delegate to the underlying (frozen) distribution object.
            return self.parent.pdf(points)
        # Bug fix: the original `raise(ValueError, msg)` raised a tuple,
        # which is a TypeError in Python 3 — raise a real ValueError.
        raise ValueError('Please provide an input for the get_pdf method.')
Example 21
Project: psychrometric-chart-makeover   Author: buds-lab   File: categorical.py    MIT License 6 votes vote down vote up
def __init__(self, x, y, hue, data, order, hue_order,
                 jitter, dodge, orient, color, palette):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, 1)

        # Set object attributes
        self.dodge = dodge
        self.width = .8

        # `jitter=True` compares equal to 1 and selects the default
        # half-width; any other numeric value is used directly.
        jlim = 0.1 if jitter == 1 else float(jitter)
        # Shrink the jitter so dodged hue levels do not overlap.
        if self.hue_names is not None and dodge:
            jlim /= len(self.hue_names)
        # rvs draws uniformly from [-jlim, -jlim + 2*jlim) = [-jlim, jlim).
        self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
Example 22
Project: gmaneLegacy   Author: ttm   File: ksStatistics.py    The Unlicense 5 votes vote down vote up
def dnnUni(lb,rb,lb2,rb2,lbd=-1,rbd=4,NE=1000000):
    """Max absolute CDF difference (KS distance) between two uniform laws.

    The first distribution is uniform on [lb, rb], the second on [lb2, rb2];
    the CDFs are compared on NE grid points spanning [lbd, rbd].
    """
    # scipy's uniform takes (loc, scale), i.e. support [loc, loc + scale].
    first = st.uniform(lb, rb - lb)
    second = st.uniform(lb2, rb2 - lb2)
    grid = n.linspace(lbd, rbd, NE)
    # KS statistic: supremum of |F1 - F2| approximated on the grid.
    return n.abs(first.cdf(grid) - second.cdf(grid)).max()
Example 23
Project: gmaneLegacy   Author: ttm   File: ksStatistics.py    The Unlicense 5 votes vote down vote up
def makeUniformDifferencesSamples2(self,NC,NB,table_dir):
        """Tabulate KS statistics for U[0,1] vs U[-0.1,1.1] at growing sample sizes.

        Runs NC comparisons at each sample size in xx, summarises the
        distribution of the c statistic, and writes a LaTeX table to
        table_dir.  NB is forwarded to kolmogorovSmirnovDistance_.
        """
        #xx=n.arange(.7,2.7,0.2)
        # Sample sizes: 4 points log-spaced between 10^2 and 10^5.
        xx=n.logspace(2,5,4)
        labels=xx
        # For each sample size, NC comparisons of U[-0.1, 1.1] vs U[0, 1].
        distsAllW=[[kolmogorovSmirnovDistance_(
                n.random.random(xxx)*1.2-0.1,
                n.random.random(xxx), NB) for i in range(NC)]
                for xxx in xx]
        # First tuple element: the c statistic; third: the D_{F,F'} distance.
        distsAllWC=[[i[0] for i in j] for j in distsAllW]
        dnns=[[i[2] for i in j] for j in distsAllW]
        # Per-sample-size summary: mean, std, median, min3 and max3 triples.
        data=[(n.mean(dd),n.std(dd),n.median(dd),
            ("{:.3f},"*3)[:-1].format(*min3(dd)),
            ("{:.3f},"*3)[:-1].format(*max3(dd))) for dd in distsAllWC]
        data_=[]
        i=0
        for dists in distsAllWC:
            line=[]
            dnn=dnns[i]
            kline=[n.mean(dnn), n.std(dnn)]
            # Rejection rate of the null at each critical value calpha.
            for calpha in self.calphas:
                line.append(sum([dist>calpha for dist in dists])/NC)
            data_.append(list(data[i])+kline+line); i+=1
        # Analytic KS limit between the two uniform laws (quoted in caption).
        diffU2=dnnUni(0,1,-.1,1.1)
        caption=r"""Measurements of $c$ through simulations
        with fixed uniform distributions but different number of samples.
        One distribution is uniform in [0,1].
        The other distribution is uniform in [-0.1,1.1].
        The KS statistic of these distributions converges
        to {:.4f} as sample sizes increases.""".format(diffU2)
        labelsh=[r"$n=n'$",r"$\mu(c)$",r"$\sigma(c)$","m(c)","min(c)","max(c)",r"$\mu(D_{F,F'})$",r"$\sigma(D_{F,F'})$"]
        labelsh+=[r"$\overline{{C({})}}$".format(alpha) for alpha in self.alphas]
        fname="tabUniformDiffSamples2.tex"
        lTable(labels,labelsh,data_,caption,table_dir+fname,"kolmSamp_")
        i=0
        check("table {} written at {}".format(fname,table_dir))
Example 24
Project: gmaneLegacy   Author: ttm   File: ksStatistics.py    The Unlicense 5 votes vote down vote up
def makeUniformDifferencesSamples(self,NC,NB,table_dir):
        """Tabulate KS statistics for U[0,1] vs U[0.05,1.05] at growing sample sizes.

        Same structure as makeUniformDifferencesSamples2, but the second
        distribution is shifted by 0.05 instead of widened.  Writes a LaTeX
        table to table_dir.
        """
        #xx=n.arange(.7,2.7,0.2)
        # Sample sizes: 4 points log-spaced between 10^2 and 10^5.
        xx=n.logspace(2,5,4)
        labels=xx
        # For each sample size, NC comparisons of U[0.05, 1.05] vs U[0, 1].
        distsAllW=[[kolmogorovSmirnovDistance_(
                n.random.random(xxx)+0.05,n.random.random(xxx),NB) for i in range(NC)]

                for xxx in xx]
        # First tuple element: the c statistic; third: the D_{F,F'} distance.
        distsAllWC=[[i[0] for i in j] for j in distsAllW]
        dnns=[[i[2] for i in j] for j in distsAllW]
        # Per-sample-size summary: mean, std, median, min3 and max3 triples.
        data=[(n.mean(dd),n.std(dd),n.median(dd),
            ("{:.3f},"*3)[:-1].format(*min3(dd)),
            ("{:.3f},"*3)[:-1].format(*max3(dd))) for dd in distsAllWC]
        data_=[]
        i=0
        for dists in distsAllWC:
            line=[]
            dnn=dnns[i]
            kline=[n.mean(dnn), n.std(dnn)]
            # Rejection rate of the null at each critical value calpha.
            for calpha in self.calphas:
                line.append(sum([dist>calpha for dist in dists])/NC)
            data_.append(list(data[i])+kline+line); i+=1
        # Analytic KS limit between the two uniform laws (quoted in caption).
        diffU=dnnUni(0,1,0.05,1.05)
        caption=r"""Measurements of $c$ through simulations
        with fixed uniform distributions but different number of samples.
        One distribution is uniform in [0,1].
        The other distribution is uniform in [0.05,1.05].
        The KS statistic of these distributions converges
        to {:.4f} as sample sizes increases.""".format(diffU)
        labelsh=[r"$n=n'$",r"$\mu(c)$",r"$\sigma(c)$","m(c)","min(c)","max(c)",r"$\mu(D_{F,F'})$",r"$\sigma(D_{F,F'})$"]
        labelsh+=[r"$\overline{{C({})}}$".format(alpha) for alpha in self.alphas]
        fname="tabUniformDiffSamples.tex"
        lTable(labels,labelsh,data_,caption,table_dir+fname,"kolmSamp_")
        i=0
        check("table {} written at {}".format(fname,table_dir))
Example 25
Project: gmaneLegacy   Author: ttm   File: ksStatistics.py    The Unlicense 5 votes vote down vote up
def makeUniformVerification(self,NC,NE,NE2,NB,table_dir):
        """Verify null-hypothesis rejection rates for identically distributed uniforms.

        Draws NC pairs of samples (sizes NE and NE2) from three identical
        uniform laws, counts rejections at each critical value, and writes
        a LaTeX table to table_dir.  NB is accepted for interface symmetry
        with the sibling methods but is not used here.
        """
        check("antes")
        # c statistics for U[0, 1) vs U[0, 1).
        dists=[kolmogorovSmirnovDistance(
                n.random.random(NE),n.random.random(NE2))
                for i in range(NC)]; check("uniforme1")
        # c statistics for U[2, 4) vs U[2, 4).
        dists2=[kolmogorovSmirnovDistance(
                2*n.random.random(NE)+2,2*n.random.random(NE2)+2)
                for i in range(NC)]; check("uniforme2")

        # c statistics for U[4, 7) vs U[4, 7).
        dists3=[kolmogorovSmirnovDistance(
                3*n.random.random(NE)+4,3*n.random.random(NE2)+4)
                for i in range(NC)]; check("uniforme3")
        labelsh=(r"$\alpha N_c$",r"$\alpha$",r"$c(\alpha)$",r"$|C_1(\alpha)|$",r"$|C_2(\alpha)|$",r"$|C_3(\alpha)|$")
        # NOTE(review): the caption describes c_3 via "$\mu$ and $\sigma$"
        # but is formatted with the interval bounds (4, 10); likely a
        # leftover from an earlier parameterisation — confirm before reuse.
        caption=r"""The theoretical maximum number $\alpha N_c$ of rejections
        of the null hypothesis for critical values of $\alpha$.
        The $c_1$ values were calculated using simulations of uniform distributions within $[{},{})$.
        The $c_2$ values were calculated using simulations of uniform distributions within $[{},{})$.
        The $c_3$ values were calculated using simulations of uniform distributions with $\mu={}$ and $\sigma={}$.
        Over all $N_c$ comparisons,
         $\mu(c_1)={:.4f}$ and $\sigma(c_1)={:.4f}$,
         $\mu(c_2)={:.4f}$ and $\sigma(c_2)={:.4f}$,
         $\mu(c_3)={:.4f}$ and $\sigma(c_3)={:.4f}$ .
        """.format(
                0,1,
                2,6,
                4,10,
                   n.mean(dists ),n.std(dists ),
                   n.mean(dists2),n.std(dists2),
                   n.mean(dists3),n.std(dists3),
                   )
        data=[]
        labels=[]
        # Count how many of the NC comparisons exceed each critical value.
        for alpha, calpha in zip(self.alphas,self.calphas):
            n1=sum([dist>calpha for dist in dists])
            n2=sum([dist>calpha for dist in dists2])
            n3=sum([dist>calpha for dist in dists3])
            data.append((alpha,calpha,n1,n2,n3))
            labels.append(alpha*NC)
        fname="tabUniformNull.tex"
        lTable(labels,labelsh,data,caption,table_dir+fname)
        print("table {} written at {}".format(fname,table_dir))
Example 26
Project: gmaneLegacy   Author: ttm   File: ksStatistics.py    The Unlicense 5 votes vote down vote up
def makeUniformDifferencesDispersion(self,NC,NE,NE2,NB,table_dir):
        """Tabulate KS statistics for U[0, b) vs U[0, 1) at varying spreads b.

        For each spread factor in xx, runs NC comparisons (sample sizes NE
        and NE2) and writes the summary as a LaTeX table to table_dir.
        NB is accepted for interface symmetry but is not used here.
        """
        # Spread factors of the first distribution's support, 0.70..1.30.
        xx=n.arange(.70,1.35,0.05)
        labels=xx
        # NC comparisons of U[0, xxx) vs U[0, 1) for each spread xxx.
        distsAll=[[kolmogorovSmirnovDistance_(
                xxx*n.random.random(NE),n.random.random(NE2)) for i in range(NC)]
                for xxx in xx]
        # First tuple element: the c statistic; third: the D_{F,F'} distance.
        distsAllC=[[i[0] for i in j] for j in distsAll]
        dnns=[[i[2] for i in j] for j in distsAll]
        # Per-spread summary: mean, std, min3 and max3 triples.
        data=[(n.mean(dd),n.std(dd),
            ("{:.3f},"*3)[:-1].format(*min3(dd)),
            ("{:.3f},"*3)[:-1].format(*max3(dd)) ) for dd in distsAllC]
        data_=[]
        i=0
        for dists in distsAllC:
            line=[]
            dnn=dnns[i]
            # Analytic KS limit plus empirical D_{F,F'} mean and std.
            kline=[dnnUni(0,xx[i],0,1,0,1.5), n.mean(dnn), n.std(dnn)]
            # Rejection rate of the null at each critical value calpha.
            for calpha in self.calphas:
                line.append(sum([dist>calpha for dist in dists])/NC)
            data_.append(list(data[i])+kline+line); i+=1
        caption=r"""Measurements of $c$ through simulations
        with uniform distributions.
        One uniform distribution has the fixed domain $[0,1)$.
        The other uniform distribution in each comparison
        is also centered around 0.5,
        but spread over $b=b_u-b_l$ there $b_l$ and $b_u$ are the lower and upper boudaries."""

        labelsh=[r"$b$",r"$\mu(c)$",r"$\sigma(c)$","min(c)","max(c)","$D$",r"$\mu(D_{F,F'})$",r"$\sigma(D_{F,F'})$"]
        labelsh+=[r"$\overline{{C({})}}$".format(alpha) for alpha in self.alphas]
        fname="tabUniformDiffSpread.tex"
        lTable(labels,labelsh,data_,caption,table_dir+fname,"kolmDiff3_")
        i=0
        check("table {} written at {}".format(fname,table_dir))
Example 27
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 5 votes vote down vote up
def gen_data(ctr, size=1024*2):
    """Binary (2, size) matrix whose row-i entries are 1.0 with probability ctr[i]."""
    rates = array(ctr)
    # One uniform draw per cell, compared against each row's rate.
    draws = uniform(0, 1).rvs((2, size))
    out = zeros(shape=(2, size))
    out[where(draws < rates[:, newaxis])] = 1.0
    return out
Example 28
Project: better_uniform   Author: j-faria   File: better_uniform.py    MIT License 5 votes vote down vote up
def buniform(a=0, b=1):
    """
    A uniform continuous random variable on [a, b].

    With no arguments the distribution is uniform on [0, 1].  scipy's
    (loc, scale) parameterisation is translated so that loc=a and
    scale=b-a, and the returned frozen object exposes the usual
    scipy.stats distribution methods.
    """
    generator = _stats.uniform
    generator.name = 'uniform'
    return _frozen(generator, loc=a, scale=b - a)
Example 29
Project: vnpy_crypto   Author: birforce   File: dgp_examples.py    MIT License 5 votes vote down vote up
def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
        # Default the x-distribution to the standard uniform on [0, 1].
        if distr_x is None:
            from scipy import stats
            distr_x = stats.uniform
        # Fixed noise scale and mean function for this example DGP.
        self.s_noise = 0.15
        self.func = fg1eu
        # NOTE(review): super(self.__class__, ...) recurses infinitely if
        # this class is ever subclassed; prefer naming the class explicitly.
        super(self.__class__, self).__init__(nobs=nobs, x=x,
                                             distr_x=distr_x,
                                             distr_noise=distr_noise)
Example 30
Project: vnpy_crypto   Author: birforce   File: dgp_examples.py    MIT License 5 votes vote down vote up
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        """Example DGP based on func1 with x ~ uniform(-2, 4), i.e. on [-2, 2].

        If ``x`` is given, ``nobs`` is taken from its first dimension.
        """
        if x is None:
            # Bug fix: the original `if x is None and distr_x is None ... else`
            # dereferenced x.shape when x was None but distr_x was supplied,
            # raising AttributeError.  Only read x.shape when x exists.
            if distr_x is None:
                from scipy import stats
                # scipy uniform(loc, scale): support is [-2, -2 + 4] = [-2, 2].
                distr_x = stats.uniform(-2, 4)
        else:
            nobs = x.shape[0]
        self.s_noise = 2.
        self.func = func1
        super(UnivariateFunc1, self).__init__(nobs=nobs, x=x,
                                             distr_x=distr_x,
                                             distr_noise=distr_noise)
Example 31
Project: ble5-nrf52-mac   Author: tomasero   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_K_and_K_minus_1_calls_equal(self):
        # Test that calls with K and K-1 entries yield the same results.

        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)

        d = dirichlet(alpha)
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x)) 
Example 32
Project: ble5-nrf52-mac   Author: tomasero   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors as matrix work
        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)

        num_tests = 10
        num_multiple = 5
        xm = None
        for i in range(num_tests):
            for m in range(num_multiple):
                x = np.random.uniform(10e-10, 100, n)
                x /= np.sum(x)
                if xm is not None:
                    xm = np.vstack((xm, x))
                else:
                    xm = x
            rm = d.pdf(xm.T)
            rs = None
            for xs in xm:
                r = d.pdf(xs)
                if rs is not None:
                    rs = np.append(rs, r)
                else:
                    rs = r
            assert_array_almost_equal(rm, rs) 
Example 33
Project: ble5-nrf52-mac   Author: tomasero   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_2D_dirichlet_is_beta(self):
        np.random.seed(2846)

        alpha = np.random.uniform(10e-10, 100, 2)
        d = dirichlet(alpha)
        b = beta(alpha[0], alpha[1])

        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, 2)
            x /= np.sum(x)
            assert_almost_equal(b.pdf(x), d.pdf([x]))

        assert_almost_equal(b.mean(), d.mean()[0])
        assert_almost_equal(b.var(), d.var()[0]) 
Example 34
Project: PyDREAM   Author: LoLab-VU   File: example_sample_robertson_with_dream.py    GNU General Public License v3.0 5 votes vote down vote up
def likelihood(parameter_vector):
    """Log-likelihood of sampled PySB rate parameters (proposed in log10 space)."""
    # Pair each sampled parameter name with its proposed (log10) value.
    param_dict = {pname: pvalue for pname, pvalue in zip(pysb_sampled_parameter_names, parameter_vector)}

    # Push the proposal into the model, converting back to linear scale.
    for pname, pvalue in param_dict.items():
        model.parameters[pname].value = 10**(pvalue)

    # Simulate experimentally measured Ctotal values.
    solver.run()

    # Sum the pointwise log-densities of the simulated trajectory under
    # the experimental likelihood.
    logp_ctotal = np.sum(like_ctot.logpdf(solver.yobs['C_total']))

    # Integrator failures surface as NaN; map them to an impossible proposal.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf

    return logp_ctotal


# Add vector of PySB rate parameters to be sampled as unobserved random variables to DREAM with uniform priors. 
Example 35
Project: PyDREAM   Author: LoLab-VU   File: test_models.py    GNU General Public License v3.0 5 votes vote down vote up
def multidmodel_uniform():
    """Multidimensional model with uniform priors.

    Returns:
        tuple: ``([SampledParam], likelihood)`` — one vector-valued uniform
        parameter whose prior spans ``[lower, upper]`` per dimension, plus
        the likelihood function.
    """
    lower = np.array([-5, -9, 5, 3])
    upper = np.array([10, 2, 7, 8])
    # scipy's uniform takes (loc, scale); the scale is the width upper - lower.
    # Renamed from `range` to avoid shadowing the builtin range().
    span = upper - lower

    x = SampledParam(uniform, loc=lower, scale=span)
    like = simple_likelihood

    return [x], like
Example 36
Project: distcan   Author: sglyon   File: univariate.py    MIT License 5 votes vote down vote up
def __init__(self, a, b):
        """Store the endpoints and build the underlying scipy distribution.

        :param a: passed to ``st.uniform`` as ``loc``.
        :param b: passed to ``st.uniform`` as ``scale``.

        NOTE(review): ``st.uniform(loc=a, scale=b)`` has support [a, a+b].
        If ``b`` is meant to be the upper bound of [a, b], the scale should
        be ``b - a`` — confirm the intended parameterization against the
        class documentation.
        """
        self.a = a
        self.b = b

        # set dist before calling super's __init__
        self.dist = st.uniform(loc=a, scale=b)
        super(Uniform, self).__init__()
Example 37
Project: P3_image_processing   Author: latedude2   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_K_and_K_minus_1_calls_equal(self):
        # Test that calls with K and K-1 entries yield the same results.

        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)

        d = dirichlet(alpha)
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x)) 
Example 38
Project: P3_image_processing   Author: latedude2   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors as matrix work
        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)

        num_tests = 10
        num_multiple = 5
        xm = None
        for i in range(num_tests):
            for m in range(num_multiple):
                x = np.random.uniform(10e-10, 100, n)
                x /= np.sum(x)
                if xm is not None:
                    xm = np.vstack((xm, x))
                else:
                    xm = x
            rm = d.pdf(xm.T)
            rs = None
            for xs in xm:
                r = d.pdf(xs)
                if rs is not None:
                    rs = np.append(rs, r)
                else:
                    rs = r
            assert_array_almost_equal(rm, rs) 
Example 39
Project: P3_image_processing   Author: latedude2   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_2D_dirichlet_is_beta(self):
        np.random.seed(2846)

        alpha = np.random.uniform(10e-10, 100, 2)
        d = dirichlet(alpha)
        b = beta(alpha[0], alpha[1])

        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, 2)
            x /= np.sum(x)
            assert_almost_equal(b.pdf(x), d.pdf([x]))

        assert_almost_equal(b.mean(), d.mean()[0])
        assert_almost_equal(b.var(), d.var()[0]) 
Example 40
Project: GAlibrate   Author: blakeaw   File: sampled_parameter.py    MIT License 5 votes vote down vote up
def __init__(self, name, loc, width):
        """Initialize a parameter with a uniform prior.

        Args:
            name (str, int): set the name attribute.
            loc (float): lower bound of the uniform prior.
            width (float): width of the prior, i.e. the prior covers
                [loc, loc + width].
        """
        self.name = name
        self.loc = loc
        self.width = width
        # scipy's uniform(loc, scale) has support [loc, loc + width]
        self._dist = uniform(loc, width)
Example 41
Project: pyISC   Author: STREAM3   File: test_SklearnOutlierDetection.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_outlier_detection(self):
        """Fit the sklearn-style detector and check its threshold and accuracy."""
        print("Start of test")
        sample_count = 1000
        gauss = stats.norm(0, 1)

        # the last 100 rows are the planted outliers
        labels = np.ones((sample_count,))
        labels[-100:] = -1

        base = gauss.rvs(sample_count)
        data = np.c_[base * 5, base + gauss.rvs(sample_count) * 2]

        uni = stats.uniform(-10, 10)

        data[-100:] = np.c_[uni.rvs(100), uni.rvs(100)]

        detector = pyisc.SklearnOutlierDetector(
            100.0 / sample_count,
            pyisc.P_Gaussian([0, 1])
        )

        detector.fit(data, np.array([1] * len(data)))

        self.assertLess(detector.threshold_, 0.35)
        self.assertGreater(detector.threshold_, 0.25)

        predicted = detector.predict(data, np.array([1] * len(data)))

        hit_rate = sum(labels == predicted) / float(sample_count)

        print("accuracy", hit_rate)
        self.assertGreater(hit_rate, 0.85)
Example 42
Project: chainer   Author: chainer   File: test_uniform.py    MIT License 5 votes vote down vote up
def check_initializer_statistics(self, backend_config, n):
        """Draw ``n`` independent initializations and KS-test each element.

        Every scalar position in the initialized array is treated as a sample
        of size ``n`` and tested against the uniform distribution implied by
        the initializer's scale/fan settings.
        """
        from scipy import stats

        xp = backend_config.xp
        ws = numpy.empty((n,) + self.shape, dtype=self.dtype)
        ws = backend_config.get_array(ws)
        for i in range(n):
            initializer = self.target(**self.target_kwargs)
            # squeeze yields a view, so the initializer fills ws[i] in place
            initializer(xp.squeeze(ws[i:i+1], axis=0))

        # expected half-width of the uniform support, per the fan mode
        fan = self.fan_option or default_fan.get(self.target)
        expected_max = self.scale or default_scale.get(self.target) or 1.
        expected_max *= default_coeff.get(self.target) or 1.
        if fan is not None:
            if fan == 'fan_in':
                expected_max *= math.sqrt(1. / self.fans[0])
            elif fan == 'fan_out':
                expected_max *= math.sqrt(1. / self.fans[1])
            elif fan == 'fan_avg':
                expected_max *= math.sqrt(2. / sum(self.fans))
            else:
                assert False

        # one KS test per element position; Bonferroni-correct the level
        sampless = cuda.to_cpu(ws.reshape(n, -1).T)
        alpha = 0.01 / len(sampless)
        for samples in sampless:
            _, p = stats.kstest(
                samples,
                stats.uniform(-expected_max, 2*expected_max).cdf
            )
            assert p >= alpha
Example 43
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_K_and_K_minus_1_calls_equal(self):
        # Test that calls with K and K-1 entries yield the same results.

        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)

        d = dirichlet(alpha)
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x)) 
Example 44
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors as matrix work
        np.random.seed(2846)

        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)

        num_tests = 10
        num_multiple = 5
        xm = None
        for i in range(num_tests):
            for m in range(num_multiple):
                x = np.random.uniform(10e-10, 100, n)
                x /= np.sum(x)
                if xm is not None:
                    xm = np.vstack((xm, x))
                else:
                    xm = x
            rm = d.pdf(xm.T)
            rs = None
            for xs in xm:
                r = d.pdf(xs)
                if rs is not None:
                    rs = np.append(rs, r)
                else:
                    rs = r
            assert_array_almost_equal(rm, rs) 
Example 45
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_multivariate.py    MIT License 5 votes vote down vote up
def test_2D_dirichlet_is_beta(self):
        np.random.seed(2846)

        alpha = np.random.uniform(10e-10, 100, 2)
        d = dirichlet(alpha)
        b = beta(alpha[0], alpha[1])

        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, 2)
            x /= np.sum(x)
            assert_almost_equal(b.pdf(x), d.pdf([x]))

        assert_almost_equal(b.mean(), d.mean()[0])
        assert_almost_equal(b.var(), d.var()[0]) 
Example 46
Project: Pytorch_Quantize_impls   Author: Enderdead   File: parameters.py    MIT License 5 votes vote down vote up
def __init__(self, name, min, max):
        """Uniform hyper-parameter generator.

        NOTE(review): ``uniform(min, max)`` is scipy's ``(loc, scale)``, so
        the sampling range is [min, min + max], not [min, max]. If [min, max]
        is intended, the scale should be ``max - min`` — confirm against
        callers.
        """
        Parameter.__init__(self, name)
        self.generator = uniform(min, max)
        self.params = [min, max]
Example 47
Project: Pytorch_Quantize_impls   Author: Enderdead   File: parameters.py    MIT License 5 votes vote down vote up
def __init__(self, name, min, max):
        """Uniform hyper-parameter generator.

        NOTE(review): ``uniform(min, max)`` is scipy's ``(loc, scale)``, so
        the sampling range is [min, min + max], not [min, max]. If [min, max]
        is intended, the scale should be ``max - min`` — confirm against
        callers.
        """
        Parameter.__init__(self, name)
        self.generator = uniform(min, max)
        self.params = [min, max]
Example 48
Project: Pytorch_Quantize_impls   Author: Enderdead   File: parameters.py    MIT License 5 votes vote down vote up
def __init__(self, name, min, max):
        """Log-uniform hyper-parameter sampled in log10 space.

        NOTE(review): ``uniform(a, b)`` is scipy's ``(loc, scale)``, so draws
        fall in [log10(min), log10(min) + log10(max)]. If a log-range of
        [log10(min), log10(max)] is intended, the scale should be the
        difference of the logs — confirm against callers.
        """
        Parameter.__init__(self, name)
        self.params = [min, max]
        self.generator = uniform(log10(min), log10(max))
Example 49
Project: brainiak   Author: brainiak   File: test_hpo.py    Apache License 2.0 5 votes vote down vote up
def test_simple_hpo():
    """End-to-end smoke test of the hyper-parameter optimizer."""

    def objective(args):
        val = args['x']
        return val * val

    space = {'x': {'dist': st.uniform(loc=-10., scale=20), 'lo': -10., 'hi': 10.}}
    trials = []

    # fmin must keep appending to an existing trials list across calls
    best = fmin(loss_fn=objective, space=space, max_evals=40, trials=trials)
    best = fmin(loss_fn=objective, space=space, max_evals=10, trials=trials)

    assert len(trials) == 50, "HPO continuation trials not working"

    # Test verbose flag
    best = fmin(loss_fn=objective, space=space, max_evals=10, trials=trials)

    losses = np.array([tr['loss'] for tr in trials])
    np.testing.assert_array_less(losses, 100.)

    xs = np.array([tr['x'] for tr in trials])
    np.testing.assert_array_less(np.abs(xs), 10.)

    assert best['loss'] < 100., "HPO out of range"
    assert np.abs(best['x']) < 10., "HPO out of range"

    # An unrecognized string distribution name must raise ValueError
    space_bad = {'x': {'dist': 'normal', 'mu': 0., 'sigma': 1.}}
    trials_bad = []
    with pytest.raises(ValueError) as excinfo:
        fmin(loss_fn=objective, space=space_bad, max_evals=40, trials=trials_bad)
    assert "Unknown distribution type for variable" in str(excinfo.value)

    # A frozen scipy distribution object is accepted directly
    space_norm = {'x': {'dist': st.norm(loc=0., scale=1.)}}
    trials_norm = []
    fmin(loss_fn=objective, space=space_norm, max_evals=40, trials=trials_norm)
Example 50
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformRange(self):
    # range() must report b - a for scalar endpoints
    with self.test_session():
      lo = 3.0
      hi = 10.0
      dist = tf.contrib.distributions.Uniform(a=lo, b=hi)
      self.assertAllClose(lo, dist.a.eval())
      self.assertAllClose(hi, dist.b.eval())
      self.assertAllClose(hi - lo, dist.range().eval())
Example 51
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformShape(self):
    with self.test_session():
      low = tf.constant([-3.0] * 5)
      high = tf.constant(11.0)
      dist = tf.contrib.distributions.Uniform(a=low, b=high)

      # batch shape broadcasts from the vector-valued lower endpoint;
      # events are scalar
      self.assertEqual(dist.batch_shape().eval(), (5,))
      self.assertEqual(dist.get_batch_shape(), tf.TensorShape([5]))
      self.assertAllEqual(dist.event_shape().eval(), [])
      self.assertEqual(dist.get_event_shape(), tf.TensorShape([]))
Example 52
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformPDFWithScalarEndpoint(self):
    with self.test_session():
      low = tf.constant([0.0, 5.0])
      high = tf.constant(10.0)
      dist = tf.contrib.distributions.Uniform(a=low, b=high)

      points = np.array([0.0, 8.0], dtype=np.float32)
      # density is 1 / (b - a) for each batch member
      want = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])

      density = dist.pdf(points)
      self.assertAllClose(want, density.eval())
Example 53
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformAssertMaxGtMin(self):
    with self.test_session():
      lows = np.array([1.0, 1.0, 1.0], dtype=np.float32)
      highs = np.array([1.0, 2.0, 3.0], dtype=np.float32)
      # validate_args=True defers the a < b check to graph execution
      dist = tf.contrib.distributions.Uniform(
          a=lows, b=highs, validate_args=True)

      with self.assertRaisesWithPredicateMatch(tf.errors.InvalidArgumentError,
                                               "x < y"):
        dist.a.eval()
Example 54
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def _testUniformSampleMultiDimensional(self):
    # DISABLED: Please enable this test once b/issues/30149644 is resolved.
    # Draws a large sample from a batched uniform and checks the support
    # bounds and the per-batch sample mean.
    with self.test_session():
      batch_size = 2
      a_v = [3.0, 22.0]
      b_v = [13.0, 35.0]
      a = tf.constant([a_v] * batch_size)
      b = tf.constant([b_v] * batch_size)

      uniform = tf.contrib.distributions.Uniform(a=a, b=b)

      n_v = 100000
      n = tf.constant(n_v)
      samples = uniform.sample(n)
      self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))

      sample_values = samples.eval()

      # every draw must lie in [a, b)
      self.assertFalse(np.any(sample_values[:, 0, 0] < a_v[0]) or np.any(
          sample_values[:, 0, 0] >= b_v[0]))
      self.assertFalse(np.any(sample_values[:, 0, 1] < a_v[1]) or np.any(
          sample_values[:, 0, 1] >= b_v[1]))

      # the sample mean should approach the midpoint (a + b) / 2
      self.assertAllClose(sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2,
                          atol=1e-2)
      self.assertAllClose(sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2,
                          atol=1e-2)
Example 55
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformMean(self):
    with self.test_session():
      lo = 10.0
      hi = 100.0
      tf_dist = tf.contrib.distributions.Uniform(a=lo, b=hi)
      # scipy's uniform is parameterized by (loc, scale) = (a, b - a)
      sp_dist = stats.uniform(loc=lo, scale=hi - lo)
      self.assertAllClose(tf_dist.mean().eval(), sp_dist.mean())
Example 56
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformVariance(self):
    with self.test_session():
      lo = 10.0
      hi = 100.0
      tf_dist = tf.contrib.distributions.Uniform(a=lo, b=hi)
      # reference variance from the equivalent scipy distribution
      ref = stats.uniform(loc=lo, scale=hi - lo)
      self.assertAllClose(tf_dist.variance().eval(), ref.var())
Example 57
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformSamplePdf(self):
    with self.test_session():
      low = 10.0
      highs = [11.0, 100.0]
      dist = tf.contrib.distributions.Uniform(low, highs)
      # every sampled point must have strictly positive density
      all_positive = tf.reduce_all(dist.pdf(dist.sample(10)) > 0)
      self.assertTrue(all_positive.eval())
Example 58
Project: deep_image_model   Author: tobegit3hub   File: uniform_test.py    Apache License 2.0 5 votes vote down vote up
def testUniformBroadcasting(self):
    with self.test_session():
      low = 10.0
      highs = [11.0, 20.0]
      dist = tf.contrib.distributions.Uniform(low, highs)

      # points outside [a, b) get zero density; inside, 1 / (b - a)
      density = dist.pdf([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
      want = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
      self.assertAllClose(want, density.eval())
Example 59
Project: ESPEI   Author: PhasesResearchLab   File: priors.py    MIT License 5 votes vote down vote up
def _prior_uniform(self, params):
        """Instantiate a uniform prior.

        ``params`` is forwarded to ``scipy.stats.uniform`` (keys such as
        ``loc`` and ``scale``); the scale is forced non-negative first.

        NOTE(review): the inline comment says "loc is min and scale is max",
        but only ``scale`` is normalized here and scipy treats it as a width,
        not a maximum — confirm the intended parameterization.
        """
        # make sure loc is min and scale is max
        params['scale'] = np.abs(params.get('scale', 1.0))
        return uniform(**params)
Example 60
Project: RIDDLE   Author: jisungk   File: tuning.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, lo=0, hi=1, mass_on_zero=0.0):
        """Set up a uniform float sampler over [lo, hi].

        Arguments:
            lo: float
                inclusive lower end of the sampling range
            hi: float
                upper end of the sampling range
            mass_on_zero: float
                probability of returning exactly zero instead of sampling
        """
        self.mass_on_zero = mass_on_zero
        self.lo = lo
        # scipy's uniform takes a width (scale), not an upper bound
        self.scale = hi - lo
Example 61
Project: RIDDLE   Author: jisungk   File: tuning.py    Apache License 2.0 5 votes vote down vote up
def rvs(self, random_state=None):
        """Draw one value, honoring the optional point mass at zero."""
        zero_hit = (self.mass_on_zero > 0.0
                    and np.random.uniform() < self.mass_on_zero)
        if zero_hit:
            return 0.0
        dist = uniform(loc=self.lo, scale=self.scale)
        return dist.rvs(random_state=random_state)
Example 62
Project: RIDDLE   Author: jisungk   File: tuning.py    Apache License 2.0 5 votes vote down vote up
def rvs(self, random_state=None):
        """Draw ``base`` raised to a uniform exponent, with optional zero mass."""
        sampler = uniform(loc=self.lo, scale=self.scale)
        if random_state is None:
            exponent = sampler.rvs()
        else:
            exponent = sampler.rvs(random_state=random_state)

        # the zero point mass is checked *after* the exponent draw,
        # matching the original sampling order
        if self.mass_on_zero > 0.0 and np.random.uniform() < self.mass_on_zero:
            return 0.0

        return self.base ** exponent
Example 63
Project: carme   Author: CarmeLabs   File: modelgeneration.py    MIT License 5 votes vote down vote up
def findBestDistribution(df):
        """Finds the best fit for each column and returns the associated parameters

        Arguments:
            df { DataFrame } -- The data matrix

        Returns:
            (best_dist_name, pvalue, params)
                - best_dist_name: List of best fitted graph for each column
                - pvalue: The associated Pvalue generated from the KSTest
                - params: The parameters associated with the best fitted
                        graph (e.g. min&max, alpha&beta)
        """
        candidates = ['truncnorm', 'beta', 'expon', 'uniform']
        n_cols = len(df.columns)
        best_names = [0] * n_cols
        best_pvalues = [0] * n_cols
        best_params = [0] * n_cols
        for idx in range(n_cols):
            column = df[df.columns[idx]]
            fitted = {}
            scored = []
            for cand in candidates:
                rv = getattr(scipy.stats, cand)
                # fit the candidate, then score the fit with a KS test
                fitted[cand] = rv.fit(column)
                _, pv = scipy.stats.kstest(column, cand, args=fitted[cand])
                scored.append((cand, pv))
            # the candidate with the highest p-value wins
            winner, winner_pv = max(scored, key=lambda item: item[1])
            best_names[idx] = winner
            best_pvalues[idx] = winner_pv
            best_params[idx] = fitted[winner]
        return best_names, best_pvalues, best_params
Example 64
Project: carl   Author: diana-hep   File: test_uniform.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def check_uniform(low, high):
    """Compare carl's Uniform against the equivalent frozen scipy uniform."""
    rng = check_random_state(1)

    candidate = Uniform(low=low, high=high)
    # scipy parameterizes by (loc, scale) = (low, high - low)
    reference = st.uniform(loc=low, scale=high - low)
    X = 3 * rng.rand(50, 1) - 1

    assert_array_almost_equal(candidate.pdf(X),
                              reference.pdf(X.ravel()))
    assert_array_almost_equal(candidate.cdf(X),
                              reference.cdf(X.ravel()))
    # nll must be the negative log of the pdf
    assert_array_almost_equal(-np.log(candidate.pdf(X)),
                              candidate.nll(X))
Example 65
Project: Effective-Quadratures   Author: Effective-Quadratures   File: uniform.py    GNU Lesser General Public License v2.1 5 votes vote down vote up
def __init__(self, lower, upper):
        """Store the support, moments and shape parameters of the uniform.

        :param lower: lower bound of the support (may be None).
        :param upper: upper bound of the support (may be None).

        NOTE(review): ``self.mean`` is computed inside the ``if`` block and
        then unconditionally overwritten with 0.0 below, so the computed mean
        is always discarded while ``self.variance`` is kept — this looks like
        a bug; confirm whether the override should only apply when the bounds
        are missing.
        """
        self.lower = lower
        self.upper = upper
        # orthogonal-polynomial recurrences use the canonical [-1, 1] interval
        self.bounds = np.array([-1.0, 1.0])
        if (self.lower is not None) and (self.upper is not None):
            self.mean = 0.5 * (self.upper + self.lower)
            self.variance = 1.0/12.0 * (self.upper - self.lower)**2
            self.x_range_for_pdf = np.linspace(self.lower, self.upper, RECURRENCE_PDF_SAMPLES)
            self.parent = uniform(loc=(self.lower), scale=(self.upper-self.lower))
        self.mean = 0.0
        self.skewness = 0.0
        self.shape_parameter_A = 0.
        self.shape_parameter_B = 0.
Example 66
Project: Effective-Quadratures   Author: Effective-Quadratures   File: uniform.py    GNU Lesser General Public License v2.1 5 votes vote down vote up
def get_description(self):
        """
        A description of the uniform distribution.

        :param Uniform self:
            An instance of the Uniform class.
        :return:
            A string describing the uniform distribution.
        """
        text = "is a uniform distribution over the support "+str(self.lower)+" to "+str(self.upper)+"."
        return text
Example 67
Project: Effective-Quadratures   Author: Effective-Quadratures   File: uniform.py    GNU Lesser General Public License v2.1 5 votes vote down vote up
def get_recurrence_coefficients(self, order):
        """
        Recurrence coefficients for the uniform distribution.

        :param Uniform self:
            An instance of the Uniform class.
        :param array order:
            The order of the recurrence coefficients desired.
        :return:
            Recurrence coefficients associated with the uniform distribution.
        """
        # the uniform distribution is the Jacobi case with both shape
        # parameters equal (here 0, 0), i.e. a Legendre-type recurrence
        return jacobi_recurrence_coefficients(self.shape_parameter_A,
                                              self.shape_parameter_B,
                                              self.lower, self.upper, order)
Example 68
Project: linear_neuron   Author: uglyboxer   File: test_grid_search.py    MIT License 5 votes vote down vote up
def test_param_sampler():
    # ParameterSampler must honor n_iter and draw values within the prior.
    dists = {"kernel": ["rbf", "linear"],
             "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=dists,
                               n_iter=10, random_state=0)
    drawn = list(sampler)
    assert_equal(len(drawn), 10)
    for params in drawn:
        assert_true(params["kernel"] in ["rbf", "linear"])
        assert_true(0 <= params["C"] <= 1)
Example 69
Project: Weiss   Author: WangWenjun559   File: test_grid_search.py    Apache License 2.0 5 votes vote down vote up
def test_param_sampler():
    """Basic contract of ParameterSampler: draw count and value ranges."""
    space = {"kernel": ["rbf", "linear"],
             "C": uniform(0, 1)}
    ps = ParameterSampler(param_distributions=space,
                          n_iter=10, random_state=0)
    out = [conf for conf in ps]
    assert_equal(len(out), 10)
    for conf in out:
        assert_true(conf["kernel"] in ["rbf", "linear"])
        assert_true(0 <= conf["C"] <= 1)
Example 70
Project: snclass   Author: emilleishida   File: functions.py    GNU General Public License v3.0 5 votes vote down vote up
def calc_scores(matrix2, ncomp, dist):
    """
    Calculate classification results for 1 data matrix.

    input: matrix2, DataMatrix object
           output from DataMatrix.build()

           ncomp, int
           number of PCs to calculate

           dist, scipy.stats.uniform distribution
           prior over gamma parameter
    """
    # re-seed from the OS so parallel workers don't share an RNG stream
    np.random.seed()

    # reduce dimensionality with a gamma drawn from the prior
    matrix2.user_choices['gamma'] = dist.rvs()
    matrix2.user_choices['ncomp'] = ncomp

    matrix2.reduce_dimension()

    # project test
    test_proj = matrix2.transf_test.transform(matrix2.data_test)

    # classify
    new_label = nneighbor(test_proj, matrix2.low_dim_matrix,
                          matrix2.sntype, matrix2.user_choices)

    # calculate score: number of correctly classified test objects
    score = sum(new_label == matrix2.test_type)

    return int(ncomp), matrix2.user_choices['gamma'], score
Example 71
Project: Genetic-SRCPSP   Author: juliusf   File: phase_type.py    MIT License 5 votes vote down vote up
def approximate_mix_gaussian_uniform(X):
    """Fit a gaussian/uniform mixture matching the mean and variance of X.

    The uniform component spans [a, b] with weight (1 - p); the gaussian
    carries the remaining weight p.
    """
    mu = np.mean(X)
    # NOTE(review): np.var is the variance, but 'sigma' usually denotes a
    # standard deviation -- the moment matching below mixes the two; confirm.
    sigma = np.var(X)
    p = 0.1  # fixed mixture weight of the gaussian component
    a = 0.0
    b = (2 * mu) * (1-p)  #check whether this could be approximated otherwise

    mu1 = mu / p
    mu2 = (1.0/2.0) * (a+b)  # mean of the uniform component
    sigma2 = np.sqrt( (1.0/12.0) * ((b-a)**2) )  # std dev of the uniform component
    #sigma1 = np.sqrt( -(mu1)**2 + (mu2**2) - ((mu2**2)/p) + (sigma2**2) + (sigma /p) - (sigma2**2)/p )
    sigma1 = np.sqrt(-mu1**2 - sigma2 ** 2 + (sigma2 / p) + (mu2/p) + (sigma/p)) # produces complex results! can't be handled by np.sqrt()
    dist = MixtureNormUni(p, sigma1, mu, a, b)
    dist.name = "gaussian uniform mixture"
    return dist
Example 72
Project: Genetic-SRCPSP   Author: juliusf   File: phase_type.py    MIT License 5 votes vote down vote up
def approximate_uniform(cxsqrd, mean):
    """Return a frozen uniform distribution on [0, 2 * mean].

    ``cxsqrd`` is accepted for interface symmetry with the other
    approximators but is not used here.
    """
    upper = 2 * mean
    frozen = uniform(scale=upper)
    frozen.name = "uniform"
    return frozen
Example 73
Project: PyOpenDial   Author: KAIST-AILab   File: uniform_density_function.py    MIT License 5 votes vote down vote up
def __init__(self, min_val=None, max_val=None):
        """
        Creates a new uniform density function with the given minimum and
        maximum threshold.

        :param min_val: the minimum threshold (numeric)
        :param max_val: the maximum threshold (numeric)
        :raises NotImplementedError: if either threshold is missing or not
            a plain number
        """
        # Accept ints as well as floats (previously ints were rejected);
        # anything else is still unsupported.
        if isinstance(min_val, (int, float)) and isinstance(max_val, (int, float)):
            self._min_val = min_val
            self._max_val = max_val
            # scipy's uniform takes (loc, scale), i.e. width = max - min
            self._distrib = stats.uniform(min_val, max_val - min_val)
        else:
            raise NotImplementedError()
Example 74
Project: skutil   Author: tgsmith61591   File: test_big.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def test_large_grid():
        """In this test, we purposely overfit a RandomForest to completely random data
        in order to assert that the test error will far supercede the train error.
        """

        # sklearn >= 0.18 changed the KFold constructor signature
        if not SK18:
            custom_cv = KFold(n=y_train.shape[0], n_folds=3, shuffle=True, random_state=42)
        else:
            custom_cv = KFold(n_splits=3, shuffle=True, random_state=42)

        # define the pipe
        pipe = Pipeline([
            ('scaler', SelectiveScaler()),
            ('pca', SelectivePCA(weight=True)),
            ('rf', RandomForestClassifier(random_state=42))
        ])

        # define hyper parameters (scipy distributions for randomized search)
        hp = {
            'scaler__scaler': [StandardScaler(), RobustScaler(), MinMaxScaler()],
            'pca__whiten': [True, False],
            'pca__weight': [True, False],
            'pca__n_components': uniform(0.75, 0.15),
            'rf__n_estimators': randint(5, 10),
            'rf__max_depth': randint(5, 15)
        }

        # define the grid
        grid = RandomizedSearchCV(pipe, hp, n_iter=2, scoring='accuracy', n_jobs=1, cv=custom_cv, random_state=42)

        # this will fail because we haven't fit yet
        assert_fails(grid.score, (ValueError, AttributeError), X_train, y_train)

        # fit the grid
        grid.fit(X_train, y_train)

        # score for coverage -- this might warn...
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            grid.score(X_train, y_train)

        # coverage:
        assert grid._estimator_type == 'classifier'

        # get predictions
        tr_pred, te_pred = grid.predict(X_train), grid.predict(X_test)

        # evaluate score (SHOULD be better than random...)
        accuracy_score(y_train, tr_pred), accuracy_score(y_test, te_pred)

        # grid score reports:
        # assert fails for bad percentile
        assert_fails(report_grid_score_detail, ValueError, **{'random_search': grid, 'percentile': 0.0})
        assert_fails(report_grid_score_detail, ValueError, **{'random_search': grid, 'percentile': 1.0})

        # assert fails for bad y_axis
        assert_fails(report_grid_score_detail, ValueError, **{'random_search': grid, 'y_axis': 'bad_axis'})

        # assert passes otherwise
        report_grid_score_detail(grid, charts=True, percentile=0.95)  # just ensure percentile works
Example 75
Project: skutil   Author: tgsmith61591   File: test_pipe.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def test_random_grid():
    """Smoke-test a full preprocessing pipeline under RandomizedSearchCV."""
    # build a pipeline
    pipe = Pipeline([
        ('retainer',       FeatureRetainer()),  # will retain all
        ('dropper',        FeatureDropper()),  # won't drop any
        ('mapper',         FunctionMapper()),  # pass through
        ('encoder',        OneHotCategoricalEncoder()),  # no object dtypes, so will pass through
        ('collinearity',   MulticollinearityFilterer(threshold=0.85)),
        ('imputer',        SelectiveImputer()),  # pass through
        ('scaler',         SelectiveScaler()),
        ('boxcox',         BoxCoxTransformer()),
        ('nzv',            NearZeroVarianceFilterer(threshold=1e-4)),
        ('pca',            SelectivePCA(n_components=0.9)),
        ('model',          RandomForestClassifier(n_jobs=1))
    ])

    # let's define a set of hyper-parameters over which to search
    # (scipy frozen distributions are sampled by RandomizedSearchCV)
    hp = {
        'collinearity__threshold':    uniform(loc=.8, scale=.15),
        'collinearity__method':       ['pearson', 'kendall', 'spearman'],
        'scaler__scaler':             [StandardScaler(), RobustScaler()],
        'pca__n_components':          uniform(loc=.75, scale=.2),
        'pca__whiten':                [True, False],
        'model__n_estimators':        randint(5, 10),
        'model__max_depth':           randint(2, 5),
        'model__min_samples_leaf':    randint(1, 5),
        'model__max_features':        uniform(loc=.5, scale=.5),
        'model__max_leaf_nodes':      randint(10, 15)
    }

    # define the gridsearch
    search = RandomizedSearchCV(pipe, hp,
                                n_iter=2,  # just to test it even works
                                scoring='accuracy',
                                cv=2,
                                random_state=42)

    # fit the search
    search.fit(X_train, y_train)

    # test the report
    report_grid_score_detail(search, charts=False)
Example 76
Project: CityEnergyAnalyst   Author: architecture-building-systems   File: latin_sampler.py    MIT License 4 votes vote down vote up
def latin_sampler(locator, num_samples, variables):
    """
    This script creates a matrix of m x n samples using the latin hypercube sampler.
    for this, it uses the database of probability distributions stored in locator.get_uncertainty_db()
    it returns clean and normalized samples.

    :param locator: pointer to locator of files of CEA
    :param num_samples: number of samples to do
    :param variables: list of variables to sample
    :return:
        1. samples: a matrix m x n with the raw samples
        2. samples_norm: a matrix m x n with each feature normalized to [0, 1]
        3. pdf_list: a dataframe with properties of the probability density functions used in the exercise.

    """

    # get probability density function PDF of variables of interest
    variable_groups = ('ENVELOPE', 'INDOOR_COMFORT', 'INTERNAL_LOADS', 'SYSTEMS')
    database = pd.concat([pd.read_excel(locator.get_uncertainty_db(), group, axis=1)
                          for group in variable_groups])
    pdf_list = database[database['name'].isin(variables)].set_index('name')

    # get number of variables
    num_vars = pdf_list.shape[0]  # alternatively use len(variables)

    # get design of experiments
    samples = latin_hypercube.lhs(num_vars, samples=num_samples, criterion='maximin')
    for i, variable in enumerate(variables):

        distribution = pdf_list.loc[variable, 'distribution']
        # bounds/moments of this variable's PDF; renamed from min/max to
        # avoid shadowing the builtins
        lower = pdf_list.loc[variable, 'min']
        upper = pdf_list.loc[variable, 'max']
        mu = pdf_list.loc[variable, 'mu']
        stdv = pdf_list.loc[variable, 'stdv']
        if distribution == 'triangular':
            loc = lower
            scale = upper - lower
            c = (mu - lower) / (upper - lower)
            samples[:, i] = triang(loc=loc, c=c, scale=scale).ppf(samples[:, i])
        elif distribution == 'normal':
            samples[:, i] = norm(loc=mu, scale=stdv).ppf(samples[:, i])
        elif distribution == 'boolean':  # converts a uniform (0-1) into True/False
            # NOTE(review): scipy's scale is a width, so this spans
            # [min, min + max]; if [min, max] is intended the scale should
            # be upper - lower -- confirm (behavior preserved here).
            samples[:, i] = ma.make_mask(np.rint(uniform(loc=lower, scale=upper).ppf(samples[:, i])))
        else:  # assume it is uniform
            samples[:, i] = uniform(loc=lower, scale=upper).ppf(samples[:, i])

    # normalize each feature to [0, 1]
    min_max_scaler = preprocessing.MinMaxScaler(copy=True, feature_range=(0, 1))
    samples_norm = min_max_scaler.fit_transform(samples)

    return samples, samples_norm, pdf_list
Example 77
Project: CityEnergyAnalyst   Author: architecture-building-systems   File: surrogate_4_calibration.py    MIT License 4 votes vote down vote up
def latin_sampler(locator, num_samples, variables):
    """
    Create a matrix of m x n samples using the latin hypercube sampler.

    For this, it uses the database of probability distributions stored in
    ``locator.get_uncertainty_db()``.

    :param locator: pointer to locator of files of CEA
    :param num_samples: number of samples to do
    :param variables: list of variables to sample
    :return:
        1. design: a matrix m x n with the samples
        2. pdf_list: a dataframe with properties of the probability density
           functions used in the exercise.
    """

    # get probability density function PDF of variables of interest
    variable_groups = ('ENVELOPE', 'INDOOR_COMFORT', 'INTERNAL_LOADS')
    database = pd.concat([pd.read_excel(locator.get_uncertainty_db(), group, axis=1)
                          for group in variable_groups])
    pdf_list = database[database['name'].isin(variables)].set_index('name')

    # get number of variables
    num_vars = pdf_list.shape[0]  # alternatively use len(variables)

    # get design of experiments: lhs yields values in [0, 1] that are mapped
    # through each variable's inverse CDF (ppf) below
    design = lhs(num_vars, samples=num_samples)
    for i, variable in enumerate(variables):
        distribution = pdf_list.loc[variable, 'distribution']
        # renamed from min/max to avoid shadowing the builtins
        lower = pdf_list.loc[variable, 'min']
        upper = pdf_list.loc[variable, 'max']
        mu = pdf_list.loc[variable, 'mu']
        stdv = pdf_list.loc[variable, 'stdv']
        if distribution == 'triangular':
            loc = lower
            scale = upper - lower
            c = (mu - lower) / (upper - lower)
            design[:, i] = triang(loc=loc, c=c, scale=scale).ppf(design[:, i])
        elif distribution == 'normal':
            design[:, i] = norm(loc=mu, scale=stdv).ppf(design[:, i])
        else:  # assume it is uniform
            # NOTE(review): scale is the raw 'max' column (support is
            # [min, min + max], not [min, max]) -- kept as-is; verify against
            # the uncertainty DB convention.
            design[:, i] = uniform(loc=lower, scale=upper).ppf(design[:, i])

    return design, pdf_list
Example 78
Project: RQpy   Author: ucbpylegroup   File: _pulse_sim.py    GNU General Public License v3.0 4 votes vote down vote up
def generate_sim_data(self, attr, *args, distribution=None, values=None, **kwargs):
        """
        Method for generating simulated data and adding it to the specified attribute.

        Parameters
        ----------
        attr : str
            The attribute that will be updated with the simulated data. Can be either
            "amplitudes", "tdelay", "taurises", or "taufalls".
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        distribution : NoneType, scipy.stats distribution, optional
            The `scipy.stats` distribution to use for generating the simulated data. If left
            as None, then the `scipy.stats.uniform` distribution is defaulted. This parameter
            will be overridden by `values` if `values` is not None.
        values : array_like, float, optional
            An array of specified values to use for the data, rather than generating simulated
            data from a probability distribution. Can also pass a single value.
        loc : array_like, optional
            Location parameter for `scipy.stats` distribution. Default is 0.
        scale : array_like, optional
            Scale parameter for `scipy.stats` continuous distribution. Default is 1.
        random_state : NoneType, int, `numpy.random.RandomState` instance, optional
            Definition of the random state for the generated data. If int or RandomState,
            use it for drawing the random variates. If None, rely on `self.random_state`.
            Default is None.

        Raises
        ------
        ValueError
            If a `size` keyword argument that disagrees with the cut length is
            passed, or if the length of `values` does not match the cut length.

        """

        # validate the requested attribute and the object's current state
        self._check_valid_attr(attr)
        self._check_if_cut_set()
        self._check_taus_set(attr)
        self._check_data_size(attr)

        if values is None:
            if distribution is None:
                distribution = stats.uniform

            # `size` is forced to the cut length; reject any conflicting value
            # (single dict lookup instead of `"size" in kwargs.keys()`)
            if kwargs.get("size", self.ntraces) != self.ntraces:
                raise ValueError(
                    f"The inputted size does not match the cut length ({self.ntraces}), "
                    "The size is automatically set, consider not passing it."
                )
            kwargs["size"] = self.ntraces

            sim_data = distribution.rvs(*args, **kwargs)
        elif np.isscalar(values):
            # broadcast a single value across all traces
            sim_data = np.ones(self.ntraces) * values
        else:
            if len(values) != self.ntraces:
                raise ValueError(
                    "The length of the inputted values argument "
                    f"does not match the cut length ({self.ntraces})"
                )
            sim_data = values

        # append the new data to the target attribute's list
        val = getattr(self, attr)
        val.append(sim_data)
Example 79
Project: PointNetGPD   Author: lianghongzhuo   File: random_variables.py    MIT License 4 votes vote down vote up
def __init__(self, min_radius, max_radius,
                 min_elev, max_elev,
                 min_az=0, max_az=2*np.pi,
                 min_roll=0, max_roll=2*np.pi,
                 num_prealloc_samples=1):
        """Initialize a ViewsphereDiscretizer.

        Parameters
        ----------
        min_radius : float
            Minimum radius for viewing sphere.
        max_radius : float
            Maximum radius for viewing sphere.
        min_elev : float
            Minimum elevation (angle from z-axis) for camera position.
        max_elev : float
            Maximum elevation for camera position.
        min_az : float
            Minimum azimuth (angle from x-axis) for camera position.
        max_az : float
            Maximum azimuth for camera position.
        min_roll : float
            Minimum roll (rotation of camera about axis generated by azimuth and
            elevation) for camera.
        max_roll : float
            Maximum roll for camera.
        num_prealloc_samples : int
            Number of preallocated samples.
        """
        # store the radial bounds unchanged
        self.min_radius = min_radius
        self.max_radius = max_radius

        # NOTE(review): the angular bounds are scaled by pi here even though
        # the defaults (e.g. max_az=2*np.pi) already look like radians --
        # confirm the intended units with the caller.
        self.min_az, self.max_az = min_az * np.pi, max_az * np.pi
        self.min_elev, self.max_elev = min_elev * np.pi, max_elev * np.pi
        self.min_roll, self.max_roll = min_roll * np.pi, max_roll * np.pi

        self.num_prealloc_samples = num_prealloc_samples

        # one uniform random variable per degree of freedom, each spanning
        # [lower, upper] via loc=lower, scale=upper-lower
        self.rad_rv = ss.uniform(self.min_radius, self.max_radius - self.min_radius)
        self.elev_rv = ss.uniform(self.min_elev, self.max_elev - self.min_elev)
        self.az_rv = ss.uniform(self.min_az, self.max_az - self.min_az)
        self.roll_rv = ss.uniform(self.min_roll, self.max_roll - self.min_roll)

        RandomVariable.__init__(self, self.num_prealloc_samples)
Example 80
Project: snclass   Author: emilleishida   File: functions.py    GNU General Public License v3.0 4 votes vote down vote up
def core_cross_val(pars):
    """
    Perform 1/3 validation.

    input: pars, dict
           dictionary of input parameters
           keywords:
                   data, array
                   data matrix

                   types, vector
                   vector of types

                   user_choices, dict
                   output from read_user_input()

    output: vector of floats
            parameters with higher classification success
            [n_components, gamma, n_successes]
    """
    # split sample in 3
    # NOTE(review): randint samples WITH replacement, so the "2/3" train set
    # may contain duplicates and the split fractions are approximate -- kept
    # as-is to preserve the original behavior; confirm this is intended.
    indx_list1 = np.random.randint(0, len(pars['data']),
                                   size=int(2 * len(pars['data']) / 3))
    indx_list2 = [elem for elem in xrange(len(pars['data']))
                  if elem not in indx_list1]

    # set train data matrix and types
    matrix2 = snclass.matrix.DataMatrix()
    matrix2.user_choices = pars['user_choices']
    matrix2.datam = np.array([pars['data'][indx] for indx in indx_list1])
    matrix2.sntype = np.array([pars['types'][indx] for indx in indx_list1])

    # set test data matrix and types
    matrix2.data_test = np.array([pars['data'][indx] for indx in indx_list2])
    matrix2.test_type = np.array([pars['types'][indx] for indx in indx_list2])

    # uniform prior on gamma over the user-supplied limits
    ploc = matrix2.user_choices['gamma_lim'][0]
    pscale = matrix2.user_choices['gamma_lim'][1] - ploc
    dist = uniform(loc=ploc, scale=pscale)

    results = []
    for ncomp in xrange(int(matrix2.user_choices['ncomp_lim'][0]),
                        int(matrix2.user_choices['ncomp_lim'][1])):

        screen('... ncomp = ' + str(ncomp), pars['user_choices'])

        # draw gamma particles until the requested count succeeds; ARPACK
        # convergence failures are reported and retried without counting
        k = 0
        while k < pars['user_choices']['gamma_nparticles']:
            try:
                results.append(calc_scores(matrix2, ncomp, dist))
                k += 1
            except ArpackNoConvergence:
                screen('Arparck fail to converge!', pars['user_choices'])

    results = np.array(results)
    # first row achieving the maximum score (same semantics as
    # list(...).index(max(...)), but vectorized)
    indx_max = int(np.argmax(results[:, -1]))

    return results[indx_max]