Python scipy.stats.rv_discrete() Examples

The following are 30 code examples of scipy.stats.rv_discrete(), drawn from open-source projects. Each example notes the project and source file it comes from. You may also want to check out all available functions/classes of the module scipy.stats, or try the search function.
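Before diving into the project examples, here is a minimal, self-contained sketch of the pattern most of them share (the support xk and probabilities pk below are arbitrary illustration values; values=, pmf, cdf and rvs are the standard rv_discrete API):

import numpy as np
from scipy import stats

# Build a custom discrete distribution from a support array xk and
# matching probabilities pk (non-negative, summing to 1).
xk = np.array([0, 1, 2, 3])
pk = np.array([0.1, 0.2, 0.3, 0.4])
custm = stats.rv_discrete(name='custm', values=(xk, pk))

print(custm.pmf(2))       # probability mass at 2 -> 0.3
print(custm.cdf(1))       # cumulative probability up to 1 -> 0.1 + 0.2
print(custm.rvs(size=5))  # five random samples drawn according to pk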
Example #1
Source File: Generate.py    From YouTubeCommenter with MIT License
def probs_to_word_ix(pk, is_first):
	if is_first:
		pk[0] = 0.0
		pk /= np.sum(pk)
	else:
		pk *= pk
		pk /= np.sum(pk)
		#for i in range(3):
		#	max_val = np.amax(pk)
		#	if max_val > 0.5:
		#		break
		#	pk *= pk
		#	pk /= np.sum(pk)

	xk = np.arange(pk.shape[0], dtype=np.int32)
	custm = stats.rv_discrete(name='custm', values=(xk, pk))
	return custm.rvs() 
Example #2
Source File: draw_pmf.py    From machine-learning-note with MIT License
def custom_made_discrete_dis_pmf():
    """
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_discrete.html
    :return:
    """
    xk = np.arange(7)  # all possible values
    print(xk)  # [0 1 2 3 4 5 6]
    pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)  # probability of each value
    custm = stats.rv_discrete(name='custm', values=(xk, pk))

    X = custm.rvs(size=20)
    print(X)

    fig, ax = plt.subplots(1, 1)
    ax.plot(xk, custm.pmf(xk), 'ro', ms=8, mec='r')
    ax.vlines(xk, 0, custm.pmf(xk), colors='r', linestyles='-', lw=2)
    plt.title('Custom made discrete distribution (PMF)')
    plt.ylabel('Probability')
    plt.show()

# custom_made_discrete_dis_pmf() 
Example #3
Source File: test_distributions.py    From GraphicDesignPatternByPython with MIT License
def test_pickling(self):
        # test that a frozen instance pickles and unpickles
        # (this method is a clone of common_tests.check_pickling)
        beta = stats.beta(2.3098496451481823, 0.62687954300963677)
        poiss = stats.poisson(3.)
        sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                           [0.1, 0.2, 0.3, 0.4]))

        for distfn in [beta, poiss, sample]:
            distfn.random_state = 1234
            distfn.rvs(size=8)
            s = pickle.dumps(distfn)
            r0 = distfn.rvs(size=8)

            unpickled = pickle.loads(s)
            r1 = unpickled.rvs(size=8)
            assert_equal(r0, r1)

            # also smoke test some methods
            medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
            assert_equal(medians[0], medians[1])
            assert_equal(distfn.cdf(medians[0]),
                         unpickled.cdf(medians[1])) 
Example #4
Source File: LSCDE.py    From Conditional_Density_Estimation with MIT License
def sample(self, X):
    """ sample from the conditional mixture distributions - requires the model to be fitted

    Args:
      X: values to be conditioned on when sampling - numpy array of shape (n_instances, n_dim_x)

    Returns: tuple (X, Y)
      - X - the values conditioned on that were provided as an argument - numpy array of shape (n_samples, ndim_x)
      - Y - conditional samples from the model p(y|x) - numpy array of shape (n_samples, ndim_y)
    """
    assert self.fitted
    X = self._handle_input_dimensionality(X)

    weights = np.multiply(self.alpha, self._gaussian_kernel(X))
    weights = weights / np.sum(weights, axis=1)[:,None]

    Y = np.zeros(shape=(X.shape[0], self.ndim_y))
    for i in range(X.shape[0]):
      discrete_dist = stats.rv_discrete(values=(range(weights.shape[1]), weights[i, :]))
      idx = discrete_dist.rvs()
      Y[i, :] = self.gaussians_y[idx].rvs()

    return X, Y 
Example #5
Source File: toy_world_state.py    From mcts with BSD 2-Clause "Simplified" License
def perform(self, action):
        # get distribution about outcomes
        probabilities = self.belief[action] / np.sum(self.belief[action])
        distrib = rv_discrete(values=(range(len(probabilities)),
                                      probabilities))

        # draw sample
        sample = distrib.rvs()

        # update belief accordingly
        belief = deepcopy(self.belief)
        belief[action][sample] += 1

        # manual found
        if (self.pos == self.world.manual).all():
            print("m", end="")
            belief = {ToyWorldAction(np.array([0, 1])): [50, 1, 1, 1],
                      ToyWorldAction(np.array([0, -1])): [1, 50, 1, 1],
                      ToyWorldAction(np.array([1, 0])): [1, 1, 50, 1],
                      ToyWorldAction(np.array([-1, 0])): [1, 1, 1, 50]}

        # build next state
        pos = self._correct_position(self.pos + self.actions[sample].action)

        return ToyWorldState(pos, self.world, belief) 
Example #6
Source File: common_tests.py    From GraphicDesignPatternByPython with MIT License
def check_edge_support(distfn, args):
    # Make sure that x=self.a and self.b are handled correctly.
    x = [distfn.a, distfn.b]
    if isinstance(distfn, stats.rv_discrete):
        x = [distfn.a - 1, distfn.b]

    npt.assert_equal(distfn.cdf(x, *args), [0.0, 1.0])
    npt.assert_equal(distfn.sf(x, *args), [1.0, 0.0])

    if distfn.name not in ('skellam', 'dlaplace'):
        # with a = -inf, log(0) generates warnings
        npt.assert_equal(distfn.logcdf(x, *args), [-np.inf, 0.0])
        npt.assert_equal(distfn.logsf(x, *args), [0.0, -np.inf])

    npt.assert_equal(distfn.ppf([0.0, 1.0], *args), x)
    npt.assert_equal(distfn.isf([0.0, 1.0], *args), x[::-1])

    # out-of-bounds for isf & ppf
    npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all())
    npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all()) 
Example #7
Source File: test_distributions.py    From GraphicDesignPatternByPython with MIT License
def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail.  See #1508.
        stats.rv_continuous()
        stats.rv_discrete() 
Example #8
Source File: test_distributions.py    From GraphicDesignPatternByPython with MIT License
def test_bad_input(self):
        xk = [1, 2, 3]
        pk = [0.5, 0.5]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))

        pk = [1, 2, 3]
        assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) 
Example #9
Source File: test_discrete_basic.py    From GraphicDesignPatternByPython with MIT License
def test_discrete_basic(distname, arg, first_case):
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    np.random.seed(9765456)
    rvs = distfn.rvs(size=2000, *arg)
    supp = np.unique(rvs)
    m, v = distfn.stats(*arg)
    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')

    check_pmf_cdf(distfn, arg, distname)
    check_oth(distfn, arg, supp, distname + ' oth')
    check_edge_support(distfn, arg)

    alpha = 0.01
    check_discrete_chisquare(distfn, arg, rvs, alpha,
           distname + ' chisquare')

    if first_case:
        locscale_defaults = (0,)
        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
                 distfn.logsf]
        # make sure arguments are within support
        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, }
        k = spec_k.get(distname, 1)
        check_named_args(distfn, k, arg, locscale_defaults, meths)
        if distname != 'sample distribution':
            check_scale_docstring(distfn)
        check_random_state_property(distfn, arg)
        check_pickling(distfn, arg)

        # Entropy
        check_entropy(distfn, arg, distname)
        if distfn.__class__._entropy != stats.rv_discrete._entropy:
            check_private_entropy(distfn, arg, stats.rv_discrete) 
Example #10
Source File: test_discrete_basic.py    From GraphicDesignPatternByPython with MIT License
def test_rvs_broadcast(dist, shape_args):
    # If shape_only is True, it means the _rvs method of the
    # distribution uses more than one random number to generate a random
    # variate.  That means the result of using rvs with broadcasting or
    # with a nontrivial size will not necessarily be the same as using the
    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
    # of the results, not the values.
    # Whether or not a distribution is in the following list is an
    # implementation detail of the distribution, not a requirement.  If
    # the implementation of the rvs() method of a distribution changes, this
    # test might also have to be changed.
    shape_only = dist in ['skellam']

    try:
        distfunc = getattr(stats, dist)
    except TypeError:
        distfunc = dist
        dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
    loc = np.zeros(2)
    nargs = distfunc.numargs
    allargs = []
    bshape = []
    # Generate shape parameter arguments...
    for k in range(nargs):
        shp = (k + 3,) + (1,)*(k + 1)
        param_val = shape_args[k]
        allargs.append(param_val*np.ones(shp, dtype=np.array(param_val).dtype))
        bshape.insert(0, shp[0])
    allargs.append(loc)
    bshape.append(loc.size)
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.
    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_]) 
Example #11
Source File: bioutilities.py    From Haystack with GNU Affero General Public License v3.0
def set_bg_model(self,ACGT_probabilities):
        self.bg_model= rv_discrete(name='bg', values=([0, 1, 2,3], ACGT_probabilities)) 
Example #12
Source File: bioutilities.py    From Haystack with GNU Affero General Public License v3.0
def generate_random(cls,n,bgmodel=rv_discrete(name='bg', values=([0, 1, 2,3], [0.2955, 0.2045, 0.2045,0.2955]))):
        int_seq=cls.bg_model.rvs(size=n)
        return ''.join([int2nt[c] for c in int_seq]) 
Example #13
Source File: hpo.py    From brainiak with Apache License 2.0
def get_samples(self, n):
        """Sample the GMM distribution.

        Arguments
        ---------
        n : int
            Number of samples needed

        Returns
        -------
        1D array
            Samples from the distribution
        """

        normalized_w = self.weights / np.sum(self.weights)
        get_rand_index = st.rv_discrete(values=(range(self.N),
                                        normalized_w)).rvs(size=n)
        samples = np.zeros(n)
        k = 0
        j = 0
        while (k < n):
            i = get_rand_index[j]
            j = j + 1
            if (j == n):
                get_rand_index = st.rv_discrete(values=(range(self.N),
                                                normalized_w)).rvs(size=n)
                j = 0
            v = np.random.normal(loc=self.points[i], scale=self.sigma[i])
            if (v > self.max_limit or v < self.min_limit):
                continue
            else:
                samples[k] = v
                k = k + 1
                if (k == n):
                    break
        return samples 
Example #14
Source File: custom.py    From Effective-Quadratures with GNU Lesser General Public License v2.1
def get_cdf(self, points=None):
        # Approximation of the PDF integral (not in the original version of the Custom class):
        #    given a set of points, what is the CDF, starting from the PDF of the data?
        #------------------------------------
        # version 1:
        #---------------------------------------------------------------------
        #x = sorted(points) # points can be different from self.data
        #
        #y = self.getPDF(x) # pdf function associated with self.data and points
        #
        #c     = [] # list for future CDF array
        #c.append( 0.) # initialization
        #
        #for i in range(1,len(points)):
        #    c.append((y[i-1]+y[i] )*.5*(x[i]-x[i-1])+c[i-1] )
        #for i in range(1,len(points)):
        #    c[i] = c[i]/c[len(points)-1]
        #return c
        #--------------------------------------------------------------------
        # version 2
        points = np.matrix(points)

        y = self.get_pdf(self.data)
        summ = np.sum(y)
        p = np.array(y/summ)
        custom = stats.rv_discrete(name='custom', values=(self.data, p))

        return custom.cdf(points)
        #------------------------------------------------------------------------# 
Example #15
Source File: custom.py    From Effective-Quadratures with GNU Lesser General Public License v2.1
def get_icdf(self, xx):
        """
        A custom inverse cumulative distribution function.

        :param Custom self:
            An instance of Custom class.
        :param array xx:
            An array of points in which the inverse cumulative density function needs to be evaluated.
        :return:
            Inverse cumulative density function values of the Custom distribution.
        """
        #x  = self.data
        #y  = self.getPDF(x)
        #c  = []
        #yy = []
        #c.append(0.0)
        #for i in range(1, len(x)):
        #    c.append(c[i-1]+(x[i]-x[i-1])*(y[i]+y[i-1])*.5)
        #for i in range(1, len(x)):
        #    c[i]=c[i]/c[len(x)-1]
        #for k in range(0, len(x)):
        #    for i in range(0, len(x)):
        #        if ((xx[k]>=c[i]) and (xx[k]<=c[i+1])):
        #            value = float((xx[k]-c[i])/(c[i+1]-c[i])*(x[i+1]-x[i])+x[i])
        #            yy.append(value)
        #            break
        #return yy
        xx = np.matrix(xx)
        y = self.get_pdf(self.data)
        summ = np.sum(y)
        p = np.array(y/summ)
        custom = stats.rv_discrete(name='custom', values=(self.data, p))
        return custom.ppf(xx) 
Example #16
Source File: pre_gsee_processing.py    From gsee with BSD 3-Clause "New" or "Revised" License
def create_rand_month(xk: np.ndarray, pk: np.ndarray, n: int) -> np.ndarray:
        """

        Parameters
        ----------
        xk : np.ndarray
            Bins of possible radiation values
        pk : np.ndarray
            Probabilities for the bins in xk to occur
        n : int
            Length of the month in days

        Returns
        -------
        np.ndarray
            of length n with random values from xk following the probabilities given in pk

        """

        multi = 10000  # multiplied as .rvs only gives integer values, but we want a higher resolution
        xk = xk * multi

        if sum(pk) and sum(pk) > 0:
            pk = pk / sum(pk)  # normalized so sum(pk)==1
            try:
                custm = st.rv_discrete(name='custm', values=(xk, pk))
            except Exception:
                raise ValueError('Sum of provided pk is not 1')
            r = custm.rvs(size=n) / multi
            return r
        else:
            return np.full(n, 0) 
Example #17
Source File: GMM.py    From Conditional_Density_Estimation with MIT License
def _simulate_cond_rows_individually(self, X):
    W_x = self._W_x(X)
    y_samples = np.zeros(shape=(X.shape[0], self.ndim_y))

    for i in range(X.shape[0]):
      discrete_dist = stats.rv_discrete(values=(range(self.n_kernels), W_x[i, :]))
      idx = discrete_dist.rvs(random_state=self.random_state)
      y_samples[i, :] = self.gaussians_y[idx].rvs(random_state=self.random_state)

    return X, y_samples 
Example #18
Source File: utils.py    From GEOMetrics with MIT License
def sample(verts, faces, num=10000): 

	dist_uni = torch.distributions.Uniform(torch.tensor([0.0]).cuda(), torch.tensor([1.0]).cuda())

	# calculate area of each face 
	x1,x2,x3 = torch.split(torch.index_select(verts, 0,faces[:,0]) - torch.index_select(verts, 0,faces[:,1]), 1, dim = 1)
	y1,y2,y3 = torch.split(torch.index_select(verts, 0,faces[:,1]) - torch.index_select(verts, 0,faces[:,2]), 1, dim = 1)
	a = (x2*y3-x3*y2)**2
	b = (x3*y1 - x1*y3)**2
	c = (x1*y2 - x2*y1)**2
	Areas = torch.sqrt(a+b+c)/2
	Areas = Areas /  torch.sum(Areas) # percentage of each face w.r.t. full surface area 

	# define discrete distribution w.r.t. the face area ratios calculated above
	choices = np.arange(Areas.shape[0])
	dist = stats.rv_discrete(name='custm', values=(choices, Areas.data.cpu().numpy()))
	choices = dist.rvs(size=num) # list of faces to be sampled from 

	# from each face sample a point 
	select_faces = faces[choices] 
	xs = torch.index_select(verts, 0,select_faces[:,0])
	ys = torch.index_select(verts, 0,select_faces[:,1])
	zs = torch.index_select(verts, 0,select_faces[:,2])
	u = torch.sqrt(dist_uni.sample_n(num))
	v = dist_uni.sample_n(num)
	points = (1- u)*xs + (u*(1-v))*ys + u*v*zs

	return points 

# calculate length of each edge 
Example #19
Source File: draw_pmf.py    From machine-learning-note with MIT License
def sampling_and_empirical_dis():
    xk = np.arange(7)  # all possible values
    print(xk)  # [0 1 2 3 4 5 6]
    pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)  # probability of each value
    custm = stats.rv_discrete(name='custm', values=(xk, pk))

    X1 = custm.rvs(size=20)  # first draw of samples
    X2 = custm.rvs(size=200)  # second draw of samples
    # compute the frequency of each outcome in X1 & X2 (equivalent to an empirical PMF)
    val1, cnt1 = np.unique(X1, return_counts=True)
    val2, cnt2 = np.unique(X2, return_counts=True)
    pmf_X1 = cnt1 / len(X1)
    pmf_X2 = cnt2 / len(X2)

    plt.figure(1)
    plt.subplot(211)
    plt.plot(xk, custm.pmf(xk), 'ro', ms=8, mec='r', label='theor. pmf')
    plt.vlines(xk, 0, custm.pmf(xk), colors='r', lw=5, alpha=0.2)
    plt.vlines(val1, 0, pmf_X1, colors='b', linestyles='-', lw=3, label='X1 empir. pmf')
    plt.legend(loc='best', frameon=False)
    plt.ylabel('Probability')
    plt.title('Theoretical dist. PMF vs Empirical dist. PMF')
    plt.subplot(212)
    plt.plot(xk, custm.pmf(xk), 'ro', ms=8, mec='r', label='theor. pmf')
    plt.vlines(xk, 0, custm.pmf(xk), colors='r', lw=5, alpha=0.2)
    plt.vlines(val2, 0, pmf_X2, colors='g', linestyles='-', lw=3, label='X2 empir. pmf')
    plt.legend(loc='best', frameon=False)
    plt.ylabel('Probability')
    plt.show() 
Example #20
Source File: test_distributions.py    From GraphicDesignPatternByPython with MIT License
def test_expect(self):
        xk = [1, 2, 4, 6, 7, 11]
        pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
        rv = stats.rv_discrete(values=(xk, pk))

        assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14) 
Example #21
Source File: gee_categorical_simulation_check.py    From vnpy_crypto with MIT License
def simulate(self):

        endog, exog, group, time = [], [], [], []

        for i in range(self.ngroups):

            gsize = np.random.randint(self.group_size_range[0],
                                      self.group_size_range[1])

            group.append([i,] * gsize)

            time1 = np.random.normal(size=(gsize,2))
            time.append(time1)

            exog1 = np.random.normal(size=(gsize, len(self.params[0])))
            exog.append(exog1)

            # Probabilities for each outcome
            prob = [np.exp(np.dot(exog1, p)) for p in self.params]
            prob = np.vstack(prob).T
            prob /= prob.sum(1)[:, None]

            m = len(self.params)
            endog1 = []
            for k in range(gsize):
                pdist = stats.rv_discrete(values=(lrange(m),
                                                  prob[k,:]))
                endog1.append(pdist.rvs())

            endog.append(np.asarray(endog1))

        self.exog = np.concatenate(exog, axis=0)
        self.endog = np.concatenate(endog).astype(np.int32)
        self.time = np.concatenate(time, axis=0)
        self.group = np.concatenate(group)
        self.offset = np.zeros(len(self.endog), dtype=np.float64) 
Example #22
Source File: test_distributions.py    From Computable with MIT License
def test_rvs(self):
        states = [-1,0,1,2,3,4]
        probability = [0.0,0.3,0.4,0.0,0.3,0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample',values=(states,probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))

        for s,p in zip(states,probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)

        x = r.rvs()
        assert_(isinstance(x, int)) 
Example #23
Source File: test_distributions.py    From Computable with MIT License
def test_entropy(self):
        # Basic tests of entropy.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)

        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0) 
Example #24
Source File: test_distributions.py    From Computable with MIT License
def test_no_name_arg(self):
        # If name is not given, construction shouldn't fail.  See #1508.
        stats.rv_continuous()
        stats.rv_discrete() 
Example #25
Source File: test_distributions.py    From Computable with MIT License
def test_docstrings():
    badones = [r',\s*,', r'\(\s*,', r'^\s*:']
    for distname in stats.__all__:
        dist = getattr(stats, distname)
        if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
            for regex in badones:
                assert_( re.search(regex, dist.__doc__) is None) 
Example #26
Source File: discrete.py    From bhmm with GNU Lesser General Public License v3.0
def generate_observation_from_state(self, state_index):
        """
        Generate a single synthetic observation data from a given state.

        Parameters
        ----------
        state_index : int
            Index of the state from which observations are to be generated.

        Returns
        -------
        observation : float
            A single observation from the given state.

        Examples
        --------

        Generate an observation model.

        >>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))

        Generate sample from each state.

        >>> observation = output_model.generate_observation_from_state(0)

        """
        # construct a discrete random generator (note that this is inefficient - better to use one of the next functions)
        import scipy.stats
        gen = scipy.stats.rv_discrete(values=(range(len(self._output_probabilities[state_index])), 
                                              self._output_probabilities[state_index]))
        return gen.rvs(size=1)
Example #27
Source File: discrete.py    From bhmm with GNU Lesser General Public License v3.0
def generate_observations_from_state(self, state_index, nobs):
        """
        Generate synthetic observation data from a given state.

        Parameters
        ----------
        state_index : int
            Index of the state from which observations are to be generated.
        nobs : int
            The number of observations to generate.

        Returns
        -------
        observations : numpy.array of shape(nobs,) with type dtype
            A sample of `nobs` observations from the specified state.

        Examples
        --------

        Generate an observation model.

        >>> output_model = DiscreteOutputModel(np.array([[0.5,0.5],[0.1,0.9]]))

        Generate sample from each state.

        >>> observations = [output_model.generate_observations_from_state(state_index, nobs=100) for state_index in range(output_model.nstates)]

        """
        import scipy.stats
        gen = scipy.stats.rv_discrete(values=(range(self._nsymbols), self._output_probabilities[state_index]))
        return gen.rvs(size=nobs)
Example #28
Source File: test_distributions.py    From GraphicDesignPatternByPython with MIT License
def test_rvs(self):
        states = [-1, 0, 1, 2, 3, 4]
        probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
        samples = 1000
        r = stats.rv_discrete(name='sample', values=(states, probability))
        x = r.rvs(size=samples)
        assert_(isinstance(x, numpy.ndarray))

        for s, p in zip(states, probability):
            assert_(abs(sum(x == s)/float(samples) - p) < 0.05)

        x = r.rvs()
        assert_(isinstance(x, int)) 
Example #29
Source File: test_distributions.py    From GraphicDesignPatternByPython with MIT License
def test_entropy(self):
        # Basic tests of entropy.
        pvals = np.array([0.25, 0.45, 0.3])
        p = stats.rv_discrete(values=([0, 1, 2], pvals))
        expected_h = -sum(xlogy(pvals, pvals))
        h = p.entropy()
        assert_allclose(h, expected_h)

        p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
        h = p.entropy()
        assert_equal(h, 0.0) 
Example #30
Source File: test_distributions.py    From GraphicDesignPatternByPython with MIT License
def test_pmf(self):
        xk = [1, 2, 4]
        pk = [0.5, 0.3, 0.2]
        rv = stats.rv_discrete(values=(xk, pk))

        x = [[1., 4.],
             [3., 2]]
        assert_allclose(rv.pmf(x),
                        [[0.5, 0.2],
                         [0., 0.3]], atol=1e-14)