Python scipy.special.binom() Examples

The following are 30 code examples of scipy.special.binom(), drawn from open-source projects and ranked by user votes. You can go to the original project or source file by following the link given above each example. You may also want to check out all of the other available functions and classes of the scipy.special module.
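Before diving into the project examples, here is a minimal, self-contained sketch of scipy.special.binom itself (the inputs are chosen purely for illustration). binom evaluates the binomial coefficient as a NumPy ufunc, so it broadcasts over arrays and, via the gamma function, also accepts non-integer arguments:

import numpy as np
from scipy.special import binom

print(binom(5, 2))              # "5 choose 2" -> 10.0 (returned as a float)
print(binom(4, np.arange(5)))   # a row of Pascal's triangle -> [1. 4. 6. 4. 1.]
print(binom(5.5, 2))            # non-integer arguments are handled via the gamma function -> 12.375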
Example #1
Source File: test_sampler_inbatch.py    From catalyst with Apache License 2.0 6 votes
def check_all_triplets_number(
    labels: List[int], num_selected_tri: int, max_tri: int
) -> None:
    """
    Checks that the selection strategy for all triplets
    returns the correct number of triplets.

    Args:
        labels: list of class labels
        num_selected_tri: number of selected triplets
        max_tri: limit on the number of selected triplets
    """
    labels_counts = Counter(labels).values()

    n_all_tri = 0
    for count in labels_counts:
        n_pos = binom(count, 2)
        n_neg = len(labels) - count
        n_all_tri += n_pos * n_neg

    assert num_selected_tri == n_all_tri or num_selected_tri == max_tri 
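As a quick sanity check of the count the assertion above expects, the same arithmetic can be run by hand on a hypothetical label list (the labels below are made up for illustration): with labels [0, 0, 0, 1, 1], class 0 contributes binom(3, 2) * 2 = 6 triplets and class 1 contributes binom(2, 2) * 3 = 3, so 9 in total.

from collections import Counter
from scipy.special import binom

labels = [0, 0, 0, 1, 1]            # hypothetical labels
n_all_tri = 0
for count in Counter(labels).values():
    n_pos = binom(count, 2)         # anchor-positive pairs within the class
    n_neg = len(labels) - count     # negatives outside the class
    n_all_tri += n_pos * n_neg
print(n_all_tri)                    # 3*2 + 1*3 = 9.0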
Example #2
Source File: rdp_accountant.py    From privacy with Apache License 2.0 6 votes
def _compute_log_a_int(q, sigma, alpha):
  """Compute log(A_alpha) for integer alpha. 0 < q < 1."""
  assert isinstance(alpha, six.integer_types)

  # Initialize with 0 in the log space.
  log_a = -np.inf

  for i in range(alpha + 1):
    log_coef_i = (
        math.log(special.binom(alpha, i)) + i * math.log(q) +
        (alpha - i) * math.log(1 - q))

    s = log_coef_i + (i * i - i) / (2 * (sigma**2))
    log_a = _log_add(log_a, s)

  return float(log_a) 
Example #3
Source File: network_comparison.py    From IDTxl with GNU General Public License v3.0 6 votes
def _check_n_subjects(self, data_set_a, data_set_b):
        """Check if no. subjects is sufficient request no. permutations."""
        if self.settings['stats_type'] == 'dependent':
            assert len(data_set_a) == len(data_set_b), (
                    'The number of data sets is not equal between conditions.')
            n_data_sets = len(data_set_a)
            if 2**n_data_sets < self.settings['n_perm_comp']:
                raise RuntimeError('The number of data sets per condition {0} '
                                   'is not sufficient to enable the '
                                   'requested no. permutations {1}'.format(
                                                n_data_sets,
                                                self.settings['n_perm_comp']))
        elif self.settings['stats_type'] == 'independent':
            max_len = max(len(data_set_a), len(data_set_b))
            total_len = len(data_set_a) + len(data_set_b)
            if binom(total_len, max_len) < self.settings['n_perm_comp']:
                raise RuntimeError('The total number of data sets {0} is not '
                                   'sufficient to enable the requested no. '
                                   'permutations {1}'.format(
                                                total_len,
                                                self.settings['n_perm_comp']))
        else:
            raise RuntimeError('Unknown ''stats_type''!')
        self.settings['n_subjects'] = [len(data_set_a), len(data_set_b)] 
Example #4
Source File: network_comparison.py    From IDTxl with GNU General Public License v3.0 6 votes
def _check_n_replications(self, data_a, data_b):
        """Check if no. replications is sufficient request no. permutations."""
        assert data_a.n_replications == data_b.n_replications, (
                            'Unequal no. replications in the two data sets.')
        n_replications = data_a.n_replications
        if self.settings['stats_type'] == 'dependent':
            if 2**n_replications < self.settings['n_perm_comp']:
                raise RuntimeError('The number of replications {0} in the data'
                                   ' is not sufficient to allow for the '
                                   'requested no. permutations {1}'.format(
                                            n_replications,
                                            self.settings['n_perm_comp']))
        elif self.settings['stats_type'] == 'independent':
            if (binom(2*n_replications, n_replications) <
                    self.settings['n_perm_comp']):
                raise RuntimeError('The number of replications {0} in the data'
                                   ' is not sufficient to allow for the '
                                   'requested no. permutations {1}'.format(
                                                n_replications,
                                                self.settings['n_perm_comp']))
        else:
            raise RuntimeError('Unknown ''stats_type''!') 
Example #5
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License 6 votes
def _munp(self, n, beta, m):
        """
        Returns the n-th non-central moment of the crystalball function.
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))

        def n_th_moment(n, beta, m):
            """
            Returns n-th moment. Defined only if n+1 < m
            Function cannot broadcast due to the loop over n
            """
            A = (m/beta)**m * np.exp(-beta**2 / 2.0)
            B = m/beta - beta
            rhs = 2**((n-1)/2.0) * sc.gamma((n+1)/2) * (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2))
            lhs = np.zeros(rhs.shape)
            for k in range(n + 1):
                lhs += sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) * (m/beta)**(-m + k + 1)
            return A * lhs + rhs

        return N * _lazywhere(np.atleast_1d(n + 1 < m),
                              (n, beta, m),
                              np.vectorize(n_th_moment, otypes=[np.float]),
                              np.inf) 
Example #6
Source File: rdp_accountant.py    From models with Apache License 2.0 6 votes
def _compute_log_a_int(q, sigma, alpha):
  """Compute log(A_alpha) for integer alpha. 0 < q < 1."""
  assert isinstance(alpha, (int, long))

  # Initialize with 0 in the log space.
  log_a = -np.inf

  for i in range(alpha + 1):
    log_coef_i = (
        math.log(special.binom(alpha, i)) + i * math.log(q) +
        (alpha - i) * math.log(1 - q))

    s = log_coef_i + (i * i - i) / (2 * (sigma**2))
    log_a = _log_add(log_a, s)

  return float(log_a) 
Example #7
Source File: test_matfuncs.py    From GraphicDesignPatternByPython with MIT License 6 votes
def test_pascal(self):
        # Test pascal triangle.
        # Nilpotent exponential, used to trigger a failure (gh-8029)

        for scale in [1.0, 1e-3, 1e-6]:
            for n in range(120):
                A = np.diag(np.arange(1, n + 1), -1) * scale
                B = expm(A)

                sc = scale**np.arange(n, -1, -1)
                if np.any(sc < 1e-300):
                    continue

                got = B
                expected = binom(np.arange(n + 1)[:,None],
                                 np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]
                err = abs(expected - got).max()
                atol = 1e-13 * abs(expected).max()
                assert_allclose(got, expected, atol=atol) 
Example #8
Source File: privacy_analysis.py    From pytorch-dp with Apache License 2.0 6 votes
def _compute_log_a_int(q, sigma, alpha: int):
    """Compute log(A_alpha) for integer alpha. 0 < q < 1."""

    # Initialize with 0 in the log space.
    log_a = -np.inf

    for i in range(alpha + 1):
        log_coef_i = (
            math.log(special.binom(alpha, i))
            + i * math.log(q)
            + (alpha - i) * math.log(1 - q)
        )

        s = log_coef_i + (i * i - i) / (2 * (sigma ** 2))
        log_a = _log_add(log_a, s)

    return float(log_a) 
Example #9
Source File: _weyl_ordering.py    From OpenFermion with Apache License 2.0 6 votes
def mccoy(mode, op_a, op_b, m, n):
    """ Implement the McCoy formula on two operators of the
    form op_a^m op_b^n.

    Args:
        mode (int): the mode number the two operators act on.
        op_a: the label of operator a. This can be any hashable type.
        op_b: the label of operator b. This can be any hashable type.
        m (int): the power of operator a.
        n (int): the power of operator b.
    """
    new_op = dict()
    for r in range(0, n+1):
        coeff = binom(n, r)/(2**n)
        new_term = tuple([(mode, op_b)]*r + [(mode, op_a)]*m
                         + [(mode, op_b)]*(n-r))
        if new_term not in new_op:
            new_op[tuple(new_term)] = coeff
        else:
            new_op[tuple(new_term)] += coeff
    return new_op 
Example #10
Source File: test_abc_smc_algorithm.py    From pyABC with BSD 3-Clause "New" or "Revised" License 6 votes
def test_beta_binomial_two_identical_models(db_path, sampler):
    binomial_n = 5

    def model_fun(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [model_fun for _ in range(2)]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(800)
    parameter_given_model_prior_distribution = [Distribution(theta=st.beta(
                                                                      1, 1))
                                                for _ in range(2)]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"result": 2})

    minimum_epsilon = .2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)
    assert abs(mp.p[0] - .5) + abs(mp.p[1] - .5) < .08 
Example #11
Source File: csp.py    From IV-2a with MIT License 6 votes
def csp_one_one(cov_matrix,NO_csp,NO_classes):
	'''	
	calculate spatial filters for all pairs of classes

	Keyword arguments:
	cov_matrix -- numpy array of size [NO_channels, NO_channels]
	NO_csp -- number of spatial filters (24)

	Return:	spatial filter numpy array of size [22,NO_csp] 
	'''
	N, _ = cov_matrix[0].shape 
	n_comb = binom(NO_classes,2)

	NO_filtpairs = int(NO_csp/(n_comb*2))
	
	w = np.zeros((N,NO_csp))
	
	kk = 0 # internal counter 
	for cc1 in range(0,NO_classes):
		for cc2 in range(cc1+1,NO_classes):
			w[:,NO_filtpairs*2*(kk):NO_filtpairs*2*(kk+1)] = gevd(cov_matrix[cc1], cov_matrix[cc2],NO_filtpairs)
			kk +=1		
	return w 
Example #12
Source File: autoregression.py    From pysteps with BSD 3-Clause "New" or "Revised" License 6 votes
def _compute_differenced_model_params(phi, p, q, d):
    phi_out = []
    for i in range(p + d):
        if q > 1:
            if len(phi[0].shape) == 2:
                phi_out.append(np.zeros((q, q)))
            else:
                phi_out.append(np.zeros(phi[0].shape))
        else:
            phi_out.append(0.0)

    for i in range(1, d + 1):
        if q > 1:
            phi_out[i - 1] -= binom(d, i) * (-1) ** i * np.eye(q)
        else:
            phi_out[i - 1] -= binom(d, i) * (-1) ** i
    for i in range(1, p + 1):
        phi_out[i - 1] += phi[i - 1]
    for i in range(1, p + 1):
        for j in range(1, d + 1):
            phi_out[i + j - 1] += phi[i - 1] * binom(d, j) * (-1) ** j

    return phi_out 
Example #13
Source File: test_abc_smc_algorithm.py    From pyABC with BSD 3-Clause "New" or "Revised" License 6 votes
def test_beta_binomial_two_identical_models_adaptive(db_path, sampler):
    binomial_n = 5

    def model_fun(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [model_fun for _ in range(2)]
    models = list(map(SimpleModel, models))
    population_size = AdaptivePopulationSize(800)
    parameter_given_model_prior_distribution = [
        Distribution(theta=st.beta(1, 1)) for _ in range(2)]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"result": 2})

    minimum_epsilon = .2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)
    assert abs(mp.p[0] - .5) + abs(mp.p[1] - .5) < .08 
Example #14
Source File: _continuous_distns.py    From lambda-packs with MIT License 6 votes
def _munp(self, n, beta, m):
        """
        Returns the n-th non-central moment of the crystalball function.
        """
        N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))

        def n_th_moment(n, beta, m):
            """
            Returns n-th moment. Defined only if n+1 < m
            Function cannot broadcast due to the loop over n
            """
            A = (m/beta)**m * np.exp(-beta**2 / 2.0)
            B = m/beta - beta
            rhs = 2**((n-1)/2.0) * sc.gamma((n+1)/2) * (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2))
            lhs = np.zeros(rhs.shape)
            for k in range(n + 1):
                lhs += sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) * (m/beta)**(-m + k + 1)
            return A * lhs + rhs

        return N * _lazywhere(np.atleast_1d(n + 1 < m),
                              (n, beta, m),
                              np.vectorize(n_th_moment, otypes=[np.float]),
                              np.inf) 
Example #15
Source File: privacy_analysis.py    From pytorch-dp with Apache License 2.0 5 votes
def _compute_log_a_frac(q, sigma, alpha):
    """Compute log(A_alpha) for fractional alpha. 0 < q < 1."""
    # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
    # initialized to 0 in the log space:
    log_a0, log_a1 = -np.inf, -np.inf
    i = 0

    z0 = sigma ** 2 * math.log(1 / q - 1) + 0.5

    while True:  # do ... until loop
        coef = special.binom(alpha, i)
        log_coef = math.log(abs(coef))
        j = alpha - i

        log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
        log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)

        log_e0 = math.log(0.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
        log_e1 = math.log(0.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))

        log_s0 = log_t0 + (i * i - i) / (2 * (sigma ** 2)) + log_e0
        log_s1 = log_t1 + (j * j - j) / (2 * (sigma ** 2)) + log_e1

        if coef > 0:
            log_a0 = _log_add(log_a0, log_s0)
            log_a1 = _log_add(log_a1, log_s1)
        else:
            log_a0 = _log_sub(log_a0, log_s0)
            log_a1 = _log_sub(log_a1, log_s1)

        i += 1
        if max(log_s0, log_s1) < -30:
            break

    return _log_add(log_a0, log_a1) 
Example #16
Source File: reference_direction.py    From pymoo with Apache License 2.0 5 votes
def get_number_of_uniform_points(n_partitions, n_dim):
    """
    Returns the number of uniformly spaced reference points that can be created for the given number of partitions.
    """
    return int(special.binom(n_dim + n_partitions - 1, n_partitions)) 
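For concreteness, this is the standard stars-and-bars count of points on a simplex lattice: with, say, n_dim = 3 objectives and n_partitions = 4 (illustrative values only), there are binom(3 + 4 - 1, 4) = binom(6, 4) = 15 uniformly spaced reference points.

from scipy import special

n_dim, n_partitions = 3, 4          # illustrative values
n_points = int(special.binom(n_dim + n_partitions - 1, n_partitions))
print(n_points)                     # binom(6, 4) = 15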
Example #17
Source File: rdp_accountant.py    From models with Apache License 2.0 5 votes
def _compute_log_a_frac(q, sigma, alpha):
  """Compute log(A_alpha) for fractional alpha. 0 < q < 1."""
  # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
  # initialized to 0 in the log space:
  log_a0, log_a1 = -np.inf, -np.inf
  i = 0

  z0 = sigma**2 * math.log(1 / q - 1) + .5

  while True:  # do ... until loop
    coef = special.binom(alpha, i)
    log_coef = math.log(abs(coef))
    j = alpha - i

    log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
    log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)

    log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
    log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))

    log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
    log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1

    if coef > 0:
      log_a0 = _log_add(log_a0, log_s0)
      log_a1 = _log_add(log_a1, log_s1)
    else:
      log_a0 = _log_sub(log_a0, log_s0)
      log_a1 = _log_sub(log_a1, log_s1)

    i += 1
    if max(log_s0, log_s1) < -30:
      break

  return _log_add(log_a0, log_a1) 
Example #18
Source File: test_abc_smc_algorithm.py    From pyABC with BSD 3-Clause "New" or "Revised" License 5 votes
def test_beta_binomial_different_priors(db_path, sampler):
    binomial_n = 5

    def model(args):
        return {"result": st.binom(binomial_n, args['theta']).rvs()}

    models = [model for _ in range(2)]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(800)
    a1, b1 = 1, 1
    a2, b2 = 10, 1
    parameter_given_model_prior_distribution = [Distribution(theta=RV("beta",
                                                                      a1, b1)),
                                                Distribution(theta=RV("beta",
                                                                      a2, b2))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    n1 = 2
    abc.new(db_path, {"result": n1})

    minimum_epsilon = .2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)

    def B(a, b):
        return gamma(a) * gamma(b) / gamma(a + b)

    def expected_p(a, b, n1):
        return binom(binomial_n, n1) * B(a + n1, b + binomial_n - n1) / B(a, b)

    p1_expected_unnormalized = expected_p(a1, b1, n1)
    p2_expected_unnormalized = expected_p(a2, b2, n1)
    p1_expected = p1_expected_unnormalized / (p1_expected_unnormalized +
                                              p2_expected_unnormalized)
    p2_expected = p2_expected_unnormalized / (p1_expected_unnormalized +
                                              p2_expected_unnormalized)
    assert abs(mp.p[0] - p1_expected) + abs(mp.p[1] - p2_expected) < .08 
Example #19
Source File: sindy_utils.py    From SindyAutoencoders with MIT License 5 votes
def library_size(n, poly_order, use_sine=False, include_constant=True):
    l = 0
    for k in range(poly_order+1):
        l += int(binom(n+k-1,k))
    if use_sine:
        l += n
    if not include_constant:
        l -= 1
    return l 
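As a worked check of this count (numbers chosen only for illustration): for n = 3 state variables and poly_order = 2, the library holds binom(2, 0) + binom(3, 1) + binom(4, 2) = 1 + 3 + 6 = 10 terms, i.e. the constant, the 3 linear terms, and the 6 quadratic monomials.

from scipy.special import binom

n, poly_order = 3, 2                # illustrative values
terms = sum(int(binom(n + k - 1, k)) for k in range(poly_order + 1))
print(terms)                        # 1 + 3 + 6 = 10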
Example #20
Source File: curves.py    From dnn-mode-connectivity with BSD 2-Clause "Simplified" License 5 votes
def __init__(self, num_bends):
        super(Bezier, self).__init__()
        self.register_buffer(
            'binom',
            torch.Tensor(binom(num_bends - 1, np.arange(num_bends), dtype=np.float32))
        )
        self.register_buffer('range', torch.arange(0, float(num_bends)))
        self.register_buffer('rev_range', torch.arange(float(num_bends - 1), -1, -1)) 
Example #21
Source File: motivation.py    From Hands-On-Ensemble-Learning-with-Python with MIT License 5 votes
def prob(size):
    err = 0.15
    half = int(np.ceil(size/2))
    s = 0
    for i in range(half, size):
        s += binom(size, i)*np.power(err,i)*np.power((1-err),(size-i))
    return s 
Example #22
Source File: theil_sen.py    From Splunking-Crime with GNU Affero General Public License v3.0 5 votes
def _check_subparams(self, n_samples, n_features):
        n_subsamples = self.n_subsamples

        if self.fit_intercept:
            n_dim = n_features + 1
        else:
            n_dim = n_features

        if n_subsamples is not None:
            if n_subsamples > n_samples:
                raise ValueError("Invalid parameter since n_subsamples > "
                                 "n_samples ({0} > {1}).".format(n_subsamples,
                                                                 n_samples))
            if n_samples >= n_features:
                if n_dim > n_subsamples:
                    plus_1 = "+1" if self.fit_intercept else ""
                    raise ValueError("Invalid parameter since n_features{0} "
                                     "> n_subsamples ({1} > {2})."
                                     "".format(plus_1, n_dim, n_samples))
            else:  # if n_samples < n_features
                if n_subsamples != n_samples:
                    raise ValueError("Invalid parameter since n_subsamples != "
                                     "n_samples ({0} != {1}) while n_samples "
                                     "< n_features.".format(n_subsamples,
                                                            n_samples))
        else:
            n_subsamples = min(n_dim, n_samples)

        if self.max_subpopulation <= 0:
            raise ValueError("Subpopulation must be strictly positive "
                             "({0} <= 0).".format(self.max_subpopulation))

        all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
        n_subpopulation = int(min(self.max_subpopulation, all_combinations))

        return n_subsamples, n_subpopulation 
Example #23
Source File: theil_sen.py    From twitter-stock-recommendation with MIT License 5 votes
def _check_subparams(self, n_samples, n_features):
        n_subsamples = self.n_subsamples

        if self.fit_intercept:
            n_dim = n_features + 1
        else:
            n_dim = n_features

        if n_subsamples is not None:
            if n_subsamples > n_samples:
                raise ValueError("Invalid parameter since n_subsamples > "
                                 "n_samples ({0} > {1}).".format(n_subsamples,
                                                                 n_samples))
            if n_samples >= n_features:
                if n_dim > n_subsamples:
                    plus_1 = "+1" if self.fit_intercept else ""
                    raise ValueError("Invalid parameter since n_features{0} "
                                     "> n_subsamples ({1} > {2})."
                                     "".format(plus_1, n_dim, n_samples))
            else:  # if n_samples < n_features
                if n_subsamples != n_samples:
                    raise ValueError("Invalid parameter since n_subsamples != "
                                     "n_samples ({0} != {1}) while n_samples "
                                     "< n_features.".format(n_subsamples,
                                                            n_samples))
        else:
            n_subsamples = min(n_dim, n_samples)

        if self.max_subpopulation <= 0:
            raise ValueError("Subpopulation must be strictly positive "
                             "({0} <= 0).".format(self.max_subpopulation))

        all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
        n_subpopulation = int(min(self.max_subpopulation, all_combinations))

        return n_subsamples, n_subpopulation 
Example #24
Source File: rdp_accountant.py    From multilabel-image-classification-tensorflow with MIT License 5 votes
def _compute_log_a_int(q, sigma, alpha):
  """Compute log(A_alpha) for integer alpha."""
  assert isinstance(alpha, (int, long))

  # The first and second terms of A_alpha in the log space:
  log_a1, log_a2 = -np.inf, -np.inf

  for i in range(alpha + 1):
    # Compute in the log space. Extra care needed for q = 0 or 1.
    log_coef_i = math.log(special.binom(alpha, i))
    if q > 0:
      log_coef_i += i * math.log(q)
    elif i > 0:
      continue  # The term is 0, skip the rest.

    if q < 1.0:
      log_coef_i += (alpha - i) * math.log(1 - q)
    elif i < alpha:
      continue  # The term is 0, skip the rest.

    s1 = log_coef_i + (i * i - i) / (2.0 * (sigma ** 2))
    s2 = log_coef_i + (i * i + i) / (2.0 * (sigma ** 2))
    log_a1 = _log_add(log_a1, s1)
    log_a2 = _log_add(log_a2, s2)

  log_a = _log_add(math.log(1 - q) + log_a1, math.log(q) + log_a2)
  if FLAGS.rdp_verbose:
    print("A: by binomial expansion    {} = {} + {}".format(
        _log_print(log_a),
        _log_print(math.log(1 - q) + log_a1), _log_print(math.log(q) + log_a2)))
  return float(log_a) 
Example #25
Source File: rdp_accountant.py    From privacy with Apache License 2.0 5 votes
def _compute_log_a_frac(q, sigma, alpha):
  """Compute log(A_alpha) for fractional alpha. 0 < q < 1."""
  # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
  # initialized to 0 in the log space:
  log_a0, log_a1 = -np.inf, -np.inf
  i = 0

  z0 = sigma**2 * math.log(1 / q - 1) + .5

  while True:  # do ... until loop
    coef = special.binom(alpha, i)
    log_coef = math.log(abs(coef))
    j = alpha - i

    log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
    log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)

    log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
    log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))

    log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
    log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1

    if coef > 0:
      log_a0 = _log_add(log_a0, log_s0)
      log_a1 = _log_add(log_a1, log_s1)
    else:
      log_a0 = _log_sub(log_a0, log_s0)
      log_a1 = _log_sub(log_a1, log_s1)

    i += 1
    if max(log_s0, log_s1) < -30:
      break

  return _log_add(log_a0, log_a1) 
Example #26
Source File: curves.py    From dnn-mode-connectivity with BSD 2-Clause "Simplified" License 5 votes
def forward(self, t):
        return self.binom * \
               torch.pow(t, self.range) * \
               torch.pow((1.0 - t), self.rev_range) 
Example #27
Source File: theil_sen.py    From Mastering-Elasticsearch-7.0 with MIT License 5 votes
def _check_subparams(self, n_samples, n_features):
        n_subsamples = self.n_subsamples

        if self.fit_intercept:
            n_dim = n_features + 1
        else:
            n_dim = n_features

        if n_subsamples is not None:
            if n_subsamples > n_samples:
                raise ValueError("Invalid parameter since n_subsamples > "
                                 "n_samples ({0} > {1}).".format(n_subsamples,
                                                                 n_samples))
            if n_samples >= n_features:
                if n_dim > n_subsamples:
                    plus_1 = "+1" if self.fit_intercept else ""
                    raise ValueError("Invalid parameter since n_features{0} "
                                     "> n_subsamples ({1} > {2})."
                                     "".format(plus_1, n_dim, n_samples))
            else:  # if n_samples < n_features
                if n_subsamples != n_samples:
                    raise ValueError("Invalid parameter since n_subsamples != "
                                     "n_samples ({0} != {1}) while n_samples "
                                     "< n_features.".format(n_subsamples,
                                                            n_samples))
        else:
            n_subsamples = min(n_dim, n_samples)

        if self.max_subpopulation <= 0:
            raise ValueError("Subpopulation must be strictly positive "
                             "({0} <= 0).".format(self.max_subpopulation))

        all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
        n_subpopulation = int(min(self.max_subpopulation, all_combinations))

        return n_subsamples, n_subpopulation 
Example #28
Source File: das_dennis.py    From pymoo with Apache License 2.0 5 votes
def number_of_points(self):
        return int(special.binom(self.n_dim + self.n_partitions - 1, self.n_partitions)) 
Example #29
Source File: boolean.py    From MKLpy with GNU General Public License v3.0 5 votes
def monotone_conjunctive_kernel(X,Z=None,c=2):
    L = linear_kernel(X,Z)
    return binom(L,c) 
Example #30
Source File: boolean.py    From MKLpy with GNU General Public License v3.0 5 votes
def monotone_disjunctive_kernel(X,Z=None,d=2):
    L = linear_kernel(X,Z)
    n = X.shape[1]

    XX = np.dot(X.sum(axis=1).reshape(X.shape[0],1), np.ones((1,Z.shape[0])))
    TT = np.dot(Z.sum(axis=1).reshape(Z.shape[0],1), np.ones((1,X.shape[0])))
    N_x = n - XX
    N_t = n - TT
    N_xz = N_x - TT.T + L

    N_d = binom(n, d)
    N_x = binom(N_x,d)
    N_t = binom(N_t,d)
    N_xz = binom(N_xz,d)
    return (N_d - N_x - N_t.T + N_xz)