# Python math.factorial() Examples

The following code examples show how to use math.factorial(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
def call(self, func, *args, **kwargs):
    """
    Time a simple function call using condensed syntax.

    Args:
        func (callable): the function to benchmark
        *args: positional arguments forwarded to ``func``
        **kwargs: keyword arguments forwarded to ``func``

    Returns:
        self (timerit.Timerit): Use min, or mean to get a scalar. Use
        print to output a report to stdout.

    Example:
        >>> import math
        >>> time = Timerit(num=10).call(math.factorial, 50).min()
        >>> assert time > 0
    """
    # Run the call once per configured trial, measuring each one.
    for trial_timer in self:
        with trial_timer:
            func(*args, **kwargs)
    return self
Example 2
def perm(n):
    """Permutations.
    n = number of elements

    Notation: P
               n

    All elements included: yes
    Can elements repeat:   no
    Order matters:         yes

    See: Number of ways there are to rearrange n elements.

    Practical example: amount of numbers with 3 distinct digits.
    Let the elements be: 1, 2, 3:

    123, 132, 213, 231, 312, 321
    """
    # The number of orderings of n distinct elements is exactly n!.
    return factorial(n)
Example 3
def dpois(lmbda):
    """Poisson Distribution
    lmbda = average number of successes per unit interval

    Used to determine the probability of an amount of
    successes occurring in a fixed interval (time, area…)

    This doesn't return a value, but rather the specified Poisson function
    """
    def p(k):
        # The pmf is zero for negative counts.
        if k < 0:
            return 0
        return (exp(-lmbda) * lmbda ** k) / factorial(k)

    # Allow accessing the used 'lmbda' value from the function.
    p.lmbda = lmbda
    p.expected = lmbda
    p.variance = lmbda
    return p
Example 4
def test_ctm_distribution_d1(nsymbols):
    """Check normalization of CTM distributions."""
    bdm = BDM(ndim=1, nsymbols=nsymbols)
    total = 0
    for dct in bdm._ctm.values():
        for key, cmx in dct.items():
            # Number of equivalent parts under relabeling of the symbols.
            distinct = len(set(key))
            multiplier = factorial(nsymbols) / factorial(nsymbols - distinct)
            total += 2 ** -cmx * multiplier
    # Probabilities over all parts should sum to (approximately) one.
    assert total == approx(1, .01)

# TODO: this is not passing; investigate reasons # pylint: disable=fixme
# @pytest.mark.slow
# @pytest.mark.parametrize('nsymbols', [2])
# def test_ctm_distribution_d2(nsymbols):
#     total = 0
#     bdm = BDM(ndim=2, nsymbols=nsymbols)
#     for dct in bdm._ctm.values():
#         for key, cmx in dct.items():
#             n = len(set(key))
#             mult = factorial(nsymbols) / factorial(nsymbols - n)
#             total += 2**-cmx * mult
#     assert total == approx(1, .01) 
Example 5
def test_get_poly_cc_k(self):
    # First derivative coefficients of a 4-term polynomial basis.
    np.testing.assert_array_equal(trajGen3D.get_poly_cc(4, 1, 1), [0, 1, 2, 3])
    # Second derivative, evaluated with t = 2.
    np.testing.assert_array_equal(trajGen3D.get_poly_cc(4, 2, 2), [0, 0, 2, 12])
    # Only the highest-order term survives the 7th derivative of 8 terms.
    np.testing.assert_array_equal(
        trajGen3D.get_poly_cc(8, 7, 1),
        [0, 0, 0, 0, 0, 0, 0, math.factorial(7)])
    # The 8th derivative of a degree-7 polynomial vanishes entirely.
    np.testing.assert_array_equal(trajGen3D.get_poly_cc(8, 8, 1), np.zeros(8))
Example 6
def reset(self, label=None):
    """
    Clears all measurements, allowing the object to be reused.

    Args:
        label (str, optional) : optionally change the label

    Example:
        >>> from timerit import Timerit
        >>> import math
        >>> ti = Timerit(num=10, unit='us', verbose=True)
        >>> _ = ti.reset(label='10!').call(math.factorial, 10)
        Timed best=...s, mean=...s for 10!
        >>> _ = ti.reset(label='20!').call(math.factorial, 20)
        Timed best=...s, mean=...s for 20!
        >>> _ = ti.reset().call(math.factorial, 20)
        Timed best=...s, mean=...s for 20!
    """
    # Only overwrite the label when a truthy one was supplied.
    if label:
        self.label = label
    # Drop all recorded measurements and derived statistics.
    self.times = []
    self.n_loops = None
    self.total_time = None
    return self
Example 7
def mean(self):
    """
    The mean of the best results of each trial.

    Returns:
        float: mean of measured seconds

    Note:
        This is typically less informative than simply looking at the min.
        It is recommended to use min as the expectation value rather than
        mean in most cases.

    Example:
        >>> import math
        >>> self = Timerit(num=10, verbose=0)
        >>> self.call(math.factorial, 50)
        >>> assert self.mean() > 0
    """
    # Take the best time within each "bestof" chunk, then average them.
    best_times = [min(chunk) for chunk in chunks(self.times, self.bestof)]
    return sum(best_times) / len(best_times)
Example 8
def std(self):
    """
    The standard deviation of the best results of each trial.

    Returns:
        float: standard deviation of measured seconds

    Note:
        As mentioned in the timeit source code, the standard deviation is
        not often useful. Typically the minimum value is most informative.

    Example:
        >>> import math
        >>> self = Timerit(num=10, verbose=1)
        >>> self.call(math.factorial, 50)
        >>> assert self.std() >= 0
    """
    import math
    # Population standard deviation over the per-chunk best times.
    best_times = [min(chunk) for chunk in chunks(self.times, self.bestof)]
    mu = sum(best_times) / len(best_times)
    variance = sum((t - mu) ** 2 for t in best_times) / len(best_times)
    return math.sqrt(variance)
Example 9
def print(self, verbose=1):
    """
    Prints human readable report using the print function

    Args:
        verbose (int): verbosity level

    SeeAlso:
        timerit.Timerit.report

    Example:
        >>> import math
        >>> Timerit(num=10).call(math.factorial, 50).print(verbose=1)
        Timed best=...s, mean=...s
        >>> Timerit(num=10).call(math.factorial, 50).print(verbose=2)
        Timed for: 10 loops, best of 3
        time per loop: best=...s, mean=...s
        >>> Timerit(num=10).call(math.factorial, 50).print(verbose=3)
        Timed for: 10 loops, best of 3
        body took: ...
        time per loop: best=...s, mean=...s
    """
    # Delegate formatting to report(); this method only handles output.
    report_text = self.report(verbose=verbose)
    print(report_text)
Example 10
def binomial_coefficient(k, i):
    """ Computes the binomial coefficient (denoted by *k choose i*).

    Please see the following website for details: http://mathworld.wolfram.com/BinomialCoefficient.html

    :param k: size of the set of distinct elements
    :type k: int
    :param i: size of the subsets
    :type i: int
    :return: combination of *k* and *i*
    :rtype: float
    """
    # Choosing more elements than available is impossible.
    if i > k:
        return float(0)
    # k! / (i! * (k-i)!)
    numerator = math.factorial(k)
    denominator = math.factorial(k - i) * math.factorial(i)
    return float(numerator / denominator)
Example 11
 Project: NiujiaoDebugger   Author: MrSrc   File: test_math.py    GNU General Public License v3.0 6 votes
def testFactorial(self):
    self.assertEqual(math.factorial(0), 1)
    self.assertEqual(math.factorial(0.0), 1)
    running = 1
    for i in range(1, 1000):
        running *= i
        # Compare against a running product, a float-cast argument, and the
        # pure-Python binary-split reference implementation.
        self.assertEqual(math.factorial(i), running)
        self.assertEqual(math.factorial(float(i)), running)
        self.assertEqual(math.factorial(i), py_factorial(i))
    # Negative and non-integral arguments must be rejected.
    self.assertRaises(ValueError, math.factorial, -1)
    self.assertRaises(ValueError, math.factorial, -1.0)
    self.assertRaises(ValueError, math.factorial, -10**100)
    self.assertRaises(ValueError, math.factorial, -1e100)
    self.assertRaises(ValueError, math.factorial, math.pi)

    # Other implementations may place different upper bounds.
Example 12
def Rdirect(n_max, y):
    # Direct evaluation of the regular harmonic terms at point y,
    # returned as (real, imag) tables indexed by [degree, n_max + order].
    r, theta, phi = sloppy_spherical(y)
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    # Associated Legendre function values at cos(theta).
    Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]
    for deg in range(n_max + 1):
        for order in range(-deg, deg + 1):
            if order < 0:
                # Negative orders via the factorial symmetry relation.
                lp = (
                    ((-1) ** (-order))
                    * (factorial(deg + order) / factorial(deg - order))
                    * Pmn[-order, deg] / ((-1) ** -order)
                )
            else:
                lp = Pmn[order, deg] / ((-1) ** order)
            factor = (r ** deg) * lp / factorial(deg + order)
            real[deg, n_max + order] = factor * np.cos(order * phi)
            imag[deg, n_max + order] = factor * np.sin(order * phi)
    return real, imag
Example 13
def Sdirect(n_max, y):
    # Direct evaluation of the singular (irregular) harmonic terms at y,
    # returned as (real, imag) tables indexed by [degree, n_max + order].
    r, theta, phi = sloppy_spherical(y)
    real = np.zeros((n_max + 1, 2 * n_max + 1))
    imag = np.zeros((n_max + 1, 2 * n_max + 1))
    # Associated Legendre function values at cos(theta).
    Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]
    for deg in range(n_max + 1):
        for order in range(-deg, deg + 1):
            if order < 0:
                # Negative orders via the factorial symmetry relation.
                lp = (
                    ((-1) ** (-order))
                    * (factorial(deg + order) / factorial(deg - order))
                    * Pmn[-order, deg] / ((-1) ** -order)
                )
            else:
                lp = Pmn[order, deg] / ((-1) ** order)
            # Singular terms decay as 1 / r^(deg + 1).
            factor = factorial(deg - order) * lp / (r ** (deg + 1))
            real[deg, n_max + order] = factor * np.cos(order * phi)
            imag[deg, n_max + order] = factor * np.sin(order * phi)
    return real, imag
Example 14
def __init__(self, data_size, p=1, non_overlapping_folds=False, seed=None):
    """Leave-p-out cross-validation splitter.

    data_size: number of samples; p: size of each held-out test set;
    non_overlapping_folds: sample disjoint folds instead of enumerating
    all combinations; seed: RNG seed for the non-overlapping sampler.
    """
    super(LeavePOut, self).__init__(data_size, shuffle=False)
    self.data_size = data_size
    self.p = p
    self.non_overlapping_folds = non_overlapping_folds
    if non_overlapping_folds:
        self._test_combinations = NonOverlappingCombinations(
            n=data_size, p=p, seed=seed)
        self._total_combinations = \
            self._test_combinations.remaining_length()
    else:
        self._test_combinations = combinations(range(data_size), p)
        # Floor division keeps C(data_size, p) exact; int(a / b) rounds
        # through a float and is wrong once the count exceeds 2**53.
        self._total_combinations = (factorial(data_size) //
                                    (factorial(data_size - p) * factorial(p)))
    self._remaining_combinations = self._total_combinations
    self.seed = seed
Example 15
def reset(self, p=None, non_overlapping_folds=False, seed=None):
    """Restart the fold generator, optionally changing its configuration.

    p: new test-set size (None keeps the current one); seed: new RNG seed
    (None keeps the current one).

    NOTE(review): ``non_overlapping_folds`` defaults to False (not None),
    so the ``is not None`` guard below always fires and a plain reset()
    always switches back to exhaustive combinations — confirm whether the
    default was meant to be None.
    """
    self.p = p if p is not None else self.p
    self.non_overlapping_folds = non_overlapping_folds \
        if non_overlapping_folds is not None else self.non_overlapping_folds
    self.seed = seed if seed is not None else self.seed
    if self.non_overlapping_folds:
        self._test_combinations = NonOverlappingCombinations(
            n=self.data_size, p=self.p, seed=self.seed)
        self._total_combinations = \
            self._test_combinations.remaining_length()
    else:
        self._test_combinations = \
            combinations(range(self.data_size), self.p)
        # Floor division keeps C(data_size, p) exact; int(a / b) rounds
        # through a float and is wrong once the count exceeds 2**53.
        self._total_combinations = (
            factorial(self.data_size)
            // (factorial(self.data_size - self.p) * factorial(self.p)))
    self._remaining_combinations = self._total_combinations
Example 16
def __init__(self, labels, p=1, non_overlapping_folds=False, seed=None):
    """Leave-p-labels-out cross-validation splitter.

    labels: per-sample label array; p: number of unique labels held out
    per fold; non_overlapping_folds: sample disjoint folds instead of
    enumerating all combinations; seed: RNG seed for that sampler.
    """
    super(LeavePLabelsOut, self).__init__(len(labels), shuffle=False)
    self.labels = np.asarray(labels)
    self.unique_labels = np.unique(labels)
    self.num_unique_labels = len(self.unique_labels)
    self.p = p
    self.non_overlapping_folds = non_overlapping_folds
    self.seed = seed

    if non_overlapping_folds:
        self._test_combinations = NonOverlappingCombinations(
            n=self.num_unique_labels, p=p, seed=seed)
        self._total_combinations = \
            self._test_combinations.remaining_length()
    else:
        self._test_combinations = \
            combinations(range(self.num_unique_labels), p)
        # Floor division keeps C(n, p) exact; int(a / b) rounds through a
        # float and is wrong once the count exceeds 2**53.
        self._total_combinations = (
            factorial(self.num_unique_labels)
            // (factorial(self.num_unique_labels - p) * factorial(p)))
    self._remaining_combinations = self._total_combinations
Example 17
def reset(self, n=None, p=None, max_combinations=None, seed=None):
    """Reinitialize the combination sampler, retaining unspecified settings.

    n / p: new element count and group size (None keeps the current
    values); max_combinations: cap on how many combinations to keep;
    seed: reseed the RNG when it differs from the current seed.
    """
    self._n = n if n is not None else self._n
    self._p = p if p is not None else self._p
    assert self._p <= self._n
    if seed and seed != self._seed:
        self._seed = seed
        self._rng = np.random.RandomState(self._seed)
    # Use self._n / self._p from here on: the raw ``n`` and ``p`` arguments
    # may be None (meaning "keep current"), which previously crashed
    # factorial() and combinations().  Floor division also keeps C(n, p)
    # exact instead of rounding through a float.
    self._total_combinations = (
        factorial(self._n)
        // (factorial(self._n - self._p) * factorial(self._p)))
    self._remaining_combinations = self._total_combinations

    max_combinations = max_combinations \
        if max_combinations is not None else self._max_combinations
    self._max_combinations = self._total_combinations \
        if max_combinations is None \
        else min(max_combinations, self._total_combinations)

    self._counter = 0

    # Generate the combinations in random order, and store only the first
    # self._max_combinations.
    self._combinations = list(itertools.combinations(range(self._n), self._p))
    self._rng.shuffle(self._combinations)
    self._combinations = self._combinations[:self._max_combinations]
Example 18
 Project: nn_dataflow   Author: stanford-mast   File: test_util.py    BSD 3-Clause "New" or "Revised" License 6 votes
def test_perm(self):
    ''' Permutations. '''
    fs_ord = set()
    fs_unord = set()
    for fs in util.factorize(512, 3):
        # NOTE(review): the loop body was missing from the excerpted
        # source; reconstructed so both sets are populated (ordered factor
        # tuples vs. their unordered distinct-factor sets) — confirm
        # against the upstream nn_dataflow test.
        fs_ord.add(fs)
        fs_unord.add(frozenset(fs))

    cnt = 0
    for fs in fs_unord:
        if len(fs) == 3:
            # Permutations of three distinct factors.
            cnt += math.factorial(3)
        elif len(fs) == 2:
            # Permutations of the pattern a, a, b.
            cnt += 3
        else:
            # Pattern a, a, a.
            cnt += 1
    self.assertEqual(len(fs_ord), cnt)
Example 19
def test_math_subclass(self):
    """verify subtypes of float/long work w/ math functions"""
    import math

    class myfloat(float): pass
    class mylong(long): pass

    float_one = myfloat(1)
    long_one = mylong(1)

    # Each function must treat a subclass instance like its base type.
    funcs = (math.log, math.log10, math.log1p, math.asinh, math.acosh,
             math.atanh, math.factorial, math.trunc, math.isinf)
    for func in funcs:
        try:
            from_float = func(float_one)
        except ValueError:
            from_float = None
        try:
            from_long = func(long_one)
        except ValueError:
            from_long = None
        self.assertEqual(from_float, from_long)
Example 20
 Project: pyblish-win   Author: pyblish   File: test_math.py    GNU Lesser General Public License v3.0 5 votes
def testFactorial(self):
    def fact(n):
        # Reference implementation: straightforward iterative product.
        result = 1
        for i in range(1, int(n) + 1):
            result *= i
        return result

    values = range(10) + [50, 100, 500]
    random.shuffle(values)
    for x in values:
        expected = fact(x)
        # factorial must accept int, long, and integral float arguments.
        for cast in (int, long, float):
            self.assertEqual(math.factorial(cast(x)), expected,
                             (x, expected, math.factorial(x)))
    self.assertRaises(ValueError, math.factorial, -1)
    self.assertRaises(ValueError, math.factorial, math.pi)
Example 21
def test_parts_coverage_d1(nsymbols, size):
    """Check the fraction of possible parts that have CTM values."""
    bdm = BDM(ndim=1, nsymbols=nsymbols)
    # Count of all possible parts of lengths 1..size.
    expected = sum(nsymbols ** i for i in range(1, size + 1))
    total = 0
    for dct in bdm._ctm.values():
        for key, _ in dct.items():
            # Number of equivalent parts under relabeling of the symbols.
            distinct = len(set(key))
            total += factorial(nsymbols) / factorial(nsymbols - distinct)
    # At least a quarter of the possible parts must be covered.
    assert total / expected >= .25
Example 22
def _cycle_parts(self, shape):
    """Cycle over all possible dataset parts sorted by complexity."""

    def expand(part):
        # Each stored part stands for k equivalent parts obtainable by
        # relabeling the symbols, so repeat it k times.
        key, cmx = part
        distinct = len(set(key))
        k = factorial(self.nsymbols) / factorial(self.nsymbols - distinct)
        return repeat((key, cmx), int(k))

    parts = chain.from_iterable(map(expand, self._ctm[shape].items()))
    return cycle(enumerate(parts))
Example 23
 Project: phoneticSimilarity   Author: ronggong   File: embedding_rnn_siamese_train.py    GNU Affero General Public License v3.0 5 votes
def nPr(n, r):
    """Number of ordered selections of r items out of n: n! / (n-r)!."""
    return math.factorial(n) / math.factorial(n - r)
Example 24
 Project: phoneticSimilarity   Author: ronggong   File: embedding_rnn_siamese_train_teacher_student.py    GNU Affero General Public License v3.0 5 votes
def nPr(n,r):
f = math.factorial
return f(n) / f(n-r) 
Example 25
def test_completeness(n, m):
    """Every maximum matching of the complete bipartite graph is enumerated."""
    graph = BipartiteGraph(map(lambda x: (x, True), itertools.product(range(n), range(m))))
    count = sum(1 for _ in enum_maximum_matchings_iter(graph))
    # A conditional expression replaces the fragile ``cond and a or b``
    # idiom, which silently yields ``b`` whenever ``a`` is falsy.
    expected_count = math.factorial(n) / math.factorial(n - m) if m > 0 else 0
    assert count == expected_count
Example 26
def factorial(_stack):
    # Replace the value on top of the stack with its factorial, in place.
    _stack[-1] = math.factorial(_stack[-1])
    return _stack
Example 27
 Project: pohmm-keystroke   Author: vmonaco   File: montecarlo.py    BSD 3-Clause "New" or "Revised" License 5 votes
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """
    Smooth (or differentiate) a 1-D signal with a Savitzky-Golay filter.

    See http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay

    y: sample array; window_size: odd window length; order: fit polynomial
    order; deriv: derivative order to return; rate: sample rate.
    Returns the filtered signal, same length as ``y``.
    """
    import numpy as np
    from math import factorial

    try:
        # ``np.int``/``np.mat`` were removed in NumPy 1.24 / 2.0; plain
        # int() and arrays are equivalent here.  int() raises TypeError for
        # non-numeric input, so catch that too.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except (ValueError, TypeError):
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Precompute the filter coefficients from the Vandermonde design matrix.
    b = np.array([[k ** i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
    # Pad the signal at the extremes with values mirrored from the signal
    # itself, so the convolution stays full-length.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
Example 28
def min(self):
    """
    The best time overall.

    This is typically the best metric to consider when evaluating the
    execution time of a function. To understand why consider this quote
    from the docs of the original timeit module:

    '''
    In a typical case, the lowest value gives a lower bound for how fast
    your machine can run the given code snippet; higher values in the
    result vector are typically not caused by variability in Python's
    speed, but by other processes interfering with your timing accuracy.
    So the min() of the result is probably the only number you should be
    interested in.
    '''

    Returns:
        float: minimum measured seconds over all trials
    """
    # The fastest recorded trial is the least noisy estimate.
    return min(self.times)
Example 29
def func_permutations(a):
    """Factorial of an atom's value, or all permutations of a sequence."""
    if is_atom(a):
        return Atom(a.type, math.factorial(int(a.value)))
    return [list(p) for p in itertools.permutations(a)]
Example 30
def func_binary_permutations(a, b):
    """P(b, a) for atom operands, or all length-a permutations of sequence b."""
    if is_atom(b):
        count = math.factorial(int(b.value)) // math.factorial(int(b.value - a.value))
        return Atom(a.type, count)
    return [list(p) for p in itertools.permutations(b, int(a))]
Example 31
def func_combinations(a, b):
    """C(b, a) for atom operands, or all length-a combinations of sequence b."""
    size = a.value
    if is_atom(b):
        total = b.value
        # Choosing more than available yields zero.
        if total < size:
            return Atom(a.type, 0)
        n_choose_k = (math.factorial(int(total))
                      // math.factorial(int(size))
                      // math.factorial(int(total - size)))
        return Atom(a.type, n_choose_k)
    elif b:
        # Wrap the requested size into the sequence length.
        size = size % len(b)
        return [list(c) for c in itertools.combinations(b, size)]
    else:
        return []
Example 32
def poisson(self, expected, num):
    # Poisson pmf: P(X = num) for a distribution with mean ``expected``.
    return (expected ** num / math.factorial(num)) * math.exp(-expected)
Example 33
 Project: NiujiaoDebugger   Author: MrSrc   File: test_math.py    GNU General Public License v3.0 5 votes
def py_factorial(n):
    """Factorial of nonnegative integer n, via "Binary Split Factorial Formula"
    described at http://www.luschny.de/math/factorial/binarysplitfact.html

    """
    inner = outer = 1
    # Walk from the most significant bit of n downwards, accumulating
    # partial products of odd factors; the power of two is applied at the end.
    for shift in reversed(range(n.bit_length())):
        inner *= partial_product((n >> shift + 1) + 1 | 1, (n >> shift) + 1 | 1)
        outer *= inner
    return outer << (n - count_set_bits(n))
Example 34
 Project: NiujiaoDebugger   Author: MrSrc   File: test_math.py    GNU General Public License v3.0 5 votes
def testFactorialHugeInputs(self):
    # Inputs too large to fit into a C long currently raise OverflowError.
    self.assertRaises(OverflowError, math.factorial, 10 ** 100)
    self.assertRaises(OverflowError, math.factorial, 1e100)
Example 35
 Project: NiujiaoDebugger   Author: MrSrc   File: test_random.py    GNU General Public License v3.0 5 votes
def test_shuffle(self):
    shuffle = self.gen.shuffle
    # Degenerate sequences are left unchanged.
    seq = []
    shuffle(seq)
    self.assertEqual(seq, [])
    seq = [37]
    shuffle(seq)
    self.assertEqual(seq, [37])
    # Shuffling preserves both length and element membership.
    originals = [list(range(n)) for n in range(10)]
    candidates = [list(range(n)) for n in range(10)]
    for candidate in candidates:
        shuffle(candidate)
    for original, candidate in zip(originals, candidates):
        self.assertEqual(len(original), len(candidate))
        self.assertEqual(set(original), set(candidate))
    # The above tests all would pass if the shuffle was a no-op.  The
    # following non-deterministic check covers that: with 1000 distinct
    # elements there are 1000! possible permutations (considerably larger
    # than the number of atoms in the universe), so the chance of a genuine
    # shuffle returning the identity is completely negligible.
    identity = list(range(1000))
    permuted = list(range(1000))
    shuffle(permuted)
    self.assertTrue(identity != permuted)
    shuffle(identity)
    self.assertTrue(identity != permuted)
    # Immutable sequences cannot be shuffled in place.
    self.assertRaises(TypeError, shuffle, (1, 2, 3))
Example 36
def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A value related to a bound.

    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    choose_2m_m = scipy.special.comb(2 * m, m, exact=True)
    abs_c_recip = float(choose_2m_m * math.factorial(2 * m + 1))

    # The "unit roundoff" of IEEE double precision arithmetic, explained
    # after Eq. (1.2) of the 2009 expm paper.
    u = 2 ** -53

    # One-norm of |A| raised to the (2m+1)-th power.
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2 * m + 1)

    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0

    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha / u)
    return max(int(np.ceil(log2_alpha_div_u / (2 * m))), 0)
Example 37
def calc_bk2(N, a):
    """Build the first N coefficients b_k; entries with k < a-1 are -1."""
    coeffs = []
    for k in range(N):
        if k < a - 1:
            coeffs.append(-1)
            continue
        # Signed factorial ratio times a digamma combination.
        sign_and_ratio = ((-1) ** (k + a)
                          * factorial(k + a - 1)
                          / (factorial(k - a + 1) * factorial(a - 1)))
        psi_term = digamma(k + a) + digamma(k - a + 2) - 2 * digamma(a)
        coeffs.append(sign_and_ratio * psi_term)
    return coeffs
Example 38
def C(n, r):
    """n choose r; zero when r exceeds n."""
    if n < r:
        return 0  # not possible
    return fact(n) // (fact(r) * fact(n - r))
Example 39
 Project: representability   Author: rigetti   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes
def wedge_product(tensor_a, tensor_b):
    """
    Returns the antisymmetric tensor product between two tensor operators

    Tensor operators have the same number of upper and lower indices

    :param tensor_a: tensor of p-rank.
    :param tensor_b: tensor of q-rank
    :returns: tensor_a_w_b antisymmetric tensor of p + q rank
    """
    # Each operator carries equal numbers of upper and lower indices.
    rank_a = int(len(tensor_a.shape) / 2)
    rank_b = int(len(tensor_b.shape) / 2)

    permutations = generate_parity_permutations(rank_a + rank_b)

    # Output tensor sized for the combined upper and lower index ranges.
    new_tensor = np.zeros((tensor_a.shape[:rank_a] + tensor_b.shape[:rank_b] +
                           tensor_a.shape[rank_a:] + tensor_b.shape[rank_b:]),
                          dtype=complex)

    for indices in product(*[range(dim) for dim in new_tensor.shape]):
        idx_upper = np.array(indices[:rank_a + rank_b])
        idx_lower = np.array(indices[rank_a + rank_b:])

        # Accumulate every signed permutation of the upper and lower indices.
        for perm_u, parity_u in permutations:
            for perm_l, parity_l in permutations:
                # Split the permuted indices back into a- and b-parts.
                a_idx_u = list(idx_upper[perm_u][:rank_a])
                b_idx_u = list(idx_upper[perm_u][rank_a:])
                a_idx_l = list(idx_lower[perm_l][:rank_a])
                b_idx_l = list(idx_lower[perm_l][rank_a:])

                sign = parity_u * parity_l
                new_tensor[indices] += sign * (
                    tensor_a[tuple(a_idx_u + a_idx_l)]
                    * tensor_b[tuple(b_idx_u + b_idx_l)])

    # Normalize by the square of the number of antisymmetrized indices.
    new_tensor /= factorial(rank_a + rank_b) ** 2
    return new_tensor
Example 40
 Project: representability   Author: rigetti   File: basis_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes
def antisymmetry_adapting(dim):
    """
    Generate the unitary matrix that transforms the dim**6 to the n choose 3 x
    n choose 3 matrix

    We reverse the order of pp, qq, rr because this is acting on the
    annihilator indices in the 3-RDM.  Each row is indexed by p^ q^ r^ and the
    annihilator is indexed by i j k from <p^ q^ r^ i j k>, but remember we
    store 3D_{ijk}^{pqr} = <p^ q^ r^ k j i>; this means that if we iterate
    over i, j, k then the transform corresponds to the k-j-i index.

    :param dim: single-particle basis dimension
    :return: unitary
    """
    t1_dim = int(comb(dim, 3))
    basis_transform = np.zeros((dim ** 3, t1_dim))
    normalization = 1 / np.sqrt(factorial(3))
    column = 0
    for i, j, k in product(range(dim), repeat=3):
        # Only strictly ordered triples label independent antisymmetric
        # basis vectors; each contributes one column.
        if i < j < k:
            for ii, jj, kk, parity in _three_parity(i, j, k):
                row = ii * dim ** 2 + jj * dim + kk
                basis_transform[row, column] += parity * normalization
            column += 1

    return basis_transform
Example 41
def poisson_1mcdf(lamb, k, offset):
    """Upper tail P(n >= k) of a Poisson(lamb), with k shifted down by offset."""
    # Anything at or below the offset is certain.
    if k <= offset:
        return 1.
    k = k - offset
    # 1 - CDF(k-1): subtract the partial sum of the first k pmf terms
    # (the i = 0 term is 1).
    partial = 1.
    for i in range(1, k):
        partial += lamb ** i / math.factorial(i)
    return 1 - np.exp(-lamb) * partial
Example 42
def poisson(n, lam):
    """Poisson pmf P(X = n) for mean ``lam``, memoized across calls.

    Fixes the original cache key ``n * 10 + lam``, which collided for
    distinct (n, lam) pairs — e.g. (1, 3) and (0, 13) both hashed to 13 —
    silently returning the wrong cached probability.  Also tolerates a
    missing module-level ``poisson_cache`` by creating it on first use.
    """
    global poisson_cache
    try:
        cache = poisson_cache
    except NameError:
        cache = poisson_cache = {}
    key = (n, lam)  # tuple keys cannot collide
    if key not in cache:
        cache[key] = math.exp(-lam) * math.pow(lam, n) / math.factorial(n)
    return cache[key]
Example 43
def test_recursive1(self):
    """ Permutations of list [A,B,C] correct """
    items = ['A', 'B', 'C']
    result = perm_recursive(items)
    # Same permutations as the reference set, and the full 3! of them.
    self.assertEqual(set(tuple(tuple(p) for p in result)), set(self.perm_ABC))
    self.assertEqual(len(result), factorial(len(items)))
Example 44
def test_recursive2(self):
    """ Permutations of str "ABC" correct """
    source = "ABC"
    result = perm_recursive(source)
    # String input must yield the same permutation set as the list form.
    self.assertEqual(set(tuple(tuple(p) for p in result)), set(self.perm_ABC))
    self.assertEqual(len(result), factorial(len(source)))
Example 45
def test_generator1(self):
    """ Generated permutations of list [A,B,C] correct """
    items = ['A', 'B', 'C']
    # Materialize the generator so it can be measured and compared.
    result = list(perm_generator(items))
    self.assertEqual(set(tuple(tuple(p) for p in result)), set(self.perm_ABC))
    self.assertEqual(len(result), factorial(len(items)))
Example 46
def test_generator2(self):
    """ Generated Permutations of str "BAC" correct """
    source = "BAC"
    # Permutations of "BAC" form the same set as permutations of "ABC".
    result = list(perm_generator(source))
    self.assertEqual(set(tuple(tuple(p) for p in result)), set(self.perm_ABC))
    self.assertEqual(len(result), factorial(len(source)))
Example 47
def get_permutation(n, k):
    """Return the k-th (1-based) lexicographic permutation of the digits
    1..n as a string, via the factorial number system.

    Each block of (n-1)! consecutive permutations shares its first digit,
    so the rank decomposes digit by digit.
    """
    numbers = [str(x) for x in range(1, n + 1)]
    digits = []
    k -= 1  # switch to 0-based rank
    while n:
        n -= 1
        # One divmod replaces the original's two factorial computations
        # per iteration (quotient selects the digit, remainder recurses).
        index, k = divmod(k, math.factorial(n))
        digits.append(numbers.pop(index))

    return ''.join(digits)
Example 48
def __init__(self,
             batchsize,
             hidden_dim: int,
             num_node_type: int,
             num_edge_type: int,
             max_num_node: int = 15,
             num_pair_type: int = None,
             celltype="GRU"):
    """Sequential graph generative model (SGGM).

    batchsize: training batch size; hidden_dim: node embedding width;
    num_node_type / num_edge_type: label vocabulary sizes; max_num_node:
    generation length cap; num_pair_type: override for the number of
    (node, node, edge) pair labels; celltype: recurrent cell flavour.
    """
    super(SGGM, self).__init__()
    torch.manual_seed(1)

    self.batchsize = batchsize
    self.hidden_dim = hidden_dim
    self.num_node_type = num_node_type        # NN
    self.num_edge_type = num_edge_type        # NE
    self.max_num_node = max_num_node

    # NOTE(review): the excerpted source assigned None when num_pair_type
    # was None and computed the permutation count otherwise, which looks
    # inverted; compute the default only when no value was supplied —
    # confirm against upstream.
    if num_pair_type is not None:
        self.num_pair_type = num_pair_type
    else:
        # Ordered pairs of distinct node types, one slot per edge type.
        # max(..., 0) guards factorial() when num_node_type < 2.
        self.num_pair_type = math.factorial(num_node_type) \
            // math.factorial(max(num_node_type - 2, 0)) \
            * num_edge_type

    # NOTE(review): the ``num_node_type == 1`` branch was empty in the
    # excerpted source (a syntax error); the add-node layer is now built
    # unconditionally — confirm the intended special case upstream.
    self.addnode_layer = nn.Linear(num_node_type, num_node_type + 1)

    self.initilize_h_block = AggregationBlock(hidden_dim, hidden_dim * 2)

    # self.reverse_message_layer = nn.Linear(3*hidden_dim, 2*3*hidden_dim)
    # Not use reverse layer. This is because I don't support parsing task
    # with this model.
Example 49
 Project: cxroots   Author: rparini   File: Derivative.py    BSD 3-Clause "New" or "Revised" License 5 votes
def CxDerivative(f, z0, n=1, contour=None, absIntegrationTol=1e-10, verbose=False):
    r"""
    Compute the derivative of an analytic function using Cauchy's
    Integral Formula for Derivatives.

    .. math::

        f^{(n)}(z_0) = \frac{n!}{2\pi i} \oint_C \frac{f(z)}{(z-z_0)^{n+1}} dz

    Parameters
    ----------
    f : function
        Function of a single variable f(x).
    z0 : complex
        Point to evaluate the derivative at.
    n : int
        The order of the derivative to evaluate.
    contour : :class:Contour <cxroots.Contour.Contour>, optional
        The contour, C, in the complex plane which encloses the point z0.
        By default the contour is the circle |z-z_0|=1e-3.
    absIntegrationTol : float, optional
        The absolute tolerance required of the integration routine.
    verbose : bool, optional
        If True runtime information will be printed.  False be default.

    Returns
    -------
    f^{(n)}(z0) : complex
        The nth derivative of f evaluated at z0
    """
    if contour is None:
        # Default to a small circle centred on the evaluation point.
        from .contours.Circle import Circle
        make_contour = lambda centre: Circle(centre, 1e-3)
    else:
        make_contour = lambda centre: contour

    integrand = lambda z: f(z) / (z - z0) ** (n + 1)
    integral = make_contour(z0).integrate(
        integrand, absTol=absIntegrationTol, verbose=verbose)
    return integral * math.factorial(n) / (2j * pi)
Example 50
def generate_permutation_set(length):
    '''
    This function generates the set of maximum Hamming distance permutations.

    Returns:
    ---------
    perm: list of ``length`` permutations chosen to maximize Hamming distance,
          also saved to ``jigsaw_max_hamming_set.npy``.
    '''
    perm = []
    total = math.factorial(9)

    # Materialize all 9! permutations directly; the original appended them
    # one by one and printed every tuple (362 880 lines of debug output).
    p = list(itertools.permutations(range(9)))
    print('Finished generating entire set with size {s}'.format(s=len(p)))
    # Seed the selection with a uniformly random permutation.
    p0 = random.randint(0, total - 1)

    perm.append(p.pop(p0))

    # Greedily add the candidate farthest (in Hamming distance) from the
    # permutations chosen so far.
    for i in range(length - 1):
        next_index, _ = get_max_hamming_distance_index(p, perm)
        perm.append(p.pop(next_index))

    asset_dir = "../"
    store_location = os.path.join(asset_dir, 'jigsaw_max_hamming_set.npy')

    with open(store_location, 'wb') as store:
        np.save(store, perm)

    return perm
Example 51
def combos(n, k=2):
    """Compute n choose k exactly.

    Uses floor division throughout: the original ``int(factorial(n) / ...)``
    rounds through a float and returns wrong counts once n choose k
    exceeds 2**53 (e.g. for n around 60 and above).
    """
    return factorial(n) // (factorial(k) * factorial(n - k))
Example 52
def combination(n, r):
    """n choose r: n! / r! / (n-r)!, computed with float division."""
    partial = factorial(n) / factorial(r)
    return partial / factorial(n - r)
Example 53
def func_factorial(a):
    """ functor for a bounded factorial """
    # 170! is the largest factorial representable as an IEEE double.
    if a > 170:
        raise Exception('Factorial limited to N <= 170')
    return math.factorial(a)
Example 54
def func_double_factorial(a):
    """ functor for a bounded double factorial: a * (a-2) * (a-4) * ... """
    # Python 3 port: ``reduce`` is no longer a builtin and the ``1L`` long
    # literal is invalid syntax; plain ints are arbitrary precision.
    from functools import reduce
    if a > 288:
        raise Exception('Double factorial limited to N <= 288')
    return reduce(operator.mul, range(a, 0, -2), 1)
Example 55
 Project: programming-problems   Author: jsgoller1   File: binomial_coefficients.py    GNU General Public License v3.0 5 votes
def binomial_coefficient_direct(n, k):
    """n choose k via the factorial formula, computed exactly.

    Floor division keeps the big-integer arithmetic exact; the original
    ``int(a / b)`` rounded through a float and returned wrong values once
    the coefficient exceeded 2**53.
    """
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
Example 56
def reduce_var_square(var_square, avg_diff, count, op, axis, sum_func):
    """Combine per-chunk central-moment accumulators into one reduction."""
    moment = op.moment
    dtype = op.dtype
    kw = dict(axis=axis, dtype=dtype, keepdims=bool(op.keepdims))

    # Leading term: highest stored moment plus the mean-shift correction.
    result = var_square[..., moment - 2].sum(**kw) + \
        sum_func(count * avg_diff ** moment, **kw)
    # Binomial cross terms over the lower-order stored moments.
    for i in range(1, moment - 1):
        coeff = factorial(moment) / float(factorial(i) * factorial(moment - i))
        result += coeff * sum_func(
            var_square[..., moment - i - 2] * avg_diff ** moment, **kw)
    return result
Example 57
 Project: PySail-426   Author: j3b4   File: ModuloBarca01.py    GNU General Public License v3.0 5 votes
def cfbinomiale(n, i):
    """Binomial coefficient n choose i: n! / ((n-i)! * i!)."""
    numerator = math.factorial(n)
    denominator = math.factorial(n - i) * math.factorial(i)
    return numerator / denominator
Example 58
 Project: PySail-426   Author: j3b4   File: ModuloBarca02.py    GNU General Public License v3.0 5 votes
def cfbinomiale(n, i):
    # n choose i via factorials: n! / ((n-i)! * i!).
    fact = math.factorial
    return fact(n) / (fact(n - i) * fact(i))
Example 59
 Project: PySail-426   Author: j3b4   File: ModuloBarca02.py    GNU General Public License v3.0 5 votes
def cfbinomiale(n, i):
    """Coefficient binomial: how many ways to choose i items out of n."""
    return math.factorial(n) / (math.factorial(i) * math.factorial(n - i))
Example 60
def binomial(n, k):
    """n choose k via exact integer factorials; 0 for out-of-range arguments."""
    try:
        result = math.factorial(n) // math.factorial(k) // math.factorial(n - k)
    except ValueError:
        # Some factorial argument was negative (k > n or a negative input).
        result = 0
    return result
Example 61
def reset(self, p=None):
    """Restart the fold generator, optionally with a new held-out size p.

    p: new number of labels held out per fold; None keeps the current
    ``self.p`` (the original crashed in that case by passing the raw
    None through to combinations() and factorial()).
    """
    if p is not None:
        self.p = p
    # Use self.p from here on, never the raw (possibly-None) argument.
    if self.non_overlapping_folds:
        self._test_combinations = NonOverlappingCombinations(
            n=self.num_unique_labels, p=self.p, seed=self.seed)
        self._total_combinations = \
            self._test_combinations.remaining_length()
    else:
        self._test_combinations = \
            combinations(range(self.num_unique_labels), self.p)
        # Floor division keeps C(n, p) exact; int(a / b) rounds through a
        # float and is wrong once the count exceeds 2**53.
        self._total_combinations = (
            factorial(self.num_unique_labels)
            // (factorial(self.num_unique_labels - self.p)
                * factorial(self.p)))
    self._remaining_combinations = self._total_combinations
Example 62
def __init__(self, n, p, max_combinations=None, seed=None):
    """Lazy rejection sampler over the C(n, p) combinations of range(n).

    n: total number of elements; p: group size; max_combinations: cap on
    how many combinations to yield; seed: optional RNG seed.
    """
    assert p <= n
    # The total number of elements.
    self._n = n
    # The size of the groups.
    self._p = p
    # Initialize the random number generator.
    self._seed = seed
    if seed is not None:
        self._rng = np.random.RandomState(seed)
    else:
        self._rng = np.random.RandomState()
    self._combinations = itertools.combinations(range(n), p)

    # Keep track of the number of remaining combinations.  Floor division
    # keeps C(n, p) exact; int(a / b) rounds through a float and is wrong
    # once the count exceeds 2**53.
    self._total_combinations = \
        factorial(n) // (factorial(n - p) * factorial(p))

    # Keep track of the remaining combinations of the self._combinations
    # object.
    self._remaining_combinations = self._total_combinations

    # Set the max number of combinations to generate, after which we return
    # StopIteration()
    self._max_combinations = self._total_combinations \
        if max_combinations is None \
        else min(max_combinations, self._total_combinations)

    # Keep track of the number of combinations that still need to be generated
    # up to max_combinations.
    self._remaining_from_max = self._max_combinations

    # Set the rejection probability for rejection sampling.
    self._accept_probability = \
        self._max_combinations / self._total_combinations
Example 63
def __init__(self, n, p, max_combinations=None, seed=None):
    """Pre-shuffled sampler over the C(n, p) combinations of range(n).

    n: total number of elements; p: group size; max_combinations: cap on
    how many combinations to keep; seed: optional RNG seed.
    """
    assert p <= n
    # The total number of elements.
    self._n = n
    # The size of the groups.
    self._p = p
    # Initialize the random number generator.
    self._seed = seed
    if seed is not None:
        self._rng = np.random.RandomState(seed)
    else:
        self._rng = np.random.RandomState()

    # Keep track of the number of remaining combinations.  Floor division
    # keeps C(n, p) exact; int(a / b) rounds through a float and is wrong
    # once the count exceeds 2**53.
    self._total_combinations = \
        factorial(n) // (factorial(n - p) * factorial(p))

    # Set the max number of combinations to generate, after which we return
    # StopIteration()
    self._max_combinations = self._total_combinations \
        if max_combinations is None \
        else min(max_combinations, self._total_combinations)

    # Keep track of the remaining combinations.
    self._counter = 0

    # Generate the combinations in random order, and store only the first
    # self._max_combinations.
    self._combinations = list(itertools.combinations(range(n), p))
    self._rng.shuffle(self._combinations)
    self._combinations = self._combinations[:max_combinations]
Example 64
def testFactorial(self):
    """math.factorial agrees with a straightforward reference product."""
    def fact(n):
        # Reference implementation: multiply n, n-1, ..., 2 together.
        total, k = 1, int(n)
        while k > 1:
            total *= k
            k -= 1
        return total

    values = range(10) + [50, 100, 500]
    random.shuffle(values)
    for x in values:
        for cast in (int, long, float):
            self.assertEqual(math.factorial(cast(x)), fact(x),
                             (x, fact(x), math.factorial(x)))
    # Negative and non-integral arguments are rejected.
    self.assertRaises(ValueError, math.factorial, -1)
    self.assertRaises(ValueError, math.factorial, math.pi)
Example 65
def test_gamma(self):
    """gamma matches factorial at integers and handles special values."""
    # gamma(1/2) == sqrt(pi), a classic closed-form value.
    self.assertAlmostEqual(math.gamma(0.5), math.sqrt(math.pi), places=15)
    # gamma(n) == (n - 1)! for positive integers.
    for n in xrange(1, 20):
        self.assertEqual(math.factorial(n - 1), math.gamma(n))
    self.assertEqual(math.gamma(float('inf')), float('inf'))
    self.assertRaises(ValueError, math.gamma, float('-inf'))
    self.assertTrue(math.isnan(math.gamma(float('nan'))))
    # gamma has poles at zero and every negative integer.
    for pole in xrange(0, -1001, -1):
        self.assertRaises(ValueError, math.gamma, pole)
Example 66
def test_lgamma(self):
    """lgamma matches log(factorial) and handles special values."""
    places = 14
    # lgamma(1/2) == log(sqrt(pi)).
    self.assertAlmostEqual(math.lgamma(0.5), 0.5 * math.log(math.pi), places=15)
    for n in xrange(1, 20):
        # Larger arguments accumulate a bit more rounding error.
        if n > 14:
            places = 13
        self.assertAlmostEqual(math.log(math.factorial(n - 1)),
                               math.lgamma(n), places=places)
    self.assertEqual(math.lgamma(float('inf')), float('inf'))
    # Unlike gamma, lgamma(-inf) is +inf rather than an error.
    self.assertEqual(math.lgamma(float('-inf')), float('inf'))
    self.assertTrue(math.isnan(math.lgamma(float('nan'))))
    # Poles at zero and every negative integer raise.
    for pole in xrange(0, -1001, -1):
        self.assertRaises(ValueError, math.lgamma, pole)
Example 67
def _binomial(n, k):
    """Return C(n, k), the number of k-element subsets of an n-element set."""
    numerator = factorial(n)
    denominator = factorial(k) * factorial(n - k)
    return numerator // denominator
Example 68
def findsum(n):
    """Return the sum of the factorials of the decimal digits of n.

    E.g. findsum(145) == 1! + 4! + 5! == 145.  For n <= 0 the loop never
    runs, so the result is 0.
    """
    total = 0
    while n > 0:
        # Peel off the lowest digit and accumulate its factorial.
        n, digit = divmod(n, 10)
        total += math.factorial(digit)
    return total
Example 69
def test_leaf_commutativity(self):
    """Operations writing to non-overlapping regions must commute: every
    order of applying ``ops`` yields the same leaf (data/hole) structure,
    even though each order builds a distinct nesting."""
    clone = (Extent.empty()
             .write(offset=0, length=2)
             .write(offset=3, length=1))
    # Sanity-check the fixture: 2 bytes data, 1 hole, 1 byte data.
    self.assertEqual('d2h1d1', repr(clone))
    # If we treat as identical holes of different provenance but the
    # same length, these operations should commute since they write data
    # to nonoverlapping regions.
    ops = [
        lambda e: e.write(offset=3, length=5),
        lambda e: e.truncate(length=25),
        lambda e: e.write(offset=1, length=1),
        lambda e: e.write(offset=17, length=2),
        lambda e: e.clone(
            to_offset=10,
            from_extent=clone, from_offset=0, length=clone.length,
        ),
    ]

    def compose_ops(ops_it):
        # Left-fold the ops over a fresh empty extent.
        return functools.reduce(
            lambda extent, op: op(extent), ops_it, Extent.empty(),
        )

    # All permutations make distinct nestings with the same leaf structure
    all_extents = {compose_ops(p) for p in itertools.permutations(ops)}
    self.assertEqual(math.factorial(len(ops)), len(all_extents))
    # Every permutation must trim to this one leaf sequence (holes are
    # compared by length only, data leaves by (offset, length, extent)).
    self.assertEqual(
        {(
            1,
            (0, 1, Extent(content=Extent.Kind.DATA, offset=0, length=1)),
            1,
            (0, 5, Extent(content=Extent.Kind.DATA, offset=0, length=5)),
            2,
            (0, 2, Extent(content=Extent.Kind.DATA, offset=0, length=2)),
            1,
            (0, 1, Extent(content=Extent.Kind.DATA, offset=0, length=1)),
            3,
            (0, 2, Extent(content=Extent.Kind.DATA, offset=0, length=2)),
            6,
        )},
        {
            tuple(
                # Different permutations produce the same-length holes
                # differently, so let's only compare lengths.
                l if se.content == Extent.Kind.HOLE else (o, l, se)
                for o, l, se in e.gen_trimmed_leaves()
            ) for e in all_extents
        },
    )

# test_write_and_clone covers leaf identity, but it's still nice to
# explicitly check that the whole nested object is cloned. 
Example 70
 Project: razzy-spinner   Author: rafasashi   File: ibm3.py    GNU General Public License v3.0 4 votes
def prob_t_a_given_s(self, alignment_info):
    """
    Probability of target sentence and an alignment given the
    source sentence.

    :param alignment_info: holds the source/target sentences, the
        word alignment, and per-position fertility counts
    :return: the joint probability; returns ``IBMModel.MIN_PROB`` as soon
        as the running product underflows below it
    """
    src_sentence = alignment_info.src_sentence
    trg_sentence = alignment_info.trg_sentence
    l = len(src_sentence) - 1  # exclude NULL
    m = len(trg_sentence) - 1
    p1 = self.p1  # probability of inserting a NULL-aligned target word
    p0 = 1 - p1

    probability = 1.0
    MIN_PROB = IBMModel.MIN_PROB

    # Combine NULL insertion probability
    null_fertility = alignment_info.fertility_of_i(0)
    probability *= (pow(p1, null_fertility) *
                    pow(p0, m - 2 * null_fertility))
    if probability < MIN_PROB:
        return MIN_PROB

    # Compute combination (m - null_fertility) choose null_fertility
    for i in range(1, null_fertility + 1):
        probability *= (m - null_fertility - i + 1) / i
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine fertility probabilities
    for i in range(1, l + 1):
        fertility = alignment_info.fertility_of_i(i)
        probability *= (factorial(fertility) *
                        self.fertility_table[fertility][src_sentence[i]])
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine lexical and distortion probabilities
    for j in range(1, m + 1):
        t = trg_sentence[j]
        i = alignment_info.alignment[j]
        s = src_sentence[i]

        probability *= (self.translation_table[t][s] *
                        self.distortion_table[j][i][l][m])
        if probability < MIN_PROB:
            return MIN_PROB

    return probability
Example 71
def prob_t_a_given_s(self, alignment_info):
    """
    Probability of target sentence and an alignment given the
    source sentence.

    :param alignment_info: holds the source/target sentences, the
        word alignment, and per-position fertility counts
    :return: the joint probability; returns ``IBMModel.MIN_PROB`` as soon
        as the running product underflows below it
    """
    src_sentence = alignment_info.src_sentence
    trg_sentence = alignment_info.trg_sentence
    l = len(src_sentence) - 1  # exclude NULL
    m = len(trg_sentence) - 1
    p1 = self.p1  # probability of inserting a NULL-aligned target word
    p0 = 1 - p1

    probability = 1.0
    MIN_PROB = IBMModel.MIN_PROB

    # Combine NULL insertion probability
    null_fertility = alignment_info.fertility_of_i(0)
    probability *= (pow(p1, null_fertility) *
                    pow(p0, m - 2 * null_fertility))
    if probability < MIN_PROB:
        return MIN_PROB

    # Compute combination (m - null_fertility) choose null_fertility
    for i in range(1, null_fertility + 1):
        probability *= (m - null_fertility - i + 1) / i
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine fertility probabilities
    for i in range(1, l + 1):
        fertility = alignment_info.fertility_of_i(i)
        probability *= (factorial(fertility) *
                        self.fertility_table[fertility][src_sentence[i]])
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine lexical and distortion probabilities
    for j in range(1, m + 1):
        t = trg_sentence[j]
        i = alignment_info.alignment[j]
        s = src_sentence[i]

        probability *= (self.translation_table[t][s] *
                        self.distortion_table[j][i][l][m])
        if probability < MIN_PROB:
            return MIN_PROB

    return probability
Example 72
def prob_t_a_given_s(self, alignment_info):
    """
    Probability of target sentence and an alignment given the
    source sentence.

    :param alignment_info: holds the source/target sentences, the
        word alignment, and per-position fertility counts
    :return: the joint probability; returns ``IBMModel.MIN_PROB`` as soon
        as the running product underflows below it
    """
    src_sentence = alignment_info.src_sentence
    trg_sentence = alignment_info.trg_sentence
    l = len(src_sentence) - 1  # exclude NULL
    m = len(trg_sentence) - 1
    p1 = self.p1  # probability of inserting a NULL-aligned target word
    p0 = 1 - p1

    probability = 1.0
    MIN_PROB = IBMModel.MIN_PROB

    # Combine NULL insertion probability
    null_fertility = alignment_info.fertility_of_i(0)
    probability *= (pow(p1, null_fertility) *
                    pow(p0, m - 2 * null_fertility))
    if probability < MIN_PROB:
        return MIN_PROB

    # Compute combination (m - null_fertility) choose null_fertility
    for i in range(1, null_fertility + 1):
        probability *= (m - null_fertility - i + 1) / i
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine fertility probabilities
    for i in range(1, l + 1):
        fertility = alignment_info.fertility_of_i(i)
        probability *= (factorial(fertility) *
                        self.fertility_table[fertility][src_sentence[i]])
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine lexical and distortion probabilities
    for j in range(1, m + 1):
        t = trg_sentence[j]
        i = alignment_info.alignment[j]
        s = src_sentence[i]

        probability *= (self.translation_table[t][s] *
                        self.distortion_table[j][i][l][m])
        if probability < MIN_PROB:
            return MIN_PROB

    return probability
Example 73
def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A value related to a bound.

    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # Degree used in the error-bound expressions below.
    p = 2*m + 1

    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    choose_2p_p = scipy.special.comb(2*p, p, exact=True)
    abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))

    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE double precision arithmetic.
    u = 2**-53

    # Compute the one-norm of matrix power p of abs(A).
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)

    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0

    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha/u)
    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
    return max(value, 0)
Example 74
def test_factorial(self):
    """Exercise scipy.special.factorial: known values, exact vs float
    modes, array inputs, negative inputs, and agreement with
    math.factorial."""
    # Some known values, float math
    assert_array_almost_equal(special.factorial(0), 1)
    assert_array_almost_equal(special.factorial(1), 1)
    assert_array_almost_equal(special.factorial(2), 2)
    assert_array_almost_equal([6., 24., 120.],
                              special.factorial([3, 4, 5], exact=False))
    assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
                              [[120, 6], [24, 6]])

    # Some known values, integer math
    assert_equal(special.factorial(0, exact=True), 1)
    assert_equal(special.factorial(1, exact=True), 1)
    assert_equal(special.factorial(2, exact=True), 2)
    assert_equal(special.factorial(5, exact=True), 120)
    assert_equal(special.factorial(15, exact=True), 1307674368000)

    # ndarray shape is maintained
    assert_equal(special.factorial([7, 4, 15, 10], exact=True),
                 [5040, 24, 1307674368000, 3628800])

    assert_equal(special.factorial([[5, 3], [4, 3]], True),
                 [[120, 6], [24, 6]])

    # object arrays
    assert_equal(special.factorial(np.arange(-3, 22), True),
                 special.factorial(np.arange(-3, 22), False))

    # int64 array
    assert_equal(special.factorial(np.arange(-3, 15), True),
                 special.factorial(np.arange(-3, 15), False))

    # int32 array
    assert_equal(special.factorial(np.arange(-3, 5), True),
                 special.factorial(np.arange(-3, 5), False))

    # Consistent output for n < 0
    for exact in (True, False):
        assert_array_equal(0, special.factorial(-3, exact))
        assert_array_equal([1, 2, 0, 0],
                           special.factorial([1, 2, -5, -4], exact))

    for n in range(0, 22):
        # Compare all with math.factorial
        correct = math.factorial(n)
        assert_array_equal(correct, special.factorial(n, True))
        assert_array_equal(correct, special.factorial([n], True)[0])

        assert_allclose(float(correct), special.factorial(n, False))
        assert_allclose(float(correct), special.factorial([n], False)[0])

        # Compare exact=True vs False, scalar vs array
        assert_array_equal(special.factorial(n, True),
                           special.factorial(n, False))

        assert_array_equal(special.factorial([n], True),
                           special.factorial([n], False))
Example 75
def factorial2(n, exact=False):
    """Double factorial.

    This is the factorial with every second value skipped.  E.g., 7!! = 7 * 5
    * 3 * 1.  It can be approximated numerically as::

        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
            = 2**(n/2) * (n/2)!                           n even

    Parameters
    ----------
    n : int or array_like
        Calculate n!!.  Arrays are only supported with exact set
        to False.  If n < 0, the return value is 0.
    exact : bool, optional
        The result can be approximated rapidly using the gamma-formula
        above (default).  If exact is set to True, calculate the
        answer exactly using integer arithmetic.

    Returns
    -------
    nff : float or int
        Double factorial of n, as an int or a float depending on
        exact.

    Examples
    --------
    >>> from scipy.special import factorial2
    >>> factorial2(7, exact=False)
    array(105.00000000000001)
    >>> factorial2(7, exact=True)
    105L

    """
    if exact:
        # -1!! and 0!! are the empty product (1); anything below -1 is out
        # of the domain and maps to 0.
        if n < -1:
            return 0
        if n <= 0:
            return 1
        val = 1
        # Multiply n, n-2, n-4, ... down to 1 or 2.
        for k in xrange(n, 0, -2):
            val *= k
        return val
    else:
        n = asarray(n)
        vals = zeros(n.shape, 'd')
        # Split input into odd and even entries (restricted to n >= -1);
        # each parity has its own closed-form gamma expression.
        cond1 = (n % 2) & (n >= -1)
        cond2 = (1-(n % 2)) & (n >= -1)
        oddn = extract(cond1, n)
        evenn = extract(cond2, n)
        nd2o = oddn / 2.0
        nd2e = evenn / 2.0
        place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
        place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
        return vals
Example 76
def factorialk(n, k, exact=True):
    """Multifactorial of n of order k, n(!!...!).

    This is the multifactorial of n skipping k values.  For example,

        factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1

    In particular, for any integer n, we have

        factorialk(n, 1) = factorial(n)

        factorialk(n, 2) = factorial2(n)

    Parameters
    ----------
    n : int
        Calculate multifactorial. If n < 0, the return value is 0.
    k : int
        Order of multifactorial.
    exact : bool, optional
        If exact is set to True, calculate the answer exactly using
        integer arithmetic.

    Returns
    -------
    val : int
        Multifactorial of n.

    Raises
    ------
    NotImplementedError
        Raised when exact is False; only the exact integer algorithm
        is implemented.

    Examples
    --------
    >>> from scipy.special import factorialk
    >>> factorialk(5, 1, exact=True)
    120L
    >>> factorialk(5, 3, exact=True)
    10L

    """
    if exact:
        # Below 1 - k the product is undefined and maps to 0.
        if n < 1 - k:
            return 0
        # Empty product for n in (1 - k, 0].
        if n <= 0:
            return 1
        val = 1
        # Multiply n, n-k, n-2k, ... down to (but not past) zero.
        for j in xrange(n, 0, -k):
            val = val*j
        return val
    else:
        raise NotImplementedError
Example 77
 Project: NiujiaoDebugger   Author: MrSrc   File: test_math.py    GNU General Public License v3.0 4 votes
def ulp(x):
    """Return the value of the least significant bit of a
    float x, such that the first float bigger than x is x+ulp(x).
    Then, given an expected result x and a tolerance of n ulps,
    the result y should be such that abs(y-x) <= n * ulp(x).
    The results from this function will only make sense on platforms
    where native doubles are represented in IEEE 754 binary64 format.
    """
    x = abs(float(x))
    # NaN and infinity are their own "ulp".
    if math.isnan(x) or math.isinf(x):
        return x

    # Reinterpret the double's bits as an integer; adjacent bit patterns
    # are adjacent floats of the same sign.
    bits = struct.unpack('<q', struct.pack('<d', x))[0]
    successor = struct.unpack('<d', struct.pack('<q', bits + 1))[0]
    if not math.isinf(successor):
        return successor - x
    # Corner case: x was the largest finite float. Then it's
    # not an exact power of two, so we can take the difference
    # between x and the previous float.
    predecessor = struct.unpack('<d', struct.pack('<q', bits - 1))[0]
    return x - predecessor

# Here's a pure Python version of the math.factorial algorithm, for
# documentation and comparison purposes.
#
# Formula:
#
#   factorial(n) = factorial_odd_part(n) << (n - count_set_bits(n))
#
# where
#
#   factorial_odd_part(n) = product_{i >= 0} product_{0 < j <= n >> i; j odd} j
#
# The outer product above is an infinite product, but once i >= n.bit_length,
# (n >> i) < 1 and the corresponding term of the product is empty.  So only the
# finitely many terms for 0 <= i < n.bit_length() contribute anything.
#
# We iterate downwards from i == n.bit_length() - 1 to i == 0.  The inner
# product in the formula above starts at 1 for i == n.bit_length(); for each i
# < n.bit_length() we get the inner product for i from that for i + 1 by
# multiplying by all j in {n >> i+1 < j <= n >> i; j odd}.  In Python terms,
# this set is range((n >> i+1) + 1 | 1, (n >> i) + 1 | 1, 2). 
Example 78
def prob_t_a_given_s(self, alignment_info):
    """
    Probability of target sentence and an alignment given the
    source sentence.

    :param alignment_info: holds the source/target sentences, the
        word alignment, and per-position fertility counts
    :return: the joint probability; returns ``IBMModel.MIN_PROB`` as soon
        as the running product underflows below it
    """
    src_sentence = alignment_info.src_sentence
    trg_sentence = alignment_info.trg_sentence
    l = len(src_sentence) - 1  # exclude NULL
    m = len(trg_sentence) - 1
    p1 = self.p1  # probability of inserting a NULL-aligned target word
    p0 = 1 - p1

    probability = 1.0
    MIN_PROB = IBMModel.MIN_PROB

    # Combine NULL insertion probability
    null_fertility = alignment_info.fertility_of_i(0)
    probability *= (pow(p1, null_fertility) *
                    pow(p0, m - 2 * null_fertility))
    if probability < MIN_PROB:
        return MIN_PROB

    # Compute combination (m - null_fertility) choose null_fertility
    for i in range(1, null_fertility + 1):
        probability *= (m - null_fertility - i + 1) / i
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine fertility probabilities
    for i in range(1, l + 1):
        fertility = alignment_info.fertility_of_i(i)
        probability *= (factorial(fertility) *
                        self.fertility_table[fertility][src_sentence[i]])
        if probability < MIN_PROB:
            return MIN_PROB

    # Combine lexical and distortion probabilities
    for j in range(1, m + 1):
        t = trg_sentence[j]
        i = alignment_info.alignment[j]
        s = src_sentence[i]

        probability *= (self.translation_table[t][s] *
                        self.distortion_table[j][i][l][m])
        if probability < MIN_PROB:
            return MIN_PROB

    return probability
Example 79
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) a signal with a Savitzky-Golay filter.

    Fits a polynomial of degree ``order`` to each window of ``window_size``
    samples by least squares and evaluates its ``deriv``-th derivative at
    the window centre.  The signal is mirror-padded at both ends so the
    output has the same length as the input.

    :param y: input signal, 1-D array-like of numbers
    :param window_size: odd, positive length of the smoothing window
    :param order: degree of the fitted polynomial; window_size must be at
        least order + 2
    :param deriv: order of the derivative to compute, defaults to 0
        (plain smoothing)
    :param rate: sample rate used to scale derivative values, defaults to 1
    :raises ValueError: if window_size or order cannot be converted to int
    :raises TypeError: if window_size is even, non-positive, or too small
        for the requested polynomial order
    :return: the smoothed signal (or its deriv-th derivative), same length as y
    :rtype: numpy.ndarray
    """
    y = np.asarray(y, dtype=float)

    try:
        # Plain builtin int(): the np.int alias was removed in NumPy 1.24,
        # so the previous np.int(...) raises AttributeError there.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Precompute the filter coefficients from the Vandermonde matrix of the
    # window offsets.  Plain ndarrays replace the deprecated np.mat/.A
    # matrix API; np.linalg.pinv already returns an ndarray here.
    b = np.array([[k ** i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
    # Pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
Example 80
 Project: att   Author: Centre-Alt-Rendiment-Esportiu   File: matfuncs.py    GNU General Public License v3.0 4 votes
def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator

    Returns
    -------
    value : int
        A value related to a bound.

    """
    # scipy.misc.comb was deprecated and later removed; the same routine
    # lives in scipy.special.  Imported locally to keep this fix
    # self-contained.
    from scipy.special import comb

    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # Degree used in the error-bound expressions below.
    p = 2*m + 1

    # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
    # They are coefficients of terms of a generating function series expansion.
    choose_2p_p = comb(2*p, p, exact=True)
    abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))

    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE double precision arithmetic.
    u = 2**-53

    # Compute the one-norm of matrix power p of abs(A).
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)

    # Treat zero norm as a special case.
    if not A_abs_onenorm:
        return 0

    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    log2_alpha_div_u = np.log2(alpha/u)
    value = int(np.ceil(log2_alpha_div_u / (2 * m)))
    return max(value, 0)