# Python random.normalvariate() Examples

The following are 30 code examples of random.normalvariate(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the random module, or try the search function.
Example #1
def heston_construct_correlated_path(params: ModelParameters,
                                     brownian_motion_one: np.array):
    """Build a second Brownian path correlated with *brownian_motion_one*.

    Simplified two-asset form of the Cholesky decomposition method: no
    matrix algebra is needed for just two correlated processes.

    Arguments:
        params : ModelParameters
            The parameters for the stochastic model.
        brownian_motion_one : np.array
            The reference Brownian motion path.

    Returns:
        A tuple (path_one, path_two) of correlated brownian motion paths.
    """
    # Sigma is deliberately not applied here; the Heston model does that.
    step_std = np.sqrt(params.all_delta)
    rho = params.cir_rho
    orthogonal_scale = np.sqrt(1 - pow(rho, 2))
    correlated_path = []
    for idx in range(params.all_time - 1):
        # rho-weighted copy of path one plus an orthogonal gaussian term.
        increment = (rho * brownian_motion_one[idx] +
                     orthogonal_scale * random.normalvariate(0, step_std))
        correlated_path.append(increment)
    return np.array(brownian_motion_one), np.array(correlated_path)
Example #2
def random_rectangle(max_side, min_side, sigma=0.5, ratio=1.0, coherce=True):
    """Draw a random (width, height) pair centred on the middle of the
    allowed side range, with gaussian spread *sigma* and aspect *ratio*."""
    assert min_side <= max_side

    # Midpoint of the allowed size range and its half-width.
    half_range = (max_side - min_side) / 2
    midpoint = max_side - half_range

    # Gaussian offsets scaled to the half range (width is drawn first).
    width = random.normalvariate(0, sigma) * half_range
    height = random.normalvariate(0, sigma) * half_range

    # Apply the aspect ratio to the appropriate dimension.
    if ratio > 1:
        height = height / ratio
    else:
        width = width * ratio

    # Coherce value to max
    if coherce:
        width = coherce_to(max_side, min_side, width + midpoint)
        height = coherce_to(max_side, min_side, height + midpoint)

    return width, height
Example #3
def n_rolls(count, n):
    """roll an n-sided die count times

    :type count: int
    :type n: int | str
    """
    # Fudge dice: each die is -1, 0 or +1 (capped at 100 real rolls).
    if n in ('f', 'F'):
        return [random.randint(-1, 1) for _ in range(min(count, 100))]

    # Small requests are rolled individually.
    if count < 100:
        return [random.randint(1, n) for _ in range(count)]

    # Calculate a random sum approximated using a randomized normal variate with the midpoint used as the mu
    # and an approximated standard deviation based on variance as the sigma
    mid = .5 * (n + 1) * count
    var = (n ** 2 - 1) / 12
    adj_var = (var * count) ** 0.5
    # BUG FIX(review): the original snippet was truncated here and never
    # returned; return the approximated total as a single-element list,
    # mirroring the shape used by the sibling n_rolls implementation.
    return [int(random.normalvariate(mid, adj_var))]

Example #4
def _get_gauss_data(n=10000, covariance=0.4, expand=True):
    """Generate correlated and uncorrelated Gaussian variables.

    Generate two sets of random normal data, where one set has a given
    covariance and the second is uncorrelated.
    """
    corr_expected = covariance / (1 * np.sqrt(covariance**2 + (1-covariance)**2))
    expected_mi = calculate_mi(corr_expected)
    src_corr = [rn.normalvariate(0, 1) for r in range(n)]    # correlated src
    src_uncorr = [rn.normalvariate(0, 1) for r in range(n)]  # uncorrelated src
    # Independent noise mixed into the target alongside the correlated source.
    noise = [rn.normalvariate(0, 1) for r in range(n)]
    target = [c + u for c, u in zip(
        (covariance * y for y in src_corr),
        ((1 - covariance) * y for y in noise))]
    # Make everything numpy arrays so jpype understands it. Add an additional
    # axis if requested (MI/CMI estimators accept 2D arrays, TE/AIS only 1D).
    if expand:
        to_array = lambda x: np.expand_dims(np.array(x), axis=1)
    else:
        to_array = np.array
    src_corr = to_array(src_corr)
    src_uncorr = to_array(src_uncorr)
    target = to_array(target)
    return expected_mi, src_corr, src_uncorr, target
Example #5
def test_single_source_storage_gaussian():
    """Check that AIS on memoryless gaussian noise is estimated as NaN."""
    n = 1000
    # Two independent standard-normal series -- no temporal structure.
    series = [[rn.normalvariate(0, 1) for _ in range(n)] for _ in range(2)]
    # Cast everything to numpy so the idtxl estimator understands it.
    data = Data(np.array(series), dim_order='ps')
    settings = {
        'alpha_mi': 0.05,
        'tail_mi': 'one_bigger',
        'n_perm_max_stat': 21,
        'n_perm_min_stat': 21,
        'n_perm_mi': 21,
        'max_lag': 5,
        'tau': 1
    }
    network_analysis = ActiveInformationStorage()
    results = network_analysis.analyse_network(settings, data, [1])
    print('AIS for random normal data without memory (expected is NaN): '
          '{0}'.format(results._single_process[1].ais))
    assert results._single_process[1].ais is np.nan, (
        'Estimator did not return nan for memoryless data.')
Example #6
def testDependentGridSearchCallable(self):
    """Grid search over two callable samplers keeps the dependent value fixed."""
    class GaussianSampler:
        # Draws a fresh standard-normal value each time it is evaluated.
        def __call__(self, _config):
            return random.normalvariate(mu=0, sigma=1)

    class ConstantSampler:
        # Always yields the same value, regardless of config.
        def __call__(self, _config):
            return 20

    spec = {
        "run": "PPO",
        "config": {
            "x": grid_search(
                [tune.sample_from(GaussianSampler()),
                 tune.sample_from(GaussianSampler())]),
            "y": tune.sample_from(ConstantSampler()),
        },
    }
    trials = list(self.generate_trials(spec, "dependent_grid_search"))
    # One trial per grid point; the constant sampler is identical in both.
    self.assertEqual(len(trials), 2)
    self.assertEqual(trials[0].config["y"], 20)
    self.assertEqual(trials[1].config["y"], 20)
Example #7
def generate_tokens(mean, std_dev, num_tokens):
    """Generate *num_tokens* distinct random lowercase tokens.

    Token lengths are drawn from a normal distribution (mean, std_dev);
    lengths below 2 are rejected and redrawn.
    """
    seen = {}
    generated = 0
    while generated < num_tokens:
        token_len = int(round(random.normalvariate(mean,
                                                   std_dev)))
        if token_len < 2:
            continue  # too short -- draw a new length
        # Keep sampling until we hit a token we have not produced yet.
        while True:
            candidate = ''.join(random.choice(string.ascii_lowercase)
                                for _ in range(token_len))
            if seen.get(candidate) is None:
                seen[candidate] = True
                generated += 1
                break
    return list(seen.keys())
Example #8
def generate_table(mean, std_dev, tokens, num_records,
                   id_col_name, attr_col_name):
    """Build a DataFrame of *num_records* rows.

    Each attribute value is a space-joined sequence of randomly chosen
    tokens; the sequence length is drawn from N(mean, std_dev).
    """
    rows = []
    token_count = len(tokens)
    for record_id in range(num_records):
        size = int(round(random.normalvariate(mean,
                                              std_dev)))
        # size <= 0 yields an empty attribute string, as before.
        parts = [tokens[random.randint(0, token_count - 1)]
                 for _ in range(size)]
        rows.append([record_id, ' '.join(parts)])
    return pd.DataFrame(rows, columns=[id_col_name, attr_col_name])
Example #9
def n_rolls(count, n):
    """roll an n-sided die count times

    :type count: int
    :type n: int | str
    """
    if n == "F":
        # Fudge dice: -1, 0 or +1, capped at 100 physical rolls.
        return [random.randint(-1, 1) for _ in range(min(count, 100))]
    if n < 2:  # it's a coin
        if count < 100:
            return [random.randint(0, 1) for _ in range(count)]
        # fake it: approximate many flips with one normal variate.
        return [int(random.normalvariate(.5 * count, (.75 * count) ** .5))]
    if count < 100:
        return [random.randint(1, n) for _ in range(count)]
    # fake it: mu is the expected total; sigma comes from the single-die
    # variance (E[X^2] - E[X]^2) scaled by count.
    mu = .5 * (1 + n) * count
    sigma = (((n + 1) * (2 * n + 1) / 6. -
              (.5 * (1 + n)) ** 2) * count) ** .5
    return [int(random.normalvariate(mu, sigma))]
Example #10
def coin(text, notice, action):
    """[amount] - flips [amount] coins

    :type text: str
    """
    if text:
        try:
            amount = int(text)
        except (ValueError, TypeError):
            notice("Invalid input '{}': not a number".format(text))
            return
    else:
        amount = 1

    if amount == 1:
        action("flips a coin and gets {}.".format(random.choice(["heads", "tails"])))
    elif amount == 0:
        action("makes a coin flipping motion")
    else:
        # Approximate many flips with one normal variate rather than
        # flipping each coin individually.
        heads = int(random.normalvariate(.5 * amount, (.75 * amount) ** .5))
        # BUG FIX: `tails` was never defined in the original, raising
        # NameError for any amount > 1.
        tails = amount - heads
        action("flips {} coins and gets {} heads and {} tails.".format(amount, heads, tails))
Example #11
def get_dist(d):
    """Map a distribution name to the matching function from ``random``.

    Returns None for unknown names. The comment after each entry lists
    the parameters the returned callable expects.
    """
    return {
        'randrange': random.randrange,       # start, stop, step
        'randint': random.randint,           # a, b
        'random': random.random,
        # BUG FIX: this entry previously mapped to the `random` module
        # itself (not callable with (a, b)); it must be random.uniform.
        'uniform': random.uniform,           # a, b
        'triangular': random.triangular,     # low, high, mode
        'beta': random.betavariate,          # alpha, beta
        'expo': random.expovariate,          # lambda
        'gamma': random.gammavariate,        # alpha, beta
        'gauss': random.gauss,               # mu, sigma
        'lognorm': random.lognormvariate,    # mu, sigma
        'normal': random.normalvariate,      # mu, sigma
        'vonmises': random.vonmisesvariate,  # mu, kappa
        'pareto': random.paretovariate,      # alpha
        'weibull': random.weibullvariate     # alpha, beta
    }.get(d)
Example #12
def jump_diffusion_process(params: ModelParameters):
    """
    Produces a sequence of Jump Sizes which represent a jump
    diffusion process. These jumps are combined with a geometric brownian
    motion (log returns) to produce the Merton model.

    Arguments:
        params : ModelParameters
            The parameters for the stochastic model.

    Returns:
        jump sizes for each point in time (mostly zeroes if jumps are infrequent)
    """
    arrival = 0
    elapsed = 0
    # Inverse-CDF sampling of exponential inter-arrival times uses -1/lamda.
    neg_inv_lamda = -(1.0 / params.lamda)
    jump_sizes = [0.0 for _ in range(params.all_time)]
    while arrival < params.all_time:
        # Advance the Poisson arrival clock by an exponential waiting time.
        arrival += neg_inv_lamda * np.log(np.random.uniform(0, 1))
        for j in range(params.all_time):
            # Attribute the jump to the first interval that contains it.
            if (elapsed * params.all_delta
                    <= arrival * params.all_delta
                    <= (j + 1) * params.all_delta):
                jump_sizes[j] += random.normalvariate(params.jumps_mu, params.jumps_sigma)
                break
        elapsed += 1
    return jump_sizes
Example #13
def get_correlated_geometric_brownian_motions(params: ModelParameters,
                                              correlation_matrix: np.array,
                                              n: int):
    """
    Constructs a basket of correlated asset paths using the Cholesky
    decomposition method.

    Arguments:
        params : ModelParameters
            The parameters for the stochastic model.
        correlation_matrix : np.array
            An n x n correlation matrix.
        n : int
            Number of assets (number of paths to return)

    Returns:
        n correlated log return geometric brownian motion processes
    """
    # NOTE(review): indentation was lost in extraction; the nesting below is
    # reconstructed from syntax and should be confirmed against the source.
    decomposition = sp.linalg.cholesky(correlation_matrix, lower=False)
    uncorrelated_paths = []
    sqrt_delta_sigma = np.sqrt(params.all_delta) * params.all_sigma
    # Construct uncorrelated paths to convert into correlated paths
    for i in range(params.all_time):
        uncorrelated_random_numbers = []
        for j in range(n):
            # One independent gaussian increment per asset per time step.
            uncorrelated_random_numbers.append(random.normalvariate(0, sqrt_delta_sigma))
        uncorrelated_paths.append(np.array(uncorrelated_random_numbers))
    # Correlate the increments by multiplying with the Cholesky factor.
    # NOTE(review): np.asmatrix / np.matrix are deprecated in modern NumPy.
    uncorrelated_matrix = np.asmatrix(uncorrelated_paths)
    correlated_matrix = uncorrelated_matrix * decomposition
    assert isinstance(correlated_matrix, np.matrix)
    # The rest of this method just extracts paths from the matrix
    extracted_paths = []
    for i in range(1, n + 1):
        extracted_paths.append([])
    # Walk the matrix n items at a time, handing item j+i to path i.
    # `len(correlated_matrix)` is the number of rows (time steps).
    for j in range(0, len(correlated_matrix) * n - n, n):
        for i in range(n):
            extracted_paths[i].append(correlated_matrix.item(j + i))
    return extracted_paths
Example #14
def initialize_v(n: int, k: int):
    """Initialize the interaction (cross-term) weights of an FM model.

    :param n: number of features
    :param k: degree (latent dimension) of the FM model
    :return: an n x k matrix of interaction weights
    """
    # Small random weights (sigma=0.2) so training starts near zero.
    weights = np.mat(np.zeros(shape=(n, k)))
    for row in range(n):
        for col in range(k):
            weights[row, col] = normalvariate(0, 0.2)
    return weights
Example #15
def random_resize(image, gt_image, lower_size, upper_size, sig):
    """Resize *image* and its ground-truth mask by a random factor drawn
    from N(1, sig), clamped into [lower_size, upper_size]."""
    factor = random.normalvariate(1, sig)
    # Clamp the scale factor into the permitted range.
    factor = max(lower_size, min(upper_size, factor))
    image = scipy.misc.imresize(image, factor)
    # Pad the two-channel ground truth with a zero channel so imresize
    # sees three channels, then drop the padding again after resizing.
    rows, cols = gt_image.shape[0], gt_image.shape[1]
    padding = np.zeros([rows, cols, 1])
    gt_image = np.concatenate((gt_image, padding), axis=2)
    gt_image = scipy.misc.imresize(gt_image, factor, interp='nearest')
    gt_image = gt_image[:, :, 0:2]/255
    return image, gt_image
Example #16
# NOTE(review): this snippet is garbled -- indentation was stripped and the
# reward-assignment statements (apparently `the_plot.add_reward(...)` calls
# wrapping the bare `float(...) +` expression, plus the bodies of the empty
# `else:` branches) were lost in extraction, so it is not valid Python as
# captured. Kept byte-identical rather than guessing at the missing lines.
def update(self, actions, board, layers, backdrop, things, the_plot):
# Our motions are quite constrained: we can only move up or down one spot.
if actions == 1 and self.virtual_position.row > 1:    # go up?
self._north(board, the_plot)
elif actions == 2 and self.virtual_position.row < 2:  # go down?
self._south(board, the_plot)
elif actions in [0, 4]:                               # quit the game?
the_plot.terminate_episode()
else:                                                 # do nothing?
self._stay(board, the_plot)                         # (or can't move?)

# Give ourselves a point if we landed on the correct ball.
correct_ball = 'a' if the_plot.get('which_ball') == 'top' else 'b'
if self._reward_sigma:
if (self.position.col == things[correct_ball].position.col and
self._trials_till_reward <= 0):
float(self.position == things[correct_ball].position) +
random.normalvariate(mu=0, sigma=self._reward_sigma))
else:

else:
self.position == things[correct_ball].position and
self._trials_till_reward <= 0
))

# Decrement trials left till reward.
if (self.position.col == things[correct_ball].position.col and
self._trials_till_reward > 0):
self._trials_till_reward -= 1
Example #17
def main():
    """Sample a standard normal distribution and print a bucketed histogram."""
    LEN = 1000000
    bucket_size = 1
    # This will give us a normal distribution centred around 0.0 with a
    # standard deviation of 1.0
    mu, sigma = 0.0, 1.0
    samples = [random.normalvariate(mu, sigma) for _ in range(LEN)]
    buckets = float_list_to_int_dict(samples, bucket_size)
    pprint.pprint(buckets)
    pprint_histogram(buckets, bucket_size)
Example #18
def main():
    """Bucket one million standard-normal samples and display them."""
    sample_count = 1000000
    bucket_size = 1
    # Standard normal: mean 0.0, standard deviation 1.0.
    mu, sigma = 0.0, 1.0
    lst = [random.normalvariate(mu, sigma) for _ in range(sample_count)]
    d = float_list_to_int_dict(lst, bucket_size)
    pprint.pprint(d)
    pprint_histogram(d, bucket_size)
Example #19
def retry(self, connector=None):
    """
    Have this connector connect again, after a suitable delay.
    """
    # An explicit stop request wins over everything else.
    if not self.continueTrying:
        if self.noisy:
            log.msg("Abandoning %s on explicit request" % (connector,))
        return

    if connector is None:
        if self.connector is None:
            raise ValueError("no connector to retry")
        connector = self.connector

    self.retries += 1
    if self.maxRetries is not None and (self.retries > self.maxRetries):
        if self.noisy:
            log.msg("Abandoning %s after %d retries." %
                    (connector, self.retries))
        return

    # Exponential backoff capped at maxDelay, with optional gaussian jitter
    # so many clients don't reconnect in lockstep.
    self.delay = min(self.delay * self.factor, self.maxDelay)
    if self.jitter:
        self.delay = random.normalvariate(self.delay,
                                          self.delay * self.jitter)

    if self.noisy:
        log.msg("%s will retry in %d seconds" % (connector, self.delay,))

    def reconnector():
        self._callID = None
        connector.connect()

    if self.clock is None:
        from twisted.internet import reactor
        self.clock = reactor
    self._callID = self.clock.callLater(self.delay, reconnector)
Example #20
def lookupTransformInWorld(self, frame='/cf_gt', forceRealtime=False, poseNoiseMeters=0.0):
    """Look up the transform from /world to *frame*, optionally querying
    slightly in the past (self.PIDDelay) and adding gaussian noise to the
    translation."""
    query_time = rospy.Time(0)
    if self.PIDDelay > 0.00001 and not forceRealtime:
        #rospy.logwarn('Delay in TF Pose: %5.3s', self.PIDDelay)
        query_time = rospy.Time.now() - rospy.Duration(self.PIDDelay)
    (trans, rot) = self.sub_tf.lookupTransform('/world', frame, query_time)

    if poseNoiseMeters > 0:
        # Independent gaussian noise on each of the three translation axes.
        trans = [axis + random.normalvariate(0, poseNoiseMeters) for axis in trans]
    return (trans, rot)
Example #21
```def _weight_generator(self):
MIN_WEIGHT = 20
MAX_WEIGHT = 100
weight = random.normalvariate(50, 10)
while weight < MIN_WEIGHT or weight > MAX_WEIGHT:
weight = random.normalvariate(50, 10)
return weight ```
Example #22
def gen_noise_samples(self, imgwh, noise_type, num, **kwargs):
    """Generate *num* noisy bounding-box samples around this box.

    Arguments (NOTE(review): partly inferred from usage -- confirm):
        imgwh : image size, a 2-vector with .x/.y attributes
        noise_type : 'whole' (grid over the whole image), 'gaussian'
            (clipped gaussian jitter) or 'uniform' (uniform jitter)
        num : number of samples to return
        kwargs : optional tuning factors gaussian_translation_f,
            uniform_translation_f and uniform_scale_f
    """
    # NOTE(review): indentation was lost in extraction; the nesting below
    # is reconstructed from syntax.
    center_xy = self.xy + self.wh * 0.5
    mean_wh = sum(self.wh) / 2.0

    gaussian_translation_f = kwargs.get('gaussian_translation_f', 0.1)
    uniform_translation_f = kwargs.get('uniform_translation_f', 1)
    uniform_scale_f = kwargs.get('uniform_scale_f', 10)

    samples = []
    if noise_type == 'whole':
        # Regular grid over the image at several scales (1.05**ds), then
        # draw `num` boxes from the grid at random.
        grid_x = range(self.wh.x // 2, imgwh.x - self.wh.x // 2, self.wh.x // 5)
        grid_y = range(self.wh.y // 2, imgwh.y - self.wh.y // 2, self.wh.y // 5)
        samples_tmp = []
        for dx, dy, ds in itertools.product(grid_x, grid_y, range(-5, 5, 1)):
            box = BoundingBox(dx, dy, self.wh.x*(1.05**ds), self.wh.y*(1.05**ds))
            box.fit_image(imgwh)
            samples_tmp.append(box)

        for _ in range(num):
            samples.append(random.choice(samples_tmp))
    else:
        for _ in range(num):
            if noise_type == 'gaussian':
                # Gaussian jitter, clipped to [-1, 1] by minmax, scaled by
                # the mean box size; scale jitter is exponential in 1.05.
                dx = gaussian_translation_f * mean_wh * minmax(0.5 * random.normalvariate(0, 1), -1, 1)
                dy = gaussian_translation_f * mean_wh * minmax(0.5 * random.normalvariate(0, 1), -1, 1)
                dwh = 1.05 ** (3 * minmax(0.5 * random.normalvariate(0, 1), -1, 1))
            elif noise_type == 'uniform':
                dx = uniform_translation_f * mean_wh * random.uniform(-1.0, 1.0)
                dy = uniform_translation_f * mean_wh * random.uniform(-1.0, 1.0)
                dwh = 1.05 ** (uniform_scale_f * random.uniform(-1.0, 1.0))
            else:
                # NOTE(review): a bare `raise` outside an except block is a
                # RuntimeError at runtime; likely meant to raise ValueError.
                raise
            # Re-centre the jittered box and clamp it to the image.
            new_cxy = center_xy + (dx, dy)
            new_wh = self.wh * dwh
            box = BoundingBox(new_cxy.x - new_wh.x / 2.0, new_cxy.y - new_wh.y / 2.0, new_wh.x, new_wh.y)
            box.fit_image(imgwh)
            samples.append(box)

    return samples
Example #23
def time_per_part():
    """Return actual processing time for a concrete part.

    Drawn from a normal distribution with the module-level mean and sigma.
    """
    mean, sigma = PT_MEAN, PT_SIGMA
    return random.normalvariate(mean, sigma)
Example #24
def WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20):
    """Write a sequence of normally distributed histograms to writer."""
    # NOTE(review): `writer` is never used below and the built `event` is
    # discarded -- the original presumably ended with something like
    # `writer.add_event(event)`; the tail appears truncated in extraction.
    step = 0
    wall_time = _start_time
    for [mean, stddev] in mu_sigma_tuples:
        # n gaussian samples per histogram point (`xrange` implies Python 2).
        data = [random.normalvariate(mean, stddev) for _ in xrange(n)]
        histo = _MakeHistogram(data)
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)])
        event = tf.Event(wall_time=wall_time, step=step, summary=summary)
        # Advance the fake step/clock between histogram points.
        step += 10
        wall_time += 100
Example #25
def coin(text, notice, action):
    """[amount] - flips [amount] coins

    :type text: str
    """
    if text:
        try:
            amount = int(text)
        except (ValueError, TypeError):
            notice("Invalid input '{}': not a number".format(text))
            return
    else:
        amount = 1

    if amount == 1:
        action("flips a coin and gets {}.".format(random.choice(["heads", "tails"])))
    elif amount == 0:
        action("makes a coin flipping motion")
    else:
        # Approximate the binomial count of heads with a normal variate.
        mu = .5 * amount
        sigma = (.75 * amount) ** .5
        n = random.normalvariate(mu, sigma)
        # BUG FIX: the draw `n` was previously unused and `heads`/`tails`
        # were never defined, raising NameError for any amount > 1.
        heads = int(n)
        tails = amount - heads
        action("flips {} coins and gets {} heads and {} tails.".format(amount, heads, tails))
Example #26
def test_lagged_mi():
    """Test estimation of lagged MI."""
    # NOTE(review): this snippet is incomplete -- the constructions of
    # `est_te_k` and `est_k` (presumably Kraskov TE/MI estimators) are
    # missing, and the final `_compare_result` calls lost all but their
    # trailing argument lines in extraction; not valid Python as captured.
    n = 10000
    cov = 0.4
    source = [rn.normalvariate(0, 1) for r in range(n)]
    # Target is the source scaled by cov plus independent gaussian noise,
    # shifted by one sample (leading 0) to create the lag.
    target = [0] + [sum(pair) for pair in zip(
        [cov * y for y in source[0:n - 1]],
        [(1 - cov) * y for y in
         [rn.normalvariate(0, 1) for r in range(n - 1)]])]
    source = np.array(source)
    target = np.array(target)
    settings = {
        'discretise_method': 'equal',
        'n_discrete_bins': 4,
        'history': 1,
        'history_target': 1,
        'lag_mi': 1,
        'source_target_delay': 1}

    te_k = est_te_k.estimate(source, target)
    est_te_d = JidtDiscreteTE(settings)
    te_d = est_te_d.estimate(source, target)
    est_d = JidtDiscreteMI(settings)
    mi_d = est_d.estimate(source, target)
    mi_k = est_k.estimate(source, target)
    est_g = JidtGaussianMI(settings)
    mi_g = est_g.estimate(source, target)
    _compare_result(mi_d, te_d, 'JidtDiscreteMI', 'JidtDiscreteTE',
                    'lagged MI', tol=0.05)
    # NOTE(review): the two dangling argument lines below belonged to
    # further _compare_result calls whose leading lines were lost.
    'lagged MI', tol=0.05)
    'lagged MI', tol=0.05)
Example #27
def test_jidt_kraskov_alg1And2():
    """ Test that JIDT estimate changes properly when we change KSG algorithm """
    # NOTE(review): incomplete snippet -- the constructions of `est1` and
    # `est2` (Kraskov MI estimators parameterised by settings/k) and the
    # statement inside the final `try:` block are missing from the capture,
    # so this is not valid Python as-is.
    n = 100;
    # Ramp 0..99 plus tiny gaussian noise, so nearest-neighbour counts in
    # each marginal space are exactly the k nearest neighbours.
    source = [sum(pair) for pair in zip(
        [y for y in range(n)],
        [rn.normalvariate(0, 0.000001) for r in range(n)])]
    source = np.array(source)
    target = np.array(source) # Target copies source on purpose
    # We've generated simple data 0:99, plus a little noise to ensure
    #  we only even get K nearest neighbours in each space.
    # So result should be:
    settings = {
        'lag': 0,
        'noise_level': 0,
        'algorithm_num': 1}
    for k in range(4,16):
        settings['algorithm_num'] = 1;
        mi_alg1 = est1.estimate(source, target)
        # Neighbour counts n_x and n_y will be k-1 because they are
        #  *strictly* within the boundary
        expected_alg1 = digamma(k) - 2*digamma((k-1)+1) + digamma(n);
        _compare_result(mi_alg1, expected_alg1, 'JidtDiscreteMI_alg1', 'Analytic',
                        'MI', tol=0.00001)
        settings['algorithm_num'] = 2;
        mi_alg2 = est2.estimate(source, target)
        expected_alg2 = digamma(k) - 1/k - 2*digamma(k) + digamma(n);
        _compare_result(mi_alg2, expected_alg2, 'JidtDiscreteMI_alg2', 'Analytic',
                        'MI', tol=0.00001)
    # And now check that it doesn't work for algorithm "3"
    settings['algorithm_num'] = 3;
    caughtAssertionError = False;
    try:
        # NOTE(review): the estimator call that should raise went missing here.
    except AssertionError:
        caughtAssertionError = True;
    assert caughtAssertionError, 'Assertion error not raised for KSG algorithm 3 request'
Example #28
def retry(self, connector=None):
    """
    Have this connector connect again, after a suitable delay.
    """
    if not self.continueTrying:
        # Reconnection was disabled (e.g. stopTrying was called).
        if self.noisy:
            log.msg("Abandoning %s on explicit request" % (connector,))
        return

    if connector is None:
        if self.connector is None:
            raise ValueError("no connector to retry")
        else:
            connector = self.connector

    self.retries += 1
    retries_exhausted = (self.maxRetries is not None
                         and (self.retries > self.maxRetries))
    if retries_exhausted:
        if self.noisy:
            log.msg("Abandoning %s after %d retries." %
                    (connector, self.retries))
        return

    # Grow the delay geometrically up to maxDelay; optional gaussian
    # jitter spreads simultaneous reconnect attempts apart.
    self.delay = min(self.delay * self.factor, self.maxDelay)
    if self.jitter:
        self.delay = random.normalvariate(self.delay,
                                          self.delay * self.jitter)

    if self.noisy:
        log.msg("%s will retry in %d seconds" % (connector, self.delay,))

    def reconnector():
        # Clear the pending-call handle before actually reconnecting.
        self._callID = None
        connector.connect()

    if self.clock is None:
        from twisted.internet import reactor
        self.clock = reactor
    self._callID = self.clock.callLater(self.delay, reconnector)
Example #29
def sample(self):
    """Draw from N(self._mu, self._sigma), rejecting negative values."""
    # Rejection-sample so only values >= 0 escape the loop.
    while True:
        draw = random.normalvariate(self._mu, self._sigma)
        if draw >= 0:
            return draw
Example #30
```def random_resize(image, lower_size, upper_size, sig):