Python numpy.std() Examples
The following are 30 code examples showing how to use numpy.std(). They are extracted from open source projects. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module numpy, or try the search function.
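Before the project examples, here is a minimal, self-contained sketch of the numpy.std() call itself; the array contents are made up purely for illustration. By default np.std computes the population standard deviation (ddof=0) over the flattened array; the axis and ddof keyword arguments change that behavior.

import numpy as np

a = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])

print(np.std(a))           # std of all six values, population form (ddof=0)
print(np.std(a, axis=0))   # column-wise std, returns an array of length 3
print(np.std(a, ddof=1))   # sample std, divides by N-1 instead of N

Most of the examples below follow this pattern: a plain np.std(x) call for descriptive statistics, np.std(x, axis=0) for per-feature or per-column statistics, and ddof=1 where a sample estimate is wanted.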
Example 1
Project: nmp_qc Author: priba File: utils.py License: MIT License | 7 votes |
def get_graph_stats(graph_obj_handle, prop='degrees'):
    # if prop == 'degrees':
    num_cores = multiprocessing.cpu_count()
    inputs = [int(i*len(graph_obj_handle)/num_cores) for i in range(num_cores)] + [len(graph_obj_handle)]
    res = Parallel(n_jobs=num_cores)(delayed(get_values)(graph_obj_handle, inputs[i], inputs[i+1], prop)
                                     for i in range(num_cores))

    stat_dict = {}

    if 'degrees' in prop:
        stat_dict['degrees'] = list(set([d for core_res in res for file_res in core_res for d in file_res['degrees']]))
    if 'edge_labels' in prop:
        stat_dict['edge_labels'] = list(set([d for core_res in res for file_res in core_res for d in file_res['edge_labels']]))
    if 'target_mean' in prop or 'target_std' in prop:
        param = np.array([file_res['params'] for core_res in res for file_res in core_res])
        if 'target_mean' in prop:
            stat_dict['target_mean'] = np.mean(param, axis=0)
        if 'target_std' in prop:
            stat_dict['target_std'] = np.std(param, axis=0)

    return stat_dict
Example 2
Project: svviz Author: svviz File: insertsizes.py License: MIT License | 6 votes |
def __init__(self, bam, keepReads=False):
    self.insertSizes = []
    self.readLengths = []
    self.orientations = []

    self._insertSizeKDE = None
    self.singleEnded = False

    self._insertSizeScores = {}  # cache

    try:
        self.insertSizes, self.reads, self.orientations, self.readLengths = sampleInsertSizes(bam, keepReads=keepReads)
        if len(self.insertSizes) > 1:
            logging.info("  insert size mean: {:.2f} std: {:.2f}".format(
                numpy.mean(self.insertSizes), numpy.std(self.insertSizes)))
    except ValueError as e:
        print("*" * 100, "here")
        print("ERROR:", e)
Example 3
Project: TradzQAI Author: kkuette File: bollinger_bands.py License: Apache License 2.0 | 6 votes |
def upper_bollinger_band(data, period, std_mult=2.0):
    """
    Upper Bollinger Band.

    Formula:
    u_bb = SMA(t) + STD(SMA(t-n:t)) * std_mult
    """
    check_for_period_error(data, period)
    period = int(period)
    simple_ma = sma(data, period)[period-1:]

    upper_bb = []
    for idx in range(len(data) - period + 1):
        std_dev = np.std(data[idx:idx + period])
        upper_bb.append(simple_ma[idx] + std_dev * std_mult)
    upper_bb = fill_for_noncomputable_vals(data, upper_bb)

    return np.array(upper_bb)
Example 4
Project: TradzQAI Author: kkuette File: bollinger_bands.py License: Apache License 2.0 | 6 votes |
def lower_bollinger_band(data, period, std=2.0):
    """
    Lower Bollinger Band.

    Formula:
    l_bb = SMA(t) - STD(SMA(t-n:t)) * std
    """
    check_for_period_error(data, period)
    period = int(period)
    simple_ma = sma(data, period)[period-1:]

    lower_bb = []
    for idx in range(len(data) - period + 1):
        std_dev = np.std(data[idx:idx + period])
        lower_bb.append(simple_ma[idx] - std_dev * std)
    lower_bb = fill_for_noncomputable_vals(data, lower_bb)

    return np.array(lower_bb)
Example 5
Project: TradzQAI Author: kkuette File: bollinger_bands.py License: Apache License 2.0 | 6 votes |
def bandwidth(data, period, std=2.0):
    """
    Bandwidth.

    Formula:
    bw = (u_bb - l_bb) / m_bb
    """
    check_for_period_error(data, period)

    period = int(period)
    bandwidth = ((upper_bollinger_band(data, period, std) -
                  lower_bollinger_band(data, period, std)) /
                 middle_bollinger_band(data, period, std)
                 )

    return bandwidth
Example 6
Project: TradzQAI Author: kkuette File: standard_deviation.py License: Apache License 2.0 | 6 votes |
def standard_deviation(data, period):
    """
    Standard Deviation.

    Formula:
    std = sqrt(avg(abs(x - avg(x))^2))
    """
    check_for_period_error(data, period)

    stds = list(map(
        lambda idx: np.std(data[idx+1-period:idx+1], ddof=1),
        range(period-1, len(data))
        ))

    stds = fill_for_noncomputable_vals(data, stds)
    return stds
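A brief note on the ddof=1 argument used in Example 6: numpy.std() defaults to ddof=0, dividing by N (the population standard deviation), while ddof=1 divides by N-1 to give the sample standard deviation. The short sketch below, with a made-up data window, simply contrasts the two calls.

import numpy as np

window = np.array([10.0, 12.0, 23.0, 23.0, 16.0, 23.0, 21.0, 16.0])  # made-up window

population_std = np.std(window)      # ddof=0: divides by N (NumPy's default)
sample_std = np.std(window, ddof=1)  # ddof=1: divides by N-1, as in Example 6

print(population_std, sample_std)    # the sample estimate is slightly larger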
Example 7
Project: cs294-112_hws Author: xuwd11 File: utils.py License: MIT License | 6 votes |
def log(self):
    end_idxs = np.nonzero(self._dones)[0] + 1

    returns = []

    start_idx = 0
    for end_idx in end_idxs:
        rewards = self._rewards[start_idx:end_idx]
        returns.append(np.sum(rewards))

        start_idx = end_idx

    logger.record_tabular('ReturnAvg', np.mean(returns))
    logger.record_tabular('ReturnStd', np.std(returns))
    logger.record_tabular('ReturnMin', np.min(returns))
    logger.record_tabular('ReturnMax', np.max(returns))

##################
### Tensorflow ###
##################
Example 8
Project: cs294-112_hws Author: xuwd11 File: train_policy.py License: MIT License | 6 votes |
def update_critic(self, ob_no, hidden, q_n):
    """
    given:
        self.num_value_iters
        self.l2_reg

    arguments:
        ob_no: (minibsize, history, meta_obs_dim)
        hidden: (minibsize, self.gru_size)
        q_n: (minibsize)

    requires:
        self.num_value_iters
    """
    target_n = (q_n - np.mean(q_n)) / (np.std(q_n) + 1e-8)
    for k in range(self.num_value_iters):
        critic_loss, _ = self.sess.run(
            [self.critic_loss, self.critic_update_op],
            feed_dict={self.sy_target_n: target_n, self.sy_ob_no: ob_no, self.sy_hidden: hidden})
    return critic_loss
Example 9
Project: torch-toolbox Author: PistonY File: feature_verification.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def get(self):
    tpr, fpr, accuracy, threshold = calculate_roc(
        self.thresholds, np.asarray(self.dists),
        np.asarray(self.issame), self.nfolds)
    val, val_std, far = calculate_val(
        self.thresholds, np.asarray(self.dists),
        np.asarray(self.issame), self.far_target, self.nfolds)
    acc, acc_std = np.mean(accuracy), np.std(accuracy)
    threshold = (1 - threshold) if self.dist_type == 'cosine' else threshold
    return tpr, fpr, acc, threshold, val, val_std, far, acc_std

# code below is modified from project <Facenet (David Sandberg)> and
# <Gluon-Face>
Example 10
Project: pytorch-mri-segmentation-3D Author: Achilleas File: PP.py License: MIT License | 6 votes |
def extractMeanDataStats(size = [200, 200, 100],
                         postfix = '_200x200x100orig',
                         main_folder_path = '../../Data/MS2017b/',
                         ):
    scan_folders = glob.glob(main_folder_path + 'scans/*')
    img_path = 'pre/FLAIR' + postfix + '.nii.gz'
    segm_path = 'wmh' + postfix + '.nii.gz'

    shape_ = [len(scan_folders), size[0], size[1], size[2]]
    arr = np.zeros(shape_)

    for i, sf in enumerate(scan_folders):
        arr[i, :, :, :] = numpyFromScan(os.path.join(sf, img_path)).squeeze()

    arr /= len(scan_folders)

    means = np.mean(arr)
    stds = np.std(arr, axis = 0)

    np.save(main_folder_path + 'extra_data/std' + postfix, stds)
    np.save(main_folder_path + 'extra_data/mean' + postfix, means)
Example 11
Project: gandlf Author: codekansas File: reversing_gan.py License: MIT License | 6 votes |
def reverse_generator(generator, X_sample, y_sample, title):
    """Gradient descent to map images back to their latent vectors."""

    latent_vec = np.random.normal(size=(1, 100))

    # Function for figuring out how to bump the input.
    target = K.placeholder()
    loss = K.sum(K.square(generator.outputs[0] - target))
    grad = K.gradients(loss, generator.inputs[0])[0]
    update_fn = K.function(generator.inputs + [target], [grad])

    # Repeatedly apply the update rule.
    xs = []
    for i in range(60):
        print('%d: latent_vec mean=%f, std=%f'
              % (i, np.mean(latent_vec), np.std(latent_vec)))
        xs.append(generator.predict_on_batch([latent_vec, y_sample]))
        for _ in range(10):
            update_vec = update_fn([latent_vec, y_sample, X_sample])[0]
            latent_vec -= update_vec * update_rate

    # Plots the samples.
    xs = np.concatenate(xs, axis=0)
    plot_as_gif(xs, X_sample, title)
Example 12
Project: svviz Author: svviz File: summarystats.py License: MIT License | 5 votes |
def addVariantResults(self, dataHub):
    variant = str(dataHub.variant)
    for sampleName, sample in dataHub.samples.items():
        counts = collections.Counter()
        reasons = {}
        alnScores = collections.defaultdict(list)
        insertSizes = collections.defaultdict(list)

        # collect stats
        for alnCollection in sample.alnCollections:
            allele = alnCollection.choice
            counts[allele] += 1

            if not allele in reasons:
                reasons[allele] = collections.Counter()
            reasons[allele][alnCollection.why] += 1

            alnScores[allele].append(sum(aln.score for aln in alnCollection.chosenSet().getAlignments()))
            insertSizes[allele].append(len(alnCollection.chosenSet()))

        # record stats
        for allele, count in counts.items():
            self.stats.append([variant, sampleName, allele, "count", count])

        for allele in reasons:
            for reason in reasons[allele]:
                self.stats.append([variant, sampleName, allele, "reason_{}".format(reason), reasons[allele][reason]])

        for allele in alnScores:
            self.stats.append([variant, sampleName, allele, "alnScore_mean", numpy.mean(alnScores[allele])])
            self.stats.append([variant, sampleName, allele, "alnScore_std", numpy.std(alnScores[allele])])

        for allele in insertSizes:
            self.stats.append([variant, sampleName, allele, "insertSize_mean", numpy.mean(insertSizes[allele])])
            self.stats.append([variant, sampleName, allele, "insertSize_std", numpy.std(insertSizes[allele])])
Example 13
Project: EDeN Author: fabriziocosta File: estimator.py License: MIT License | 5 votes |
def bias_variance_decomposition(self, graphs, targets, cv=5, n_bootstraps=10):
    """bias_variance_decomposition."""
    x = self.transform(graphs)
    score_list = []
    for i in range(n_bootstraps):
        scores = cross_val_score(self.model, x, targets, cv=cv)
        score_list.append(scores)
    score_list = np.array(score_list)
    mean_scores = np.mean(score_list, axis=1)
    std_scores = np.std(score_list, axis=1)
    return mean_scores, std_scores
Example 14
Project: EDeN Author: fabriziocosta File: estimator_utils.py License: MIT License | 5 votes |
def output_avg_and_std(iterable):
    """output_avg_and_std."""
    print('score: %.2f +-%.2f' % (np.mean(iterable), np.std(iterable)))
    return iterable
Example 15
Project: EDeN Author: fabriziocosta File: ml.py License: MIT License | 5 votes |
def estimate_model(positive_data_matrix=None,
                   negative_data_matrix=None,
                   target=None,
                   estimator=None,
                   n_jobs=4):
    """estimate_model."""
    X, y = make_data_matrix(positive_data_matrix=positive_data_matrix,
                            negative_data_matrix=negative_data_matrix,
                            target=target)
    logger.info('Test set')
    logger.info(describe(X))
    logger.info('-' * 80)
    logger.info('Test Estimate')
    predictions = estimator.predict(X)
    margins = estimator.decision_function(X)
    logger.info(classification_report(y, predictions))
    apr = average_precision_score(y, margins)
    logger.info('APR: %.3f' % apr)
    roc = roc_auc_score(y, margins)
    logger.info('ROC: %.3f' % roc)
    logger.info('Cross-validated estimate')
    scoring_strings = ['accuracy', 'precision', 'recall', 'f1',
                       'average_precision', 'roc_auc']
    for scoring in scoring_strings:
        scores = cross_validation.cross_val_score(
            estimator, X, y, cv=5, scoring=scoring, n_jobs=n_jobs)
        logger.info('%20s: %.3f +- %.3f' %
                    (scoring, np.mean(scores), np.std(scores)))
    return roc, apr
Example 16
Project: Adversarial-Face-Attack Author: ppwwyyxx File: face_attack.py License: GNU General Public License v3.0 | 5 votes |
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)

    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
Example 17
Project: nmp_qc Author: priba File: utils.py License: MIT License | 5 votes |
def normalize_data(data, mean, std):
    data_norm = (data - mean) / std
    return data_norm
Example 18
Project: Sound-Recognition-Tutorial Author: JasonZhang156 File: esc10_input.py License: Apache License 2.0 | 5 votes |
def get_data(test_fold, feat):
    """load feature for train and test"""
    # load feature
    data = np.load('./data/esc10/feature/esc10_{}_fold{}.npz'.format(feat, test_fold))
    train_x = np.expand_dims(data['train_x'], axis=-1)
    train_y = data['train_y']
    test_x = np.expand_dims(data['test_x'], axis=-1)
    test_y = data['test_y']

    # one-hot encode
    train_y = dense_to_one_hot(train_y, 10)
    test_y = dense_to_one_hot(test_y, 10)

    # z-score normalization
    mean = np.mean(train_x)
    std = np.std(train_x)
    train_x = (train_x - mean) / std
    test_x = (test_x - mean) / std

    # shuffle
    np.random.seed(RANDOM_SEED)
    np.random.shuffle(train_x)
    np.random.seed(RANDOM_SEED)
    np.random.shuffle(train_y)

    print('Audio Feature: ', feat)
    print('Training Set Shape: ', train_x.shape)
    print('Test Set Shape: ', test_x.shape)

    return train_x, train_y, test_x, test_y
Example 19
Project: neuropythy Author: noahbenson File: cmag.py License: GNU Affero General Public License v3.0 | 5 votes |
def sigma_bin_walls(sigma, bins):
    import scipy, scipy.cluster, scipy.cluster.vq as vq
    std = np.std(sigma)
    if np.isclose(std, 0):
        return pimms.imm_array([0, np.max(sigma)])
    cl = sorted(std * vq.kmeans(sigma/std, bins)[0])
    cl = np.mean([cl[:-1], cl[1:]], axis=0)
    return pimms.imm_array(np.concatenate(([0], cl, [np.max(sigma)])))
Example 20
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: metrics.py License: Apache License 2.0 | 5 votes |
def rse(label, pred):
    """computes the root relative squared error (condensed using standard deviation formula)"""
    numerator = np.sqrt(np.mean(np.square(label - pred), axis=None))
    denominator = np.std(label, axis=None)
    return numerator / denominator
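A short note on why np.std(label) works as the denominator in Example 20: with the default ddof=0, the standard deviation is the root mean squared deviation from the mean, so the 1/N factors in the root relative squared error cancel and the metric condenses to RMSE divided by std. The sketch below, using made-up arrays, checks that the condensed form matches the expanded definition.

import numpy as np

label = np.array([3.0, -0.5, 2.0, 7.0])  # made-up targets
pred = np.array([2.5, 0.0, 2.0, 8.0])    # made-up predictions

# Expanded definition: sqrt(sum((y - yhat)^2) / sum((y - mean(y))^2))
expanded = np.sqrt(np.sum(np.square(label - pred)) /
                   np.sum(np.square(label - np.mean(label))))

# Condensed form used above: the 1/N factors cancel, leaving RMSE / std(label)
condensed = np.sqrt(np.mean(np.square(label - pred))) / np.std(label)

assert np.isclose(expanded, condensed)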
Example 21
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: metrics.py License: Apache License 2.0 | 5 votes |
def corr(label, pred):
    """computes the empirical correlation coefficient"""
    numerator1 = label - np.mean(label, axis=0)
    numerator2 = pred - np.mean(pred, axis=0)
    numerator = np.mean(numerator1 * numerator2, axis=0)
    denominator = np.std(label, axis=0) * np.std(pred, axis=0)
    return np.mean(numerator / denominator)
Example 22
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: rnn.py License: Apache License 2.0 | 5 votes |
def run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim):
    obj = {"foreach": ForeachRNN, "while_loop": WhileRNN}[args.benchmark]
    inputs = _array((seq_len, batch_size, hidden_dim), ctx)
    states = [_array((batch_size, hidden_dim), ctx) for _ in cell_type(0).state_info()]
    if args.benchmark == "while_loop":
        states.insert(0, _zeros((1, ), ctx))

    for is_train, is_hyb_cell, is_hyb_layer in product([True, False], [False, True], [False, True]):
        cell = cell_type(hidden_dim)
        if is_hyb_cell:
            cell.hybridize(static_alloc=True)
        layer = obj(cell, seq_len)
        layer.initialize(ctx=ctx)
        if is_hyb_layer:
            layer.hybridize(static_alloc=True)
        print("is_train = %r, hybridize_cell = %r, hybridize_layer = %r"
              % (is_train, is_hyb_cell, is_hyb_layer))
        times = []
        for _ in range(args.warmup_rounds + args.test_rounds):
            tick = time()
            if not is_train:
                res = layer(inputs, states)
            else:
                with mx.autograd.record():
                    res = layer(inputs, states)
            if is_train:
                res.backward()
            mx.nd.waitall()
            tock = time()
            times.append((tock - tick) * 1000.0)
        times = times[args.warmup_rounds:]
        print("Time used: mean = %.3f ms, std = %.3f ms" % (np.mean(times), np.std(times)))
Example 23
Project: dynamic-training-with-apache-mxnet-on-aws Author: awslabs File: test_random.py License: Apache License 2.0 | 5 votes |
def set_seed_variously_for_context(ctx, init_seed, num_init_seeds, final_seed):
    end_seed = init_seed + num_init_seeds
    for seed in range(init_seed, end_seed):
        mx.random.seed(seed, ctx=ctx)
    mx.random.seed(final_seed, ctx=ctx)
    return end_seed

# Tests that seed setting of std (non-parallel) rng for specific context is
# synchronous w.r.t. rng use before and after.
Example 24
Project: DOTA_models Author: ringringyi File: input.py License: Apache License 2.0 | 5 votes |
def image_whitening(data):
    """
    Subtracts mean of image and divides by adjusted standard variance (for
    stability). Operations are per image but performed for the entire array.
    :param image: 4D array (ID, Height, Weight, Channel)
    :return: 4D array (ID, Height, Weight, Channel)
    """
    assert len(np.shape(data)) == 4

    # Compute number of pixels in image
    nb_pixels = np.shape(data)[1] * np.shape(data)[2] * np.shape(data)[3]

    # Subtract mean
    mean = np.mean(data, axis=(1, 2, 3))

    ones = np.ones(np.shape(data)[1:4], dtype=np.float32)
    for i in xrange(len(data)):
        data[i, :, :, :] -= mean[i] * ones

    # Compute adjusted standard variance
    adj_std_var = np.maximum(np.ones(len(data), dtype=np.float32) / math.sqrt(nb_pixels),
                             np.std(data, axis=(1, 2, 3)))  # NOLINT(long-line)

    # Divide image
    for i in xrange(len(data)):
        data[i, :, :, :] = data[i, :, :, :] / adj_std_var[i]

    print(np.shape(data))

    return data
Example 25
Project: TradzQAI Author: kkuette File: bollinger_bands.py License: Apache License 2.0 | 5 votes |
def middle_bollinger_band(data, period, std=2.0):
    """
    Middle Bollinger Band.

    Formula:
    m_bb = sma()
    """
    check_for_period_error(data, period)
    period = int(period)

    mid_bb = sma(data, period)

    return mid_bb
Example 26
Project: TradzQAI Author: kkuette File: bollinger_bands.py License: Apache License 2.0 | 5 votes |
def bb_range(data, period, std=2.0):
    """
    Range.

    Formula:
    bb_range = u_bb - l_bb
    """
    check_for_period_error(data, period)
    period = int(period)

    bb_range = (upper_bollinger_band(data, period, std) -
                lower_bollinger_band(data, period, std)
                )

    return bb_range
Example 27
Project: TradzQAI Author: kkuette File: wallet.py License: Apache License 2.0 | 5 votes |
def calc_sharp_ratio(self, _return, period):
    if np.sum(_return[len(_return) - 1 - period:]) == 0:
        return 0
    std = np.std(_return[len(_return) - 1 - period:], ddof=1)
    mean = np.mean(_return[len(_return) - 1 - period:])
    sqrt = np.sqrt(period)
    return sqrt * mean / std
Example 28
Project: cs294-112_hws Author: xuwd11 File: utils.py License: MIT License | 5 votes |
def state_std(self):
    return np.std(self._states, axis=0)
Example 29
Project: cs294-112_hws Author: xuwd11 File: utils.py License: MIT License | 5 votes |
def action_std(self):
    return np.std(self._actions, axis=0)
Example 30
Project: cs294-112_hws Author: xuwd11 File: utils.py License: MIT License | 5 votes |
def normalize(x, mean, std, eps=1e-8):
    return (x - mean) / (std + eps)