Python numpy.std() Examples

The following are code examples showing how to use numpy.std(), drawn from open-source Python projects.
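As a quick reference before the examples: np.std computes the population standard deviation by default (ddof=0), reduces over the flattened array unless an axis is given, and divides by N - ddof. A minimal illustration:

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.std(a))          # 1.118..., population std over all elements
print(np.std(a, axis=0))  # [1. 1.], column-wise
print(np.std(a, ddof=1))  # 1.290..., sample estimate (divides by N - 1)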

Example 1
Project: Adversarial-Face-Attack   Author: ppwwyyxx   File: face_attack.py    GNU General Public License v3.0
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer) 
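The EER line above is worth isolating: brentq finds the operating point where the false-accept rate equals the false-reject rate on the interpolated ROC curve. A self-contained toy version of that step (the ROC values here are invented for illustration):

import numpy as np
from scipy import interpolate
from scipy.optimize import brentq

fpr = np.array([0.0, 0.1, 0.3, 1.0])  # toy ROC curve; the real one comes from lfw.evaluate
tpr = np.array([0.0, 0.7, 0.9, 1.0])
eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
print(eer)  # 0.2 for this toy curve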
Example 2
Project: nmp_qc   Author: priba   File: utils.py    MIT License
def get_graph_stats(graph_obj_handle, prop='degrees'):
    # if prop == 'degrees':
    num_cores = multiprocessing.cpu_count()
    inputs = [int(i*len(graph_obj_handle)/num_cores) for i in range(num_cores)] + [len(graph_obj_handle)]
    res = Parallel(n_jobs=num_cores)(delayed(get_values)(graph_obj_handle, inputs[i], inputs[i+1], prop) for i in range(num_cores))

    stat_dict = {}

    if 'degrees' in prop:
        stat_dict['degrees'] = list(set([d for core_res in res for file_res in core_res for d in file_res['degrees']]))
    if 'edge_labels' in prop:
        stat_dict['edge_labels'] = list(set([d for core_res in res for file_res in core_res for d in file_res['edge_labels']]))
    if 'target_mean' in prop or 'target_std' in prop:
        param = np.array([file_res['params'] for core_res in res for file_res in core_res])
    if 'target_mean' in prop:
        stat_dict['target_mean'] = np.mean(param, axis=0)
    if 'target_std' in prop:
        stat_dict['target_std'] = np.std(param, axis=0)

    return stat_dict 
Example 3
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari.py    MIT License
def updateModel(self):
        '''Should do all work with updating weights.'''
        print('Updating weights...')
        # stack together all inputs, actions, and rewards for this episode
        epx = np.vstack(self.states)
        fakeLabels = [1 if action == 2 else 0 for action in self.actions]
        epy = np.vstack(fakeLabels)
        epr = np.vstack(self.rewards)
        self.resetMemory()
    
        # compute the discounted reward backwards through time
        discounted_epr = self._discountRewards(epr)
        # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)
    
        # update our model weights (all in one batch)
        self.model.train_on_batch(epx, epy, sample_weight=discounted_epr.reshape((-1,)))

        if self.episode % (self.batch_size * 3) == 0: 
            self.model.save(self.modelFileName) 
Example 4
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: utils.py    MIT License
def getMeanAndStd(inputDir):

    patients = os.listdir(inputDir)
    arrays = []
    for patient in patients:
        data = os.listdir(inputDir + '/' + patient)
        for imgName in data:
            if 'tra' in imgName or 'cor' in imgName or 'sag' in imgName:
                img = sitk.ReadImage(inputDir + '/' + patient + '/' + imgName)
                arr = sitk.GetArrayFromImage(img)
                arrays.append(arr.flatten())

    array = np.concatenate(arrays).ravel()
    mean = np.mean(array)
    std = np.std(array)
    print(mean, std)
    return mean, std 
Example 5
Project: Multi-modal-learning   Author: vanya2v   File: preprocessing.py    Apache License 2.0
def whitening(image):
    """Whitening

    Normalises image to zero mean and unit variance

    Parameters
    ----------
    image : np.ndarray
        image to be whitened

    Returns
    -------
    np.ndarray
        whitened image

    """
    ret = (image - np.mean(image)) / (np.std(image) + epsilon)
    return ret 
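A quick sanity check of the behaviour (epsilon is a small module-level constant in the original file; 1e-8 is assumed here for illustration):

import numpy as np

epsilon = 1e-8  # assumed value; defined at module level in the original project
image = np.random.rand(64, 64).astype(np.float32)
white = (image - np.mean(image)) / (np.std(image) + epsilon)
print(np.mean(white), np.std(white))  # approximately 0.0 and 1.0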
Example 6
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def logfbank_features(signal, samplerate=44100, fps=24, num_filt=40, num_cepstra=40, nfft=8192, **kwargs):
    winstep = 2 / fps
    winlen = winstep * 2
    feat, energy = psf.fbank(signal=signal, samplerate=samplerate,
                             winlen=winlen, winstep=winstep, nfilt=num_filt,
                             nfft=nfft)
    feat = np.log(feat)
    feat = psf.dct(feat, type=2, axis=1, norm='ortho')[:, :num_cepstra]
    feat = psf.lifter(feat, L=22)
    feat = np.asarray(feat)

    energy = np.log(energy)
    energy = energy.reshape([energy.shape[0],1])

    if feat.shape[0] > 1:
        std = 0.5 * np.std(feat, axis=0)
        mat = (feat - np.mean(feat, axis=0)) / std
    else:
        mat = feat

    mat = np.concatenate((mat, energy), axis=1)

    duration = signal.shape[0] / samplerate
    expected_frames = fps * duration
    assert mat.shape[0] - expected_frames <= 1, "Produced feature number does not match framerate"
    return mat 
Example 7
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License
def plotLearningCurves(train, classifier):
    #P.show()
    X = train.values[:, 1::]
    y = train.values[:, 0]

    train_sizes, train_scores, test_scores = learning_curve(
            classifier, X, y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.figure()
    plt.title("Learning Curves")
    plt.xlabel("Training samples")
    plt.ylabel("Error Rate")
    plt.ylim((0, 1))
    plt.gca().invert_yaxis()
    plt.grid()

    # Plot the average training and test score lines at each training set size
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Test score")
    plt.legend(loc="best")  # legend is built after the labeled plot calls exist

    # Plot the std deviation as a transparent range at each training set size
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="b")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                     alpha=0.1, color="r")

    # Draw the plot and reset the y-axis
    plt.draw()
    plt.gca().invert_yaxis()

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
    classifier.fit(X_train, y_train)
    plt.show() 
Example 8
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License
def maxlk_sigma(m, xold=None, eps=1e-8, max_iter=100):
    '''Maximum likelihood equation to estimate sigma from gamma distributed values'''

    sum_m2 = np.sum(m**2)
    K = m.size
    sum_log_m2 = np.sum(np.log(m**2))

    def f(sigma):
        return digamma(sum_m2/(2*K*sigma**2)) - sum_log_m2/K + np.log(2*sigma**2)

    def fprime(sigma):
        return -sum_m2 * polygamma(1, sum_m2/(2*K*sigma**2)) / (K*sigma**3) + 2/sigma

    if xold is None:
        xold = m.std()

    for _ in range(max_iter):

        xnew = xold - f(xold) / fprime(xold)

        if np.abs(xold - xnew) < eps:
            break

        xold = xnew

    return xnew 
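A usage sketch, assuming maxlk_sigma from above is in scope: for magnitudes of zero-mean Gaussian noise, the Newton iteration (started from m.std()) should recover the noise standard deviation.

import numpy as np

rng = np.random.default_rng(0)
m = np.abs(rng.normal(0, 2.0, size=100000))  # |N(0, sigma)| samples, sigma = 2
print(maxlk_sigma(m))  # should converge near 2.0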
Example 9
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lillabcrossval_network.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 10
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 11
Project: MODS_ConvNet   Author: santiagolopezg   File: test_labcrossval_network.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 12
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def cv_calc(cvscores):
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 6:
            other_counter = 0
    return cvscores, test_metrics 
Example 13
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lilfoo.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 14
Project: ML_from_scratch   Author: jarfa   File: util.py    Apache License 2.0
def normalize(data, mean=None, sd=None):
    if mean is None:
        mean = np.mean(data, axis=0)
    if sd is None:
        sd = np.std(data, axis=0)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        normalized = np.divide(data - mean, sd)
    normalized[np.isnan(normalized)] = 0.0
    normalized[np.isinf(normalized)] = 0.0
    return normalized 
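A short usage note on the edge-case handling above: a constant column has sd == 0, so the division produces nan (suppressed via catch_warnings), which the function then zeroes out. Assuming the normalize defined above:

import numpy as np

data = np.array([[1.0, 5.0], [2.0, 5.0], [3.0, 5.0]])  # second column is constant
print(normalize(data))  # first column standardized; constant column mapped to 0.0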
Example 15
Project: ML_from_scratch   Author: jarfa   File: test_util.py    Apache License 2.0
def test_norm_no_params(self):
        data = data_to_norm(20)
        normed = normalize(data)
        # comparing numpy arrays with the unittest module is a bit ugly
        self.assertListEqual(
            rounded_list(np.mean(normed, axis=0)),
            [0.0] * 3
        )
        self.assertListEqual(
            rounded_list(np.std(normed, axis=0)),
            [1.0] * 3
        ) 
Example 16
Project: ML_from_scratch   Author: jarfa   File: test_util.py    Apache License 2.0
def test_norm_defined_params(self):
        data = data_to_norm(20)
        means = np.mean(data, axis=0)
        stdevs = np.std(data, axis=0)
        normed = normalize(data, mean=1, sd=3)
        self.assertListEqual(
            rounded_list(np.mean(normed, axis=0)),
            rounded_list((means - 1.) / 3.)
        )
        self.assertListEqual(
            rounded_list(np.std(normed, axis=0)),
            rounded_list(stdevs / 3, 6)
        ) 
Example 17
Project: nmp_qc   Author: priba   File: utils.py    MIT License
def normalize_data(data, mean, std):
    data_norm = (data-mean)/std
    return data_norm 
Example 18
Project: oslodatascience-rl   Author: Froskekongen   File: erlenda_pong_parallel.py    MIT License
def run_training(mod):
    xs,ys,drs,inds = [],[],[],[]
    bn=0
    pp=Pool(3)
    while True:
        thr=[ iii for iii in range(3)]
        outs=pp.map(run_episodes,thr)
        for o in outs:
            xs.extend(o[0])
            ys.extend(o[1])
            drs.extend(o[2])
        # for iii in range(4):
        #     xs_n,ys_n,drs_n=run_episodes(mod)
        #     xs.extend(xs_n)
        #     ys.extend(ys_n)
        #     drs.extend(drs_n)
        print('Updating weights...')
        # stack together all inputs, actions, and rewards for this episode
        epx = np.vstack(xs)
        print(epx.shape)
        epy = np.vstack(ys)
        epr = np.vstack(drs)
        xs,ys,drs, = [],[],[] # reset array memory

        # compute the discounted reward backwards through time
        discounted_epr = discount_rewards(epr)
        # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)

        # update our model weights (all in one batch)
        mod.train_on_batch(epx, epy, sample_weight=discounted_epr.reshape((-1,)))
        mod.save_weights('mod_weights_binary.h5')
        del epx, epy, epr, discounted_epr
        bn+=1 
Example 19
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders4.py    MIT License
def discount_with_rewards(gradient_log_p, episode_rewards, gamma):
    """ discount the gradient with the normalized rewards """
    discounted_episode_rewards = discount_rewards(episode_rewards, gamma)
    # standardize the rewards to be unit normal (helps control the gradient estimator variance)
    discounted_episode_rewards -= np.mean(discounted_episode_rewards)
#    discounted_episode_rewards /= np.std(discounted_episode_rewards)
    return gradient_log_p * discounted_episode_rewards 
Example 20
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders3.py    MIT License
def discount_with_rewards(gradient_log_p, episode_rewards, gamma):
    """ discount the gradient with the normalized rewards """
    discounted_episode_rewards = discount_rewards(episode_rewards, gamma)
    # standardize the rewards to be unit normal (helps control the gradient estimator variance)
    discounted_episode_rewards -= np.mean(discounted_episode_rewards)
#    discounted_episode_rewards /= np.std(discounted_episode_rewards)
    return gradient_log_p * discounted_episode_rewards 
Example 21
Project: oslodatascience-rl   Author: Froskekongen   File: me_pong.py    MIT License
def discount_with_rewards(gradient_log_p, episode_rewards, gamma):
    """ discount the gradient with the normalized rewards """
    discounted_episode_rewards = discount_rewards(episode_rewards, gamma)
    # standardize the rewards to be unit normal (helps control the gradient estimator variance)
    discounted_episode_rewards -= np.mean(discounted_episode_rewards)
    discounted_episode_rewards /= np.std(discounted_episode_rewards)
    return gradient_log_p * discounted_episode_rewards 
Example 22
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders.py    MIT License
def discount_with_rewards(gradient_log_p, episode_rewards, gamma):
    """ discount the gradient with the normalized rewards """
    discounted_episode_rewards = discount_rewards(episode_rewards, gamma)
    # standardize the rewards to be unit normal (helps control the gradient estimator variance)
    discounted_episode_rewards -= np.mean(discounted_episode_rewards)
    discounted_episode_rewards /= np.std(discounted_episode_rewards)
    return gradient_log_p * discounted_episode_rewards 
Example 23
Project: oslodatascience-rl   Author: Froskekongen   File: pogn.py    MIT License
def discount_with_rewards(self, episode_rewards):
        """ discount the gradient with the normalized rewards """
        discounted_episode_rewards = self.discount_rewards(episode_rewards)
        # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        discounted_episode_rewards -= np.mean(discounted_episode_rewards)
        discounted_episode_rewards /= np.std(discounted_episode_rewards)
        return discounted_episode_rewards 
Example 24
Project: oslodatascience-rl   Author: Froskekongen   File: pogn_parallel.py    MIT License
def discount_with_rewards(self, episode_rewards):
        """ discount the gradient with the normalized rewards """
        discounted_episode_rewards = self.discount_rewards(episode_rewards)
        # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        discounted_episode_rewards -= np.mean(discounted_episode_rewards)
        discounted_episode_rewards /= np.std(discounted_episode_rewards)
        return discounted_episode_rewards 
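Examples 18 through 24 share the same standardization step: shift the discounted rewards to zero mean and scale to unit variance before using them as sample weights (two of the variants leave the variance scaling commented out). A toy check:

import numpy as np

rewards = np.array([1.0, 0.5, 0.25, -1.0])
rewards -= np.mean(rewards)
rewards /= np.std(rewards)
print(np.mean(rewards), np.std(rewards))  # ~0.0 and 1.0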
Example 25
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: utils.py    MIT License
def normalizeByMeanAndStd(img, mean, std):

    castImageFilter = sitk.CastImageFilter()
    castImageFilter.SetOutputPixelType(sitk.sitkFloat32)
    img = castImageFilter.Execute(img)
    subFilter = sitk.SubtractImageFilter()
    image = subFilter.Execute(img, mean)

    divFilter = sitk.DivideImageFilter()
    image = divFilter.Execute(image, std)

    return image 
Example 26
Project: PO_2_MLSA   Author: jvollme   File: PO_2_MLSA.py    GNU General Public License v3.0
def pf_get_mean_and_stdev(depthlist): #phylofilter
	#import numpy
	#depthmax = max(depthlist)
	#depthmin = min(depthlist)
	depthmean = numpy.mean(depthlist)
	depthstdev = numpy.std(depthlist, ddof = 1)
	return depthmean, depthstdev 
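Note the ddof=1 above: numpy.std defaults to the population formula (divide by N), while ddof=1 gives the Bessel-corrected sample estimate (divide by N - 1). For illustration:

import numpy

depthlist = [4.0, 8.0, 6.0, 2.0]
print(numpy.std(depthlist))          # 2.236..., divides by N
print(numpy.std(depthlist, ddof=1))  # 2.582..., divides by N - 1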
Example 27
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metrics.py    Apache License 2.0
def rse(label, pred):
    """computes the root relative squared error (condensed using standard deviation formula)"""
    numerator = np.sqrt(np.mean(np.square(label - pred), axis = None))
    denominator = np.std(label, axis = None)
    return numerator / denominator 
Example 28
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metrics.py    Apache License 2.0
def corr(label, pred):
    """computes the empirical correlation coefficient"""
    numerator1 = label - np.mean(label, axis=0)
    numerator2 = pred - np.mean(pred, axis = 0)
    numerator = np.mean(numerator1 * numerator2, axis=0)
    denominator = np.std(label, axis=0) * np.std(pred, axis=0)
    return np.mean(numerator / denominator) 
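Because the numerator and denominator both use the population convention, corr reduces to the Pearson correlation per column; for a single column it matches np.corrcoef. A quick check, assuming corr from above is in scope:

import numpy as np

label = np.array([[1.0], [2.0], [3.0], [4.0]])
pred = np.array([[1.1], [1.9], [3.2], [3.8]])
print(corr(label, pred))                               # ~0.991
print(np.corrcoef(label.ravel(), pred.ravel())[0, 1])  # same value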
Example 29
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: rnn.py    Apache License 2.0
def run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim):
    obj = {"foreach": ForeachRNN, "while_loop": WhileRNN}[args.benchmark]
    inputs = _array((seq_len, batch_size, hidden_dim), ctx)
    states = [_array((batch_size, hidden_dim), ctx) for _ in cell_type(0).state_info()]
    if args.benchmark == "while_loop":
        states.insert(0, _zeros((1, ), ctx))

    for is_train, is_hyb_cell, is_hyb_layer in product([True, False], [False, True], [False, True]):
        cell = cell_type(hidden_dim)
        if is_hyb_cell:
            cell.hybridize(static_alloc=True)
        layer = obj(cell, seq_len)
        layer.initialize(ctx=ctx)
        if is_hyb_layer:
            layer.hybridize(static_alloc=True)
        print("is_train = %r, hybridize_cell = %r, hybridize_layer = %r" % (is_train, is_hyb_cell, is_hyb_layer))
        times = []
        for _ in range(args.warmup_rounds + args.test_rounds):
            tick = time()
            if not is_train:
                res = layer(inputs, states)
            else:
                with mx.autograd.record():
                    res = layer(inputs, states)
            if is_train:
                res.backward()
            mx.nd.waitall()
            tock = time()
            times.append((tock - tick) * 1000.0)
        times = times[args.warmup_rounds: ]
        print("Time used: mean = %.3f ms, std = %.3f ms" % (np.mean(times), np.std(times))) 
Example 30
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_random.py    Apache License 2.0
def set_seed_variously_for_context(ctx, init_seed, num_init_seeds, final_seed):
    end_seed = init_seed + num_init_seeds
    for seed in range(init_seed, end_seed):
        mx.random.seed(seed, ctx=ctx)
    mx.random.seed(final_seed, ctx=ctx)
    return end_seed

# Tests that seed setting of std (non-parallel) rng for specific context is synchronous w.r.t. rng use before and after. 
Example 31
Project: estim2bapi   Author: fredhatt   File: motion.py    MIT License
def get_stats(self, low=0, high=None):
        means = np.mean(self.hist, axis=0)    
        stds = np.std(self.hist, axis=0)
        return means[low:high], stds[low:high] 
Example 32
Project: estim2bapi   Author: fredhatt   File: motion.py    MIT License
def calibrate_velocities(self, motionstd=None):
        vels = self.calc_velocities()
        self.vel_means, self.vel_stds = np.mean(vels, axis=0), np.std(vels, axis=0)
        if motionstd is not None:
            self.vel_stds = motionstd

        return self.vel_means, self.vel_stds 
Example 33
Project: estim2bapi   Author: fredhatt   File: motion.py    MIT License
def calibrate_speeds(self, motionstd=None):
        speeds = self.calc_speeds()
        ##df = pd.DataFrame(speeds, columns=['vel'])
        ##ema = pd.ewma(df, alpha=0.5)
        ##self.speed_means = ema.mean().values[-1]
        ##self.speed_stds = ema.std().values[-1]
        self.speed_means, self.speed_stds = np.mean(speeds, axis=0), np.std(speeds, axis=0)
        if self.speed_stds < 1.5: self.speed_stds = 10.0
        
        if motionstd is not None:
            self.speed_stds = motionstd

        return self.speed_means, self.speed_stds 
Example 34
Project: estim2bapi   Author: fredhatt   File: motion.py    MIT License
def calibrate_angles(self, angstd=None):
        angles = self.calc_angles()
        self.angle_means, self.angle_stds = np.mean(angles, axis=1), np.std(angles, axis=1)
        if self.angle_stds[0] < 2.0: self.angle_stds[0] = 0.75
        if self.angle_stds[1] < 2.0: self.angle_stds[1] = 0.75
        if angstd is not None:
            self.angle_stds = np.array([angstd, angstd])

        return self.angle_means, self.angle_stds 
Example 35
Project: estim2bapi   Author: fredhatt   File: motion.py    MIT License
def get_stats(self, low=0, high=None):
        means = np.mean(self.hist, axis=0)    
        stds = np.std(self.hist, axis=0)
        return means[low:high], stds[low:high] 
Example 36
Project: estim2bapi   Author: fredhatt   File: motion.py    MIT License
def calibrate_angles(self, angstd=None):
        angles = self.calc_angles()
        self.angle_means, self.angle_stds = np.mean(angles, axis=1), np.std(angles, axis=1)
        if angstd is not None:
            self.angle_stds = np.array([angstd, angstd])

        return self.angle_means, self.angle_stds 
Example 37
Project: DOTA_models   Author: ringringyi   File: input.py    Apache License 2.0
def image_whitening(data):
  """
  Subtracts mean of image and divides by adjusted standard variance (for
  stability). Operations are per image but performed for the entire array.
  :param image: 4D array (ID, Height, Weight, Channel)
  :return: 4D array (ID, Height, Weight, Channel)
  """
  assert len(np.shape(data)) == 4

  # Compute number of pixels in image
  nb_pixels = np.shape(data)[1] * np.shape(data)[2] * np.shape(data)[3]

  # Subtract mean
  mean = np.mean(data, axis=(1,2,3))

  ones = np.ones(np.shape(data)[1:4], dtype=np.float32)
  for i in xrange(len(data)):
    data[i, :, :, :] -= mean[i] * ones

  # Compute adjusted standard deviation
  adj_std_var = np.maximum(np.ones(len(data), dtype=np.float32) / math.sqrt(nb_pixels), np.std(data, axis=(1,2,3))) #NOLINT(long-line)

  # Divide image
  for i in xrange(len(data)):
    data[i, :, :, :] = data[i, :, :, :] / adj_std_var[i]

  print(np.shape(data))

  return data 
Example 38
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling_and_clip.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        ret = np.array([
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ])
        return [np.clip(ret/40.0, -1, 1).max()] 
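A toy check of this scale-and-clip pattern, with the taxel value arrays mocked for illustration:

import numpy as np

left = np.array([0.0, 20.0, 40.0])  # stand-in for msg.taxels[0].values
right = np.array([5.0, 5.0, 5.0])   # stand-in for msg.taxels[1].values
ret = np.array([np.std(left), np.std(right)])
print(np.clip(ret / 40.0, -1, 1).max())  # ~0.408; the constant taxel has std 0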
Example 39
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling_and_clip.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.clip(ret/60.0, -1, 1).max()', \
        ] 
Example 40
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling_and_clip.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        ret = np.array([
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ])
        return np.clip(ret/60.0, -1, 1) 
Example 41
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling_and_clip.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.clip(ret/60.0, -1, 1)', \
            'tactile_static_data.right.std.clip(ret/60.0, -1, 1)', \
        ] 
Example 42
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        ret = np.array([
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ])
        return ret/300.0 
Example 43
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        if self.prev_f is None:
            ret = [0, 0]
        else:
            ret = [cur_f[0]-self.prev_f[0], cur_f[1]-self.prev_f[1]]
        self.prev_f = cur_f
        return ret 
Example 44
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.1stderivative', \
            'tactile_static_data.right.std.1stderivative', \
        ] 
Example 45
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 3:
            ret = [0, 0]
        else:
            ret = [
                self.prev_f[0][0]+self.prev_f[2][0]-2*self.prev_f[1][0], 
                self.prev_f[0][1]+self.prev_f[2][1]-2*self.prev_f[1][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 46
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.EdgeDetectorSize3', \
            'tactile_static_data.right.std.EdgeDetectorSize3', \
        ] 
Example 47
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 5:
            ret = [0, 0]
        else:
            ret = [
                -2*self.prev_f[0][0]-self.prev_f[1][0]+self.prev_f[3][0]+2*self.prev_f[4][0], 
                -2*self.prev_f[0][1]-self.prev_f[1][1]+self.prev_f[3][1]+2*self.prev_f[4][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 48
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        return [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ] 
Example 49
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std', \
            'tactile_static_data.right.std', \
        ] 
Example 50
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        if self.prev_f is None:
            ret = [0, 0]
        else:
            ret = [cur_f[0]-self.prev_f[0], cur_f[1]-self.prev_f[1]]
        self.prev_f = cur_f
        return ret 
Example 51
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.1stderivative', \
            'tactile_static_data.right.std.1stderivative', \
        ] 
Example 52
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 3:
            ret = [0, 0]
        else:
            ret = [
                self.prev_f[0][0]+self.prev_f[2][0]-2*self.prev_f[1][0], 
                self.prev_f[0][1]+self.prev_f[2][1]-2*self.prev_f[1][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 53
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 5:
            ret = [0, 0]
        else:
            ret = [
                -2*self.prev_f[0][0]-self.prev_f[1][0]+self.prev_f[3][0]+2*self.prev_f[4][0], 
                -2*self.prev_f[0][1]-self.prev_f[1][1]+self.prev_f[3][1]+2*self.prev_f[4][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 54
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.EdgeDetectorSize5', \
            'tactile_static_data.right.std.EdgeDetectorSize5', \
        ] 
Example 55
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling_and_clip.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        ret = np.array([
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ])
        return [np.clip(ret/60.0, -1, 1).max()] 
Example 56
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling_and_clip.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.clip(ret/60.0, -1, 1).max()', \
        ] 
Example 57
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        ret = np.array([
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ])
        return ret/250.0 
Example 58
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        if self.prev_f is None:
            ret = [0, 0]
        else:
            ret = [cur_f[0]-self.prev_f[0], cur_f[1]-self.prev_f[1]]
        self.prev_f = cur_f
        return ret 
Example 59
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.1stderivative', \
            'tactile_static_data.right.std.1stderivative', \
        ] 
Example 60
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 3:
            ret = [0, 0]
        else:
            ret = [
                self.prev_f[0][0]+self.prev_f[2][0]-2*self.prev_f[1][0], 
                self.prev_f[0][1]+self.prev_f[2][1]-2*self.prev_f[1][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 61
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.EdgeDetectorSize3', \
            'tactile_static_data.right.std.EdgeDetectorSize3', \
        ] 
Example 62
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters_with_scaling.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 5:
            ret = [0, 0]
        else:
            ret = [
                -2*self.prev_f[0][0]-self.prev_f[1][0]+self.prev_f[3][0]+2*self.prev_f[4][0], 
                -2*self.prev_f[0][1]-self.prev_f[1][1]+self.prev_f[3][1]+2*self.prev_f[4][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 63
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        return [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ] 
Example 64
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std', \
            'tactile_static_data.right.std', \
        ] 
Example 65
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        if self.prev_f is None:
            ret = [0, 0]
        else:
            ret = [cur_f[0]-self.prev_f[0], cur_f[1]-self.prev_f[1]]
        self.prev_f = cur_f
        return ret 
Example 66
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def vector_meaning():
        return [
            'tactile_static_data.left.std.1stderivative', \
            'tactile_static_data.right.std.1stderivative', \
        ] 
Example 67
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 3:
            ret = [0, 0]
        else:
            ret = [
                self.prev_f[0][0]+self.prev_f[2][0]-2*self.prev_f[1][0], 
                self.prev_f[0][1]+self.prev_f[2][1]-2*self.prev_f[1][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 68
Project: smach_based_introspection_framework   Author: birlrobotics   File: msg_filters.py    BSD 3-Clause "New" or "Revised" License
def convert(self, msg):
        cur_f = [
            np.std(msg.taxels[0].values),
            np.std(msg.taxels[1].values),
        ]
        self.prev_f.append(cur_f)
        if len(self.prev_f) < 5:
            ret = [0, 0]
        else:
            ret = [
                -2*self.prev_f[0][0]-self.prev_f[1][0]+self.prev_f[3][0]+2*self.prev_f[4][0], 
                -2*self.prev_f[0][1]-self.prev_f[1][1]+self.prev_f[3][1]+2*self.prev_f[4][1], 
            ]
            self.prev_f.popleft()
        return ret 
Example 69
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License
def get_noise_distribution(data, method='moments'):
    '''Computes sigma and N from an array of gamma distributed data

    input
    -----
    data
        A numpy array of gamma distributed values
    method='moments' or method='maxlk'
        Use either the moments or maximum likelihood equations to estimate the parameters.

    output
    ------
    sigma, N
        parameters related to the original Gaussian noise distribution
    '''

    data = data[data > 0]

    # If we have no voxel or only the same value
    # it leads to a divide by 0 as an edge case
    if data.size == 0 or np.std(data) == 0:
        return 0, 0

    # First get sigma
    if method == 'moments':
        mdata2 = np.mean(data**2)
        mdata4 = np.mean(data**4)

        p1 = mdata4 / mdata2
        p2 = mdata2
        sigma = np.sqrt(p1 - p2) / np.sqrt(2)
    elif method == 'maxlk':
        sigma = maxlk_sigma(data)
    else:
        raise ValueError('Invalid method name {}'.format(method))

    t = data**2 / (2*sigma**2)

    # Now compute N
    if method == 'moments':
        N = np.mean(t)
    elif method == 'maxlk':
        y = np.mean(np.log(t))
        N = inv_digamma(y)
    else:
        raise ValueError('Invalid method name {}'.format(method))

    return sigma, N 
Example 70
Project: FRIDA   Author: LCAV   File: bands_selection.py    MIT License
def select_bands(samples, freq_range, fs, nfft, win, n_bands, div=1):
    '''
    Selects the bins with most energy in a frequency range.

    It is possible to specify a div factor. Then the range is subdivided
    into div equal subbands and n_bands / div per subband are selected.
    '''

    if win is not None and isinstance(win, bool):
        if win:
            win = np.hanning(nfft)
        else:
            win = None

    # Read the signals in a single array
    sig = [wavfile.read(s)[1] for s in samples]
    L = max([s.shape[0] for s in sig])
    signals = np.zeros((L,len(samples)), dtype=np.float32)
    for i in range(signals.shape[1]):
        signals[:sig[i].shape[0],i] = sig[i] / np.std(sig[i][sig[i] > 1e-2])

    sum_sig = np.sum(signals, axis=1)

    sum_STFT = pra.stft(sum_sig, nfft, nfft, win=win, transform=rfft).T
    sum_STFT_avg = np.mean(np.abs(sum_STFT)**2, axis=1)

    # Do some band selection
    bnds = np.linspace(freq_range[0], freq_range[1], div+1)

    freq_hz = np.zeros(n_bands)
    freq_bins = np.zeros(n_bands, dtype=int)

    nsb = n_bands // div

    for i in range(div):

        bl = int(bnds[i] / fs * nfft)
        bh = int(bnds[i+1] / fs * nfft)

        k = np.argsort(sum_STFT_avg[bl:bh])[-nsb:]

        freq_hz[nsb*i:nsb*(i+1)] = (bl + k) / nfft * fs
        freq_bins[nsb*i:nsb*(i+1)] = k + bl

    freq_hz = freq_hz[:n_bands]

    return np.unique(freq_hz), np.unique(freq_bins) 
Example 71
Project: kuaa   Author: rafaelwerneck   File: plugin_zscore.py    GNU General Public License v3.0
def normalize(img_path, images, images_set, pos_train_test, parameters, method,
        train_param):
    """
    Function that performs the normalization of a feature vector.
    
    Calculates the z-score of each position in the feature vector, relative to
    the sample mean and standard deviation of that position in all feature
    vectors.
    """

    print "Normalizer: ZSCORE"
    
    #Get the list of classes and the feature vector of the img_path
    img_classes = images[img_path][POS_CLASSES]
    try:
        img_fv = images[img_path][POS_FV][pos_train_test]
    except:
        img_fv = images[img_path][POS_FV][0]

    print "\tFeature vector of image", img_path, \
          "being normalized by process", os.getpid()

    # Performs the normalization ---------------------------------------------
    #If the parameters of normalization don't exists, calculate the mean and
    #   the standard deviation of the feature vectors in the train set
    if 'Mean' not in train_param:
        list_train = []
        for image in images_set:
            try:
                list_train.append(images[image][POS_FV][pos_train_test])
            except:
                list_train.append(images[image][POS_FV][ZERO_INDEX])
        
        mean_list = numpy.mean(list_train, axis=0)
        std_list = numpy.std(list_train, axis=0)
        
        train_param['Mean'] = mean_list
        train_param['Deviation'] = std_list
    #If the parameters of normalization already exists, load them
    else:
        print "\t\tGet Mean and Standard Deviation"
        mean_list = train_param['Mean']
        std_list = train_param['Deviation']
    
    fv_norm = [(img_fv[index] - mean_list[index]) / std_list[index]
            for index in range(len(img_fv))]
    fv_norm = [fv_item for fv_item in fv_norm if not numpy.isnan(fv_item)]
    #-------------------------------------------------------------------------

    return img_path, len(img_classes), img_classes, fv_norm, train_param 
Example 72
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0
def calc_prog_scores(time_series, bl_index, method):
    """
    Calculates the progression scores. Can be done using either a z-score normalization to baseline or expressing the \
    score as log-ratio of baseline value.

    :param time_series: pandas.Series storing the values at the different points in time which shall be transformed \
    into progression scores.
    :param bl_index: Value representing the baseline measurement in the time column.
    :param method: Specifies which progression score should be calculated. z-score ("z-score") or ratio of baseline \
    ("robl")
    :return: Calculated progression scores
    """

    def _z_score_formula(x, bl, sd):
        """
        Calculates a z-score.

        :param x: Feature value
        :param bl: Baseline feature value
        :param sd: Standard deviation
        :return: z-score
        """
        return (x - bl) / sd

    def _robl_formula(x, bl):
        """
        Calculates the log-ratio between the current feature value and the baseline feature value.

        :param x: Feature Value
        :param bl: Baseline feature Value
        :return: Baseline feature value ratio
        """
        return np.log(x / bl)

    # get baseline value
    try:
        bl_value = time_series.loc[bl_index]
    # raise error if no baseline value is present
    except KeyError:
        raise KeyError("No Baseline value found for entity.")

    # a pandas.Series here means several rows matched the baseline index
    if type(bl_value) == pd.Series:
        raise ValueError("Multiple baseline entries have been found for one entity.")

    if not pd.isnull(bl_value):
        if method == "z-score":
            # calculate standard deviation
            sd = np.std(time_series)
            return time_series.apply(_z_score_formula, args=(bl_value, sd))

        elif method == "robl":
            return time_series.apply(_robl_formula, args=(bl_value,))

    else:
        return np.nan 
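A usage sketch for the function above, with a hypothetical four-visit series indexed by time point ('bl' marking the baseline row):

import numpy as np
import pandas as pd

ts = pd.Series([10.0, 12.0, 9.0, 15.0], index=['bl', 'm06', 'm12', 'm24'])
print(calc_prog_scores(ts, 'bl', method='z-score'))  # (x - baseline) / std of the series
print(calc_prog_scores(ts, 'bl', method='robl'))     # log(x / baseline)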
Example 73
Project: ML_from_scratch   Author: jarfa   File: compare_models.py    Apache License 2.0
def test_models(csv_name):
    metrics_cols = ["model", "source", "target", "roc_auc", "norm_ll",
        "train_roc_auc", "train_norm_ll", "train_time", "pred_time"]
    metrics_data = dict((k,[]) for k in metrics_cols)

    digits = datasets.load_digits() #has attributes digits.data, digits.target

    # for each target, run each model 3 times on different datasets
    for run in range(3):
        for target_val in range(10):
            # to see how these models compete on a wider variety of data,
            # let's get a different train/test split for each run
            np.random.seed(10 * run + target_val)
            (train_data, holdout_data, train_targets, holdout_targets
                ) = train_test_split(
                    digits.data, 
                    np.array(digits.target == target_val, dtype=float),
                    test_size=0.25
            )
            train_mean = np.mean(train_data, axis=0)
            train_std = np.std(train_data, axis=0)
            norm_train_data = normalize(train_data, train_mean, train_std)
            norm_holdout_data = normalize(holdout_data, train_mean, train_std)
            test_br = np.mean(holdout_targets)
            train_br = np.mean(train_targets)
            # create all models fresh, ready to be trained
            for (mod_name, source), mod in create_models():
                ll, roc, train_ll, train_roc, ttime, ptime = try_model(
                    mod,
                    norm_train_data,
                    norm_holdout_data,
                    train_targets,
                    holdout_targets
                )
                metrics_data["model"].append(mod_name)
                metrics_data["source"].append(source)
                metrics_data["target"].append(target_val)
                metrics_data["roc_auc"].append(roc)
                metrics_data["norm_ll"].append(normLL(ll, test_br))
                metrics_data["train_roc_auc"].append(train_roc)
                metrics_data["train_norm_ll"].append(normLL(train_ll, train_br))
                metrics_data["train_time"].append(ttime)
                metrics_data["pred_time"].append(ptime)

    df = pd.DataFrame(metrics_data)
    df.to_csv(csv_name, index=False)
    print("Wrote {0:d} rows to {1}".format(df.shape[1], csv_name)) 
Example 74
Project: ML_from_scratch   Author: jarfa   File: RegressionSGD.py    Apache License 2.0
def train(self, data, targets):
        # now that we have the data, we know the shape of the weight vector
        self.coefs = np.zeros(data.shape[1])
        # generate holdout set
        train_data, holdout_data, train_targets, holdout_targets = train_test_split(
            data, targets, test_size=self.holdout_proportion)

        if self.normalize_data:
            train_mean = np.mean(train_data, axis=0)
            train_std = np.std(train_data, axis=0)
            train_data = normalize(train_data, train_mean, train_std)
            if self.holdout_proportion:
                holdout_data = normalize(holdout_data, train_mean, train_std)

        for epoch in range(self.n_epochs):
            if epoch > 0:
                # randomize order for each epoch
                train_data, train_targets = shuffle_rows(train_data, train_targets)

            for batch_data, batch_targets in self.get_minibatches(train_data, train_targets):
                # evaluate the gradient on this minibatch with the current coefs
                w_gradient, b_gradient = self.loss.gradient(batch_data,
                                            self.predict(batch_data), batch_targets)
                self.coefs -= self.learning_rate * w_gradient
                self.bias -= self.learning_rate * b_gradient
                # TODO: add learning rate decay
                # TODO: add momentum, rmsprop, etc.

                # regularization
                # I'm not regularizing the bias parameter
                if self.l2:
                    self.coefs -= 2. * self.l2 * self.coefs
                if self.l1:
                    self.coefs = np.sign(self.coefs) * np.maximum(
                        0.0, np.absolute(self.coefs) - self.l1)

            # report after every 2^(n-1) epoch and at the end of training
            if self.verbose and self.holdout_proportion and (
                (epoch & (epoch - 1)) == 0 or epoch == (self.n_epochs - 1)
                ):
                # evaluate holdout set w/ current coefs
                holdout_loss = self.loss.loss(holdout_targets,
                                    self.predict(holdout_data))
                sgd_report(
                    epoch=1 + epoch,
                    loss=holdout_loss,
                    br=np.mean(holdout_targets) if isinstance(self.loss, Logistic) else "",
                    bias=self.bias
                ) 
Example 75
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_atari_multi.py    MIT License
def updateModel(self):
        print('Updating model...')
        discountedRewards = self.getDiscountedRewards()
        X = self.getStates()
        fakeLabels = [1 if action == 2 else 0 for action in self.getActions().flatten()]
        Y = np.vstack(fakeLabels)


        discountedRewards -= np.mean(discountedRewards)
        discountedRewards /= np.std(discountedRewards)

        # fakeLabels = [self.action2Class[action] for action in self.getActions().flatten()]
        # actionValues = discountedRewards - valuePreds
        # Y = responseWithSampleWeights(Y, actionValues, self.nbActionClasses)
        # self.model.train_on_batch(X, [Y, discountedRewards])
        self.model.train_on_batch(X, Y, sample_weight=discountedRewards.reshape((-1,)))

    # def updateModel(self):
        # '''Should do all work with updating weights.'''
        # print('Updating weights...')
        # # stack together all inputs, actions, and rewards for this episode
        # epx = np.vstack(self.states)
        # fakeLabels = [1 if action == 2 else 0 for action in self.actions]
        # epy = np.vstack(fakeLabels)
        # epr = np.vstack(self.rewards)
        # self.resetMemory()
    
        # # compute the discounted reward backwards through time
        # discounted_epr = self._discountRewards(epr)
        # # standardize the rewards to be unit normal (helps control the gradient estimator variance)
        # discounted_epr -= np.mean(discounted_epr)
        # discounted_epr /= np.std(discounted_epr)
    
        # # update our model weights (all in one batch)
        # self.model.train_on_batch(epx, epy, sample_weight=discounted_epr.reshape((-1,)))

        # if self.episode % (self.batch_size * 3) == 0: 
            # self.model.save(self.modelFileName)

    # def _discountRewards(self, r):
        # """ take 1D float array of rewards and compute discounted reward """
        # discounted_r = np.zeros_like(r)
        # running_add = 0
        # for t in reversed(range(0, r.size)):
            # if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
            # running_add = running_add * self.gamma + r[t]
            # discounted_r[t] = running_add
        # return discounted_r 
Example 76
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: dec.py    Apache License 2.0
def cluster(self, X, y=None, update_interval=None):
        N = X.shape[0]
        if not update_interval:
            update_interval = N
        batch_size = 256
        test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
                                      last_batch_handle='pad')
        args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
        z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
        kmeans = KMeans(self.num_centers, n_init=20)
        kmeans.fit(z)
        args['dec_mu'][:] = kmeans.cluster_centers_
        solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
        def ce(label, pred):
            return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
        solver.set_metric(mx.metric.CustomMetric(ce))

        label_buff = np.zeros((X.shape[0], self.num_centers))
        train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
                                       shuffle=False, last_batch_handle='roll_over')
        self.y_pred = np.zeros((X.shape[0]))
        def refresh(i):
            if i%update_interval == 0:
                z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
                p = np.zeros((z.shape[0], self.num_centers))
                self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
                y_pred = p.argmax(axis=1)
                print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
                if y is not None:
                    print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
                    print(cluster_acc(y_pred, y)[0])
                weight = 1.0/p.sum(axis=0)
                weight *= self.num_centers/weight.sum()
                p = (p**2)*weight
                train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
                print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
                if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
                    self.y_pred = y_pred
                    return True
                self.y_pred = y_pred
        solver.set_iter_start_callback(refresh)
        solver.set_monitor(Monitor(50))

        solver.solve(self.xpu, self.loss, args, self.args_grad, None,
                     train_iter, 0, 1000000000, {}, False)
        self.end_args = args
        if y is not None:
            return cluster_acc(self.y_pred, y)[0]
        else:
            return -1 
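Example 76 uses np.std() over np.bincount() as a quick balance diagnostic: the standard deviation of per-cluster counts is small when samples spread evenly across clusters. A self-contained sketch of that check on synthetic assignments:

import numpy as np

rng = np.random.default_rng(0)
y_pred = rng.integers(0, 10, size=7000)  # synthetic cluster assignments
sizes = np.bincount(y_pred)              # samples per cluster
print(np.std(sizes), sizes)              # near-uniform clusters give a small std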
Example 77
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: ddpg.py    Apache License 2.0 4 votes vote down vote up
def evaluate(self, epoch, memory):

        if epoch == self.n_epochs - 1:
            logger.log("Collecting samples for evaluation")
            rewards = sample_rewards(env=self.env,
                                     policy=self.policy,
                                     eval_samples=self.eval_samples,
                                     max_path_length=self.max_path_length)
            average_discounted_return = np.mean(
                [discount_return(reward, self.discount) for reward in rewards])
            returns = [sum(reward) for reward in rewards]

        all_qs = np.concatenate(self.q_averages)
        all_ys = np.concatenate(self.y_averages)

        average_qfunc_loss = np.mean(self.qfunc_loss_averages)
        average_policy_loss = np.mean(self.policy_loss_averages)

        logger.record_tabular('Epoch', epoch)
        if epoch == self.n_epochs - 1:
            logger.record_tabular('AverageReturn', np.mean(returns))
            logger.record_tabular('StdReturn', np.std(returns))
            logger.record_tabular('MaxReturn', np.max(returns))
            logger.record_tabular('MinReturn', np.min(returns))
            logger.record_tabular('AverageDiscountedReturn', average_discounted_return)
        if len(self.strategy_path_returns) > 0:
            logger.record_tabular('AverageEsReturn',
                                  np.mean(self.strategy_path_returns))
            logger.record_tabular('StdEsReturn',
                                  np.std(self.strategy_path_returns))
            logger.record_tabular('MaxEsReturn',
                                  np.max(self.strategy_path_returns))
            logger.record_tabular('MinEsReturn',
                                  np.min(self.strategy_path_returns))
        logger.record_tabular('AverageQLoss', average_qfunc_loss)
        logger.record_tabular('AveragePolicyLoss', average_policy_loss)
        logger.record_tabular('AverageQ', np.mean(all_qs))
        logger.record_tabular('AverageAbsQ', np.mean(np.abs(all_qs)))
        logger.record_tabular('AverageY', np.mean(all_ys))
        logger.record_tabular('AverageAbsY', np.mean(np.abs(all_ys)))
        logger.record_tabular('AverageAbsQYDiff',
                              np.mean(np.abs(all_qs - all_ys)))

        self.qfunc_loss_averages = []
        self.policy_loss_averages = []
        self.q_averages = []
        self.y_averages = []
        self.strategy_path_returns = [] 
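The tabular logging in Example 77 reduces a list of episode returns to mean, std, max, and min. A minimal sketch of the same summary on synthetic returns, with the logger calls replaced by plain prints:

import numpy as np

rng = np.random.default_rng(0)
returns = rng.normal(loc=100.0, scale=15.0, size=50)  # synthetic episode returns
print('AverageReturn: %.2f' % np.mean(returns))
print('StdReturn: %.2f' % np.std(returns))
print('MaxReturn: %.2f' % np.max(returns))
print('MinReturn: %.2f' % np.min(returns))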
Example 78
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_cvnets.py    Apache License 2.0 4 votes vote down vote up
def test_tensorrt_on_cifar_resnets(batch_size=32, tolerance=0.1, num_workers=1):
    original_try_value = mx.contrib.tensorrt.get_use_tensorrt()
    try:
        models = [
            'cifar_resnet20_v1',
            'cifar_resnet56_v1',
            'cifar_resnet110_v1',
            'cifar_resnet20_v2',
            'cifar_resnet56_v2',
            'cifar_resnet110_v2',
            'cifar_wideresnet16_10',
            'cifar_wideresnet28_10',
            'cifar_wideresnet40_8',
            'cifar_resnext29_16x64d'
        ]

        num_models = len(models)

        speedups = np.zeros(num_models, dtype=np.float32)
        acc_diffs = np.zeros(num_models, dtype=np.float32)

        test_start = time()

        for idx, model in enumerate(models):
            speedup, acc_diff = run_experiment_for(model, batch_size, num_workers)
            speedups[idx] = speedup
            acc_diffs[idx] = acc_diff
            assert acc_diff < tolerance, "Accuracy difference between MXNet and TensorRT > %.2f%% for model %s" % (
                tolerance, model)

        print("Perf and correctness checks run on the following models:")
        print(models)
        mean_speedup = np.mean(speedups)
        std_speedup = np.std(speedups)
        print("\nSpeedups:")
        print(speedups)
        print("Speedup range: [%.2f, %.2f]" % (np.min(speedups), np.max(speedups)))
        print("Mean speedup: %.2f" % mean_speedup)
        print("St. dev. of speedups: %.2f" % std_speedup)
        print("\nAcc. differences: %s" % str(acc_diffs))

        test_duration = time() - test_start

        print("Test duration: %.2f seconds" % test_duration)
    finally:
        mx.contrib.tensorrt.set_use_tensorrt(original_try_value) 
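Example 78 aggregates per-model results the same way: np.mean() for the central tendency of the speedups and np.std() for their spread. A short sketch with hypothetical speedup values:

import numpy as np

speedups = np.array([1.8, 2.1, 1.5, 2.4], dtype=np.float32)  # hypothetical per-model speedups
print('Speedup range: [%.2f, %.2f]' % (np.min(speedups), np.max(speedups)))
print('Mean speedup: %.2f' % np.mean(speedups))
print('St. dev. of speedups: %.2f' % np.std(speedups))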
Example 79
Project: soccer-matlab   Author: utra-robosoccer   File: eval_ars.py    BSD 2-Clause "Simplified" License 4 votes vote down vote up
def main(argv):
  del argv  # Unused.

  print('loading and building expert policy')
  checkpoint_file = os.path.join(FLAGS.logdir, FLAGS.checkpoint)
  lin_policy = np.load(checkpoint_file, encoding='bytes', allow_pickle=True)
  lin_policy = list(lin_policy.items())[0][1]  # items() is not subscriptable in Python 3

  M = lin_policy[0]
  # mean and std of state vectors estimated online by ARS.
  mean = lin_policy[1]
  std = lin_policy[2]

  config = utility.load_config(FLAGS.logdir)
  print("config=",config)
  env = config['env'](hard_reset=True, render=True)
  ob_dim = env.observation_space.shape[0]
  ac_dim = env.action_space.shape[0]

  # set policy parameters. Possible filters: 'MeanStdFilter' for v2, 'NoFilter' for v1.
  policy_params = {
      'type': 'linear',
      'ob_filter': config['filter'],
      'ob_dim': ob_dim,
      'ac_dim': ac_dim,
      "weights": M,
      "mean": mean,
      "std": std,
  }
  policy = policies.LinearPolicy(policy_params, update_filter=False)
  returns = []
  observations = []
  actions = []
  for i in range(FLAGS.num_rollouts):
    print('iter', i)
    obs = env.reset()
    done = False
    totalr = 0.
    steps = 0
    while not done:
      action = policy.act(obs)
      observations.append(obs)
      actions.append(action)

      obs, r, done, _ = env.step(action)
      time.sleep(1./100.)
      totalr += r
      steps += 1
      if steps % 100 == 0:
        print('%i/%i' % (steps, config['rollout_length']))
      if steps >= config['rollout_length']:
        break
    returns.append(totalr)

  print('returns', returns)
  print('mean return', np.mean(returns))
  print('std of return', np.std(returns)) 
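Example 79 does not call np.std() at evaluation time for normalization; it loads the mean and std estimated online by ARS and feeds them to the policy's observation filter. A minimal sketch of that transform, with all shapes and values hypothetical:

import numpy as np

ob_dim, ac_dim = 24, 4
M = np.random.randn(ac_dim, ob_dim)  # hypothetical linear policy weights
mean = np.zeros(ob_dim)              # running mean estimated during training
std = np.ones(ob_dim)                # running std estimated during training

obs = np.random.randn(ob_dim)
action = M.dot((obs - mean) / (std + 1e-8))  # normalize, then apply the linear map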
Example 80
Project: soccer-matlab   Author: utra-robosoccer   File: ars.py    BSD 2-Clause "Simplified" License 4 votes vote down vote up
def train(self, num_iter):

    start = time.time()
    for i in range(num_iter):

      t1 = time.time()
      info_dict = self.train_step()
      t2 = time.time()
      print('total time of one step', t2 - t1)
      print('iter ', i, ' done')

      # record statistics every 10 iterations
      if i % 10 == 0:

        rewards = self.aggregate_rollouts(num_rollouts=8, evaluate=True)
        w = self.workers[0].get_weights_plus_stats()

        checkpoint_filename = os.path.join(
            self.logdir, 'lin_policy_plus_{:03d}.npz'.format(i))
        print('Save checkpoints to {}...'.format(checkpoint_filename))
        checkpoint_file = open(checkpoint_filename, 'wb')  # np.savez needs a binary handle
        np.savez(checkpoint_file, w)
        checkpoint_file.close()
        print('End save checkpoints.')
        print(sorted(self.params.items()))
        logz.log_tabular('Time', time.time() - start)
        logz.log_tabular('Iteration', i + 1)
        logz.log_tabular('AverageReward', np.mean(rewards))
        logz.log_tabular('StdRewards', np.std(rewards))
        logz.log_tabular('MaxRewardRollout', np.max(rewards))
        logz.log_tabular('MinRewardRollout', np.min(rewards))
        logz.log_tabular('timesteps', self.timesteps)
        logz.dump_tabular()

      t1 = time.time()
      # get statistics from all workers
      for j in range(self.num_workers):
        self.policy.observation_filter.update(self.workers[j].get_filter())
      self.policy.observation_filter.stats_increment()

      # make sure master filter buffer is clear
      self.policy.observation_filter.clear_buffer()
      # sync all workers
      #filter_id = ray.put(self.policy.observation_filter)
      setting_filters_ids = [
          worker.sync_filter(self.policy.observation_filter)
          for worker in self.workers
      ]
      # waiting for sync of all workers
      #ray.get(setting_filters_ids)

      increment_filters_ids = [
          worker.stats_increment() for worker in self.workers
      ]
      # waiting for increment of all workers
      #ray.get(increment_filters_ids)
      t2 = time.time()
      print('Time to sync statistics:', t2 - t1)

    return info_dict
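The checkpoint written in Example 80 is what Example 79 reads back: a single object array holding the linear policy weights plus the filter's mean and std. A sketch of that round trip under the same layout assumption:

import numpy as np

w = np.empty(3, dtype=object)
w[0] = np.random.randn(4, 24)  # linear policy weights M
w[1] = np.zeros(24)            # running mean of observations
w[2] = np.ones(24)             # running std of observations

with open('lin_policy_plus_000.npz', 'wb') as f:  # binary mode, as np.savez requires
    np.savez(f, w)

loaded = np.load('lin_policy_plus_000.npz', allow_pickle=True)
M, mean, std = list(loaded.items())[0][1]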