Python numpy.average() Examples

The following are 30 code examples of numpy.average(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the numpy module, or try the search function.
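Before diving into the project examples, here is a minimal, self-contained sketch of what numpy.average() offers beyond numpy.mean(): an optional weights argument and per-axis reduction.

import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0])

# Without weights, average() matches mean().
print(np.average(data))                   # 2.5

# Weighted average: sum(w * x) / sum(w).
weights = np.array([4, 3, 2, 1])
print(np.average(data, weights=weights))  # 2.0

# Reduce along one axis of a 2-D array.
matrix = np.arange(6).reshape(2, 3)
print(np.average(matrix, axis=0))         # [1.5 2.5 3.5]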
Example #1
Source File: distributed_random_forest.py    From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, x_type in enumerate(state["X_meta"]):
            if x_type == "c":  # continuous feature: impute with the mean
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:  # discrete feature: impute with the most frequent value
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example #2
Source File: forest_distributed_decision_trees.py    From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, x_type in enumerate(state["X_meta"]):
            if x_type == "c":  # continuous feature: impute with the mean
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:  # discrete feature: impute with the most frequent value
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example #3
Source File: noduleCADEvaluationLUNA16.py    From DeepLung with GNU General Public License v3.0
import math
import numpy as np

def compute_mean_ci(interp_sens, confidence=0.95):
    sens_mean = np.zeros(interp_sens.shape[1], dtype='float32')
    sens_lb   = np.zeros(interp_sens.shape[1], dtype='float32')
    sens_up   = np.zeros(interp_sens.shape[1], dtype='float32')
    
    Pz = (1.0-confidence)/2.0
    print(interp_sens.shape)
    for i in range(interp_sens.shape[1]):
        # get sorted vector
        vec = interp_sens[:,i]
        vec.sort()

        sens_mean[i] = np.average(vec)
        sens_lb[i] = vec[int(math.floor(Pz*len(vec)))]
        sens_up[i] = vec[int(math.floor((1.0-Pz)*len(vec)))]

    return sens_mean,sens_lb,sens_up 
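A hypothetical call with synthetic data (the array contents are made up for illustration): the input has one row per bootstrap replicate and one column per operating point, and the bounds are the empirical percentiles of each column.

import numpy as np

interp_sens = np.random.rand(1000, 7).astype('float32')  # synthetic replicates
sens_mean, sens_lb, sens_up = compute_mean_ci(interp_sens, confidence=0.95)
# sens_lb and sens_up hold the 2.5th and 97.5th percentile of each column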
Example #4
Source File: ensemble_cpu.py    From kaggle-carvana-2017 with MIT License
def ensemble_image(files, dirs, ensembling_dir, strategy):
    for file in files:
        images = []
        for dir in dirs:
            file_path = os.path.join(dir, file)
            if os.path.exists(file_path):
                images.append(imread(file_path, mode='L'))
        images = np.array(images)

        if strategy == 'average':
            ensembled = average_strategy(images)
        elif strategy == 'hard_voting':
            ensembled = hard_voting(images)
        else:
            raise ValueError('Unknown ensembling strategy')
        imsave(os.path.join(ensembling_dir, file), ensembled) 
Example #5
Source File: main.py    From ConvLab with MIT License
def pprint(self, name, window=None, prefix=None):
        str_losses = []
        for key, loss in self.losses.items():
            if loss is None:
                continue
            aver_loss = np.average(loss) if window is None else np.average(loss[-window:])
            if 'nll' in key:
                str_losses.append('{} PPL {:.3f}'.format(key, np.exp(aver_loss)))
            else:
                str_losses.append('{} {:.3f}'.format(key, aver_loss))

        if prefix:
            return '{}: {} {}'.format(prefix, name, ' '.join(str_losses))
        else:
            return '{} {}'.format(name, ' '.join(str_losses)) 
Example #6
Source File: main.py    From ConvLab with MIT License
def validate_rl(dialog_eval, ctx_gen, num_episode=200):
    print("Validate on training goals for {} episode".format(num_episode))
    reward_list = []
    agree_list = []
    sent_metric = UniquenessSentMetric()
    word_metric = UniquenessWordMetric()
    for _ in range(num_episode):
        ctxs = ctx_gen.sample()
        conv, agree, rewards = dialog_eval.run(ctxs)
        true_reward = rewards[0] if agree else 0
        reward_list.append(true_reward)
        agree_list.append(float(agree if agree is not None else 0.0))
        for turn in conv:
            if turn[0] == 'System':
                sent_metric.record(turn[1])
                word_metric.record(turn[1])
    results = {'sys_rew': np.average(reward_list),
               'avg_agree': np.average(agree_list),
               'sys_sent_unique': sent_metric.value(),
               'sys_unique': word_metric.value()}
    return results 
Example #7
Source File: record.py    From ConvLab with MIT License
def record_rl_task(n_epsd, dialog, goal_gen, rl_f):
    conv_list = []
    reward_list = []
    sent_metric = UniquenessSentMetric()
    word_metric = UniquenessWordMetric()
    print("Begin RL testing")
    cnt = 0
    for g_key, goal in goal_gen.iter(1):
        cnt += 1
        conv, success = dialog.run(g_key, goal)
        true_reward = success
        reward_list.append(true_reward)
        conv_list.append(conv)
        for turn in conv:
            if turn[0] == 'System':
                sent_metric.record(turn[1])
                word_metric.record(turn[1])

    # json.dump(conv_list, text_f, indent=4)
    aver_reward = np.average(reward_list)
    unique_sent_num = sent_metric.value()
    unique_word_num = word_metric.value()
    rl_f.write('{}\t{}\t{}\t{}\n'.format(n_epsd, aver_reward, unique_sent_num, unique_word_num))
    rl_f.flush()
    print("End RL testing") 
Example #8
Source File: fem.py    From simnibs with GNU General Public License v3.0
def _sim_tdcs_pair(mesh, cond, ref_electrode, el_surf, el_c, units, solver_options):
    logger.info('Simulating electrode pair {0} - {1}'.format(
        ref_electrode, el_surf))
    S = FEMSystem.tdcs(mesh, cond, [ref_electrode, el_surf], [0., 1.],
                       solver_options=solver_options)
    v = S.solve()
    v = mesh_io.NodeData(v, name='v', mesh=mesh)
    flux = np.array([
        _calc_flux_electrodes(v, cond,
                              [el_surf - 1000, el_surf - 600,
                               el_surf - 2000, el_surf - 1600],
                              units=units),
        _calc_flux_electrodes(v, cond,
                              [ref_electrode - 1000, ref_electrode - 600,
                               ref_electrode - 2000, ref_electrode - 1600],
                              units=units)])
    current = np.average(np.abs(flux))
    error = np.abs(np.abs(flux[0]) - np.abs(flux[1])) / current
    logger.info('Estimated current calibration error: {0:.1%}'.format(error))
    return el_c / current * v.value 
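In isolation, the calibration check averages the magnitudes of the currents flowing through the two electrodes and reports their mismatch; a sketch with made-up flux values:

import numpy as np

flux = np.array([1.02, -0.97])      # hypothetical in/out current estimates
current = np.average(np.abs(flux))  # 0.995
error = np.abs(np.abs(flux[0]) - np.abs(flux[1])) / current
print('{0:.1%}'.format(error))      # 5.0%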
Example #9
Source File: optimization_methods.py    From simnibs with GNU General Public License v3.0
def _lp_variables(l, target_mean, max_total_current, max_el_current):
        n = l.shape[1]
        if max_el_current is None and max_total_current is None:
            raise ValueError(
                'max_el_current and max_total_current cannot be simultaneously None')
        if max_total_current is not None:
            A_ub = [np.ones((1, 2 * n))]
            b_ub = [2 * max_total_current]
        else:
            A_ub = []
            b_ub = []
        # Constraint on target intensity
        l_ = np.hstack([l, -l])
        # the LP will maximize the average of all targets, and limit the electric field
        # at each individual target
        l_avg = np.average(l_, axis=0)
        A_ub = np.vstack(A_ub + [l_])
        b_ub = np.hstack(b_ub + [target_mean])
        A_eq = np.hstack([np.ones((1, n)), -np.ones((1, n))])
        b_eq = np.array([0.])
        bounds = (0, max_el_current)
        return l_avg, A_ub, b_ub, A_eq, b_eq, bounds 
Example #10
Source File: test_optimization_methods.py    From simnibs with GNU General Public License v3.0
def test_2_targets_field_component(self, optimization_variables_avg):
        l, Q, A = optimization_variables_avg
        l2 = l[::-1]
        l = np.vstack([l, l2])
        m = 2e-3
        m1 = 4e-3
        x = optimization_methods.optimize_field_component(l, max_el_current=m,
                                                          max_total_current=m1)

        l_avg = np.average(l, axis=0)
        x_sp = optimize_comp(l_avg, np.ones_like(l2), max_el_current=m, max_total_current=m1)

        assert np.linalg.norm(x, 1) <= 2 * m1 + 1e-4
        assert np.all(np.abs(x) <= m + 1e-6)
        assert np.isclose(l_avg.dot(x), l_avg.dot(x_sp),
                          rtol=1e-4, atol=1e-4)
        assert np.isclose(np.sum(x), 0) 
Example #11
Source File: bottom_up.py    From Dispersion-based-Clustering with MIT License
def generate_average_feature(self, labels):
        # extract features and classifier weights
        u_feas, fcs = self.get_feature(self.u_data)  # dims: 2048 (feature), 1024 (classifier)

        # group image indices by cluster label
        label_to_images = {}
        for idx, l in enumerate(labels):
            label_to_images[l] = label_to_images.get(l, []) + [idx]
            # label_to_images maps a cluster label to the u_data indices of its members,
            # so u_data[label_to_images[key]] selects the images of cluster key

        # flatten the per-cluster index lists into one ordering of u_data by cluster label
        sort_image_by_label = list(itertools.chain.from_iterable(
            [label_to_images[key] for key in sorted(label_to_images.keys())]))
        # usage: u_data[sort_image_by_label] sorts the data by its cluster label
        # calculate the average feature/classifier of each cluster
        feature_avg = np.zeros((len(label_to_images), len(u_feas[0])))
        fc_avg = np.zeros((len(label_to_images), len(fcs[0])))
        for l in label_to_images:
            feas = u_feas[label_to_images[l]]
            feature_avg[l] = np.mean(feas, axis=0)
            fc_avg[l] = np.mean(fcs[label_to_images[l]], axis=0)
        return u_feas, feature_avg, label_to_images, fc_avg   # [m, 2048], [c, 2048], dict, [c, 1024]
Example #12
Source File: bottom_up.py    From Dispersion-based-Clustering with MIT License
def linkage_calculation(self, dist, labels, penalty): 
        cluster_num = len(self.label_to_images.keys())
        start_index = np.zeros(cluster_num, dtype=int)
        end_index = np.zeros(cluster_num, dtype=int)
        counts = 0
        i = 0
        for key in sorted(self.label_to_images.keys()):
            start_index[i] = counts
            end_index[i] = counts + len(self.label_to_images[key])
            counts = end_index[i]
            i = i + 1
        dist = dist.numpy()
        linkages = np.zeros([cluster_num, cluster_num])
        for i in range(cluster_num):
            for j in range(i, cluster_num):
                linkage = dist[start_index[i]:end_index[i], start_index[j]:end_index[j]]
                linkages[i, j] = np.average(linkage)

        linkages = linkages.T + linkages - linkages * np.eye(cluster_num)
        intra = linkages.diagonal()
        penalized_linkages = linkages + penalty * ((intra * np.ones_like(linkages)).T + intra).T
        return linkages, penalized_linkages 
Example #13
Source File: v3_validation.py    From Attentive-Filtering-Network with MIT License
def utt_scores(scores, scp, utt2label):
    """return predictions and labels per utterance
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        preds.append(avg_scores)
        labels.append(utt2label[key])

    return np.array(preds), np.array(labels) 
Example #14
Source File: v3_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    scores = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores 
Example #15
Source File: v1_validation.py    From Attentive-Filtering-Network with MIT License
def utt_scores(scores, scp, utt2label):
    """return predictions and labels per utterance
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        preds.append(avg_scores)
        labels.append(utt2label[key])

    return np.array(preds), np.array(labels) 
Example #16
Source File: v1_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader, threshold=0.5):
    model.eval()
    loss = 0
    correct = 0
    scores = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, size_average=False)
            pred = output > threshold
            correct += pred.byte().eq(target.byte()).sum().item() # not really meaningful

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores, correct 
Example #17
Source File: v1_prediction.py    From Attentive-Filtering-Network with MIT License
def compute_utt_eer(scores, scp, utt2label, threshold):
    """utterance-based eer
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        if avg_scores < threshold:
            preds.append(0)
        else:
            preds.append(1)
        labels.append(utt2label[key])

    eer = compute_eer(labels, preds)
    confuse_mat = compute_confuse(labels, preds)
    return eer, confuse_mat 
Example #18
Source File: fashion.py    From indras_net with GNU General Public License v3.0
def new_color_pref(old_pref, env_color):
    """
    Calculate new color pref with the formula below:
    new_color = sin(avg(asin(old_pref) + asin(env_color)))
    """
    me = math.asin(old_pref)
    env = math.asin(env_color)
    avg = np.average([me, env], weights=weightings)  # weightings is a module-level array defined elsewhere in fashion.py
    new_color = math.sin(avg)
    return new_color 
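The weights come from the module-level weightings array that is not part of this snippet; a runnable sketch, assuming equal weights for the agent's preference and the environment color:

import math
import numpy as np

weightings = np.array([0.5, 0.5])  # assumption: both terms weighted equally

print(new_color_pref(0.2, 0.8))    # ~0.535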
Example #19
Source File: plotting.py    From cat-bbs with MIT License
def get_recent_average(self, group_name, line_name, nb_points):
        ys = self.line_groups[group_name].lines[line_name].ys[-nb_points:]
        return np.average(ys) 
Example #20
Source File: predict_video.py    From cat-bbs with MIT License
def _rect_to_score(self, rect, heatmap):
        """Compute a score for a given rectangle (i.e. the confidence value).
        Currently this is done via an average of the corresponding activations
        in the heatmap.
        """
        subheatmap = rect.extract_from_image(heatmap)
        if subheatmap.ndim == 2 and subheatmap.shape[0] > 0 and subheatmap.shape[1] > 0:
            return np.average(subheatmap)
        else:
            print("[WARN] Broken heatmap extracted for rectangle", rect)
            return 0

    # The following stuff is some old code to make use of all generated
    # heatmaps. Didn't work well in tests. 
Example #21
Source File: merge_augs.py    From mmdetection with Apache License 2.0
def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
    """Merge augmented mask prediction.

    Args:
        aug_masks (list[ndarray]): shape (n, #class, h, w)
        img_metas (list[list[dict]]): image meta info, including the flip
            state of each augmentation.
        rcnn_test_cfg (dict): rcnn test config.
        weights (list | ndarray, optional): per-augmentation weights for
            the average; defaults to an unweighted mean.

    Returns:
        ndarray: the merged mask prediction.
    """
    recovered_masks = []
    for mask, img_info in zip(aug_masks, img_metas):
        flip = img_info[0]['flip']
        flip_direction = img_info[0]['flip_direction']
        if flip:
            if flip_direction == 'horizontal':
                mask = mask[:, :, :, ::-1]
            elif flip_direction == 'vertical':
                mask = mask[:, :, ::-1, :]
            else:
                raise ValueError(
                    f"Invalid flipping direction '{flip_direction}'")
        recovered_masks.append(mask)

    if weights is None:
        merged_masks = np.mean(recovered_masks, axis=0)
    else:
        merged_masks = np.average(
            np.array(recovered_masks), axis=0, weights=np.array(weights))
    return merged_masks 
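The weighted branch is just numpy.average() over the augmentation axis; a standalone sketch of the same reduction:

import numpy as np

# Two augmented mask predictions, each of shape (n, #class, h, w).
masks = [np.full((1, 1, 2, 2), 0.2), np.full((1, 1, 2, 2), 0.8)]

print(np.mean(masks, axis=0)[0, 0])  # unweighted merge: all 0.5

# Weight the second augmentation three times as heavily.
merged = np.average(np.array(masks), axis=0, weights=np.array([1.0, 3.0]))
print(merged[0, 0])                  # all 0.65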
Example #22
Source File: distributed_weighted_forest.py    From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest, medoids, stats, gower_ranges, group_fillins = [], [], [], [], []
    for i, (k, value) in enumerate(inp):
        if k == "model":
            forest.append(value[0])
            medoids.append(value[1])
            stats.append(value[2])
            gower_ranges.append(value[3])
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)
    out.add("medoids", medoids)
    out.add("stats", stats)
    out.add("gower_ranges", gower_ranges)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, x_type in enumerate(state["X_meta"]):
            if x_type == "c":  # continuous feature: impute with the mean
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:  # discrete feature: impute with the most frequent value
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example #23
Source File: distributed_weighted_forest_rand.py    From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest, medoids, margins, gower_ranges, group_fillins = [], [], [], [], []
    for i, (k, value) in enumerate(inp):
        if k == "model":
            forest.append(value[0])
            margins.append(value[1])
            medoids.append(value[2])
            gower_ranges.append(value[3])
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)
    out.add("medoids", medoids)
    out.add("margins", margins)
    out.add("gower_ranges", gower_ranges)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, x_type in enumerate(state["X_meta"]):
            if x_type == "c":  # continuous feature: impute with the mean
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:  # discrete feature: impute with the most frequent value
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example #24
Source File: wpcr.py    From clock-recovery with MIT License
import numpy

def slice_bits(symbols):
    # threshold at the mean symbol level to recover the bit sequence
    symbols_average = numpy.average(symbols)
    bits = (symbols >= symbols_average)
    return numpy.array(bits, dtype=numpy.uint8)
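For example, slicing a bipolar symbol stream around its mean:

import numpy

symbols = numpy.array([0.1, 0.9, 0.2, 0.8])
print(slice_bits(symbols))  # mean is 0.5, so this prints [0 1 0 1]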
Example #25
Source File: reward.py    From TradzQAI with Apache License 2.0
def unrealized_pnl_sma(self):
        if self.call_id > 0:
            if self.call_id > self.period:
                return np.average(self.daily[self.call_id-self.period:])
            else:
                return np.average(self.daily)
        else:
            return self.current 
Example #26
Source File: base_env.py    From TradzQAI with Apache License 2.0
def avg_reward(self, reward, n):
        if n == 0:
            return np.average(np.array(reward))
        return np.average(np.array(reward[len(reward)-(n+1):])) 
Example #27
Source File: evaler.py    From SSGAN-Tensorflow with MIT License
def report(self):
        # report average accuracy
        log.info("Computing scores...")
        score = []

        for id, pred, gt in zip(self._ids, self._predictions, self._groundtruths):
            score.append(self.compute_accuracy(pred, gt))
        avg = np.average(score)
        log.infov("Average accuracy : %.4f", avg*100) 
Example #28
Source File: visualization.py    From integrated-gradient-pytorch with MIT License
import numpy as np

def convert_to_gray_scale(attributions):
    # collapse the channel axis (H x W x C -> H x W) with an unweighted mean
    return np.average(attributions, axis=2)
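Averaging over axis=2 gives every channel equal weight (unlike a luminance-weighted RGB-to-gray conversion); for example:

import numpy as np

attributions = np.zeros((2, 2, 3))
attributions[..., 0] = 0.75  # red channel
attributions[..., 1] = 0.25  # green channel
attributions[..., 2] = 0.5   # blue channel

gray = convert_to_gray_scale(attributions)
print(gray.shape)  # (2, 2)
print(gray[0, 0])  # 0.5 == (0.75 + 0.25 + 0.5) / 3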
Example #29
Source File: integrated_gradients.py    From integrated-gradient-pytorch with MIT License
def integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, baseline, steps=50, cuda=False):
    if baseline is None:
        baseline = 0 * inputs 
    # scale inputs and compute gradients
    scaled_inputs = [baseline + (float(i) / steps) * (inputs - baseline) for i in range(0, steps + 1)]
    grads, _ = predict_and_gradients(scaled_inputs, model, target_label_idx, cuda)
    avg_grads = np.average(grads[:-1], axis=0)  # mean over all but the last step (left Riemann sum)
    avg_grads = np.transpose(avg_grads, (1, 2, 0))
    integrated_grad = (inputs - baseline) * avg_grads
    return integrated_grad 
Example #30
Source File: integrated_gradients.py    From integrated-gradient-pytorch with MIT License
def random_baseline_integrated_gradients(inputs, model, target_label_idx, predict_and_gradients, steps, num_random_trials, cuda):
    all_intgrads = []
    for i in range(num_random_trials):
        integrated_grad = integrated_gradients(inputs, model, target_label_idx, predict_and_gradients,
                                               baseline=255.0 * np.random.random(inputs.shape), steps=steps, cuda=cuda)
        all_intgrads.append(integrated_grad)
        print('the trial number is: {}'.format(i))
    avg_intgrads = np.average(np.array(all_intgrads), axis=0)
    return avg_intgrads