Python numpy.mean() Examples

The following are code examples for showing how to use numpy.mean(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0 8 votes vote down vote up
def conf_interval(data_series):
    """Compute the 95% confidence interval of the sample mean.

    The interval is based on the standard error of the mean and the
    Student's t distribution (suitable for small samples).

    :return start: Starting value of the interval
    :return end: Ending value of the interval
    """
    center = np.mean(data_series)

    # half-width: SEM scaled by the two-sided 95% t quantile (df = n - 1)
    half_width = sem(data_series) * t.ppf((1 + 0.95) / 2, len(data_series) - 1)

    return center - half_width, center + half_width
Example 2
Project: rhodonite   Author: nestauk   File: phylomemetic.py    MIT License 6 votes vote down vote up
def label_diversification(g, branching_prop, agg=np.mean):
    """label_diversification
    Diversification is defined as the aggregated pairwise Jaccard distance
    (1 - similarity) between the children of a branching node.

    Parameters
    ----------
        g : :obj:`Graph`
            A graph.
        branching_prop : :obj:`graph_tool.VertexPropertyMap`
            A vertex property map that is True where a vertex is a branching
            node.
        agg: :obj:`function`
            An aggregation function. Typically mean or median.
    """
    diversification = g.new_vertex_property('float')
    branching_view = GraphView(g, vfilt=branching_prop, skip_efilt=True)
    for vertex in branching_view.vertices():
        # children arrive via incoming edges
        items = [g.vp['item'][child] for child in g.vertex(vertex).in_neighbors()]
        distances = [
            1 - jaccard_similarity_set(list(a), list(b))
            for a, b in combinations(items, 2)
        ]
        diversification[vertex] = agg(distances)
    return diversification
Example 3
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License 6 votes vote down vote up
def from_vector_array_to_q(v_c1, v_c2):
    """
    Calculate transform quaternion and translation vector from vector pairs in two coordinate
    systems.

    :param v_c1: list or tuple of vectors in the source coordinate system
    :param v_c2: list or tuple of vectors in the target coordinate system
    :return: coordinate rotation quaternion from c1 to c2, translation from v1 to v2
            (qw, qi, qj, qk), (x, y, z)
    """
    if len(v_c1) != len(v_c2) or len(v_c1) <= 3:
        print("Error! Not enough vector pairs or the two arrays differ in length")
        return (1, 0, 0, 0), (0, 0, 0)

    v_c1 = np.asarray(v_c1, dtype=np.float32).T  # shape (3, N): one column per vector
    v_c2 = np.asarray(v_c2, dtype=np.float32).T
    # keepdims=True so the (3, 1) means broadcast against (3, N); the original
    # subtracted a (3,) mean, which mis-broadcasts whenever N != 3.
    mean_c1 = np.mean(v_c1, axis=1, keepdims=True)
    # BUG FIX: the target-side mean must come from v_c2 (was np.mean(v_c1, ...)).
    mean_c2 = np.mean(v_c2, axis=1, keepdims=True)
    v_c1 -= mean_c1
    v_c2 -= mean_c2

    # least-squares rotation estimate on the centered sets:
    # M = C2 C1^T (C1 C1^T)^+
    mat = v_c2.dot(v_c1.T).dot(np.linalg.pinv(v_c1.dot(v_c1.T)))
    # translation maps the source centroid onto the target centroid; the
    # residual mean of the centered sets (original formula) is ~0 by design.
    translation = (mean_c2 - mat.dot(mean_c1)).ravel()
    return from_matrix_to_q(mat), translation
Example 4
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def draw(self, ax, color, line_width=1, fillcolor=None, name=None, arrow=True, alpha=0.2, scale=50):
        """Draw this object's contour on a matplotlib axes.

        :param ax: matplotlib axes to draw on.
        :param color: edge color of the contour patch.
        :param line_width: contour line width.
        :param fillcolor: fill color of the contour patch.
        :param name: optional label drawn near the contour; defaults to
            'front' when no arrow is drawn.
        :param arrow: when True, draw an arrow at the contour center.
        :param alpha: fill transparency of the contour patch.
        :param scale: base arrow size; divided by an axes-extent factor.
        """
        ax.add_patch(PolygonPatch(self.contour, alpha=alpha, fc=fillcolor, ec=color, linewidth=line_width))

        # exterior ring repeats the first vertex; drop the duplicate
        vertices = np.array(self.contour.exterior.coords)[1:]

        if arrow:
            # arrow centered on the vertex mean, aligned with the edge
            # from vertex 1 to vertex 2
            arrow_center = np.mean(vertices, axis=0)
            arrow_direction = (vertices[2] - vertices[1]) / 1.5
            arrow_tail = arrow_center - arrow_direction / 2
            arrow_head = arrow_center + arrow_direction / 2
            style = plt_patches.ArrowStyle.Simple(head_length=.4, head_width=.6, tail_width=.1)
            # scale the arrow inversely with the visible axes area
            x = np.array(ax.axis())
            scale_factor = np.sqrt(np.prod(np.abs(x[::2] - x[1::2])) / (60 * 60))
            arrow_patch = plt_patches.FancyArrowPatch(posA=arrow_tail, posB=arrow_head, arrowstyle=style,
                                                      color='w', mutation_scale= scale / scale_factor, alpha=0.4)
            ax.add_patch(arrow_patch)
        elif name is None:
            name = 'front'

        if name is not None:
            # place the label at the midpoint of the first and last vertices
            text_location = np.mean(vertices[[0, -1]], axis=0)
            ax.text(text_location[0], text_location[1], name, ha='center', va='top', color='w')
Example 5
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def box2dtobox3d(boxes2d, z_translation=0.0, z_size=0.0, z_angle=0.0):
    """Convert axis-aligned 2d boxes to 3d boxes.

    :param boxes2d: np array shaped (N, 4); box = [x1, y1, x2, y2]
        (1 = bottom-left corner, 2 = upper-right corner)
    :param z_translation: z coordinate assigned to every box center.
    :param z_size: z extent assigned to every box.
    :param z_angle: rotation about z assigned to every box.
    :return: boxes3d np array shaped (N, 7); box = [t1,t2,t3,s1,s2,s3,z_angle]
    """
    n = boxes2d.shape[0]

    def column(value):
        # an (N, 1) column filled with a constant
        return np.full([n, 1], value)

    # centers: midpoints of the x and y corner pairs, constant z
    center = np.concatenate(
        (np.mean(boxes2d[:, [0, 2]], axis=-1, keepdims=True),
         np.mean(boxes2d[:, [1, 3]], axis=-1, keepdims=True),
         column(z_translation)), -1)

    # sizes: corner coordinate differences, constant z extent
    extent = np.concatenate(
        (boxes2d[:, 2:3] - boxes2d[:, 0:1],
         boxes2d[:, 3:4] - boxes2d[:, 1:2],
         column(z_size)), -1)

    return np.concatenate((center, extent, column(z_angle)), -1)
Example 6
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License 6 votes vote down vote up
def train(self):
        """Run the main training loop, then report and persist statistics.

        Trains one epoch at a time (train/valid/test passes), checkpoints
        the model after every epoch, and stops early when ``early_stop()``
        fires.  Afterwards picks the best test epoch and pickles all
        collected statistics into ``results.pckl``.
        """
        while (self.epoch < self.option.max_epoch and not self.early_stopped):
            self.one_epoch_train()
            self.one_epoch_valid()
            self.one_epoch_test()
            self.epoch += 1
            model_path = self.saver.save(self.sess,
                                         self.option.model_path,
                                         global_step=self.epoch)
            print("Model saved at %s" % model_path)

            if self.early_stop():
                self.early_stopped = True
                print("Early stopped at epoch %d" % (self.epoch))

        # average the second field of each per-epoch test stat entry
        # (presumably an "in top" hit list -- matches the report below)
        all_test_in_top = [np.mean(x[1]) for x in self.test_stats]
        best_test_epoch = np.argmax(all_test_in_top)
        best_test = all_test_in_top[best_test_epoch]

        msg = "Best test in top: %0.4f at epoch %d." % (best_test, best_test_epoch + 1)
        print(msg)
        self.log_file.write(msg + "\n")
        # BUG FIX: pickle requires a binary file handle ("wb", not "w")
        # under Python 3; a context manager also guarantees the file closes.
        with open(os.path.join(self.option.this_expsdir, "results.pckl"), "wb") as f:
            pickle.dump([self.train_stats, self.valid_stats, self.test_stats], f)
Example 7
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0 6 votes vote down vote up
def calc_mean_diff(series1, series2, rnd=2):
    """
    Calculates the confidence interval of the difference in means between two iterables.

    :param series1: Iterable storing the values of variable 1.
    :param series2: Iterable storing the values of variable 2.
    :param rnd: Number of decimal positions on which result shall be rounded.
    :return: List representing the interval
    """
    # sample statistics per series (unbiased variance, ddof=1)
    n1, mean1, var1 = len(series1), np.mean(series1), np.var(series1, ddof=1)
    n2, mean2, var2 = len(series2), np.mean(series2), np.var(series2, ddof=1)

    # delegate the interval computation to the shared formula helper
    return diff_mean_conf_formula(n1, n2, mean1, mean2, var1, var2, rnd)
Example 8
Project: Adversarial-Face-Attack   Author: ppwwyyxx   File: face_attack.py    GNU General Public License v3.0 6 votes vote down vote up
def validate_on_lfw(model, lfw_160_path):
    """Evaluate a face-embedding model on the LFW verification protocol.

    Computes one 512-d embedding per image for every test pair, then prints
    accuracy, validation rate, AUC and equal error rate.

    :param model: object exposing ``eval_embeddings(batch)`` returning one
        embedding row per input image.
    :param lfw_160_path: directory containing the aligned 160x160 LFW images.
    """
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    # two embeddings (one per image) for every pair
    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        # channel reversal converts OpenCV's BGR to RGB
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    # EER: the point where false-accept and false-reject rates are equal,
    # found as the root of 1 - x - tpr(fpr = x)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer)
Example 9
Project: StructEngPy   Author: zhuoju36   File: element.py    MIT License 6 votes vote down vote up
def __init__(self,node_i, node_j, node_k, node_l,t, E, mu, rho, name=None):
        """Initialise a quadrilateral element from four corner nodes.

        :param node_i: first corner node.
        :param node_j: second corner node.
        :param node_k: third corner node.
        :param node_l: fourth corner node.
        :param t: element thickness, stored on the instance.
        :param E: elastic modulus (presumably).  NOTE(review): unused in this
            visible body -- possibly consumed by a base-class __init__.
        :param mu: material parameter (presumably Poisson's ratio).
            NOTE(review): unused here.
        :param rho: material density (presumably).  NOTE(review): unused here.
        :param name: optional element name.  NOTE(review): unused here.
        """
        #8-nodes
        # NOTE(review): the comment says 8 nodes but only 4 are appended, and
        # self.__nodes is never created here -- presumably initialised by a
        # base-class __init__; confirm before using this class standalone.
        self.__nodes.append(node_i)
        self.__nodes.append(node_j)
        self.__nodes.append(node_k)
        self.__nodes.append(node_l)

        self.__t=t
        
        # NOTE(review): np.mean over node objects is only meaningful if the
        # node type supports arithmetic; `center` feeds only the
        # commented-out local_csys line below and is otherwise unused.
        center=np.mean([node_i,node_j,node_k,node_l])
#        self.local_csys = CoordinateSystem.cartisian(center,nodes[4],nodes[5])
        
        self.__alpha=[]#the angle between edge and local-x, to be added
        self.__alpha.append(self.angle(node_i,node_j,self.local_csys.x))
        self.__alpha.append(self.angle(node_j,node_k,self.local_csys.x))
        self.__alpha.append(self.angle(node_k,node_l,self.local_csys.x))
        self.__alpha.append(self.angle(node_l,node_i,self.local_csys.x))

        # 24x24 element stiffness matrix placeholder (filled elsewhere)
        self.__K=np.zeros((24,24)) 
Example 10
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 6 votes vote down vote up
def found_search(self, x, y):
        '''
        This function is applied when the lane lines have been detected in the previous frame.
        It uses a sliding window to search for lane pixels in close proximity (+/- 25 pixels in the x direction)
        around the previous detected polynomial.

        :param x: array of candidate pixel x coordinates.
        :param y: array of candidate pixel y coordinates.
        :return: (xvals, yvals, found) -- accumulated lane-pixel coordinates
            and whether the lane is still considered found.
        '''
        xvals = []
        yvals = []
        if self.found == True:
            i = 720
            j = 630
            while j >= 0:
                yval = np.mean([i,j])
                # evaluate the previous polynomial fit at the window center
                xval = (np.mean(self.fit0))*yval**2 + (np.mean(self.fit1))*yval + (np.mean(self.fit2))
                x_idx = np.where((((xval - 25) < x)&(x < (xval + 25))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    # BUG FIX: np.append returns a new array and its result
                    # was discarded, so no pixels were ever accumulated;
                    # extend the Python lists in place instead.
                    xvals.extend(x_window)
                    yvals.extend(y_window)
                i -= 90
                j -= 90
        if np.sum(xvals) == 0:
            self.found = False # If no lane pixels were detected then perform blind search
        return xvals, yvals, self.found
Example 11
Project: mmdetection   Author: open-mmlab   File: merge_augs.py    Apache License 2.0 6 votes vote down vote up
def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
    """Merge augmented detection bboxes and scores.

    Args:
        aug_bboxes (list[Tensor]): shape (n, 4*#class)
        aug_scores (list[Tensor] or None): shape (n, #class)
        img_metas (list): per-augmentation metadata providing 'img_shape',
            'scale_factor' and 'flip'.
        rcnn_test_cfg (dict): rcnn test config.

    Returns:
        tuple: (bboxes, scores), or bboxes alone when no scores are given.
    """
    # map every augmentation's boxes back into the original image frame
    recovered = []
    for per_aug_bboxes, img_info in zip(aug_bboxes, img_metas):
        meta = img_info[0]
        recovered.append(
            bbox_mapping_back(per_aug_bboxes, meta['img_shape'],
                              meta['scale_factor'], meta['flip']))
    bboxes = torch.stack(recovered).mean(dim=0)
    if aug_scores is None:
        return bboxes
    scores = torch.stack(aug_scores).mean(dim=0)
    return bboxes, scores
Example 12
Project: mmdetection   Author: open-mmlab   File: merge_augs.py    Apache License 2.0 6 votes vote down vote up
def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
    """Merge augmented mask prediction.

    Args:
        aug_masks (list[ndarray]): shape (n, #class, h, w)
        img_metas (list): per-augmentation metadata; only the 'flip' entry
            is used here, to undo horizontal flips.
        rcnn_test_cfg (dict): rcnn test config.
        weights (list or None): optional per-augmentation averaging weights.

    Returns:
        ndarray: the (weighted) mean of the un-flipped masks.
    """
    recovered_masks = []
    for mask, img_info in zip(aug_masks, img_metas):
        if img_info[0]['flip']:
            # undo the horizontal flip along the last (width) axis
            mask = mask[..., ::-1]
        recovered_masks.append(mask)

    if weights is None:
        return np.mean(recovered_masks, axis=0)
    return np.average(np.array(recovered_masks), axis=0,
                      weights=np.array(weights))
Example 13
Project: mmdetection   Author: open-mmlab   File: analyze_logs.py    Apache License 2.0 6 votes vote down vote up
def cal_train_time(log_dicts, args):
    """Print per-epoch timing statistics for each parsed training log.

    Args:
        log_dicts: list of ``{epoch: {'time': [iter_times, ...]}}`` dicts.
        args: parsed CLI arguments; uses ``json_logs`` (labels for the
            output) and ``include_outliers`` (keep each epoch's first iter).
    """
    for log_index, log_dict in enumerate(log_dicts):
        header = '-' * 5
        print('{}Analyze train time of {}{}'.format(header,
                                                    args.json_logs[log_index],
                                                    header))
        if args.include_outliers:
            per_epoch = [log_dict[epoch]['time'] for epoch in log_dict.keys()]
        else:
            # drop the first iteration of every epoch (warm-up outlier)
            per_epoch = [log_dict[epoch]['time'][1:] for epoch in log_dict.keys()]
        all_times = np.array(per_epoch)
        epoch_ave_time = all_times.mean(-1)
        slowest_epoch = epoch_ave_time.argmax()
        fastest_epoch = epoch_ave_time.argmin()
        std_over_epoch = epoch_ave_time.std()
        print('slowest epoch {}, average time is {:.4f}'.format(
            slowest_epoch + 1, epoch_ave_time[slowest_epoch]))
        print('fastest epoch {}, average time is {:.4f}'.format(
            fastest_epoch + 1, epoch_ave_time[fastest_epoch]))
        print('time std over epochs is {:.4f}'.format(std_over_epoch))
        print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
        print()
Example 14
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_np_targeted_gives_adversarial_example(self):
        """Targeted generate_np should steer >90% of inputs to the target label."""
        # random 2-d inputs in [0, 1)
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        # one-hot targets; np.random.randint(0, 1, ...) always yields 0,
        # so every sample is targeted at class 0
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=100, y_target=feed_labs)

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        # at least 90% of adversarial examples must classify as the target
        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
                        > 0.9)
Example 15
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_gives_adversarial_example(self):
        """Untargeted generate should flip the label for >90% of inputs."""

        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        # the model's original predictions serve as the labels to move away from
        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), orig_labs] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        # build the attack graph symbolically, then run it on the batch
        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        # fewer than 10% of examples may keep their original label
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 16
Project: unicorn-hat-hd   Author: pimoroni   File: forest-fire.py    MIT License 5 votes vote down vote up
def average_forest(forest):
    """Downsample the forest grid by averaging RGB values over neighbourhoods.

    NOTE(review): relies on module-level globals (width, height, space,
    forest_width, forest_height, scale, avg_size, get_neighbours).
    """
    avg_forest = [[space for _ in range(width)] for _ in range(height)]

    for i, x in enumerate(range(1, forest_width, scale)):
        for j, y in enumerate(range(1, forest_height, scale)):
            neighbours = get_neighbours(x, y, avg_size)
            # average each RGB channel over the neighbourhood
            avg_forest[i][j] = [
                int(numpy.mean([forest[n[0]][n[1]][channel] for n in neighbours]))
                for channel in range(3)
            ]

    return avg_forest
Example 17
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License 5 votes vote down vote up
def logfbank_features(signal, samplerate=44100, fps=24, num_filt=40, num_cepstra=40, nfft=8192, **kwargs):
    """Compute per-frame log filterbank / cepstral features plus log energy.

    Frames are aligned to the video frame rate: the window step is two video
    frames and the window length twice the step.

    :param signal: 1-d array of audio samples.
    :param samplerate: audio sample rate in Hz.
    :param fps: video frame rate the features must align with.
    :param num_filt: number of mel filterbanks.
    :param num_cepstra: number of cepstral coefficients kept after the DCT.
    :param nfft: FFT size.
    :return: matrix of shape (frames, num_cepstra + 1); last column is energy.
    """
    winstep = 2 / fps
    winlen = winstep * 2
    feat, energy = psf.fbank(signal=signal, samplerate=samplerate,
                             winlen=winlen, winstep=winstep, nfilt=num_filt,
                             nfft=nfft)
    # log filterbank -> DCT -> liftering: standard MFCC-style pipeline
    feat = np.log(feat)
    feat = psf.dct(feat, type=2, axis=1, norm='ortho')[:, :num_cepstra]
    feat = psf.lifter(feat, L=22)
    feat = np.asarray(feat)

    log_energy = np.log(energy).reshape([energy.shape[0], 1])

    if feat.shape[0] > 1:
        # per-coefficient standardisation (with a halved std)
        mat = (feat - np.mean(feat, axis=0)) / (0.5 * np.std(feat, axis=0))
    else:
        mat = feat

    mat = np.concatenate((mat, log_energy), axis=1)

    # sanity check: frame count must track the video frame rate
    duration = signal.shape[0] / samplerate
    expected_frames = fps * duration
    assert mat.shape[0] - expected_frames <= 1, "Producted feature number does not match framerate"
    return mat
Example 18
Project: b2ac   Author: hbldh   File: polygon.py    MIT License 5 votes vote down vote up
def get_center_point(self, use_centroid=True):
        """Returns a center of weight for the object.

        :param use_centroid: Uses a centroid finding method instead of pure mean of vertices.
        :type use_centroid: bool

        """
        if not use_centroid:
            return np.mean(self.get_open_polygon(), 0)

        with warnings.catch_warnings():
            # Cause all warnings to never be triggered.
            warnings.simplefilter("ignore")

            pnt_array = self.get_closed_polygon()

            A = self._area_help_function()
            # cross terms of consecutive vertex pairs (shoelace components)
            D = (pnt_array[:-1, 0] * pnt_array[1:, 1] -
                 pnt_array[1:, 0] * pnt_array[:-1, 1])

            c_x = ((pnt_array[:-1, 0] + pnt_array[1:, 0]) * D).sum() / (6 * A)
            c_y = ((pnt_array[:-1, 1] + pnt_array[1:, 1]) * D).sum() / (6 * A)

            centroid = np.array([c_x, c_y])
            if np.all(np.isfinite(centroid)):
                return centroid
            # Centroid calculation failed (e.g. due to zero-valued area):
            # fall back to the mean of the vertices as center point.
            return np.mean(self.get_open_polygon(), 0)
Example 19
Project: meta-transfer-learning   Author: erfaneshrati   File: variables.py    MIT License 5 votes vote down vote up
def average_vars(var_seqs):
    """
    Average a sequence of variable sequences.

    :param var_seqs: sequences of equally-structured arrays, one per model.
    :return: list holding the element-wise mean at each variable position.
    """
    # zip groups the k-th variable of every sequence together
    return [np.mean(group, axis=0) for group in zip(*var_seqs)]
Example 20
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License 5 votes vote down vote up
def compute_mean(data_frame, column):
    """Fill missing values of ``column`` in ``data_frame`` with the column mean.

    Mutates ``data_frame`` in place and returns None.

    :param data_frame: pandas DataFrame to fill.
    :param column: name of the column whose NaN entries are replaced.
    """
    columnName = str(column)
    meanValue = data_frame[columnName].dropna().mean()
    # BUG FIX: the original used `data_frame.column`, which looks up a column
    # literally named "column" instead of the requested one.
    null_mask = data_frame[columnName].isnull()
    if null_mask.sum() > 0:
        data_frame.loc[null_mask, columnName] = meanValue
Example 21
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License 5 votes vote down vote up
def plotLearningCurves(train, classifier):
    """Plot 10-fold cross-validated learning curves for ``classifier``.

    Also fits the classifier on a 75/25 train/test split before showing
    the final figure.

    :param train: DataFrame-like whose column 0 is the label and whose
        remaining columns are features.
    :param classifier: scikit-learn style estimator with ``fit``.
    """
    #P.show()
    # column 0 holds the target; the remaining columns are features
    X = train.values[:, 1::]
    y = train.values[:, 0]

    # CV scores at 10 training-set sizes from 10% to 100% of the data
    train_sizes, train_scores, test_scores = learning_curve(
            classifier, X, y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)

    # mean/std over the CV folds at each training-set size
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.figure()
    plt.title("Learning Curves")
    plt.legend(loc="best")
    plt.xlabel("Training samples")
    plt.ylabel("Error Rate")
    plt.ylim((0, 1))
    plt.gca().invert_yaxis()
    plt.grid()

    # Plot the average training and test score lines at each training set size
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Test score")

    # Plot the std deviation as a transparent range at each training set size
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="b")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                     alpha=0.1, color="r")

    # Draw the plot and reset the y-axis
    plt.draw()
    plt.gca().invert_yaxis()

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
    classifier.fit(X_train, y_train)
    plt.show()
Example 22
Project: rhodonite   Author: nestauk   File: phylomemetic.py    MIT License 5 votes vote down vote up
def label_cross_pollination(g, merging_prop, agg=np.mean):
    """label_cross_pollination
    Cross-pollination is defined as the aggregated pairwise Jaccard distance
    (1 - similarity) between the parents of a merging node.

    Parameters
    ----------
        g : :obj:`graph_tool.Graph`
            A graph.
        merging_prop : :obj:`graph_tool.VertexPropertyMap`
            A vertex property map that is True where a vertex is a merging node.
        agg: :obj:`function`
            An aggregation function. Typically mean or median.

    Returns
    -------
        cross_poll_prop : :obj:`graph_tool.VertexPropertyMap`
            Contains cross pollination values of each vertex.
    """
    cross_poll_prop = g.new_vertex_property('float')
    merging_view = GraphView(g, vfilt=merging_prop, skip_efilt=True)
    for vertex in merging_view.vertices():
        # parents arrive via outgoing edges
        items = [g.vp['item'][parent] for parent in g.vertex(vertex).out_neighbors()]
        distances = [
            1 - jaccard_similarity_set(list(a), list(b))
            for a, b in combinations(items, 2)
        ]
        cross_poll_prop[vertex] = agg(distances)
    return cross_poll_prop
Example 23
Project: LipNet-PyTorch   Author: sailordiary   File: ctc_decoder.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def cer_batch(self, decoded, gt):
        """Compute the batch character error rate.

        Edit distances are normalised by the mean ground-truth length.
        """
        assert len(decoded) == len(gt), 'batch size mismatch: {}!={}'.format(len(decoded), len(gt))
        avg_gt_len = np.mean([len(sentence) for sentence in gt])
        return self.get_mean(decoded, gt, avg_gt_len, editdistance.eval)
Example 24
Project: LipNet-PyTorch   Author: sailordiary   File: ctc_decoder.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def wer_batch(self, decoded, gt):
        """Compute the batch word error rate.

        Edit distances are normalised by the mean ground-truth word count.
        """
        assert len(decoded) == len(gt), 'batch size mismatch: {}!={}'.format(len(decoded), len(gt))
        avg_gt_words = np.mean([len(sentence.split()) for sentence in gt])
        return self.get_mean(decoded, gt, avg_gt_words, self.wer_sentence)
Example 25
Project: RF-Monitor   Author: EarToEarOak   File: monitor.py    GNU General Public License v2.0 5 votes vote down vote up
def __update_level(self, location, level, timestamp):
        """Track signal start/end transitions against the dynamic threshold.

        :param location: location tag stored on a newly started signal.
        :param level: current measured level, or None when unavailable.
        :param timestamp: time of this measurement.
        :return: the started/ended Signal when state changed, else None.
        """
        updated = False
        signal = None
        threshold = self.get_dynamic_threshold()

        # an "open" signal is the most recent one with no end time yet
        if len(self._signals) and self._signals[-1].end is None:
            signal = self._signals[-1]

        if signal is None:
            # no open signal: start one when the level crosses the threshold
            if level is not None and level >= threshold:
                signal = Signal(start=timestamp, location=location)
                self._signals.append(signal)
                updated = True
        else:
            # open signal: close it when the level drops below the threshold
            if level is None or level < threshold:
                # signal strength = mean of levels accumulated while open
                strength = numpy.mean(self._levels)
                self._levels.clear()
                signal.end = timestamp
                signal.level = strength
                updated = True

        # accumulate above-threshold levels (feeds the mean computed above)
        if level is not None and level >= threshold:
            self._levels.append(level)

        if updated:
            return signal
        return None
Example 26
Project: RF-Monitor   Author: EarToEarOak   File: gui.py    GNU General Public License v2.0 5 votes vote down vote up
def __on_scan_data(self, event):
        """Handle a scan-data event: update monitors, noise and RSSI display.

        :param event: mapping with 'l' (linear level sweep), 'f'
            (frequencies) and 'timestamp' entries.
        """
        # convert linear power to dB (10 * log10)
        levels = numpy.log10(event['l'])
        levels *= 10
        self._levels = levels

        # dynamic noise floor: a configurable percentile of the sweep
        noise = numpy.percentile(levels,
                                 self._toolbar.get_dynamic_percentile())

        updated = False
        for monitor in self._monitors:
            freq = monitor.get_frequency()
            if monitor.get_enabled():
                monitor.set_noise(noise)
                # pick the sweep bin matching this monitor's frequency
                index = numpy.where(freq == event['f'])[0]
                signal = monitor.set_level(levels[index][0],
                                           event['timestamp'],
                                           self._location)
                if signal is not None:
                    updated = True
                    if signal.end is not None:
                        # a completed signal: push and/or serve the recording
                        recording = format_recording(freq, signal)
                        if self._settings.get_push_enable():
                            self._push.send(self._settings.get_push_uri(),
                                            recording)
                        if self._server is not None:
                            self._server.send(recording)

        if updated:
            if self._isSaved:
                # new data invalidates the saved state; refresh UI chrome
                self._isSaved = False
                self.__set_title()
                self.__set_timeline()

        self.__set_spectrum(noise)
        self._rssi.set_noise(numpy.mean(levels))
        self._rssi.set_level(numpy.max(levels))
Example 27
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: pascal_voc.py    MIT License 5 votes vote down vote up
def _eval_discovery(self, output_dir):
    """Evaluate object discovery (CorLoc) for every class.

    Runs ``dis_eval`` per class against the VOC annotations, pickles each
    class result into ``output_dir`` and prints per-class and mean CorLoc.

    :param output_dir: directory receiving the per-class ``*_corloc.pkl``
        files (created if missing).
    """
    # template paths into the VOCdevkit annotation / image-set layout
    annopath = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'Annotations',
        '{:s}.xml')
    imagesetfile = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'ImageSets',
        'Main',
        self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_dis_cache')
    corlocs = []
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
        if cls == '__background__':
            continue
        filename = self._get_voc_results_file_template().format(cls)
        # CorLoc at IoU overlap threshold 0.5
        corloc = dis_eval(
            filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5)
        corlocs += [corloc]
        print('CorLoc for {} = {:.4f}'.format(cls, corloc))
        with open(os.path.join(output_dir, cls + '_corloc.pkl'), 'wb') as f:
            pickle.dump({'corloc': corloc}, f)
    print('Mean CorLoc = {:.4f}'.format(np.mean(corlocs)))
    print('~~~~~~~~')
    print('Results:')
    for corloc in corlocs:
        print('{:.3f}'.format(corloc))
    print('{:.3f}'.format(np.mean(corlocs)))
    print('~~~~~~~~')
Example 28
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: coco.py    MIT License 5 votes vote down vote up
def _print_detection_eval_metrics(self, coco_eval):
    """Print mean and per-category AP averaged over IoU in [0.5, 0.95].

    :param coco_eval: a COCO-style eval object whose ``eval['precision']``
        array and ``params.iouThrs`` are already populated.
    """
    IoU_lo_thresh = 0.5
    IoU_hi_thresh = 0.95

    def _get_thr_ind(coco_eval, thr):
      # index of the IoU threshold closest to thr (within 1e-5)
      ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                     (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
      iou_thr = coco_eval.params.iouThrs[ind]
      assert np.isclose(iou_thr, thr)
      return ind

    ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
    ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
    # precision has dims (iou, recall, cls, area range, max dets)
    # area range index 0: all area ranges
    # max dets index 2: 100 per image
    precision = \
      coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
    # entries of -1 mark invalid recall points; exclude them from the mean
    ap_default = np.mean(precision[precision > -1])
    print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
           '~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
    print('{:.1f}'.format(100 * ap_default))
    for cls_ind, cls in enumerate(self.classes):
      if cls == '__background__':
        continue
      # minus 1 because of __background__
      precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
      ap = np.mean(precision[precision > -1])
      print('{:.1f}'.format(100 * ap))

    print('~~~~ Summary metrics ~~~~')
    coco_eval.summarize()
Example 29
Project: wikilinks   Author: trovdimi   File: fitpowerlaw.py    MIT License 5 votes vote down vote up
def meanClustering(net_name, net):
    """Print the mean local clustering coefficient of a network.

    :param net_name: label used in the printed output.
    :param net: graph whose 'local_clust' vertex property is averaged.
    """
    # BUG FIX: use the `net` parameter instead of the module-level `network`
    # global, which silently ignored the argument.
    clust = net.vertex_properties["local_clust"]
    # print() calls (single argument) work under both Python 2 and 3,
    # unlike the original Python-2-only print statements.
    print('clustering mean for network: %s' % net_name)
    print(np.mean(clust.get_array()))
    return
Example 30
Project: curriculum-dropout   Author: pmorerio   File: analyseResults_mnist_average.py    GNU General Public License v3.0 5 votes vote down vote up
def average(list_of_lists, last_n=10):
    """Mean over the ``last_n`` largest values of every row.

    Each row is sorted ascending, its final ``last_n`` entries are kept,
    and the overall mean of those kept values is returned.
    """
    values = np.sort(np.array(list_of_lists))
    return np.mean(np.mean(values[:, -last_n:]))
Example 31
Project: curriculum-dropout   Author: pmorerio   File: analyseResults_dm_average.py    GNU General Public License v3.0 5 votes vote down vote up
def average(list_of_lists, last_n=10):
    """Average the top ``last_n`` values of each row of ``list_of_lists``.

    Rows are sorted ascending so the slice ``[-last_n:]`` selects each
    row's largest entries; the grand mean of that slice is returned.
    """
    sorted_rows = np.sort(np.array(list_of_lists))
    tail = sorted_rows[:, -last_n:]
    return np.mean(np.mean(tail))
Example 32
Project: FRIDA   Author: LCAV   File: doa.py    MIT License 5 votes vote down vote up
def polar_distance(x1, x2):
    """
    Given two arrays of numbers x1 and x2, pairs the cells that are the
    closest and provides the pairing matrix index: x1(index(1,:)) should be as
    close as possible to x2(index(2,:)). The function outputs the average of 
    the absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).
    :param x1: vector 1
    :param x2: vector 2
    :return: d: minimum distance between d
             index: the permutation matrix
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    # pairwise angular distances, wrapped onto [0, pi] via arccos(cos(.))
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        # greedy matching: repeatedly take the closest remaining pair and
        # disable its row/column by setting them to +inf
        for k in range(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float('inf')
            diffmat[:, index1] = float('inf')
        # mean angular distance over the matched pairs
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        # degenerate case: at least one input is a single value
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        # NOTE(review): the literal 1 here looks like a 1-based (MATLAB)
        # index carried over in translation -- verify against callers.
        if N1 == 1:
            index = np.array([1, index])
        else:
            index = np.array([index, 1])
    return d, index
Example 33
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def MSE(gt, rc):
    """Mean squared error between ground truth and reconstruction."""
    diff = gt - rc
    return np.mean(diff * diff)
Example 34
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def MRAE(gt, rc):
    """Mean relative absolute error, stabilised by a 1e-3 epsilon."""
    relative_error = np.abs(gt - rc) / (gt + 1e-3)
    return np.mean(relative_error)
Example 35
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def SID(gt, rc):
    """Spectral information divergence between ``gt`` and ``rc``, averaged
    over the first axis; 1e-3 regularizes the log arguments."""
    eps = 1e-3
    n_samples = gt.shape[0]
    g = np.reshape(gt, (n_samples, -1))
    r = np.reshape(rc, (n_samples, -1))
    # symmetric pair of relative-entropy-style terms, one per sample
    forward = np.sum(r * np.log10((r + eps) / (g + eps)), axis=1)
    backward = np.sum(g * np.log10((g + eps) / (r + eps)), axis=1)
    return np.mean(np.abs(forward + backward))
Example 36
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def MSE(gt, rc):
    """Return the mean of the squared differences between ``gt`` and ``rc``."""
    err = gt - rc
    return np.mean(err * err)
Example 37
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def MRAE(gt, rc):
    """Mean relative absolute error; denominator offset by 1.0 for stability."""
    abs_err = np.abs(gt - rc)
    return np.mean(abs_err / (1.0 + gt))
Example 38
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: pascal_voc.py    MIT License 5 votes vote down vote up
def _do_python_eval(self, output_dir='output'):
    """
    Evaluate written VOC detection results with the Python eval code and
    print per-class and mean average precision.

    :param output_dir: directory where per-class PR data is pickled
    """
    # Build the annotation path template with os.path.join; the original
    # hard-coded Windows '\\' separators, which breaks on POSIX systems.
    annopath = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'Annotations',
        '{:s}.xml')
    imagesetfile = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'ImageSets',
        'Main',
        self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(self._year) < 2010 else False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
        if cls == '__background__':
            continue
        filename = self._get_voc_results_file_template().format(cls)
        rec, prec, ap = voc_eval(
            filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        aps += [ap]
        print(('AP for {} = {:.4f}'.format(cls, ap)))
        # persist the PR curve so results can be re-plotted later
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print(('Mean AP = {:.4f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print(('{:.3f}'.format(ap)))
    print(('{:.3f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')
Example 39
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: coco.py    MIT License 5 votes vote down vote up
def _print_detection_eval_metrics(self, coco_eval):
        IoU_lo_thresh = 0.5
        IoU_hi_thresh = 0.95

        def _get_thr_ind(coco_eval, thr):
            ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                           (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
            iou_thr = coco_eval.params.iouThrs[ind]
            assert np.isclose(iou_thr, thr)
            return ind

        ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
        ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
        # precision has dims (iou, recall, cls, area range, max dets)
        # area range index 0: all area ranges
        # max dets index 2: 100 per image
        precision = \
            coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
        ap_default = np.mean(precision[precision > -1])
        print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
               '~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
        print('{:.1f}'.format(100 * ap_default))
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            # minus 1 because of __background__
            precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
            ap = np.mean(precision[precision > -1])
            print('{:.1f}'.format(100 * ap))

        print('~~~~ Summary metrics ~~~~')
        coco_eval.summarize() 
Example 40
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License 5 votes vote down vote up
def one_epoch(self, mode, num_batch, next_fn):
    """
    Run one epoch over ``num_batch`` batches supplied by ``next_fn``.

    :param mode: "train" updates the model; anything else only predicts
    :param num_batch: number of batches in this epoch
    :param next_fn: callable returning ((qq, hh, tt), mdb) for the next batch
    :return: (epoch_loss, epoch_in_top) lists accumulated over all batches
    """
    epoch_loss = []
    epoch_in_top = []
    # Print progress `print_per_batch` times per epoch.  `//` keeps the
    # original Python 2 integer-division semantics under Python 3.
    print_every = max(1, num_batch // self.option.print_per_batch)
    # range: `xrange` does not exist in Python 3
    for batch in range(num_batch):
        if (batch + 1) % print_every == 0:
            sys.stdout.write("%d/%d\t" % (batch + 1, num_batch))
            sys.stdout.flush()

        (qq, hh, tt), mdb = next_fn()
        run_fn = self.learner.update if mode == "train" else self.learner.predict
        loss, in_top = run_fn(self.sess,
                              qq,
                              hh,
                              tt,
                              mdb)
        epoch_loss += list(loss)
        epoch_in_top += list(in_top)

    msg = self.msg_with_time(
            "Epoch %d mode %s Loss %0.4f In top %0.4f."
            % (self.epoch+1, mode, np.mean(epoch_loss), np.mean(epoch_in_top)))
    print(msg)
    self.log_file.write(msg + "\n")
    return epoch_loss, epoch_in_top
Example 41
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License 5 votes vote down vote up
def one_epoch_valid(self):
    """Run one validation epoch and track the best loss / in-top metrics."""
    loss, in_top = self.one_epoch("valid",
                                  self.data.num_batch_valid,
                                  self.data.next_valid)
    self.valid_stats.append([loss, in_top])
    mean_loss = np.mean(loss)
    mean_in_top = np.mean(in_top)
    if mean_loss < self.best_valid_loss:
        self.best_valid_loss = mean_loss
    if mean_in_top > self.best_valid_in_top:
        self.best_valid_in_top = mean_in_top
Example 42
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License 5 votes vote down vote up
def early_stop(self):
    """Return True when neither validation metric improved in the latest
    epoch AND the minimum number of epochs has already been reached."""
    last_loss, last_in_top = self.valid_stats[-1]
    improved = (self.best_valid_loss == np.mean(last_loss)
                or self.best_valid_in_top == np.mean(last_in_top))
    if improved:
        return False
    # never stop before the configured minimum number of epochs
    return self.epoch >= self.option.min_epoch
Example 43
Project: python-pool-performance   Author: JohnStarich   File: pools.py    MIT License 5 votes vote down vote up
def summarize_test(test_output: Mapping) -> Mapping:
    """Condense raw test output into the job count plus mean time and blocks."""
    summary = {'jobs': test_output['jobs']}
    summary['time'] = numpy.mean(test_output['time'])
    summary['blocks'] = numpy.mean(test_output['blocks'])
    return summary
Example 44
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0 5 votes vote down vote up
def get_diff_feats(sig_df):
    """
    Gets the feature names of features from a result table whose confidence \
    interval for the difference in means does not include 0.

    :param sig_df: Dataframe storing the mean difference confidence intervals \
    like returned by stats.p_correction(); its index is a MultiIndex whose \
    first level holds the feature names
    :return: set of feature names flagged as significantly different
    """
    # grab significant deviances
    series = sig_df["diff_flag"]
    series = series.fillna("False")
    # MultiIndex.labels was removed in pandas 1.0; .codes is the replacement
    index_codes = series[series == True].index.codes[0]
    return set(itemgetter(index_codes)(series.index.levels[0]))
Example 45
Project: RandomFourierFeatures   Author: tiskw   File: PyRFF.py    MIT License 5 votes vote down vote up
def score(self, X, y, **args):
    """Return the fraction of samples whose predicted label matches ``y``."""
    predictions = self.predict(X)
    hits = [1 if predictions[n, 0] == y[n] else 0 for n in range(X.shape[0])]
    return np.mean(hits)

# }}}

#################################### SOURCE FINISH ##################################
# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker
# Generated by grasp version 0.0 
Example 46
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 5 votes vote down vote up
def blind_search(self, x, y, image):
    '''
    This function is applied in the first few frames and/or if the lane was not successfully detected
    in the previous frame. It uses a sliding window approach to detect peaks in a histogram of the
    binary thresholded image. Pixels in close proximity to the detected peaks are considered to belong
    to the lane lines.
    '''
    xvals = []
    yvals = []
    if self.found == False:
        # slide a 90-pixel-tall window from the bottom of the image upwards
        i = 720
        j = 630
        histogram = np.sum(image[image.shape[0]//2:], axis=0)
        # NOTE(review): comparing the instance itself against a global `Right`
        # object looks fragile -- presumably it distinguishes the right lane
        # line instance; confirm against the caller
        if self == Right:
            peak = np.argmax(histogram[image.shape[1]//2:]) + image.shape[1]//2
        else:
            peak = np.argmax(histogram[:image.shape[1]//2])
        while j >= 0:
            # pixels within +/-100 px of the current peak and inside the window
            x_idx = np.where((((peak - 100) < x)&(x < (peak + 100))&((y > j) & (y < i))))
            x_window, y_window = x[x_idx], y[x_idx]
            if np.sum(x_window) != 0:
                xvals.extend(x_window)
                yvals.extend(y_window)
            if np.sum(x_window) > 100:
                # re-center the window on the mean x position;
                # int() replaces np.int, which was removed in NumPy 1.24
                peak = int(np.mean(x_window))
            i -= 90
            j -= 90
    if np.sum(xvals) > 0:
        self.found = True
    else:
        # fall back to the pixels found in the previous frame
        yvals = self.Y
        xvals = self.X
    return xvals, yvals, self.found
Example 47
Project: mmdetection   Author: open-mmlab   File: coco_utils.py    Apache License 2.0 5 votes vote down vote up
def fast_eval_recall(results,
                     coco,
                     max_dets,
                     iou_thrs=np.arange(0.5, 0.96, 0.05)):
    """Compute average recall per max-detections setting on a COCO dataset.

    ``results`` may be a list of per-image detection arrays or the path to a
    pickled list (must end in ``.pkl``).
    """
    if mmcv.is_str(results):
        assert results.endswith('.pkl')
        results = mmcv.load(results)
    elif not isinstance(results, list):
        raise TypeError(
            'results must be a list of numpy arrays or a filename, not {}'.
            format(type(results)))

    # collect the non-ignored, non-crowd ground-truth boxes for every image,
    # converted from (x, y, w, h) to inclusive corner coordinates
    gt_bboxes = []
    for img_id in coco.getImgIds():
        ann_info = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
        boxes = [
            [ann['bbox'][0],
             ann['bbox'][1],
             ann['bbox'][0] + ann['bbox'][2] - 1,
             ann['bbox'][1] + ann['bbox'][3] - 1]
            for ann in ann_info
            if not (ann.get('ignore', False) or ann['iscrowd'])
        ]
        if boxes:
            gt_bboxes.append(np.array(boxes, dtype=np.float32))
        else:
            # keep a (0, 4) placeholder so eval_recalls sees every image
            gt_bboxes.append(np.zeros((0, 4)))

    recalls = eval_recalls(
        gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
    return recalls.mean(axis=1)
Example 48
Project: mmdetection   Author: open-mmlab   File: merge_augs.py    Apache License 2.0 5 votes vote down vote up
def merge_aug_scores(aug_scores):
    """Merge augmented bbox scores."""
    first = aug_scores[0]
    if isinstance(first, torch.Tensor):
        stacked = torch.stack(aug_scores)
        return torch.mean(stacked, dim=0)
    return np.mean(aug_scores, axis=0)
Example 49
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lillabcrossval_network.py    MIT License 5 votes vote down vote up
def cv_calc():
    """Calculate mean and stdev for each metric and append the formatted
    summaries to the module-level ``test_metrics`` list.

    Relies on module-level globals: ``cvscores`` (first entry holds the metric
    names, the rest the per-fold values) and ``test_metrics``.
    """
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(
            cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        # print() call: the original used a Python 2 print statement
        print(v)
        test_metrics.append(v)
        other_counter += 1
        # wrap around after the 7 tracked metrics
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics
Example 50
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License 5 votes vote down vote up
def cv_calc():
    """Calculate mean and stdev for each metric and append the formatted
    summaries to the module-level ``test_metrics`` list.

    Relies on module-level globals: ``cvscores`` (first entry holds the metric
    names, the rest the per-fold values) and ``test_metrics``.
    """
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(
            cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        # print() call: the original used a Python 2 print statement
        print(v)
        test_metrics.append(v)
        other_counter += 1
        # wrap around after the 7 tracked metrics
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics