Python numpy.mean() Examples

The following are code examples showing how to use numpy.mean(). They are taken from open source Python projects.
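For reference, numpy.mean() averages over all elements by default, or along a given axis; a minimal standalone example:

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.mean(a))          # 2.5, the mean over all elements
print(np.mean(a, axis=0))  # [2. 3.], column means
print(np.mean(a, axis=1))  # [1.5 3.5], row means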

Example 1
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0
def conf_interval(data_series):
    """
    Calculate the confidence interval for the data distribution, under the assumption that it can be modelled
    using a Student's t-distribution.

    :param data_series: Iterable storing the values of the variable.
    :return start: Starting value of the interval
    :return end: Ending value of the interval
    """

    mean = np.mean(data_series)

    conf_int = sem(data_series) * t.ppf((1 + 0.95) / 2, len(data_series) - 1)

    start = mean - conf_int
    end = mean + conf_int

    return start, end 
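The snippet above assumes np is NumPy and that sem and t are imported from scipy.stats; a minimal usage sketch under those assumptions:

import numpy as np
from scipy.stats import sem, t

data = np.random.normal(loc=5.0, scale=2.0, size=30)
mean = np.mean(data)
half_width = sem(data) * t.ppf((1 + 0.95) / 2, len(data) - 1)
print(mean - half_width, mean + half_width)  # 95% confidence interval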
Example 2
Project: rhodonite   Author: nestauk   File: phylomemetic.py    MIT License
def label_diversification(g, branching_prop, agg=np.mean):
    """label_diversification
    Diversification is defined as the average pairwise Jaccard similarity
    between the children of a branching node.

    Parameters
    ----------
        g : :obj:`Graph` 
            A graph.
        branching_prop : :obj:`graph_tool.VertexPropertyMap` 
            A vertex property map that is True where a vertex is a branching 
            node.
        agg: :obj:`function` 
            An aggregation function. Typically mean or median.
    """
    diversification_prop = g.new_vertex_property('float')
    g_branching = GraphView(g, vfilt=branching_prop, skip_efilt=True)
    for v in g_branching.vertices():
        children = [g.vp['item'][c] for c in g.vertex(v).in_neighbors()]
        jaccard = agg(
            [1 - jaccard_similarity_set(list(c[0]), list(c[1]))
            for c in combinations(children, 2)]
        )
        diversification_prop[v] = jaccard
    return diversification_prop 
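jaccard_similarity_set is imported from elsewhere in the project and is not shown here; a minimal sketch of the standard Jaccard similarity it presumably implements:

def jaccard_similarity_set(a, b):
    # Hypothetical helper: |A intersect B| / |A union B| for two iterables.
    a, b = set(a), set(b)
    if not a and not b:
        return 1.0
    return len(a & b) / float(len(a | b))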
Example 3
Project: UR5_Controller   Author: tsinghua-rll   File: quaternion.py    MIT License
def from_vector_array_to_q(v_c1, v_c2):
    """
    Calculate the transform quaternion and translation vector from vector pairs in two coordinate systems.
    :param v_c1: list or tuple of vectors in the source coordinate system
    :param v_c2: list or tuple of vectors in the target coordinate system
    :return: rotation quaternion from c1 to c2, translation from v1 to v2
            (qw, qi, qj, qk), (x, y, z)
    """
    if len(v_c1) != len(v_c2) or len(v_c1) <= 3:
        print("Error: not enough vector pairs, or the two arrays differ in length")
        return (1, 0, 0, 0), (0, 0, 0)

    v_c1 = np.asarray(v_c1, dtype=np.float32).T
    v_c2 = np.asarray(v_c2, dtype=np.float32).T
    # centre both (3, N) vector sets on their centroids
    mean_c1 = np.mean(v_c1, axis=1, keepdims=True)
    mean_c2 = np.mean(v_c2, axis=1, keepdims=True)
    v_c1 -= mean_c1
    v_c2 -= mean_c2

    mat = v_c2.dot(v_c1.T).dot(np.linalg.pinv(v_c1.dot(v_c1.T)))
    return from_matrix_to_q(mat), np.mean(v_c2 - mat.dot(v_c1), 1) 
Example 4
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def draw(self, ax, color, line_width=1, fillcolor=None, name=None, arrow=True, alpha=0.2, scale=50):
        ax.add_patch(PolygonPatch(self.contour, alpha=alpha, fc=fillcolor, ec=color, linewidth=line_width))

        vertices = np.array(self.contour.exterior.coords)[1:]

        if arrow:
            arrow_center = np.mean(vertices, axis=0)
            arrow_direction = (vertices[2] - vertices[1]) / 1.5
            arrow_tail = arrow_center - arrow_direction / 2
            arrow_head = arrow_center + arrow_direction / 2
            style = plt_patches.ArrowStyle.Simple(head_length=.4, head_width=.6, tail_width=.1)
            x = np.array(ax.axis())
            scale_factor = np.sqrt(np.prod(np.abs(x[::2] - x[1::2])) / (60 * 60))
            arrow_patch = plt_patches.FancyArrowPatch(posA=arrow_tail, posB=arrow_head, arrowstyle=style,
                                                      color='w', mutation_scale= scale / scale_factor, alpha=0.4)
            ax.add_patch(arrow_patch)
        elif name is None:
            name = 'front'

        if name is not None:
            text_location = np.mean(vertices[[0, -1]], axis=0)
            ax.text(text_location[0], text_location[1], name, ha='center', va='top', color='w') 
Example 5
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License
def box2dtobox3d(boxes2d, z_translation=0.0, z_size=0.0, z_angle=0.0):
    """
    transforms 2d boxes to 3d boxes
    :param boxes2d: np array shaped N,4. box = [x1,y1,x2,y2] (1 - bottom left, 2 - upper right)
    :return: boxes3d np array shaped N,7. box = [t1,t2,t3,s1,s2,s3,z_angle]
    """
    ctr_x = np.mean(boxes2d[:, [0, 2]], axis=-1, keepdims=True)
    ctr_y = np.mean(boxes2d[:, [1, 3]], axis=-1, keepdims=True)
    ctr_z = np.full([boxes2d.shape[0], 1], z_translation)
    ctr = np.concatenate((ctr_x, ctr_y, ctr_z), -1)

    size_x = boxes2d[:, 2:3] - boxes2d[:, 0:1]
    size_y = boxes2d[:, 3:4] - boxes2d[:, 1:2]
    size_z = np.full([boxes2d.shape[0], 1], z_size)
    size = np.concatenate((size_x, size_y, size_z), -1)

    z_angle = np.full([boxes2d.shape[0], 1], z_angle)

    return np.concatenate((ctr, size, z_angle), -1) 
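Assuming the function above is in scope, a quick usage check:

import numpy as np

boxes2d = np.array([[0., 0., 4., 2.]])  # one box: x1, y1, x2, y2
print(box2dtobox3d(boxes2d, z_translation=1.0, z_size=3.0))
# [[2. 1. 1. 4. 2. 3. 0.]] -> center (2, 1, 1), size (4, 2, 3), z_angle 0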
Example 6
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License
def train(self):
        while (self.epoch < self.option.max_epoch and not self.early_stopped):
            self.one_epoch_train()
            self.one_epoch_valid()
            self.one_epoch_test()
            self.epoch += 1
            model_path = self.saver.save(self.sess, 
                                         self.option.model_path,
                                         global_step=self.epoch)
            print("Model saved at %s" % model_path)
            
            if self.early_stop():
                self.early_stopped = True
                print("Early stopped at epoch %d" % (self.epoch))
        
        all_test_in_top = [np.mean(x[1]) for x in self.test_stats]
        best_test_epoch = np.argmax(all_test_in_top)
        best_test = all_test_in_top[best_test_epoch]
        
        msg = "Best test in top: %0.4f at epoch %d." % (best_test, best_test_epoch + 1)       
        print(msg)
        self.log_file.write(msg + "\n")
        pickle.dump([self.train_stats, self.valid_stats, self.test_stats],
                    open(os.path.join(self.option.this_expsdir, "results.pckl"), "wb")) 
Example 7
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0
def calc_mean_diff(series1, series2, rnd=2):
    """
    Calculates the confidence interval of the difference in means between two iterables.

    :param series1: Iterable storing the values of variable 1.
    :param series2: Iterable storing the values of variable 2.
    :param rnd: Number of decimal places to round the result to.
    :return: List representing the interval
    """
    # calculate means and variances
    mean1 = np.mean(series1)
    mean2 = np.mean(series2)
    var1 = np.var(series1, ddof=1)
    var2 = np.var(series2, ddof=1)

    # calculate and return confidence interval
    return diff_mean_conf_formula(len(series1), len(series2), mean1, mean2, var1, var2, rnd) 
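diff_mean_conf_formula is defined elsewhere in the DataComp project and is not shown here. Purely as an illustration, a Welch-style interval with the same signature could look like the following hypothetical sketch (not the project's actual implementation):

import numpy as np
from scipy.stats import t

def diff_mean_conf_formula(n1, n2, mean1, mean2, var1, var2, rnd=2):
    # Hypothetical: 95% Welch confidence interval for mean1 - mean2.
    se = np.sqrt(var1 / n1 + var2 / n2)
    # Welch-Satterthwaite approximation of the degrees of freedom
    df = se ** 4 / ((var1 / n1) ** 2 / (n1 - 1) + (var2 / n2) ** 2 / (n2 - 1))
    margin = t.ppf(0.975, df) * se
    diff = mean1 - mean2
    return [round(diff - margin, rnd), round(diff + margin, rnd)]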
Example 8
Project: Adversarial-Face-Attack   Author: ppwwyyxx   File: face_attack.py    GNU General Public License v3.0
def validate_on_lfw(model, lfw_160_path):
    # Read the file containing the pairs used for testing
    pairs = lfw.read_pairs('validation-LFW-pairs.txt')
    # Get the paths for the corresponding images
    paths, actual_issame = lfw.get_paths(lfw_160_path, pairs)
    num_pairs = len(actual_issame)

    all_embeddings = np.zeros((num_pairs * 2, 512), dtype='float32')
    for k in tqdm.trange(num_pairs):
        img1 = cv2.imread(paths[k * 2], cv2.IMREAD_COLOR)[:, :, ::-1]
        img2 = cv2.imread(paths[k * 2 + 1], cv2.IMREAD_COLOR)[:, :, ::-1]
        batch = np.stack([img1, img2], axis=0)
        embeddings = model.eval_embeddings(batch)
        all_embeddings[k * 2: k * 2 + 2, :] = embeddings

    tpr, fpr, accuracy, val, val_std, far = lfw.evaluate(
        all_embeddings, actual_issame, distance_metric=1, subtract_mean=True)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))

    auc = metrics.auc(fpr, tpr)
    print('Area Under Curve (AUC): %1.3f' % auc)
    eer = brentq(lambda x: 1. - x - interpolate.interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal Error Rate (EER): %1.3f' % eer) 
Example 9
Project: StructEngPy   Author: zhuoju36   File: element.py    MIT License
def __init__(self,node_i, node_j, node_k, node_l,t, E, mu, rho, name=None):
        # 4 nodes
        self.__nodes = []  # ensure the node list exists before appending
        self.__nodes.append(node_i)
        self.__nodes.append(node_j)
        self.__nodes.append(node_k)
        self.__nodes.append(node_l)

        self.__t = t
        
        center=np.mean([node_i,node_j,node_k,node_l])
#        self.local_csys = CoordinateSystem.cartisian(center,nodes[4],nodes[5])
        
        self.__alpha = []  # the angle between each edge and local-x, to be added
        self.__alpha.append(self.angle(node_i,node_j,self.local_csys.x))
        self.__alpha.append(self.angle(node_j,node_k,self.local_csys.x))
        self.__alpha.append(self.angle(node_k,node_l,self.local_csys.x))
        self.__alpha.append(self.angle(node_l,node_i,self.local_csys.x))

        self.__K=np.zeros((24,24)) 
Example 10
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License
def found_search(self, x, y):
        '''
        This function is applied when the lane lines have been detected in the previous frame.
        It uses a sliding window to search for lane pixels in close proximity (+/- 25 pixels in the x direction)
        around the previous detected polynomial.
        '''
        xvals = []
        yvals = []
        if self.found == True:
            i = 720
            j = 630
            while j >= 0:
                yval = np.mean([i,j])
                xval = (np.mean(self.fit0))*yval**2 + (np.mean(self.fit1))*yval + (np.mean(self.fit2))
                x_idx = np.where((((xval - 25) < x)&(x < (xval + 25))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    xvals = np.append(xvals, x_window)
                    yvals = np.append(yvals, y_window)
                i -= 90
                j -= 90
        if np.sum(xvals) == 0:
            self.found = False # If no lane pixels were detected then perform blind search
        return xvals, yvals, self.found 
Example 11
Project: mmdetection   Author: open-mmlab   File: merge_augs.py    Apache License 2.0
def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
    """Merge augmented detection bboxes and scores.

    Args:
        aug_bboxes (list[Tensor]): shape (n, 4*#class)
        aug_scores (list[Tensor] or None): shape (n, #class)
        img_metas (list[list[dict]]): image meta info for each augmentation.
        rcnn_test_cfg (dict): rcnn test config.

    Returns:
        tuple: (bboxes, scores)
    """
    recovered_bboxes = []
    for bboxes, img_info in zip(aug_bboxes, img_metas):
        img_shape = img_info[0]['img_shape']
        scale_factor = img_info[0]['scale_factor']
        flip = img_info[0]['flip']
        bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
        recovered_bboxes.append(bboxes)
    bboxes = torch.stack(recovered_bboxes).mean(dim=0)
    if aug_scores is None:
        return bboxes
    else:
        scores = torch.stack(aug_scores).mean(dim=0)
        return bboxes, scores 
Example 12
Project: mmdetection   Author: open-mmlab   File: merge_augs.py    Apache License 2.0
def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
    """Merge augmented mask prediction.

    Args:
        aug_masks (list[ndarray]): shape (n, #class, h, w)
        img_metas (list[list[dict]]): image meta info for each augmentation.
        rcnn_test_cfg (dict): rcnn test config.

    Returns:
        ndarray: merged masks
    """
    recovered_masks = [
        mask if not img_info[0]['flip'] else mask[..., ::-1]
        for mask, img_info in zip(aug_masks, img_metas)
    ]
    if weights is None:
        merged_masks = np.mean(recovered_masks, axis=0)
    else:
        merged_masks = np.average(
            np.array(recovered_masks), axis=0, weights=np.array(weights))
    return merged_masks 
Example 13
Project: mmdetection   Author: open-mmlab   File: analyze_logs.py    Apache License 2.0
def cal_train_time(log_dicts, args):
    for i, log_dict in enumerate(log_dicts):
        print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i],
                                                    '-' * 5))
        all_times = []
        for epoch in log_dict.keys():
            if args.include_outliers:
                all_times.append(log_dict[epoch]['time'])
            else:
                all_times.append(log_dict[epoch]['time'][1:])
        all_times = np.array(all_times)
        epoch_ave_time = all_times.mean(-1)
        slowest_epoch = epoch_ave_time.argmax()
        fastest_epoch = epoch_ave_time.argmin()
        std_over_epoch = epoch_ave_time.std()
        print('slowest epoch {}, average time is {:.4f}'.format(
            slowest_epoch + 1, epoch_ave_time[slowest_epoch]))
        print('fastest epoch {}, average time is {:.4f}'.format(
            fastest_epoch + 1, epoch_ave_time[fastest_epoch]))
        print('time std over epochs is {:.4f}'.format(std_over_epoch))
        print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
        print() 
Example 14
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_targeted_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 2, 100)] = 1
        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=100, y_target=feed_labs)

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
                        > 0.9) 
Example 15
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_gives_adversarial_example(self):

        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), orig_labs] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 16
Project: unicorn-hat-hd   Author: pimoroni   File: forest-fire.py    MIT License
def average_forest(forest):
    avg_forest = [[space for x in range(width)] for y in range(height)]

    for i, x in enumerate(range(1, forest_width, scale)):
        for j, y in enumerate(range(1, forest_height, scale)):
            neighbours = get_neighbours(x, y, avg_size)
            red = int(numpy.mean([forest[n[0]][n[1]][0] for n in neighbours]))
            green = int(numpy.mean([forest[n[0]][n[1]][1] for n in neighbours]))
            blue = int(numpy.mean([forest[n[0]][n[1]][2] for n in neighbours]))
            avg_forest[i][j] = [red, green, blue]

    return avg_forest 
Example 17
Project: SyNEThesia   Author: RunOrVeith   File: feature_creators.py    MIT License
def logfbank_features(signal, samplerate=44100, fps=24, num_filt=40, num_cepstra=40, nfft=8192, **kwargs):
    winstep = 2 / fps
    winlen = winstep * 2
    feat, energy = psf.fbank(signal=signal, samplerate=samplerate,
                             winlen=winlen, winstep=winstep, nfilt=num_filt,
                             nfft=nfft)
    feat = np.log(feat)
    feat = psf.dct(feat, type=2, axis=1, norm='ortho')[:, :num_cepstra]
    feat = psf.lifter(feat, L=22)
    feat = np.asarray(feat)

    energy = np.log(energy)
    energy = energy.reshape([energy.shape[0],1])

    if feat.shape[0] > 1:
        std = 0.5 * np.std(feat, axis=0)
        mat = (feat - np.mean(feat, axis=0)) / std
    else:
        mat = feat

    mat = np.concatenate((mat, energy), axis=1)

    duration = signal.shape[0] / samplerate
    expected_frames = fps * duration
    assert mat.shape[0] - expected_frames <= 1, "Produced feature count does not match frame rate"
    return mat 
Example 18
Project: b2ac   Author: hbldh   File: polygon.py    MIT License
def get_center_point(self, use_centroid=True):
        """Returns a center of weight for the object.

        :param use_centroid: Uses a centroid finding method instead of pure mean of vertices.
        :type use_centroid: bool

        """
        if use_centroid:
            with warnings.catch_warnings():
                # Ignore all warnings raised inside this block.
                warnings.simplefilter("ignore")

                pnt_array = self.get_closed_polygon()

                A = self._area_help_function()
                D = (pnt_array[:-1, 0] * pnt_array[1:, 1] -
                     pnt_array[1:, 0] * pnt_array[:-1, 1])

                c_x = ((pnt_array[:-1, 0] + pnt_array[1:, 0]) * D).sum() / (6 * A)
                c_y = ((pnt_array[:-1, 1] + pnt_array[1:, 1]) * D).sum() / (6 * A)

                if np.isnan(c_x) or np.isinf(c_x) or np.isnan(c_y) or np.isinf(c_y):
                    # If centroid calculations fails (e.g. due to zero-valued area) then use the
                    # mean of the vertices as center point instead.
                    return np.mean(self.get_open_polygon(), 0)
                else:
                    return np.array([c_x, c_y])
        else:
            return np.mean(self.get_open_polygon(), 0) 
Example 19
Project: meta-transfer-learning   Author: erfaneshrati   File: variables.py    MIT License
def average_vars(var_seqs):
    """
    Average a sequence of variable sequences.
    """
    res = []
    for variables in zip(*var_seqs):
        res.append(np.mean(variables, axis=0))
    return res 
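Assuming each inner sequence holds arrays of matching shapes, usage looks like:

import numpy as np

seq_a = [np.array([1.0, 2.0]), np.array([[1.0]])]
seq_b = [np.array([3.0, 4.0]), np.array([[3.0]])]
print(average_vars([seq_a, seq_b]))
# [array([2., 3.]), array([[2.]])]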
Example 20
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License
def compute_mean(data_frame, column):
    columnName = str(column)
    meanValue = data_frame[columnName].dropna().mean()
    if data_frame[columnName].isnull().sum() > 0:
        data_frame.loc[data_frame[columnName].isnull(), columnName] = meanValue 
Example 21
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License
def plotLearningCurves(train, classifier):
    #P.show()
    X = train.values[:, 1::]
    y = train.values[:, 0]

    train_sizes, train_scores, test_scores = learning_curve(
            classifier, X, y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.figure()
    plt.title("Learning Curves")
    plt.xlabel("Training samples")
    plt.ylabel("Error Rate")
    plt.ylim((0, 1))
    plt.gca().invert_yaxis()
    plt.grid()

    # Plot the average training and test score lines at each training set size
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Test score")
    plt.legend(loc="best")

    # Plot the std deviation as a transparent range at each training set size
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="b")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                     alpha=0.1, color="r")

    # Draw the plot and reset the y-axis
    plt.draw()
    plt.gca().invert_yaxis()

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
    classifier.fit(X_train, y_train)
    plt.show() 
Example 22
Project: rhodonite   Author: nestauk   File: phylomemetic.py    MIT License
def label_cross_pollination(g, merging_prop, agg=np.mean):
    """label_cross_pollination
    Cross-pollination is defined as the average pairwise Jaccard similarity
    between the parents of a merging node.

    Parameters
    ----------
        g : :obj:`graph_tool.Graph` 
            A graph.
        merging_prop : :obj:`graph_tool.VertexPropertyMap` 
            A vertex property map that is True where a vertex is a merging node.
        agg: :obj:`function` 
            An aggregation function. Typically mean or median.

    Returns
    -------
        cross_pol_prop : :obj:`graph_tool.VertexPropertyMap`
            Contains cross pollination values of each vertex.
    """
    cross_poll_prop = g.new_vertex_property('float')
    g_merging = GraphView(g, vfilt=merging_prop, skip_efilt=True)
    for v in g_merging.vertices():
        parents = [g.vp['item'][p] for p in g.vertex(v).out_neighbors()]
        jaccard = agg(
            [1 - jaccard_similarity_set(list(c[0]), list(c[1]))
            for c in combinations(parents, 2)]
        )
        cross_poll_prop[v] = jaccard
    return cross_poll_prop 
Example 23
Project: LipNet-PyTorch   Author: sailordiary   File: ctc_decoder.py    BSD 3-Clause "New" or "Revised" License
def cer_batch(self, decoded, gt):
        assert len(decoded) == len(gt), 'batch size mismatch: {}!={}'.format(len(decoded), len(gt))
        mean_indiv_len = np.mean([len(s) for s in gt])
        
        return self.get_mean(decoded, gt, mean_indiv_len, editdistance.eval) 
Example 24
Project: LipNet-PyTorch   Author: sailordiary   File: ctc_decoder.py    BSD 3-Clause "New" or "Revised" License
def wer_batch(self, decoded, gt):
        assert len(decoded) == len(gt), 'batch size mismatch: {}!={}'.format(len(decoded), len(gt))
        mean_indiv_len = np.mean([len(s.split()) for s in gt])
        
        return self.get_mean(decoded, gt, mean_indiv_len, self.wer_sentence) 
Example 25
Project: RF-Monitor   Author: EarToEarOak   File: monitor.py    GNU General Public License v2.0
def __update_level(self, location, level, timestamp):
        updated = False
        signal = None
        threshold = self.get_dynamic_threshold()

        if len(self._signals) and self._signals[-1].end is None:
            signal = self._signals[-1]

        if signal is None:
            if level is not None and level >= threshold:
                signal = Signal(start=timestamp, location=location)
                self._signals.append(signal)
                updated = True
        else:
            if level is None or level < threshold:
                strength = numpy.mean(self._levels)
                self._levels.clear()
                signal.end = timestamp
                signal.level = strength
                updated = True

        if level is not None and level >= threshold:
            self._levels.append(level)

        if updated:
            return signal
        return None 
Example 26
Project: RF-Monitor   Author: EarToEarOak   File: gui.py    GNU General Public License v2.0
def __on_scan_data(self, event):
        levels = numpy.log10(event['l'])
        levels *= 10
        self._levels = levels

        noise = numpy.percentile(levels,
                                 self._toolbar.get_dynamic_percentile())

        updated = False
        for monitor in self._monitors:
            freq = monitor.get_frequency()
            if monitor.get_enabled():
                monitor.set_noise(noise)
                index = numpy.where(freq == event['f'])[0]
                signal = monitor.set_level(levels[index][0],
                                           event['timestamp'],
                                           self._location)
                if signal is not None:
                    updated = True
                    if signal.end is not None:
                        recording = format_recording(freq, signal)
                        if self._settings.get_push_enable():
                            self._push.send(self._settings.get_push_uri(),
                                            recording)
                        if self._server is not None:
                            self._server.send(recording)

        if updated:
            if self._isSaved:
                self._isSaved = False
                self.__set_title()
                self.__set_timeline()

        self.__set_spectrum(noise)
        self._rssi.set_noise(numpy.mean(levels))
        self._rssi.set_level(numpy.max(levels)) 
Example 27
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: pascal_voc.py    MIT License
def _eval_discovery(self, output_dir):
    annopath = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'Annotations',
        '{:s}.xml')
    imagesetfile = os.path.join(
        self._devkit_path,
        'VOC' + self._year,
        'ImageSets',
        'Main',
        self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_dis_cache')
    corlocs = []
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
        if cls == '__background__':
            continue
        filename = self._get_voc_results_file_template().format(cls)
        corloc = dis_eval(
            filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5)
        corlocs += [corloc]
        print('CorLoc for {} = {:.4f}'.format(cls, corloc))
        with open(os.path.join(output_dir, cls + '_corloc.pkl'), 'wb') as f:
            pickle.dump({'corloc': corloc}, f)
    print('Mean CorLoc = {:.4f}'.format(np.mean(corlocs)))
    print('~~~~~~~~')
    print('Results:')
    for corloc in corlocs:
        print('{:.3f}'.format(corloc))
    print('{:.3f}'.format(np.mean(corlocs)))
    print('~~~~~~~~') 
Example 28
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: coco.py    MIT License
def _print_detection_eval_metrics(self, coco_eval):
    IoU_lo_thresh = 0.5
    IoU_hi_thresh = 0.95

    def _get_thr_ind(coco_eval, thr):
      ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                     (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
      iou_thr = coco_eval.params.iouThrs[ind]
      assert np.isclose(iou_thr, thr)
      return ind

    ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
    ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
    # precision has dims (iou, recall, cls, area range, max dets)
    # area range index 0: all area ranges
    # max dets index 2: 100 per image
    precision = \
      coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
    ap_default = np.mean(precision[precision > -1])
    print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
           '~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
    print('{:.1f}'.format(100 * ap_default))
    for cls_ind, cls in enumerate(self.classes):
      if cls == '__background__':
        continue
      # minus 1 because of __background__
      precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
      ap = np.mean(precision[precision > -1])
      print('{:.1f}'.format(100 * ap))

    print('~~~~ Summary metrics ~~~~')
    coco_eval.summarize() 
Example 29
Project: wikilinks   Author: trovdimi   File: fitpowerlaw.py    MIT License
def meanClustering(net_name, net):
    clust = net.vertex_properties["local_clust"]
    print 'clustering mean for network: ', net_name
    print np.mean(clust.get_array())
    return 
Example 30
Project: curriculum-dropout   Author: pmorerio   File: analyseResults_mnist_average.py    GNU General Public License v3.0
def average(list_of_lists, last_n=10):
    allValues = np.array(list_of_lists)
    allValues = np.sort(allValues)

    return np.mean(np.mean(allValues[:, -last_n:])) 
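Assuming each inner list holds one run's per-epoch scores, this returns the mean of every run's top last_n values:

import numpy as np

runs = [[0.10, 0.90, 0.80, 0.95],
        [0.20, 0.85, 0.90, 0.92]]
print(average(runs, last_n=2))  # 0.9175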
Example 31
Project: curriculum-dropout   Author: pmorerio   File: analyseResults_dm_average.py    GNU General Public License v3.0
def average(list_of_lists, last_n=10):
    allValues = np.array(list_of_lists)
    allValues = np.sort(allValues)

    return np.mean(np.mean(allValues[:, -last_n:])) 
Example 32
Project: FRIDA   Author: LCAV   File: doa.py    MIT License
def polar_distance(x1, x2):
    """
    Given two arrays of numbers x1 and x2, pairs the cells that are the
    closest and provides the pairing matrix index: x1(index(1,:)) should be as
    close as possible to x2(index(2,:)). The function outputs the average of 
    the absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).
    :param x1: vector 1
    :param x2: vector 2
    :return: d: minimum average distance between the paired elements of x1 and x2
             index: the permutation matrix
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        for k in range(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float('inf')
            diffmat[:, index1] = float('inf')
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            index = np.array([1, index])
        else:
            index = np.array([index, 1])
    return d, index 
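A quick check with two angle vectors, assuming the function above is in scope:

import numpy as np

d, index = polar_distance(np.array([0.1, 3.0]), np.array([2.9, 0.2]))
print(d)      # ~0.1, the mean angular error after optimal pairing
print(index)  # [[0 1], [1 0]]: x1[0] pairs with x2[1], x1[1] with x2[0]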
Example 33
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License
def MSE(gt, rc):
    return np.mean((gt - rc) ** 2) 
Example 34
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License
def MRAE(gt, rc):
    return np.mean(np.abs(gt - rc) / (gt + 1e-3)) 
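With the two metrics above in scope, a small sanity check:

import numpy as np

gt = np.array([[1.0, 2.0], [3.0, 4.0]])
rc = gt + 0.1
print(MSE(gt, rc))   # ~0.01
print(MRAE(gt, rc))  # ~0.052, the mean of |gt - rc| / (gt + 1e-3)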
Example 35
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License
def SID(gt, rc):
    N = gt.shape[0]
    err = np.zeros(N)
    for i in range(N):
        err[i] = abs(np.sum(rc[i] * np.log10((rc[i] + 1e-3) / (gt[i] + 1e-3))) +
                     np.sum(gt[i] * np.log10((gt[i] + 1e-3) / (rc[i] + 1e-3))))
    return err.mean() 
Example 36
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License
def MSE(gt, rc):
    return np.mean((gt - rc) ** 2) 
Example 37
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License
def MRAE(gt, rc):
    return np.mean(np.abs(gt - rc) / (gt + 1.0)) 
Example 38
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: pascal_voc.py    MIT License
def _do_python_eval(self, output_dir='output'):
        annopath = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._devkit_path,
            'VOC' + self._year,
            'ImageSets',
            'Main',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self._year) < 2010 else False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_voc_results_file_template().format(cls)
            rec, prec, ap = voc_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
                use_07_metric=use_07_metric)
            aps += [ap]
            print(('AP for {} = {:.4f}'.format(cls, ap)))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print(('Mean AP = {:.4f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print(('{:.3f}'.format(ap)))
        print(('{:.3f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------') 
Example 39
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: coco.py    MIT License
def _print_detection_eval_metrics(self, coco_eval):
        IoU_lo_thresh = 0.5
        IoU_hi_thresh = 0.95

        def _get_thr_ind(coco_eval, thr):
            ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                           (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
            iou_thr = coco_eval.params.iouThrs[ind]
            assert np.isclose(iou_thr, thr)
            return ind

        ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
        ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
        # precision has dims (iou, recall, cls, area range, max dets)
        # area range index 0: all area ranges
        # max dets index 2: 100 per image
        precision = \
            coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
        ap_default = np.mean(precision[precision > -1])
        print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
               '~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
        print('{:.1f}'.format(100 * ap_default))
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            # minus 1 because of __background__
            precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
            ap = np.mean(precision[precision > -1])
            print('{:.1f}'.format(100 * ap))

        print('~~~~ Summary metrics ~~~~')
        coco_eval.summarize() 
Example 40
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License
def one_epoch(self, mode, num_batch, next_fn):
        epoch_loss = []
        epoch_in_top = []
        for batch in xrange(num_batch):
            if (batch+1) % max(1, (num_batch / self.option.print_per_batch)) == 0:
                sys.stdout.write("%d/%d\t" % (batch+1, num_batch))
                sys.stdout.flush()
            
            (qq, hh, tt), mdb = next_fn()
            if mode == "train":
                run_fn = self.learner.update
            else:
                run_fn = self.learner.predict
            loss, in_top = run_fn(self.sess,
                                  qq, 
                                  hh, 
                                  tt, 
                                  mdb) 
            epoch_loss += list(loss)
            epoch_in_top += list(in_top)
                                    
        msg = self.msg_with_time(
                "Epoch %d mode %s Loss %0.4f In top %0.4f." 
                % (self.epoch+1, mode, np.mean(epoch_loss), np.mean(epoch_in_top)))
        print(msg)
        self.log_file.write(msg + "\n")
        return epoch_loss, epoch_in_top 
Example 41
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License
def one_epoch_valid(self):
        loss, in_top = self.one_epoch("valid", 
                                      self.data.num_batch_valid, 
                                      self.data.next_valid)
        self.valid_stats.append([loss, in_top])
        self.best_valid_loss = min(self.best_valid_loss, np.mean(loss))
        self.best_valid_in_top = max(self.best_valid_in_top, np.mean(in_top)) 
Example 42
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License
def early_stop(self):
        loss_improve = self.best_valid_loss == np.mean(self.valid_stats[-1][0])
        in_top_improve = self.best_valid_in_top == np.mean(self.valid_stats[-1][1])
        if loss_improve or in_top_improve:
            return False
        else:
            if self.epoch < self.option.min_epoch:
                return False
            else:
                return True 
Example 43
Project: python-pool-performance   Author: JohnStarich   File: pools.py    MIT License
def summarize_test(test_output: Mapping) -> Mapping:
    return {
        'jobs': test_output['jobs'],
        'time': numpy.mean(test_output['time']),
        'blocks': numpy.mean(test_output['blocks']),
    } 
Example 44
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0
def get_diff_feats(sig_df):
    """
    Gets the names of the features from a result table whose confidence interval for the difference in means
    does not include 0.

    :param sig_df: Dataframe storing the mean difference confidence intervals, as returned by stats.p_correction()
    :return: Set of feature names.
    """
    # grab significant deviances
    series = sig_df["diff_flag"]
    series = series.fillna("False")
    index_labels = series[series == True].index.labels[0]
    return set(itemgetter(index_labels)(series.index.levels[0])) 
Example 45
Project: RandomFourierFeatures   Author: tiskw   File: PyRFF.py    MIT License
def score(self, X, y, **args):
        pred = self.predict(X)
        return np.mean([(1 if pred[n, 0] == y[n] else 0) for n in range(X.shape[0])])

Example 46
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License
def blind_search(self, x, y, image):
        '''
        This function is applied in the first few frames and/or if the lane was not successfully detected
        in the previous frame. It uses a sliding window approach to detect peaks in a histogram of the
        binary thresholded image. Pixels in close proximity to the detected peaks are considered to belong
        to the lane lines.
        '''
        xvals = []
        yvals = []
        if self.found == False:
            i = 720
            j = 630
            histogram = np.sum(image[image.shape[0]//2:], axis=0)
            if self == Right:
                peak = np.argmax(histogram[image.shape[1]//2:]) + image.shape[1]//2
            else:
                peak = np.argmax(histogram[:image.shape[1]//2])
            while j >= 0:
                x_idx = np.where((((peak - 100) < x)&(x < (peak + 100))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    xvals.extend(x_window)
                    yvals.extend(y_window)
                if np.sum(x_window) > 100:
                    peak = int(np.mean(x_window))
                i -= 90
                j -= 90
        if np.sum(xvals) > 0:
            self.found = True
        else:
            yvals = self.Y
            xvals = self.X
        return xvals, yvals, self.found 
Example 47
Project: mmdetection   Author: open-mmlab   File: coco_utils.py    Apache License 2.0
def fast_eval_recall(results,
                     coco,
                     max_dets,
                     iou_thrs=np.arange(0.5, 0.96, 0.05)):
    if mmcv.is_str(results):
        assert results.endswith('.pkl')
        results = mmcv.load(results)
    elif not isinstance(results, list):
        raise TypeError(
            'results must be a list of numpy arrays or a filename, not {}'.
            format(type(results)))

    gt_bboxes = []
    img_ids = coco.getImgIds()
    for i in range(len(img_ids)):
        ann_ids = coco.getAnnIds(imgIds=img_ids[i])
        ann_info = coco.loadAnns(ann_ids)
        if len(ann_info) == 0:
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        bboxes = []
        for ann in ann_info:
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
        bboxes = np.array(bboxes, dtype=np.float32)
        if bboxes.shape[0] == 0:
            bboxes = np.zeros((0, 4))
        gt_bboxes.append(bboxes)

    recalls = eval_recalls(
        gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
    ar = recalls.mean(axis=1)
    return ar 
Example 48
Project: mmdetection   Author: open-mmlab   File: merge_augs.py    Apache License 2.0
def merge_aug_scores(aug_scores):
    """Merge augmented bbox scores."""
    if isinstance(aug_scores[0], torch.Tensor):
        return torch.mean(torch.stack(aug_scores), dim=0)
    else:
        return np.mean(aug_scores, axis=0) 
Example 49
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lillabcrossval_network.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 50
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 51
Project: MODS_ConvNet   Author: santiagolopezg   File: test_labcrossval_network.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 52
Project: MODS_ConvNet   Author: santiagolopezg   File: test_network.py    MIT License
def cv_calc(cvscores):
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 6:
            other_counter = 0
    return cvscores, test_metrics 
Example 53
Project: MODS_ConvNet   Author: santiagolopezg   File: test_lilfoo.py    MIT License
def cv_calc():
    # calculate mean and stdev for each metric, and append them to test_metrics file
    test_metrics.append(cvscores[0])

    other_counter = 0
    for metric in cvscores[1:]:
        v = 'test {0}: {1:.4f} +/- {2:.4f}%'.format(cvscores[0][0][other_counter], np.mean(metric), np.std(metric))
        print v
        test_metrics.append(v)
        other_counter += 1
        if other_counter == 7:
            other_counter = 0
    return cvscores, test_metrics 
Example 54
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def mle_batch(data, batch, k):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)

    k = min(k, len(data)-1)
    f = lambda v: - k / np.sum(np.log(v/v[-1]))
    a = cdist(batch, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a

Example 55
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def kmean_batch(data, batch, k):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)

    k = min(k, len(data)-1)
    f = lambda v: np.mean(v)
    a = cdist(batch, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a

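Both helpers assume cdist is imported from scipy.spatial.distance; example usage:

import numpy as np
from scipy.spatial.distance import cdist

data = np.random.rand(200, 3).astype(np.float32)
batch = np.random.rand(5, 3).astype(np.float32)
print(kmean_batch(data, batch, k=10))  # per-point mean distance to near neighbours (closest match excluded, as in the slicing above)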
Example 56
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def help_generate_np_gives_adversarial_example(self, ord, eps=.5, **kwargs):
        x_val, x_adv, delta = self.generate_adversarial_examples_np(ord, eps,
                                                                    **kwargs)
        self.assertClose(delta, eps)
        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.5) 
Example 57
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_targeted_generate_np_gives_adversarial_example(self):
        random_labs = np.random.randint(0, 2, 100)
        random_labs_one_hot = np.zeros((100, 2))
        random_labs_one_hot[np.arange(100), random_labs] = 1

        _, x_adv, delta = self.generate_adversarial_examples_np(
            eps=.5, ord=np.inf, y_target=random_labs_one_hot)

        self.assertClose(delta, 0.5)

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(random_labs == new_labs) > 0.7) 
Example 58
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_attack_strength(self):
        # This uses the existing input structure for SPSA. Tom tried for ~40
        # minutes to get generate_np to work correctly but could not.

        n_samples = 10
        x_val = np.random.rand(n_samples, 2)
        x_val = np.array(x_val, dtype=np.float32)

        # The SPSA attack currently uses non-one-hot labels
        # TODO: change this to use standard cleverhans label conventions
        feed_labs = np.random.randint(0, 2, n_samples)

        x_input = tf.placeholder(tf.float32, shape=(1,2))
        y_label = tf.placeholder(tf.int32, shape=(1,))

        x_adv_op = self.attack.generate(
            x_input, y=y_label,
            epsilon=.5, num_steps=100, batch_size=64, spsa_iters=1,
        )

        all_x_adv = []
        for i in range(n_samples):
            x_adv_np = self.sess.run(x_adv_op, feed_dict={
                x_input: np.expand_dims(x_val[i], axis=0),
                y_label: np.expand_dims(feed_labs[i], axis=0),
            })
            all_x_adv.append(x_adv_np[0])

        x_adv = np.vstack(all_x_adv)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(feed_labs == new_labs) < 0.1) 
Example 59
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_does_not_cache_graph_computation_for_nb_iter(self):

        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                        clip_min=-5.0, clip_max=5.0,
                                        nb_iter=10)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)

        ok = [False]
        old_grads = tf.gradients

        def fn(*x, **y):
            ok[0] = True
            return old_grads(*x, **y)
        tf.gradients = fn

        x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                        clip_min=-5.0, clip_max=5.0,
                                        nb_iter=11)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)

        tf.gradients = old_grads

        self.assertTrue(ok[0]) 
Example 60
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_untargeted_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=10)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 61
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_high_confidence_targeted_examples(self):

        trivial_model = TrivialModel()

        for CONFIDENCE in [0, 2.3]:
            x_val = np.random.rand(10, 1) - .5
            x_val = np.array(x_val, dtype=np.float32)

            feed_labs = np.zeros((10, 2))
            feed_labs[np.arange(10), np.random.randint(0, 2, 10)] = 1
            attack = CarliniWagnerL2(trivial_model, sess=self.sess)
            x_adv = attack.generate_np(x_val,
                                       max_iterations=100,
                                       binary_search_steps=2,
                                       learning_rate=1e-2,
                                       initial_const=1,
                                       clip_min=-10, clip_max=10,
                                       confidence=CONFIDENCE,
                                       y_target=feed_labs,
                                       batch_size=10)

            new_labs = self.sess.run(trivial_model.get_logits(x_adv))

            good_labs = new_labs[np.arange(10), np.argmax(feed_labs, axis=1)]
            bad_labs = new_labs[np.arange(
                10), 1 - np.argmax(feed_labs, axis=1)]

            self.assertClose(CONFIDENCE, np.min(good_labs - bad_labs),
                             atol=1e-1)
            self.assertTrue(np.mean(np.argmax(new_labs, axis=1) ==
                                    np.argmax(feed_labs, axis=1)) > .9) 
Example 62
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License
def test_generate_np_untargeted_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=10)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 63
Project: b2ac   Author: hbldh   File: distance_functions.py    MIT License
def distance(this, other):
    if isinstance(this, B2ACPoint):
        if isinstance(other, B2ACPoint):
            # Distance defined as distance between the Point objects.
            return np.linalg.norm(this.point - other.point)
        elif isinstance(other, B2ACEllipse):
            # Distance defined as distance from this point to center point of ellipse.
            return np.linalg.norm(this.point - other.center_point)
        elif isinstance(other, B2ACPolygon):
            # Distance defined as distance from this point to center point of polygon.
            return np.linalg.norm(this.point - np.mean(other.polygon_points, 0))
            # TODO: Better to evaluate distance from any polygon node point?
        else:
            raise ValueError("Cannot compare B2ACPoint to {0}.".format(type(other)))
    elif isinstance(this, B2ACEllipse):
        if isinstance(other, B2ACPoint):
            # Distance defined as distance from this point to center point of ellipse.
            return np.linalg.norm(this.center_point - other.point)
        elif isinstance(other, B2ACEllipse):
            # Distance defined as distance between center points of ellipses.
            return np.linalg.norm(this.center_point - other.center_point)
        elif isinstance(other, B2ACPolygon):
            # Distance defined as distance from ellipse center point and center of polygon.
            return np.linalg.norm(this.center_point - np.mean(other.polygon_points, 0))
            # return np.min(np.sqrt(((other.polygon_points - this.center_point) ** 2).sum(1)))
        else:
            raise ValueError("Cannot compare B2ACEllipse to {0}.".format(type(other)))
    elif isinstance(this, B2ACPolygon):
        if isinstance(other, B2ACPoint):
            # Distance defined as distance from the other point to center point of polygon.
            return np.linalg.norm(other.point - np.mean(this.polygon_points, 0))

            # TODO: Redefine to minimal distance from center point or any polygon node?
            # p1 = np.min(np.sqrt(((this.polygon_points - other.point) ** 2).sum(1)))
            # p2 = np.linalg.norm(np.mean(this.polygon_points, 0) - other.point)
            # return np.min([p1, p2])

        elif isinstance(other, B2ACEllipse):
            # Distance defined as distance from the center point of the other ellipse
            # to center point of polygon.
            return np.linalg.norm(other.center_point - np.mean(this.polygon_points, 0))

            # TODO: Redefine to minimal distance from center point or any polygon node?
            # p1 = np.min(np.sqrt(((this.polygon_points - other.center_point) ** 2).sum(1)))
            # p2 = np.linalg.norm(np.mean(this.polygon_points, 0) - other.center_point)
            # return np.min([p1, p2])

        elif isinstance(other, B2ACPolygon):
            # Distance defined as the distance between the center points of the polygons.
            return np.linalg.norm(np.mean(this.polygon_points, 0) - np.mean(other.polygon_points, 0))

            # TODO: Redefine to minimal distance from center points or any polygon nodes?
            # p1 = np.min([np.min(np.sqrt(((v - other.polygon_points) ** 2).sum(1))) for v in this.polygon_points])
            # p2 = np.min(np.sqrt(((this.polygon_points - np.mean(other.polygon_points, 0)) ** 2).sum(1)))
            # p3 = np.min(np.sqrt(((other.polygon_points - np.mean(this.polygon_points, 0)) ** 2).sum(1)))
            # return np.min([p1, p2, p3])
        else:
            raise ValueError("Cannot compare B2ACPolygon to {0}.".format(type(other)))
    else:
        raise ValueError("Cannot call this method with a {0}.".format(type(this))) 
Example 64
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License 4 votes vote down vote up
def initialise_test(dates):
    if not dates:
        data_frame = pd.read_csv('../data/test.csv', header=0)
    elif dates:
        data_frame = pd.read_csv('../data/test.csv', header=0, parse_dates=['Dates'])
        data_frame['Year'] = data_frame['Dates'].map(lambda x: x.year)
        data_frame['Month'] = data_frame['Dates'].map(lambda x: x.month)
        data_frame['Week'] = data_frame['Dates'].map(lambda x: x.week)
        data_frame['Hour'] = data_frame['Dates'].map(lambda x: x.hour)

    # Change string categories to integer classifiers
    PdDistricts = list(enumerate(sorted(np.unique(data_frame['PdDistrict']))))
    DaysOfWeeks = list(enumerate(sorted(np.unique(data_frame['DayOfWeek']))))
    PdDistrictsDict = {name: i for i, name in PdDistricts}
    DaysOfWeeksDict = {name: i for i, name in DaysOfWeeks}
    data_frame.PdDistrict = data_frame.PdDistrict.map(lambda x: PdDistrictsDict[x]).astype(int)
    data_frame.DayOfWeek = data_frame.DayOfWeek.map(lambda x: DaysOfWeeksDict[x]).astype(int)

    xy_scaler = pp.StandardScaler()
    xy_scaler.fit(data_frame[["X", "Y"]])
    data_frame[["X", "Y"]] = xy_scaler.transform(data_frame[["X", "Y"]])
    data_frame["rot45_X"] = .707 * data_frame["Y"] + .707 * data_frame["X"]
    data_frame["rot45_Y"] = .707 * data_frame["Y"] - .707 * data_frame["X"]
    data_frame["rot30_X"] = (1.732 / 2) * data_frame["X"] + (1. / 2) * data_frame["Y"]
    data_frame["rot30_Y"] = (1.732 / 2) * data_frame["Y"] - (1. / 2) * data_frame["X"]
    data_frame["rot60_X"] = (1. / 2) * data_frame["X"] + (1.732 / 2) * data_frame["Y"]
    data_frame["rot60_Y"] = (1. / 2) * data_frame["Y"] - (1.732 / 2) * data_frame["X"]
    data_frame["radial_r"] = np.sqrt(np.power(data_frame["Y"], 2) + np.power(data_frame["X"], 2))

    # rounding off location coordinates to 2 decimal points
    data_frame.X = data_frame.X.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.Y = data_frame.Y.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.rot45_X = data_frame.rot45_X.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.rot45_Y = data_frame.rot45_Y.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.rot30_X = data_frame.rot30_X.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.rot30_Y = data_frame.rot30_Y.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.rot60_X = data_frame.rot60_X.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.rot60_Y = data_frame.rot60_Y.map(lambda x: "%.2f" % round(x, 2)).astype(float)
    data_frame.radial_r = data_frame.radial_r.map(lambda x: "%.2f" % round(x, 2)).astype(float)

    return data_frame


# TODO: Fill missing values if any
# Compute mean of a column and fill missing values 
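The trailing TODO is a one-liner with numpy/pandas. A minimal sketch of mean imputation, assuming a small hypothetical DataFrame (the column names below are not the project's):

import numpy as np
import pandas as pd

df = pd.DataFrame({"X": [1.0, np.nan, 3.0], "Y": [4.0, 5.0, np.nan]})

# Fill each missing value with its column mean; NaN entries are
# ignored by DataFrame.mean() just as they are by np.nanmean
df = df.fillna(df.mean())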
Example 65
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License 4 votes vote down vote up
def get_noise_distribution(data, method='moments'):
    '''Computes sigma and N from an array of gamma distributed data

    input
    -----
    data
        A numpy array of gamma distributed values
    method='moments' or method='maxlk'
        Use either the moments or maximum likelihood equations to estimate the parameters.

    output
    ------
    sigma, N
        parameters related to the original Gaussian noise distribution
    '''

    data = data[data > 0]

    # If we have no voxel or only the same value
    # it leads to a divide by 0 as an edge case
    if data.size == 0 or np.std(data) == 0:
        return 0, 0

    # First get sigma
    if method == 'moments':
        mdata2 = np.mean(data**2)
        mdata4 = np.mean(data**4)

        p1 = mdata4 / mdata2
        p2 = mdata2
        sigma = np.sqrt(p1 - p2) / np.sqrt(2)
    elif method == 'maxlk':
        sigma = maxlk_sigma(data)
    else:
        raise ValueError('Invalid method name {}'.format(method))

    t = data**2 / (2*sigma**2)

    # Now compute N
    if method == 'moments':
        N = np.mean(t)
    elif method == 'maxlk':
        y = np.mean(np.log(t))
        N = inv_digamma(y)
    else:
        raise ValueError('Invalid method name {}'.format(method))

    return sigma, N 
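One way to sanity-check the moment equations is to simulate the model the docstring describes: values whose normalized squares t = data**2 / (2*sigma**2) follow a Gamma(N, 1) distribution. A rough sketch with arbitrary parameters (synthetic data, not part of the project):

import numpy as np

rng = np.random.default_rng(0)
sigma_true, N_true = 2.0, 4

# Simulate the model: t = data**2 / (2 * sigma**2) ~ Gamma(N, 1)
t = rng.gamma(shape=N_true, scale=1.0, size=100000)
data = np.sqrt(2 * sigma_true**2 * t)

# Method of moments, as in the 'moments' branch above
mdata2, mdata4 = np.mean(data**2), np.mean(data**4)
sigma_hat = np.sqrt(mdata4 / mdata2 - mdata2) / np.sqrt(2)   # ~2.0
N_hat = np.mean(data**2 / (2 * sigma_hat**2))                # ~4.0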
Example 66
Project: building-boundary   Author: Geodan   File: segment.py    MIT License 4 votes vote down vote up
def fit_line(self, max_error=None):
        """
        Fit a line to the set of points of the object.

        Parameters
        ----------
        max_error : float or int
            The maximum error (max distance points to line) the
            fitted line is allowed to have. A ThresholdError will be
            raised if this max error is exceeded.

        Raises
        ------
        ThresholdError
            If the error of the fitted line (max distance points to
            line) exceeds the given max error.
        """
        if len(self.points) == 1:
            raise ValueError('Not enough points to fit a line.')
        elif len(self.points) == 2:
            dx, dy = np.diff(self.points, axis=0)[0]
            if dx == 0:
                self.a = 0
            else:
                self.a = dy / dx
            self.b = -1
            self.c = (np.mean(self.points[:, 1]) -
                      np.mean(self.points[:, 0]) * self.a)
        elif all(self.points[0, 0] == self.points[:, 0]):
            self.a = 1
            self.b = 0
            self.c = -self.points[0, 0]
        elif all(self.points[0, 1] == self.points[:, 1]):
            self.a = 0
            self.b = 1
            self.c = -self.points[0, 1]
        else:
            _, eigenvectors = PCA(self.points)
            self.a = eigenvectors[1, 0] / eigenvectors[0, 0]
            self.b = -1
            self.c = (np.mean(self.points[:, 1]) -
                      np.mean(self.points[:, 0]) * self.a)

            if max_error is not None:
                error = self.error()
                if error > max_error:
                    raise utils.error.ThresholdError(
                        "Could not fit a proper line. Error: {}".format(error)
                    )

        self._create_line_segment() 
Example 67
Project: building-boundary   Author: Geodan   File: segment.py    MIT License 4 votes vote down vote up
def regularize(self, orientation, max_error=None):
        """
        Recreates the line segment based on the given orientation.

        Parameters
        ----------
        orientation : float or int
            The orientation the line segment should have. In radians from
            0 to pi (east to west counterclockwise) and
            0 to -pi (east to west clockwise).
        max_error : float or int
            The maximum error (max distance points to line) the
            fitted line is allowed to have. A ThresholdError will be
            raised if this max error is exceeded.

        Raises
        ------
        ThresholdError
            If the error of the fitted line (max distance points to
            line) exceeds the given max error.

        .. [1] https://math.stackexchange.com/questions/1377716/how-to-find-a-least-squares-line-with-a-known-slope  # noqa
        """
        prev_a = self.a
        prev_b = self.b
        prev_c = self.c

        if not np.isclose(orientation, self.orientation):
            if np.isclose(abs(orientation), math.pi / 2):
                self.a = 1
                self.b = 0
                self.c = np.mean(self.points[:, 0])
            elif (np.isclose(abs(orientation), math.pi) or
                    np.isclose(orientation, 0)):
                self.a = 0
                self.b = 1
                self.c = np.mean(self.points[:, 1])
            else:
                self.a = math.tan(orientation)
                self.b = -1
                self.c = (sum(self.points[:, 1] - self.a * self.points[:, 0]) /
                          len(self.points))

            if max_error is not None:
                error = self.error()
                if error > max_error:
                    self.a = prev_a
                    self.b = prev_b
                    self.c = prev_c
                    raise utils.error.ThresholdError(
                        "Could not fit a proper line. Error: {}".format(error)
                    )

            self._create_line_segment() 
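The else branch implements least squares with a known slope, per the Stack Exchange reference in the docstring: once the slope a is fixed, the squared error is minimized by taking c as the mean residual, which is what the sum(...) / len(...) expression computes. A standalone sketch on synthetic points (all values made up):

import numpy as np

rng = np.random.default_rng(1)
a = 0.5                                    # slope fixed by the target orientation
x = rng.uniform(0, 10, 50)
y = a * x + 3.0 + rng.normal(0, 0.1, 50)   # true intercept 3.0

# With the slope fixed, the least-squares intercept is the mean residual
c = np.mean(y - a * x)                     # ~3.0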
Example 68
Project: mtrl-auto-uav   Author: brunapearson   File: test_mtrl.py    MIT License 4 votes vote down vote up
def interventions_counter(client,depth_img,uav_size,pred_pos,yaw,behaviour,smoothness_x,smoothness_y,smoothness_z):
    global n_intervention

    current_pos = client.simGetGroundTruthKinematics().position

    if behaviour=="search":
        # check change in position to maximize exploration in x direction
        if int(pred_pos[0])<0:
            p_x = int(abs(pred_pos[0])-abs(current_pos.x_val)*(-2))
        else:
            p_x = int(abs(pred_pos[0])+abs(current_pos.x_val))
        # check change in position to maximize exploration in y direction
        if int(pred_pos[1])<0:
            p_y = int(abs(pred_pos[1])-abs(current_pos.y_val)*(-2))
        else:
            p_y = int(abs(pred_pos[1])+abs(current_pos.y_val))

    if behaviour=="flight":
        p_x = ((current_pos.x_val - pred_pos[0])*(smoothness_x))+current_pos.x_val
        p_y = ((current_pos.y_val - pred_pos[1])*(smoothness_y))+current_pos.y_val

    p_z = ((current_pos.z_val-pred_pos[2])*smoothness_z)+current_pos.z_val #snowy

    # control max_height and min_height
    if p_z < -150:
        p_z=(-30)
    elif p_z==0 or p_z > (-2):
        p_z=(-4)

    hfov=radians(120)#90
    coll_thres=3 #3 forest
    intervention_thres = 50

    [h,w] = np.shape(depth_img)
    [roi_h,roi_w] = compute_bb((h,w), uav_size, hfov, coll_thres)

    img2d_box = depth_img[int((h-roi_h)/2):int((h+roi_h)/2),int((w-roi_w)/2):int((w+roi_w)/2)]

    if (int(np.mean(img2d_box)) < intervention_thres):
        n_intervention += 1
        client.moveToPositionAsync(-30, p_y, p_z,5,drivetrain = airsim.DrivetrainType.ForwardOnly,lookahead=-1,adaptive_lookahead=1, yaw_mode = airsim.YawMode(is_rate = False, yaw_or_rate = yaw))
        time.sleep(1)
    else:
        client.moveToPositionAsync(p_x, p_y, p_z,5,drivetrain = airsim.DrivetrainType.ForwardOnly,lookahead=-1,adaptive_lookahead=1, yaw_mode = airsim.YawMode(is_rate = False, yaw_or_rate = yaw))
        time.sleep(1) 
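Boiled down, the intervention test crops a central region of interest from the depth image and reacts when its mean depth falls below intervention_thres. A stripped-down sketch with placeholder shapes and threshold:

import numpy as np

rng = np.random.default_rng(0)
depth_img = rng.uniform(20.0, 200.0, size=(144, 256))   # stand-in depth image
h, w = depth_img.shape
roi_h, roi_w = 40, 60                                   # stand-in bounding box

roi = depth_img[(h - roi_h) // 2:(h + roi_h) // 2,
                (w - roi_w) // 2:(w + roi_w) // 2]

# A low mean depth in the central ROI means an obstacle is close ahead
must_intervene = np.mean(roi) < 50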
Example 69
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: pascal_voc.py    MIT License 4 votes vote down vote up
def _do_python_eval(self, output_dir='output'):
    annopath = os.path.join(
      self._devkit_path,
      'VOC' + self._year,
      'Annotations',
      '{:s}.xml')
    imagesetfile = os.path.join(
      self._devkit_path,
      'VOC' + self._year,
      'ImageSets',
      'Main',
      self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = True if int(self._year) < 2010 else False
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
      os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
      if cls == '__background__':
        continue
      filename = self._get_voc_results_file_template().format(cls)
      rec, prec, ap = voc_eval(
        filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
        use_07_metric=use_07_metric, use_diff=self.config['use_diff'])
      aps += [ap]
      print(('AP for {} = {:.4f}'.format(cls, ap)))
      with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
        pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print(('Mean AP = {:.4f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
      print(('{:.3f}'.format(ap)))
    print(('{:.3f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------') 
Example 70
Project: FRIDA   Author: LCAV   File: bands_selection.py    MIT License 4 votes vote down vote up
def select_bands(samples, freq_range, fs, nfft, win, n_bands, div=1):
    '''
    Selects the frequency bins with the most energy in a given range.

    It is possible to specify a div factor: the range is then subdivided
    into div equal subbands, and n_bands / div bins are selected per subband.
    '''

    if win is not None and isinstance(win, bool):
        if win:
            win = np.hanning(nfft)
        else:
            win = None

    # Read the signals in a single array
    sig = [wavfile.read(s)[1] for s in samples]
    L = max([s.shape[0] for s in sig])
    signals = np.zeros((L,len(samples)), dtype=np.float32)
    for i in range(signals.shape[1]):
        signals[:sig[i].shape[0],i] = sig[i] / np.std(sig[i][sig[i] > 1e-2])

    sum_sig = np.sum(signals, axis=1)

    sum_STFT = pra.stft(sum_sig, nfft, nfft, win=win, transform=rfft).T
    sum_STFT_avg = np.mean(np.abs(sum_STFT)**2, axis=1)

    # Do some band selection
    bnds = np.linspace(freq_range[0], freq_range[1], div+1)

    freq_hz = np.zeros(n_bands)
    freq_bins = np.zeros(n_bands, dtype=int)

    nsb = n_bands // div

    for i in range(div):

        bl = int(bnds[i] / fs * nfft)
        bh = int(bnds[i+1] / fs * nfft)

        k = np.argsort(sum_STFT_avg[bl:bh])[-nsb:]

        freq_hz[nsb*i:nsb*(i+1)] = (bl + k) / nfft * fs
        freq_bins[nsb*i:nsb*(i+1)] = k + bl

    freq_hz = freq_hz[:n_bands]

    return np.unique(freq_hz), np.unique(freq_bins) 
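The core of the selection is: average the squared STFT magnitudes over time with np.mean, then keep the strongest bins inside the frequency range. A reduced sketch on a stand-in spectrogram (shapes and frequencies are arbitrary):

import numpy as np

rng = np.random.default_rng(2)
fs, nfft, n_bands = 16000, 512, 8
stft = rng.normal(size=(nfft // 2 + 1, 200))   # stand-in STFT (bins x frames)

psd = np.mean(np.abs(stft)**2, axis=1)         # average power per frequency bin

bl, bh = int(300 / fs * nfft), int(3500 / fs * nfft)   # 300-3500 Hz as bin indices
k = np.argsort(psd[bl:bh])[-n_bands:]                  # n_bands strongest bins
freq_bins = np.sort(k + bl)
freq_hz = freq_bins / nfft * fs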
Example 71
Project: FRIDA   Author: LCAV   File: utils.py    MIT License 4 votes vote down vote up
def polar_distance(x1, x2):
    """
    Given two arrays of angles x1 and x2, pairs the entries that are the
    closest and provides the pairing indices: x1[index[:, 0]] should be as
    close as possible to x2[index[:, 1]]. The function outputs the average of
    the absolute angular differences abs(x1[index[:, 0]] - x2[index[:, 1]]).
    :param x1: vector 1
    :param x2: vector 2
    :return: d: the average angular distance between the paired entries
             index: the pairing index array
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        for k in xrange(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float('inf')
            diffmat[:, index1] = float('inf')
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            index = np.array([1, index])
        else:
            index = np.array([index, 1])

    # sort to keep the order of the first vector
    if min_N1_N2 > 1:
        perm = np.argsort(index[:,0])
        index = index[perm,:]

    return d, index 
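The np.arccos(np.cos(...)) composition is what makes this distance circular: it folds any angular difference into [0, pi], so angles on opposite sides of the wrap-around point compare correctly. For instance:

import numpy as np

a, b = np.deg2rad(350.0), np.deg2rad(10.0)

naive = abs(a - b)                  # ~5.93 rad: ignores the wrap-around
folded = np.arccos(np.cos(a - b))   # ~0.35 rad: the true 20-degree gap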
Example 72
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py    MIT License 4 votes vote down vote up
def profile(self, net):
    pass
#     data = self.parse(exclusive = True)
#     size = len(data); batch = self.FLAGS.batch
#     all_inp_ = [x[0] for x in data]
#     net.say('Will cycle through {} examples {} times'.format(
#         len(all_inp_), net.FLAGS.epoch))

#     fetch = list(); mvave = list(); names = list();
#     this = net.top
#     conv_lay = ['convolutional', 'connected', 'local', 'conv-select']
#     while this.inp is not None:
#         if this.lay.type in conv_lay:
#             fetch = [this.out] + fetch
#             names = [this.lay.signature] + names
#             mvave = [None] + mvave 
#         this = this.inp
#     print(names)

#     total = int(); allofthem = len(all_inp_) * net.FLAGS.epoch
#     batch = min(net.FLAGS.batch, len(all_inp_))
#     for count in range(net.FLAGS.epoch):
#         net.say('EPOCH {}'.format(count))
#         for j in range(len(all_inp_)/batch):
#             inp_feed = list(); new_all = list()
#             all_inp = all_inp_[j*batch: (j*batch+batch)]
#             for inp in all_inp:
#                 new_all += [inp]
#                 this_inp = os.path.join(net.FLAGS.dataset, inp)
#                 this_inp = net.framework.preprocess(this_inp)
#                 expanded = np.expand_dims(this_inp, 0)
#                 inp_feed.append(expanded)
#             all_inp = new_all
#             feed_dict = {net.inp : np.concatenate(inp_feed, 0)}
#             out = net.sess.run(fetch, feed_dict)

#             for i, o in enumerate(out):
#                 oi = out[i];
#                 dim = len(oi.shape) - 1
#                 ai = mvave[i]; 
#                 mi = np.mean(oi, tuple(range(dim)))
#                 vi = np.var(oi, tuple(range(dim)))
#                 if ai is None: mvave[i] = [mi, vi]
#                 elif 'banana ninja yada yada':
#                     ai[0] = (1 - _MVA) * ai[0] + _MVA * mi
#                     ai[1] = (1 - _MVA) * ai[1] + _MVA * vi
#             total += len(inp_feed)
#             net.say('{} / {} = {}%'.format(
#                 total, allofthem, 100. * total / allofthem))

#         with open('profile', 'wb') as f:
#             pickle.dump([mvave], f, protocol = -1) 
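Although the body is commented out, its intent is legible: keep an exponential moving average of each layer's activation mean and variance, computed with np.mean and np.var over all but the channel axis. A condensed sketch of just that update (the rate value is assumed; _MVA is defined outside this excerpt):

import numpy as np

_MVA = 0.05   # moving-average rate; assumed value for illustration

def update_stats(out, running):
    """EMA update of per-channel activation mean and variance."""
    axes = tuple(range(out.ndim - 1))   # reduce over all but the last axis
    mi = np.mean(out, axes)
    vi = np.var(out, axes)
    if running is None:
        return [mi, vi]
    running[0] = (1 - _MVA) * running[0] + _MVA * mi
    running[1] = (1 - _MVA) * running[1] + _MVA * vi
    return running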
Example 73
Project: em-prior-adjust   Author: aesuli   File: em_test.py    GNU General Public License v3.0 4 votes vote down vote up
def em_experiment(ax, clf, X_tr, y_tr, X_te, y_te, y_min=0, y_max=1.0):
    mlb = MultiLabelBinarizer()
    mlb.fit(np.expand_dims(np.hstack((y_tr, y_te)), 1))
    y_tr_bin = mlb.transform(np.expand_dims(y_tr, 1))
    y_te_bin = mlb.transform(np.expand_dims(y_te, 1))
    train_priors = np.mean(y_tr_bin, 0)
    test_priors = np.mean(y_te_bin, 0)

    print("Fitting", clf)

    clf.fit(X_tr, y_tr)
    test_posteriors = clf.predict_proba(X_te)
    posteriors_test_priors = np.mean(test_posteriors, axis=0)

    print('train priors', train_priors, sep='\n')
    print('test priors', test_priors, sep='\n')
    print('posteriors mean', posteriors_test_priors, sep='\n')
    print()

    em_test_posteriors, em_test_priors, history = em(y_te, test_posteriors, train_priors)

    em_prior = [p[1] for _, p, _, _, _, _ in history]
    accuracy = [a for _, _, _, a, _, _ in history]
    f1 = [2 * p * r / (p + r) if p + r > 0 else 0 for _, _, _, _, p, r in history]
    ax.set_ylim([y_min, y_max])
    ax.plot(range(len(accuracy)), accuracy, linestyle='-.', color='m', label='accuracy')
    ax.plot(range(len(f1)), f1, linestyle='--', color='#dd9f00', label='f1')
    ax.plot(range(len(em_prior)), em_prior, color='b', label='em pr')
    ax.hlines([train_priors[1]], 0, len(em_prior) - 1, colors=['r'], linestyles=[':'], label='train pr')
    ax.hlines([posteriors_test_priors[1]], 0, len(em_prior) - 1, colors=['#b5651d'], linestyles=['-.'], label='clf pr')
    ax.hlines([test_priors[1]], 0, len(em_prior) - 1, colors=['g'], linestyles=['--'], label='test pr')

    ax.set()
    ax.grid()

    print('Results')
    print('prior from:   train test  post  em')
    for i, (a, b, c, d) in enumerate(
            zip(train_priors, test_priors, posteriors_test_priors, em_test_priors)):
        print(f'{i:11d} - {a:3.3f} {b:3.3f} {c:3.3f} {d:3.3f}')

    return posteriors_test_priors[1], em_test_priors[1], accuracy[0], accuracy[-1], f1[0], f1[-1] 
Example 74
Project: em-prior-adjust   Author: aesuli   File: em.py    GNU General Public License v3.0 4 votes vote down vote up
def em(y, posteriors_zero, priors_zero, epsilon=1e-6, positive_class=1):
    """
    Implements the prior correction method based on EM presented in:
    "Adjusting the Outputs of a Classifier to New a Priori Probabilities: A Simple Procedure"
    Saerens, Latinne and Decaestecker, 2002
    http://www.isys.ucl.ac.be/staff/marco/Publications/Saerens2002a.pdf

    :param y: true labels of test items, to measure accuracy, precision and recall.
    :param posteriors_zero: posterior probabilities on test items, as returned by a classifier. A 2D-array with shape
    (items, classes).
    :param priors_zero: prior probabilities measured on training set.
    :param epsilon: stopping threshold.
    :param positive_class: class index to be considered the positive one, for precision and recall.
    :return: posteriors_s, priors_s, history: final adjusted posteriors, final adjusted priors, a list of length s
    where each element is a tuple with the step counter, the current priors (as a list), the stopping criterion value,
    accuracy, precision and recall.
    """
    s = 0
    priors_s = np.copy(priors_zero)
    posteriors_s = np.copy(posteriors_zero)
    val = 2 * epsilon
    history = list()
    acc = np.mean((y == positive_class) == (posteriors_zero[:, positive_class] > 0.5))
    rec = np.sum(np.logical_and((y == positive_class), (posteriors_zero[:, positive_class] > 0.5))) / np.sum(
        y == positive_class)
    prec = np.sum(np.logical_and((y == positive_class), (posteriors_zero[:, positive_class] > 0.5))) / np.sum(
        posteriors_zero[:, positive_class] > 0.5)
    history.append((s, list(priors_s), 1, acc, prec, rec))
    while not val < epsilon:
        # E step
        ratios = priors_s / priors_zero
        denominators = 0
        for c in range(priors_zero.shape[0]):
            denominators += ratios[c] * posteriors_zero[:, c]
        for c in range(priors_zero.shape[0]):
            posteriors_s[:, c] = ratios[c] * posteriors_zero[:, c] / denominators

        acc = np.mean((y == positive_class) == (posteriors_s[:, positive_class] > 0.5))
        rec = np.sum(np.logical_and((y == positive_class), (posteriors_s[:, positive_class] > 0.5))) / np.sum(
            y == positive_class)
        prec = np.sum(np.logical_and((y == positive_class), (posteriors_s[:, positive_class] > 0.5))) / np.sum(
            posteriors_s[:, positive_class] > 0.5)
        priors_s_minus_one = priors_s.copy()

        # M step
        priors_s = posteriors_s.mean(0)

        # check for stop
        val = 0
        for i in range(len(priors_s_minus_one)):
            val += abs(priors_s_minus_one[i] - priors_s[i])
        s += 1
        history.append((s, list(priors_s), val, acc, prec, rec))

    return posteriors_s, priors_s, history 
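Stripped of the accuracy/precision/recall bookkeeping, the E and M steps vectorize to a few lines. A compact sketch of the same fixed-point iteration (a paraphrase for clarity, not the project's API):

import numpy as np

def em_adjust(posteriors_zero, priors_zero, epsilon=1e-6):
    """Fixed-point iteration of Saerens et al. (2002), without the metrics."""
    priors = priors_zero.copy()
    while True:
        ratios = priors / priors_zero                       # E step
        scaled = posteriors_zero * ratios
        posteriors = scaled / scaled.sum(axis=1, keepdims=True)
        new_priors = posteriors.mean(axis=0)                # M step
        if np.abs(new_priors - priors).sum() < epsilon:
            return posteriors, new_priors
        priors = new_priors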
Example 75
Project: kuaa   Author: rafaelwerneck   File: plugin_zscore.py    GNU General Public License v3.0 4 votes vote down vote up
def normalize(img_path, images, images_set, pos_train_test, parameters, method,
        train_param):
    """
    Function that performs the normalization of a feature vector.
    
    Calculates the z-score of each position in the feature vector, relative to
    the sample mean and standard deviation of that position in all feature
    vectors.
    """

    print "Normalizer: ZSCORE"
    
    #Get the list of classes and the feature vector of the img_path
    img_classes = images[img_path][POS_CLASSES]
    try:
        img_fv = images[img_path][POS_FV][pos_train_test]
    except:
        img_fv = images[img_path][POS_FV][0]

    print "\tFeature vector of image", img_path, \
          "being normalized by process", os.getpid()

    # Performs the normalization ---------------------------------------------
    #If the parameters of normalization don't exist, calculate the mean and
    #   the standard deviation of the feature vectors in the train set
    if 'Mean' not in train_param:
        list_train = []
        for image in images_set:
            try:
                list_train.append(images[image][POS_FV][pos_train_test])
            except:
                list_train.append(images[image][POS_FV][ZERO_INDEX])
        
        mean_list = numpy.mean(list_train, axis=0)
        std_list = numpy.std(list_train, axis=0)
        
        train_param['Mean'] = mean_list
        train_param['Deviation'] = std_list
    #If the parameters of normalization already exist, load them
    else:
        print "\t\tGet Mean and Standard Deviation"
        mean_list = train_param['Mean']
        std_list = train_param['Deviation']
    
    fv_norm = [(img_fv[index] - mean_list[index]) / std_list[index]
            for index in range(len(img_fv))]
    fv_norm = [fv_item for fv_item in fv_norm if not numpy.isnan(fv_item)]
    #-------------------------------------------------------------------------

    return img_path, len(img_classes), img_classes, fv_norm, train_param 
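The normalization itself is two reductions: np.mean and np.std along axis 0 give per-position statistics over the training set, and the z-score is the centered, scaled value. A toy sketch:

import numpy as np

X = np.array([[1.0, 2.0],
              [3.0, 2.0],
              [5.0, 8.0]])          # rows = feature vectors of the train set

mean = np.mean(X, axis=0)           # per-position sample mean
std = np.std(X, axis=0)             # per-position standard deviation
X_norm = (X - mean) / std           # each column now has zero mean, unit variance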
Example 76
Project: kuaa   Author: rafaelwerneck   File: plugin_normalized_accuracy_score.py    GNU General Public License v3.0 4 votes vote down vote up
def evaluation(images, test_set, classes_list, pos_train_test, parameters):
    """
    Performs the calculation of the normalized accuracy score.

    Calculates the accuracy score using the entries of the test set and saves
    the result in a string to be returned by the function.
    """
    
    print "\tOutput: NORMALIZED ACCURACY SCORE"

    # Get parameters

    # Evaluation method
    #-------------------------------------------------------------------------
    list_class = []
    result_class = []

    for img_test in test_set:
        list_class.append(images[img_test][POS_CLASSES][INDEX_ZERO])
        classes_proba = images[img_test][POS_PREDICT][pos_train_test]
        img_predict_index = 0
        for i in range(len(classes_list)):
            if classes_proba[i] > classes_proba[img_predict_index]:
                img_predict_index = i
        result_class.append(classes_list[img_predict_index])

    # Calculates the accuracy score
    known_data = [(predict, test) for predict, test in izip(result_class, list_class) if test != None]
    unknown_data = [(predict, test) for predict, test in izip(result_class, list_class) if test == None]
    
    known_accuracy = sum([predict == test for predict, test in known_data]) / float(len(known_data))
    unknown_accuracy = sum([predict == test for predict, test in unknown_data]) / float(len(unknown_data)) if len(unknown_data) > 0 else 1.0
    
    accuracy_per_class = [numpy.mean([predict == test for predict, test in izip(result_class, list_class) if test == item_class]) for item_class in classes_list]
    global_na = float(numpy.mean(accuracy_per_class))
    
    known_classes = classes_list[:]
    known_classes.remove(None)
    naks = [numpy.mean([predict == test for predict, test in izip(result_class, list_class) if test == item_class]) for item_class in known_classes]
    naks = float(numpy.mean(naks))
    
    returns = {
            'Accuracy of Known Samples': [known_accuracy],
            'Accuracy of Unknown Samples': [unknown_accuracy],
            'Normalized Accuracy': [(known_accuracy + unknown_accuracy) / 2],
            'Global Normalized Accuracy': [global_na],
            'Normalized Accuracy of Known Samples': [naks]
            }
    #-------------------------------------------------------------------------
    print "\tSuccess in the calculation of the accuracy score"

    # Delete variables not used
    del list_class
    del result_class

    return returns 
Example 77
Project: kuaa   Author: rafaelwerneck   File: plugin_normalized_accuracy_score.py    GNU General Public License v3.0 4 votes vote down vote up
def write_tex(evaluation_path, classes, node_id):
    """
    Calculates the average normalized accuracy score from the accuracy scores in the
    evaluation_path.
    """
    
    from numpy import array
    from scipy import stats
    from math import sqrt
    
    print "\t\tTeX: Normalized Accuracy Score"
    
    na_dicts = []
    
    evaluation_file = open(evaluation_path, "rb")
    for line in evaluation_file.readlines():
        na_dicts.append(literal_eval(line))
    evaluation_file.close()
    
    avg_na = {}
    for evaluation in na_dicts[INDEX_ZERO].iterkeys():
        na_list = []
        for na in na_dicts:
            na_list.append(na[evaluation][INDEX_ZERO])
        avg_na[evaluation] = numpy.array(na_list).mean()
    
    evaluation_file = open(evaluation_path, "ab")
    for evaluation, item_na in avg_na.iteritems():
        evaluation_file.write("\n" + str(evaluation) + "\n")
        evaluation_file.write(str(item_na))
    evaluation_file.close()
    
    tex_string = """
\\begin{table}[htbp]
    \\centering
    \\begin{tabular}{cc}
        & Accuracy \\\\
        \\hline"""
    
    for evaluation, value in avg_na.iteritems():
        tex_string += """
        %s & %.2f \\\\""" % (evaluation, value * 100)
    
    tex_string += """
    \\end{tabular}
    \\caption{Normalized Accuracy Score of Node %s}
    \\label{tab:acc_%s}
\\end{table}
    """ % (node_id, node_id)
    
    return tex_string 
Example 78
Project: kuaa   Author: rafaelwerneck   File: plugin_f_measure.py    GNU General Public License v3.0 4 votes vote down vote up
def write_tex(evaluation_path, classes, node_id):
    """
    Calculates the average F-measure over the experiment.
    """
    
    #Constants
    INDEX_ZERO = 0
    
    print "\t\tTeX: F-Measure"
    
    f_measure_dicts = []
    
    evaluation_file = open(evaluation_path, "rb")
    for line in evaluation_file.readlines():
        f_measure_dicts.append(literal_eval(line))
    evaluation_file.close()
    
    avg_f_measure = {}
    for evaluation in f_measure_dicts[INDEX_ZERO].iterkeys():
        f_measure_list = []
        for f_measure in f_measure_dicts:
            f_measure_list.append(f_measure[evaluation][INDEX_ZERO])
        avg_f_measure[evaluation] = numpy.array(f_measure_list).mean()
    
    evaluation_file = open(evaluation_path, "ab")
    evaluation_file.write("\nAverage F-Measure\n")
    evaluation_file.write(str(avg_f_measure))
    evaluation_file.close()
    
    tex_string = """
\\begin{table}[htbp]
    \\centering
    \\begin{tabular}{cc}
        & Mean of F-Measure \\\\
        \\hline"""
    
    for evaluation, f_measure in avg_f_measure.iteritems():
        tex_string += """
        %s & %.2f \\\\""" % (evaluation, f_measure * 100)
                
    tex_string += """
    \\end{tabular}
    \\caption{Mean F-Measure Evaluation of Node %s}
    \\label{tab:f_measure_%s}
\\end{table}
    """ % (node_id, node_id)
    
    return tex_string 
Example 79
Project: kuaa   Author: rafaelwerneck   File: plugin_global_accuracy_score.py    GNU General Public License v3.0 4 votes vote down vote up
def evaluation(images, test_set, classes_list, pos_train_test, parameters):
    """
    Performs the calculation of the global accuracy score.

    Calculates the accuracy score using the entries of the test set and saves
    the result in a string to be returned by the function.
    """

    # CONSTANTS
    POS_CLASSES = 0
    POS_PREDICT = 1

    print "\tOutput: GLOBAL ACCURACY SCORE"

    # Get parameters

    # Output methods
    #-------------------------------------------------------------------------
    list_class = []
    result_class = []

    for img_test in test_set:
        list_class.append(str(images[img_test][POS_CLASSES][0]))
        classes_proba = images[img_test][POS_PREDICT][pos_train_test]
        img_predict_index = 0
        for i in range(len(classes_list)):
            if classes_proba[i] > classes_proba[img_predict_index]:
                img_predict_index = i
        result_class.append(str(classes_list[img_predict_index]))

    # Calculates the accuracy score
    accuracy = metrics.accuracy_score(list_class, result_class)
    
    # Calculates the normalized global accuracy score
    accuracy_per_class = []
    for item_class in classes_list:
        accuracy_class = [predict == test for predict, test in izip(result_class, list_class) if test == item_class]
        if len(accuracy_class):
            accuracy_per_class.append(numpy.mean(accuracy_class))
    global_na = float(numpy.mean(accuracy_per_class))
    #-------------------------------------------------------------------------
    print "\tSuccess in the calculation of the global accuracy score"

    # Delete variables not used
    del list_class
    del result_class

    return {"Global Accuracy": accuracy, "Normalized Global Accuracy": global_na} 
Example 80
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0 4 votes vote down vote up
def _create_result_table(result, p_val_col, p_trans, mean_confs, prop_confs, conf_invs, counts):
    """
    Builds the dataframe displaying the results.
    The first three arguments are used to handle the p-values and match them back to the corresponding dataset and \
    features, because their order changes during multiple testing correction.

    :param result:
    :param p_val_col: Stores the uncorrected p_values
    :param p_trans: Stores uncorrected p_values, the dataset combination which was tested and the corresponding \
    feature name
    :param conf_invs: DataFrame storing the 95% confidence interval per dataset, per feature.
    :param counts: DataFrame storing the number of observations per dataset, per feature.
    :return: Result table listing all important outcomes of the statistical comparison
    """
    # store test results
    result_table = pd.DataFrame(p_val_col)

    # rename columns and set values in result dataframe
    result_table.rename(columns={"0": "p-value"}, inplace=True)

    # insert corrected p_values
    if result:
        result_table["cor_p-value"] = result[1]
        result_table["signf"] = result[0]
    elif result is None:
        result_table["cor_p-value"] = np.nan
        result_table["signf"] = np.nan

    # combine p_value information with dataset and feature information stored in p_trans
    result_table = pd.concat([result_table, p_trans], axis=1, join="outer")

    # create multi index from feature name (result_table[2]) and datasets (result_table[1])
    result_table.index = result_table[[2, 1]]
    result_table.index = pd.MultiIndex.from_tuples(result_table.index)
    result_table.drop([0, 1, 2], axis=1, inplace=True)

    # name index levels
    result_table.index.levels[0].name = "features"
    result_table.index.levels[1].name = "datasets"

    # prepare confidence interval for mean difference
    mean_confs = _convert_multindex(mean_confs, {0: "mean_diff", 1: "mean_flag"})
    prop_confs = _convert_multindex(prop_confs, {0: "prop_diff", 1: "prop_flag"})

    # join with mean difference confidence intervals dataframe
    result_table = result_table.join(mean_confs, how="outer")
    # join with mean difference confidence intervals dataframe
    result_table = result_table.join(prop_confs, how="outer")
    # join with actual mean confidence intervals dataframe
    result_table = result_table.join(conf_invs, how="outer")
    # join with counts dataframe to display number of datapoint for each comparison
    result_table = result_table.join(counts, how="outer")

    return result_table.sort_index()