Python numpy.bincount() Examples

The following are 30 code examples showing how to use numpy.bincount(). They are extracted from open source projects; the project, author, file, and license are listed above each example.

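Before the project code, here is a minimal sketch of what numpy.bincount() itself computes: plain counts, weighted counts, and zero-padding via minlength. All values below are illustrative.

import numpy as np

x = np.array([0, 1, 1, 3, 2, 1, 7])

# Plain counts: result[i] is the number of occurrences of i in x.
np.bincount(x)                 # array([1, 3, 1, 1, 0, 0, 0, 1])

# Weighted counts: result[i] is the sum of w where x == i.
w = np.array([0.3, 0.5, 0.2, 0.7, 1.0, 0.6, 2.0])
np.bincount(x, weights=w)      # array([0.3, 1.3, 1. , 0.7, 0. , 0. , 0. , 2. ])

# minlength pads the result with trailing zeros up to a fixed length.
np.bincount(x, minlength=10)   # array([1, 3, 1, 1, 0, 0, 0, 1, 0, 0])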

Example 1
Project: xrft   Author: xgcm   File: xrft.py    License: MIT License
def _radial_wvnum(k, l, N, nfactor):
    """ Creates a radial wavenumber based on two horizontal wavenumbers
    along with the appropriate index map
    """

    # compute target wavenumbers
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis,:]**2 + l[:,np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    # compute bin index
    kidx = np.digitize(np.ravel(K), ki)
    # compute number of points for each wavenumber
    area = np.bincount(kidx)
    # compute the average radial wavenumber for each bin
    kr = (np.bincount(kidx, weights=K.ravel())
          / np.ma.masked_where(area==0, area))

    return ki, kr[1:-1] 
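
Note the pattern above: a weighted np.bincount() divided by an unweighted one yields a per-bin average (here, the mean radial wavenumber in each bin). A standalone sketch of the same idea, with toy arrays not taken from xrft:

import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
bins = np.array([0, 1, 1, 0, 2])           # bin index of each value

sums = np.bincount(bins, weights=values)   # per-bin sums:   [5. 5. 5.]
counts = np.bincount(bins)                 # per-bin counts: [2 2 1]
means = sums / counts                      # per-bin means:  [2.5 2.5 5. ]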
Example 2
Project: discomll   Author: romanorac   File: distributed_random_forest.py    License: Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, type in enumerate(state["X_meta"]):
            if type == "c":
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example 3
Project: discomll   Author: romanorac   File: forest_distributed_decision_trees.py    License: Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, type in enumerate(state["X_meta"]):
            if type == "c":
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example 4
Project: overhaul-distillation   Author: clovaai   File: calculate_weights.py    License: MIT License
def calculate_weigths_labels(dataset, dataloader, num_classes):
    # Create an instance from the data loader
    z = np.zeros((num_classes,))
    # Initialize tqdm
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        y = sample['label']
        y = y.detach().cpu().numpy()
        mask = (y >= 0) & (y < num_classes)
        labels = y[mask].astype(np.uint8)
        count_l = np.bincount(labels, minlength=num_classes)
        z += count_l
    tqdm_batch.close()
    total_frequency = np.sum(z)
    class_weights = []
    for frequency in z:
        class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
        class_weights.append(class_weight)
    ret = np.array(class_weights)
    classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset+'_classes_weights.npy')
    np.save(classes_weights_path, ret)

    return ret 
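
The weighting above maps each class frequency p to 1 / ln(1.02 + p), so rare classes receive large weights and frequent classes are damped. A small numeric sketch of just that step, with toy counts not taken from the project:

import numpy as np

z = np.array([900.0, 90.0, 10.0])   # per-class pixel counts
p = z / z.sum()                     # frequencies: [0.9, 0.09, 0.01]
w = 1.0 / np.log(1.02 + p)          # approximately [1.5, 9.6, 33.8]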
Example 5
Project: pytorch-segmentation-toolbox   Author: speedinghzl   File: evaluate.py    License: MIT License
def get_confusion_matrix(gt_label, pred_label, class_num):
        """
        Calculate the confusion matrix from the given ground-truth and predicted labels
        :param gt_label: the ground truth label
        :param pred_label: the predicted label
        :param class_num: the number of classes
        :return: the confusion matrix
        """
        index = (gt_label * class_num + pred_label).astype('int32')
        label_count = np.bincount(index)
        confusion_matrix = np.zeros((class_num, class_num))

        for i_label in range(class_num):
            for i_pred_label in range(class_num):
                cur_index = i_label * class_num + i_pred_label
                if cur_index < len(label_count):
                    confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

        return confusion_matrix 
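
The double loop that unpacks label_count can be avoided entirely: passing minlength=class_num**2 to np.bincount() guarantees a count vector that reshapes directly into the matrix. A minimal sketch of that variant (not from the original project; it assumes all labels lie in [0, class_num)):

import numpy as np

def get_confusion_matrix_fast(gt_label, pred_label, class_num):
    # Encode each (gt, pred) pair as one integer, count the pairs in a
    # single pass, then unfold into a class_num x class_num matrix.
    index = (gt_label * class_num + pred_label).astype('int32')
    counts = np.bincount(index.ravel(), minlength=class_num ** 2)
    return counts.reshape(class_num, class_num)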
Example 6
Project: simnibs   Author: simnibs   File: electrode_placement.py    License: GNU General Public License v3.0
def _optimize_2D(nodes, triangles, stay=[]):
    ''' Optimize the locations of the points by moving them towards the center
    of their patch. This is done iteratively for all points, for a fixed number
    of iterations, using a 0.05 step length'''
    edges, tr_edges, adjacency_list = _edge_list(triangles)
    boundary = edges[adjacency_list[:, 1] == -1].reshape(-1)
    stay = np.union1d(boundary, stay)
    stay = stay.astype(int)
    n_iter = 5
    step_length = .05
    mean_bar = np.zeros_like(nodes)
    new_nodes = np.copy(nodes)
    k = np.bincount(triangles.reshape(-1), minlength=len(nodes))
    for n in range(n_iter):
        bar = np.mean(new_nodes[triangles], axis=1)
        for i in range(2):
            mean_bar[:, i] = np.bincount(triangles.reshape(-1),
                                         weights=np.repeat(bar[:, i], 3),
                                         minlength=len(nodes))
        mean_bar /= k[:, None]
        new_nodes += step_length * (mean_bar - new_nodes)
        new_nodes[stay] = nodes[stay]
    return new_nodes 
Example 7
Project: simnibs   Author: simnibs   File: mesh_io.py    License: GNU General Public License v3.0
def nodes_areas(self):
        ''' Areas for all nodes in a surface

        Returns
        ---------
        nd: NodeData
            NodeData structure with the area associated with each node

        '''
        areas = self.elements_volumes_and_areas()[self.elm.triangles]
        triangle_nodes = self.elm[self.elm.triangles, :3] - 1
        nd = np.bincount(
            triangle_nodes.reshape(-1),
            np.repeat(areas/3., 3), self.nodes.nr
        )

        return NodeData(nd, 'areas') 
Example 8
Project: License-Plate-Recognition   Author: wzh191920   File: predict.py    License: MIT License
def preprocess_hog(digits):
	samples = []
	for img in digits:
		gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
		gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
		mag, ang = cv2.cartToPolar(gx, gy)
		bin_n = 16
		bin = np.int32(bin_n*ang/(2*np.pi))
		bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
		mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
		hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
		hist = np.hstack(hists)
		
		# transform to Hellinger kernel
		eps = 1e-7
		hist /= hist.sum() + eps
		hist = np.sqrt(hist)
		hist /= norm(hist) + eps
		
		samples.append(hist)
	return np.float32(samples)
# Cannot guarantee that all provinces are covered
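
The call np.bincount(b.ravel(), m.ravel(), bin_n) above builds a magnitude-weighted orientation histogram: each pixel votes for its orientation bin with a weight equal to its gradient magnitude, and the third argument (minlength) fixes the histogram length. The same call in isolation, with toy values:

import numpy as np

orientations = np.array([0, 3, 3, 1])         # orientation bin per pixel
magnitudes = np.array([0.5, 1.0, 2.0, 0.25])  # gradient magnitude per pixel

hist = np.bincount(orientations, weights=magnitudes, minlength=4)
# hist: [0.5, 0.25, 0., 3.] -- bin 3 collects 1.0 + 2.0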
Example 9
Project: Fast_Seg   Author: lxtGH   File: val.py    License: Apache License 2.0
def get_confusion_matrix(gt_label, pred_label, class_num):
    """
    Calculate the confusion matrix from the given ground-truth and predicted labels
    :param gt_label: the ground truth label
    :param pred_label: the predicted label
    :param class_num: the number of classes
    :return: the confusion matrix
    """
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))

    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            cur_index = i_label * class_num + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix 
Example 10
Project: AerialDetection   Author: dingjiansw101   File: sampler.py    License: Apache License 2.0
def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        if num_replicas is None:
            num_replicas = get_world_size()
        if rank is None:
            rank = get_rank()
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas 
Example 11
Project: Deep-Feature-Flow-Segmentation   Author: tonysy   File: cityscape_video.py    License: MIT License
def get_confusion_matrix(self, gt_label, pred_label, class_num):
        """
        Calculate the confusion matrix from the given ground-truth and predicted labels
        :param gt_label: the ground truth label
        :param pred_label: the predicted label
        :param class_num: the number of classes
        :return: the confusion matrix
        """
        index = (gt_label * class_num + pred_label).astype('int32')
        label_count = np.bincount(index)
        confusion_matrix = np.zeros((class_num, class_num))

        for i_label in range(class_num):
            for i_pred_label in range(class_num):
                cur_index = i_label * class_num + i_pred_label
                if cur_index < len(label_count):
                    confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

        return confusion_matrix 
Example 12
Project: Deep-Feature-Flow-Segmentation   Author: tonysy   File: pascal_voc.py    License: MIT License
def get_confusion_matrix(self, gt_label, pred_label, class_num):
        """
        Calculate the confusion matrix from the given ground-truth and predicted labels
        :param gt_label: the ground truth label
        :param pred_label: the predicted label
        :param class_num: the number of classes
        :return: the confusion matrix
        """
        index = (gt_label * class_num + pred_label).astype('int32')
        label_count = np.bincount(index)
        confusion_matrix = np.zeros((class_num, class_num))

        for i_label in range(class_num):
            for i_pred_label in range(class_num):
                cur_index = i_label * class_num + i_pred_label
                if cur_index < len(label_count):
                    confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

        return confusion_matrix 
Example 13
Project: Deep-Feature-Flow-Segmentation   Author: tonysy   File: cityscape.py    License: MIT License
def get_confusion_matrix(self, gt_label, pred_label, class_num):
        """
        Calculate the confusion matrix from the given ground-truth and predicted labels
        :param gt_label: the ground truth label
        :param pred_label: the predicted label
        :param class_num: the number of classes
        :return: the confusion matrix
        """
        index = (gt_label * class_num + pred_label).astype('int32')
        label_count = np.bincount(index)
        confusion_matrix = np.zeros((class_num, class_num))

        for i_label in range(class_num):
            for i_pred_label in range(class_num):
                cur_index = i_label * class_num + i_pred_label
                if cur_index < len(label_count):
                    confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

        return confusion_matrix 
Example 14
Project: recruit   Author: Frank-qlu   File: test_function_base.py    License: Apache License 2.0
def test_with_incorrect_minlength(self):
        x = np.array([], dtype=int)
        assert_raises_regex(TypeError,
                            "'str' object cannot be interpreted",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError,
                            "must not be negative",
                            lambda: np.bincount(x, minlength=-1))

        x = np.arange(5)
        assert_raises_regex(TypeError,
                            "'str' object cannot be interpreted",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError,
                            "must not be negative",
                            lambda: np.bincount(x, minlength=-1)) 
Example 15
Project: cgpm   Author: probcomp   File: engine.py    License: Apache License 2.0
def _likelihood_weighted_resample(self, samples, rowid, constraints=None,
            inputs=None, statenos=None, multiprocess=1):
        assert len(samples) == \
            len(self.states) if statenos is None else len(statenos)
        assert all(len(s) == len(samples[0]) for s in samples[1:])
        N = len(samples[0])
        weights = np.zeros(len(samples)) if not constraints else \
            self.logpdf(rowid, constraints, inputs,
                statenos=statenos, multiprocess=multiprocess)
        n_model = np.bincount(gu.log_pflip(weights, size=N, rng=self.rng))
        indexes = [self.rng.choice(N, size=n, replace=False) for n in n_model]
        resamples = [
            [s[i] for i in index]
            for s, index in zip(samples, indexes)
            if len(index) > 0
        ]
        return list(itertools.chain.from_iterable(resamples))

Example 16
Project: cgpm   Author: probcomp   File: plots.py    License: Apache License 2.0
def plot_dist_discrete(X, output, clusters, ax=None, Y=None, hist=True):
    # Create a new axis?
    if ax is None:
        _, ax = plt.subplots()
    # Set up x axis.
    X = np.asarray(X, dtype=int)
    x_max = max(X)
    Y = range(int(x_max)+1)
    X_hist = np.bincount(X) / float(len(X))
    ax.bar(Y, X_hist, color='gray', edgecolor='none')
    # Compute weighted pdfs
    pdf = np.zeros((len(clusters), len(Y)))
    W = [log(clusters[k].N) - log(float(len(X))) for k in clusters]
    for i, k in enumerate(clusters):
        pdf[i,:] = np.exp(
            [W[i] + clusters[k].logpdf(None, {output:y}) for y in Y])
        color, alpha = gu.curve_color(i)
        ax.bar(Y, pdf[i,:], color=color, edgecolor='none', alpha=alpha)
    # Plot the sum of pdfs.
    ax.bar(
        Y, np.sum(pdf, axis=0), color='none', edgecolor='black', linewidth=3)
    ax.set_xlim([0, x_max+1])
    # Title.
    ax.set_title(list(clusters.values())[0].name())  # list() needed on Python 3
    return ax 
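
Dividing np.bincount(X) by len(X), as X_hist does above, turns raw counts into an empirical probability mass function. A tiny sketch:

import numpy as np

X = np.array([0, 1, 1, 2, 1])
pmf = np.bincount(X) / float(len(X))   # [0.2, 0.6, 0.2]
assert np.isclose(pmf.sum(), 1.0)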
Example 17
Project: cgpm   Author: probcomp   File: test_crp.py    License: Apache License 2.0
def test_crp_decrement(N, alpha, seed):
    A = gu.simulate_crp(N, alpha, rng=gu.gen_rng(seed))
    Nk = list(np.bincount(A))
    # Decrement all counts by 1.
    Nk = [n-1 if n > 1 else n for n in Nk]

    # Decrement rowids.
    crp = simulate_crp_gpm(N, alpha, rng=gu.gen_rng(seed))
    targets = [c for c in crp.counts if crp.counts[c] > 1]
    seen = set([])
    for r, c in crp.data.items():
        if c in targets and c not in seen:
            seen.add(c)
            crp.unincorporate(r)
        if seen == len(targets):
            break

    assert_crp_equality(alpha, Nk, crp) 
Example 18
Project: cgpm   Author: probcomp   File: test_crp.py    License: Apache License 2.0
def test_crp_increment(N, alpha, seed):
    A = gu.simulate_crp(N, alpha, rng=gu.gen_rng(seed))
    Nk = list(np.bincount(A))
    # Add 3 new classes.
    Nk.extend([2, 3, 1])

    crp = simulate_crp_gpm(N, alpha, rng=gu.gen_rng(seed))
    # Increment rowids.
    rowid = max(crp.data)
    clust = max(crp.data.values())
    crp.incorporate(rowid+1, {0:clust+1}, None)
    crp.incorporate(rowid+2, {0:clust+1}, None)
    crp.incorporate(rowid+3, {0:clust+2}, None)
    crp.incorporate(rowid+4, {0:clust+2}, None)
    crp.incorporate(rowid+5, {0:clust+2}, None)
    crp.incorporate(rowid+6, {0:clust+3}, None)

    assert_crp_equality(alpha, Nk, crp) 
Example 19
Project: cgpm   Author: probcomp   File: test_normal_categorical.py    License: Apache License 2.0
def test_conditional_real(state):
    # Simulate from the conditional Z|X
    fig, axes = plt.subplots(2,3)
    fig.suptitle('Conditional Simulation Of Indicator Z Given Data X')
    # Compute representative data sample for each indicator.
    means = [np.mean(DATA[DATA[:,1]==t], axis=0)[0] for t in INDICATORS]
    for mean, indicator, ax in zip(means, INDICATORS, axes.ravel('F')):
        samples_subpop = [s[1] for s in
            state.simulate(-1, [1], {0:mean}, None, N_SAMPLES)]
        ax.hist(samples_subpop, color='g', alpha=.4)
        ax.set_title('True Indicator %d' % indicator)
        ax.set_xlabel('Simulated Indicator')
        ax.set_xticks(INDICATORS)
        ax.set_ylabel('Frequency')
        ax.set_ylim([0, ax.get_ylim()[1]+10])
        ax.grid()
        # Check that the simulated indicator agrees with true indicator.
        true_ind_a = indicator
        true_ind_b = indicator-1  if indicator % 2 else indicator+1
        counts = np.bincount(samples_subpop)
        frac = sum(counts[[true_ind_a, true_ind_b]])/float(sum(counts))
        assert .8 < frac 
Example 20
Project: cgpm   Author: probcomp   File: test_mvkde.py    License: Apache License 2.0
def test_univariate_categorical():
    # This test generates univariate data from a nominal variable with 6 levels
    # and probability vector p_theory, and performs a chi-square test on
    # posterior samples from MvKde.

    rng = gu.gen_rng(2)
    N_SAMPLES = 1000
    p_theory = [.3, .1, .2, .15, .15, .1]
    samples_test = rng.choice(range(6), p=p_theory, size=N_SAMPLES)
    kde = MultivariateKde(
        [7], None, distargs={O: {ST: [C], SA:[{'k': 6}]}}, rng=rng)
    # Incorporate observations.
    for rowid, x in enumerate(samples_test):
        kde.incorporate(rowid, {7: x})
    kde.transition()
    # Posterior samples.
    samples_gen = kde.simulate(-1, [7], N=N_SAMPLES)
    f_obs = np.bincount([s[7] for s in samples_gen])
    f_exp = np.bincount(samples_test)
    _, pval = chisquare(f_obs, f_exp)
    assert 0.05 < pval
    # Get some coverage on logpdf_score.
    assert kde.logpdf_score() < 0 
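
One caveat when comparing two bincount outputs, as the chi-square test above does: the arrays only line up if both cover every category, because bincount's length depends on the largest value actually observed. Passing minlength makes the lengths explicit; a sketch of that guard (K = 6 matches this test's six levels):

import numpy as np

K = 6
f_obs = np.bincount(np.array([0, 1, 1, 5]), minlength=K)
f_exp = np.bincount(np.array([0, 0, 2, 5]), minlength=K)
assert f_obs.shape == f_exp.shape   # safe to compare bin by bin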
Example 21
Project: mmdetection   Author: open-mmlab   File: group_sampler.py    License: Apache License 2.0
def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for i, size in enumerate(self.group_sizes):
            self.num_samples += int(np.ceil(
                size / self.samples_per_gpu)) * self.samples_per_gpu 
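
Here np.bincount() counts how many dataset items carry each flag value, and every group is then padded up to a multiple of samples_per_gpu. A standalone sketch of that arithmetic with a toy flag array:

import numpy as np

flag = np.array([0, 0, 1, 1, 1, 1, 1])   # group label per sample
samples_per_gpu = 2

group_sizes = np.bincount(flag)          # [2 5]
num_samples = sum(
    int(np.ceil(size / samples_per_gpu)) * samples_per_gpu
    for size in group_sizes)             # 2 + 6 = 8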
Example 22
Project: mmdetection   Author: open-mmlab   File: group_sampler.py    License: Apache License 2.0
def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas 
Example 23
Project: PolarSeg   Author: edwardzhou130   File: test_pretrain.py    License: BSD 3-Clause "New" or "Revised" License
def fast_hist(pred, label, n):
    k = (label >= 0) & (label < n)
    bin_count=np.bincount(
        n * label[k].astype(int) + pred[k], minlength=n ** 2)
    return bin_count[:n ** 2].reshape(n, n) 
Example 24
Project: PolarSeg   Author: edwardzhou130   File: train.py    License: BSD 3-Clause "New" or "Revised" License
def fast_hist(pred, label, n):
    k = (label >= 0) & (label < n)
    bin_count=np.bincount(
        n * label[k].astype(int) + pred[k], minlength=n ** 2)
    return bin_count[:n ** 2].reshape(n, n) 
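
fast_hist() returns an n x n confusion matrix, from which per-class IoU follows directly. A usage sketch, assuming fast_hist from the example above is in scope (the per_class_iou helper is illustrative, not part of PolarSeg):

import numpy as np

def per_class_iou(hist):
    # IoU = true positives / (gt pixels + predicted pixels - true positives)
    tp = np.diag(hist)
    denom = hist.sum(axis=1) + hist.sum(axis=0) - tp
    return tp / np.maximum(denom, 1)

label = np.array([0, 0, 1, 1])
pred = np.array([0, 1, 1, 1])
print(per_class_iou(fast_hist(pred, label, n=2)))   # [0.5, 0.667]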
Example 25
Project: discomll   Author: romanorac   File: distributed_weighted_forest.py    License: Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest, medoids, stats, gower_ranges, group_fillins = [], [], [], [], []
    for i, (k, value) in enumerate(inp):
        if k == "model":
            forest.append(value[0])
            medoids.append(value[1])
            stats.append(value[2])
            gower_ranges.append(value[3])
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)
    out.add("medoids", medoids)
    out.add("stats", stats)
    out.add("gower_ranges", gower_ranges)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, type in enumerate(state["X_meta"]):
            if type == "c":
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example 26
Project: discomll   Author: romanorac   File: measures.py    License: Apache License 2.0
def info_gain_numeric(x, y, accuracy):
    x_unique = list(np.unique(x))
    if len(x_unique) == 1:
        return None
    indices = x.argsort()  # sort numeric attribute
    x, y = x[indices], y[indices]  # save sorted features with sorted labels

    right_dist = np.bincount(y)
    dummy_class = np.array([len(right_dist)])
    class_indices = right_dist.nonzero()[0]
    right_dist = right_dist[class_indices]
    left_dist = np.zeros(len(class_indices))

    diffs = np.nonzero(y[:-1] != y[1:])[0] + 1  # indices where the class label changes between neighbors
    if accuracy > 0:
        diffs = np.array([diffs[i] for i in range(1, len(diffs)) if diffs[i] - diffs[i - 1] > accuracy],
                         dtype=np.int32) if len(diffs) > 15 else diffs
    intervals = np.array((np.concatenate(([0], diffs[:-1])), diffs)).T
    if len(diffs) < 2:
        return None

    max_ig, max_i, max_j = 0, 0, 0
    prior_h = h(right_dist)  # calculate prior entropy

    for i, j in intervals:
        dist = np.bincount(np.concatenate((dummy_class, y[i:j])))[class_indices]
        left_dist += dist
        right_dist -= dist
        coef = np.true_divide((np.sum(left_dist), np.sum(right_dist)), len(y))
        ig = prior_h - np.dot(coef, [h(left_dist[left_dist.nonzero()]), h(right_dist[right_dist.nonzero()])])
        if ig > max_ig:
            max_ig, max_i, max_j = ig, i, j

    if x[max_i] == x[max_j]:
        ind = x_unique.index(x[max_i])
        mean = np.float32(np.mean((x_unique[1 if ind == 0 else ind - 1], x_unique[ind])))
    else:
        mean = np.float32(np.mean((x[max_i], x[max_j])))

    return float(max_ig), [mean, mean] 
Example 27
Project: DOTA_models   Author: ringringyi   File: depth_utils.py    License: Apache License 2.0
def bin_points(XYZ_cms, map_size, z_bins, xy_resolution):
  """Bins points into xy-z bins
  XYZ_cms is ... x H x W x3
  Outputs is ... x map_size x map_size x (len(z_bins)+1)
  """
  sh = XYZ_cms.shape
  XYZ_cms = XYZ_cms.reshape([-1, sh[-3], sh[-2], sh[-1]])
  n_z_bins = len(z_bins)+1
  map_center = (map_size-1.)/2.
  counts = []
  isvalids = []
  for XYZ_cm in XYZ_cms:
    isnotnan = np.logical_not(np.isnan(XYZ_cm[:,:,0]))
    X_bin = np.round(XYZ_cm[:,:,0] / xy_resolution + map_center).astype(np.int32)
    Y_bin = np.round(XYZ_cm[:,:,1] / xy_resolution + map_center).astype(np.int32)
    Z_bin = np.digitize(XYZ_cm[:,:,2], bins=z_bins).astype(np.int32)

    isvalid = np.array([X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size,
                        Z_bin >= 0, Z_bin < n_z_bins, isnotnan])
    isvalid = np.all(isvalid, axis=0)

    ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
    ind[np.logical_not(isvalid)] = 0
    count = np.bincount(ind.ravel(), isvalid.ravel().astype(np.int32),
                         minlength=map_size*map_size*n_z_bins)
    count = np.reshape(count, [map_size, map_size, n_z_bins])
    counts.append(count)
    isvalids.append(isvalid)
  counts = np.array(counts).reshape(list(sh[:-3]) + [map_size, map_size, n_z_bins])
  isvalids = np.array(isvalids).reshape(list(sh[:-3]) + [sh[-3], sh[-2], 1])
  return counts, isvalids 
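
bin_points() collapses the (y, x, z) bin coordinates into a single linear index so that one np.bincount() call can accumulate the whole 3-D grid, which is then reshaped back. A minimal 2-D sketch of the same linear-index trick, with toy data:

import numpy as np

rows = np.array([0, 0, 1, 1, 1])
cols = np.array([0, 1, 0, 0, 1])
n_rows, n_cols = 2, 2

lin = rows * n_cols + cols                          # linear index per point
grid = np.bincount(lin, minlength=n_rows * n_cols).reshape(n_rows, n_cols)
# grid: [[1 1]
#        [2 1]]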
Example 28
Project: DOTA_models   Author: ringringyi   File: map_utils.py    License: Apache License 2.0
def _fill_holes(img, thresh):
  """Fills holes less than thresh area (assumes 4 connectivity when computing
  hole area."""
  l, n = scipy.ndimage.label(np.logical_not(img))
  img_ = img == True
  cnts = np.bincount(l.reshape(-1))
  for i, cnt in enumerate(cnts):
    if cnt < thresh:
      l[l == i] = -1
  img_[l == -1] = True
  return img_ 
Example 29
Project: DOTA_models   Author: ringringyi   File: map_utils.py    License: Apache License 2.0
def pick_largest_cc(traversible):
  out = scipy.ndimage.label(traversible)[0]
  cnt = np.bincount(out.reshape(-1))[1:]
  return out == np.argmax(cnt) + 1 
Example 30
Project: DOTA_models   Author: ringringyi   File: aggregation.py    License: Apache License 2.0
def aggregation_most_frequent(logits):
  """
  This aggregation mechanism takes the softmax/logit output of several models
  resulting from inference on identical inputs and computes the most frequent
  label. It is deterministic (no noise injection like noisy_max() above).
  :param logits: logits or probabilities for each sample
  :return:
  """
  # Compute labels from logits/probs and reshape array properly
  labels = labels_from_probs(logits)
  labels_shape = np.shape(labels)
  labels = labels.reshape((labels_shape[0], labels_shape[1]))

  # Initialize array to hold final labels
  result = np.zeros(int(labels_shape[1]))

  # Parse each sample
  for i in range(int(labels_shape[1])):  # xrange in the original Python 2 source
    # Count number of votes assigned to each class
    label_counts = np.bincount(labels[:, i], minlength=10)

    label_counts = np.asarray(label_counts, dtype=np.int32)

    # Result is the most frequent label
    result[i] = np.argmax(label_counts)

  return np.asarray(result, dtype=np.int32)
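
The per-sample loop above can also be collapsed into array operations. A sketch of an equivalent vectorised majority vote, assuming (as the original does) that all labels lie in [0, 10):

import numpy as np

def majority_vote(labels, num_classes=10):
    # labels: (n_models, n_samples) array of per-model integer predictions.
    counts = np.apply_along_axis(
        np.bincount, 0, labels, minlength=num_classes)  # (num_classes, n_samples)
    return np.argmax(counts, axis=0).astype(np.int32)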