Python torch.bincount() Examples

The following are 30 code examples of torch.bincount(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
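As a quick orientation before the project examples, here is a minimal, self-contained sketch (illustrative values, not taken from any project below): torch.bincount() counts how often each non-negative integer occurs, optionally weighting each occurrence and padding the result to a minimum length.

import torch

x = torch.tensor([0, 1, 1, 3, 3, 3])
print(torch.bincount(x))               # tensor([1, 2, 0, 3])
print(torch.bincount(x, minlength=6))  # tensor([1, 2, 0, 3, 0, 0])

# With float weights, each occurrence contributes its weight instead of 1:
w = torch.tensor([0.5, 1.0, 1.0, 0.25, 0.25, 0.5])
print(torch.bincount(x, weights=w))    # tensor([0.5000, 2.0000, 0.0000, 1.0000])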
Example #1
Source File: transfer_labels.py    From geomloss with MIT License    6 votes
def KMeans(x_i, c_j, Nits = 10, ranges = None):

    D = x_i.shape[1]
    for i in range(Nits):
        # Points -> Nearest cluster
        labs_i = nn_search(x_i, c_j, ranges = ranges)
        # Class cardinals:
        Ncl = torch.bincount(labs_i.view(-1)).type(dtype)
        # Compute the cluster centroids with torch.bincount:
        for d in range(D):  # Unfortunately, vector weights are not supported...
            c_j[:, d] = torch.bincount(labs_i.view(-1), weights=x_i[:, d]) / Ncl
    
    return c_j, labs_i


##############################################
# On the subject
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# For a new (unlabelled) subject, we perform a simple K-means
# on R^60 to obtain a clustering of the data.
# 
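Since torch.bincount() only accepts a 1-D weights vector, the KMeans above updates the centroids one dimension at a time (nn_search and dtype come from the surrounding geomloss script). As a rough alternative sketch (centroid_update is a hypothetical helper, not part of geomloss), the same update can be done in a single call with index_add_:

import torch

def centroid_update(x_i, labs_i, K):
    # Sum the coordinates of all points assigned to each cluster at once,
    # instead of one torch.bincount call per dimension.
    sums = torch.zeros(K, x_i.shape[1], dtype=x_i.dtype, device=x_i.device)
    sums.index_add_(0, labs_i.view(-1), x_i)
    counts = torch.bincount(labs_i.view(-1), minlength=K).clamp(min=1)
    return sums / counts.to(x_i.dtype).unsqueeze(1)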
Example #2
Source File: tu.py    From pytorch_geometric with MIT License    6 votes
def split(data, batch):
    node_slice = torch.cumsum(torch.from_numpy(np.bincount(batch)), 0)
    node_slice = torch.cat([torch.tensor([0]), node_slice])

    row, _ = data.edge_index
    edge_slice = torch.cumsum(torch.from_numpy(np.bincount(batch[row])), 0)
    edge_slice = torch.cat([torch.tensor([0]), edge_slice])

    # Edge indices should start at zero for every graph.
    data.edge_index -= node_slice[batch[row]].unsqueeze(0)
    data.__num_nodes__ = torch.bincount(batch).tolist()

    slices = {'edge_index': edge_slice}
    if data.x is not None:
        slices['x'] = node_slice
    if data.edge_attr is not None:
        slices['edge_attr'] = edge_slice
    if data.y is not None:
        if data.y.size(0) == batch.size(0):
            slices['y'] = node_slice
        else:
            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)

    return data, slices 
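A toy illustration (not part of tu.py) of the bincount-plus-cumsum pattern split() uses to turn per-node graph ids into slice boundaries:

import torch

batch = torch.tensor([0, 0, 0, 1, 1, 2])  # graph id of each node
counts = torch.bincount(batch)            # nodes per graph: tensor([3, 2, 1])
node_slice = torch.cat([torch.tensor([0]), torch.cumsum(counts, 0)])
print(node_slice)                         # tensor([0, 3, 5, 6])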
Example #3
Source File: qlinear.py    From fairseq with MIT License    6 votes
def __init__(self, centroids, assignments, bias, in_features, out_features):
        super(PQLinear, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.in_features = in_features
        self.out_features = out_features
        # check compatibility
        if self.in_features % self.block_size != 0:
            raise ValueError("Wrong PQ sizes")
        if len(assignments) % self.out_features != 0:
            raise ValueError("Wrong PQ sizes")
        # define parameters
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer("assignments", assignments)
        self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
        if bias is not None:
            self.bias = nn.Parameter(bias)
        else:
            self.register_parameter("bias", None) 
Example #4
Source File: drmm.py    From OpenNIR with MIT License    6 votes
def forward(self, simmat, dlens, dtoks, qtoks):
        BATCH, CHANNELS, QLEN, DLEN = simmat.shape

        # +1e-5 to nudge scores of 1 to above threshold
        bins = ((simmat + 1.00001) / 2. * (self.bins - 1)).int()
        weights = ((dtoks != -1).reshape(BATCH, 1, DLEN).expand(BATCH, QLEN, DLEN) * \
                      (qtoks != -1).reshape(BATCH, QLEN, 1).expand(BATCH, QLEN, DLEN)).float()
        # apparently no way to batch this... https://discuss.pytorch.org/t/histogram-function-in-pytorch/5350
        bins, weights = bins.cpu(), weights.cpu() # WARNING: this line (and the similar line below) improves performance tenfold when on GPU
        histogram = []
        for superbins, w in zip(bins, weights):
            result = []
            for b in superbins:
                result.append(torch.stack([torch.bincount(q, x, self.bins) for q, x in zip(b, w)], dim=0))
            result = torch.stack(result, dim=0)
            histogram.append(result)
        histogram = torch.stack(histogram, dim=0)
        histogram = histogram.to(simmat.device) # WARNING: this line (and the similar line above) improves performance tenfold when on GPU
        return histogram 
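The comment above says there is apparently no way to batch the histogram, but a common workaround (a sketch under that assumption, not code from OpenNIR; batched_weighted_histogram is a hypothetical helper) is to offset each row's bin ids into a disjoint range so a single weighted bincount covers all rows:

import torch

def batched_weighted_histogram(bins, weights, nbins):
    # bins, weights: (rows, n), with bin ids in [0, nbins). Shift row r's ids
    # by r * nbins so every row occupies its own slice of one flat bincount.
    rows = bins.shape[0]
    flat = (bins.long() + torch.arange(rows, device=bins.device).unsqueeze(1) * nbins).view(-1)
    hist = torch.bincount(flat, weights=weights.reshape(-1), minlength=rows * nbins)
    return hist.view(rows, nbins)

Note that this still loses gradients through torch.bincount, like the loop it replaces.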
Example #5
Source File: confusion_matrix.py    From ignite with BSD 3-Clause "New" or "Revised" License    6 votes
def update(self, output: Sequence[torch.Tensor]) -> None:
        self._check_shape(output)
        y_pred, y = output

        self._num_examples += y_pred.shape[0]

        # target is (batch_size, ...)
        y_pred = torch.argmax(y_pred, dim=1).flatten()
        y = y.flatten()

        target_mask = (y >= 0) & (y < self.num_classes)
        y = y[target_mask]
        y_pred = y_pred[target_mask]

        indices = self.num_classes * y + y_pred
        m = torch.bincount(indices, minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
        self.confusion_matrix += m.to(self.confusion_matrix) 
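The indices = num_classes * y + y_pred line above is a common encoding trick: each (true, predicted) pair maps to a unique bin, so one bincount plus a reshape yields the full confusion matrix. In isolation, with made-up values:

import torch

num_classes = 3
y      = torch.tensor([0, 1, 2, 2])  # ground truth
y_pred = torch.tensor([0, 2, 2, 1])  # predictions
cm = torch.bincount(num_classes * y + y_pred,
                    minlength=num_classes ** 2).reshape(num_classes, num_classes)
print(cm)
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 1]])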
Example #6
Source File: modeling_util.py    From cedr with MIT License    6 votes
def forward(self, simmat, dtoks, qtoks):
        # THIS IS SLOW ... Any way to make this faster? Maybe it's not worth doing on GPU?
        BATCH, CHANNELS, QLEN, DLEN = simmat.shape
        # +1e-5 to nudge scores of 1 to above threshold
        bins = ((simmat + 1.000001) / 2. * (self.bins - 1)).int()
        # set weights of 0 for padding (in both query and doc dims)
        weights = ((dtoks != -1).reshape(BATCH, 1, DLEN).expand(BATCH, QLEN, DLEN) * \
                  (qtoks != -1).reshape(BATCH, QLEN, 1).expand(BATCH, QLEN, DLEN)).float()

        # no way to batch this... loses gradients here. https://discuss.pytorch.org/t/histogram-function-in-pytorch/5350
        bins, weights = bins.cpu(), weights.cpu()
        histogram = []
        for superbins, w in zip(bins, weights):
            result = []
            for b in superbins:
                result.append(torch.stack([torch.bincount(q, x, self.bins) for q, x in zip(b, w)], dim=0))
            result = torch.stack(result, dim=0)
            histogram.append(result)
        histogram = torch.stack(histogram, dim=0)

        # back to GPU
        histogram = histogram.to(simmat.device)
        return (histogram.float() + 1e-5).log() 
Example #7
Source File: metrics.py    From seismic-deeplearning with MIT License    6 votes
def _torch_hist(label_true, label_pred, n_class):
    """Calculates the confusion matrix for the labels
    
    Args:
        label_true ([type]): [description]
        label_pred ([type]): [description]
        n_class ([type]): [description]
    
    Returns:
        [type]: [description]
    """
    
    assert len(label_true.shape) == 1, "Labels need to be 1D"
    assert len(label_pred.shape) == 1, "Predictions need to be 1D"
    mask = (label_true >= 0) & (label_true < n_class)
    hist = torch.bincount(n_class * label_true[mask] + label_pred[mask], minlength=n_class ** 2).reshape(
        n_class, n_class
    )
    return hist 
Example #8
Source File: qlinear.py    From attn2d with MIT License    6 votes
def __init__(self, centroids, assignments, bias, in_features, out_features):
        super(PQLinear, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.in_features = in_features
        self.out_features = out_features
        # check compatibility
        if self.in_features % self.block_size != 0:
            raise ValueError("Wrong PQ sizes")
        if len(assignments) % self.out_features != 0:
            raise ValueError("Wrong PQ sizes")
        # define parameters
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer("assignments", assignments)
        self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
        if bias is not None:
            self.bias = nn.Parameter(bias)
        else:
            self.register_parameter("bias", None) 
Example #9
Source File: segmenter.py    From gandissect with MIT License    6 votes
def test_main():
    '''
    Test the unified segmenter.
    '''
    from PIL import Image
    testim = Image.open('script/testdata/test_church_242.jpg')
    tensor_im = (torch.from_numpy(numpy.asarray(testim)).permute(2, 0, 1)
            .float() / 255 * 2 - 1)[None, :, :, :].cuda()
    segmenter = UnifiedParsingSegmenter()
    seg = segmenter.segment_batch(tensor_im)
    bc = torch.bincount(seg.view(-1))
    labels, cats = segmenter.get_label_and_category_names()
    for label in bc.nonzero()[:,0]:
        if label.item():
            # What is the prediction for this class?
            pred, mask = segmenter.predict_single_class(tensor_im, label.item())
            assert mask.sum().item() == bc[label].item()
            assert len(((seg == label).max(1)[0] - mask).nonzero()) == 0
            inside_pred = pred[mask].mean().item()
            outside_pred = pred[~mask].mean().item()
            print('%s (%s, #%d): %d pixels, pred %.2g inside %.2g outside' %
                (labels[label.item()] + (label.item(), bc[label].item(),
                    inside_pred, outside_pred))) 
Example #10
Source File: qemb.py    From attn2d with MIT License    5 votes
def __init__(self, centroids, assignments, num_embeddings, embedding_dim,
                     padding_idx=None, max_norm=None, norm_type=2.,
                     scale_grad_by_freq=False, sparse=False, _weight=None):
        super(PQEmbedding, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
            elif padding_idx < 0:
                assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        # check compatibility
        if self.embedding_dim % self.block_size != 0:
            raise ValueError("Wrong PQ sizes")
        if len(assignments) % self.num_embeddings != 0:
            raise ValueError("Wrong PQ sizes")
        # define parameters
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer("assignments", assignments)
        self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) 
Example #11
Source File: utils.py    From deconvolution with GNU General Public License v3.0    5 votes
def update(self, a, b):
        n = self.num_classes
        if self.mat is None:
            self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
        with torch.no_grad():
            k = (a >= 0) & (a < n)
            inds = n * a[k].to(torch.int64) + b[k]
            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)  # this line can take a long time
Example #12
Source File: classification.py    From pytorch-lightning with Apache License 2.0    5 votes
def confusion_matrix(
        pred: torch.Tensor,
        target: torch.Tensor,
        normalize: bool = False,
) -> torch.Tensor:
    """
    Computes the confusion matrix C where each entry C_{i,j} is the number of observations
    in group i that were predicted in group j.

    Args:
        pred: estimated targets
        target: ground truth labels
        normalize: normalizes confusion matrix

    Return:
        Tensor, confusion matrix C [num_classes, num_classes ]

    Example:

        >>> x = torch.tensor([1, 2, 3])
        >>> y = torch.tensor([0, 2, 3])
        >>> confusion_matrix(x, y)
        tensor([[0., 1., 0., 0.],
                [0., 0., 0., 0.],
                [0., 0., 1., 0.],
                [0., 0., 0., 1.]])
    """
    num_classes = get_num_classes(pred, target, None)

    unique_labels = target.view(-1) * num_classes + pred.view(-1)

    bins = torch.bincount(unique_labels, minlength=num_classes ** 2)
    cm = bins.reshape(num_classes, num_classes).squeeze().float()

    if normalize:
        cm = cm / cm.sum(-1, keepdim=True)  # normalize each row (true class)

    return cm 
Example #13
Source File: evaluatev2.py    From BiSeNet with MIT License    5 votes
def __call__(self, net, dl, n_classes):
        ## evaluate
        hist = torch.zeros(n_classes, n_classes).cuda().detach()
        if dist.is_initialized() and dist.get_rank() != 0:
            diter = enumerate(dl)
        else:
            diter = enumerate(tqdm(dl))
        for i, (imgs, label) in diter:
            N, _, H, W = label.shape
            label = label.squeeze(1).cuda()
            size = label.size()[-2:]
            imgs = imgs.cuda()
            logits = net(imgs)[0]
            logits = F.interpolate(logits, size=size,
                    mode='bilinear', align_corners=True)
            probs = torch.softmax(logits, dim=1)
            preds = torch.argmax(probs, dim=1)
            keep = label != self.ignore_label
            hist += torch.bincount(
                label[keep] * n_classes + preds[keep],
                minlength=n_classes ** 2
                ).view(n_classes, n_classes)
        if dist.is_initialized():
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
        miou = ious.mean()
        return miou.item() 
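The final lines compute the standard per-class IoU identity: with hist[i, j] counting pixels of true class i predicted as class j, the union for class c is its row sum plus its column sum minus the diagonal. A tiny numeric check (made-up 2-class hist):

import torch

hist = torch.tensor([[3., 1.],
                     [2., 4.]])
ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
print(ious)  # tensor([0.5000, 0.5714])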
Example #14
Source File: utils.py    From ray with Apache License 2.0    5 votes
def update(self, a, b):
        n = self.num_classes
        if self.mat is None:
            self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
        with torch.no_grad():
            k = (a >= 0) & (a < n)
            inds = n * a[k].to(torch.int64) + b[k]
            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n) 
Example #15
Source File: run.py    From predictions_balancing with MIT License    5 votes
def _get_labels_distribution(predicts, coefficients):
    predicts = _get_predicts(predicts, coefficients)
    labels = predicts.argmax(dim=-1)
    counter = torch.bincount(labels, minlength=predicts.shape[1])
    return counter 
Example #16
Source File: vot_torch.py    From pyvot with MIT License    5 votes
def update_p_base(e_idx, data_p, data_e):
        """ base function to update each p to the centroids of its cluster

        Args:
            e_idx (pytorch Tensor): assignment of e to p
            data_p (pytorch Tensor): cluster centroids, p
            data_e (pytorch Tensor): empirical samples, e
            p0 (pytorch Tensor): iteration index

        Returns:
            p0 (pytorch Tensor): new p
            max_change_pct (float): max_change
        """

        p0 = torch.zeros(data_p.shape).double()
        num_p = data_p.shape[0]

        max_change_pct = 0.0
        # update p to the centroid of its clustered e samples
        bincount = torch.bincount(e_idx, minlength=num_p).double()
        if 0 in bincount:
            print('Empty cluster found, optimal transport probably did not converge\n'
                  'Try a larger lr or max_iter after checking the measures.')
            # return False
        eps = 1e-8
        for i in range(data_p.shape[1]):
            # update p to the centroid of their correspondences one dimension at a time
            p_target = torch.bincount(e_idx, weights=data_e[:, i], minlength=num_p).double() / (bincount+eps)
            change_pct = torch.max(torch.abs((data_p[:, i] - p_target) / (data_p[:, i] + eps)))
            max_change_pct = max(max_change_pct, change_pct)
            p0[:, i] = p_target

        # replace nan by original data TODO replace nan by nn barycenter?
        mask = torch.isnan(p0).any(dim=1)
        p0[mask] = data_p[mask].clone()

        return p0, max_change_pct 
Example #17
Source File: vot_torch.py    From pyvot with MIT License    5 votes
def update_p_base(e_idx, data_p, data_e):
        """ base function to update each p to the centroids of its cluster

        Args:
            e_idx (pytorch Tensor): assignment of e to p
            data_p (pytorch Tensor): cluster centroids, p
            data_e (pytorch Tensor): empirical samples, e
            p0 (pytorch Tensor): iteration index

        Returns:
            p0 (pytorch Tensor): new p
            max_change_pct (float): max_change
        """

        p0 = torch.zeros(data_p.shape).double().to(data_p.device)
        num_p = data_p.shape[0]

        max_change_pct = 0.0
        # update p to the centroid of its clustered e samples
        bincount = torch.bincount(e_idx, minlength=num_p).double().to(data_p.device)
        if 0 in bincount:
            print('Empty cluster found, optimal transport probably did not converge\n'
                  'Try a different lr or max_iter after checking the measures.')
            # return False
        eps = 1e-8
        for i in range(data_p.shape[1]):
            # update p to the centroid of their correspondences one dimension at a time
            p_target = torch.bincount(e_idx, weights=data_e[:, i], minlength=num_p).double().to(data_p.device) / (bincount+eps)
            change_pct = torch.max(torch.abs((data_p[:, i] - p_target) / (data_p[:, i] + eps)))
            max_change_pct = max(max_change_pct, change_pct)
            p0[:, i] = p_target

        # replace nan by original data TODO replace nan by nn barycenter?
        mask = torch.isnan(p0).any(dim=1)
        p0[mask] = data_p[mask].clone()

        return p0, max_change_pct 
Example #18
Source File: plot_optimal_transport_cluster.py    From geomloss with MIT License    5 votes
def KMeans(x, K=10, Niter=10, verbose=True):
    N, D = x.shape  # Number of samples, dimension of the ambient space

    # Define our KeOps CUDA kernel:
    nn_search = generic_argmin(  # Argmin reduction for generic formulas:
        'SqDist(x,y)',           # A simple squared L2 distance
        'ind = Vi(1)',           # Output one index per "line" (reduction over "j")
        'x = Vi({})'.format(D),  # 1st arg: one point per "line"
        'y = Vj({})'.format(D))  # 2nd arg: one point per "column"
    
    # K-means loop:
    # - x  is the point cloud, 
    # - cl is the vector of class labels
    # - c  is the cloud of cluster centroids
    start = time.time()

    # Simplistic random initialization for the cluster centroids:
    perm = torch.randperm(N)
    idx = perm[:K]
    c = x[idx, :].clone()  

    for i in range(Niter):
        cl  = nn_search(x,c).view(-1)  # Points -> Nearest cluster
        Ncl = torch.bincount(cl).type(dtype)  # Class weights
        for d in range(D):  # Compute the cluster centroids with torch.bincount:
            c[:, d] = torch.bincount(cl, weights=x[:, d]) / Ncl
    if use_cuda: torch.cuda.synchronize()
    end = time.time()
    if verbose: print("KMeans performed in {:.3f}s.".format(end-start))

    return cl, c 
Example #19
Source File: metric.py    From EMANet with GNU General Public License v3.0    5 votes
def fast_hist(label_true, label_pred):
    n_class = settings.N_CLASSES
    mask = (label_true >= 0) & (label_true < n_class)
    hist = torch.bincount(
        n_class * label_true[mask].int() + label_pred[mask].int(),
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)
    return hist 
Example #20
Source File: RC.py    From DEEPSEC with MIT License    5 votes
def region_based_classification_single(self, sample, radius):
        """

        :param sample: one sample (1*channel*H*W)
        :param radius:
        :return:
        """
        self.model.eval()

        assert sample.shape[0] == 1, "the sample parameter should be one example in numpy format"
        copy_sample = np.copy(sample)

        with torch.no_grad():
            copy_sample = torch.from_numpy(copy_sample).to(self.device)

            # prepare the hypercube samples (size=num_points) for the sample (size=1)
            hypercube_samples = copy_sample.repeat(self.num_points, 1, 1, 1).to(self.device).float()
            random_space = torch.Tensor(*hypercube_samples.size()).to(self.device).float()
            random_space.uniform_(-radius, radius)
            hypercube_samples = torch.clamp(hypercube_samples + random_space, min=0.0, max=1.0)

            # predicting for hypercube samples
            hypercube_preds = self.model(hypercube_samples)
            hypercube_labels = torch.max(hypercube_preds, dim=1)[1]

            # voting for predicted labels
            bin_count = torch.bincount(hypercube_labels)
            rc_label = torch.max(bin_count, dim=0)[1]

            return rc_label.cpu().numpy() 
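The voting step is a plain majority vote: bincount tallies how often each label was predicted across the perturbed copies, and the argmax picks the winner. In isolation, with made-up labels:

import torch

hypercube_labels = torch.tensor([2, 0, 2, 1, 2, 2, 0])
bin_count = torch.bincount(hypercube_labels)  # tensor([2, 1, 4])
rc_label = torch.argmax(bin_count)            # majority label: tensor(2)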
Example #21
Source File: metrics.py    From form2fit with MIT License    5 votes
def _fast_hist(true, pred, num_classes):
    mask = (true >= 0) & (true < num_classes)
    hist = torch.bincount(
        num_classes * true[mask] + pred[mask],
        minlength=num_classes ** 2,
    ).reshape(num_classes, num_classes).float()
    return hist 
Example #22
Source File: bag_of_word_counts_token_embedder.py    From allennlp with Apache License 2.0    5 votes
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        # Parameters

        inputs : `torch.Tensor`
            Shape `(batch_size, timesteps, sequence_length)` of word ids
            representing the current batch.

        # Returns

        `torch.Tensor`
            The bag-of-words representations for the input sequence, shape
            `(batch_size, vocab_size)`
        """
        bag_of_words_vectors = []

        mask = get_text_field_mask({"tokens": {"tokens": inputs}})
        if self._ignore_oov:
            # also mask out positions corresponding to oov
            mask &= inputs != self._oov_idx
        for document, doc_mask in zip(inputs, mask):
            document = torch.masked_select(document, doc_mask)
            vec = torch.bincount(document, minlength=self.vocab_size).float()
            vec = vec.view(1, -1)
            bag_of_words_vectors.append(vec)
        bag_of_words_output = torch.cat(bag_of_words_vectors, 0)

        if self._projection:
            projection = self._projection
            bag_of_words_output = projection(bag_of_words_output)
        return bag_of_words_output 
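The per-document loop exists because torch.bincount() is not batched. A sketch of an equivalent batched version via scatter_add_ (batched_bincount is a hypothetical helper, not part of allennlp):

import torch

def batched_bincount(ids, mask, vocab_size):
    # ids: (batch, seq_len) int64 word ids; mask: (batch, seq_len) validity mask.
    counts = torch.zeros(ids.size(0), vocab_size, device=ids.device)
    counts.scatter_add_(1, ids, mask.float())  # masked-out positions add 0
    return counts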
Example #23
Source File: torch_flow_stats.py    From space_time_pde with MIT License    5 votes
def energy_spectrum(vel):
    """
    Compute energy spectrum given a velocity field
    :param vel: tensor of shape (N, 3, res, res, res)
    :return spec: tensor of shape(N, res/2)
    :return k: tensor of shape (res/2,), frequencies corresponding to spec
    """
    device = vel.device
    res = vel.shape[-2:]

    assert(res[0] == res[1])
    r = res[0]
    k_end = int(r/2)
    vel_ = pad_rfft3(vel, onesided=False) # (N, 3, res, res, res, 2)
    uu_ = (torch.norm(vel_, dim=-1) / r**3)**2
    e_ = torch.sum(uu_, dim=1)  # (N, res, res, res)
    k = fftfreqs(res).to(device) # (3, res, res, res)
    rad = torch.norm(k, dim=0) # (res, res, res)
    k_bin = torch.arange(k_end, device=device).float()+1
    bins = torch.zeros(k_end+1).to(device)
    bins[1:-1] = (k_bin[1:]+k_bin[:-1])/2
    bins[-1] = k_bin[-1]
    bins[1:] += 1e-3  # nudge internal bin edges slightly upward
    bins = bins.unsqueeze(0)
    inds = searchsorted(bins, rad.flatten().unsqueeze(0)).squeeze().int()
    # bincount = torch.histc(inds.cpu(), bins=bins.shape[1]+1).to(device)
    bincount = torch.bincount(inds)
    asort = torch.argsort(inds.squeeze())
    sorted_e_ = e_.view(e_.shape[0], -1)[:, asort]
    csum_e_ = torch.cumsum(sorted_e_, dim=1)
    binloc = torch.cumsum(bincount, dim=0).long()-1
    spec_ = csum_e_[:,binloc[1:]] - csum_e_[:,binloc[:-1]]
    spec_ = spec_[:, :-1]
    spec_ = spec_ * 2 * np.pi * (k_bin.float()**2) / bincount[1:-1].float()
    return spec_, k_bin


##################### COMPUTE STATS ########################### 
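The sort-then-cumsum block in energy_spectrum is a generic segment-sum trick: sort the values by bin id, take a running sum, and difference the running sum at the last index of each bin. A small standalone version (not from space_time_pde; assumes every bin is non-empty):

import torch

inds = torch.tensor([1, 0, 1, 2, 0])           # bin id of each value
vals = torch.tensor([10., 1., 20., 100., 2.])
csum = torch.cumsum(vals[torch.argsort(inds)], dim=0)
ends = torch.cumsum(torch.bincount(inds), dim=0) - 1  # last position of each bin
sums = torch.cat([csum[ends[:1]], csum[ends[1:]] - csum[ends[:-1]]])
print(sums)  # tensor([  3.,  30., 100.])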
Example #24
Source File: qemb.py    From fairseq with MIT License    5 votes
def __init__(self, centroids, assignments, num_embeddings, embedding_dim,
                     padding_idx=None, max_norm=None, norm_type=2.,
                     scale_grad_by_freq=False, sparse=False, _weight=None):
        super(PQEmbedding, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
            elif padding_idx < 0:
                assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        # check compatibility
        if self.embedding_dim % self.block_size != 0:
            raise ValueError("Wrong PQ sizes")
        if len(assignments) % self.num_embeddings != 0:
            raise ValueError("Wrong PQ sizes")
        # define parameters
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer("assignments", assignments)
        self.register_buffer("counts", torch.bincount(assignments).type_as(centroids)) 
Example #25
Source File: utils.py    From torchbench with Apache License 2.0    5 votes
def update(self, a, b):
        n = self.num_classes
        if self.mat is None:
            self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
        with torch.no_grad():
            k = (a >= 0) & (a < n)
            inds = n * a[k].to(torch.int64) + b[k]
            self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n) 
Example #26
Source File: metric.py    From EMANet with GNU General Public License v3.0    5 votes
def fast_hist(label_true, label_pred):
    n_class = settings.N_CLASSES
    mask = (label_true >= 0) & (label_true < n_class)
    hist = torch.bincount(
        n_class * label_true[mask].int() + label_pred[mask].int(),
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)
    return hist 
Example #27
Source File: aceoptimize.py    From gandissect with MIT License    4 votes
def compute_present_locations(args, corpus, cache_filename,
        model, segmenter, classnum, full_sample):
    # Phase 1.  Identify a set of locations where there are doorways.
    # Segment the image and find featuremap pixels that maximize the number
    # of doorway pixels under the featuremap pixel.
    if all(k in corpus for k in ['present_indices',
            'object_present_sample', 'object_present_location',
            'object_location_popularity', 'weighted_mean_present_feature']):
        return
    progress = default_progress()
    feature_shape = model.feature_shape[args.layer][2:]
    num_locations = numpy.prod(feature_shape).item()
    num_units = model.feature_shape[args.layer][1]
    with torch.no_grad():
        weighted_feature_sum = torch.zeros(num_units).cuda()
        object_presence_scores = []
        for [zbatch] in progress(
                torch.utils.data.DataLoader(TensorDataset(full_sample),
                batch_size=args.inference_batch_size, num_workers=10,
                pin_memory=True),
                desc="Object pool"):
            zbatch = zbatch.cuda()
            tensor_image = model(zbatch)
            segmented_image = segmenter.segment_batch(tensor_image,
                    downsample=2)
            mask = (segmented_image == classnum).max(1)[0]
            score = torch.nn.functional.adaptive_avg_pool2d(
                    mask.float(), feature_shape)
            object_presence_scores.append(score.cpu())
            feat = model.retained_layer(args.layer)
            weighted_feature_sum += (feat * score[:,None,:,:]).view(
                    feat.shape[0],feat.shape[1], -1).sum(2).sum(0)
        object_presence_at_feature = torch.cat(object_presence_scores)
        object_presence_at_image, object_location_in_image = (
                object_presence_at_feature.view(args.search_size, -1).max(1))
        best_presence_scores, best_presence_images = torch.sort(
                -object_presence_at_image)
        all_present_indices = torch.sort(
                best_presence_images[:(args.train_size+args.eval_size)])[0]
        corpus.present_indices = all_present_indices[:args.train_size]
        corpus.object_present_sample = full_sample[corpus.present_indices]
        corpus.object_present_location = object_location_in_image[
                corpus.present_indices]
        corpus.object_location_popularity = torch.bincount(
            corpus.object_present_location,
            minlength=num_locations)
        corpus.weighted_mean_present_feature = (weighted_feature_sum.cpu() / (
            1e-20 + object_presence_at_feature.view(-1).sum()))
        corpus.eval_present_indices = all_present_indices[-args.eval_size:]
        corpus.eval_present_sample = full_sample[corpus.eval_present_indices]
        corpus.eval_present_location = object_location_in_image[
                corpus.eval_present_indices]

    if cache_filename:
        numpy.savez(cache_filename, **corpus) 
Example #28
Source File: evaluate.py    From DeepLab-v3-plus-cityscapes with MIT License    4 votes
def __call__(self, net):
        ## evaluate
        n_classes = self.cfg.n_classes
        ignore_label = self.cfg.ignore_label
        if dist.is_initialized() and dist.get_rank()!=0:
            diter = enumerate(self.dl)
        else:
            diter = enumerate(tqdm(self.dl))
        hist = torch.zeros(n_classes, n_classes).cuda()
        for i, (imgs, label) in diter:
            label = label.squeeze(1).cuda()
            N, H, W = label.shape
            probs = torch.zeros((N, n_classes, H, W)).cuda()
            probs.requires_grad = False
            for sc in self.cfg.eval_scales:
                new_hw = [int(H*sc), int(W*sc)]
                with torch.no_grad():
                    im = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True)
                    im = im.cuda()
                    out = net(im)
                    out = F.interpolate(out, (H, W), mode='bilinear', align_corners=True)
                    prob = F.softmax(out, 1)
                    probs += prob
                    if self.cfg.eval_flip:
                        out = net(torch.flip(im, dims=(3,)))
                        out = torch.flip(out, dims=(3,))
                        out = F.interpolate(out, (H, W), mode='bilinear',
                                align_corners=True)
                        prob = F.softmax(out, 1)
                        probs += prob
                    del out, prob
            torch.cuda.empty_cache()
            preds = torch.argmax(probs, dim=1)
            keep = label != ignore_label
            hist += torch.bincount(
                label[keep] * n_classes + preds[keep],
                minlength=n_classes ** 2
                ).view(n_classes, n_classes)
        if dist.is_initialized():
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
        miou = ious.mean()
        return miou.item() 
Example #29
Source File: qconv.py    From attn2d with MIT License    4 votes
def __init__(
        self,
        centroids,
        assignments,
        bias,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        padding_mode="zeros",
    ):
        super(PQConv2d, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.padding_mode = padding_mode
        # check compatibility
        if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0:
            raise ValueError("Wrong PQ sizes")
        if len(assignments) % out_channels != 0:
            raise ValueError("Wrong PQ sizes")
        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups")
        if out_channels % groups != 0:
            raise ValueError("out_channels must be divisible by groups")
        # define parameters
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer("assignments", assignments)
        self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
        if bias is not None:
            self.bias = nn.Parameter(bias)
        else:
            self.register_parameter("bias", None)
        # register hook for averaging gradients per centroids instead of summing
        self.centroids.register_hook(lambda x: x / self.counts[:, None]) 
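The register_hook line above rescales each centroid's gradient by its assignment count, turning the summed gradient into a per-centroid mean. A toy illustration of the mechanism (values made up, not from attn2d/fairseq):

import torch

counts = torch.tensor([2., 4.])                # blocks assigned to each centroid
c = torch.zeros(2, 3, requires_grad=True)
c.register_hook(lambda g: g / counts[:, None])
c.sum().backward()                             # raw gradient would be all ones
print(c.grad)                                  # rows scaled to 0.5 and 0.25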
Example #30
Source File: qconv.py    From fairseq with MIT License    4 votes
def __init__(
        self,
        centroids,
        assignments,
        bias,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        padding_mode="zeros",
    ):
        super(PQConv2d, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.padding_mode = padding_mode
        # check compatibility
        if in_channels // groups * np.prod(self.kernel_size) % self.block_size != 0:
            raise ValueError("Wrong PQ sizes")
        if len(assignments) % out_channels != 0:
            raise ValueError("Wrong PQ sizes")
        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups")
        if out_channels % groups != 0:
            raise ValueError("out_channels must be divisible by groups")
        # define parameters
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer("assignments", assignments)
        self.register_buffer("counts", torch.bincount(assignments).type_as(centroids))
        if bias is not None:
            self.bias = nn.Parameter(bias)
        else:
            self.register_parameter("bias", None)
        # register hook for averaging gradients per centroids instead of summing
        self.centroids.register_hook(lambda x: x / self.counts[:, None])