Python torch.round() Examples

The following are 30 code examples of torch.round(), collected from open-source projects; the project and source file for each example are noted above its code. You may also want to check out all available functions and classes of the module torch, or try the search function.
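Before the project examples, here is a minimal standalone sketch of the basic behaviour. Note that torch.round() rounds halfway cases to the nearest even value (banker's rounding), so 0.5 rounds to 0 and 2.5 rounds to 2.

import torch

x = torch.tensor([-1.5, -0.5, 0.5, 1.5, 2.5])
print(torch.round(x))  # halves round to the nearest even integer: -2., -0., 0., 2., 2.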
Example #1
Source File: guided_anchor_target.py    From GCNet with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
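As an illustration, assuming the calc_region above is in scope and torch is imported, passing a single hypothetical box as a 1-D tensor together with a 100x100 feature map yields the rounded, clamped coordinates of a smaller, centered region:

bbox = torch.tensor([10., 20., 50., 80.])  # hypothetical box: x1, y1, x2, y2
x1, y1, x2, y2 = calc_region(bbox, ratio=0.25, featmap_size=(100, 100))
# (x1, y1, x2, y2) == (tensor(20), tensor(35), tensor(40), tensor(65))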
Example #2
Source File: quant_dorefa.py    From pytorch_DoReFaNet with MIT License
def uniform_quantize(k):
  class qfn(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input):
      if k == 32:
        out = input
      elif k == 1:
        out = torch.sign(input)
      else:
        n = float(2 ** k - 1)
        out = torch.round(input * n) / n
      return out

    @staticmethod
    def backward(ctx, grad_output):
      grad_input = grad_output.clone()
      return grad_input

  return qfn().apply 
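A minimal usage sketch (the input values are made up): the returned function snaps values onto a uniform k-bit grid in the forward pass, while the backward pass acts as a straight-through estimator that passes gradients unchanged.

quantize_2bit = uniform_quantize(k=2)
x = torch.tensor([0.0, 0.3, 0.5, 1.0], requires_grad=True)
q = quantize_2bit(x)   # values snapped to multiples of 1/3: 0.0000, 0.3333, 0.6667, 1.0000
q.sum().backward()
print(x.grad)          # tensor([1., 1., 1., 1.]) -- gradients flow straight through the rounding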
Example #3
Source File: copy_env.py    From pytorch-dnc with MIT License
def visual(self, input_ts, target_ts, mask_ts, output_ts=None):
        """
        input_ts:  [(num_wordsx2+2) x batch_size x (len_word+2)]
        target_ts: [(num_wordsx2+2) x batch_size x (len_word)]
        mask_ts:   [(num_wordsx2+2) x batch_size x (len_word)]
        output_ts: [(num_wordsx2+2) x batch_size x (len_word)]
        """
        output_ts = torch.round(output_ts * mask_ts) if output_ts is not None else None
        input_strings  = [self._readable(input_ts[:, 0, i])  for i in range(input_ts.size(2))]
        target_strings = [self._readable(target_ts[:, 0, i]) for i in range(target_ts.size(2))]
        mask_strings   = [self._readable(mask_ts[:, 0, 0])]
        output_strings = [self._readable(output_ts[:, 0, i]) for i in range(output_ts.size(2))] if output_ts is not None else None
        input_strings  = 'Input:\n'  + '\n'.join(input_strings)
        target_strings = 'Target:\n' + '\n'.join(target_strings)
        mask_strings   = 'Mask:\n'   + '\n'.join(mask_strings)
        output_strings = 'Output:\n' + '\n'.join(output_strings) if output_ts is not None else None
        # strings = [input_strings, target_strings, mask_strings, output_strings]
        # self.logger.warning(input_strings)
        # self.logger.warning(target_strings)
        # self.logger.warning(mask_strings)
        # self.logger.warning(output_strings)
        print(input_strings)
        print(target_strings)
        print(mask_strings)
        if output_ts is not None:
            print(output_strings)
Example #4
Source File: guided_anchor_target.py    From AerialDetection with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
Example #5
Source File: parsing.py    From Parsing-R-CNN with MIT License
def crop(self, box):
        assert isinstance(box, (list, tuple, torch.Tensor)), str(type(box))
        # box is assumed to be xyxy
        current_width, current_height = self.size
        xmin, ymin, xmax, ymax = [round(float(b)) for b in box]

        assert xmin <= xmax and ymin <= ymax, str(box)
        xmin = min(max(xmin, 0), current_width - 1)
        ymin = min(max(ymin, 0), current_height - 1)

        xmax = min(max(xmax, 0), current_width)
        ymax = min(max(ymax, 0), current_height)

        xmax = max(xmax, xmin + 1)
        ymax = max(ymax, ymin + 1)

        width, height = xmax - xmin, ymax - ymin
        cropped_parsing = self.parsing[:, ymin:ymax, xmin:xmax]
        cropped_size = width, height
        return Parsing(cropped_parsing, cropped_size) 
Example #6
Source File: parsing.py    From Parsing-R-CNN with MIT License
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[ymin:ymax, xmin:xmax]
        resized_parsing = cv2.resize(
            cropped_parsing,
            (heatmap_size[1], heatmap_size[0]),
            interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Example #7
Source File: distributions.py    From integer_discrete_flows with MIT License
def sample_mixture_discretized_logistic(mean, logs, pi, inverse_bin_width):
    # Sample mixtures
    b, c, h, w, n_mixtures = tuple(map(int, pi.size()))
    pi = pi.view(b * c * h * w, n_mixtures)
    sampled_pi = torch.multinomial(pi, num_samples=1).view(-1)

    # Select mixture params
    mean = mean.view(b * c * h * w, n_mixtures)
    mean = mean[torch.arange(b*c*h*w), sampled_pi].view(b, c, h, w)
    logs = logs.view(b * c * h * w, n_mixtures)
    logs = logs[torch.arange(b*c*h*w), sampled_pi].view(b, c, h, w)

    y = torch.rand_like(mean)
    x = torch.exp(logs) * torch.log(y / (1 - y)) + mean

    x = torch.round(x * inverse_bin_width) / inverse_bin_width

    return x 
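The final line is the only part that uses torch.round(): it snaps the continuous logistic samples onto a discrete grid with spacing 1 / inverse_bin_width. As an isolated sketch (the sample values are hypothetical):

samples = torch.tensor([0.1234, -0.5678])
inverse_bin_width = 256.0
print(torch.round(samples * inverse_bin_width) / inverse_bin_width)  # tensor([ 0.1250, -0.5664]), i.e. multiples of 1/256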
Example #8
Source File: loss.py    From Parsing-R-CNN with MIT License
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[max(0, ymin):ymax, max(0, xmin):xmax]
        resized_parsing = cv2.resize(
            cropped_parsing, (heatmap_size[1], heatmap_size[0]), interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Example #9
Source File: coder.py    From integer_discrete_flows with MIT License
def encode_sample(
        z, pz, variable_type, distribution_type, bin_width=1./256, state=None):
    if state is None:
        state = rans.x_init
    else:
        state = rans.unflatten(state)

    CDFs, MEAN = CDF_fn(pz, bin_width, variable_type, distribution_type)

    # z is transformed to Z to match the indices for the CDFs array
    Z = torch.round(z / bin_width).long() + n_bins // 2 - MEAN
    Z = Z.cpu().numpy()

    if not ((np.sum(Z < 0) == 0 and np.sum(Z >= n_bins-1) == 0)):
        print('Z out of allowed range of values, canceling compression')
        return None

    Z, CDFs = Z.reshape(-1), CDFs.reshape(-1, n_bins).copy()
    for symbol, cdf in zip(Z[::-1], CDFs[::-1]):
        statfun = statfun_encode(cdf)
        state = rans.append_symbol(statfun, precision)(state, symbol)

    state = rans.flatten(state)

    return state 
Example #10
Source File: coder.py    From integer_discrete_flows with MIT License
def CDF_fn(pz, bin_width, variable_type, distribution_type):
    mean = pz[0] if len(pz) == 2 else pz[0][..., (pz[0].size(-1) - 1) // 2]
    MEAN = torch.round(mean / bin_width).long()

    bin_locations = torch.arange(-n_bins // 2, n_bins // 2)[None, None, None, None, :] + MEAN.cpu()[..., None]
    bin_locations = bin_locations.float() * bin_width
    bin_locations = bin_locations.to(device=pz[0].device)

    pz = [param[:, :, :, :, None] for param in pz]
    cdf = cdf_fn(
        bin_locations - bin_width,
        pz,
        variable_type,
        distribution_type,
        1./bin_width).cpu().numpy()

    # Compute CDFs, reweigh to give all bins at least
    # 1 / (2^precision) probability.
    # CDF is equal to floor[cdf * (2^precision - n_bins)] + range(n_bins)
    CDFs = (cdf * ((1 << precision) - n_bins)).astype('int') \
        + np.arange(n_bins)

    return CDFs, MEAN 
Example #11
Source File: utils.py    From mmdetection with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1])
        y1 = y1.clamp(min=0, max=featmap_size[0])
        x2 = x2.clamp(min=0, max=featmap_size[1])
        y2 = y2.clamp(min=0, max=featmap_size[0])
    return (x1, y1, x2, y2) 
Example #12
Source File: guided_anchor_target.py    From kaggle-kuzushiji-recognition with MIT License
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
Example #13
Source File: process.py    From DGI with MIT License
def micro_f1(logits, labels):
    # Compute predictions
    preds = torch.round(nn.Sigmoid()(logits))
    
    # Cast to avoid trouble
    preds = preds.long()
    labels = labels.long()

    # Count true positives, true negatives, false positives, false negatives
    tp = torch.nonzero(preds * labels).shape[0] * 1.0
    tn = torch.nonzero((preds - 1) * (labels - 1)).shape[0] * 1.0
    fp = torch.nonzero(preds * (labels - 1)).shape[0] * 1.0
    fn = torch.nonzero((preds - 1) * labels).shape[0] * 1.0

    # Compute micro-f1 score
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    f1 = (2 * prec * rec) / (prec + rec)
    return f1 
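A worked sketch with made-up logits and multi-label targets (assuming the function above is in scope and torch.nn is imported as nn, as in the source module):

logits = torch.tensor([[2.0, -1.0], [0.5, -3.0]])
labels = torch.tensor([[1, 0], [0, 0]])
# preds = round(sigmoid(logits)) = [[1, 0], [1, 0]] -> tp=1, fp=1, fn=0
print(micro_f1(logits, labels))  # 0.666... (precision 0.5, recall 1.0)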
Example #14
Source File: tracking_utils.py    From pytorch-detect-to-track with MIT License
def _score_of_edge(self, v1, v2):
        N1 = v1['boxes'].size(0)
        N2 = v2['boxes'].size(0)
        score = torch.cuda.FloatTensor(N1,N2).fill_(np.nan)
        track_score = torch.cuda.FloatTensor(N1,N2).fill_(np.nan)

        for i1 in range(N1):
            # scores of i1 box in frame i with all boxes in frame i+1
            scores2 = v2['scores'].contiguous().view(-1,1)
            scores1 = v1['scores'][i1]
            score[i1, :] = scores1 + scores2.t()

        if v1['trackedboxes'] is not None and v2['trackedboxes'] is not None:
            # overlaps between the boxes with tracked_boxes
            # overlaps (N1, N2)
            overlap_ratio_1 = bbox_overlaps(v1['boxes'].contiguous(), v1['trackedboxes'][0])
            overlap_ratio_2 = bbox_overlaps(v2['boxes'].contiguous(), v1['trackedboxes'][1])
            track_score = torch.mm(torch.round(overlap_ratio_1), torch.round(overlap_ratio_2).t())
            score[track_score>0.]+=1.0
            track_score = (track_score>0.).float()
        else:
            track_score = torch.cuda.FloatTensor(N1,N2).zero_()
        return score, track_score 
Example #15
Source File: metrics.py    From istn with Apache License 2.0
def multi_class_score(one_class_fn, predictions, labels, one_hot=False, unindexed_classes=0):
    result = {}
    shape = labels.shape
    for label_index in range(shape[1] + unindexed_classes):
        if one_hot:
            class_predictions = torch.round(predictions[:, label_index, :, :, :])
        else:
            class_predictions = predictions.eq(label_index)
            class_predictions = class_predictions.squeeze(1)  # remove channel dim
        class_labels = labels.eq(label_index).float()
        class_labels = class_labels.squeeze(1)  # remove channel dim
        class_predictions = class_predictions.float()

        result[str(label_index)] = one_class_fn(class_predictions, class_labels).mean()

    return result


# Inefficient to do this twice, TODO: update multi_class_score to handle this 
Example #16
Source File: utils.py    From Pytorch_Medical_Segmention_Template with MIT License
def eval_single_seg(predict, target, foreground=1):
    pred_seg = torch.round(torch.sigmoid(predict)).int()
    pred_seg = pred_seg.data.cpu().numpy()
    label_seg = target.data.cpu().numpy().astype(dtype=np.int64)
    assert (pred_seg.shape == label_seg.shape)

    Dice = []
    Precision = []
    Jaccard = []
    Sensitivity = []
    Specificity = []

    n = pred_seg.shape[0]

    for i in range(n):
        dice, precision, jaccard, sensitivity, specificity = compute_score_single(pred_seg[i], label_seg[i])
        Dice.append(dice)
        Precision.append(precision)
        Jaccard.append(jaccard)
        Sensitivity.append(sensitivity)
        Specificity.append(specificity)

    return Dice, Precision, Jaccard, Sensitivity, Specificity
Example #17
Source File: length_regulator.py    From espnet with Apache License 2.0
def forward(self, xs, ds, ilens, alpha=1.0):
        """Calculate forward propagation.

        Args:
            xs (Tensor): Batch of sequences of char or phoneme embeddings (B, Tmax, D).
            ds (LongTensor): Batch of durations of each frame (B, T).
            ilens (LongTensor): Batch of input lengths (B,).
            alpha (float, optional): Alpha value to control speed of speech.

        Returns:
            Tensor: replicated input tensor based on durations (B, T*, D).

        """
        assert alpha > 0
        if alpha != 1.0:
            ds = torch.round(ds.float() * alpha).long()
        xs = [x[:ilen] for x, ilen in zip(xs, ilens)]
        ds = [d[:ilen] for d, ilen in zip(ds, ilens)]
        xs = [self._repeat_one_sequence(x, d) for x, d in zip(xs, ds)]

        return pad_list(xs, self.pad_value) 
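Here torch.round() only appears in the speed-control branch: the predicted durations are scaled by alpha and rounded back to integer frame counts. A tiny sketch with hypothetical durations:

ds = torch.tensor([2, 3, 1])
print(torch.round(ds.float() * 1.5).long())  # tensor([3, 4, 2])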
Example #18
Source File: duration_predictor.py    From espnet with Apache License 2.0
def _forward(self, xs, x_masks=None, is_inference=False):
        xs = xs.transpose(1, -1)  # (B, idim, Tmax)
        for f in self.conv:
            xs = f(xs)  # (B, C, Tmax)

        # NOTE: calculate in log domain
        xs = self.linear(xs.transpose(1, -1)).squeeze(-1)  # (B, Tmax)

        if is_inference:
            # NOTE: calculate in linear domain
            xs = torch.clamp(
                torch.round(xs.exp() - self.offset), min=0
            ).long()  # avoid negative value

        if x_masks is not None:
            xs = xs.masked_fill(x_masks, 0.0)

        return xs 
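During inference the predictor outputs log-domain durations, so they are exponentiated, offset, rounded, and clamped at zero. An isolated sketch with hypothetical log-domain outputs and a hypothetical offset of 1.0:

xs_log = torch.tensor([[0.0, 1.2, -5.0]])
print(torch.clamp(torch.round(xs_log.exp() - 1.0), min=0).long())  # tensor([[0, 2, 0]])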
Example #19
Source File: guided_anchor_target.py    From mmdetection-annotated with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
Example #20
Source File: guided_anchor_target.py    From RDSNet with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
Example #21
Source File: guided_anchor_target.py    From PolarMask with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
Example #22
Source File: test_pytorch_RNN.py    From nn_builder with MIT License
def test_output_head_activations_work():
    """Tests that output head activations work properly"""

    output_dim = [["linear", 5], ["linear", 10], ["linear", 3]]
    nn_instance = RNN(input_dim=5, layers_info=[["gru", 20], ["lstm", 8], output_dim],
                          hidden_activations="relu", output_activation=["softmax", None, "relu"])

    x = torch.randn((20, 12, 5)) * -20.0
    out = nn_instance(x)
    assert out.shape == (20, 18)
    sums = torch.sum(out[:, :5], dim=1).detach().numpy()
    sums_others = torch.sum(out[:, 5:], dim=1).detach().numpy()
    sums_others_2 = torch.sum(out[:, 5:15], dim=1).detach().numpy()
    sums_others_3 = torch.sum(out[:, 15:18], dim=1).detach().numpy()


    for row in range(out.shape[0]):
        assert np.round(sums[row], 4) == 1.0, sums[row]
        assert not np.round(sums_others[row], 4) == 1.0, sums_others[row]
        assert not np.round(sums_others_2[row], 4) == 1.0, sums_others_2[row]
        assert not np.round(sums_others_3[row], 4) == 1.0, sums_others_3[row]
        for col in range(3):
            assert out[row, 15 + col] >= 0.0, out[row, 15 + col] 
Example #23
Source File: mappers.py    From habitat-api with MIT License
def pcl_to_obstacles(pts3d, map_size=40, cell_size=0.2, min_pts=10):
    r"""Counts number of 3d points in 2d map cell.
    Height is sum-pooled.
    """
    device = pts3d.device
    map_size_in_cells = get_map_size_in_cells(map_size, cell_size) - 1
    init_map = torch.zeros(
        (map_size_in_cells, map_size_in_cells), device=device
    )
    if len(pts3d) <= 1:
        return init_map
    num_pts, dim = pts3d.size()
    pts2d = torch.cat([pts3d[:, 2:3], pts3d[:, 0:1]], dim=1)
    data_idxs = torch.round(
        project2d_pcl_into_worldmap(pts2d, map_size, cell_size)
    )
    if len(data_idxs) > min_pts:
        u, counts = np.unique(
            data_idxs.detach().cpu().numpy(), axis=0, return_counts=True
        )
        init_map[u[:, 0], u[:, 1]] = torch.from_numpy(counts).to(
            dtype=torch.float32, device=device
        )
    return init_map 
Example #24
Source File: guided_anchor_target.py    From IoU-Uniform-R-CNN with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
Example #25
Source File: guided_anchor_target.py    From mmdetection_with_SENet154 with Apache License 2.0
def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size used for clipping the boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
    if featmap_size is not None:
        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
    return (x1, y1, x2, y2) 
Example #26
Source File: quantizers.py    From nni with MIT License
def _quantize(self, bits, op, real_val):
        """
        quantize real value.

        Parameters
        ----------
        bits : int
            quantization bits length
        op : torch.nn.module
            target module
        real_val : float
            real value to be quantized

        Returns
        -------
        torch.Tensor
            quantized value
        """
        transformed_val = op.zero_point + real_val / op.scale
        qmin = 0
        qmax = (1 << bits) - 1
        clamped_val = torch.clamp(transformed_val, qmin, qmax)
        quantized_val = torch.round(clamped_val)
        return quantized_val 
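The same affine quantization step can be written standalone; the scale, zero point, and input values below are hypothetical:

scale, zero_point, bits = 0.1, 0, 8
real_val = torch.tensor([0.04, 1.23, 30.0])
q = torch.round(torch.clamp(zero_point + real_val / scale, 0, (1 << bits) - 1))
print(q)  # tensor([0., 12., 255.]) -- 30.0 saturates at qmax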
Example #27
Source File: train.py    From kaggle_carvana_segmentation with MIT License
def dice_clamp(preds, trues, is_average=True):
    preds = torch.round(preds)
    return dice_loss(preds, trues, is_average=is_average) 
Example #28
Source File: model.py    From torch-light with MIT License
def gather_index(encode, k1, k2, n=6):
    x = torch.arange(start=0, end=n/(n-1.), step=1./(n-1), dtype=torch.float)
    if k1.is_cuda:
        x = x.cuda()

    k1 = x*(k1.float())
    k2 = (1-x)*(k2.float())
    index = torch.round(k1+k2).long()
    return torch.stack([torch.index_select(encode[idx], 0, index[idx]) for idx in range(encode.size(0))], dim=0) 
Example #29
Source File: md_embedding_bag.py    From dlrm with MIT License
def pow_2_round(dims):
    return 2 ** torch.round(torch.log2(dims.type(torch.float))) 
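A quick usage sketch with made-up embedding dimensions: each value is snapped to the nearest power of two by rounding in log space.

dims = torch.tensor([12, 100, 3])
print(pow_2_round(dims))  # tensor([ 16., 128.,   4.])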
Example #30
Source File: q_utils.py    From dnn-quant-ocs with Apache License 2.0
def linear_quantize(input, scale_factor, inplace=False):
    if inplace:
        input.mul_(scale_factor).round_()
        return input
    return torch.round(scale_factor * input)
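A brief usage sketch (the scale factor and input values are made up): the in-place variant mutates and returns the input tensor, while the out-of-place variant allocates a new one.

x = torch.tensor([0.1, -0.7, 2.34])
print(linear_quantize(x, scale_factor=100.0))  # tensor([ 10., -70., 234.])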