Python torch.floor() Examples

The following are 29 code examples of torch.floor(), collected from open-source projects; the source file, project, and license are listed above each example. torch.floor() returns a new tensor with the element-wise floor of the input, i.e. the largest integer less than or equal to each element. You may also want to check out all other available functions/classes of the module torch.
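
A quick illustration of the function itself (a minimal sketch): torch.floor rounds toward negative infinity, which differs from torch.trunc for negative inputs.

import torch

x = torch.tensor([-1.5, -0.2, 0.0, 0.7, 2.9])
print(torch.floor(x))  # tensor([-2., -1.,  0.,  0.,  2.])
print(torch.trunc(x))  # tensor([-1., -0.,  0.,  0.,  2.])
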
Example #1
Source File: HandCraftedModules.py    From affnet with MIT License
def forward(self, x, return_rot_matrix=False):
        # Image gradients, with replicate padding to keep the spatial size.
        gx = self.gx(F.pad(x, (1, 1, 0, 0), 'replicate'))
        gy = self.gy(F.pad(x, (0, 0, 1, 1), 'replicate'))
        mag = torch.sqrt(gx * gx + gy * gy + 1e-10)
        if x.is_cuda:
            self.gk = self.gk.cuda()
        mag = mag * self.gk.unsqueeze(0).unsqueeze(0).expand_as(mag)
        ori = torch.atan2(gy, gx)
        # Shift orientation from [-pi, pi) to [0, 2*pi) and rescale to bin units.
        o_big = float(self.num_ang_bins) * (ori + math.pi) / (2.0 * math.pi)
        # Linear soft-binning: lower bin index plus interpolation weights.
        bo0_big = torch.floor(o_big)
        wo1_big = o_big - bo0_big
        bo0_big = bo0_big % self.num_ang_bins
        bo1_big = (bo0_big + 1) % self.num_ang_bins
        wo0_big = (1.0 - wo1_big) * mag
        wo1_big = wo1_big * mag
        # Accumulate a magnitude-weighted orientation histogram.
        ang_bins = []
        for i in range(0, self.num_ang_bins):
            ang_bins.append(F.adaptive_avg_pool2d((bo0_big == i).float() * wo0_big, (1, 1)))
        ang_bins = torch.cat(ang_bins, 1).view(-1, 1, self.num_ang_bins)
        ang_bins = self.angular_smooth(ang_bins)
        values, indices = ang_bins.view(-1, self.num_ang_bins).max(1)
        # Dominant orientation taken from the strongest histogram bin.
        angle = -((2. * float(np.pi) * indices.float() / float(self.num_ang_bins)) - float(math.pi))
        if return_rot_matrix:
            return self.get_rotation_matrix(angle)
        return angle
Example #2
Source File: __init__.py    From yolo2-pytorch with GNU Lesser General Public License v3.0
def fit_positive(rows, cols, yx_min, yx_max, anchors):
    device_id = anchors.get_device() if torch.cuda.is_available() else None
    batch_size, num, _ = yx_min.size()
    num_anchors, _ = anchors.size()
    valid = torch.prod(yx_min < yx_max, -1)
    center = (yx_min + yx_max) / 2
    ij = torch.floor(center)
    i, j = torch.unbind(ij.long(), -1)
    index = i * cols + j
    anchors2 = anchors / 2
    iou_matrix = utils.iou.torch.iou_matrix(
        (yx_min - center).view(-1, 2), (yx_max - center).view(-1, 2),
        -anchors2, anchors2).view(batch_size, -1, num_anchors)
    iou, index_anchor = iou_matrix.max(-1)
    _positive = []
    cells = rows * cols
    for valid, index, index_anchor in zip(torch.unbind(valid), torch.unbind(index), torch.unbind(index_anchor)):
        index, index_anchor = (t[valid] for t in (index, index_anchor))
        t = utils.ensure_device(torch.ByteTensor(cells, num_anchors).zero_(), device_id)
        t[index, index_anchor] = 1
        _positive.append(t)
    return torch.stack(_positive) 
Example #3
Source File: rbox_single_level.py    From AerialDetection with Apache License 2.0
def map_roi_levels(self, rois, num_levels):
        """Map rrois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RRoIs, shape (k, 6). (index, x, y, w, h, angle)
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(rois[:, 3] * rois[:, 4])
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
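
As a worked example of the binning above, assuming the common default finest_scale = 56 (the value is configurable): a RoI of scale 50 gives floor(log2(50 / 56)) = -1, which clamps to level 0; scale 120 gives floor(log2(120 / 56)) = floor(1.10) = 1; scale 250 gives floor(2.16) = 2; and scale 500 gives floor(3.16) = 3, the maximum when num_levels = 4.
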
Example #4
Source File: single_level.py    From AerialDetection with Apache License 2.0
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Example #5
Source File: test_binaries.py    From crosentgec with GNU General Public License v3.0
def create_dummy_data(data_dir, num_examples=1000, maxlen=20):

    def _create_dummy_data(filename):
        data = torch.rand(num_examples * maxlen)
        # 97 + floor(26 * U[0,1)) yields codes 97..122, i.e. ascii 'a'..'z'
        data = 97 + torch.floor(26 * data).int()
        with open(os.path.join(data_dir, filename), 'w') as h:
            offset = 0
            for _ in range(num_examples):
                ex_len = random.randint(1, maxlen)
                ex_str = ' '.join(map(chr, data[offset:offset+ex_len]))
                print(ex_str, file=h)
                offset += ex_len

    _create_dummy_data('train.in')
    _create_dummy_data('train.out')
    _create_dummy_data('valid.in')
    _create_dummy_data('valid.out')
    _create_dummy_data('test.in')
    _create_dummy_data('test.out') 
Example #6
Source File: f2m.py    From pytorch-pcen with MIT License
def _init_buffers(self):
        m_min = 0. if self.f_min == 0 else 2595 * np.log10(1. + (self.f_min / 700))
        m_max = 2595 * np.log10(1. + (self.f_max / 700))

        m_pts = torch.linspace(m_min, m_max, self.n_mels + 2)
        f_pts = (700 * (10**(m_pts / 2595) - 1))

        bins = torch.floor(((self.n_fft - 1) * 2) * f_pts / self.sr).long()

        fb = torch.zeros(self.n_fft, self.n_mels)
        for m in range(1, self.n_mels + 1):
            f_m_minus = bins[m - 1].item()
            f_m = bins[m].item()
            f_m_plus = bins[m + 1].item()

            if f_m_minus != f_m:
                fb[f_m_minus:f_m, m - 1] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
            if f_m != f_m_plus:
                fb[f_m:f_m_plus, m - 1] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)
        self.register_buffer("fb", fb) 
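
The conversions above use the standard mel formula m = 2595 * log10(1 + f / 700) and its inverse. A quick round-trip check (a minimal standalone sketch):

import numpy as np

def hz_to_mel(f):
    return 2595 * np.log10(1 + f / 700)

def mel_to_hz(m):
    return 700 * (10 ** (m / 2595) - 1)

print(hz_to_mel(1000))            # ~1000.0: 1000 Hz is about 1000 mel by construction
print(mel_to_hz(hz_to_mel(440)))  # 440.0: the two maps are inverses
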
Example #7
Source File: rputil.py    From RelativePose with BSD 3-Clause "New" or "Revised" License
def interpolate(feat, pt):
    # feat: c,h,w
    # pt: K,2
    # return: c,k
    h,w = feat.shape[1], feat.shape[2]
    x = pt[:,0] * (w-1)
    y = pt[:,1] * (h-1)
    x0 = torch.floor(x)
    y0 = torch.floor(y)

    val=feat[:,y0.long(),x0.long()]*(x0+1-x)*(y0+1-y)+\
            feat[:,y0.long()+1,x0.long()]*(x0+1-x)*(y-y0)+\
            feat[:,y0.long(),x0.long()+1]*(x-x0)*(y0+1-y)+\
            feat[:,y0.long()+1,x0.long()+1]*(x-x0)*(y-y0)
    
    return val 
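
A hypothetical usage sketch (the names below are illustrative). Note the helper reads x0 + 1 and y0 + 1, so coordinates of exactly 1.0 would index one past the last pixel; clamping just below 1 avoids that edge case.

import torch

feat = torch.rand(8, 32, 32)              # c, h, w feature map
pt = torch.rand(100, 2).clamp(0, 0.999)   # K normalized (x, y) points in [0, 1)
val = interpolate(feat, pt)               # shape (8, 100): bilinearly sampled features
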
Example #8
Source File: kitti_evaluation.py    From Attentional-PointNet with GNU General Public License v3.0
def lidar_to_img(points, img_size):
    # pdb.set_trace()
    lidar_data = np.array(points[:, :2])
    lidar_data *= 9.9999
    lidar_data -= (0.5 * img_size, 0.5 * img_size)
    lidar_data = np.fabs(lidar_data)
    lidar_data = lidar_data.astype(np.int32)
    lidar_data = np.reshape(lidar_data, (-1, 2))
    lidar_img = np.zeros((img_size, img_size))
    lidar_img[tuple(lidar_data.T)] = 255
    return torch.tensor(lidar_img).cuda()


# def lidar_to_img(points, img_size):
#     # pdb.set_trace()
#     lidar_data = points[:, :2]
#     lidar_data *= 9.9999
#     lidar_data -= torch.tensor((0.5 * img_size, 0.5 * img_size)).cuda()
#     lidar_data = torch.abs(lidar_data)
#     lidar_data = torch.floor(lidar_data).long()
#     lidar_data = lidar_data.view(-1, 2)
#     lidar_img = torch.zeros((img_size, img_size)).cuda()
#     lidar_img[lidar_data.permute(1,0)] = 255
#     return lidar_img 
Example #9
Source File: single_level.py    From GCNet with Apache License 2.0
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Example #10
Source File: losses.py    From conditional-motion-propagation with MIT License
def tobin(self, target):
        indxneg = target.data[:,0,:,:] < 0
        eps = torch.zeros(target.data[:,0,:,:].size()).cuda()
        epsind = target.data[:,0,:,:] == 0
        eps[epsind] += 1e-5
        angle = torch.atan(target.data[:,1,:,:] / (target.data[:,0,:,:] + eps))
        angle[indxneg] += np.pi
        angle += np.pi / 2 # 0 to 2pi
        angle = torch.clamp(angle, 0, 2 * np.pi - 1e-3)
        radius = torch.sqrt(target.data[:,0,:,:] ** 2 + target.data[:,1,:,:] ** 2)
        radius = torch.clamp(radius, 0, self.fmax - 1e-3)
        quantized_angle = torch.floor(self.abins * angle / (2 * np.pi))
        if self.quantize_strategy == 'linear':
            quantized_radius = torch.floor(self.rbins * radius / self.fmax)
        elif self.quantize_strategy == 'quadratic':
            quantized_radius = torch.floor(self.rbins * torch.sqrt(radius / self.fmax))
        else:
            raise Exception("No such quantize strategy: {}".format(self.quantize_strategy))
        quantized_target = torch.autograd.Variable(torch.cat([torch.unsqueeze(quantized_angle, 1), torch.unsqueeze(quantized_radius, 1)], dim=1))
        return quantized_target.type(torch.cuda.LongTensor) 
Example #11
Source File: efficientnet.py    From ClassyVision with MIT License
def drop_connect(inputs, is_training, drop_connect_rate):
    """
    Apply drop connect to random inputs in a batch.
    """
    if not is_training:
        return inputs

    keep_prob = 1 - drop_connect_rate

    # compute drop connect tensor
    batch_size = inputs.shape[0]
    random_tensor = keep_prob
    random_tensor += torch.rand(
        [batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device
    )
    binary_tensor = torch.floor(random_tensor)
    outputs = (inputs / keep_prob) * binary_tensor
    return outputs 
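
The floor trick above is a compact Bernoulli sample: keep_prob + U with U ~ Uniform[0, 1) lies in [keep_prob, 1 + keep_prob), so its floor is 1 with probability keep_prob and 0 otherwise. An equivalent formulation (a sketch, not the project's code):

binary_tensor = torch.bernoulli(
    torch.full([batch_size, 1, 1, 1], keep_prob,
               dtype=inputs.dtype, device=inputs.device))
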
Example #12
Source File: pytorch_util.py    From leap with MIT License
def alpha_dropout(
        x,
        p=0.05,
        alpha=-1.7580993408473766,
        fixedPointMean=0,
        fixedPointVar=1,
        training=False,
):
    keep_prob = 1 - p
    if keep_prob == 1 or not training:
        return x
    a = np.sqrt(fixedPointVar / (keep_prob * (
        (1 - keep_prob) * pow(alpha - fixedPointMean, 2) + fixedPointVar)))
    b = fixedPointMean - a * (
        keep_prob * fixedPointMean + (1 - keep_prob) * alpha)

    random_tensor = keep_prob + torch.rand(x.size())
    binary_tensor = Variable(torch.floor(random_tensor))
    x = x.mul(binary_tensor)
    ret = x + alpha * (1 - binary_tensor)
    ret.mul_(a).add_(b)
    return ret 
Example #13
Source File: utils.py    From fast-autoaugment with MIT License
def drop_connect(inputs, drop_p, training):
    """ Drop connect. """
    if not training:
        return inputs * (1. - drop_p)
    batch_size = inputs.shape[0]
    random_tensor = torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_tensor = random_tensor > drop_p
    output = inputs * binary_tensor.float()
    # output = inputs / (1. - drop_p) * binary_tensor.float()
    return output

    # if not training: return inputs
    # batch_size = inputs.shape[0]
    # keep_prob = 1 - drop_p
    # random_tensor = keep_prob
    # random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    # binary_tensor = torch.floor(random_tensor)
    # output = inputs / keep_prob * binary_tensor
    # return output 
Example #14
Source File: mackrel.py    From mackrl with Apache License 2.0
def _joint_actions_2_action_pair(joint_action, n_actions,use_delegate_action=True):
    if isinstance(joint_action, int):
        if use_delegate_action:
            if joint_action != 0.0:
                _action1 = (joint_action - 1.0) // n_actions
                _action2 = (joint_action - 1.0) % n_actions
            else:
                _action1 = float("nan")
                _action2 = float("nan")
        else:
            # joint_action is a plain int here; torch.floor expects a tensor,
            # so use integer floor division instead
            _action1 = joint_action // n_actions
            _action2 = joint_action % n_actions
        return _action1, _action2
    else:
        if use_delegate_action:
            mask = (joint_action == 0.0)
            joint_action[mask] = 1.0
            _action1 = th.floor((joint_action-1.0) / n_actions)
            _action2 = (joint_action-1.0) % n_actions
            _action1[mask] = float("nan")
            _action2[mask] = float("nan")
        else:
            _action1 = th.floor(joint_action / n_actions)
            _action2 = (joint_action) % n_actions
        return _action1, _action2 
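
The pairing above is a base-n_actions encoding, with joint index 0 reserved for the delegate action when use_delegate_action is True. A hypothetical round trip, assuming n_actions = 6:

import torch as th

n_actions = 6
a1, a2 = 2, 5
joint = th.tensor([a1 * n_actions + a2 + 1.0])  # +1 skips the reserved delegate slot
print(_joint_actions_2_action_pair(joint, n_actions))  # (tensor([2.]), tensor([5.]))
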
Example #15
Source File: single_level_roi_extractor.py    From mmdetection with Apache License 2.0
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Example #16
Source File: single_level.py    From kaggle-kuzushiji-recognition with MIT License
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Example #17
Source File: single_level.py    From PolarMask with Apache License 2.0
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Example #18
Source File: Evaluation.py    From CU-Net with Apache License 2.0
def final_preds(output, center, scale, res, rot):
    coords = get_preds(output) # float type

    # post-processing
    for n in range(coords.size(0)):
        for p in range(coords.size(1)):
            hm = output[n][p]
            px = int(math.floor(coords[n][p][0]))
            py = int(math.floor(coords[n][p][1]))
            if px > 1 and px < res[0] and py > 1 and py < res[1]:
                diff = torch.Tensor([hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1]-hm[py - 2][px - 1]])
                coords[n][p] += diff.sign() * .25
    coords += 0.5
    preds = coords.clone()

    # Transform back
    # print coords.size(), len(center), len(scale)
    for i in range(coords.size(0)):
        # print type(coords[i]), type(center[i]), type(scale[i])
        preds[i] = transform_preds(coords[i], center[i], scale[i], res, rot[i])

    if preds.dim() < 3:
        preds = preds.view(1, *preds.size())  # unpack the size to add a batch dim

    return preds 
Example #19
Source File: Evaluation.py    From CU-Net with Apache License 2.0
def get_preds(scores):
    ''' get predictions from score maps in torch Tensor
        return type: torch.LongTensor
    '''
    assert scores.dim() == 4, 'Score maps should be 4-dim'
    maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)

    maxval = maxval.view(scores.size(0), scores.size(1), 1)
    idx = idx.view(scores.size(0), scores.size(1), 1) + 1

    preds = idx.repeat(1, 1, 2).float()

    preds[:,:,0] = (preds[:,:,0] - 1) % scores.size(3) + 1
    preds[:,:,1] = torch.floor((preds[:,:,1] - 1) / scores.size(2)) + 1

    pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
    preds *= pred_mask
    return preds 
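
For intuition: idx is a 1-based flat index into each score map, and the two assignments above recover 1-based (x, y). With a 64x64 map, idx = 130 gives x = (130 - 1) % 64 + 1 = 2 and y = floor((130 - 1) / 64) + 1 = 3. Note the division uses scores.size(2) (the height), which coincides with the flat-index width only for square score maps.
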
Example #20
Source File: Evaluation_prev_version.py    From CU-Net with Apache License 2.0
def get_preds(scores):
    ''' get predictions from score maps in torch Tensor
        return type: torch.LongTensor
    '''
    assert scores.dim() == 4, 'Score maps should be 4-dim'
    maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)

    maxval = maxval.view(scores.size(0), scores.size(1), 1)
    idx = idx.view(scores.size(0), scores.size(1), 1) + 1

    preds = idx.repeat(1, 1, 2).float()

    preds[:,:,0] = (preds[:,:,0] - 1) % scores.size(3) + 1
    preds[:,:,1] = torch.floor((preds[:,:,1] - 1) / scores.size(2)) + 1

    pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
    preds *= pred_mask
    return preds 
Example #21
Source File: single_level.py    From mmdetection_with_SENet154 with Apache License 2.0
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Example #22
Source File: test_binaries.py    From training_results_v0.5 with Apache License 2.0
def create_dummy_data(data_dir, num_examples=1000, maxlen=20):

    def _create_dummy_data(filename):
        data = torch.rand(num_examples * maxlen)
        # 97 + floor(26 * U[0,1)) yields codes 97..122, i.e. ascii 'a'..'z'
        data = 97 + torch.floor(26 * data).int()
        with open(os.path.join(data_dir, filename), 'w') as h:
            offset = 0
            for _ in range(num_examples):
                ex_len = random.randint(1, maxlen)
                ex_str = ' '.join(map(chr, data[offset:offset+ex_len]))
                print(ex_str, file=h)
                offset += ex_len

    _create_dummy_data('train.in')
    _create_dummy_data('train.out')
    _create_dummy_data('valid.in')
    _create_dummy_data('valid.out')
    _create_dummy_data('test.in')
    _create_dummy_data('test.out') 
Example #23
Source File: mackrel.py    From mackrl with Apache License 2.0
def _joint_actions_2_action_pair_aa(joint_action, n_actions, avail_actions1, avail_actions2, use_delegate_action=True):
    joint_action = joint_action.clone()
    if use_delegate_action:
        mask = (joint_action == 0.0)
        joint_action[mask] = 1.0
        _action1 = th.floor((joint_action-1.0) / n_actions)
        _action2 = (joint_action-1.0) % n_actions
        _action1[mask] = float("nan")
        _action2[mask] = float("nan")
    else:
        _action1 = th.floor(joint_action / n_actions)
        _action2 = (joint_action) % n_actions

    # NaN != NaN, so these masks mark entries that decoded from the delegate action
    aa_m1 = _action1 != _action1
    aa_m2 = _action2 != _action2
    _action1[aa_m1] = 0
    _action2[aa_m2] = 0
    aa1 = avail_actions1.data.gather(-1, ( _action1.long() ))
    aa2 = avail_actions2.data.gather(-1, ( _action2.long() ))
    _action1[aa1 == 0] = float("nan")
    _action2[aa2 == 0] = float("nan")
    _action1[aa_m1] = float("nan")
    _action2[aa_m2] = float("nan")
    return _action1, _action2 
Example #24
Source File: single_level.py    From mmdetection-annotated with Apache License 2.0
def map_roi_levels(self, rois, num_levels):
        """Map rois to corresponding feature levels by scales.

        - scale < finest_scale * 2: level 0
        - finest_scale * 2 <= scale < finest_scale * 4: level 1
        - finest_scale * 4 <= scale < finest_scale * 8: level 2
        - scale >= finest_scale * 8: level 3

        Args:
            rois (Tensor): Input RoIs, shape (k, 5).
            num_levels (int): Total level number.

        Returns:
            Tensor: Level index (0-based) of each RoI, shape (k, )
        """
        scale = torch.sqrt(
            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
        return target_lvls 
Example #25
Source File: model.py    From ACAN with MIT License
def soft_ordinal_regression(self, pred_prob, d_min, d_max, n_c):
        # Soft decoding: split the summed probability into integer and fractional
        # parts, then blend the two neighbouring discrete depths by the fraction.
        pred_prob_sum = torch.sum(pred_prob, 1, keepdim=True)
        integral = torch.floor(pred_prob_sum)
        fraction = pred_prob_sum - integral
        depth_low = (discrete2continuous(integral, d_min, d_max, n_c) +
                     discrete2continuous(integral + 1, d_min, d_max, n_c)) / 2
        depth_high = (discrete2continuous(integral + 1, d_min, d_max, n_c) +
                      discrete2continuous(integral + 2, d_min, d_max, n_c)) / 2
        pred_depth = depth_low * (1 - fraction) + depth_high * fraction
        return pred_depth
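
discrete2continuous is defined elsewhere in the ACAN code. Under a simple uniform-discretization assumption it could look like the following (a hypothetical sketch; the project may use a different, e.g. log-space, discretization):

def discrete2continuous(d, d_min, d_max, n_c):
    # map bin index d in [0, n_c] linearly back to a depth in [d_min, d_max]
    return d_min + (d_max - d_min) * d / n_c
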
Example #26
Source File: poolers.py    From DetNAS with MIT License
def __call__(self, boxlists):
        """
        Arguments:
            boxlists (list[BoxList])
        """
        # Compute level ids
        s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))

        # Eqn.(1) in FPN paper
        target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
        target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
        return target_lvls.to(torch.int64) - self.k_min 
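
With the paper's canonical values lvl0 = 4 and s0 = 224, this computes k = floor(4 + log2(sqrt(area) / 224)): a 224 x 224 box maps to level 4 and a 448 x 448 box to level 5. Subtracting self.k_min at the end turns the absolute pyramid level into a 0-based index into the pooler's feature maps.
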
Example #27
Source File: HandCraftedModules.py    From affnet with MIT License
def get_bin_weight_kernel_size_and_stride(self, patch_size, num_spatial_bins):
        bin_weight_stride = int(round(2.0 * np.floor(patch_size / 2) / float(num_spatial_bins + 1)))
        bin_weight_kernel_size = int(2 * bin_weight_stride - 1)
        return bin_weight_kernel_size, bin_weight_stride 
Example #28
Source File: pytorch_util.py    From leap with MIT License
def compute_deconv_output_size(h_in, w_in, kernel_size, stride, padding=0):
    h_out = (h_in -1)*stride - 2*padding + kernel_size
    w_out = (w_in -1)*stride - 2*padding + kernel_size
    return int(np.floor(h_out)), int(np.floor(w_out)) 
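
This is the standard ConvTranspose2d output-size formula, h_out = (h_in - 1) * stride - 2 * padding + kernel_size. For example, h_in = 4, stride = 2, padding = 1, kernel_size = 4 gives (4 - 1) * 2 - 2 + 4 = 8, i.e. exact 2x upsampling; the np.floor is a no-op here since all terms are integers.
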
Example #29
Source File: fpn.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def _target_level(self, boxes):
        scales = (boxes[:, 2:] - boxes[:, :2]).prod(dim=-1).sqrt()
        target_level = torch.floor(self.canonical_level + torch.log2(scales / self.canonical_scale + 1e-6))
        return target_level.clamp(min=self.min_level, max=self.min_level + self.levels - 1)