Python torch.log2() Examples

The following are 30 code examples of torch.log2(). Each example is drawn from a real open-source project; the attribution line above each one names the source file, project, and license. You may also want to check out all available functions and classes of the torch module.
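Before diving into the project examples, here is a minimal standalone demonstration of the function's behavior: torch.log2() computes the element-wise base-2 logarithm, returning -inf for zero and nan for negative inputs.

import torch

x = torch.tensor([0.0, 1.0, 2.0, 8.0, -1.0])
print(torch.log2(x))
# tensor([-inf, 0., 1., 3., nan])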
Example #1
Source File: poolers.py    From FreeAnchor with MIT License
def __init__(self, output_size, scales, sampling_ratio, canonical_level=4):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(
            lvl_min, lvl_max, canonical_level=canonical_level
        ) 
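Note: to see what the two torch.log2() calls above produce, consider the FPN-style scales typically passed to this constructor (the values here are illustrative, not taken from the project's config). Because each scale is a power of 1/2, -log2(scale) recovers the pyramid level index.

import torch

scales = [1/4, 1/8, 1/16, 1/32]  # hypothetical feature-map scales
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()   # 2.0
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()  # 5.0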
Example #2
Source File: poolers.py    From Clothing-Detection with GNU General Public License v3.0
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #3
Source File: poolers.py    From remote_sensing_object_detection_2019 with MIT License
def __init__(self, output_size, scales):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
        """
        super(PyramidRROIAlign, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                RROIAlign(
                    output_size, spatial_scale=scale
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #4
Source File: poolers.py    From remote_sensing_object_detection_2019 with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #5
Source File: utils_deblur.py    From KAIR with MIT License
def p2o(psf, shape):
    '''
    # psf: NxCxhxw
    # shape: [H,W]
    # otf: NxCxHxWx2
    '''
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    otf = torch.rfft(otf, 2, onesided=False)
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))  # rough FFT operation count, used to bound accumulated roundoff
    otf[...,1][torch.abs(otf[...,1]) < n_ops*2.22e-16] = torch.tensor(0).type_as(psf)  # zero imaginary parts below the roundoff threshold (2.22e-16 is double-precision eps)
    return otf



# otf2psf: not sure where I got this one from. Maybe translated from Octave source code or whatever. It's just math. 
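Note: torch.rfft was removed in PyTorch 1.8, so the call above only runs on older versions. A minimal sketch of the equivalent two-sided transform with the modern torch.fft module (assuming the same NxCxHxW input and the same trailing real/imag layout):

import torch

def fft2_as_real(x):
    # 2-D FFT over the last two dims, unpacked into a trailing
    # real/imag dimension like torch.rfft(x, 2, onesided=False)
    return torch.view_as_real(torch.fft.fft2(x))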
Example #6
Source File: exponential.py    From heat with MIT License
def log2(x, out=None):
    """
    Base-2 logarithm, element-wise.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the logarithm.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    logarithms : ht.DNDarray
        A tensor of the same shape as x, containing the base-2 logarithms of each element in this tensor.
        Negative input elements are returned as nan. If out was provided, logarithms is a reference to it.

    Examples
    --------
    >>> ht.log2(ht.arange(5))
    tensor([  -inf, 0.0000, 1.0000, 1.5850, 2.0000])
    """
    return operations.__local_op(torch.log2, x, out) 
Example #7
Source File: poolers.py    From sampling-free with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #8
Source File: wasserstein_cost_mat.py    From pt-ranking.github.io with MIT License
def get_delta_gains(batch_stds, discount=False):
    '''
    Delta-gains w.r.t. pairwise swapping of the ideal ranking
    :param batch_stds: the standard labels sorted in a descending order
    :return:
    '''
    batch_gains = torch.pow(2.0, batch_stds) - 1.0
    batch_g_diffs = torch.unsqueeze(batch_gains, dim=2) - torch.unsqueeze(batch_gains, dim=1)

    if discount:
        batch_std_ranks = torch.arange(batch_stds.size(1)).type(tensor)  # `tensor` is a module-level type alias defined elsewhere in the project
        batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)   # discount coefficients
        batch_dists = torch.unsqueeze(batch_dists, dim=0)
        batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
        batch_delta_gs = torch.abs(batch_g_diffs) * torch.abs(batch_dists_diffs)  # absolute changes w.r.t. pairwise swapping
    else:
        batch_delta_gs = torch.abs(batch_g_diffs)  # absolute delta gains w.r.t. pairwise swapping

    return batch_delta_gs 
Example #9
Source File: metric.py    From pt-ranking.github.io with MIT License
def tor_discounted_cumu_gain_at_k(sorted_labels, cutoff, multi_level_rele=True):
    '''
    ICML-nDCG, which places stronger emphasis on retrieving relevant documents
    :param sorted_labels: ranked labels (either standard or predicted by a system) as a 1-D tensor
    :param cutoff: the rank position cutoff to be considered
    :param multi_level_rele: True for multi-level relevance labels, False for listwise integer labels, e.g., MQ2007-list
    :return: cumulative gains for each rank position
    '''
    if multi_level_rele:    #the common case with multi-level labels
        nums = torch.pow(2.0, sorted_labels[0:cutoff]) - 1.0
    else:
        nums = sorted_labels[0:cutoff]  #the case like listwise ranking, where the relevance is labeled as (n-rank_position)

    denoms = torch.log2(torch.arange(cutoff, dtype=torch.float) + 2.0)   #discounting factor
    dited_cumu_gain = torch.sum(nums/denoms)   # discounted cumulative gain value

    return dited_cumu_gain 
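A quick worked example of the function above, with made-up relevance labels: gains are 2^label - 1 and each rank r is discounted by log2(r + 1).

import torch

labels = torch.tensor([3.0, 2.0, 3.0, 0.0, 1.0])  # hypothetical graded labels, already ranked
dcg = tor_discounted_cumu_gain_at_k(labels, cutoff=5)
# gains [7, 3, 7, 0, 1] / discounts log2([2, 3, 4, 5, 6]) summed -> dcg ~= 12.78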
Example #10
Source File: adhoc_metric.py    From pt-ranking.github.io with MIT License
def torch_discounted_cumu_gain_at_k(sorted_labels, cutoff, multi_level_rele=True):
	'''
	ICML-nDCG, which places stronger emphasis on retrieving relevant documents
	:param sorted_labels: ranked labels (either standard or predicted by a system) as a 1-D tensor
	:param cutoff: the rank position cutoff to be considered
	:param multi_level_rele: True for multi-level relevance labels, False for listwise integer labels, e.g., MQ2007-list
	:return: cumulative gains for each rank position
	'''
	if multi_level_rele:    #the common case with multi-level labels
		nums = torch.pow(2.0, sorted_labels[0:cutoff]) - 1.0
	else:
		nums = sorted_labels[0:cutoff]  #the case like listwise ltr_adhoc, where the relevance is labeled as (n-rank_position)

	denoms = torch.log2(torch.arange(cutoff).type(torch.FloatTensor) + 2.0)   #discounting factor
	dited_cumu_gain = torch.sum(nums/denoms)   # discounted cumulative gain value

	return dited_cumu_gain 
Example #11
Source File: adhoc_metric.py    From pt-ranking.github.io with MIT License
def torch_discounted_cumu_gain_at_ks(sorted_labels, max_cutoff, multi_level_rele=True):
	'''
	ICML-nDCG, which places stronger emphasis on retrieving relevant documents
	:param sorted_labels: ranked labels (either standard or predicted by a system) as a 1-D tensor
	:param max_cutoff: the maximum rank position to be considered
	:param multi_level_rele: True for multi-level relevance labels, False for listwise integer labels, e.g., MQ2007-list
	:return: cumulative gains for each rank position
	'''

	if multi_level_rele:    #the common case with multi-level labels
		nums = torch.pow(2.0, sorted_labels[0:max_cutoff]) - 1.0
	else:
		nums = sorted_labels[0:max_cutoff]  #the case like listwise ltr_adhoc, where the relevance is labeled as (n-rank_position)

	denoms = torch.log2(torch.arange(max_cutoff).type(torch.FloatTensor) + 2.0)   #discounting factor
	dited_cumu_gains = torch.cumsum(nums/denoms, dim=0)   # discounted cumulative gain value w.r.t. each position

	return dited_cumu_gains 
Example #12
Source File: metric_utils.py    From pt-ranking.github.io with MIT License
def get_delta_ndcg(batch_stds, batch_stds_sorted_via_preds):
    '''
    Delta-nDCG w.r.t. pairwise swapping of the currently predicted ranking
    :param batch_stds: the standard labels sorted in a descending order
    :param batch_stds_sorted_via_preds: the standard labels sorted based on the corresponding predictions
    :return:
    '''
    batch_idcgs = torch_ideal_dcg(batch_sorted_labels=batch_stds, gpu=gpu)                      # ideal discounted cumulative gains (torch_ideal_dcg and gpu are module-level in the project)

    batch_gains = torch.pow(2.0, batch_stds_sorted_via_preds) - 1.0
    batch_n_gains = batch_gains / batch_idcgs               # normalised gains
    batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)

    batch_std_ranks = torch.arange(batch_stds_sorted_via_preds.size(1)).type(tensor)  # `tensor` is a module-level type alias defined elsewhere in the project
    batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)   # discount coefficients
    batch_dists = torch.unsqueeze(batch_dists, dim=0)
    batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
    batch_delta_ndcg = torch.abs(batch_ng_diffs) * torch.abs(batch_dists_diffs)  # absolute changes w.r.t. pairwise swapping

    return batch_delta_ndcg 
Example #13
Source File: poolers.py    From HRNet-MaskRCNN-Benchmark with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #14
Source File: poolers.py    From maskrcnn-benchmark with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #15
Source File: poolers.py    From EmbedMask with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #16
Source File: poolers.py    From Res2Net-maskrcnn with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #17
Source File: poolers.py    From retinamask with MIT License
def __init__(self, output_size, scales, sampling_ratio, canonical_level=4):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(
            lvl_min, lvl_max, canonical_level=canonical_level
        ) 
Example #18
Source File: slate_estimators.py    From ReAgent with BSD 3-Clause "New" or "Revised" License
def _get_discount(self, slate_size: int) -> Tensor:
        weights = DCGSlateMetric._weights
        if (
            weights is None
            or weights.shape[0] < slate_size
            or weights.device != self._device
        ):
            DCGSlateMetric._weights = torch.reciprocal(
                torch.log2(
                    torch.arange(
                        2, slate_size + 2, dtype=torch.double, device=self._device
                    )
                )
            )
        weights = DCGSlateMetric._weights
        assert weights is not None
        return weights[:slate_size] 
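The cached weights are simply the standard DCG discounts 1 / log2(rank + 1) for ranks 1..slate_size. A small check with an illustrative slate size:

import torch

slate_size = 4  # hypothetical
weights = torch.reciprocal(torch.log2(torch.arange(2, slate_size + 2, dtype=torch.double)))
# tensor([1.0000, 0.6309, 0.5000, 0.4307], dtype=torch.float64)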
Example #19
Source File: bpc.py    From flambe with MIT License
def compute(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Compute the bits per character given the input and target.

        Parameters
        ----------
        pred: torch.Tensor
            input logits of shape (B x N)
        target: torch.LongTensor
            target tensor of shape (B)

        Returns
        -------
        torch.float
            Output bits per character

        """
        entropy = self.entropy(pred, target).mean()
        return torch.log2(torch.exp(entropy)) 
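Here torch.log2(torch.exp(entropy)) converts the mean cross-entropy from nats to bits, since log2(e^H) = H / ln 2. A quick check with a made-up value:

import math
import torch

entropy_nats = torch.tensor(math.log(4.0))     # hypothetical mean cross-entropy in nats
bpc = torch.log2(torch.exp(entropy_nats))      # equals entropy_nats / math.log(2)
# bpc == 2.0 bits per character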
Example #20
Source File: bpc.py    From flambe with MIT License
def finalize(self, state: Dict) -> float:
        """Finalizes the metric computation

        Parameters
        ----------
        state: dict
            the metric state

        Returns
        -------
        float
            The final score.

        """
        if not state or state['sample_count'] == 0:
            # call on empty state
            return np.nan  # the np.NaN alias was removed in NumPy 2.0
        return torch.log2(torch.exp(state['accumulated_score'] / state['sample_count'])).item() 
Example #21
Source File: evalu.py    From HyperIM with MIT License
def ndcg_k(pred, label, k=[1, 3, 5]):
    batch_size = pred.shape[0]
    
    ndcg = []
    for _k in k:
        score = 0
        rank = th.log2(th.arange(2, 2 + _k, dtype=label.dtype, device=label.device))
        for i in range(batch_size):
            l = label[i, pred[i, :_k]]
            n = l.sum().item()
            if n == 0:
                continue
            
            dcg = (l/rank).sum().item()
            label_count = label[i].sum().item()
            norm = 1 / th.log2(th.arange(2, 2 + min(_k, label_count), dtype=label.dtype))
            norm = norm.sum().item()
            score += dcg/norm
            
        ndcg.append(score*100/batch_size)
    
    return ndcg 
Example #22
Source File: poolers.py    From training with Apache License 2.0
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #23
Source File: poolers.py    From NAS-FCOS with BSD 2-Clause "Simplified" License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #24
Source File: uncertainty_sampling.py    From pytorch_active_learning with MIT License
def entropy_based(self, prob_dist):
        """ 
        Returns the uncertainty score of a probability distribution using
        entropy 
        
        Assumes probability distribution is a pytorch tensor, like: 
            tensor([0.0321, 0.6439, 0.0871, 0.2369])
                    
        Keyword arguments:
            prob_dist -- a pytorch tensor of real numbers between 0 and 1 that total to 1.0
        """
        log_probs = prob_dist * torch.log2(prob_dist) # multiply each probability by its base-2 log (zero probabilities yield nan, since 0 * log2(0) is 0 * -inf)
        raw_entropy = 0 - torch.sum(log_probs)
    
        normalized_entropy = raw_entropy / math.log2(prob_dist.numel())
        
        return normalized_entropy.item() 
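Running the body of entropy_based on the distribution from its own docstring (inlined here so the snippet is self-contained):

import math
import torch

prob_dist = torch.tensor([0.0321, 0.6439, 0.0871, 0.2369])
log_probs = prob_dist * torch.log2(prob_dist)
raw_entropy = 0 - torch.sum(log_probs)                    # ~= 1.367 bits
normalized = raw_entropy / math.log2(prob_dist.numel())   # ~= 0.684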
Example #25
Source File: poolers.py    From RRPN_pytorch with MIT License
def __init__(self, output_size, scales):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
        """
        super(PyramidRROIAlign, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                RROIAlign(
                    output_size, spatial_scale=scale
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #26
Source File: poolers.py    From RRPN_pytorch with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max) 
Example #27
Source File: softmax_entropy.py    From ViewAL with MIT License
def calculate_scores(self, model, paths):
        model.eval()
        scores = []

        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size, paths), batch_size=self.batch_size, shuffle=False, num_workers=0)
        
        with torch.no_grad():
            for sample in tqdm(loader):
                image_batch = sample['image'].cuda()
                label_batch = sample['label'].cuda()
                softmax = torch.nn.Softmax2d()
                output = softmax(model(image_batch))
                num_classes = output.shape[1]
                for batch_idx in range(output.shape[0]):
                    entropy_map = torch.cuda.FloatTensor(output.shape[2], output.shape[3]).fill_(0)
                    for c in range(self.num_classes):
                        entropy_map = entropy_map - (output[batch_idx, c, :, :] * torch.log2(output[batch_idx, c, :, :] + 1e-12))
                    entropy_map[label_batch[batch_idx, :, :] == 255] = 0
                    scores.append(entropy_map.mean().cpu().item())
                    del entropy_map
                torch.cuda.empty_cache()
        return scores 
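The per-class loop above can be collapsed into one vectorized expression (a sketch, not the project's code); summing over the class dimension yields the same per-pixel entropy map for every image in the batch at once:

import torch

output = torch.softmax(torch.randn(2, 5, 4, 4), dim=1)            # (B, C, H, W) probabilities
entropy_maps = -(output * torch.log2(output + 1e-12)).sum(dim=1)  # (B, H, W)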
Example #28
Source File: ceal.py    From ViewAL with MIT License
def calculate_scores(self, model, paths):
        model.eval()
        scores = []

        loader = DataLoader(PathsDataset(self.lmdb_handle, self.base_size, paths), batch_size=self.batch_size, shuffle=False, num_workers=0)
        path_ctr = 0
        entropy_maps = {}
        with torch.no_grad():
            for sample in tqdm(loader):
                image_batch = sample['image'].cuda()
                label_batch = sample['label'].cuda()

                softmax = torch.nn.Softmax2d()
                output = softmax(model(image_batch))
                num_classes = output.shape[1]
                for batch_idx in range(output.shape[0]):
                    entropy_map = torch.cuda.FloatTensor(output.shape[2], output.shape[3]).fill_(0)
                    for c in range(self.num_classes):
                        entropy_map = entropy_map - (output[batch_idx, c, :, :] * torch.log2(output[batch_idx, c, :, :] + 1e-12))
                    entropy_map[label_batch[batch_idx, :, :] == 255] = 0
                    scores.append(entropy_map.mean().cpu().item())
                    entropy_maps[paths[path_ctr]] = entropy_map.cpu().numpy()
                    path_ctr += 1
                torch.cuda.empty_cache()
        return scores, entropy_maps 
Example #29
Source File: view_entropy.py    From ViewAL with MIT License
def entropy_function(self, destination_frame_index, selection_mask_0, probabilities, probabilities_type, projected_points_flat, frame_origins):
        # view entropy score
        probability_matrix = torch.zeros((constants.DEPTH_HEIGHT * constants.DEPTH_WIDTH, self.num_classes)).type(torch.cuda.FloatTensor)
        selection_mask_0 = selection_mask_0.type(probabilities_type.BoolTensor)
        probability_matrix.index_add_(0, projected_points_flat, probabilities[selection_mask_0].cuda())
        probability_matrix = probability_matrix / probability_matrix.sum(dim=1).view(-1, 1)
        return_mask = (probability_matrix != probability_matrix).cpu().numpy()  # NaN mask: x != x is true only for NaN entries
        probability_matrix[probability_matrix != probability_matrix] = 0  # zero out NaNs from rows with no projected probability mass
        entropy_map = torch.zeros((constants.DEPTH_HEIGHT * constants.DEPTH_WIDTH)).type(torch.cuda.FloatTensor)
        for c in range(self.num_classes):
            entropy_map = entropy_map - (probability_matrix[:, c] * torch.log2(probability_matrix[:, c] + 1e-12))
        entropy_map = entropy_map.view(constants.DEPTH_HEIGHT, constants.DEPTH_WIDTH)
        return_map = entropy_map.cpu().numpy()
        del probability_matrix
        torch.cuda.empty_cache()
        return return_map, return_mask 
Example #30
Source File: poolers.py    From DF-Traffic-Sign-Identification with MIT License
def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max)