Python torch.empty() Examples

The following are 30 code examples of torch.empty(). You can go to the original project or source file by following the links above each example. You may also want to check out all the available functions/classes of the torch module, or try the search function.
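Before the project examples, here is a minimal illustrative sketch (not taken from any of the projects below) of the API itself. torch.empty() allocates a tensor of the requested shape without initializing its values, so in practice it is either filled in place (for example with normal_(), uniform_() or fill_()) or created with a zero-sized leading dimension as a placeholder for "no data" corner cases; both patterns recur throughout the examples.

import torch

# Allocate a 2x3 tensor; its contents are whatever happened to be in memory.
x = torch.empty(2, 3, dtype=torch.float32)

# Pattern 1: allocate, then initialize in place.
noise = torch.empty(4, 8).normal_()                 # standard-normal fill
weight = torch.empty(10, 10).uniform_(-0.1, 0.1)    # uniform fill in [-0.1, 0.1)

# Pattern 2: a zero-sized placeholder for "no boxes / no masks" corner cases.
no_boxes = torch.empty((0, 4))
assert no_boxes.numel() == 0 and tuple(no_boxes.shape) == (0, 4)

# dtype and device can be fixed at allocation time.
buf = torch.empty(5, dtype=torch.int64, device=torch.device("cpu"))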
Example #1
Source File: structures.py    From mmdetection with Apache License 2.0
def __init__(self, masks, height, width):
        self.height = height
        self.width = width
        if len(masks) == 0:
            self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
        else:
            assert isinstance(masks, (list, np.ndarray))
            if isinstance(masks, list):
                assert isinstance(masks[0], np.ndarray)
                assert masks[0].ndim == 2  # (H, W)
            else:
                assert masks.ndim == 3  # (N, H, W)

            self.masks = np.stack(masks).reshape(-1, height, width)
            assert self.masks.shape[1] == self.height
            assert self.masks.shape[2] == self.width 
Example #2
Source File: test_compliance_kaldi.py    From audio with BSD 2-Clause "Simplified" License
def _test_get_strided_helper(self, num_samples, window_size, window_shift, snip_edges):
        waveform = torch.arange(num_samples).float()
        output = kaldi._get_strided(waveform, window_size, window_shift, snip_edges)

        # from NumFrames in feature-window.cc
        n = window_size
        if snip_edges:
            m = 0 if num_samples < window_size else 1 + (num_samples - window_size) // window_shift
        else:
            m = (num_samples + (window_shift // 2)) // window_shift

        self.assertTrue(output.dim() == 2)
        self.assertTrue(output.shape[0] == m and output.shape[1] == n)

        window = torch.empty((m, window_size))

        for r in range(m):
            extract_window(window, waveform, r, window_size, window_shift, snip_edges)
        torch.testing.assert_allclose(window, output) 
Example #3
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_approx_iou_assigner_with_empty_boxes():
    """Test corner case where an network might predict no boxes."""
    self = ApproxMaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    approxs_per_octave = 1
    approxs = bboxes
    squares = bboxes
    assign_result = self.assign(approxs, squares, approxs_per_octave,
                                gt_bboxes)
    assert len(assign_result.gt_inds) == 0 
Example #4
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_max_iou_assigner_with_empty_boxes():
    """Test corner case where an network might predict no boxes."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])

    # Test with gt_labels
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )

    # Test without gt_labels
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=None)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None 
Example #5
Source File: structures.py    From mmdetection with Apache License 2.0
def crop(self, bbox):
        """See :func:`BaseInstanceMasks.crop`."""
        assert isinstance(bbox, np.ndarray)
        assert bbox.ndim == 1

        # clip the boundary
        bbox = bbox.copy()
        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
        x1, y1, x2, y2 = bbox
        w = np.maximum(x2 - x1, 1)
        h = np.maximum(y2 - y1, 1)

        if len(self.masks) == 0:
            cropped_masks = np.empty((0, h, w), dtype=np.uint8)
        else:
            cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
        return BitmapMasks(cropped_masks, h, w) 
Example #6
Source File: test_sampler.py    From mmdetection with Apache License 2.0
def test_random_sampler_empty_gt():
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0, ).long()
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)

    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)

    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)

    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds) 
Example #7
Source File: utils.py    From pyfilter with MIT License
def test_HelperMixin(self):
        obj = Help(torch.empty(3000).normal_())

        # ===== Verify that we don't break views when changing device ===== #
        obj.to_('cpu:0')

        temp = obj._params[0]
        temp += 1

        for p, v in zip(obj._params, obj._views):
            assert (p == v).all() and v._base is p

        # ===== Check state dict ===== #
        sd = obj.state_dict()

        newobj = Help(torch.empty(1))
        newobj.load_state_dict(sd)

        assert all((p1 == p2).all() for p1, p2 in zip(newobj._params, obj._params))
        assert all((p1 == p2).all() for p1, p2 in zip(newobj._views, newobj._params))
        assert all(p1._base is p2 for p1, p2 in zip(newobj._views, newobj._params)) 
Example #8
Source File: region_proposal_network.py    From easy-faster-rcnn.pytorch with MIT License
def loss(self, anchor_objectnesses: Tensor, anchor_transformers: Tensor,
             gt_anchor_objectnesses: Tensor, gt_anchor_transformers: Tensor,
             batch_size: int, batch_indices: Tensor) -> Tuple[Tensor, Tensor]:
        cross_entropies = torch.empty(batch_size, dtype=torch.float, device=anchor_objectnesses.device)
        smooth_l1_losses = torch.empty(batch_size, dtype=torch.float, device=anchor_transformers.device)

        for batch_index in range(batch_size):
            selected_indices = (batch_indices == batch_index).nonzero().view(-1)

            cross_entropy = F.cross_entropy(input=anchor_objectnesses[selected_indices],
                                            target=gt_anchor_objectnesses[selected_indices])

            fg_indices = gt_anchor_objectnesses[selected_indices].nonzero().view(-1)
            smooth_l1_loss = beta_smooth_l1_loss(input=anchor_transformers[selected_indices][fg_indices],
                                                 target=gt_anchor_transformers[selected_indices][fg_indices],
                                                 beta=self._anchor_smooth_l1_loss_beta)

            cross_entropies[batch_index] = cross_entropy
            smooth_l1_losses[batch_index] = smooth_l1_loss

        return cross_entropies, smooth_l1_losses 
Example #9
Source File: transforms.py    From audio with BSD 2-Clause "Simplified" License
def __init__(self,
                 n_mels: int = 128,
                 sample_rate: int = 16000,
                 f_min: float = 0.,
                 f_max: Optional[float] = None,
                 n_stft: Optional[int] = None) -> None:
        super(MelScale, self).__init__()
        self.n_mels = n_mels
        self.sample_rate = sample_rate
        self.f_max = f_max if f_max is not None else float(sample_rate // 2)
        self.f_min = f_min

        assert f_min <= self.f_max, 'Require f_min: %f < f_max: %f' % (f_min, self.f_max)

        fb = torch.empty(0) if n_stft is None else F.create_fb_matrix(
            n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate)
        self.register_buffer('fb', fb) 
Example #10
Source File: resampling.py    From pyfilter with MIT License
def systematic(w: torch.Tensor, normalized=False, u: Union[torch.Tensor, float] = None):
    """
    Performs systematic resampling on either a 1D or 2D array.
    :param w: The weights to use for resampling
    :param normalized: Whether the data is normalized
    :param u: Parameter for overriding the sampled index, for testing
    :return: Resampled indices
    """

    shape = (1,) if w.dim() < 2 else (w.shape[0], 1)
    u = u if u is not None else (torch.empty(shape, device=w.device)).uniform_()
    w = normalize(w) if not normalized else w

    if w.dim() > 1:
        return _matrix(w, u)

    return _vector(w, u) 
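A minimal usage sketch for systematic() (illustrative only; the weights here are random, and whether w should hold plain or log-weights follows the convention of pyfilter's normalize helper):

w = torch.rand(500)          # one particle set with 500 weights
idx = systematic(w)          # resampled indices, shape (500,)

w2 = torch.rand(8, 500)      # batched case: 8 independent particle sets
idx2 = systematic(w2)        # resampled indices, shape (8, 500)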
Example #11
Source File: CustomLayers.py    From BMSG-GAN with MIT License
def __init__(self, c_in, c_out, k_size, stride=1, pad=0, bias=True):
        """ constructor for the class """
        from torch.nn.modules.utils import _pair
        from numpy import sqrt, prod

        super().__init__()

        # define the weight and bias if to be used
        self.weight = th.nn.Parameter(th.nn.init.normal_(
            th.empty(c_out, c_in, *_pair(k_size))
        ))

        self.use_bias = bias
        self.stride = stride
        self.pad = pad

        if self.use_bias:
            self.bias = th.nn.Parameter(th.FloatTensor(c_out).fill_(0))

        fan_in = prod(_pair(k_size)) * c_in  # value of fan_in
        self.scale = sqrt(2) / sqrt(fan_in) 
Example #12
Source File: CustomLayers.py    From BMSG-GAN with MIT License
def __init__(self, c_in, c_out, k_size, stride=1, pad=0, bias=True):
        """ constructor for the class """
        from torch.nn.modules.utils import _pair
        from numpy import sqrt

        super().__init__()

        # define the weight and bias if to be used
        self.weight = th.nn.Parameter(th.nn.init.normal_(
            th.empty(c_in, c_out, *_pair(k_size))
        ))

        self.use_bias = bias
        self.stride = stride
        self.pad = pad

        if self.use_bias:
            self.bias = th.nn.Parameter(th.FloatTensor(c_out).fill_(0))

        fan_in = c_in  # value of fan_in for deconv
        self.scale = sqrt(2) / sqrt(fan_in) 
Example #13
Source File: CustomLayers.py    From BMSG-GAN with MIT License
def __init__(self, c_in, c_out, bias=True):
        """
        Linear layer modified for equalized learning rate
        """
        from numpy import sqrt

        super().__init__()

        self.weight = th.nn.Parameter(th.nn.init.normal_(
            th.empty(c_out, c_in)
        ))

        self.use_bias = bias

        if self.use_bias:
            self.bias = th.nn.Parameter(th.FloatTensor(c_out).fill_(0))

        fan_in = c_in
        self.scale = sqrt(2) / sqrt(fan_in) 
Example #14
Source File: parsing.py    From Parsing-R-CNN with MIT License
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[ymin:ymax, xmin:xmax]
        resized_parsing = cv2.resize(
            cropped_parsing,
            (heatmap_size[1], heatmap_size[0]),
            interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Example #15
Source File: loss.py    From Parsing-R-CNN with MIT License
def parsing_on_boxes(parsing, rois, heatmap_size):
    device = rois.device
    rois = rois.to(torch.device("cpu"))
    parsing_list = []
    for i in range(rois.shape[0]):
        parsing_ins = parsing[i].cpu().numpy()
        xmin, ymin, xmax, ymax = torch.round(rois[i]).int()
        cropped_parsing = parsing_ins[max(0, ymin):ymax, max(0, xmin):xmax]
        resized_parsing = cv2.resize(
            cropped_parsing, (heatmap_size[1], heatmap_size[0]), interpolation=cv2.INTER_NEAREST
        )
        parsing_list.append(torch.from_numpy(resized_parsing))

    if len(parsing_list) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.stack(parsing_list, dim=0).to(device, dtype=torch.int64) 
Example #16
Source File: linear.py    From nsf with MIT License
def __init__(self, features, orthogonal_initialization=True, using_cache=False):
        """Constructor.

        Args:
            features: int, number of input features.
            orthogonal_initialization: bool, if True initialize weights to be a random
                orthogonal matrix.

        Raises:
            TypeError: if `features` is not a positive integer.
        """
        super().__init__(features, using_cache)

        if orthogonal_initialization:
            self._weight = nn.Parameter(utils.random_orthogonal(features))
        else:
            self._weight = nn.Parameter(torch.empty(features, features))
            stdv = 1.0 / np.sqrt(features)
            init.uniform_(self._weight, -stdv, stdv) 
Example #17
Source File: test_inits.py    From pytorch_geometric with MIT License
def test_inits():
    x = torch.empty(1, 4)

    uniform(size=4, tensor=x)
    assert x.min() >= -0.5
    assert x.max() <= 0.5

    glorot(x)
    assert x.min() >= -1.25
    assert x.max() <= 1.25

    zeros(x)
    assert x.tolist() == [[0, 0, 0, 0]]

    ones(x)
    assert x.tolist() == [[1, 1, 1, 1]] 
Example #18
Source File: region_proposal_network.py    From SlowFast-Network-pytorch with MIT License
def loss(self, anchor_objectnesses: Tensor, anchor_transformers: Tensor,
             gt_anchor_objectnesses: Tensor, gt_anchor_transformers: Tensor,
             batch_size: int, batch_indices: Tensor) -> Tuple[Tensor, Tensor]:
        cross_entropies = torch.empty(batch_size, dtype=torch.float, device=anchor_objectnesses.device)
        smooth_l1_losses = torch.empty(batch_size, dtype=torch.float, device=anchor_transformers.device)

        for batch_index in range(batch_size):
            selected_indices = (batch_indices == batch_index).nonzero().view(-1)

            cross_entropy = F.cross_entropy(input=anchor_objectnesses[selected_indices],
                                            target=gt_anchor_objectnesses[selected_indices])

            fg_indices = gt_anchor_objectnesses[selected_indices].nonzero().view(-1)
            smooth_l1_loss = beta_smooth_l1_loss(input=anchor_transformers[selected_indices][fg_indices],
                                                 target=gt_anchor_transformers[selected_indices][fg_indices],
                                                 beta=self._anchor_smooth_l1_loss_beta)

            cross_entropies[batch_index] = cross_entropy
            smooth_l1_losses[batch_index] = smooth_l1_loss

        return cross_entropies, smooth_l1_losses 
Example #19
Source File: utils.py    From pytorch.sngan_projection with MIT License
def sample_z(batch_size, dim_z, device, distribution=None):
    """Sample random noises.

    Args:
        batch_size (int)
        dim_z (int)
        device (torch.device)
        distribution (str, optional): default is normal

    Returns:
        torch.FloatTensor or torch.cuda.FloatTensor

    """

    if distribution is None:
        distribution = 'normal'
    if distribution == 'normal':
        return torch.empty(batch_size, dim_z, dtype=torch.float32, device=device).normal_()
    else:
        return torch.empty(batch_size, dim_z, dtype=torch.float32, device=device).uniform_() 
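A minimal usage sketch for sample_z() above (the batch size, latent dimension, and CPU device are arbitrary choices for illustration):

device = torch.device('cpu')
z_normal = sample_z(batch_size=16, dim_z=128, device=device)     # N(0, 1) noise
z_uniform = sample_z(batch_size=16, dim_z=128, device=device,
                     distribution='uniform')                     # U(0, 1) noise
assert z_normal.shape == (16, 128) and z_uniform.shape == (16, 128)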
Example #20
Source File: segmentation_mask.py    From Parsing-R-CNN with MIT License
def convert_to_binarymask(self):
        if len(self) > 0:
            masks = torch.stack([p.convert_to_binarymask() for p in self.polygons])
        else:
            size = self.size
            masks = torch.empty([0, size[1], size[0]], dtype=torch.uint8)

        return BinaryMaskList(masks, size=self.size) 
Example #21
Source File: adjust_smooth_l1_loss.py    From Parsing-R-CNN with MIT License
def __init__(self, num_features, momentum=0.1, beta=1. / 9):
        super(AdjustSmoothL1Loss, self).__init__()
        self.num_features = num_features
        self.momentum = momentum
        self.beta = beta
        self.register_buffer(
            'running_mean', torch.empty(num_features).fill_(beta)
        )
        self.register_buffer('running_var', torch.zeros(num_features)) 
Example #22
Source File: test_assigner.py    From mmdetection with Apache License 2.0
def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """Test corner case where an network might predict no boxes and
    ignore_iof_thr is on."""
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    gt_labels = torch.LongTensor([2, 3])

    # Test with gt_labels
    assign_result = self.assign(
        bboxes,
        gt_bboxes,
        gt_labels=gt_labels,
        gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )

    # Test without gt_labels
    assign_result = self.assign(
        bboxes, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None 
Example #23
Source File: __init__.py    From sato with Apache License 2.0
def __init__(self, num_tags: int, batch_first: bool = False) -> None:
        if num_tags <= 0:
            raise ValueError(f'invalid number of tags: {num_tags}')
        super().__init__()
        self.num_tags = num_tags
        self.batch_first = batch_first
        self.start_transitions = nn.Parameter(torch.empty(num_tags))
        self.end_transitions = nn.Parameter(torch.empty(num_tags))
        self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))

        self.reset_parameters() 
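The parameters above are deliberately created uninitialized with torch.empty() and then filled by reset_parameters(). A hypothetical companion initializer following the usual CRF pattern might look like the sketch below; the (-0.1, 0.1) range is an assumption for illustration, not necessarily this project's choice:

def reset_parameters(self) -> None:
    # Hypothetical initializer: small uniform values for all transition scores.
    nn.init.uniform_(self.start_transitions, -0.1, 0.1)
    nn.init.uniform_(self.end_transitions, -0.1, 0.1)
    nn.init.uniform_(self.transitions, -0.1, 0.1)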
Example #24
Source File: loss.py    From Res2Net-maskrcnn with MIT License
def __call__(self, proposals, mask_logits, targets):
        """
        Arguments:
            proposals (list[BoxList])
            mask_logits (Tensor)
            targets (list[BoxList])

        Return:
            mask_loss (Tensor): scalar tensor containing the loss
        """
        labels, mask_targets = self.prepare_targets(proposals, targets)

        labels = cat(labels, dim=0)
        mask_targets = cat(mask_targets, dim=0)

        positive_inds = torch.nonzero(labels > 0).squeeze(1)
        labels_pos = labels[positive_inds]

        # torch.mean (in binary_cross_entropy_with_logits) doesn't
        # accept empty tensors, so handle it separately
        if mask_targets.numel() == 0:
            return mask_logits.sum() * 0

        mask_loss = F.binary_cross_entropy_with_logits(
            mask_logits[positive_inds, labels_pos], mask_targets
        )
        return mask_loss 
Example #25
Source File: structures.py    From mmdetection with Apache License 2.0
def to_tensor(self, dtype, device):
        """See :func:`BaseInstanceMasks.to_tensor`."""
        if len(self.masks) == 0:
            return torch.empty((0, self.height, self.width),
                               dtype=dtype,
                               device=device)
        ndarray_masks = self.to_ndarray()
        return torch.tensor(ndarray_masks, dtype=dtype, device=device) 
Example #26
Source File: structures.py    From mmdetection with Apache License 2.0
def expand(self, expanded_h, expanded_w, top, left):
        """See :func:`BaseInstanceMasks.expand`."""
        if len(self.masks) == 0:
            expanded_mask = np.empty((0, expanded_h, expanded_w),
                                     dtype=np.uint8)
        else:
            expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
                                     dtype=np.uint8)
            expanded_mask[:, top:top + self.height,
                          left:left + self.width] = self.masks
        return BitmapMasks(expanded_mask, expanded_h, expanded_w) 
Example #27
Source File: loss.py    From Parsing-R-CNN with MIT License
def project_masks_on_boxes(segmentation_masks, proposals, resolution):
    """
    Given segmentation masks and the bounding boxes corresponding
    to the location of the masks in the image, this function
    crops and resizes the masks in the position defined by the
    boxes. This prepares the masks for them to be fed to the
    loss computation as the targets.

    Arguments:
        segmentation_masks: an instance of SegmentationMask
        proposals: an instance of BoxList
    """
    masks = []
    h, w = resolution
    device = proposals.bbox.device
    proposals = proposals.convert("xyxy")
    assert segmentation_masks.size == proposals.size, "{}, {}".format(
        segmentation_masks, proposals
    )

    # FIXME: CPU computation bottleneck, this should be parallelized
    proposals = proposals.bbox.to(torch.device("cpu"))
    for segmentation_mask, proposal in zip(segmentation_masks, proposals):
        # crop the masks, resize them to the desired resolution and
        # then convert them to the tensor representation.
        cropped_mask = segmentation_mask.crop(proposal)
        scaled_mask = cropped_mask.resize((w, h))
        mask = scaled_mask.get_mask_tensor()
        masks.append(mask)

    if len(masks) == 0:
        return torch.empty(0, dtype=torch.float32, device=device)
    return torch.stack(masks, dim=0).to(device, dtype=torch.float32) 
Example #28
Source File: test_lorentz_math.py    From geoopt with Apache License 2.0
def a(seed, k):
    a = torch.empty(100, 10, dtype=k.dtype).normal_(-1, 1)
    a /= a.norm(dim=-1, keepdim=True)
    a *= (torch.rand_like(k) * k) ** 0.5
    return lorentz.math.project(a, k=k) 
Example #29
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def a(seed, c, manifold, B, dtype):
    r = manifold.radius
    a = torch.empty(B, 10, dtype=dtype).normal_(-1, 1)
    a /= a.norm(dim=-1, keepdim=True)
    a *= torch.where(torch.isfinite(r), r, torch.ones((), dtype=dtype)).clamp_max_(100)
    a *= torch.rand_like(a)
    return manifold.projx(a).detach().requires_grad_(True) 
Example #30
Source File: test_lorentz_math.py    From geoopt with Apache License 2.0
def b(seed, k):
    b = torch.empty(100, 10, dtype=k.dtype).normal_(-1, 1)
    b /= b.norm(dim=-1, keepdim=True)
    b *= (torch.rand_like(k) * k) ** 0.5
    return lorentz.math.project(b, k=k)