Python torch.nn.functional.interpolate() Examples

The following are 30 code examples of torch.nn.functional.interpolate(). Each example is taken from an open-source project; the source file, project, and license are listed above each snippet. You may also want to check out all available functions and classes of the module torch.nn.functional, or try the search function.
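For quick reference, here is a minimal, self-contained sketch of the two most common calling conventions, resizing to an explicit output size versus resizing by a scale_factor; the tensor shapes and parameter values below are illustrative only and are not taken from any of the projects listed here.

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)  # N x C x H x W input

# Resize to an explicit spatial size.
y1 = F.interpolate(x, size=(64, 64), mode='bilinear', align_corners=False)

# Resize by a multiplicative factor instead of an absolute size.
y2 = F.interpolate(x, scale_factor=2, mode='nearest')

print(y1.shape)  # torch.Size([1, 3, 64, 64])
print(y2.shape)  # torch.Size([1, 3, 64, 64])

Note that align_corners only applies to the linear-family modes ('linear', 'bilinear', 'bicubic', 'trilinear'); many of the bilinear calls in the examples below pass it explicitly, while the 'nearest' calls omit it.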
Example #1
Source File: hrfpn.py    From mmdetection with Apache License 2.0
def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_ins
        outs = [inputs[0]]
        for i in range(1, self.num_ins):
            outs.append(
                F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
        out = torch.cat(outs, dim=1)
        if out.requires_grad and self.with_cp:
            out = checkpoint(self.reduction_conv, out)
        else:
            out = self.reduction_conv(out)
        outs = [out]
        for i in range(1, self.num_outs):
            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
        outputs = []

        for i in range(self.num_outs):
            if outs[i].requires_grad and self.with_cp:
                tmp_out = checkpoint(self.fpn_convs[i], outs[i])
            else:
                tmp_out = self.fpn_convs[i](outs[i])
            outputs.append(tmp_out)
        return tuple(outputs) 
Example #2
Source File: CustomLayers.py    From BMSG-GAN with MIT License
def forward(self, x):
        """
        forward pass of the block
        :param x: input
        :return: y => output
        """
        from torch.nn.functional import interpolate

        y = interpolate(x, scale_factor=2)
        y = self.pixNorm(self.lrelu(self.conv_1(y)))
        y = self.pixNorm(self.lrelu(self.conv_2(y)))

        return y


# function to calculate the exponential moving averages for the Generator weights
# (this function updates the exponential average weights based on the current training)
Example #3
Source File: hrfpn.py    From AerialDetection with Apache License 2.0
def forward(self, inputs):
        assert len(inputs) == self.num_ins
        outs = [inputs[0]]
        for i in range(1, self.num_ins):
            outs.append(
                F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
        out = torch.cat(outs, dim=1)
        if out.requires_grad and self.with_cp:
            out = checkpoint(self.reduction_conv, out)
        else:
            out = self.reduction_conv(out)
        outs = [out]
        for i in range(1, self.num_outs):
            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
        outputs = []

        for i in range(self.num_outs):
            if outs[i].requires_grad and self.with_cp:
                tmp_out = checkpoint(self.fpn_convs[i], outs[i])
            else:
                tmp_out = self.fpn_convs[i](outs[i])
            outputs.append(tmp_out)
        return tuple(outputs) 
Example #4
Source File: test_merge_cells.py    From mmdetection with Apache License 2.0
def test_resize_methods():
    inputs_x = torch.randn([2, 256, 128, 128])
    target_resize_sizes = [(128, 128), (256, 256)]
    resize_methods_list = ['nearest', 'bilinear']

    for method in resize_methods_list:
        merge_cell = BaseMergeCell(upsample_mode=method)
        for target_size in target_resize_sizes:
            merge_cell_out = merge_cell._resize(inputs_x, target_size)
            gt_out = F.interpolate(inputs_x, size=target_size, mode=method)
            assert merge_cell_out.equal(gt_out)

    target_size = (64, 64)  # resize to a smaller size
    merge_cell = BaseMergeCell()
    merge_cell_out = merge_cell._resize(inputs_x, target_size)
    kernel_size = inputs_x.shape[-1] // target_size[-1]
    gt_out = F.max_pool2d(
        inputs_x, kernel_size=kernel_size, stride=kernel_size)
    assert (merge_cell_out == gt_out).all() 
Example #5
Source File: module.py    From SegmenTron with Apache License 2.0
def forward(self, x):
        pool = self.image_pooling(x)
        pool = F.interpolate(pool, size=x.shape[2:], mode='bilinear', align_corners=True)

        x0 = self.aspp0(x)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x = torch.cat((pool, x0, x1, x2, x3), dim=1)

        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.dropout(x)

        return x

# -----------------------------------------------------------------
#                 For PSPNet, fast_scnn
# ----------------------------------------------------------------- 
Example #6
Source File: GAN.py    From BMSG-GAN with MIT License
def create_grid(self, samples, img_files):
        """
        utility function to create a grid of GAN samples
        :param samples: generated samples for storing list[Tensors]
        :param img_files: list of names of files to write
        :return: None (saves multiple files)
        """
        from torchvision.utils import save_image
        from torch.nn.functional import interpolate
        from numpy import sqrt, power

        # dynamically adjust the colour of the images
        samples = [Generator.adjust_dynamic_range(sample) for sample in samples]

        # resize the samples to have same resolution:
        for i in range(len(samples)):
            samples[i] = interpolate(samples[i],
                                     scale_factor=power(2,
                                                        self.depth - 1 - i))
        # save the images:
        for sample, img_file in zip(samples, img_files):
            save_image(sample, img_file, nrow=int(sqrt(sample.shape[0])),
                       normalize=True, scale_each=True, padding=0) 
Example #7
Source File: gce_heads.py    From Parsing-R-CNN with MIT License
def forward(self, x, proposals):
        resolution = cfg.PRCNN.ROI_XFORM_RESOLUTION
        x = self.pooler(x, proposals)
        roi_feature = x

        if self.conv_before_asppv3 is not None:
            x = self.conv_before_asppv3(x)

        asppv3_out = [F.interpolate(self.im_pool(x), scale_factor=resolution, mode="bilinear", align_corners=False)]
        for i in range(len(self.asppv3)):
            asppv3_out.append(self.asppv3[i](x))
        asppv3_out = torch.cat(asppv3_out, 1)
        asppv3_out = self.feat(asppv3_out)

        if self.conv_after_asppv3 is not None:
            x = self.conv_after_asppv3(asppv3_out)
        return x, roi_feature 
Example #8
Source File: gce_heads.py    From Parsing-R-CNN with MIT License
def forward(self, x, proposals):
        resolution = cfg.PRCNN.ROI_XFORM_RESOLUTION
        x = self.pooler(x, proposals)
        roi_feature = x

        x_hres = x
        x = self.subsample(x)
        if self.conv_before_asppv3 is not None:
            x = self.conv_before_asppv3(x)

        x_size = (resolution[0] // 2, resolution[1] // 2)
        asppv3_out = [F.interpolate(self.im_pool(x), scale_factor=x_size, mode="bilinear", align_corners=False)]
        for i in range(len(self.asppv3)):
            asppv3_out.append(self.asppv3[i](x))
        asppv3_out = torch.cat(asppv3_out, 1)
        asppv3_out = self.feat(asppv3_out)

        if self.conv_after_asppv3 is not None:
            x = self.conv_after_asppv3(asppv3_out)
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False) + self.lateral(x_hres)
        return x, roi_feature 
Example #9
Source File: HRFPN.py    From Parsing-R-CNN with MIT License
def forward(self, x):
        outs = [x[0]]
        for i in range(1, len(x)):
            outs.append(F.interpolate(x[i], scale_factor=2**i, mode='bilinear'))
        out = torch.cat(outs, dim=1)
        out = self.reduction_conv(out)

        outs = [out]
        for i in range(1, self.num_output):
            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
        fpn_output_blobs = []
        for i in range(self.num_output):
            fpn_output_blobs.append(self.fpn_conv[i](outs[i]))

        # use all levels
        return fpn_output_blobs  # [P2 - P6] 
Example #10
Source File: fpn.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def forward(self, xs):
        xs = xs[self.min_level:self.min_level + self.levels]

        ref_size = xs[0].shape[-2:]
        interp_params = {"mode": self.interpolation}
        if self.interpolation == "bilinear":
            interp_params["align_corners"] = False

        for i, output in enumerate(self.output):
            xs[i] = output(xs[i])
            if i > 0:
                xs[i] = functional.interpolate(xs[i], size=ref_size, **interp_params)

        xs = torch.cat(xs, dim=1)
        xs = self.conv_sem(xs)

        return xs 
Example #11
Source File: models.py    From VTuber_Unity with MIT License
def _forward(self, level, inp):
        # Upper branch
        up1 = inp
        up1 = self._modules['b1_' + str(level)](up1)

        # Lower branch
        low1 = F.avg_pool2d(inp, 2, stride=2)
        low1 = self._modules['b2_' + str(level)](low1)

        if level > 1:
            low2 = self._forward(level - 1, low1)
        else:
            low2 = low1
            low2 = self._modules['b2_plus_' + str(level)](low2)

        low3 = low2
        low3 = self._modules['b3_' + str(level)](low3)

        up2 = F.interpolate(low3, scale_factor=2, mode='nearest')

        return up1 + up2 
Example #12
Source File: network.py    From EMANet with GNU General Public License v3.0
def forward(self, img, lbl=None, size=None):
        x = self.extractor(img)
        x = self.fc0(x)
        x, mu = self.emau(x)
        x = self.fc1(x)
        x = self.fc2(x)

        if size is None:
            size = img.size()[-2:]
        pred = F.interpolate(x, size=size, mode='bilinear', align_corners=True)

        if self.training and lbl is not None:
            loss = self.crit(pred, lbl)
            return loss, mu
        else:
            return pred 
Example #13
Source File: late_fusion.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
def make_pred(self, model, frames):
        # Upsample prediction to frame length (because we want prediction for each frame)
        return F.interpolate(model(frames), frames.size(2), mode="linear", align_corners=True) 
Example #14
Source File: lstm_learner.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
def make_pred(self, model, frames):
        # Upsample prediction to frame length (because we want prediction for each frame)
        return F.interpolate(model(frames), frames.size(2), mode="linear", align_corners=True) 
Example #15
Source File: fusion_learner.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License
def make_pred(self, model, frames):
        # Upsample prediction to frame length (because we want prediction for each frame)
        return F.interpolate(model(frames), frames.size(2), mode="linear", align_corners=True) 
Example #16
Source File: inference.py    From R2CNN.pytorch with MIT License
def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
    padded_mask, scale = expand_masks(mask[None], padding=padding)
    mask = padded_mask[0, 0]
    box = expand_boxes(box[None], scale)[0]
    box = box.to(dtype=torch.int32)

    TO_REMOVE = 1
    w = int(box[2] - box[0] + TO_REMOVE)
    h = int(box[3] - box[1] + TO_REMOVE)
    w = max(w, 1)
    h = max(h, 1)

    # Set shape to [batchxCxHxW]
    mask = mask.expand((1, 1, -1, -1))

    # Resize mask
    mask = mask.to(torch.float32)
    mask = F.interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)
    mask = mask[0][0]

    if thresh >= 0:
        mask = mask > thresh
    else:
        # for visualization and debugging, we also
        # allow it to return an unmodified mask
        mask = (mask * 255).to(torch.bool)

    im_mask = torch.zeros((im_h, im_w), dtype=torch.bool)
    x_0 = max(box[0], 0)
    x_1 = min(box[2] + 1, im_w)
    y_0 = max(box[1], 0)
    y_1 = min(box[3] + 1, im_h)

    im_mask[y_0:y_1, x_0:x_1] = mask[
        (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
    ]
    return im_mask 
Example #17
Source File: network_exp_guided_enc_dec.py    From nconv with GNU General Public License v3.0
def forward(self, x0_d, c0, x0_rgb ):  
        
        # Depth Network
        xout_d, cout_d = self.d_net(x0_d, c0)

        # U-Net
        x1 = F.relu(self.conv1(torch.cat((xout_d, x0_rgb,cout_d),1)))
        x2 = F.relu(self.conv2(x1))
        x3 = F.relu(self.conv3(x2))
        x4 = F.relu(self.conv4(x3))
        x5 = F.relu(self.conv5(x4))

        # Upsample 1 
        x5u = F.interpolate(x5, x4.size()[2:], mode='nearest')
        x6 = F.leaky_relu(self.conv6(torch.cat((x5u, x4),1)), 0.2)
        
        # Upsample 2
        x6u = F.interpolate(x6, x3.size()[2:], mode='nearest')
        x7 = F.leaky_relu(self.conv7(torch.cat((x6u, x3),1)), 0.2)
        
        # Upsample 3
        x7u = F.interpolate(x7, x2.size()[2:], mode='nearest')
        x8 = F.leaky_relu(self.conv8(torch.cat((x7u, x2),1)), 0.2)
        
        # Upsample 4
        x8u = F.interpolate(x8, x1.size()[2:], mode='nearest')
        x9 = F.leaky_relu(self.conv9(torch.cat((x8u, x1),1)), 0.2)
                
        # Upsample 5
        x9u = F.interpolate(x9, x0_d.size()[2:], mode='nearest')
        xout = F.leaky_relu(self.conv10(torch.cat((x9u, x0_d),1)), 0.2)
        
        return xout, cout_d 
Example #18
Source File: semantic_seg.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def _pack_logits(sem_logits, valid_size, img_size):
        sem_logits = functional.interpolate(sem_logits, size=img_size, mode="bilinear", align_corners=False)
        return pack_padded_images(sem_logits, valid_size) 
Example #19
Source File: bfp.py    From mmdetection with Apache License 2.0
def forward(self, inputs):
        """Forward function."""
        assert len(inputs) == self.num_levels

        # step 1: gather multi-level features by resize and average
        feats = []
        gather_size = inputs[self.refine_level].size()[2:]
        for i in range(self.num_levels):
            if i < self.refine_level:
                gathered = F.adaptive_max_pool2d(
                    inputs[i], output_size=gather_size)
            else:
                gathered = F.interpolate(
                    inputs[i], size=gather_size, mode='nearest')
            feats.append(gathered)

        bsf = sum(feats) / len(feats)

        # step 2: refine gathered features
        if self.refine_type is not None:
            bsf = self.refine(bsf)

        # step 3: scatter refined features to multi-levels by a residual path
        outs = []
        for i in range(self.num_levels):
            out_size = inputs[i].size()[2:]
            if i < self.refine_level:
                residual = F.interpolate(bsf, size=out_size, mode='nearest')
            else:
                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
            outs.append(residual + inputs[i])

        return tuple(outs) 
Example #20
Source File: misc.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def forward(self, x):
        return functional.interpolate(x, self.size, self.scale_factor, self.mode, self.align_corners) 
Example #21
Source File: fpn.py    From seamseg with BSD 3-Clause "New" or "Revised" License
def forward(self, xs):
        """Feature Pyramid Network module

        Parameters
        ----------
        xs : sequence of torch.Tensor
            The input feature maps, tensors with shapes N x C_i x H_i x W_i

        Returns
        -------
        ys : sequence of torch.Tensor
            The output feature maps, tensors with shapes N x K x H_i x W_i
        """
        ys = []
        interp_params = {"mode": self.interpolation}
        if self.interpolation == "bilinear":
            interp_params["align_corners"] = False

        # Build pyramid
        for x_i, lateral_i in zip(xs[::-1], self.lateral[::-1]):
            x_i = lateral_i(x_i)
            if len(ys) > 0:
                x_i = x_i + functional.interpolate(ys[0], size=x_i.shape[-2:], **interp_params)
            ys.insert(0, x_i)

        # Compute outputs
        ys = [output_i(y_i) for y_i, output_i in zip(ys, self.output)]

        # Compute extra outputs if necessary
        if hasattr(self, "extra"):
            y = xs[-1]
            for extra_i in self.extra:
                y = extra_i(y)
                ys.append(y)

        return ys 
Example #22
Source File: point_rend_roi_head.py    From mmdetection with Apache License 2.0
def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
                                 img_metas):
        """Mask refining process with point head in testing."""
        refined_mask_pred = mask_pred.clone()
        for subdivision_step in range(self.test_cfg.subdivision_steps):
            refined_mask_pred = F.interpolate(
                refined_mask_pred,
                scale_factor=self.test_cfg.scale_factor,
                mode='bilinear',
                align_corners=False)
            # If `subdivision_num_points` is larger or equal to the
            # resolution of the next step, then we can skip this step
            num_rois, channels, mask_height, mask_width = \
                refined_mask_pred.shape
            if (self.test_cfg.subdivision_num_points >=
                    self.test_cfg.scale_factor**2 * mask_height * mask_width
                    and
                    subdivision_step < self.test_cfg.subdivision_steps - 1):
                continue
            point_indices, rel_roi_points = \
                self.point_head.get_roi_rel_points_test(
                    refined_mask_pred, label_pred, cfg=self.test_cfg)
            fine_grained_point_feats = self._get_fine_grained_point_feats(
                x, rois, rel_roi_points, img_metas)
            coarse_point_feats = point_sample(mask_pred, rel_roi_points)
            mask_point_pred = self.point_head(fine_grained_point_feats,
                                              coarse_point_feats)

            point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
            refined_mask_pred = refined_mask_pred.reshape(
                num_rois, channels, mask_height * mask_width)
            refined_mask_pred = refined_mask_pred.scatter_(
                2, point_indices, mask_point_pred)
            refined_mask_pred = refined_mask_pred.view(num_rois, channels,
                                                       mask_height, mask_width)

        return refined_mask_pred 
Example #23
Source File: merge_cells.py    From mmdetection with Apache License 2.0
def _resize(self, x, size):
        if x.shape[-2:] == size:
            return x
        elif x.shape[-2:] < size:
            return F.interpolate(x, size=size, mode=self.upsample_mode)
        else:
            assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0
            kernel_size = x.shape[-1] // size[-1]
            x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
            return x 
Example #24
Source File: outputs.py    From Parsing-R-CNN with MIT License
def forward(self, x):
        x = self.parsing_score_lowres(x)
        if self.up_scale > 1:
            x = F.interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False)

        return x 
Example #25
Source File: outputs.py    From Parsing-R-CNN with MIT License
def forward(self, x):
        x_Ann = self.deconv_Ann(x)
        x_Index = self.deconv_Index(x)
        x_U = self.deconv_U(x)
        x_V = self.deconv_V(x)

        if self.up_scale > 1:
            x_Ann = F.interpolate(x_Ann, scale_factor=self.up_scale, mode="bilinear", align_corners=False)
            x_Index = F.interpolate(x_Index, scale_factor=self.up_scale, mode="bilinear", align_corners=False)
            x_U = F.interpolate(x_U, scale_factor=self.up_scale, mode="bilinear", align_corners=False)
            x_V = F.interpolate(x_V, scale_factor=self.up_scale, mode="bilinear", align_corners=False)

        return [x_Ann, x_Index, x_U, x_V] 
Example #26
Source File: outputs.py    From Parsing-R-CNN with MIT License
def forward(self, x):
        x = self.kps_score_lowres(x)
        if self.up_scale > 1:
            x = F.interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False)

        return x 
Example #27
Source File: generate_samples.py    From BMSG-GAN with MIT License
def progressive_upscaling(images):
    """
    upsamples all images to the highest size ones
    :param images: list of images with progressively growing resolutions
    :return: images => images upscaled to same size
    """
    with th.no_grad():
        for factor in range(1, len(images)):
            images[len(images) - 1 - factor] = interpolate(
                images[len(images) - 1 - factor],
                scale_factor=pow(2, factor)
            )

    return images 
Example #28
Source File: generate_multi_scale_samples.py    From BMSG-GAN with MIT License
def progressive_upscaling(images):
    """
    upsamples all images to the highest size ones
    :param images: list of images with progressively growing resolutions
    :return: images => images upscaled to same size
    """
    with th.no_grad():
        for factor in range(1, len(images)):
            images[len(images) - 1 - factor] = interpolate(
                images[len(images) - 1 - factor],
                scale_factor=pow(2, factor)
            )

    return images 
Example #29
Source File: albunet.py    From neural-pipeline with MIT License
def forward(self, x):
        x = F.interpolate(x, scale_factor=2)
        return self.layer(x) 
Example #30
Source File: fpn.py    From R2CNN.pytorch with MIT License
def forward(self, x):
        """
        Arguments:
            x (list[Tensor]): feature maps for each feature level.
        Returns:
            results (tuple[Tensor]): feature maps after FPN layers.
                They are ordered from highest resolution first.
        """
        last_inner = getattr(self, self.inner_blocks[-1])(x[-1])
        results = []
        results.append(getattr(self, self.layer_blocks[-1])(last_inner))
        for feature, inner_block, layer_block in zip(
            x[:-1][::-1], self.inner_blocks[:-1][::-1], self.layer_blocks[:-1][::-1]
        ):
            if not inner_block:
                continue
            inner_top_down = F.interpolate(last_inner, scale_factor=2, mode="nearest")
            inner_lateral = getattr(self, inner_block)(feature)
            # TODO use size instead of scale to make it robust to different sizes
            # inner_top_down = F.upsample(last_inner, size=inner_lateral.shape[-2:],
            # mode='bilinear', align_corners=False)
            last_inner = inner_lateral + inner_top_down
            results.insert(0, getattr(self, layer_block)(last_inner))

        if isinstance(self.top_blocks, LastLevelP6P7):
            last_results = self.top_blocks(x[-1], results[-1])
            results.extend(last_results)
        elif isinstance(self.top_blocks, LastLevelMaxPool):
            last_results = self.top_blocks(results[-1])
            results.extend(last_results)

        return tuple(results)