Python torchvision.transforms.functional.pad() Examples
The following are 30 code examples of torchvision.transforms.functional.pad().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
torchvision.transforms.functional, or try the search function.
Example #1
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #2
Source File: transforms.py From medicaltorch with Apache License 2.0 | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #3
Source File: randomcrop.py From DSC-PyTorch with MIT License | 6 votes |
def __call__(self, img, img_gt):
    """Randomly crop *img* and *img_gt* with the same coordinates.

    Args:
        img: image array to be cropped.
        img_gt: ground-truth array, cropped identically.

    Returns:
        Tuple of the two cropped arrays.
    """
    if self.padding > 0:
        img = F.pad(img, self.padding)
    th, tw = self.size
    # Pad symmetrically when the image is narrower than the target width.
    if self.pad_if_needed and img.size[0] < tw:
        img = F.pad(img, (int((1 + tw - img.size[0]) / 2), 0))
    # Pad symmetrically when the image is shorter than the target height.
    if self.pad_if_needed and img.size[1] < th:
        img = F.pad(img, (0, int((1 + th - img.size[1]) / 2)))
    top, left, _, _ = self.get_params(img, self.size)
    return (img[top:top + th, left:left + tw, :],
            img_gt[top:top + th, left:left + tw, :])
    # return F.crop(img, i, j, h, w), F.crop(img_gt, i, j, h, w)
Example #4
Source File: transforms.py From person-reid-lib with MIT License | 6 votes |
def _get_params(self, images):
    """Compute random-crop parameters from the first image of a nested list.

    Args:
        images: possibly nested tuple/list of image wrappers; the first
            leaf element's ``.img`` is used as the reference image.

    Returns:
        Crop parameters as produced by ``self.get_params``.
    """
    # Drill down to the first leaf element.
    while isinstance(images, (tuple, list)):
        images = images[0]
    reference = images.img
    if self.padding is not None:
        reference = F.pad(reference, self.padding, self.fill, self.padding_mode)
    # Pad the width if the reference is narrower than the target.
    if self.pad_if_needed and reference.size[0] < self.size[1]:
        reference = F.pad(reference, (self.size[1] - reference.size[0], 0),
                          self.fill, self.padding_mode)
    # Pad the height if the reference is shorter than the target.
    if self.pad_if_needed and reference.size[1] < self.size[0]:
        reference = F.pad(reference, (0, self.size[0] - reference.size[1]),
                          self.fill, self.padding_mode)
    return self.get_params(reference, self.size)
Example #5
Source File: transforms.py From person-reid-lib with MIT License | 6 votes |
def pad_func(self, img, params):
    """Pad ``img.img`` and its optional companion maps ``img.x`` / ``img.y``.

    Applies the configured fixed padding, then pads width/height further
    when ``pad_if_needed`` is set and the image is smaller than
    ``self.size``. All three images always receive identical padding.

    Fix: the original recomputed the pad-if-needed amount for ``img.x`` /
    ``img.y`` from the *already padded* ``img.img``, so the companion maps
    received a different (possibly negative) padding than the main image.
    Each padding is now computed once and applied uniformly.

    Args:
        img: wrapper holding ``.img`` and optional ``.x`` / ``.y`` images.
        params: unused; kept for interface compatibility with callers.

    Returns:
        The same wrapper with its images padded in place.
    """
    def _pad_all(padding):
        # Apply one padding identically to the image and its companions.
        img.img = F.pad(img.img, padding, self.fill, self.padding_mode)
        if img.x is not None:
            img.x = F.pad(img.x, padding, self.fill, self.padding_mode)
        if img.y is not None:
            img.y = F.pad(img.y, padding, self.fill, self.padding_mode)

    if self.padding is not None:
        _pad_all(self.padding)
    # Pad the width if needed (amount computed BEFORE padding is applied).
    if self.pad_if_needed and img.img.size[0] < self.size[1]:
        _pad_all((self.size[1] - img.img.size[0], 0))
    # Pad the height if needed.
    if self.pad_if_needed and img.img.size[1] < self.size[0]:
        _pad_all((0, self.size[0] - img.img.size[1]))
    return img
Example #6
Source File: transforms.py From NAS-FCOS with BSD 2-Clause "Simplified" License | 6 votes |
def __call__(self, image, target):
    """Randomly crop *image* (and *target*) to ``self.crop_size``, padding
    the result back up to a square crop when the source is smaller.

    Repeats the random placement until the cropped target is non-empty
    (``target.crop`` must not mutate the original target).
    """
    w, h = image.size
    crop_h = min(h, self.crop_size)
    crop_w = min(w, self.crop_size)
    while True:
        y0 = np.random.randint(0, h - crop_h + 1)
        x0 = np.random.randint(0, w - crop_w + 1)
        region = (x0, y0, x0 + crop_w, y0 + crop_h)
        # If Densebox does not support empty targets, this loop must not
        # be allowed to yield one — hence the retry until non-empty.
        candidate = target.crop(region, remove_empty=True)
        if len(candidate) > 0:
            target = candidate
            break
    image = F.crop(image, y0, x0, crop_h, crop_w)
    if crop_h < self.crop_size or crop_w < self.crop_size:
        # Pad right/bottom up to the requested square size.
        padding = (0, 0, self.crop_size - crop_w, self.crop_size - crop_h)
        image = F.pad(image, padding=padding)
        target = target.pad(padding)
    return image, target
Example #7
Source File: cvfunctional.py From opencv_transforms_torchvision with MIT License | 6 votes |
def pil_transform(img):
    """Convert a PIL image to a tensor.

    This example exists to document ``torchvision.transforms.functional``:
    other operations (resize, normalize, pad, crop, center_crop,
    resized_crop, hflip/vflip, five_crop, brightness/contrast/saturation/
    hue/gamma adjustment, rotate, to_grayscale, affine) can be chained in
    before the final conversion; only ``to_tensor`` runs here.
    """
    return functional.to_tensor(img)
Example #8
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #9
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #10
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #11
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #12
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #13
Source File: video_transforms.py From Sound-of-Pixels with MIT License | 6 votes |
def __call__(self, frames):
    """Crop every frame in *frames* using one shared set of parameters.

    Args:
        frames: list of PIL Images.

    Returns:
        List of cropped PIL Images.
    """
    # NOTE(review): the crop parameters are computed from the *unpadded*
    # frames, while padding (below) is applied per frame afterwards — if
    # pad_if_needed fires, (i, j) do not account for the added border.
    # Confirm against the upstream project before changing.
    top, left, crop_h, crop_w = self.get_params(frames, self.size)
    cropped = []
    for frame in frames:
        if self.padding is not None:
            frame = F.pad(frame, self.padding, self.fill, self.padding_mode)
        # Pad the width if needed.
        if self.pad_if_needed and frame.size[0] < self.size[1]:
            frame = F.pad(frame, (int((1 + self.size[1] - frame.size[0]) / 2), 0),
                          self.fill, self.padding_mode)
        # Pad the height if needed.
        if self.pad_if_needed and frame.size[1] < self.size[0]:
            frame = F.pad(frame, (0, int((1 + self.size[0] - frame.size[1]) / 2)),
                          self.fill, self.padding_mode)
        cropped.append(F.crop(frame, top, left, crop_h, crop_w))
    return cropped
Example #14
Source File: load_data.py From integer_discrete_flows with MIT License | 6 votes |
def __call__(self, img):
    """Pad *img* on the right/bottom so both dimensions become multiples
    of ``self.multiple``.

    Args:
        img (PIL Image): image to be padded.

    Returns:
        PIL Image: padded image (unchanged when already aligned).
    """
    w, h = img.size
    m = self.multiple
    # Ceiling-divide each dimension up to the next multiple of m.
    target_w = -(-w // m) * m
    target_h = -(-h // m) * m
    return vf.pad(img, (0, 0, target_w - w, target_h - h),
                  self.fill, self.padding_mode)
Example #15
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #16
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #17
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #18
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #19
Source File: video_transforms.py From pvse with MIT License | 6 votes |
def __call__(self, img):
    """Randomly crop *img* to ``self.size``, padding first when configured.

    Args:
        img (PIL Image): image to be cropped.

    Returns:
        PIL Image: the cropped image.
    """
    if self.padding is not None:
        img = F.pad(img, self.padding, self.fill, self.padding_mode)
    target_h, target_w = self.size
    # Pad the width if needed.
    if self.pad_if_needed and img.size[0] < target_w:
        img = F.pad(img, (target_w - img.size[0], 0),
                    self.fill, self.padding_mode)
    # Pad the height if needed.
    if self.pad_if_needed and img.size[1] < target_h:
        img = F.pad(img, (0, target_h - img.size[1]),
                    self.fill, self.padding_mode)
    top, left, h, w = self.get_params(img, self.size)
    return F.crop(img, top, left, h, w)
Example #20
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #21
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #22
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #23
Source File: transforms.py From yolo_nano with MIT License | 6 votes |
def __call__(self, image, bboxes):
    """Randomly crop *image* and keep *bboxes* consistent with it.

    Fixes two defects in the original:
    * ``bboxes.pad(padding)`` referenced an undefined name ``padding``
      (NameError whenever ``self.padding`` was set); it now uses
      ``self.padding``, the padding actually applied to the image.
    * the pad-if-needed amounts for the boxes were recomputed from the
      *already padded* image; each amount is now computed once and applied
      to both the image and the boxes.

    Args:
        image (PIL Image): image to be cropped.
        bboxes: bounding boxes supporting ``.pad`` and ``.crop``.

    Returns:
        Tuple of (cropped image, cropped bboxes).
    """
    if self.padding is not None:
        image = F.pad(image, self.padding, self.fill, self.padding_mode)
        bboxes = bboxes.pad(self.padding)
    # Pad the width if needed (amount computed before padding is applied).
    if self.pad_if_needed and image.size[0] < self.size[1]:
        pad_w = (self.size[1] - image.size[0], 0)
        image = F.pad(image, pad_w, self.fill, self.padding_mode)
        bboxes = bboxes.pad(pad_w)
    # Pad the height if needed.
    if self.pad_if_needed and image.size[1] < self.size[0]:
        pad_h = (0, self.size[0] - image.size[1])
        image = F.pad(image, pad_h, self.fill, self.padding_mode)
        bboxes = bboxes.pad(pad_h)
    i, j, h, w = self.get_params(image, self.size)
    return F.crop(image, i, j, h, w), bboxes.crop((i, j, i + h, j + w))
Example #24
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #25
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #26
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #27
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #28
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #29
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample
Example #30
Source File: transforms.py From U-Net-Fixed-Point-Quantization-for-Medical-Image-Segmentation with MIT License | 6 votes |
def undo_transform(self, sample):
    """Reverse a crop by re-padding ``sample['input']`` to its original size.

    ``get_params`` returns the crop offsets (top, left) and the full
    (width, height) recorded when the forward transform ran.
    """
    crop_top, crop_left, full_w, full_h = self.get_params(sample)
    crop_h, crop_w = self.size
    # Padding on each side equals what the crop removed there.
    left = crop_left
    top = crop_top
    right = full_w - left - crop_w
    bottom = full_h - top - crop_h
    restored = F.pad(sample['input'], (left, top, right, bottom))
    sample.update({'input': restored})
    return sample