Python torchvision.transforms.Resize() Examples
The following are 30 code examples showing how to use torchvision.transforms.Resize(). They are extracted from open-source projects; the project, author, source file, and license are listed above each example. You may also want to check out all other available functions and classes of the torchvision.transforms module.
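As a quick orientation before the examples, here is a minimal sketch of the two calling conventions used throughout them; the input file name is a placeholder.

from PIL import Image
from torchvision import transforms

img = Image.open("example.jpg")  # placeholder input image

# A single int resizes the shorter edge to that length and keeps the aspect ratio;
# a (height, width) sequence resizes to exactly that size.
keep_aspect = transforms.Resize(256)(img)
exact_size = transforms.Resize((224, 224))(img)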
Example 1
Project: transferlearning Author: jindongwang File: data_loader.py License: MIT License

def load_data(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.RandomResizedCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return data_loader
Example 2
Project: transferlearning Author: jindongwang File: data_loader.py License: MIT License

def load_training(root_path, dir, batch_size, kwargs):
    transform = transforms.Compose(
        [transforms.Resize([256, 256]),
         transforms.RandomCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + dir, transform=transform)
    train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
    return train_loader
Example 3
Project: Pytorch-Project-Template Author: moemen95 File: env_utils.py License: MIT License

def get_screen(self, env):
    screen = env.render(mode='rgb_array').transpose((2, 0, 1))  # transpose into torch order (CHW)
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    cart_location = self.get_cart_location(env)
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (self.screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return resize(screen).unsqueeze(0)
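The resize callable used in the last line is defined elsewhere in env_utils.py. A plausible stand-in, modeled on the PyTorch DQN tutorial this utility resembles, is sketched below; the actual output size and interpolation mode in the project may differ.

import torchvision.transforms as T
from PIL import Image

# Hypothetical definition of the `resize` helper referenced above.
resize = T.Compose([T.ToPILImage(),
                    T.Resize(40, interpolation=Image.BICUBIC),
                    T.ToTensor()])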
Example 4
Project: transferlearning Author: jindongwang File: data_load.py License: MIT License

def load_data(data_folder, batch_size, phase='train', train_val_split=True, train_ratio=.8):
    transform_dict = {
        'train': transforms.Compose(
            [transforms.Resize(256),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'test': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=data_folder, transform=transform_dict[phase])
    if phase == 'train':
        if train_val_split:
            train_size = int(train_ratio * len(data))
            test_size = len(data) - train_size
            data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
            train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=4)
            val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=4)
            return [train_loader, val_loader]
        else:
            train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=4)
            return train_loader
    else:
        test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=4)
        return test_loader

## Below are for ImageCLEF datasets
Example 5
Project: transferlearning Author: jindongwang File: data_load.py License: MIT License

def load_imageclef_train(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return train_loader, val_loader
Example 6
Project: transferlearning Author: jindongwang File: data_load.py License: MIT License

def load_imageclef_test(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return data_loader
Example 7
Project: transferlearning Author: jindongwang File: data_loader.py License: MIT License

def load_data(data_folder, batch_size, train, kwargs):
    transform = {
        'train': transforms.Compose(
            [transforms.Resize([256, 256]),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])]),
        'test': transforms.Compose(
            [transforms.Resize([224, 224]),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])])
    }
    data = datasets.ImageFolder(root=data_folder, transform=transform['train' if train else 'test'])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, **kwargs,
                                              drop_last=True if train else False)
    return data_loader
Example 8
Project: transferlearning Author: jindongwang File: data_loader.py License: MIT License

def load_train(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.RandomResizedCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return train_loader, val_loader
Example 9
Project: steppy-toolkit Author: minerva-ml File: segmentation.py License: MIT License

def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                               transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                interpolation=0),
                                              transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])

    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example 10
Project: steppy-toolkit Author: minerva-ml File: segmentation.py License: MIT License

def __init__(self, loader_params, dataset_params, augmentation_params):
    super().__init__(loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                               transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                interpolation=0),
                                              transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.dataset = ImageSegmentationTTADataset
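Examples 9 and 10 resize the masks with interpolation=0 (PIL's NEAREST mode, so label values are not blended) and wrap two helpers, to_array and to_tensor, that are imported elsewhere in steppy-toolkit. The stand-ins below only illustrate the expected behaviour; the real implementations may differ.

import numpy as np
import torch

# Hypothetical stand-ins for the to_array / to_tensor helpers used above.
def to_array(mask):
    # PIL mask image -> uint8 numpy array
    return np.asarray(mask, dtype=np.uint8)

def to_tensor(mask_array):
    # numpy array -> float tensor
    return torch.from_numpy(mask_array.astype(np.float32))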
Example 11
Project: cycada_release Author: jhoffman File: data_loader.py License: BSD 2-Clause "Simplified" License

def get_transform2(dataset_name, net_transform, downscale):
    "Returns image and label transform to downscale, crop and prepare for net."
    orig_size = get_orig_size(dataset_name)
    transform = []
    target_transform = []
    if downscale is not None:
        transform.append(transforms.Resize(orig_size // downscale))
        target_transform.append(
            transforms.Resize(orig_size // downscale,
                              interpolation=Image.NEAREST))
    transform.extend([transforms.Resize(orig_size), net_transform])
    target_transform.extend([transforms.Resize(orig_size, interpolation=Image.NEAREST),
                             to_tensor_raw])
    transform = transforms.Compose(transform)
    target_transform = transforms.Compose(target_transform)
    return transform, target_transform
Example 12
Project: cycada_release Author: jhoffman File: data_loader.py License: BSD 2-Clause "Simplified" License

def get_transform(params, image_size, num_channels):
    # Transforms for PIL Images: Gray <-> RGB
    Gray2RGB = transforms.Lambda(lambda x: x.convert('RGB'))
    RGB2Gray = transforms.Lambda(lambda x: x.convert('L'))

    transform = []

    # Does size request match original size?
    if not image_size == params.image_size:
        transform.append(transforms.Resize(image_size))

    # Does number of channels requested match original?
    if not num_channels == params.num_channels:
        if num_channels == 1:
            transform.append(RGB2Gray)
        elif num_channels == 3:
            transform.append(Gray2RGB)
        else:
            print('NumChannels should be 1 or 3', num_channels)
            raise Exception

    transform += [transforms.ToTensor(),
                  transforms.Normalize((params.mean,), (params.std,))]
    return transforms.Compose(transform)
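A hypothetical call for illustration, assuming a params object with image_size, num_channels, mean, and std fields (the real one comes from the project's configuration): requesting a 32x32, 3-channel output from 28x28 grayscale inputs appends both a Resize and the Gray2RGB conversion.

from types import SimpleNamespace

# Placeholder config; the project supplies its own params object.
params = SimpleNamespace(image_size=28, num_channels=1, mean=0.5, std=0.5)

# 28x28 grayscale -> 32x32 RGB tensors, normalized with (mean,), (std,)
mnist_to_rgb32 = get_transform(params, image_size=32, num_channels=3)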
Example 13
Project: Self-Supervised-Gans-Pytorch Author: vandit15 File: dataloaders.py License: MIT License

def get_mnist_dataloaders(batch_size=128):
    """MNIST dataloader with (32, 32) sized images."""
    # Resize images so they are a power of 2
    all_transforms = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor()
    ])
    # Get train and test data
    train_data = datasets.MNIST('../data', train=True, download=True,
                                transform=all_transforms)
    test_data = datasets.MNIST('../data', train=False,
                               transform=all_transforms)
    # Create dataloaders
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
    return train_loader, test_loader
Example 14
Project: Self-Supervised-Gans-Pytorch Author: vandit15 File: dataloaders.py License: MIT License

def get_fashion_mnist_dataloaders(batch_size=128):
    """Fashion MNIST dataloader with (32, 32) sized images."""
    # Resize images so they are a power of 2
    all_transforms = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor()
    ])
    # Get train and test data
    train_data = datasets.FashionMNIST('../fashion_data', train=True, download=True,
                                       transform=all_transforms)
    test_data = datasets.FashionMNIST('../fashion_data', train=False,
                                      transform=all_transforms)
    # Create dataloaders
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
    return train_loader, test_loader
Example 15
Project: Self-Supervised-Gans-Pytorch Author: vandit15 File: dataloaders.py License: MIT License

def get_lsun_dataloader(path_to_data='../lsun', dataset='bedroom_train',
                        batch_size=64):
    """LSUN dataloader with (128, 128) sized images.

    path_to_data : str
        One of 'bedroom_val' or 'bedroom_train'
    """
    # Compose transforms
    transform = transforms.Compose([
        transforms.Resize(128),
        transforms.CenterCrop(128),
        transforms.ToTensor()
    ])
    # Get dataset
    lsun_dset = datasets.LSUN(db_path=path_to_data, classes=[dataset],
                              transform=transform)
    # Create dataloader
    return DataLoader(lsun_dset, batch_size=batch_size, shuffle=True)
Example 16
Project: robustness Author: hendrycks File: make_imagenet_c.py License: Apache License 2.0

def save_distorted(method=gaussian_noise):
    for severity in range(1, 6):
        print(method.__name__, severity)
        distorted_dataset = DistortImageFolder(
            root="/share/data/vision-greg/ImageNet/clsloc/images/val",
            method=method, severity=severity,
            transform=trn.Compose([trn.Resize(256), trn.CenterCrop(224)]))
        distorted_dataset_loader = torch.utils.data.DataLoader(
            distorted_dataset, batch_size=100, shuffle=False, num_workers=4)

        for _ in distorted_dataset_loader:
            continue


# /////////////// End Further Setup ///////////////

# /////////////// Display Results ///////////////
Example 17
Project: robustness Author: hendrycks File: make_tinyimagenet_c.py License: Apache License 2.0

def save_distorted(method=gaussian_noise):
    for severity in range(1, 6):
        print(method.__name__, severity)
        distorted_dataset = DistortImageFolder(
            root="./imagenet_val_bbox_crop/",
            method=method, severity=severity,
            transform=trn.Compose([trn.Resize((64, 64))]))
        distorted_dataset_loader = torch.utils.data.DataLoader(
            distorted_dataset, batch_size=100, shuffle=False, num_workers=6)

        for _ in distorted_dataset_loader:
            continue


# /////////////// End Further Setup ///////////////

# /////////////// Display Results ///////////////
Example 18
Project: robustness Author: hendrycks File: make_imagenet_64_c.py License: Apache License 2.0

def save_distorted(method=gaussian_noise):
    for severity in range(1, 6):
        print(method.__name__, severity)
        distorted_dataset = DistortImageFolder(
            root="/share/data/vision-greg/ImageNet/clsloc/images/val",
            method=method, severity=severity,
            transform=trn.Compose([trn.Resize((64, 64))]))
        distorted_dataset_loader = torch.utils.data.DataLoader(
            distorted_dataset, batch_size=100, shuffle=False, num_workers=6)

        for _ in distorted_dataset_loader:
            continue


# /////////////// End Further Setup ///////////////

# /////////////// Display Results ///////////////
Example 19
Project: Single-Human-Parsing-LIP Author: hyk1996 File: eval.py License: MIT License

def get_transform():
    transform_image_list = [
        transforms.Resize((256, 256), 3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    transform_gt_list = [
        transforms.Resize((256, 256), 0),
        transforms.Lambda(lambda img: np.asarray(img, dtype=np.uint8)),
    ]

    data_transforms = {
        'img': transforms.Compose(transform_image_list),
        'gt': transforms.Compose(transform_gt_list),
    }
    return data_transforms
Example 20
Project: Single-Human-Parsing-LIP Author: hyk1996 File: train.py License: MIT License

def get_transform():
    transform_image_list = [
        transforms.Resize((256, 256), 3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    transform_gt_list = [
        transforms.Resize((256, 256), 0),
        transforms.Lambda(lambda img: np.asarray(img, dtype=np.uint8)),
    ]

    data_transforms = {
        'img': transforms.Compose(transform_image_list),
        'gt': transforms.Compose(transform_gt_list),
    }
    return data_transforms
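In Examples 19 and 20 the second positional argument to Resize is the PIL interpolation code: 3 is BICUBIC for the images and 0 is NEAREST for the ground-truth label maps, which keeps class indices intact. The same transforms written with named constants would read:

from PIL import Image
from torchvision import transforms

image_resize = transforms.Resize((256, 256), interpolation=Image.BICUBIC)  # == Resize((256, 256), 3)
gt_resize = transforms.Resize((256, 256), interpolation=Image.NEAREST)     # == Resize((256, 256), 0)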
Example 21
Project: cwcf Author: jaromiru File: conv_cifar_2.py License: MIT License

def get_data(train):
    data_raw = datasets.CIFAR10('../data/dl/', train=train, download=True,
                                transform=transforms.Compose([
                                    transforms.Grayscale(),
                                    transforms.Resize((20, 20)),
                                    transforms.ToTensor(),
                                    lambda x: x.numpy().flatten()]))
    data_x, data_y = zip(*data_raw)
    data_x = np.array(data_x)
    data_y = np.array(data_y, dtype='int32').reshape(-1, 1)

    # binarize
    label_0 = data_y < 5
    label_1 = ~label_0
    data_y[label_0] = 0
    data_y[label_1] = 1

    data = pd.DataFrame(data_x)
    data[COLUMN_LABEL] = data_y

    return data, data_x.mean(), data_x.std()

#---
Example 22
Project: cwcf Author: jaromiru File: conv_cifar.py License: MIT License

def get_data(train):
    data_raw = datasets.CIFAR10('../data/dl/', train=train, download=True,
                                transform=transforms.Compose([
                                    transforms.Grayscale(),
                                    transforms.Resize((20, 20)),
                                    transforms.ToTensor(),
                                    lambda x: x.numpy().flatten()]))
    data_x, data_y = zip(*data_raw)
    data_x = np.array(data_x)
    data_y = np.array(data_y, dtype='int32').reshape(-1, 1)

    data = pd.DataFrame(data_x)
    data[COLUMN_LABEL] = data_y

    return data, data_x.mean(), data_x.std()

#---
Example 23
Project: Pointnet2.ScanNet Author: daveredrum File: compute_multiview_projection.py License: MIT License

def resize_crop_image(image, new_image_dims):
    image_dims = [image.shape[1], image.shape[0]]
    if image_dims != new_image_dims:
        resize_width = int(math.floor(new_image_dims[1] * float(image_dims[0]) / float(image_dims[1])))
        image = transforms.Resize([new_image_dims[1], resize_width], interpolation=Image.NEAREST)(Image.fromarray(image))
        image = transforms.CenterCrop([new_image_dims[1], new_image_dims[0]])(image)

    return np.array(image)
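A hypothetical call for illustration, assuming the imports used by the example above (math, numpy, PIL.Image, torchvision.transforms): resizing a 640x480 frame to width 320 and height 240 first scales the height to 240 while preserving the aspect ratio, then center-crops to the requested width.

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)                # placeholder image (H, W, C)
resized = resize_crop_image(frame, new_image_dims=[320, 240])  # [width, height]
print(resized.shape)                                           # (240, 320, 3)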
Example 24
Project: RelationNetworks-CLEVR Author: mesnico File: train.py License: MIT License

def initialize_dataset(clevr_dir, dictionaries, state_description=True):
    if not state_description:
        train_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                               transforms.Pad(8),
                                               transforms.RandomCrop((128, 128)),
                                               transforms.RandomRotation(2.8),  # .05 rad
                                               transforms.ToTensor()])
        test_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                              transforms.ToTensor()])

        clevr_dataset_train = ClevrDataset(clevr_dir, True, dictionaries, train_transforms)
        clevr_dataset_test = ClevrDataset(clevr_dir, False, dictionaries, test_transforms)
    else:
        clevr_dataset_train = ClevrDatasetStateDescription(clevr_dir, True, dictionaries)
        clevr_dataset_test = ClevrDatasetStateDescription(clevr_dir, False, dictionaries)

    return clevr_dataset_train, clevr_dataset_test
Example 25
Project: VSE-C Author: ExplorerFreda File: saliency_visualization.py License: MIT License

def build_image_transforms(self):
    self.image_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
Example 26
Project: transferlearning Author: jindongwang File: digit_data_loader.py License: MIT License

def load_data(domain, root_dir, batch_size):
    src_train_img, src_train_label, src_test_img, src_test_label = load_dataset(domain['src'], root_dir)
    tar_train_img, tar_train_label, tar_test_img, tar_test_label = load_dataset(domain['tar'], root_dir)
    transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    data_src_train, data_src_test = GetDataset(src_train_img, src_train_label, transform), GetDataset(src_test_img, src_test_label, transform)
    data_tar_train, data_tar_test = GetDataset(tar_train_img, tar_train_label, transform), GetDataset(tar_test_img, tar_test_label, transform)
    dataloaders = {}
    dataloaders['src'] = torch.utils.data.DataLoader(data_src_train, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    dataloaders['val'] = torch.utils.data.DataLoader(data_src_test, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    dataloaders['tar'] = torch.utils.data.DataLoader(data_tar_train, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return dataloaders
Example 27
Project: transferlearning Author: jindongwang File: dataset.py License: MIT License

def loader(path, batch_size=16, num_workers=1, pin_memory=True):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    return data.DataLoader(
        datasets.ImageFolder(path,
                             transforms.Compose([
                                 transforms.Resize(256),
                                 transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 normalize,
                             ])),
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=pin_memory)
Example 28
Project: transferlearning Author: jindongwang File: dataset.py License: MIT License

def test_loader(path, batch_size=16, num_workers=1, pin_memory=True):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    return data.DataLoader(
        datasets.ImageFolder(path,
                             transforms.Compose([
                                 transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 normalize,
                             ])),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory)
Example 29
Project: transferlearning Author: jindongwang File: data_loader.py License: MIT License

def load_training(root_path, dir, batch_size, kwargs):
    transform = transforms.Compose(
        [transforms.Resize([256, 256]),
         transforms.RandomCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + dir, transform=transform)
    train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True, **kwargs)
    return train_loader
Example 30
Project: transferlearning Author: jindongwang File: data_loader.py License: MIT License

def load_testing(root_path, dir, batch_size, kwargs):
    transform = transforms.Compose(
        [transforms.Resize([224, 224]),
         transforms.ToTensor()])
    data = datasets.ImageFolder(root=root_path + dir, transform=transform)
    test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, **kwargs)
    return test_loader