Python torchvision.transforms.Lambda() Examples
The following are 30 code examples of torchvision.transforms.Lambda(), collected from open-source projects; the original project and source file for each example are noted above it. transforms.Lambda wraps an arbitrary user-defined callable as a transform, so custom preprocessing steps can be composed with the built-in torchvision transforms. You may also want to check out all available functions/classes of the torchvision.transforms module.
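As a quick orientation before the examples, here is a minimal, self-contained sketch of how transforms.Lambda is typically used (the variable names and the dummy image are illustrative, not taken from any of the projects below):

from PIL import Image
from torchvision import transforms

# transforms.Lambda wraps any callable so it can be composed with other transforms.
# A common use (mirrored in Example #5 below): replicate a grayscale channel to
# three channels so a single-channel image can feed an RGB network.
to_three_channels = transforms.Lambda(lambda x: x.repeat(3, 1, 1))

pipeline = transforms.Compose([
    transforms.ToTensor(),   # PIL Image -> float tensor in [0, 1], shape (1, H, W)
    to_three_channels,       # custom step supplied via Lambda -> shape (3, H, W)
])

img = Image.new('L', (28, 28))   # dummy grayscale image standing in for a real sample
out = pipeline(img)
print(out.shape)                 # torch.Size([3, 28, 28])

Because Lambda simply stores the callable and applies it on each sample, any function works; note, however, that lambdas do not pickle, which matters when a DataLoader uses worker processes on platforms that spawn rather than fork.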
Example #1
Source File: loaders.py From open-solution-data-science-bowl-2018 with MIT License
def __init__(self, loader_params, dataset_params):
    super().__init__(loader_params, dataset_params)

    self.image_transform = transforms.Compose([transforms.ToTensor(),
                                               transforms.Normalize(mean=MEAN, std=STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.image_augment_train = ImgAug(color_seq)
    self.image_augment_with_target_train = ImgAug(
        crop_seq(crop_size=(self.dataset_params.h, self.dataset_params.w)))
    self.image_augment_inference = ImgAug(
        pad_to_fit_net(self.dataset_params.divisor, self.dataset_params.pad_method))
    self.image_augment_with_target_inference = ImgAug(
        pad_to_fit_net(self.dataset_params.divisor, self.dataset_params.pad_method))

    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example #2
Source File: segmentation.py From steppy-toolkit with MIT License
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                               transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                interpolation=0),
                                              transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])

    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example #3
Source File: data_loader.py From cycada_release with BSD 2-Clause "Simplified" License
def get_transform(params, image_size, num_channels):
    # Transforms for PIL Images: Gray <-> RGB
    Gray2RGB = transforms.Lambda(lambda x: x.convert('RGB'))
    RGB2Gray = transforms.Lambda(lambda x: x.convert('L'))

    transform = []
    # Does size request match original size?
    if not image_size == params.image_size:
        transform.append(transforms.Resize(image_size))
    # Does number of channels requested match original?
    if not num_channels == params.num_channels:
        if num_channels == 1:
            transform.append(RGB2Gray)
        elif num_channels == 3:
            transform.append(Gray2RGB)
        else:
            print('NumChannels should be 1 or 3', num_channels)
            raise Exception

    transform += [transforms.ToTensor(),
                  transforms.Normalize((params.mean,), (params.std,))]
    return transforms.Compose(transform)
Example #4
Source File: loaders.py From open-solution-salt-identification with MIT License
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               AddDepthChannels()
                                               ])
    self.mask_transform = transforms.Lambda(preprocess_emptiness_target)

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    self.dataset = EmptinessDataset
Example #5
Source File: mnist.py From pytorch-atda with MIT License
def get_mnist(train, get_dataset=False, batch_size=cfg.batch_size):
    """Get MNIST dataset loader."""
    # image pre-processing
    convert_to_3_channels = transforms.Lambda(
        lambda x: torch.cat([x, x, x], 0))
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=cfg.dataset_mean,
                                          std=cfg.dataset_std),
                                      convert_to_3_channels])

    # dataset and data loader
    mnist_dataset = datasets.MNIST(root=cfg.data_root,
                                   train=train,
                                   transform=pre_process,
                                   download=True)

    if get_dataset:
        return mnist_dataset
    else:
        mnist_data_loader = torch.utils.data.DataLoader(
            dataset=mnist_dataset,
            batch_size=batch_size,
            shuffle=True)
        return mnist_data_loader
Example #6
Source File: segmentation.py From steppy-toolkit with MIT License
def __init__(self, loader_params, dataset_params, augmentation_params):
    super().__init__(loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                               transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                interpolation=0),
                                              transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.dataset = ImageSegmentationTTADataset
Example #7
Source File: loaders.py From open-solution-salt-identification with MIT License
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               AddDepthChannels()
                                               ])
    self.mask_transform = transforms.Lambda(preprocess_target)

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    if self.dataset_params.use_depth:
        self.dataset = ImageSegmentationDatasetWithDepth
    else:
        self.dataset = ImageSegmentationDataset
Example #8
Source File: loaders.py From open-solution-salt-identification with MIT License
def __init__(self, loader_params, dataset_params, augmentation_params):
    super().__init__(loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               AddDepthChannels()
                                               ])
    self.mask_transform = transforms.Lambda(preprocess_target)

    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    if self.dataset_params.use_depth:
        self.dataset = ImageSegmentationTTADatasetWithDepth
    else:
        self.dataset = ImageSegmentationTTADataset
Example #9
Source File: eval.py From Single-Human-Parsing-LIP with MIT License
def get_transform():
    transform_image_list = [
        transforms.Resize((256, 256), 3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    transform_gt_list = [
        transforms.Resize((256, 256), 0),
        transforms.Lambda(lambda img: np.asarray(img, dtype=np.uint8)),
    ]

    data_transforms = {
        'img': transforms.Compose(transform_image_list),
        'gt': transforms.Compose(transform_gt_list),
    }
    return data_transforms
Example #10
Source File: train.py From Single-Human-Parsing-LIP with MIT License
def get_transform():
    transform_image_list = [
        transforms.Resize((256, 256), 3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    transform_gt_list = [
        transforms.Resize((256, 256), 0),
        transforms.Lambda(lambda img: np.asarray(img, dtype=np.uint8)),
    ]

    data_transforms = {
        'img': transforms.Compose(transform_image_list),
        'gt': transforms.Compose(transform_gt_list),
    }
    return data_transforms
Example #11
Source File: base_dataset.py From Recycle-GAN with MIT License
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Example #12
Source File: pytorch_example.py From petastorm with Apache License 2.0
def _transform_row(mnist_row):
    # For this example, the images are stored as simpler ndarray (28,28), but the
    # training network expects 3-dim images, hence the additional lambda transform.
    transform = transforms.Compose([
        transforms.Lambda(lambda nd: nd.reshape(28, 28, 1)),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    # In addition, the petastorm pytorch DataLoader does not distinguish the notion of
    # data or target transform, but that actually gives the user more flexibility
    # to make the desired partial transform, as shown here.
    result_row = {
        'image': transform(mnist_row['image']),
        'digit': mnist_row['digit']
    }

    return result_row
Example #13
Source File: preprocess.py From convNet.pytorch with MIT License
def scale_crop(input_size, scale_size=None, num_crops=1, normalize=_IMAGENET_STATS):
    assert num_crops in [1, 5, 10], "num crops must be in {1,5,10}"
    convert_tensor = transforms.Compose([transforms.ToTensor(),
                                         transforms.Normalize(**normalize)])
    if num_crops == 1:
        t_list = [
            transforms.CenterCrop(input_size),
            convert_tensor
        ]
    else:
        if num_crops == 5:
            t_list = [transforms.FiveCrop(input_size)]
        elif num_crops == 10:
            t_list = [transforms.TenCrop(input_size)]
        # returns a 4D tensor
        t_list.append(transforms.Lambda(
            lambda crops: torch.stack([convert_tensor(crop) for crop in crops])))

    if scale_size != input_size:
        t_list = [transforms.Resize(scale_size)] + t_list

    return transforms.Compose(t_list)
Example #14
Source File: segmentation.py From steppy-toolkit with MIT License
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                              transforms.Lambda(to_tensor),
                                              ])

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    else:
        raise Exception('files must be png or json')
Example #15
Source File: data_loader.py From MADAN with MIT License
def get_transform(params, image_size, num_channels):
    # Transforms for PIL Images: Gray <-> RGB
    Gray2RGB = transforms.Lambda(lambda x: x.convert('RGB'))
    RGB2Gray = transforms.Lambda(lambda x: x.convert('L'))

    transform = []
    # Does size request match original size?
    if not image_size == params.image_size:
        transform.append(transforms.Resize(image_size))
    # Does number of channels requested match original?
    if not num_channels == params.num_channels:
        if num_channels == 1:
            transform.append(RGB2Gray)
        elif num_channels == 3:
            transform.append(Gray2RGB)
        else:
            print('NumChannels should be 1 or 3', num_channels)
            raise Exception

    transform += [transforms.ToTensor(),
                  transforms.Normalize((params.mean,), (params.std,))]
    return transforms.Compose(transform)
Example #16
Source File: base_dataset.py From MADAN with MIT License
def get_label_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Resize(osize, interpolation=Image.NEAREST))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'resize_only':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Resize(osize, interpolation=Image.NEAREST))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Resize(opt.loadSize, interpolation=Image.NEAREST))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Resize(opt.loadSize, interpolation=Image.NEAREST))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    # transform_list.append(transforms.RandomCrop(opt.fineSize))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list.append(transforms.Lambda(lambda img: to_tensor_raw(img)))
    return transforms.Compose(transform_list)
Example #17
Source File: base_dataset.py From ganimation_replicate with MIT License
def img_transformer(self):
    transform_list = []
    if self.opt.resize_or_crop == 'resize_and_crop':
        transform_list.append(transforms.Resize([self.opt.load_size, self.opt.load_size], Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(self.opt.final_size))
    elif self.opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(self.opt.final_size))
    elif self.opt.resize_or_crop == 'none':
        transform_list.append(transforms.Lambda(lambda image: image))
    else:
        raise ValueError("--resize_or_crop %s is not a valid option." % self.opt.resize_or_crop)

    if self.is_train and not self.opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list.append(transforms.ToTensor())
    transform_list.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))

    img2tensor = transforms.Compose(transform_list)
    return img2tensor
Example #18
Source File: dataset_test.py From baal with Apache License 2.0
def test_transform(self):
    train_transform = Lambda(lambda k: 1)
    test_transform = Lambda(lambda k: 0)

    dataset = ActiveLearningDataset(MyDataset(train_transform),
                                    pool_specifics={'transform': test_transform},
                                    make_unlabelled=lambda x: (x[0], -1))
    dataset.label(np.arange(10))
    pool = dataset.pool
    assert np.equal([i for i in pool], [(0, -1) for i in np.arange(10, 100)]).all()
    assert np.equal([i for i in dataset], [(1, i) for i in np.arange(10)]).all()

    with pytest.warns(DeprecationWarning) as e:
        ActiveLearningDataset(MyDataset(train_transform), eval_transform=train_transform)
    assert len(e) == 1

    with pytest.raises(ValueError) as e:
        ActiveLearningDataset(MyDataset(train_transform), pool_specifics={'whatever': 123}).pool
Example #19
Source File: utils.py From inplace_abn with BSD 3-Clause "New" or "Revised" License
def create_test_transforms(config, crop, scale, ten_crops):
    normalize = transforms.Normalize(mean=config["mean"], std=config["std"])

    val_transforms = []
    if scale != -1:
        val_transforms.append(transforms.Resize(scale))
    if ten_crops:
        val_transforms += [
            transforms.TenCrop(crop),
            transforms.Lambda(lambda crops: [transforms.ToTensor()(crop) for crop in crops]),
            transforms.Lambda(lambda crops: [normalize(crop) for crop in crops]),
            transforms.Lambda(lambda crops: torch.stack(crops))
        ]
    else:
        val_transforms += [
            transforms.CenterCrop(crop),
            transforms.ToTensor(),
            normalize
        ]
    return val_transforms
Example #20
Source File: base_dataset.py From non-stationary_texture_syn with MIT License
def get_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    # if opt.isTrain and not opt.no_flip:
    #     transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Example #21
Source File: train_AffNet_test_on_graffity.py From affnet with MIT License
def create_loaders():
    kwargs = {'num_workers': args.num_workers, 'pin_memory': args.pin_memory} if args.cuda else {}

    transform = transforms.Compose([
        transforms.Lambda(np_reshape),
        transforms.ToTensor()
    ])

    train_loader = torch.utils.data.DataLoader(
        TotalDatasetsLoader(datasets_path=args.dataroot, train=True,
                            n_triplets=args.n_pairs,
                            fliprot=True,
                            batch_size=args.batch_size,
                            download=True,
                            transform=transform),
        batch_size=args.batch_size,
        shuffle=False, **kwargs)

    return train_loader, None
Example #22
Source File: loaders.py From open-solution-ship-detection with MIT License
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
    super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

    self.image_transform = transforms.Compose([transforms.ToTensor(),
                                               transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                    std=self.dataset_params.STD),
                                               ])
    self.mask_transform = transforms.Compose([transforms.Lambda(preprocess_target),
                                              ])

    self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
    self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
    self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
    self.image_augment_with_target_inference = ImgAug(
        self.augmentation_params['image_augment_with_target_inference'])

    if self.dataset_params.target_format == 'png':
        self.dataset = ImageSegmentationPngDataset
    elif self.dataset_params.target_format == 'json':
        self.dataset = ImageSegmentationJsonDataset
    elif self.dataset_params.target_format == 'joblib':
        self.dataset = ImageSegmentationJoblibDataset
    else:
        raise Exception('files must be png, json or joblib')
Example #23
Source File: test_spark_dataset_converter.py From petastorm with Apache License 2.0
def test_torch_transform_spec(spark_test_ctx):
    df = spark_test_ctx.spark.range(8)
    conv = make_spark_converter(df)

    from torchvision import transforms
    from petastorm import TransformSpec

    def _transform_row(df_row):
        scale_transform = transforms.Compose([
            transforms.Lambda(lambda x: x * 0.1),
        ])
        return scale_transform(df_row)

    transform = TransformSpec(_transform_row)
    with conv.make_torch_dataloader(transform_spec=transform,
                                    num_epochs=1) as dataloader:
        for batch in dataloader:
            assert min(batch['id']) >= 0 and max(batch['id']) < 1
Example #24
Source File: cifar10.py From Deep-SVDD-PyTorch with MIT License
def __init__(self, root: str, normal_class=5):
    super().__init__(root)

    self.n_classes = 2  # 0: normal, 1: outlier
    self.normal_classes = tuple([normal_class])
    self.outlier_classes = list(range(0, 10))
    self.outlier_classes.remove(normal_class)

    # Pre-computed min and max values (after applying GCN) from train data per class
    min_max = [(-28.94083453598571, 13.802961825439636),
               (-6.681770233365245, 9.158067708230273),
               (-34.924463588638204, 14.419298165027628),
               (-10.599172931391799, 11.093187820377565),
               (-11.945022995801637, 10.628045447867583),
               (-9.691969487694928, 8.948326776180823),
               (-9.174940012342555, 13.847014686472365),
               (-6.876682005899029, 12.282371383343161),
               (-15.603507135507172, 15.2464923804279),
               (-6.132882973622672, 8.046098172351265)]

    # CIFAR-10 preprocessing: GCN (with L1 norm) and min-max feature scaling to [0,1]
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Lambda(lambda x: global_contrast_normalization(x, scale='l1')),
                                    transforms.Normalize([min_max[normal_class][0]] * 3,
                                                         [min_max[normal_class][1] - min_max[normal_class][0]] * 3)])

    target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes))

    train_set = MyCIFAR10(root=self.root, train=True, download=True,
                          transform=transform, target_transform=target_transform)

    # Subset train set to normal class
    train_idx_normal = get_target_label_idx(train_set.train_labels, self.normal_classes)
    self.train_set = Subset(train_set, train_idx_normal)

    self.test_set = MyCIFAR10(root=self.root, train=False, download=True,
                              transform=transform, target_transform=target_transform)
Example #25
Source File: predictor.py From Res2Net-maskrcnn with MIT License
def build_transform(self):
    """
    Creates a basic transformation that was used to train the models
    """
    cfg = self.cfg

    # we are loading images with OpenCV, so we don't need to convert them
    # to BGR, they are already! So all we need to do is to normalize
    # by 255 if we want to convert to BGR255 format, or flip the channels
    # if we want it to be in RGB in [0-1] range.
    if cfg.INPUT.TO_BGR255:
        to_bgr_transform = T.Lambda(lambda x: x * 255)
    else:
        to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])

    normalize_transform = T.Normalize(
        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
    )

    transform = T.Compose(
        [
            T.ToPILImage(),
            T.Resize(self.min_image_size),
            T.ToTensor(),
            to_bgr_transform,
            normalize_transform,
        ]
    )
    return transform
Example #26
Source File: cifar10.py From Deep-SAD-PyTorch with MIT License
def __init__(self, root: str, normal_class: int = 5, known_outlier_class: int = 3, n_known_outlier_classes: int = 0,
             ratio_known_normal: float = 0.0, ratio_known_outlier: float = 0.0, ratio_pollution: float = 0.0):
    super().__init__(root)

    # Define normal and outlier classes
    self.n_classes = 2  # 0: normal, 1: outlier
    self.normal_classes = tuple([normal_class])
    self.outlier_classes = list(range(0, 10))
    self.outlier_classes.remove(normal_class)
    self.outlier_classes = tuple(self.outlier_classes)

    if n_known_outlier_classes == 0:
        self.known_outlier_classes = ()
    elif n_known_outlier_classes == 1:
        self.known_outlier_classes = tuple([known_outlier_class])
    else:
        self.known_outlier_classes = tuple(random.sample(self.outlier_classes, n_known_outlier_classes))

    # CIFAR-10 preprocessing: feature scaling to [0, 1]
    transform = transforms.ToTensor()
    target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes))

    # Get train set
    train_set = MyCIFAR10(root=self.root, train=True, transform=transform, target_transform=target_transform,
                          download=True)

    # Create semi-supervised setting
    idx, _, semi_targets = create_semisupervised_setting(np.array(train_set.targets), self.normal_classes,
                                                         self.outlier_classes, self.known_outlier_classes,
                                                         ratio_known_normal, ratio_known_outlier, ratio_pollution)
    train_set.semi_targets[idx] = torch.tensor(semi_targets)  # set respective semi-supervised labels

    # Subset train_set to semi-supervised setup
    self.train_set = Subset(train_set, idx)

    # Get test set
    self.test_set = MyCIFAR10(root=self.root, train=False, transform=transform, target_transform=target_transform,
                              download=True)
Example #27
Source File: base_dataset.py From EverybodyDanceNow_reproduce_pytorch with MIT License
def get_transform(opt, params, method=Image.BICUBIC, normalize=True):
    transform_list = []
    if 'resize' in opt.resize_or_crop:
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, method))
    elif 'scale_width' in opt.resize_or_crop:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method)))

    if 'crop' in opt.resize_or_crop:
        transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))

    if opt.resize_or_crop == 'none':
        base = float(2 ** opt.n_downsample_global)
        if opt.netG == 'local':
            base *= (2 ** opt.n_local_enhancers)
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))

    transform_list += [transforms.ToTensor()]

    if normalize:
        transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
                                                (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
Example #28
Source File: base_dataset.py From non-stationary_texture_syn with MIT License
def get_half_transform(opt):
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Scale(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)

# def get_half_transform(opt):
#     transform_list1 = []
#     transform_list2 = []
#     transform_list1.append(transforms.RandomCrop(opt.fineSize))
#     transform_list2.append(transforms.Lambda(
#         lambda img: __scale_width_then_half(img, opt.fineSize)))
#     if opt.isTrain and not opt.no_flip:
#         transform_list1.append(transforms.RandomHorizontalFlip())
#         transform_list2.append(transforms.RandomHorizontalFlip())
#
#     transform_list1 += [transforms.ToTensor(),
#                         transforms.Normalize((0.5, 0.5, 0.5),
#                                              (0.5, 0.5, 0.5))]
#     transform_list2 += [transforms.ToTensor(),
#                         transforms.Normalize((0.5, 0.5, 0.5),
#                                              (0.5, 0.5, 0.5))]
#     return transforms.Compose(transform_list2), transforms.Compose(transform_list1)
Example #29
Source File: train_OriNet_test_on_graffity.py From affnet with MIT License
def create_loaders():
    kwargs = {'num_workers': args.num_workers, 'pin_memory': args.pin_memory} if args.cuda else {}

    transform = transforms.Compose([
        transforms.Lambda(np_reshape),
        transforms.ToTensor()
    ])

    train_loader = torch.utils.data.DataLoader(
        TotalDatasetsLoader(datasets_path=args.dataroot, train=True,
                            n_triplets=args.n_pairs,
                            fliprot=True,
                            batch_size=args.batch_size,
                            download=True,
                            transform=transform),
        batch_size=args.batch_size,
        shuffle=False, **kwargs)

    # test_loader = torch.utils.data.DataLoader(
    #     HPatchesDM('dataset/HP_HessianPatches/', '', train=False,
    #                n_pairs=args.n_test_pairs,
    #                batch_size=args.test_batch_size,
    #                download=True,
    #                transform=transforms.Compose([])),
    #     batch_size=args.test_batch_size,
    #     shuffle=False, **kwargs)

    return train_loader, None
Example #30
Source File: omniglot.py From nsf with MIT License
def main():
    transform = tvtransforms.Compose([
        tvtransforms.ToTensor(),
        tvtransforms.Lambda(torch.bernoulli)
    ])
    dataset = OmniglotDataset(split='test', transform=transform)
    loader = data.DataLoader(dataset, batch_size=16)
    batch = next(iter(loader))[0]

    from matplotlib import pyplot as plt
    from experiments import cutils
    from torchvision.utils import make_grid

    fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    cutils.gridimshow(make_grid(batch, nrow=4), ax)
    plt.show()