Python torchvision.datasets.CIFAR10 Examples
The following code examples show how to use torchvision.datasets.CIFAR10(). They are extracted from open-source projects; to see an example in context, follow the project and file information above it. You may also want to check out the other available functions and classes of the torchvision.datasets module.
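Before the examples, here is a minimal sketch of the pattern nearly all of them build on: construct the dataset with a transform, then wrap it in a DataLoader. The root path and batch size below are arbitrary placeholders.

import torch
import torchvision.transforms as transforms
from torchvision.datasets import CIFAR10

# Download (if missing) and load the 50,000-image training split as tensors.
train_set = CIFAR10(root='./data', train=True, download=True,
                    transform=transforms.ToTensor())

# Iterate in mini-batches; each image is a 3x32x32 float tensor in [0, 1].
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 3, 32, 32]) torch.Size([64])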
Example 1
Project: pytorch-multigpu Author: dnddnjs File: train.py License: MIT License
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True,
                            transform=transforms_train)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_worker)

    # the ten CIFAR-10 classes
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')
    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr,
    #                       momentum=0.9, weight_decay=1e-4)

    train(net, criterion, optimizer, train_loader, device)
Example 2
Project: Deep-SAD-PyTorch Author: lukasruff File: cifar10.py License: MIT License
def __getitem__(self, index):
    """Override the original method of the CIFAR10 class.

    Args:
        index (int): Index

    Returns:
        tuple: (image, target, semi_target, index)
    """
    img, target, semi_target = self.data[index], self.targets[index], int(self.semi_targets[index])

    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    img = Image.fromarray(img)

    if self.transform is not None:
        img = self.transform(img)

    if self.target_transform is not None:
        target = self.target_transform(target)

    return img, target, semi_target, index
Example 3
Project: cwcf Author: jaromiru File: conv_cifar_2.py License: MIT License
def get_data(train):
    data_raw = datasets.CIFAR10('../data/dl/', train=train, download=True,
                                transform=transforms.Compose([
                                    transforms.Grayscale(),
                                    transforms.Resize((20, 20)),
                                    transforms.ToTensor(),
                                    lambda x: x.numpy().flatten()]))

    data_x, data_y = zip(*data_raw)
    data_x = np.array(data_x)
    data_y = np.array(data_y, dtype='int32').reshape(-1, 1)

    # binarize
    label_0 = data_y < 5
    label_1 = ~label_0
    data_y[label_0] = 0
    data_y[label_1] = 1

    data = pd.DataFrame(data_x)
    data[COLUMN_LABEL] = data_y

    return data, data_x.mean(), data_x.std()

#---
Example 4
Project: cwcf Author: jaromiru File: conv_cifar.py License: MIT License
def get_data(train):
    data_raw = datasets.CIFAR10('../data/dl/', train=train, download=True,
                                transform=transforms.Compose([
                                    transforms.Grayscale(),
                                    transforms.Resize((20, 20)),
                                    transforms.ToTensor(),
                                    lambda x: x.numpy().flatten()]))

    data_x, data_y = zip(*data_raw)
    data_x = np.array(data_x)
    data_y = np.array(data_y, dtype='int32').reshape(-1, 1)

    data = pd.DataFrame(data_x)
    data[COLUMN_LABEL] = data_y

    return data, data_x.mean(), data_x.std()

#---
Example 5
Project: imgclsmob Author: osmr File: cifar10_cls_dataset.py License: MIT License
def __init__(self):
    super(CIFAR10MetaInfo, self).__init__()
    self.label = "CIFAR10"
    self.short_label = "cifar"
    self.root_dir_name = "cifar10"
    self.dataset_class = CIFAR10Fine
    self.num_training_samples = 50000
    self.in_channels = 3
    self.num_classes = 10
    self.input_image_size = (32, 32)
    self.train_metric_capts = ["Train.Err"]
    self.train_metric_names = ["Top1Error"]
    self.train_metric_extra_kwargs = [{"name": "err"}]
    self.val_metric_capts = ["Val.Err"]
    self.val_metric_names = ["Top1Error"]
    self.val_metric_extra_kwargs = [{"name": "err"}]
    self.saver_acc_ind = 0
    self.train_transform = cifar10_train_transform
    self.val_transform = cifar10_val_transform
    self.test_transform = cifar10_val_transform
    self.ml_type = "imgcls"
Example 6
Project: pytorch_deephash Author: flyingpot File: evaluate.py License: MIT License
def load_data():
    transform_train = transforms.Compose(
        [transforms.Resize(227),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    transform_test = transforms.Compose(
        [transforms.Resize(227),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    trainset = datasets.CIFAR10(root='./data', train=True, download=True,
                                transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,
                                              shuffle=False, num_workers=0)
    testset = datasets.CIFAR10(root='./data', train=False, download=True,
                               transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                             shuffle=False, num_workers=0)
    return trainloader, testloader
Example 7
Project: pytorch_deephash Author: flyingpot File: train.py License: MIT License
def init_dataset():
    transform_train = transforms.Compose(
        [transforms.Resize(256),
         transforms.RandomCrop(227),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    transform_test = transforms.Compose(
        [transforms.Resize(227),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    trainset = datasets.CIFAR10(root='./data', train=True, download=True,
                                transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
                                              shuffle=True, num_workers=0)
    testset = datasets.CIFAR10(root='./data', train=False, download=True,
                               transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                             shuffle=True, num_workers=0)
    return trainloader, testloader
Example 8
Project: RobGAN Author: xuanqing94 File: acc_under_attack.py License: MIT License
def make_dataset():
    if opt.dataset in ("imagenet", "dog_and_cat_64", "dog_and_cat_128"):
        trans = tfs.Compose([
            tfs.Resize(opt.img_width),
            tfs.ToTensor(),
            tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])])
        data = ImageFolder(opt.root, transform=trans)
        loader = DataLoader(data, batch_size=100, shuffle=False,
                            num_workers=opt.workers)
    elif opt.dataset == "cifar10":
        trans = tfs.Compose([
            tfs.Resize(opt.img_width),
            tfs.ToTensor(),
            tfs.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])])
        data = CIFAR10(root=opt.root, train=True, download=False, transform=trans)
        loader = DataLoader(data, batch_size=100, shuffle=True,
                            num_workers=opt.workers)
    else:
        raise ValueError(f"Unknown dataset: {opt.dataset}")
    return loader
Example 9
Project: convex_adversarial Author: locuslab File: problems.py License: MIT License
def cifar_loaders(batch_size, shuffle_test=False):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.225, 0.225, 0.225])
    train = datasets.CIFAR10('./data', train=True, download=True,
                             transform=transforms.Compose([
                                 transforms.RandomHorizontalFlip(),
                                 transforms.RandomCrop(32, 4),
                                 transforms.ToTensor(),
                                 normalize,
                             ]))
    test = datasets.CIFAR10('./data', train=False,
                            transform=transforms.Compose([transforms.ToTensor(), normalize]))
    train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size,
                                               shuffle=True, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size,
                                              shuffle=shuffle_test, pin_memory=True)
    return train_loader, test_loader
Example 10
Project: jdit Author: dingguanglei File: dataset.py License: Apache License 2.0
def build_datasets(self):
    """You must rewrite this method to load your own datasets.

    * :attr:`self.dataset_train`. Assign a training ``dataset`` to this.
    * :attr:`self.dataset_valid`. Assign a valid_epoch ``dataset`` to this.
    * :attr:`self.dataset_test` is optional. Assign a test ``dataset`` to this.
      If not, it will be replaced by ``self.dataset_valid``.

    Example::

        self.dataset_train = datasets.CIFAR10(root, train=True, download=True,
                                              transform=transforms.Compose(self.train_transform_list))
        self.dataset_valid = datasets.CIFAR10(root, train=False, download=True,
                                              transform=transforms.Compose(self.valid_transform_list))
    """
    pass
Example 11
Project: Deep-SVDD-PyTorch Author: lukasruff File: cifar10.py License: MIT License
def __getitem__(self, index):
    """Override the original method of the CIFAR10 class.

    Args:
        index (int): Index

    Returns:
        triple: (image, target, index) where target is index of the target class.
    """
    if self.train:
        img, target = self.train_data[index], self.train_labels[index]
    else:
        img, target = self.test_data[index], self.test_labels[index]

    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    img = Image.fromarray(img)

    if self.transform is not None:
        img = self.transform(img)

    if self.target_transform is not None:
        target = self.target_transform(target)

    return img, target, index  # only line changed
Example 12
Project: baal Author: ElementAI File: vgg_mcdropout_cifar10.py License: Apache License 2.0
def get_datasets(initial_pool):
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.RandomHorizontalFlip(),
         transforms.RandomRotation(30),
         transforms.ToTensor(),
         transforms.Normalize(3 * [0.5], 3 * [0.5]),
         ])
    test_transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(),
         transforms.Normalize(3 * [0.5], 3 * [0.5]),
         ])
    # Note: We use the test set here as an example. You should make your own validation set.
    train_ds = datasets.CIFAR10('.', train=True, transform=transform,
                                target_transform=None, download=True)
    test_set = datasets.CIFAR10('.', train=False, transform=test_transform,
                                target_transform=None, download=True)
    active_set = ActiveLearningDataset(train_ds, pool_specifics={'transform': test_transform})

    # We start labeling randomly.
    active_set.label_randomly(initial_pool)
    return active_set, test_set
Example 13
Project: pytorch-multigpu Author: dnddnjs File: train.py License: MIT License
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True,
                            transform=transforms_train)
    train_loader = DataLoader(dataset_train, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_worker)

    # the ten CIFAR-10 classes
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')
    net = pyramidnet()
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr,
                          momentum=0.9, weight_decay=1e-4)

    train(net, criterion, optimizer, train_loader, device)
Example 14
Project: network-slimming Author: Eric-mingjie File: prune_mask.py License: MIT License
def test():
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    if args.dataset == 'cifar10':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./data.cifar10', train=False,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                      (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    elif args.dataset == 'cifar100':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./data.cifar100', train=False,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
                                  transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                       (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    else:
        raise ValueError("No valid dataset is given.")
    model.eval()
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
        correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))
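A side note on the loop above: Variable(data, volatile=True) is pre-0.4 PyTorch. On current versions the same evaluation loop is normally written with torch.no_grad(); a minimal equivalent sketch, assuming the same model, args, and test_loader as in the example:

model.eval()
correct = 0
with torch.no_grad():  # replaces Variable(..., volatile=True)
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
        correct += pred.eq(target.view_as(pred)).sum().item()
print('Test set: Accuracy: {}/{}'.format(correct, len(test_loader.dataset)))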
Example 15
Project: network-slimming Author: Eric-mingjie File: vggprune.py License: MIT License
def test(model):
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    if args.dataset == 'cifar10':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./data.cifar10', train=False,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                      (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    elif args.dataset == 'cifar100':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./data.cifar100', train=False,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
                                  transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                       (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=True, **kwargs)
    else:
        raise ValueError("No valid dataset is given.")
    model.eval()
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
        correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))
Example 16
Project: network-slimming Author: Eric-mingjie File: resprune.py License: MIT License
def test(model):
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    if args.dataset == 'cifar10':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./data.cifar10', train=False,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                      (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=False, **kwargs)
    elif args.dataset == 'cifar100':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./data.cifar100', train=False,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
                                  transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                       (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=False, **kwargs)
    else:
        raise ValueError("No valid dataset is given.")
    model.eval()
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
        correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))
Example 17
Project: network-slimming Author: Eric-mingjie File: denseprune.py License: MIT License
def test(model):
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    if args.dataset == 'cifar10':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('./data.cifar10', train=False,
                             transform=transforms.Compose([
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                      (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=False, **kwargs)
    elif args.dataset == 'cifar100':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('./data.cifar100', train=False,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
                                  transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                       (0.2023, 0.1994, 0.2010))])),
            batch_size=args.test_batch_size, shuffle=False, **kwargs)
    else:
        raise ValueError("No valid dataset is given.")
    model.eval()
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format(
        correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))
Example 18
Project: residual-flows Author: rtqichen File: datasets.py License: MIT License
def __init__(self, dataroot, train=True, transform=None):
    self.cifar10 = vdsets.CIFAR10(dataroot, train=train, download=True,
                                  transform=transform)
Example 19
Project: Pytorch-CapsuleNet Author: jindongwang File: data_loader.py License: MIT License
def __init__(self, dataset, _batch_size):
    super(Dataset, self).__init__()
    if dataset == 'mnist':
        dataset_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train_dataset = datasets.MNIST('/data/mnist', train=True, download=True,
                                       transform=dataset_transform)
        test_dataset = datasets.MNIST('/data/mnist', train=False, download=True,
                                      transform=dataset_transform)
        self.train_loader = torch.utils.data.DataLoader(train_dataset,
                                                        batch_size=_batch_size, shuffle=True)
        self.test_loader = torch.utils.data.DataLoader(test_dataset,
                                                       batch_size=_batch_size, shuffle=False)
    elif dataset == 'cifar10':
        data_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        train_dataset = datasets.CIFAR10('/data/cifar', train=True, download=True,
                                         transform=data_transform)
        test_dataset = datasets.CIFAR10('/data/cifar', train=False, download=True,
                                        transform=data_transform)
        self.train_loader = torch.utils.data.DataLoader(train_dataset,
                                                        batch_size=_batch_size, shuffle=True)
        self.test_loader = torch.utils.data.DataLoader(test_dataset,
                                                       batch_size=_batch_size, shuffle=False)
    elif dataset == 'office-caltech':
        pass
    elif dataset == 'office31':
        pass
Example 20
Project: probabilistic-federated-neural-matching Author: IBM File: datasets.py License: Apache License 2.0
def __build_truncated_dataset__(self):
    cifar_dataobj = CIFAR10(self.root, self.train, self.transform,
                            self.target_transform, self.download)

    data = np.array(cifar_dataobj.data)
    target = np.array(cifar_dataobj.targets)

    if self.dataidxs is not None:
        data = data[self.dataidxs]
        target = target[self.dataidxs]

    return data, target
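The truncation above relies on the .data and .targets attributes that newer torchvision versions expose on CIFAR10: a uint8 NumPy array of shape (50000, 32, 32, 3) for the training split, and a list of integer labels. A quick check, assuming a local ./data root:

from torchvision.datasets import CIFAR10

ds = CIFAR10(root='./data', train=True, download=True)
print(ds.data.shape)    # (50000, 32, 32, 3) -- HWC uint8 images
print(len(ds.targets))  # 50000 integer class labels
print(ds.classes[:3])   # ['airplane', 'automobile', 'bird']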
Example 21
Project: DeepRobust Author: DSE-MSU File: evaluation_attack.py License: MIT License
def generate_dataloader(dataset, batch_size):
    if dataset == "MNIST":
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('deeprobust/image/data', train=False, download=True,
                           transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=args.batch_size, shuffle=True)
        print("Loading MNIST dataset.")
    elif dataset == "CIFAR" or args.dataset == 'CIFAR10':
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('deeprobust/image/data', train=False, download=True,
                             transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=args.batch_size, shuffle=True)
        classes = ('plane', 'car', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck')
        print("Loading CIFAR10 dataset.")
    elif dataset == "ImageNet":
        # Note: despite the branch name, this example loads CIFAR10 here, not ImageNet.
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('deeprobust/image/data', train=False, download=True,
                             transform=transforms.Compose([transforms.ToTensor()])),
            batch_size=args.batch_size, shuffle=True)
        print("Loading ImageNet dataset.")
    return test_loader
Example 22
Project: DeepRobust Author: DSE-MSU File: evaluation_attack.py License: MIT License
def parameter_parser():
    parser = argparse.ArgumentParser(description="Run attack algorithms.",
                                     usage='Use -h for more information.')
    parser.add_argument("--attack_method", default='PGD',
                        help="Choose an attack algorithm from: PGD (default), FGSM, LBFGS, CW, deepfool, onepixel, Nattack")
    parser.add_argument("--attack_model", default="CNN",
                        help="Choose a network structure from: CNN, ResNet")
    parser.add_argument("--path", default="./trained_models/",
                        help="Path where the model is saved.")
    parser.add_argument("--file_name", default='MNIST_CNN_epoch_20.pt',
                        help="File name of the model to be attacked. The model structure should match the --attack_model parameter.")
    parser.add_argument("--dataset", default='MNIST',
                        help="Choose a dataset from: MNIST (default), CIFAR (or CIFAR10), ImageNet")
    parser.add_argument("--epsilon", type=float, default=0.3)
    parser.add_argument("--batch_num", type=int, default=1000)
    parser.add_argument("--batch_size", type=int, default=1000)
    parser.add_argument("--num_steps", type=int, default=40)
    parser.add_argument("--step_size", type=float, default=0.01)
    parser.add_argument("--random_targeted", type=bool, default=False,
                        help="Default: False. If True, randomly generate target labels for the input samples.")
    parser.add_argument("--target_label", type=int, default=-1,
                        help="Default: -1. Fixed target label used for all attacks.")
    parser.add_argument("--device", default='cuda',
                        help="Choose the device.")

    return parser.parse_args()
Example 23
Project: EvolutionaryGAN-pytorch Author: WANG-Chaoyue File: torchvision_dataset.py License: MIT License
def __init__(self, opt):
    """Initialize this dataset class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

    A few things can be done here:
    - save the options (already done in BaseDataset)
    - get image paths and meta information of the dataset
    - define the image transformation
    """
    # save the option and dataset root
    BaseDataset.__init__(self, opt)

    # define the default transform function. You can use <base_dataset.get_transform>;
    # you can also define your custom transform function.
    self.transform = get_transform(opt)

    # import torchvision dataset
    if opt.dataset_name == 'CIFAR10':
        from torchvision.datasets import CIFAR10 as torchvisionlib
    elif opt.dataset_name == 'CIFAR100':
        from torchvision.datasets import CIFAR100 as torchvisionlib
    else:
        raise ValueError('torchvision_dataset import fault.')

    self.dataload = torchvisionlib(root=opt.download_root,
                                   transform=self.transform,
                                   download=True)
Example 24
Project: self-attention-GAN-pytorch Author: voletiv File: utils.py License: MIT License
def make_dataloader(batch_size, dataset_type, data_path, shuffle=True, drop_last=True,
                    dataloader_args={},
                    resize=True, imsize=128, centercrop=False, centercrop_size=128,
                    totensor=True,
                    normalize=False, norm_mean=(0.5, 0.5, 0.5), norm_std=(0.5, 0.5, 0.5)):
    # Make transform
    transform = make_transform(resize=resize, imsize=imsize,
                               centercrop=centercrop, centercrop_size=centercrop_size,
                               totensor=totensor,
                               normalize=normalize, norm_mean=norm_mean, norm_std=norm_std)
    # Make dataset
    if dataset_type in ['folder', 'imagenet', 'lfw']:
        # folder dataset
        assert os.path.exists(data_path), "data_path does not exist! Given: " + data_path
        dataset = dset.ImageFolder(root=data_path, transform=transform)
    elif dataset_type == 'lsun':
        assert os.path.exists(data_path), "data_path does not exist! Given: " + data_path
        dataset = dset.LSUN(root=data_path, classes=['bedroom_train'], transform=transform)
    elif dataset_type == 'cifar10':
        if not os.path.exists(data_path):
            print("data_path does not exist! Given: {}\nDownloading CIFAR10 dataset...".format(data_path))
        dataset = dset.CIFAR10(root=data_path, download=True, transform=transform)
    elif dataset_type == 'fake':
        dataset = dset.FakeData(image_size=(3, centercrop_size, centercrop_size),
                                transform=transforms.ToTensor())
    assert dataset
    num_of_classes = len(dataset.classes)
    print("Data found! # of images =", len(dataset),
          ", # of classes =", num_of_classes, ", classes:", dataset.classes)
    # Make dataloader from dataset
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                             shuffle=shuffle, drop_last=drop_last,
                                             **dataloader_args)
    return dataloader, num_of_classes
Example 25
Project: NAO_pytorch Author: renqianluo File: train_search.py License: GNU General Public License v3.0
def build_cifar10(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.child_cutout_size)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=valid_transform)

    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)

    model = NASWSNetworkCIFAR(args, 10, args.child_layers, args.child_nodes, args.child_channels,
                              args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 26
Project: NAO_pytorch Author: renqianluo File: test_cifar.py License: GNU General Public License v3.0
def build_cifar10(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)

    model = NASNetworkCIFAR(args, 10, args.layers, args.nodes, args.channels,
                            args.keep_prob, args.drop_path_keep_prob,
                            args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info("multi adds = %fM", model.multi_adds / 1000000)
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)

    if torch.cuda.device_count() > 1:
        logging.info("Use %d %s", torch.cuda.device_count(), "GPUs !")
        model = nn.DataParallel(model)
    model = model.cuda()

    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 27
Project: NAO_pytorch Author: renqianluo File: test_cifar.py License: GNU General Public License v3.0
def build_cifar100(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size)
    train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
    # Note: the original source loads the CIFAR10 test split here, not CIFAR100.
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)

    model = NASNetworkCIFAR(args, 100, args.layers, args.nodes, args.channels,
                            args.keep_prob, args.drop_path_keep_prob,
                            args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    logging.info("multi adds = %fM", model.multi_adds / 1000000)
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)

    if torch.cuda.device_count() > 1:
        logging.info("Use %d %s", torch.cuda.device_count(), "GPUs !")
        model = nn.DataParallel(model)
    model = model.cuda()

    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 28
Project: NAO_pytorch Author: renqianluo File: train_cifar.py License: GNU General Public License v3.0
def build_cifar10(model_state_dict, optimizer_state_dict, **kwargs):
    epoch = kwargs.pop('epoch')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.cutout_size, args.autoaugment)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=16)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.eval_batch_size, shuffle=False, pin_memory=True, num_workers=16)

    model = NASNetworkCIFAR(args, 10, args.layers, args.nodes, args.channels,
                            args.keep_prob, args.drop_path_keep_prob,
                            args.use_aux_head, args.steps, args.arch)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)

    if torch.cuda.device_count() > 1:
        logging.info("Use %d %s", torch.cuda.device_count(), "GPUs !")
        model = nn.DataParallel(model)
    model = model.cuda()

    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr_max,
        momentum=0.9,
        weight_decay=args.l2_reg,
    )
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), args.lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler
Example 29
Project: NAO_pytorch Author: renqianluo File: train_search.py License: GNU General Public License v3.0
def build_cifar10(model_state_dict=None, optimizer_state_dict=None, **kwargs):
    epoch = kwargs.pop('epoch')
    ratio = kwargs.pop('ratio')
    train_transform, valid_transform = utils._data_transforms_cifar10(args.child_cutout_size)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=valid_transform)

    num_train = len(train_data)
    assert num_train == len(valid_data)
    indices = list(range(num_train))
    split = int(np.floor(ratio * num_train))
    np.random.shuffle(indices)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.child_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=16)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.child_eval_batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=16)

    model = NASWSNetworkCIFAR(10, args.child_layers, args.child_nodes, args.child_channels,
                              args.child_keep_prob, args.child_drop_path_keep_prob,
                              args.child_use_aux_head, args.steps)
    model = model.cuda()
    train_criterion = nn.CrossEntropyLoss().cuda()
    eval_criterion = nn.CrossEntropyLoss().cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.child_lr_max,
        momentum=0.9,
        weight_decay=args.child_l2_reg,
    )
    if model_state_dict is not None:
        model.load_state_dict(model_state_dict)
    if optimizer_state_dict is not None:
        optimizer.load_state_dict(optimizer_state_dict)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.child_epochs, args.child_lr_min, epoch)
    return train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler