Python torchvision.transforms.Normalize() Examples

The following are 30 code examples showing how to use torchvision.transforms.Normalize(). They are extracted from open source projects; each example notes the project, author, file, and license it comes from. Note that several of the older examples use transforms.Scale and transforms.RandomSizedCrop, which have since been renamed transforms.Resize and transforms.RandomResizedCrop in torchvision.

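Normalize(mean, std) operates channel-by-channel on a tensor image, computing output[c] = (input[c] - mean[c]) / std[c]. Because it works on tensors rather than PIL images, it must come after ToTensor() in a Compose pipeline. The values mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225] that recur throughout these examples are the standard per-channel ImageNet statistics expected by torchvision's pretrained models. As a minimal, self-contained sketch:

import torch
from torchvision import transforms

# Normalize expects a (C, H, W) float tensor, so ToTensor() must run first.
transform = transforms.Compose([
    transforms.ToTensor(),  # PIL image or ndarray -> float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Normalize can also be applied to a tensor directly:
x = torch.rand(3, 224, 224)
y = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])(x)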

Example 1
Project: transferlearning   Author: jindongwang   File: data_loader.py    License: MIT License
def load_data(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
        [transforms.RandomResizedCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225]),
         ]),
        'tar': transforms.Compose(
        [transforms.Resize(224),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225]),
         ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return data_loader 
Example 2
Project: dogTorch   Author: ehsanik   File: nyu_walkable_surface_dataset.py    License: MIT License
def __init__(self, args, train=True):
        self.root_dir = args.data

        if train:
            self.data_set_list = train_set_list
        elif args.use_test_for_val:
            self.data_set_list = test_set_list
        else:
            self.data_set_list = val_set_list

        self.data_set_list = ['%06d.png' % (x) for x in self.data_set_list]
        self.args = args
        self.read_features = args.read_features

        self.features_dir = args.features_dir
        self.transform = transforms.Compose([
            transforms.Scale((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        self.transform_segmentation = transforms.Compose([
            transforms.Scale((args.segmentation_size, args.segmentation_size)),
            transforms.ToTensor(),
        ]) 
Example 3
Project: iAI   Author: aimuch   File: model.py    License: MIT License
def __init__(self):
        self.batch_size = 64
        self.test_batch_size = 100
        self.learning_rate = 0.01
        self.sgd_momentum = 0.9
        self.log_interval = 100
        # Fetch MNIST data set.
        self.train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=self.batch_size,
            shuffle=True)
        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=self.test_batch_size,
            shuffle=True)
        self.network = Net()

    # Train the network for several epochs, validating after each epoch. 
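The MNIST constants (0.1307,) and (0.3081,) used above are the global mean and standard deviation of the MNIST training images. When targeting a custom dataset, such statistics can be computed once and hard-coded; a rough sketch (the batch size is illustrative, the path mirrors the example above):

import torch
from torchvision import datasets, transforms

# Illustrative: compute per-channel mean/std of a dataset in one pass.
dataset = datasets.MNIST('/tmp/mnist/data', train=True, download=True,
                         transform=transforms.ToTensor())
loader = torch.utils.data.DataLoader(dataset, batch_size=1000)

n, mean, sq_mean = 0, 0.0, 0.0
for images, _ in loader:
    images = images.flatten(start_dim=2)       # (B, C, H*W)
    n += images.size(0)
    mean += images.mean(dim=2).sum(dim=0)      # sum of per-image channel means
    sq_mean += images.pow(2).mean(dim=2).sum(dim=0)
mean /= n
std = (sq_mean / n - mean.pow(2)).sqrt()
print(mean, std)  # approximately tensor([0.1307]) and tensor([0.3081])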
Example 4
Project: transferlearning   Author: jindongwang   File: data_load.py    License: MIT License
def load_data(data_folder, batch_size, phase='train', train_val_split=True, train_ratio=.8):
    transform_dict = {
        'train': transforms.Compose(
            [transforms.Resize(256),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'test': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}

    data = datasets.ImageFolder(root=data_folder, transform=transform_dict[phase])
    if phase == 'train':
        if train_val_split:
            train_size = int(train_ratio * len(data))
            test_size = len(data) - train_size
            data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
            train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=True,
                                                    num_workers=4)
            val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=False, drop_last=False,
                                                num_workers=4)
            return [train_loader, val_loader]
        else:
            train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True,
                                                    num_workers=4)
            return train_loader
    else: 
        test_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False, drop_last=False,
                                                    num_workers=4)
        return test_loader

## Below are for ImageCLEF datasets 
Example 5
Project: transferlearning   Author: jindongwang   File: data_load.py    License: MIT License
def load_imageclef_train(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256, 256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=False,
                                               num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, drop_last=False,
                                             num_workers=4)
    return train_loader, val_loader 
Example 6
Project: transferlearning   Author: jindongwang   File: data_load.py    License: MIT License
def load_imageclef_test(root_path, domain, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.Resize((256,256)),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return data_loader 
Example 7
Project: transferlearning   Author: jindongwang   File: data_loader.py    License: MIT License
def load_data(data_folder, batch_size, train, kwargs):
    transform = {
        'train': transforms.Compose(
            [transforms.Resize([256, 256]),
                transforms.RandomCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])]),
        'test': transforms.Compose(
            [transforms.Resize([224, 224]),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])])
        }
    data = datasets.ImageFolder(root = data_folder, transform=transform['train' if train else 'test'])
    data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=True, **kwargs, drop_last = True if train else False)
    return data_loader 
Example 8
Project: transferlearning   Author: jindongwang   File: data_loader.py    License: MIT License
def load_train(root_path, dir, batch_size, phase):
    transform_dict = {
        'src': transforms.Compose(
            [transforms.RandomResizedCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ]),
        'tar': transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225]),
             ])}
    data = datasets.ImageFolder(root=root_path + dir, transform=transform_dict[phase])
    train_size = int(0.8 * len(data))
    test_size = len(data) - train_size
    data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    return train_loader, val_loader 
Example 9
Project: steppy-toolkit   Author: minerva-ml   File: segmentation.py    License: MIT License
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
        super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

        self.image_transform = transforms.Compose([transforms.Grayscale(num_output_channels=3),
                                                   transforms.ToTensor(),
                                                   transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                        std=self.dataset_params.STD),
                                                   ])
        self.mask_transform = transforms.Compose([transforms.Lambda(to_array),
                                                  transforms.Lambda(to_tensor),
                                                  ])

        self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
        self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])
        self.image_augment_inference = ImgAug(self.augmentation_params['image_augment_inference'])
        self.image_augment_with_target_inference = ImgAug(
            self.augmentation_params['image_augment_with_target_inference'])

        if self.dataset_params.target_format == 'png':
            self.dataset = ImageSegmentationPngDataset
        elif self.dataset_params.target_format == 'json':
            self.dataset = ImageSegmentationJsonDataset
        else:
            raise Exception('files must be png or json') 
Example 10
Project: steppy-toolkit   Author: minerva-ml   File: segmentation.py    License: MIT License
def __init__(self, train_mode, loader_params, dataset_params, augmentation_params):
        super().__init__(train_mode, loader_params, dataset_params, augmentation_params)

        self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                                   transforms.Grayscale(num_output_channels=3),
                                                   transforms.ToTensor(),
                                                   transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                        std=self.dataset_params.STD),
                                                   ])
        self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                    interpolation=0),
                                                  transforms.Lambda(to_array),
                                                  transforms.Lambda(to_tensor),
                                                  ])

        self.image_augment_train = ImgAug(self.augmentation_params['image_augment_train'])
        self.image_augment_with_target_train = ImgAug(self.augmentation_params['image_augment_with_target_train'])

        if self.dataset_params.target_format == 'png':
            self.dataset = ImageSegmentationPngDataset
        elif self.dataset_params.target_format == 'json':
            self.dataset = ImageSegmentationJsonDataset
        else:
            raise Exception('files must be png or json') 
Example 11
Project: steppy-toolkit   Author: minerva-ml   File: segmentation.py    License: MIT License
def __init__(self, loader_params, dataset_params, augmentation_params):
        super().__init__(loader_params, dataset_params, augmentation_params)

        self.image_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w)),
                                                   transforms.Grayscale(num_output_channels=3),
                                                   transforms.ToTensor(),
                                                   transforms.Normalize(mean=self.dataset_params.MEAN,
                                                                        std=self.dataset_params.STD),
                                                   ])
        self.mask_transform = transforms.Compose([transforms.Resize((self.dataset_params.h, self.dataset_params.w),
                                                                    interpolation=0),
                                                  transforms.Lambda(to_array),
                                                  transforms.Lambda(to_tensor),
                                                  ])

        self.dataset = ImageSegmentationTTADataset 
Example 12
Project: pytorch-multigpu   Author: dnddnjs   File: train.py    License: MIT License
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr, 
    #                       momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
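The tuples (0.4914, 0.4822, 0.4465) and (0.2023, 0.1994, 0.2010) above are the commonly used CIFAR-10 per-channel statistics. Internally, Normalize is plain broadcasting arithmetic; a small sketch verifying the equivalence:

import torch
from torchvision import transforms

mean = torch.tensor([0.4914, 0.4822, 0.4465])
std = torch.tensor([0.2023, 0.1994, 0.2010])

x = torch.rand(3, 32, 32)
y = transforms.Normalize(mean, std)(x)

# Same computation done by hand, broadcasting over (C, 1, 1):
y_manual = (x - mean[:, None, None]) / std[:, None, None]
assert torch.allclose(y, y_manual)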
Example 13
Project: iAI   Author: aimuch   File: model.py    License: MIT License
def __init__(self):
        self.batch_size = 64
        self.test_batch_size = 100
        self.learning_rate = 0.0025
        self.sgd_momentum = 0.9
        self.log_interval = 100
        # Fetch MNIST data set.
        self.train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=self.batch_size,
            shuffle=True)
        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=self.test_batch_size,
            shuffle=True)
        self.network = Net()

    # Train the network for one or more epochs, validating after each epoch. 
Example 14
Project: iAI   Author: aimuch   File: model.py    License: MIT License
def __init__(self):
        self.batch_size = 64
        self.test_batch_size = 100
        self.learning_rate = 0.0025
        self.sgd_momentum = 0.9
        self.log_interval = 100
        # Fetch MNIST data set.
        self.train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('/tmp/mnist/data', train=True, download=True, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=self.batch_size,
            shuffle=True)
        self.test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('/tmp/mnist/data', train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
                ])),
            batch_size=self.test_batch_size,
            shuffle=True)
        self.network = Net()

    # Train the network for one or more epochs, validating after each epoch. 
Example 15
Project: dfw   Author: oval-group   File: loaders.py    License: MIT License
def loaders_mnist(dataset, batch_size=64, cuda=0,
                  train_size=50000, val_size=10000, test_size=10000,
                  test_batch_size=1000, **kwargs):

    assert dataset == 'mnist'
    root = '{}/{}'.format(os.environ['VISION_DATA'], dataset)

    # Data loading code
    normalize = transforms.Normalize(mean=(0.1307,),
                                     std=(0.3081,))

    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # define two datasets in order to have different transforms
    # on training and validation
    dataset_train = datasets.MNIST(root=root, train=True, transform=transform)
    dataset_val = datasets.MNIST(root=root, train=True, transform=transform)
    dataset_test = datasets.MNIST(root=root, train=False, transform=transform)

    return create_loaders(dataset_train, dataset_val,
                          dataset_test, train_size, val_size, test_size,
                          batch_size=batch_size,
                          test_batch_size=test_batch_size,
                          cuda=cuda, num_workers=0) 
Example 16
Project: robosat   Author: mapbox   File: serve.py    License: MIT License
def segment(self, image):
        # don't track tensors with autograd during prediction
        with torch.no_grad():
            mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

            transform = Compose([ConvertImageMode(mode="RGB"), ImageToTensor(), Normalize(mean=mean, std=std)])
            image = transform(image)

            batch = image.unsqueeze(0).to(self.device)

            output = self.net(batch)

            output = output.cpu().data.numpy()
            output = output.squeeze(0)

            mask = output.argmax(axis=0).astype(np.uint8)

            mask = Image.fromarray(mask, mode="P")

            palette = make_palette(*self.dataset["common"]["colors"])
            mask.putpalette(palette)

            return mask 
Example 17
Project: pytorch-atda   Author: corenel   File: usps.py    License: MIT License
def get_usps(train, get_dataset=False, batch_size=cfg.batch_size):
    """Get USPS dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=cfg.dataset_mean,
                                          std=cfg.dataset_std)])

    # dataset and data loader
    usps_dataset = USPS(root=cfg.data_root,
                        train=train,
                        transform=pre_process,
                        download=True)

    if get_dataset:
        return usps_dataset
    else:
        usps_data_loader = torch.utils.data.DataLoader(
            dataset=usps_dataset,
            batch_size=batch_size,
            shuffle=True)
        return usps_data_loader 
Example 18
Project: pytorch-atda   Author: corenel   File: mnist_m.py    License: MIT License
def get_mnist_m(train, get_dataset=False, batch_size=cfg.batch_size):
    """Get MNIST-M dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=cfg.dataset_mean,
                                          std=cfg.dataset_std)])

    # dataset and data loader
    mnist_m_dataset = MNIST_M(root=cfg.data_root,
                              train=train,
                              transform=pre_process,
                              download=True)

    if get_dataset:
        return mnist_m_dataset
    else:
        mnist_m_data_loader = torch.utils.data.DataLoader(
            dataset=mnist_m_dataset,
            batch_size=batch_size,
            shuffle=True)
        return mnist_m_data_loader 
Example 19
Project: pytorch-atda   Author: corenel   File: svhn.py    License: MIT License
def get_svhn(train, get_dataset=False, batch_size=cfg.batch_size):
    """Get SVHN dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=cfg.dataset_mean,
                                          std=cfg.dataset_std)])

    # dataset and data loader
    svhn_dataset = datasets.SVHN(root=cfg.data_root,
                                 split='train' if train else 'test',
                                 transform=pre_process,
                                 download=True)

    if get_dataset:
        return svhn_dataset
    else:
        svhn_data_loader = torch.utils.data.DataLoader(
            dataset=svhn_dataset,
            batch_size=batch_size,
            shuffle=True)
        return svhn_data_loader 
Example 20
Project: pytorch-atda   Author: corenel   File: mnist.py    License: MIT License
def get_mnist(train, get_dataset=False, batch_size=cfg.batch_size):
    """Get MNIST dataset loader."""
    # image pre-processing
    convert_to_3_channels = transforms.Lambda(
        lambda x: torch.cat([x, x, x], 0))
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=cfg.dataset_mean,
                                          std=cfg.dataset_std),
                                      convert_to_3_channels])

    # dataset and data loader
    mnist_dataset = datasets.MNIST(root=cfg.data_root,
                                   train=train,
                                   transform=pre_process,
                                   download=True)

    if get_dataset:
        return mnist_dataset
    else:
        mnist_data_loader = torch.utils.data.DataLoader(
            dataset=mnist_dataset,
            batch_size=batch_size,
            shuffle=True)
        return mnist_data_loader 
Example 21
Project: sgd-influence   Author: sato9hara   File: outlier.py    License: MIT License
def cifar10():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True, transform=transform_train, seed=0)
    valset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True, transform=transform_test, seed=0)
    testset = MyCIFAR10.CIFAR10(root='./data', train=False, download=True, transform=transform_test, seed=0)
    net_func = MyNet.CifarAE
    return net_func, trainset, valset, testset 
Example 22
Project: sgd-influence   Author: sato9hara   File: train.py    License: MIT License
def cifar10():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True, transform=transform_train, seed=0)
    valset = MyCIFAR10.CIFAR10(root='./data', train=True, download=True, transform=transform_test, seed=0)
    testset = MyCIFAR10.CIFAR10(root='./data', train=False, download=True, transform=transform_test, seed=0)
    net_func = MyNet.CifarNet
    return net_func, trainset, valset, testset 
Example 23
Project: cycada_release   Author: jhoffman   File: data_loader.py    License: BSD 2-Clause "Simplified" License
def get_transform(params, image_size, num_channels):
    # Transforms for PIL Images: Gray <-> RGB
    Gray2RGB = transforms.Lambda(lambda x: x.convert('RGB'))
    RGB2Gray = transforms.Lambda(lambda x: x.convert('L'))

    transform = []
    # Does size request match original size?
    if not image_size == params.image_size:
        transform.append(transforms.Resize(image_size))
   
    # Does number of channels requested match original?
    if not num_channels == params.num_channels:
        if num_channels == 1:
            transform.append(RGB2Gray)
        elif num_channels == 3:
            transform.append(Gray2RGB)
        else:
            print('NumChannels should be 1 or 3', num_channels)
            raise Exception

    transform += [transforms.ToTensor(), 
            transforms.Normalize((params.mean,), (params.std,))]

    return transforms.Compose(transform) 
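Note how this example pairs the channel conversion with one-element tuples (params.mean,) and (params.std,): the lengths of the mean and std sequences passed to Normalize must match the channel count of the incoming tensor. A sketch of that contract (the 0.5 values are illustrative):

import torch
from torchvision import transforms

gray = torch.rand(1, 28, 28)
rgb = torch.rand(3, 28, 28)

transforms.Normalize((0.5,), (0.5,))(gray)                    # 1 value, 1 channel: OK
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(rgb)   # 3 values, 3 channels: OK
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(gray)  # shape mismatch: raises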
Example 24
Project: VSE-C   Author: ExplorerFreda   File: data.py    License: MIT License
def get_transform(data_name, split_name, opt):
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
    t_list = []
    if split_name == 'train':
        t_list = [transforms.RandomSizedCrop(opt.crop_size),
                  transforms.RandomHorizontalFlip()]
    elif split_name == 'val':
        t_list = [transforms.Scale(256), transforms.CenterCrop(224)]
    elif split_name == 'test':
        t_list = [transforms.Scale(256), transforms.CenterCrop(224)]

    t_end = [transforms.ToTensor(), normalizer]
    transform = transforms.Compose(t_list + t_end)
    return transform 
Example 25
Project: VSE-C   Author: ExplorerFreda   File: saliency_visualization.py    License: MIT License
def build_image_transforms(self):
        self.image_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]) 
Example 26
Project: Pytorch-Project-Template   Author: moemen95   File: celebA.py    License: MIT License
def __init__(self, config):
        self.config = config

        if config.data_mode == "imgs":
            transform = v_transforms.Compose(
                [v_transforms.ToTensor(),
                 v_transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])

            dataset = v_datasets.ImageFolder(self.config.data_folder, transform=transform)

            self.dataset_len = len(dataset)

            self.num_iterations = (self.dataset_len + config.batch_size - 1) // config.batch_size

            self.loader = DataLoader(dataset,
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     num_workers=config.data_loader_workers,
                                     pin_memory=config.pin_memory)
        elif config.data_mode == "numpy":
            raise NotImplementedError("This mode is not implemented YET")
        else:
            raise Exception("Please specify in the json a specified mode in data_mode") 
Example 27
Project: dogTorch   Author: ehsanik   File: dog_multi_image_dataset.py    License: MIT License
def __init__(self, args, train=True):
        root_dir = args.data
        if train:
            json_file = os.path.join(root_dir, 'train.json')
        elif args.use_test_for_val:
            json_file = os.path.join(root_dir, 'test.json')
        else:
            json_file = os.path.join(root_dir, 'val.json')

        self.num_classes = args.num_classes
        self.sequence_length = args.sequence_length
        self.experiment_type = args.experiment_type
        self.regression = args.regression

        self.read_features = args.read_features
        self.frames_metadata, self.idx_to_fid, self.centroids = _read_labels(
            json_file, args.imus, args.sequence_length)

        self.root_dir = root_dir
        self.features_dir = args.features_dir
        self.transform = transforms.Compose([
            transforms.Scale((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]) 
Example 28
Project: dogTorch   Author: ehsanik   File: sun_dataset.py    License: MIT License
def __init__(self, args, train=True):
        self.root_dir = args.data
        root_dir = self.root_dir
        if train:
            self.data_set_list = os.path.join(root_dir,
                                              args.trainset_image_list)
        else:
            self.data_set_list = os.path.join(root_dir, args.testset_image_list)

        self.categ_dict = get_class_names(
            os.path.join(root_dir, 'ClassName.txt'))

        self.data_set_list = parse_file(self.data_set_list, self.categ_dict)

        self.args = args
        self.read_features = args.read_features

        self.features_dir = args.features_dir
        if train:
            self.transform = transforms.Compose([
                transforms.RandomSizedCrop(args.image_size),
                transforms.RandomHorizontalFlip(),
                transforms.Scale((args.image_size, args.image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])
        else:
            self.transform = transforms.Compose([
                transforms.Scale((args.image_size, args.image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ]) 
Example 29
Project: dogTorch   Author: ehsanik   File: dog_clip_regression_dataset.py    License: MIT License
def __init__(self, args, train=True):
        root_dir = args.data
        if train or args.read_feature_and_image:
            json_file = os.path.join(root_dir, 'train.json')
        elif args.use_test_for_val:
            json_file = os.path.join(root_dir, 'test.json')
        else:
            json_file = os.path.join(root_dir, 'val.json')

        self.num_classes = args.num_classes
        self.sequence_length = args.sequence_length
        self.experiment_type = args.experiment_type
        self.regression = args.regression
        self.args = args

        self.read_features = args.read_features
        self.frames_metadata, self.idx_to_fid, self.centroids = _read_labels(
            json_file, args.imus, args.sequence_length)

        self.root_dir = root_dir
        self.features_dir = args.features_dir
        self.transform = transforms.Compose([
            transforms.Scale((args.image_size, args.image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]) 
Example 30
Project: transferlearning   Author: jindongwang   File: digit_data_loader.py    License: MIT License
def load_data(domain, root_dir, batch_size):
    src_train_img, src_train_label, src_test_img, src_test_label = load_dataset(domain['src'], root_dir)
    tar_train_img, tar_train_label, tar_test_img, tar_test_label = load_dataset(domain['tar'], root_dir)
    transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    data_src_train, data_src_test = GetDataset(src_train_img, src_train_label,
                                               transform), GetDataset(src_test_img,
                                                                      src_test_label,
                                                                      transform)
    data_tar_train, data_tar_test = GetDataset(tar_train_img, tar_train_label,
                                               transform), GetDataset(tar_test_img,
                                                                      tar_test_label,
                                                                      transform)
    dataloaders = {}
    dataloaders['src'] = torch.utils.data.DataLoader(data_src_train, batch_size=batch_size, shuffle=True,
                                                     drop_last=False,
                                                     num_workers=4)
    dataloaders['val'] = torch.utils.data.DataLoader(data_src_test, batch_size=batch_size, shuffle=True,
                                                     drop_last=False,
                                                     num_workers=4)
    dataloaders['tar'] = torch.utils.data.DataLoader(data_tar_train, batch_size=batch_size, shuffle=True,
                                                     drop_last=False,
                                                     num_workers=4)
    return dataloaders