Python torchvision.transforms.Pad() Examples

The following are 28 code examples of torchvision.transforms.Pad(), drawn from open-source projects; the project and source file for each example are listed above it. You may also want to check out the other available functions and classes of the torchvision.transforms module.
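As a quick orientation before the examples: transforms.Pad accepts an int (the same padding on all four sides), a 2-tuple (left/right, top/bottom), or a 4-tuple (left, top, right, bottom), along with a fill value and a padding_mode of 'constant', 'edge', 'reflect', or 'symmetric'. A minimal sketch of the three argument forms (the blank image is a stand-in for real data):

from PIL import Image
from torchvision import transforms

img = Image.new("RGB", (100, 80))  # stand-in for a real image

# int: pad 4 px on every side -> 108x88
print(transforms.Pad(4)(img).size)

# 2-tuple: 10 px on left/right, 20 px on top/bottom -> 120x120
print(transforms.Pad((10, 20))(img).size)

# 4-tuple: (left, top, right, bottom) -> 111x83
print(transforms.Pad((1, 2, 10, 1), fill=128, padding_mode='constant')(img).size)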
Example #1
Source File: inference_detection.py    From pytorch-mot-tracking with MIT License
def detect_image(img, model, img_size=416, conf_threshold=0.8, nms_threshold=0.4):
    # resize and pad image
    ratio = min(img_size/img.size[0], img_size/img.size[1])
    imw = round(img.size[0] * ratio)
    imh = round(img.size[1] * ratio)
    img_transforms = transforms.Compose([
        transforms.Resize((imh, imw)),
        transforms.Pad((
            max(int((imh-imw)/2),0),
            max(int((imw-imh)/2),0)), fill=(128,128,128)),
        transforms.ToTensor(),
    ])

    # convert image to Tensor
    Tensor = torch.cuda.FloatTensor
    tensor = img_transforms(img).float()
    tensor = tensor.unsqueeze_(0)
    input_image = Variable(tensor.type(Tensor))

    # run inference on the model and get detections
    with torch.no_grad():
        detections = model(input_image)
        detections = non_max_suppression(detections, 80, conf_threshold, nms_threshold)
    return detections[0] 
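The 2-tuple passed to Pad above is (left/right, top/bottom) padding, so the shorter side of the resized image is padded symmetrically up to the target size. A worked check of the arithmetic on a hypothetical 640x480 input:

# hypothetical 640x480 input with img_size=416
img_size = 416
w, h = 640, 480
ratio = min(img_size / w, img_size / h)        # 0.65
imw, imh = round(w * ratio), round(h * ratio)  # 416, 312
pad_lr = max(int((imh - imw) / 2), 0)          # 0
pad_tb = max(int((imw - imh) / 2), 0)          # 52
print(imw + 2 * pad_lr, imh + 2 * pad_tb)      # 416 416, i.e. square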
Example #2
Source File: train.py    From RelationNetworks-CLEVR with MIT License
def initialize_dataset(clevr_dir, dictionaries, state_description=True):
    if not state_description:
        train_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                           transforms.Pad(8),
                                           transforms.RandomCrop((128, 128)),
                                           transforms.RandomRotation(2.8),  # .05 rad
                                           transforms.ToTensor()])
        test_transforms = transforms.Compose([transforms.Resize((128, 128)),
                                          transforms.ToTensor()])
                                          
        clevr_dataset_train = ClevrDataset(clevr_dir, True, dictionaries, train_transforms)
        clevr_dataset_test = ClevrDataset(clevr_dir, False, dictionaries, test_transforms)
        
    else:
        clevr_dataset_train = ClevrDatasetStateDescription(clevr_dir, True, dictionaries)
        clevr_dataset_test = ClevrDatasetStateDescription(clevr_dir, False, dictionaries)
    
    return clevr_dataset_train, clevr_dataset_test 
Example #3
Source File: __init__.py    From reid_baseline_with_syncbn with MIT License
def get_trm(cfg, is_train=True):
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
    if is_train:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ToTensor(),
            normalize_transform,
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
        ])
    else:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize_transform
        ])
    return transform 
Example #4
Source File: mulitidatasets.py    From Adversarial-Continual-Learning with MIT License
def get_dataset(self, dataset_idx, task_num, num_samples_per_class=False, normalize=True):
        dataset_name = list(mean_datasets.keys())[dataset_idx]
        nspc = num_samples_per_class
        if normalize:
            transformation = transforms.Compose([transforms.ToTensor(),
                                                 transforms.Normalize(mean_datasets[dataset_name],std_datasets[dataset_name])])
            mnist_transformation = transforms.Compose([
                transforms.Pad(padding=2, fill=0),
                transforms.ToTensor(),
                transforms.Normalize(mean_datasets[dataset_name], std_datasets[dataset_name])])
        else:
            transformation = transforms.Compose([transforms.ToTensor()])
            mnist_transformation = transforms.Compose([
                transforms.Pad(padding=2, fill=0),
                transforms.ToTensor(),
                ])

        # target_transformation = transforms.Compose([transforms.ToTensor()])
        target_transformation = None

        if dataset_idx == 0:
            trainset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform=target_transformation, transform=transformation)
            testset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform=target_transformation, transform=transformation)

        elif dataset_idx == 1:
            trainset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
            testset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform=target_transformation, transform=mnist_transformation)

        elif dataset_idx == 2:
            trainset = MNIST_RGB(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
            testset = MNIST_RGB(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=mnist_transformation)

        elif dataset_idx == 3:
            trainset = SVHN_(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=transformation)
            testset = SVHN_(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform=target_transformation, transform=transformation)

        elif dataset_idx == 4:
            trainset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=True, download=self.download, target_transform=target_transformation, transform=mnist_transformation)
            testset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=False, download=self.download, target_transform=target_transformation, transform=mnist_transformation)

        return trainset, testset 
Example #5
Source File: drawrect.py    From DFL-CNN with MIT License
def transform_onlysize():
    transform_list = []
    transform_list.append(transforms.Resize(448))
    transform_list.append(transforms.CenterCrop((448, 448)))
    transform_list.append(transforms.Pad((42, 42)))
    return transforms.Compose(transform_list) 
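Here the 2-tuple (42, 42) pads 42 px on left/right and top/bottom alike, taking the 448x448 center crop to 532x532; given the file name, presumably to leave a margin for drawing rectangles. A quick shape check with the function above in scope:

from PIL import Image

crop = Image.new("RGB", (448, 448))       # stand-in input
print(transform_onlysize()(crop).size)    # (532, 532): 448 + 42 + 42 per dimension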
Example #6
Source File: base.py    From Continual-Learning-Benchmark with MIT License
def MNIST(dataroot, train_aug=False):
    # Add padding to make 32x32
    #normalize = transforms.Normalize(mean=(0.1307,), std=(0.3081,))  # for 28x28
    normalize = transforms.Normalize(mean=(0.1000,), std=(0.2752,))  # for 32x32

    val_transform = transforms.Compose([
        transforms.Pad(2, fill=0, padding_mode='constant'),
        transforms.ToTensor(),
        normalize,
    ])
    train_transform = val_transform
    if train_aug:
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            normalize,
        ])

    train_dataset = torchvision.datasets.MNIST(
        root=dataroot,
        train=True,
        download=True,
        transform=train_transform
    )
    train_dataset = CacheClassLabel(train_dataset)

    val_dataset = torchvision.datasets.MNIST(
        dataroot,
        train=False,
        transform=val_transform
    )
    val_dataset = CacheClassLabel(val_dataset)

    return train_dataset, val_dataset 
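Pad(2) grows MNIST's 28x28 digits to 32x32, which is why the normalization constants here differ from the usual 28x28 values (see the commented-out line): the zero border shifts the mean and standard deviation. A quick shape check, with a blank image standing in for a real digit:

from PIL import Image
from torchvision import transforms

digit = Image.new("L", (28, 28))  # stand-in for an MNIST digit
padded = transforms.Pad(2, fill=0, padding_mode='constant')(digit)
print(padded.size)                # (32, 32)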
Example #7
Source File: main.py    From binary-wide-resnet with MIT License
def create_dataset(args, train):
    transform = T.Compose([
        T.ToTensor(),
        T.Normalize(np.array([125.3, 123.0, 113.9]) / 255.0,
                    np.array([63.0, 62.1, 66.7]) / 255.0),
    ])
    if train:
        transform = T.Compose([
            T.Pad(4, padding_mode='reflect'),
            T.RandomHorizontalFlip(),
            T.RandomCrop(32),
            transform
        ])
    return getattr(datasets, args.dataset)(args.dataroot, train=train, download=True, transform=transform) 
Example #8
Source File: dataset.py    From pytorch-playground with MIT License
def get(batch_size, data_root='/mnt/local0/public_dataset/pytorch/', train=True, val=True, **kwargs):
    data_root = os.path.expanduser(os.path.join(data_root, 'stl10-data'))
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building STL10 data loader with {} workers".format(num_workers))
    ds = []
    if train:
        train_loader = torch.utils.data.DataLoader(
            datasets.STL10(
                root=data_root, split='train', download=True,
                transform=transforms.Compose([
                    transforms.Pad(4),
                    transforms.RandomCrop(96),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ])),
            batch_size=batch_size, shuffle=True, **kwargs)
        ds.append(train_loader)

    if val:
        test_loader = torch.utils.data.DataLoader(
            datasets.STL10(
                root=data_root, split='test', download=True,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ])),
            batch_size=batch_size, shuffle=False, **kwargs)
        ds.append(test_loader)

    ds = ds[0] if len(ds) == 1 else ds
    return ds 
Example #9
Source File: dataset.py    From pytorch-playground with MIT License
def get100(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
    data_root = os.path.expanduser(os.path.join(data_root, 'cifar100-data'))
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building CIFAR-100 data loader with {} workers".format(num_workers))
    ds = []
    if train:
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100(
                root=data_root, train=True, download=True,
                transform=transforms.Compose([
                    transforms.Pad(4),
                    transforms.RandomCrop(32),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ])),
            batch_size=batch_size, shuffle=True, **kwargs)
        ds.append(train_loader)

    if val:
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100(
                root=data_root, train=False, download=True,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ])),
            batch_size=batch_size, shuffle=False, **kwargs)
        ds.append(test_loader)
    ds = ds[0] if len(ds) == 1 else ds
    return ds 
Example #10
Source File: dataset.py    From pytorch-playground with MIT License
def get10(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
    data_root = os.path.expanduser(os.path.join(data_root, 'cifar10-data'))
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building CIFAR-10 data loader with {} workers".format(num_workers))
    ds = []
    if train:
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(
                root=data_root, train=True, download=True,
                transform=transforms.Compose([
                    transforms.Pad(4),
                    transforms.RandomCrop(32),
                    transforms.RandomHorizontalFlip(),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ])),
            batch_size=batch_size, shuffle=True, **kwargs)
        ds.append(train_loader)
    if val:
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10(
                root=data_root, train=False, download=True,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ])),
            batch_size=batch_size, shuffle=False, **kwargs)
        ds.append(test_loader)
    ds = ds[0] if len(ds) == 1 else ds
    return ds 
Example #11
Source File: preprocess.py    From pytorch_quantization with MIT License
def cifar_transform(is_training=True):
    if is_training:
        transform_list = [transforms.RandomHorizontalFlip(),
                          transforms.Pad(padding=4, padding_mode='reflect'),
                          transforms.RandomCrop(32, padding=0),
                          transforms.ToTensor(),
                          transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]
    else:
        transform_list = [transforms.ToTensor(),
                          transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]

    transform_list = transforms.Compose(transform_list)
    return transform_list 
Example #12
Source File: loader.py    From nni with MIT License
def get_test_loader(batch_size=16, index=0, dev_mode=False, pad_mode='edge'):
    test_meta = get_test_meta()
    if dev_mode:
        test_meta = test_meta.iloc[:10]
    test_set = ImageDataset(False, test_meta,
                            image_augment=None if pad_mode == 'resize' else transforms.Pad((13,13,14,14), padding_mode=pad_mode),
                            image_transform=get_tta_transforms(index, pad_mode))
    test_loader = data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=test_set.collate_fn, drop_last=False)
    test_loader.num = len(test_set)
    test_loader.meta = test_set.meta

    return test_loader 
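The 4-tuple here is (left, top, right, bottom) padding. Assuming the 101x101 inputs this loader appears to target (inferred from the arithmetic, not stated in the snippet), Pad((13, 13, 14, 14)) yields 128x128, a size divisible by common downsampling factors:

from PIL import Image
from torchvision import transforms

img = Image.new("RGB", (101, 101))  # assumed input size: 101 + 13 + 14 = 128
out = transforms.Pad((13, 13, 14, 14), padding_mode='edge')(img)
print(out.size)                     # (128, 128)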
Example #13
Source File: check_dataset.py    From L2T-ww with MIT License
def check_dataset(opt):
    normalize_transform = transforms.Compose([transforms.ToTensor(),
                                              transforms.Normalize((0.485, 0.456, 0.406),
                                                                   (0.229, 0.224, 0.225))])
    train_large_transform = transforms.Compose([transforms.RandomResizedCrop(224),
                                                transforms.RandomHorizontalFlip()])
    val_large_transform = transforms.Compose([transforms.Resize(256),
                                              transforms.CenterCrop(224)])
    train_small_transform = transforms.Compose([transforms.Pad(4),
                                                transforms.RandomCrop(32),
                                                transforms.RandomHorizontalFlip()])

    splits = check_split(opt)

    if opt.dataset in ['cub200', 'indoor', 'stanford40', 'dog']:
        train, val = 'train', 'test'
        train_transform = transforms.Compose([train_large_transform, normalize_transform])
        val_transform = transforms.Compose([val_large_transform, normalize_transform])
        sets = [dset.ImageFolder(root=os.path.join(opt.dataroot, train), transform=train_transform),
                dset.ImageFolder(root=os.path.join(opt.dataroot, train), transform=val_transform),
                dset.ImageFolder(root=os.path.join(opt.dataroot, val), transform=val_transform)]
        sets = [FolderSubset(dataset, *split) for dataset, split in zip(sets, splits)]

        opt.num_classes = len(splits[0][0])

    else:
        raise Exception('Unknown dataset')

    loaders = [torch.utils.data.DataLoader(dataset,
                                           batch_size=opt.batchSize,
                                           shuffle=True,
                                           num_workers=0) for dataset in sets]
    return loaders 
Example #14
Source File: test_trainer.py    From torchgan with MIT License
def mnist_dataloader():
    train_dataset = dsets.MNIST(
        root="./mnist",
        train=True,
        transform=transforms.Compose(
            [
                transforms.Pad((2, 2)),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.5,), std=(0.5,)),
            ]
        ),
        download=True,
    )
    train_loader = data.DataLoader(train_dataset, batch_size=128, shuffle=True)
    return train_loader 
Example #15
Source File: mnist.py    From Torchelie with MIT License
def build_transforms():
    tfms = TF.Compose([
        TF.Resize(32),
        TF.ToTensor(),
        TF.Normalize([0.5] * 3, [0.5] * 3, True),
    ])
    train_tfms = TF.Compose([
        TF.Pad(4),
        TF.RandomCrop(32),
        TF.ColorJitter(0.5, 0.5, 0.4, 0.05),
        TF.RandomHorizontalFlip(),
        TF.ToTensor(),
        TF.Normalize([0.5] * 3, [0.5] * 3, True),
    ])
    return tfms, train_tfms 
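In train_tfms, Pad(4) followed by RandomCrop(32) randomly shifts the 32x32 image by up to 4 px in each direction. torchvision's fused form is equivalent here, since RandomCrop's padding also defaults to constant zero fill:

import torchvision.transforms as TF

# equivalent to TF.Pad(4) followed by TF.RandomCrop(32)
random_shift = TF.RandomCrop(32, padding=4)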
Example #16
Source File: preprocessing.py    From pytorch_DoReFaNet with MIT License
def cifar_transform(is_training=True):
  if is_training:
    transform_list = [transforms.RandomHorizontalFlip(),
                      transforms.Pad(padding=4, padding_mode='reflect'),
                      transforms.RandomCrop(32, padding=0),
                      transforms.ToTensor(),
                      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]
  else:
    transform_list = [transforms.ToTensor(),
                      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]

  transform_list = transforms.Compose(transform_list)
  return transform_list 
Example #17
Source File: preprocess.py    From bigBatch with MIT License
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats, fill=0):
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.Pad(padding, fill=fill),
        transforms.RandomCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]) 
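A typical call, assuming the module-level __imagenet_stats default is in scope: pad a 32x32 image out to scale_size=40 (4 px per side), then random-crop back to 32:

# padding = int((40 - 32) / 2) = 4
train_transform = pad_random_crop(32, scale_size=40)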
Example #18
Source File: sampler.py    From Counting-ICCV-DSSINet with MIT License
def __getitem__(self, index):
        data, gt_density, gt_count = self.blob_list[index]
        fname = self.dataloader.query_fname(index)
        W, H = data.size
        fixed_size = self.fixed_size
        transform_img = []

        if fixed_size != -1 and not (H % fixed_size == 0 and W % fixed_size == 0):
            pad_h = ((H // fixed_size + 1) * fixed_size - H) % fixed_size
            pad_w = ((W // fixed_size + 1) * fixed_size - W) % fixed_size
            image_pads = (pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2)

            transform_img.append(transforms.Pad(image_pads, fill=0))
            H = H + pad_h
            W = W + pad_w
            mask = torch.zeros((H, W), dtype=torch.uint8).byte()
            mask[pad_h // 2:H - (pad_h - pad_h // 2), pad_w // 2:W - (pad_w - pad_w // 2)] = 1
        elif H % fixed_size == 0 and W % fixed_size == 0:
            mask = torch.ones((H, W), dtype=torch.uint8).byte()
        else:
            mask = None 

        normalizor = transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
        if fixed_size != -1:
            crop_indexs = [(x * fixed_size, y * fixed_size) for x, y in itertools.product(range(W // fixed_size), range(H // fixed_size))]
            transform_img.append(transforms.Lambda(lambda img: multi_crop(img, crop_indexs, fixed_size, fixed_size)))
            transform_img.append(transforms.Lambda(lambda crops: [transforms.ToTensor()(crop) for crop in crops]))
            transform_img.append(transforms.Lambda(lambda crops: torch.stack([normalizor(crop) for crop in crops])))
        else:
            transform_img.append(transforms.ToTensor())
            transform_img.append(normalizor)

        if self.dataloader.test:
            return index, fname, transforms.Compose(transform_img)(data.copy()), mask, gt_count
        else:
            return index, fname, transforms.Compose(transform_img)(data.copy()), mask, gt_density, gt_count 
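The padding arithmetic above rounds each dimension up to the next multiple of fixed_size and splits the pad as evenly as possible between the two sides. The same computation isolated as a helper (names are illustrative, not from the source):

def pad_to_multiple(w, h, multiple):
    # extra pixels needed to round each dimension up to the next multiple
    pad_w = (multiple - w % multiple) % multiple
    pad_h = (multiple - h % multiple) % multiple
    # (left, top, right, bottom), with any odd pixel going to the far side
    return (pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2)

print(pad_to_multiple(500, 375, 224))  # (86, 36, 86, 37) -> 672x448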
Example #19
Source File: datasets.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def cifar100(n_labels, data_root='./data-local/cifar100/'):
    channel_stats = dict(mean = [0.5071, 0.4867, 0.4408],
                         std = [0.2675, 0.2565, 0.2761])
    train_transform = transforms.Compose([
        transforms.Pad(2, padding_mode='reflect'),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    trainset = tv.datasets.CIFAR100(data_root, train=True, download=True,
                                   transform=train_transform)
    evalset = tv.datasets.CIFAR100(data_root, train=False, download=True,
                                   transform=eval_transform)
    num_classes = 100
    label_per_class = n_labels // num_classes
    labeled_idxs, unlabed_idxs = split_relabel_data(
                                    np.array(trainset.train_labels),
                                    trainset.train_labels,
                                    label_per_class,
                                    num_classes)
    return {
        'trainset': trainset,
        'evalset': evalset,
        'labeled_idxs': labeled_idxs,
        'unlabeled_idxs': unlabed_idxs,
        'num_classes': num_classes
    } 
Example #20
Source File: datasets.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def wscifar10(n_labels, data_root='./data-local/cifar10/'):
    channel_stats = dict(mean = [0.4914, 0.4822, 0.4465],
                         std = [0.2023, 0.1994, 0.2010])
    weak = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Pad(2, padding_mode='reflect'),
        transforms.RandomCrop(32),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    strong = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Pad(2, padding_mode='reflect'),
        transforms.RandomCrop(32),
        RandAugmentMC(n=2, m=10),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    train_transform = wstwice(weak, strong)
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    trainset = tv.datasets.CIFAR10(data_root, train=True, download=True,
                                   transform=train_transform)
    evalset = tv.datasets.CIFAR10(data_root, train=False, download=True,
                                   transform=eval_transform)
    num_classes = 10
    label_per_class = n_labels // num_classes
    labeled_idxs, unlabed_idxs = split_relabel_data(
                                    np.array(trainset.train_labels),
                                    trainset.train_labels,
                                    label_per_class,
                                    num_classes)
    return {
        'trainset': trainset,
        'evalset': evalset,
        'label_idxs': labeled_idxs,
        'unlab_idxs': unlabed_idxs,
        'num_classes': num_classes
    } 
Example #21
Source File: datasets.py    From Tricks-of-Semi-supervisedDeepLeanring-Pytorch with MIT License
def cifar10(n_labels, data_root='./data-local/cifar10/'):
    channel_stats = dict(mean = [0.4914, 0.4822, 0.4465],
                         std = [0.2023, 0.1994, 0.2010])
    train_transform = transforms.Compose([
        transforms.Pad(2, padding_mode='reflect'),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.RandomCrop(32),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats)
    ])
    trainset = tv.datasets.CIFAR10(data_root, train=True, download=True,
                                   transform=train_transform)
    evalset = tv.datasets.CIFAR10(data_root, train=False, download=True,
                                   transform=eval_transform)
    num_classes = 10
    label_per_class = n_labels // num_classes
    labeled_idxs, unlabed_idxs = split_relabel_data(
                                    np.array(trainset.train_labels),
                                    trainset.train_labels,
                                    label_per_class,
                                    num_classes)
    return {
        'trainset': trainset,
        'evalset': evalset,
        'label_idxs': labeled_idxs,
        'unlab_idxs': unlabed_idxs,
        'num_classes': num_classes
    } 
Example #22
Source File: PILTransform.py    From ext_portrait_segmentation with MIT License
def __call__(self, rgb_img, label_img):
        w, h = rgb_img.size
        pad_along_w = max(0, int((1 + self.crop_size[0] - w) / 2))
        pad_along_h = max(0, int((1 + self.crop_size[1] - h) / 2))
        # padd the images
        rgb_img = Pad(padding=(pad_along_w, pad_along_h), fill=0, padding_mode='constant')(rgb_img)
        label_img = Pad(padding=(pad_along_w, pad_along_h), fill=self.ignore_idx, padding_mode='constant')(label_img)

        i, j, h, w = self.get_params(rgb_img, self.crop_size)
        rgb_img = F.crop(rgb_img, i, j, h, w)
        label_img = F.crop(label_img, i, j, h, w)
        return rgb_img, label_img 
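Note the two fill values: the RGB image is padded with zeros while the label map is padded with ignore_idx, so the padded border is excluded from the loss. A minimal sketch of the same pattern (using 255 as the ignore index is an assumption, not taken from the source):

from PIL import Image
from torchvision.transforms import Pad

rgb = Image.new("RGB", (90, 120))    # stand-in image
label = Image.new("L", (90, 120))    # stand-in label map

rgb_pad = Pad(padding=(19, 4), fill=0, padding_mode='constant')(rgb)
label_pad = Pad(padding=(19, 4), fill=255, padding_mode='constant')(label)  # 255 = assumed ignore index
print(rgb_pad.size, label_pad.size)  # (128, 128) (128, 128)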
Example #23
Source File: build.py    From fast-reid with Apache License 2.0
def build_transforms(cfg, is_train=True):
    res = []

    if is_train:
        size_train = cfg.INPUT.SIZE_TRAIN

        # augmix augmentation
        do_augmix = cfg.INPUT.DO_AUGMIX

        # auto augmentation
        do_autoaug = cfg.INPUT.DO_AUTOAUG
        total_iter = cfg.SOLVER.MAX_ITER

        # horizontal flip
        do_flip = cfg.INPUT.DO_FLIP
        flip_prob = cfg.INPUT.FLIP_PROB

        # padding
        do_pad = cfg.INPUT.DO_PAD
        padding = cfg.INPUT.PADDING
        padding_mode = cfg.INPUT.PADDING_MODE

        # color jitter
        do_cj = cfg.INPUT.DO_CJ

        # random erasing
        do_rea = cfg.INPUT.REA.ENABLED
        rea_prob = cfg.INPUT.REA.PROB
        rea_mean = cfg.INPUT.REA.MEAN
        # random patch
        do_rpt = cfg.INPUT.RPT.ENABLED
        rpt_prob = cfg.INPUT.RPT.PROB

        if do_autoaug:
            res.append(ImageNetPolicy(total_iter))
        res.append(T.Resize(size_train, interpolation=3))
        if do_flip:
            res.append(T.RandomHorizontalFlip(p=flip_prob))
        if do_pad:
            res.extend([T.Pad(padding, padding_mode=padding_mode),
                        T.RandomCrop(size_train)])
        if do_cj:
            res.append(T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0))
        if do_augmix:
            res.append(AugMix())
        if do_rea:
            res.append(RandomErasing(probability=rea_prob, mean=rea_mean))
        if do_rpt:
            res.append(RandomPatch(prob_happen=rpt_prob))
    else:
        size_test = cfg.INPUT.SIZE_TEST
        res.append(T.Resize(size_test, interpolation=3))
    res.append(ToTensor())
    return T.Compose(res) 
Example #24
Source File: load_data.py    From integer_discrete_flows with MIT License
def load_cifar10(args, **kwargs):
    # set args
    args.input_size = [3, 32, 32]
    args.input_type = 'continuous'
    args.dynamic_binarization = False

    from keras.datasets import cifar10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    x_train = x_train.transpose(0, 3, 1, 2)
    x_test = x_test.transpose(0, 3, 1, 2)

    import math

    if args.data_augmentation_level == 2:
        data_transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.RandomHorizontalFlip(),
                transforms.Pad(int(math.ceil(32 * 0.05)), padding_mode='edge'),
                transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),
                transforms.CenterCrop(32)
            ])
    elif args.data_augmentation_level == 1:
        data_transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.RandomHorizontalFlip(),
            ])
    else:
        data_transform = transforms.Compose([
                transforms.ToPILImage(),
            ])

    x_val = x_train[-10000:]
    y_val = y_train[-10000:]

    x_train = x_train[:-10000]
    y_train = y_train[:-10000]

    train = CustomTensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train), transform=data_transform)
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)

    validation = data_utils.TensorDataset(torch.from_numpy(x_val), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)

    test = data_utils.TensorDataset(torch.from_numpy(x_test), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)

    return train_loader, val_loader, test_loader, args 
Example #25
Source File: build.py    From CVWC2019-Amur-Tiger-Re-ID with Apache License 2.0
def build_transforms(cfg, is_train=True):
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
    if is_train:
        transform_ = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomRotation(cfg.INPUT.RO_DEGREE),
            T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB, saturation=cfg.INPUT.SATURA_PROB, contrast=cfg.INPUT.CONTRAST_PROB, hue=cfg.INPUT.HUE_PROB),
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ToTensor(),
            normalize_transform
        ])
        transform_body = T.Compose([
            T.Resize(cfg.PART.SIZE_BODY),
            T.RandomRotation(cfg.INPUT.RO_DEGREE),
            T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB, saturation=cfg.INPUT.SATURA_PROB,
                          contrast=cfg.INPUT.CONTRAST_PROB, hue=cfg.INPUT.HUE_PROB),
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.PART.SIZE_BODY),
            T.ToTensor(),
            normalize_transform
        ])
        transform_paw = T.Compose([
            T.Resize(cfg.PART.SIZE_PAW),
            T.RandomRotation(cfg.INPUT.RO_DEGREE),
            T.ColorJitter(brightness=cfg.INPUT.BRIGHT_PROB, saturation=cfg.INPUT.SATURA_PROB,
                          contrast=cfg.INPUT.CONTRAST_PROB, hue=cfg.INPUT.HUE_PROB),
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.PART.SIZE_PAW),
            T.ToTensor(),
            normalize_transform
        ])
        return transform_, transform_body, transform_paw
    else:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize_transform
        ])
        return transform 
Example #26
Source File: cifar10_main.py    From online-normalization with BSD 3-Clause "New" or "Revised" License
def main(args):
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # Data loading code
    print('=> create train dataset')
    normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                     std=[0.2470, 0.2435, 0.2616])

    train_transform = transforms.Compose([transforms.Pad(4),
                                          transforms.RandomCrop(size=32),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(), normalize])
    train_dataset = datasets.CIFAR10(args.data, train=True,
                                     transform=train_transform,
                                     target_transform=None,
                                     download=True)

    print('=> create train dataloader')
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('=> create val dataset')
    val_transform = transforms.Compose([transforms.ToTensor(), normalize])
    val_dataset = datasets.CIFAR10(args.data, train=False,
                                   transform=val_transform,
                                   target_transform=None,
                                   download=True)

    print('=> create val dataloader')
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    main_worker(train_loader, val_loader, NUM_CLASSES, args, cifar=True) 
Example #27
Source File: cifar100_main.py    From online-normalization with BSD 3-Clause "New" or "Revised" License
def main(args):
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # Data loading code
    print('=> create train dataset')
    normalize = transforms.Normalize(mean=[0.5071, 0.4865, 0.4409],
                                     std=[0.2673, 0.2564, 0.2762])

    train_transform = transforms.Compose([transforms.Pad(4),
                                          transforms.RandomCrop(size=32),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(), normalize])
    train_dataset = datasets.CIFAR100(args.data, train=True,
                                     transform=train_transform,
                                     target_transform=None,
                                     download=True)

    print('=> create train dataloader')
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('=> create val dataset')
    val_transform = transforms.Compose([transforms.ToTensor(), normalize])
    val_dataset = datasets.CIFAR100(args.data, train=False,
                                   transform=val_transform,
                                   target_transform=None,
                                   download=True)

    print('=> create val dataloader')
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    main_worker(train_loader, val_loader, NUM_CLASSES, args, cifar=True) 
Example #28
Source File: dataset.py    From ACNet with MIT License
def create_dataset(dataset_name, subset, batch_size):
    assert dataset_name in ['imagenet', 'cifar10', 'ch', 'svhn', 'mnist']
    assert subset in ['train', 'val']
    if dataset_name == 'imagenet':
        raise ValueError('TODO')

    #   copied from https://github.com/pytorch/examples/blob/master/mnist/main.py
    elif dataset_name == 'mnist':
        if subset == 'train':
            return InfiniteDataLoader(datasets.MNIST(MNIST_PATH, train=True, download=True,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.1307,), (0.3081,))])), batch_size=batch_size, shuffle=True)
        else:
            return InfiniteDataLoader(datasets.MNIST(MNIST_PATH, train=False, transform=transforms.Compose([
                transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
                batch_size=batch_size, shuffle=False)

    elif dataset_name == 'cifar10':
        if subset == 'train':
            return InfiniteDataLoader(datasets.CIFAR10(CIFAR10_PATH, train=True, download=False,
                               transform=transforms.Compose([
                                   transforms.Pad(padding=(4, 4, 4, 4)),
                                   transforms.RandomCrop(32),
                                   transforms.RandomHorizontalFlip(),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])),
                                batch_size=batch_size, shuffle=True)
        else:
            return InfiniteDataLoader(datasets.CIFAR10(CIFAR10_PATH, train=False,
                                transform=transforms.Compose([
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])),
                                batch_size=batch_size, shuffle=False)

    elif dataset_name == 'ch':
        if subset == 'train':
            return InfiniteDataLoader(datasets.CIFAR100(CH_PATH, train=True, download=True,
                               transform=transforms.Compose([
                                   transforms.Pad(padding=(4, 4, 4, 4)),
                                   transforms.RandomCrop(32),
                                   transforms.RandomHorizontalFlip(),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])),
                                batch_size=batch_size, shuffle=True)
        else:
            return InfiniteDataLoader(datasets.CIFAR100(CH_PATH, train=False,
                                transform=transforms.Compose([
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])),
                                batch_size=batch_size, shuffle=False)

    else:
        assert False