Python torch.utils.data.cuda() Examples

The following are 29 code examples, extracted from open source projects, that use the .cuda() method on tensors and models together with torch.utils.data loaders. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the torch.utils.data module.
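
All of the examples share one basic pattern: fetch a batch from a torch.utils.data.DataLoader, then move it to the GPU with .cuda() when CUDA is available. A minimal sketch of that pattern follows; the dataset and model here are toy placeholders, not taken from any project below:

import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-ins for the project-specific datasets and models below.
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
loader = DataLoader(dataset, batch_size=8, shuffle=True)
model = torch.nn.Linear(10, 2)

use_cuda = torch.cuda.is_available()
if use_cuda:
    model.cuda()  # moves the model's parameters to the default CUDA device

for data, target in loader:
    if use_cuda:
        # .cuda() returns a copy of each tensor on the GPU
        data, target = data.cuda(), target.cuda()
    output = model(data)
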
Example #1
Source File: main_fullv_gpd.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for batch_idx, (data, target, obj_name) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
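
Calling torch.set_grad_enabled(False) as a plain function, as above, switches autograd off globally until something re-enables it. Since PyTorch 0.4 the same effect can be scoped to the evaluation loop with a context manager; a minimal sketch with placeholder model and loader names, not from the PointNetGPD source:

def test(model, loader):
    model.eval()
    correct = 0
    with torch.no_grad():  # gradient tracking is restored automatically on exit
        for data, target in loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    return correct / len(loader.dataset)
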
Example #2
Source File: vae_normal.py    From VAE_NBP with MIT License
def train(epoch):
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        data = Variable(data)
        if args.cuda:
            data = data.cuda()
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, train_loss / len(train_loader.dataset))) 
Example #3
Source File: image_helper.py    From backdoor_federated_learning with MIT License
def create_model(self):
        local_model = ResNet18(name='Local',
                    created_time=self.params['current_time'])
        local_model.cuda()
        target_model = ResNet18(name='Target',
                        created_time=self.params['current_time'])
        target_model.cuda()
        if self.params['resumed_model']:
            loaded_params = torch.load(f"saved_models/{self.params['resumed_model']}")
            target_model.load_state_dict(loaded_params['state_dict'])
            self.start_epoch = loaded_params['epoch']
            self.params['lr'] = loaded_params.get('lr', self.params['lr'])
            logger.info(f"Loaded parameters from saved model: LR is"
                        f" {self.params['lr']} and current epoch is {self.start_epoch}")
        else:
            self.start_epoch = 1

        self.local_model = local_model
        self.target_model = target_model 
Example #4
Source File: imagenet_models.py    From imagenet with BSD 3-Clause "New" or "Revised" License
def build_model(self):
		# Create model from scratch or use a pretrained one
		print("=> using model '{}'".format(self._arch))
		self._model = models.__dict__[self._arch](num_classes=len(self._labels))
		print("=> loading checkpoint '{}'".format(self._ckp))
		if self._cuda:
			checkpoint = torch.load(self._ckp)
		else:
			# Load GPU model on CPU
			checkpoint = torch.load(self._ckp, map_location=lambda storage, loc: storage)
		# Load weights
		self._model.load_state_dict(checkpoint['state_dict'])

		if self._cuda:
			self._model.cuda()
		else:
			self._model.cpu()


	# Preprocess Images to be ImageNet-compliant 
Example #5
Source File: evaluator.py    From advex-uar with Apache License 2.0
def __init__(self, **kwargs):
        default_attr = dict(
            # eval options
            model=None, batch_size=32, stride=10,
            dataset_path=None, # val dir for imagenet, base dir for CIFAR-10-C
            nb_classes=None,
            # attack options
            attack=None,
            # Communication options
            fp16_allreduce=False,
            # Logging options
            logger=None)
        default_attr.update(kwargs)
        for k in default_attr:
            setattr(self, k, default_attr[k])
        if self.dataset not in ['imagenet', 'imagenet-c', 'cifar-10', 'cifar-10-c']:
            raise NotImplementedError
        self.cuda = True
        if self.cuda:
            self.model.cuda()
        self.attack = self.attack()
        self._init_loaders() 
Example #6
Source File: test_vae_pytorch_example.py    From UnsupervisedDeepLearning-Pytorch with MIT License
def test(epoch):
    model.eval()
    test_loss = 0
    for i, (data, _) in enumerate(test_loader):
        if args.cuda:
            data = data.cuda()
        data = Variable(data, volatile=True)  # volatile is ignored in PyTorch >= 0.4; wrap the loop in torch.no_grad() instead
        recon_batch, mu, logvar = model(data)
        test_loss += loss_function(recon_batch, data, mu, logvar).item()
        if i == 0:
            n = min(data.size(0), 8)
            comparison = torch.cat([data[:n],
                                    recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
            save_image(comparison.data.cpu(),
                       'results/reconstruction_' + str(epoch) + '.png', nrow=n)

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss)) 
Example #7
Source File: test_vae_pytorch_example.py    From UnsupervisedDeepLearning-Pytorch with MIT License
def train(epoch):
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        data = Variable(data)
        if args.cuda:
            data = data.cuda()
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item() / len(data)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, train_loss / len(train_loader.dataset))) 
Example #8
Source File: main_1v_mc.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #9
Source File: main_fullv_gpd.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx * len(data) / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #10
Source File: main_fullv.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Example #11
Source File: main_fullv.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #12
Source File: main_1v_gpd.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx * len(data) / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #13
Source File: main_fullv_mc.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Example #14
Source File: main_fullv_mc.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #15
Source File: main_1v.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Example #16
Source File: main_1v.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #17
Source File: cnn_train.py    From Evolutionary-Autoencoders with MIT License
def half_mask(self, inp, gpuID):
        mask = torch.FloatTensor(1, inp.size(1), inp.size(2), inp.size(3)).fill_(1.0)
        w = int(inp.size(2)/2)
        r = np.random.rand()
        if r < 0.25: # left
            mask[:,:,:, 0:w] = 0.0
        elif r < 0.5: # up
            mask[:,:,0:w,:] = 0.0
        elif r < 0.75: # right
            mask[:,:,:,w:inp.size(3)] = 0.0
        else: # bottom
            mask[:,:,w:inp.size(2),:] = 0.0

        mask = mask.cuda(gpuID)
        mask = Variable(mask)
        out = torch.mul(inp, mask)
        return out 
Example #18
Source File: data.py    From retinanet-examples with BSD 3-Clause "New" or "Revised" License
def __iter__(self):
        for output in self.dataloader:
            if self.dataset.training:
                data, target = output
            else:
                data, ids, ratio = output

            if torch.cuda.is_available():
                data = data.cuda(non_blocking=True)

            if self.dataset.training:
                if torch.cuda.is_available():
                    target = target.cuda(non_blocking=True)
                yield data, target
            else:
                if torch.cuda.is_available():
                    ids = ids.cuda(non_blocking=True)
                    ratio = ratio.cuda(non_blocking=True)
                yield data, ids, ratio 
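
The non_blocking=True transfers above can overlap the host-to-device copy with computation, but only if the source batch lives in pinned (page-locked) host memory. A minimal sketch of the matching loader setup, with a placeholder dataset name not taken from retinanet-examples:

loader = torch.utils.data.DataLoader(
    dataset,
    batch_size=16,
    shuffle=True,
    num_workers=4,
    pin_memory=True,  # page-locked batches make .cuda(non_blocking=True) truly asynchronous
)
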
Example #19
Source File: cnn_train.py    From cgp-cnn-PyTorch with MIT License
def __test_per_std(self, model, criterion, gpuID, input, label):
        test_loss = 0
        total = 0
        correct = 0
        ite = 0
        for _, (data, target) in enumerate(self.test_dataloader):
            if self.dataset_name == 'mnist':
                data = data[:,0:1,:,:]
            data = data.cuda(gpuID)
            target = target.cuda(gpuID)
            input.resize_as_(data).copy_(data)
            input_ = Variable(input)
            label.resize_as_(target).copy_(target)
            label_ = Variable(label)
            try:
                output = model(input_, None)
            except Exception:
                import traceback
                traceback.print_exc()
                return 0.
            loss = criterion(output, label_)
            test_loss += loss.item()
            _, predicted = torch.max(output.data, 1)
            total += label_.size(0)
            correct += predicted.eq(label_.data).cpu().sum()
            ite += 1
        print('Test set : Average loss: {:.4f}'.format(test_loss))
        print('Test set : (%d/%d)' % (correct, total))
        print('Test set : Average Acc : {:.4f}'.format(correct/total))

        return (correct/total) 
Example #20
Source File: cnn_train.py    From Evolutionary-Autoencoders with MIT License
def random_pixel_mask(self, inp, image_shape, gpuID, fraction_masked=0.8):
        mask = torch.rand(image_shape)
        mask[mask < fraction_masked] = 0.0  # surviving entries keep their random values in [fraction_masked, 1)
        mask = mask.cuda(gpuID)
        mask = Variable(mask)
        out = torch.mul(inp, mask)
        return out 
Example #21
Source File: imagenet_models.py    From imagenet with BSD 3-Clause "New" or "Revised" License
def classify(self):
		"""Classify the current test batch"""
		self._model.eval()
		for data, _ in self._test_loader:
			if self._cuda:
				data = data.cuda()
			data = torch.autograd.Variable(data, volatile=True)  # volatile is ignored in PyTorch >= 0.4; wrap the loop in torch.no_grad() instead
			output = self._model(data)
			# Take last layer output
			if isinstance(output, tuple):
				output = output[len(output)-1]

			lab = self._labels[output.data.max(1, keepdim=True)[1].cpu().item()]
			print(self._labels, lab)
		return lab 
Example #22
Source File: main.py    From VQ-VAE with BSD 3-Clause "New" or "Revised" License
def test_net(epoch, model, test_loader, cuda, save_path, args, writer):
    model.eval()
    loss_dict = model.latest_losses()
    losses = {k + '_test': 0 for k, v in loss_dict.items()}
    i, data = None, None
    with torch.no_grad():
        for i, (data, _) in enumerate(test_loader):
            if cuda:
                data = data.cuda()
            outputs = model(data)
            model.loss_function(data, *outputs)
            latest_losses = model.latest_losses()
            for key in latest_losses:
                losses[key + '_test'] += float(latest_losses[key])
            if i == 0:
                write_images(data, outputs, writer, 'test')

                save_reconstructed_images(data, epoch, outputs[0], save_path, 'reconstruction_test')
                save_checkpoint(model, epoch, save_path)
            if args.dataset == 'imagenet' and i * len(data) > 1000:
                break

    for key in losses:
        if args.dataset not in ['imagenet', 'custom']:
            losses[key] /= (len(test_loader.dataset) / test_loader.batch_size)
        else:
            losses[key] /= (i * len(data))
    loss_string = ' '.join(['{}: {:.6f}'.format(k, v) for k, v in losses.items()])
    logging.info('====> Test set losses: {}'.format(loss_string))
    return losses 
Example #23
Source File: imagenet_models.py    From imagenet with BSD 3-Clause "New" or "Revised" License
def __init__(self,
				 arch="resnet18",
				 ckp="/model/model_best.pth.tar",
				 train_dir="/input/train",
				 evalf="/eval"):
		"""MNIST ConvNet Builder
		Args:
			ckp: path to model checkpoint file (to continue training).
			evalf: path to evaluate sample.
		"""
		# Path to model weight
		self._ckp = ckp
		# Use CUDA?
		self._cuda = torch.cuda.is_available()
		if os.path.isfile(ckp):
			self._ckp = ckp
		else:
			# Does not exist OR no read permissions
			print("Unable to open ckp file")
		self._evalf = evalf
		self._arch = arch
		# Input size depends on the architecture
		if arch.startswith('inception'):
			self._size = (299, 299)
		else:
			self._size = (224, 256)
		# Get labels
		self._labels = self._get_label(train_dir)


	# Build the model loading the weights 
Example #24
Source File: image_helper.py    From backdoor_federated_learning with MIT License
def get_batch(self, train_data, bptt, evaluation=False):
        data, target = bptt
        data = data.cuda()
        target = target.cuda()
        if evaluation:
            data.requires_grad_(False)
            target.requires_grad_(False)
        return data, target 
Example #25
Source File: vae_normal.py    From VAE_NBP with MIT License
def test(epoch):
    model.eval()
    test_loss = 0
    for data, _ in test_loader:
        if args.cuda:
            data = data.cuda()
        data = Variable(data, volatile=True)  # volatile is ignored in PyTorch >= 0.4; wrap the loop in torch.no_grad() instead
        recon_batch, mu, logvar = model(data)
        test_loss += loss_function(recon_batch, data, mu, logvar).item()

    test_loss /= len(test_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss)) 
Example #26
Source File: vae_normal.py    From VAE_NBP with MIT License
def sample(self, n):
        z = Variable(torch.cuda.FloatTensor(n, 20).normal_())  # standard normal noise drawn directly on the GPU
        return self.decode(z) 
Example #27
Source File: vae_normal.py    From VAE_NBP with MIT License
def reparametrize(self, mu, logvar):
        std = logvar.mul(0.5).exp_()
        if args.cuda:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu) 
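
On PyTorch 0.4 and later the same reparameterization trick is usually written without Variable or explicit FloatTensor constructors; a minimal sketch, not taken from the VAE_NBP source:

def reparametrize(self, mu, logvar):
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)  # sampled on the same device and with the same dtype as std
    return mu + eps * std
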
Example #28
Source File: vae_dp.py    From VAE_NBP with MIT License
def train(epoch, prior):
    model.train()
    train_loss = 0
    #prior = BayesianGaussianMixture(n_components=1, covariance_type='diag')
    tmp = []
    for data, _ in train_loader:
        data = Variable(data)
        if args.cuda:
            data = data.cuda()
        recon_batch, mu, logvar, z = model(data)
        tmp.append(z.cpu().data.numpy())
    print('Update Prior')
    prior.fit(np.vstack(tmp))
    print('prior: '+str(prior.weights_))
    for batch_idx, (data, _) in enumerate(train_loader):
        data = Variable(data)
        if args.cuda:
            data = data.cuda()
        optimizer.zero_grad()
        recon_batch, mu, logvar, z = model(data)
        loss = loss_function(recon_batch, data, mu, logvar, prior, z)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        #if batch_idx % args.log_interval == 0:
        #    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
        #        epoch, batch_idx * len(data), len(train_loader.dataset),
        #        100. * batch_idx / len(train_loader),
        #        loss.data[0] / len(data)))

    print('====> Epoch: {} Average loss: {:.4f}'.format(
          epoch, train_loss / len(train_loader.dataset)))
    return prior 
Example #29
Source File: vae_dp.py    From VAE_NBP with MIT License
def getz():
    tmp = []
    for data, _ in train_loader:
        data = Variable(data)
        if args.cuda:
            data = data.cuda()
        recon_batch, mu, logvar, z = model(data)
        tmp.append(z.cpu().data.numpy())
    return np.vstack(tmp)