Python torch.utils.data.shape() Examples

The following are 30 code examples that read the .shape attribute of data flowing through torch.utils.data pipelines (torch.utils.data itself defines no shape() function; .shape belongs to the tensors and NumPy arrays the loaders yield). Each example notes its original project and source file. You may also want to check out all available functions and classes of the torch.utils.data module.
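The most common pattern below is data.shape[0], the per-batch sample count. As a minimal sketch of that pattern (the toy dataset and sizes here are hypothetical):

import torch
from torch.utils.data import DataLoader, TensorDataset

# 100 fake RGB images (3x32x32) with integer class labels.
dataset = TensorDataset(torch.randn(100, 3, 32, 32), torch.randint(0, 10, (100,)))
loader = DataLoader(dataset, batch_size=16, shuffle=True)

seen = 0
for data, target in loader:
    seen += data.shape[0]  # dim 0 of the batch tensor is the (possibly partial) batch size
print(seen)  # prints 100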
Example #1
Source File: main_fullv_mc.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
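    # Pre-1.1 PyTorch ordering: the epoch-level LR scheduler is stepped before the epoch's
    # optimizer steps; newer PyTorch versions expect optimizer.step() to come first.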
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #2
Source File: SecenFlowLoaderfix.py    From StereoNet-ActiveStereoNet with MIT License
def disparity_loader(path):
    path_prefix = path.split('.')[0]
    # print(path_prefix)
    path1 = path_prefix + '_exception_assign_minus_1.npy'
    path2 = path_prefix + '.npy'
    path3 = path_prefix + '.pfm'
    import os.path as ospath
    if ospath.exists(path1):
        return np.load(path1)
    else:
        from readpfm import readPFM
        data, _ = readPFM(path3)
        np.save(path2, data)
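        # A disparity larger than the pixel's column index would reference a point outside
        # the left image border; mark such pixels invalid (-1).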
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                if j - data[i][j] < 0:
                    data[i][j] = -1
        np.save(path1, data)
        return data 
Example #3
Source File: SceneFlowLoader_demo.py    From StereoNet-ActiveStereoNet with MIT License
def disparity_loader(path):
    path_prefix = path.split('.')[0]
    path1 = path_prefix + '_exception_assign_minus_1.npy'
    path2 = path_prefix + '.npy'
    path3 = path_prefix + '.pfm'
    import os.path as ospath
    if ospath.exists(path1):
        return np.load(path1)
    else:
        if ospath.exists(path2):
            data = np.load(path2)
        else:
            from readpfm import readPFM
            data, _ = readPFM(path3)
            np.save(path2, data)
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                if j - data[i][j] < 0:
                    data[i][j] = -1
        np.save(path1, data)
        return data 
Example #4
Source File: SecenFlowLoader.py    From StereoNet-ActiveStereoNet with MIT License
def disparity_loader(path):
    path_prefix = path.split('.')[0]
    # print(path_prefix)
    path1 = path_prefix + '_exception_assign_minus_1.npy'
    path2 = path_prefix + '.npy'
    path3 = path_prefix + '.pfm'
    import os.path as ospath
    if ospath.exists(path1):
        return np.load(path1)
    else:
        from readpfm import readPFM
        data, _ = readPFM(path3)
        np.save(path2, data)
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                if j - data[i][j] < 0:
                    data[i][j] = -1
        np.save(path1, data)
        return data 
Example #5
Source File: main_1v_mc.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #6
Source File: main_fullv_gpd.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for batch_idx, (data, target, obj_name) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()  # size_average=False is deprecated
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Example #7
Source File: main_fullv_gpd.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx * len(data) / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #8
Source File: main_fullv.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()  # size_average=False is deprecated
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Example #9
Source File: main_fullv.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #10
Source File: main_1v_gpd.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx * len(data) / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #11
Source File: main_fullv_mc.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()  # size_average=False is deprecated
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Example #12
Source File: main_1v.py    From PointNetGPD with MIT License
def test(model, loader):
    model.eval()
    torch.set_grad_enabled(False)
    test_loss = 0
    correct = 0
    dataset_size = 0
    da = {}
    db = {}
    res = []
    for data, target, obj_name in loader:
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output, _ = model(data) # N*C
        test_loss += F.nll_loss(output, target, reduction='sum').cpu().item()  # size_average=False is deprecated
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        for i, j, k in zip(obj_name, pred.data.cpu().numpy(), target.data.cpu().numpy()):
            res.append((i, j[0], k))

    test_loss /= len(loader.dataset)
    acc = float(correct)/float(dataset_size)
    return acc, test_loss 
Example #13
Source File: main_1v.py    From PointNetGPD with MIT License
def train(model, loader, epoch):
    scheduler.step()
    model.train()
    torch.set_grad_enabled(True)
    correct = 0
    dataset_size = 0
    for batch_idx, (data, target) in enumerate(loader):
        dataset_size += data.shape[0]
        data, target = data.float(), target.long().squeeze()
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.view_as(pred)).long().cpu().sum()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t{}'.format(
                epoch, batch_idx * args.batch_size, len(loader.dataset),
                100. * batch_idx * args.batch_size / len(loader.dataset), loss.item(), args.tag))
            logger.add_scalar('train_loss', loss.cpu().item(),
                    batch_idx + epoch * len(loader))
    return float(correct)/float(dataset_size) 
Example #14
Source File: GAN.py    From selfies with Apache License 2.0
def train_discriminator(optimizer, real_data, fake_data, discriminator, criterion):
    optimizer.zero_grad()
    
    # 1.1 Train on Real Data
    prediction_real = discriminator(real_data)
    y_real = Variable(torch.ones(prediction_real.shape[0], 1))
    if torch.cuda.is_available(): 
        D_real_loss = criterion(prediction_real, y_real.cuda())
    else: 
        D_real_loss = criterion(prediction_real, y_real)

    # 1.2 Train on Fake Data
    prediction_fake = discriminator(fake_data)
    y_fake = Variable(torch.zeros(prediction_fake.shape[0], 1))
    if torch.cuda.is_available(): 
        D_fake_loss = criterion(prediction_fake, y_fake.cuda())
    else: 
        D_fake_loss = criterion(prediction_fake, y_fake)
    
    D_loss = D_real_loss + D_fake_loss
    D_loss.backward()
    optimizer.step()
    
    # Return error
    return D_real_loss + D_fake_loss, prediction_real, prediction_fake, discriminator 
Example #15
Source File: imagenet.py    From nn_tools with MIT License
def _read_from_lmdb(self):
        self.cur.next()
        if not self.cur.key():
            self.cur.first()
        dataset = pb2.Dataset().FromString(self.cur.value())
        for datum in dataset.datums:
            data = np.frombuffer(datum.data, np.uint8)  # np.fromstring is deprecated for binary data
            try:
                data = self.data_transfrom(data, datum.other)
            except Exception:
                print('cannot trans', data.shape)
                continue
            target = int(datum.target)
            target = self.target_transfrom(target)
            self.data.put(data)
            self.target.put(target)
            # print 'read_from_lmdb', time.time()-r
        del dataset

    # def read_from_lmdb(self):
    #     process=multiprocessing.Process(target=self._read_from_lmdb)
    #     process.start() 
Example #16
Source File: imagenet.py    From nn_tools with MIT License
def data_transfrom(self, data, other):
        data = data.astype(np.float32)
        if self.train:
            shape = np.frombuffer(other[0], np.uint16)  # np.fromstring is deprecated
            data = data.reshape(shape)
            # Random 224x224 crop
            _, w, h = data.shape
            x1 = np.random.randint(0, w - 224)
            y1 = np.random.randint(0, h - 224)
            data = data[:, x1:x1 + 224, y1:y1 + 224]
            # TODO: random horizontal flip
        else:
            data = data.reshape([3, 224, 224])
        data = (data - mean) / std
        tensor = torch.Tensor(data)
        del data
        return tensor
Example #17
Source File: GAN.py    From selfies with Apache License 2.0
def train_discriminator(optimizer, real_data, fake_data, discriminator, criterion):
    optimizer.zero_grad()
    
    # 1.1 Train on Real Data
    prediction_real = discriminator(real_data)
    y_real = Variable(torch.ones(prediction_real.shape[0], 1))
    if torch.cuda.is_available(): 
        D_real_loss = criterion(prediction_real, y_real.cuda())
    else: 
        D_real_loss = criterion(prediction_real, y_real)

    # 1.2 Train on Fake Data
    prediction_fake = discriminator(fake_data)
    y_fake = Variable(torch.zeros(prediction_fake.shape[0], 1))
    if torch.cuda.is_available(): 
        D_fake_loss = criterion(prediction_fake, y_fake.cuda())
    else: 
        D_fake_loss = criterion(prediction_fake, y_fake)
    
    D_loss = D_real_loss + D_fake_loss
    D_loss.backward()
    optimizer.step()
    
    return D_real_loss + D_fake_loss, prediction_real, prediction_fake, discriminator 
Example #18
Source File: datasets.py    From pointwise with MIT License
def __init__(self, root, training=True):
        self.root = root
        self.training = training
        if self.training:
            self.filenames = train_files
        else:
            self.filenames = test_files
        for fn in self.filenames:
            fp = os.path.join(self.root, 'scenenn_seg_' + fn + '.hdf5')
            print(fp)
            with h5py.File(fp, 'r') as f:
                data = np.array(f['data'])
                label = np.array(f['label'])
                if not hasattr(self, 'data'):
                    self.data = data
                    self.label = label
                    self.num_points = data.shape[1]
                    self.num_channels = data.shape[2]
                elif data.shape[0] > 0:
                    self.data = np.concatenate((self.data, data))
                    self.label = np.concatenate((self.label, label)) 
Example #19
Source File: trainclassify.py    From WaveUNet with MIT License
def test(epoch):  # testing data
    model.eval()
    start_time = time.time()
    with torch.no_grad():
        for iloader, xtrain, ytrain in loadtest:
            iloader = iloader.item()
            listofpred0 = []
            cnt, aveloss = 0, 0
            for ind in range(0, xtrain.shape[-1] - sampleSize, sampleSize):
                output = model(xtrain[:, :, ind:ind + sampleSize].to(device))
                loss = criterion(output, ytrain[:, ind:ind + sampleSize].to(device))
                cnt += 1
                aveloss += float(loss)
                _, output = torch.max(output, 1)
                listofpred0.append(output.reshape(-1))
            aveloss /= cnt
            print('loss for test: {}, num {}, epoch {}'.format(aveloss, iloader, epoch))
            ans0 = quan_mu_law_decode(np.concatenate(listofpred0))
            if not os.path.exists('vsCorpus/'): os.makedirs('vsCorpus/')
            sf.write(savemusic.format(iloader), ans0, sample_rate)
            print('test stored done', np.round(time.time() - start_time)) 
Example #20
Source File: ctgan.py    From SDGym with MIT License
def __init__(self, data, output_info):
        super(Sampler, self).__init__()
        self.data = data
        self.model = []
        self.n = len(data)

        st = 0
        skip = False
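        # Scan the transformed columns: 'tanh' spans are continuous outputs and are skipped;
        # each 'softmax' (one-hot) span caches the row indices of every category so sampling
        # can later be conditioned on a chosen category value.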
        for item in output_info:
            if item[1] == 'tanh':
                st += item[0]
                skip = True
            elif item[1] == 'softmax':
                if skip:
                    skip = False
                    st += item[0]
                    continue
                ed = st + item[0]
                tmp = []
                for j in range(item[0]):
                    tmp.append(np.nonzero(data[:, st + j])[0])
                self.model.append(tmp)
                st = ed
            else:
                assert 0
        assert st == data.shape[1] 
Example #21
Source File: sr_dataset.py    From pykaldi2 with MIT License
def _utt2seg(data, seg_len, seg_shift):
    """ Cut an utterance (MxN matrix) to segments. """
    if data.ndim == 1:
        data = np.reshape(data, (1, data.size))
    dim, n_fr = data.shape
    n_seg = int(np.floor((n_fr - seg_len) / seg_shift)) + 1
    seg = []
    for i in range(n_seg):
        start = i * seg_shift
        stop = start + seg_len
        seg.append(data[:, start:stop])

    return seg 
Example #22
Source File: run.py    From ShuffleNetV2-pytorch with MIT License
def find_bounds_clr(model, loader, optimizer, criterion, device, dtype, min_lr=8e-6, max_lr=8e-5, step_size=2000,
                    mode='triangular', save_path='.'):
    model.train()
    correct1, correct5 = 0, 0
    scheduler = CyclicLR(optimizer, base_lr=min_lr, max_lr=max_lr, step_size=step_size, mode=mode)
    epoch_count = step_size // len(loader)  # Assuming step_size is multiple of batch per epoch
    accuracy = []
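    # Per-batch top-1 accuracy is recorded at every LR step so it can be plotted
    # against the swept learning rates below.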
    for _ in trange(epoch_count):
        for batch_idx, (data, target) in enumerate(tqdm(loader)):
            if scheduler is not None:
                scheduler.batch_step()
            data, target = data.to(device=device, dtype=dtype), target.to(device=device)

            optimizer.zero_grad()
            output = model(data)

            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            corr = correct(output, target)
            accuracy.append(corr[0] / data.shape[0])

    lrs = np.linspace(min_lr, max_lr, step_size)
    plt.plot(lrs, accuracy)
    plt.savefig(os.path.join(save_path, 'find_bounds_clr.png'))  # save before show(), which can clear the figure
    plt.show()
    np.save(os.path.join(save_path, 'acc.npy'), accuracy)
    return 
Example #23
Source File: nii_dataload.py    From cortex with BSD 3-Clause "New" or "Revised" License
def maskData(self, data):
        """

        Args:
            data:

        Returns:

        """

        msk = nib.load(self.mask)
        mskD = np.asanyarray(msk.dataobj)  # nibabel's get_data() is deprecated; dataobj keeps the stored dtype
        if not np.all(np.bitwise_or(mskD == 0, mskD == 1)):
            raise ValueError("Mask has incorrect values.")
        # nVox = np.sum(mskD.flatten())
        if data.shape[0:3] != mskD.shape:
            raise ValueError((data.shape, mskD.shape))

        msk_f = mskD.flatten()
        msk_idx = np.where(msk_f == 1)[0]
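        # msk_idx holds the flat indices of in-mask voxels; indexing a flattened volume
        # with it extracts exactly the masked values.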

        if len(data.shape) == 3:
            data_masked = data.flatten()[msk_idx]

        if len(data.shape) == 4:
            data = np.transpose(data, (3, 0, 1, 2))
            data_masked = np.zeros((data.shape[0], int(mskD.sum())))
            for i, x in enumerate(data):
                data_masked[i] = x.flatten()[msk_idx]

        img = data_masked

        return np.array(img) 
Example #24
Source File: UPS_Synth_Dataset.py    From SDPS-Net with MIT License
def _getInputPath(self, index):
        shape, mtrl = self.shape_list[index].split('/')
        normal_path = os.path.join(self.root, 'Images', shape, shape + '_normal.png')
        img_dir     = os.path.join(self.root, 'Images', self.shape_list[index])
        img_list    = util.readList(os.path.join(img_dir, '%s_%s.txt' % (shape, mtrl)))

        data = np.genfromtxt(img_list, dtype='str', delimiter=' ')
        select_idx = np.random.permutation(data.shape[0])[:self.args.in_img_num]
        idxs = ['%03d' % (idx) for idx in select_idx]
        data = data[select_idx, :]
        imgs = [os.path.join(img_dir, img) for img in data[:, 0]]
        dirs = data[:, 1:4].astype(np.float32)
        return normal_path, imgs, dirs 
Example #25
Source File: megaface.py    From Face_Pytorch with Apache License 2.0
def img_loader(path):
    try:
        with open(path, 'rb'):  # raises IOError/FileNotFoundError if the file is missing
            img = cv2.imread(path)
            if img is None:  # cv2.imread returns None rather than raising on undecodable files
                raise IOError
            if len(img.shape) == 2:  # grayscale: replicate to 3 channels
                img = np.stack([img] * 3, 2)
            return img
    except IOError:
        print('Cannot load image ' + path)
Example #26
Source File: GAN.py    From selfies with Apache License 2.0
def train_generator(optimizer, fake_data, criterion, discriminator):
    optimizer.zero_grad()
    prediction = discriminator(fake_data)
    y = Variable(torch.ones(prediction.shape[0], 1))
    if torch.cuda.is_available(): 
        G_loss = criterion(prediction, y.cuda(0))
    else: 
        G_loss = criterion(prediction, y)
    G_loss.backward()

    optimizer.step()
    return G_loss.data.item(), discriminator 
Example #27
Source File: GAN.py    From selfies with Apache License 2.0
def train_generator(optimizer, fake_data, criterion, discriminator):
    optimizer.zero_grad()
    prediction = discriminator(fake_data)
    y = Variable(torch.ones(prediction.shape[0], 1))
    if torch.cuda.is_available(): 
        G_loss = criterion(prediction, y.cuda(0))
    else: 
        G_loss = criterion(prediction, y)
    G_loss.backward()

    optimizer.step()
    return G_loss.data.item(), discriminator 
Example #28
Source File: trainclassify.py    From WaveUNet with MIT License
def val(epoch):
    model.eval()
    start_time = time.time()
    cnt, aveloss = 0, 0
    with torch.no_grad():
        for iloader, xtrain, ytrain in loadval:
            for ind in range(0, xtrain.shape[-1] - sampleSize, sampleSize):
                output = model(xtrain[:, :, ind:ind + sampleSize].to(device))
                loss = criterion(output, (ytrain[:, ind:ind + sampleSize].to(device)))
                cnt += 1
                aveloss += float(loss)
        aveloss /= cnt
        print('loss for validation: {:.5f}, epoch {}, val time {}'.format(
            aveloss, epoch, np.round(time.time() - start_time)))
        if USEBOARD:
            writer.add_scalar('waveunet val loss', aveloss, iteration)
Example #29
Source File: ctgan.py    From SDGym with MIT License
def random_choice_prob_index(a, axis=1):
    r = np.expand_dims(np.random.rand(a.shape[1 - axis]), axis=axis)
    return (a.cumsum(axis=axis) > r).argmax(axis=axis) 
Example #30
Source File: datasets.py    From pointwise with MIT License
def __len__(self):
        return self.data.shape[0]