Python config.num_epochs() Examples

The following are 5 code examples of config.num_epochs. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module config, or try the search function.
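In most of these projects, config is a plain Python module that stores hyperparameters as module-level attributes, so num_epochs is typically read (and overwritten) as an attribute rather than called as a function. A minimal sketch of such a module, with attribute names and defaults assumed from the examples below:

# config.py -- hypothetical hyperparameter module (a sketch, not any project's actual file)
# Training scripts import this module and read or overwrite its attributes directly.
num_epochs = 100                        # total number of training epochs
batch_size = 128                        # samples per mini-batch
min_lr = 1e-5                           # lower bound for a cyclical learning-rate schedule
max_lr = 1e-2                           # upper bound for a cyclical learning-rate schedule
IMAGE_SIZE = 32                         # input image side length
save_dir_path = './model_checkpoints'   # where checkpoints are written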
Example #1
Source File: main.py    From AugMix_TF2 with MIT License
def main():
    args = parse_args()
    print('\nCalled with args:')
    for key in args:
        print(f"{key:<10}:  {args[key]}")
    print("="*78)

    # copy the command-line args into the global config
    config.max_lr = args["max_lr"]
    config.min_lr = args["min_lr"]
    config.batch_size = args["batch_size"]
    config.num_epochs = args["epochs"]
    config.IMAGE_SIZE = args["img_size"]
    config.plot_name = args["plot_name"]
    
    if args["save_dir_path"] == "":
        config.save_dir_path = './model_checkpoints'
    else:
        config.save_dir_path = args["save_dir_path"]
        


    # get the data
    print("\nLoading data now.", end=" ")
    x_train, y_train, x_test, y_test, y_train_cat, y_test_cat = get_cifar_data()
    training_data = [x_train, y_train, y_train_cat]
    validation_data = [x_test, y_test, y_test_cat]
    print("Data loading complete. \n")

    # pass the arguments to the trainer
    train(training_data=training_data,
          validation_data=validation_data,
          batch_size=config.batch_size,
          nb_epochs=config.num_epochs,
          min_lr=config.min_lr,
          max_lr=config.max_lr,
          save_dir_path=config.save_dir_path)
    

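The example above iterates over args like a dict, so its parse_args() presumably wraps argparse and returns vars() of the parsed namespace. A hypothetical sketch consistent with that usage (the argument defaults are assumptions):

import argparse

def parse_args():
    # Hypothetical sketch: return a plain dict so the caller can iterate key/value pairs
    parser = argparse.ArgumentParser(description='AugMix training')
    parser.add_argument('--max_lr', type=float, default=1e-2)
    parser.add_argument('--min_lr', type=float, default=1e-5)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--img_size', type=int, default=32)
    parser.add_argument('--plot_name', type=str, default='training_plot.png')
    parser.add_argument('--save_dir_path', type=str, default='')
    return vars(parser.parse_args())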
Example #2
Source File: main.py    From wide-resnet.pytorch with MIT License
def train(epoch):
    net.train()
    net.training = True
    train_loss = 0
    correct = 0
    total = 0
    optimizer = optim.SGD(net.parameters(), lr=cf.learning_rate(args.lr, epoch), momentum=0.9, weight_decay=5e-4)

    print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch)))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda() # move the batch to the GPU
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)               # Forward Propagation
        loss = criterion(outputs, targets)  # Loss
        loss.backward()  # Backward Propagation
        optimizer.step() # Optimizer update

        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%'
                %(epoch, num_epochs, batch_idx+1,
                    (len(trainset)//batch_size)+1, loss.item(), 100.*correct/total))
        sys.stdout.flush() 
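cf.learning_rate(args.lr, epoch) comes from the project's config module; for CIFAR-style training this is typically a step-decay schedule. A sketch of what such a schedule might look like (the decay factor and epoch boundaries here are assumptions):

import math

def learning_rate(init, epoch):
    # Step decay: scale the initial LR by a factor of 0.2 at epochs 60, 120, and 160
    if epoch > 160:
        optim_factor = 3
    elif epoch > 120:
        optim_factor = 2
    elif epoch > 60:
        optim_factor = 1
    else:
        optim_factor = 0
    return init * math.pow(0.2, optim_factor)

Rebuilding the SGD optimizer at the start of every epoch, as the example does, is a simple way to apply such a schedule; a torch.optim.lr_scheduler would be the more idiomatic modern alternative.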
Example #3
Source File: main.py    From dl2 with MIT License
def test(epoch):
    global best_acc, best_model
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(validloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        loss = criterion(outputs, targets)

        test_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

    # Save a checkpoint whenever validation accuracy improves
    acc = 100.*float(correct)/total
    print("\n| Validation Epoch #%d\t\t\tLoss: %.4f Acc@1: %.2f%%" %(epoch, test_loss/len(validloader), acc))

    if acc > best_acc:
        #print('| Saving Best model...\t\t\tTop1 = %.2f%%' %(acc))
        best_model = save(acc, num_epochs, net, best=True)
        best_acc = acc 
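Variable(inputs, volatile=True) is the pre-0.4 PyTorch idiom for disabling autograd during inference; modern PyTorch achieves the same with torch.no_grad(). A sketch of the equivalent evaluation loop in the current API:

import torch

def evaluate(net, validloader, criterion, use_cuda=False):
    # Sketch of the loop above in modern PyTorch; no Variable wrappers needed
    net.eval()
    test_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():  # replaces Variable(..., volatile=True)
        for inputs, targets in validloader:
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            test_loss += criterion(outputs, targets).item()
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    return test_loss / len(validloader), 100. * correct / total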
Example #4
Source File: trainer.py    From neural-question-generation with MIT License
def train(self):
        batch_num = len(self.train_loader)
        best_loss = 1e10
        for epoch in range(1, config.num_epochs + 1):
            self.model.train()
            print("epoch {}/{} :".format(epoch, config.num_epochs), end="\r")
            start = time.time()
            # halve the learning rate every 2 epochs, starting at epoch 8
            if epoch >= 8 and epoch % 2 == 0:
                self.lr *= 0.5
                state_dict = self.optim.state_dict()
                for param_group in state_dict["param_groups"]:
                    param_group["lr"] = self.lr
                self.optim.load_state_dict(state_dict)

            for batch_idx, train_data in enumerate(self.train_loader, start=1):
                batch_loss = self.step(train_data)

                self.model.zero_grad()
                batch_loss.backward()
                # gradient clipping
                nn.utils.clip_grad_norm_(self.model.parameters(),
                                         config.max_grad_norm)

                self.optim.step()
                batch_loss = batch_loss.detach().item()
                msg = "{}/{} {} - ETA : {} - loss : {:.4f}" \
                    .format(batch_idx, batch_num, progress_bar(batch_idx, batch_num),
                            eta(start, batch_idx, batch_num), batch_loss)
                print(msg, end="\r")

            val_loss = self.evaluate(msg)
            if val_loss <= best_loss:
                best_loss = val_loss
                self.save_model(val_loss, epoch)

            print("Epoch {} took {} - final loss : {:.4f} - val loss :{:.4f}"
                  .format(epoch, user_friendly_time(time_since(start)), batch_loss, val_loss)) 
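Round-tripping the optimizer's state_dict, as the trainer above does, works but is indirect; the optimizer's param groups can be mutated in place. A minimal sketch of the same halving step:

# Equivalent LR update without serializing the optimizer state (sketch)
if epoch >= 8 and epoch % 2 == 0:
    self.lr *= 0.5
    for param_group in self.optim.param_groups:
        param_group['lr'] = self.lr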
Example #5
Source File: train.py    From garbageClassifier with MIT License
def train(config):
	# prepare
	if not os.path.exists(config.save_dir):
		os.mkdir(config.save_dir)
	use_cuda = torch.cuda.is_available()
	# define the model
	model = NetsTorch(net_name=config.net_name, pretrained=config.load_pretrained, num_classes=config.num_classes)
	if use_cuda:
		# NOTE: CUDA_VISIBLE_DEVICES normally has to be set before CUDA is first
		# initialized (e.g. before any torch.cuda call) to take effect
		os.environ['CUDA_VISIBLE_DEVICES'] = config.gpus
		if config.ngpus > 1:
			model = nn.DataParallel(model).cuda()
		else:
			model = model.cuda()
	model.train()
	# dataset
	dataset_train = ImageFolder(data_dir=config.traindata_dir, image_size=config.image_size, is_train=True)
	saveClasses(dataset_train.classes, config.clsnamespath)
	dataset_test = ImageFolder(data_dir=config.testdata_dir, image_size=config.image_size, is_train=False)
	dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	Logging('Train dataset size: %d...' % len(dataset_train), config.logfile)
	Logging('Test dataset size: %d...' % len(dataset_test), config.logfile)
	# optimizer
	optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
	criterion = nn.CrossEntropyLoss()
	# train
	FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
	for epoch in range(1, config.num_epochs+1):
		Logging('[INFO]: epoch now is %d...' % epoch, config.logfile)
		for batch_i, (imgs, labels) in enumerate(dataloader_train):
			imgs = imgs.type(FloatTensor)
			labels = labels.type(FloatTensor)
			optimizer.zero_grad()
			preds = model(imgs)
			loss = criterion(preds, labels.long())
			if config.ngpus > 1:
				loss = loss.mean()
			Logging('[INFO]: batch%d of epoch%d, loss is %.2f...' % (batch_i, epoch, loss.item()), config.logfile)
			loss.backward()
			optimizer.step()
		if ((epoch % config.save_interval == 0) and (epoch > 0)) or (epoch == config.num_epochs):
			pklpath = os.path.join(config.save_dir, 'epoch_%s.pkl' % str(epoch))
			if config.ngpus > 1:
				cur_model = model.module
			else:
				cur_model = model
			torch.save(cur_model.state_dict(), pklpath)
			acc = test(model, dataloader_test)
			Logging('[INFO]: Accuracy of epoch %d is %.2f...' % (epoch, acc), config.logfile)
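The checkpoints written above contain only the model's state_dict, so loading one later requires rebuilding the model first. A hypothetical sketch (the epoch number is illustrative and the NetsTorch arguments mirror the training call above):

import os
import torch

# Hypothetical sketch: restore a checkpoint saved by the training loop above
model = NetsTorch(net_name=config.net_name, pretrained=False, num_classes=config.num_classes)
state_dict = torch.load(os.path.join(config.save_dir, 'epoch_20.pkl'), map_location='cpu')
model.load_state_dict(state_dict)
model.eval()  # switch to inference mode before evaluating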