Python config.learning_rate() Examples

The following are 5 code examples of config.learning_rate(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module config, or try the search function.
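Note that the config object differs from project to project: in most of the examples below config.learning_rate is a plain attribute, while Example #2 calls it as a function of the initial rate and the current epoch. As a purely hypothetical sketch (not taken from any of these projects), such a config module might expose either form:

# config.py -- hypothetical sketch; each project defines its own fields
learning_rate = 1e-4          # attribute form: read as config.learning_rate
lr_decay = 0.8                # companion fields used by some examples below
decay_circles = 10000

def learning_rate_fn(init, epoch):
    # function form, similar in spirit to cf.learning_rate(args.lr, epoch) in Example #2
    return init * (0.5 ** (epoch // 30))   # placeholder policy: halve every 30 epochs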
Example #1
Source File: model.py    From MemTrack with MIT License    6 votes
def get_train_op(loss, mode):

    if mode != ModeKeys.TRAIN:
        return None

    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.exponential_decay(config.learning_rate, global_step, config.decay_circles, config.lr_decay, staircase=True)
    tf.summary.scalar('learning_rate', learning_rate)

    tvars = tf.trainable_variables()
    regularizer = tf.contrib.layers.l2_regularizer(config.weight_decay)
    regularizer_loss = tf.contrib.layers.apply_regularization(regularizer, tvars)
    loss += regularizer_loss
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), config.clip_gradients)
    # optimizer = tf.train.GradientDescentOptimizer(self.lr)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    batchnorm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(batchnorm_update_ops):
        train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)

    return train_op 
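With staircase=True, tf.train.exponential_decay multiplies the base rate by config.lr_decay once every config.decay_circles steps. A plain-Python sketch of that schedule, with placeholder values standing in for the fields MemTrack's config actually defines:

import math

base_lr = 1e-4           # stands in for config.learning_rate
lr_decay = 0.8           # stands in for config.lr_decay
decay_circles = 10000    # stands in for config.decay_circles

def decayed_lr(global_step):
    # staircase=True floors the exponent, so the rate drops in discrete steps
    return base_lr * lr_decay ** math.floor(global_step / decay_circles)

for step in (0, 10000, 25000):
    print(step, decayed_lr(step))   # roughly 1e-4, 8e-5, 6.4e-5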
Example #2
Source File: main.py    From wide-resnet.pytorch with MIT License    5 votes
def train(epoch):
    net.train()
    net.training = True
    train_loss = 0
    correct = 0
    total = 0
    optimizer = optim.SGD(net.parameters(), lr=cf.learning_rate(args.lr, epoch), momentum=0.9, weight_decay=5e-4)

    print('\n=> Training Epoch #%d, LR=%.4f' %(epoch, cf.learning_rate(args.lr, epoch)))
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda() # GPU settings
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)               # Forward Propagation
        loss = criterion(outputs, targets)  # Loss
        loss.backward()  # Backward Propagation
        optimizer.step() # Optimizer update

        train_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        sys.stdout.write('\r')
        sys.stdout.write('| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%'
                %(epoch, num_epochs, batch_idx+1,
                    (len(trainset)//batch_size)+1, loss.item(), 100.*correct/total))
        sys.stdout.flush() 
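Here cf.learning_rate(args.lr, epoch) computes the rate for the current epoch, and a fresh SGD optimizer is created with it at the start of every epoch. The project defines the schedule in its own config.py; a hedged sketch of the typical stepwise shape (the exact epoch boundaries and decay factor below are assumptions):

import math

def learning_rate(init, epoch):
    # drop the rate by a constant factor at fixed epoch boundaries (illustrative values)
    if epoch > 160:
        factor = 3
    elif epoch > 120:
        factor = 2
    elif epoch > 60:
        factor = 1
    else:
        factor = 0
    return init * math.pow(0.2, factor)

print(learning_rate(0.1, 1), learning_rate(0.1, 100))   # 0.1, then 0.02 after the first drop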
Example #3
Source File: rfl_net.py    From RFL with MIT License    5 votes
def __init__(self, is_train, z_examplar=None, x_crops=None, y_crops=None, init_z_exemplar=None):

    self._is_train = is_train
    input_shape = z_examplar.get_shape().as_list()

    self._batch_size = input_shape[0]
    self._time_steps = input_shape[1]
    x_shape = x_crops.get_shape().as_list()
    self._z_examplar = tf.reshape(z_examplar, [-1, config.z_exemplar_size, config.z_exemplar_size, 3])
    self._x_crops = tf.reshape(x_crops, [-1] + x_shape[2:])
    self._y_crops = y_crops
    self._response_size = config.response_size - int(2*8/config.stride) if config.is_augment and is_train else config.response_size
    self._gt_pos = tf.convert_to_tensor(np.floor([self._response_size/2, self._response_size/2]), tf.float32)
    if init_z_exemplar is not None:
        self.init_z_exemplar = tf.reshape(init_z_exemplar, [-1, config.z_exemplar_size, config.z_exemplar_size, 3])

    self.filter
    self.response
    if y_crops is not None:
        self.loss
        self.dist_error
    else:
        self.init_state_filter
    if is_train:

        self._global_step = tf.get_variable('global_step', [], tf.int64, initializer=tf.constant_initializer(0),
                                            trainable=False)
        self._lr = tf.train.exponential_decay(config.learning_rate, self._global_step, config.decay_circles,
                                              config.lr_decay, staircase=True)
        tf.summary.scalar('learning_rate', self._lr)
        self.optimize

    self._summary = tf.summary.merge_all()
    self._saver = tf.train.Saver(tf.global_variables())
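The bare self.filter, self.response, self.loss, and self.optimize lines are attribute accesses with no assignment; in models written in this style they are usually properties wrapped in a caching ("lazy property") decorator, so touching them once in __init__ builds the corresponding piece of the TensorFlow graph. A minimal sketch of that pattern, assuming RFL follows the common recipe:

import functools

def lazy_property(func):
    attr = '_cache_' + func.__name__

    @property
    @functools.wraps(func)
    def wrapper(self):
        # build the graph piece on first access, then return the cached result
        if not hasattr(self, attr):
            setattr(self, attr, func(self))
        return getattr(self, attr)
    return wrapper

class Model:
    @lazy_property
    def response(self):
        print('building response op')   # runs only once
        return 'response_op'

m = Model()
m.response   # builds and caches
m.response   # cached; nothing is rebuilt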
Example #4
Source File: train.py    From garbageClassifier with MIT License    4 votes
def train(config):
	# prepare
	if not os.path.exists(config.save_dir):
		os.mkdir(config.save_dir)
	use_cuda = torch.cuda.is_available()
	# define the model
	model = NetsTorch(net_name=config.net_name, pretrained=config.load_pretrained, num_classes=config.num_classes)
	if use_cuda:
		os.environ['CUDA_VISIBLE_DEVICES'] = config.gpus
		if config.ngpus > 1:
			model = nn.DataParallel(model).cuda()
		else:
			model = model.cuda()
	model.train()
	# dataset
	dataset_train = ImageFolder(data_dir=config.traindata_dir, image_size=config.image_size, is_train=True)
	saveClasses(dataset_train.classes, config.clsnamespath)
	dataset_test = ImageFolder(data_dir=config.testdata_dir, image_size=config.image_size, is_train=False)
	dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
	Logging('Train dataset size: %d...' % len(dataset_train), config.logfile)
	Logging('Test dataset size: %d...' % len(dataset_test), config.logfile)
	# optimizer
	optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
	criterion = nn.CrossEntropyLoss()
	# train
	FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
	for epoch in range(1, config.num_epochs+1):
		Logging('[INFO]: epoch now is %d...' % epoch, config.logfile)
		for batch_i, (imgs, labels) in enumerate(dataloader_train):
			imgs = imgs.type(FloatTensor)
			labels = labels.type(FloatTensor)
			optimizer.zero_grad()
			preds = model(imgs)
			loss = criterion(preds, labels.long())
			if config.ngpus > 1:
				loss = loss.mean()
			Logging('[INFO]: batch%d of epoch%d, loss is %.2f...' % (batch_i, epoch, loss.item()), config.logfile)
			loss.backward()
			optimizer.step()
		if ((epoch % config.save_interval == 0) and (epoch > 0)) or (epoch == config.num_epochs):
			pklpath = os.path.join(config.save_dir, 'epoch_%s.pkl' % str(epoch))
			if config.ngpus > 1:
				cur_model = model.module
			else:
				cur_model = model
			torch.save(cur_model.state_dict(), pklpath)
			acc = test(model, dataloader_test)
			Logging('[INFO]: Accuracy of epoch %d is %.2f...' % (epoch, acc), config.logfile) 
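train(config) only needs an object that exposes the attributes it reads (save_dir, net_name, learning_rate, batch_size, and so on). A hypothetical way to drive it from the command line with argparse; these flag names simply mirror a few of the attributes used above, not the project's actual CLI:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', default='checkpoints')
parser.add_argument('--net_name', default='resnet18')
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_epochs', type=int, default=20)
parser.add_argument('--ngpus', type=int, default=1)
config = parser.parse_args()   # the Namespace stands in for the config object
# train(config)                # remaining attributes (traindata_dir, logfile, ...) omitted in this sketch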
Example #5
Source File: main.py    From QANet-pytorch with MIT License    4 votes
def train_entry():
    from models import QANet

    with open(config.word_emb_file, "r") as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, "r") as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.train_eval_file, "r") as fh:
        train_eval_file = json.load(fh)
    with open(config.dev_eval_file, "r") as fh:
        dev_eval_file = json.load(fh)

    print("Building model...")

    train_dataset = SQuADDataset(config.train_record_file, config.num_steps, config.batch_size)
    dev_dataset = SQuADDataset(config.dev_record_file, config.test_num_batches, config.batch_size)

    lr = config.learning_rate
    base_lr = 1.0
    warm_up = config.lr_warm_up_num

    model = QANet(word_mat, char_mat).to(device)
    ema = EMA(config.ema_decay)
    for name, p in model.named_parameters():
        if p.requires_grad: ema.set(name, p)
    params = filter(lambda param: param.requires_grad, model.parameters())
    optimizer = optim.Adam(lr=base_lr, betas=(config.beta1, config.beta2), eps=1e-7, weight_decay=3e-7, params=params)
    cr = lr / log2(warm_up)
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda ee: cr * log2(ee + 1) if ee < warm_up else lr)
    L = config.checkpoint
    N = config.num_steps
    best_f1 = best_em = patience = 0
    for iter in range(0, N, L):
        train(model, optimizer, scheduler, ema, train_dataset, iter, L)
        valid(model, train_dataset, train_eval_file)
        metrics = test(model, dev_dataset, dev_eval_file)
        print("Learning rate: {}".format(scheduler.get_lr()))
        dev_f1 = metrics["f1"]
        dev_em = metrics["exact_match"]
        if dev_f1 < best_f1 and dev_em < best_em:
            patience += 1
            if patience > config.early_stop: break
        else:
            patience = 0
            best_f1 = max(best_f1, dev_f1)
            best_em = max(best_em, dev_em)

        fn = os.path.join(config.save_dir, "model.pt")
        torch.save(model, fn)
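Because the optimizer's base_lr is 1.0, the LambdaLR multiplier is the effective learning rate: it ramps logarithmically from 0 up to config.learning_rate over lr_warm_up_num steps and then stays flat. A small plain-Python sketch of the same schedule, with assumed values for the two config fields:

from math import log2

lr = 0.001         # assumed config.learning_rate
warm_up = 1000     # assumed config.lr_warm_up_num
cr = lr / log2(warm_up)

def effective_lr(step):
    # log-shaped warm-up, then constant; mirrors the lr_lambda above with base_lr = 1.0
    return cr * log2(step + 1) if step < warm_up else lr

for step in (0, 99, 999, 5000):
    print(step, round(effective_lr(step), 6))   # 0.0, 0.000667, 0.001, 0.001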