Python utils.logger.Logger() Examples
The following are 11 code examples of utils.logger.Logger(), collected from open-source projects. The originating project, source file, and license are noted above each example.
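Note that utils.logger is a project-local module in each repository below, not a shared library, so the Logger constructor and methods vary from example to example. As a rough, hypothetical sketch of the plain file-logging interface several of the PyTorch examples rely on (write, scalar_summary, close), something like the following would do; the method names mirror the examples, but the body is an assumption, not any project's actual implementation.

import os
import time

class Logger:
    """Hypothetical minimal stand-in for a project-local utils/logger.py.

    Real implementations usually also emit TensorBoard summaries; this
    sketch only appends text and named scalars to a plain log file.
    """

    def __init__(self, path):
        # Projects pass either a file path or a run-directory prefix here,
        # so treat `path` as a prefix and create its parent directory.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)
        self.file = open(path + '.log', 'a')
        self.file.write('==> logging started at %s\n' % time.asctime())

    def write(self, text):
        # Append free-form text, e.g. logger.write('{} {:8f} | '.format(k, v)).
        self.file.write(text)
        self.file.flush()

    def scalar_summary(self, tag, value, step):
        # A real Logger would forward this to TensorBoard; here we just record it.
        self.file.write('%s: step %d, value %f\n' % (tag, step, value))
        self.file.flush()

    def close(self):
        self.file.close()

With a stand-in like this, the training loops in Examples #2, #7, and #10 would run unchanged; the TensorFlow-based Loggers in Examples #4 and #9 take a session instead and are sketched separately below.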
Example #1
Source File: trainer.py From ESRNN-GPU with MIT License
def __init__(self, model, dataloader, run_id, config, ohe_headers):
    super(ESRNNTrainer, self).__init__()
    self.model = model.to(config['device'])
    self.config = config
    self.dl = dataloader
    self.ohe_headers = ohe_headers
    self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config['learning_rate'])
    # self.optimizer = torch.optim.ASGD(self.model.parameters(), lr=config['learning_rate'])
    self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                     step_size=config['lr_anneal_step'],
                                                     gamma=config['lr_anneal_rate'])
    self.criterion = PinballLoss(self.config['training_tau'],
                                 self.config['output_size'] * self.config['batch_size'],
                                 self.config['device'])
    self.epochs = 0
    self.max_epochs = config['num_of_train_epochs']
    self.run_id = str(run_id)
    self.prod_str = 'prod' if config['prod'] else 'dev'
    self.log = Logger("../logs/train%s%s%s" % (self.config['variable'], self.prod_str, self.run_id))
    self.csv_save_path = None
Example #2
Source File: overfit.py From 3D-HourGlass-Network with MIT License
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))

    if opt.loadModel == 'none':
        model = inflate(opt).cuda()
    elif opt.loadModel == 'scratch':
        model = Pose3D(opt.nChannels, opt.nStack, opt.nModules, opt.numReductions,
                       opt.nRegModules, opt.nRegFrames, ref.nJoints).cuda()
    else:
        model = torch.load(opt.loadModel).cuda()

    train_loader = torch.utils.data.DataLoader(
        h36m('train', opt),
        batch_size=opt.dataloaderSize,
        shuffle=False,
        num_workers=int(ref.nThreads)
    )

    optimizer = torch.optim.RMSprop(
        [{'params': model.parameters(), 'lr': opt.LRhg}],
        alpha=ref.alpha,
        eps=ref.epsilon,
        weight_decay=ref.weightDecay,
        momentum=ref.momentum
    )

    for epoch in range(1, opt.nEpochs + 1):
        loss_train, acc_train = train(epoch, opt, train_loader, model, optimizer)
        logger.scalar_summary('loss_train', loss_train, epoch)
        logger.scalar_summary('acc_train', acc_train, epoch)
        logger.write('{:8f} {:8f} \n'.format(loss_train, acc_train))

    logger.close()
Example #3
Source File: Experiment.py From Point-Then-Operate with Apache License 2.0
def build_tensorboard(self):
    """Build a tensorboard logger."""
    from utils.logger import Logger
    self.logger = Logger(config.log_dir)
Example #4
Source File: example.py From Tensorflow-Project-Template with Apache License 2.0
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiment dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data = DataGenerator(config)
    # create an instance of the model you want
    model = ExampleModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # load model if it exists
    model.load(sess)
    # here you train your model
    trainer.train()
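Both this example and Example #9 build the Logger around a live TensorFlow session. As a rough sketch of how such a session-based Logger typically emits scalar summaries, assuming the TF 1.x API (hypothetical; this is not either project's actual implementation):

import tensorflow as tf

class TFLogger:
    """Hypothetical sketch of a session-based Logger for TF 1.x.

    It only shows the usual FileWriter pattern behind Logger(sess, config).
    """

    def __init__(self, sess, summary_dir):
        # Bind a FileWriter to the session's graph so TensorBoard can render it.
        self.writer = tf.summary.FileWriter(summary_dir, sess.graph)

    def scalar_summary(self, tag, value, step):
        # Build a Summary protobuf directly; no extra graph ops are needed.
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)
        self.writer.flush()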
Example #5
Source File: timer.py From simhashpy with Apache License 2.0
def __init__(self, time_grain=u'ms', verbose_in=True, verbose_out=True, verbose=True,
             msg_in=u'', msg_out=u'', msg=u'', logfile=None):
    self.time_grain = time_grain
    self.verbose_in = verbose_in
    self.verbose_out = verbose_out
    self.verbose = verbose
    self.msg_in = msg_in
    self.msg_out = msg_out
    self.msg = msg
    if logfile:
        self.logger = Logger('flogger', log2console=False, log2file=True,
                             logfile=logfile).get_logger()
    else:
        self.logger = clogger
Example #6
Source File: equant.py From equant with GNU General Public License v2.0
def main():
    # create the logging module
    logger = Logger()
    log_process = Process(target=run_log_process, args=(logger,))
    log_process.start()

    saveMainPid(os.getpid())

    # check for software updates
    checkUpdate(logger)

    # queue from the strategy engine to the UI, used to send fund data
    eg2ui_q = Queue(10000)
    # queue from the UI to the strategy engine, used to send full strategy paths
    ui2eg_q = Queue(10000)

    # create the strategy engine
    engine = StrategyEngine(logger, eg2ui_q, ui2eg_q)
    engine_process = Process(target=run_engine_process, args=(engine,))
    engine_process.start()

    control = Controller(logger, ui2eg_q, eg2ui_q)
    control.run()
    time.sleep(3)

    import atexit

    def exitHandler():
        control.receiveEgThread.stop()
        # 1. Stop the strategy processes first; for now they become zombie processes.
        #    TODO: the engine's terminate function needs to be overridden here.
        # 2. Shut down the engine process.
        engine_process.terminate()
        engine_process.join()
        log_process.terminate()
        log_process.join()

    atexit.register(exitHandler)
Example #7
Source File: main.py From StarMap with GNU General Public License v3.0
def main():
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))
    model, optimizer = getModel(opt)
    criterion = torch.nn.MSELoss()

    if opt.GPU > -1:
        print('Using GPU', opt.GPU)
        model = model.cuda(opt.GPU)
        criterion = criterion.cuda(opt.GPU)

    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=True if opt.DEBUG > 1 else False,
        num_workers=1
    )

    if opt.test:
        _, preds = val(0, opt, val_loader, model, criterion)
        torch.save({'opt': opt, 'preds': preds}, os.path.join(opt.saveDir, 'preds.pth'))
        return

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),
        batch_size=opt.trainBatch,
        shuffle=True,
        num_workers=int(opt.nThreads)
    )

    for epoch in range(1, opt.nEpochs + 1):
        mark = epoch if opt.saveAllModels else 'last'
        log_dict_train, _ = train(epoch, opt, train_loader, model, criterion, optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            saveModel(os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(mark)), model)  # optimizer
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1 ** (epoch // opt.dropLR))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth'))
Example #8
Source File: train.py From pytorch-cpn with GNU General Public License v3.0
def main(args):
    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model
    model = network.__dict__[cfg.model](cfg.output_shape, cfg.num_class, pretrained=True)
    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion1 = torch.nn.MSELoss().cuda()              # for Global loss
    criterion2 = torch.nn.MSELoss(reduce=False).cuda()  # for refine loss
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr,
                                 weight_decay=cfg.weight_decay)

    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            pretrained_dict = checkpoint['state_dict']
            model.load_state_dict(pretrained_dict)
            args.start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'), resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'))
        logger.set_names(['Epoch', 'LR', 'Train Loss'])

    cudnn.benchmark = True
    print(' Total params: %.2fMB' % (sum(p.numel() for p in model.parameters()) / (1024 * 1024) * 4))

    train_loader = torch.utils.data.DataLoader(
        MscocoMulti(cfg),
        batch_size=cfg.batch_size * args.num_gpus,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, cfg.lr_dec_epoch, cfg.lr_gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # train for one epoch
        train_loss = train(train_loader, model, [criterion1, criterion2], optimizer)
        print('train_loss: ', train_loss)

        # append logger file
        logger.append([epoch + 1, lr, train_loss])

        save_model({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, checkpoint=args.checkpoint)

    logger.close()
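The Logger in this project is tabular rather than TensorBoard-based: set_names declares the column headers once, and append writes one aligned row per epoch. A minimal hypothetical sketch of that pattern (not the project's actual utils/logger.py):

class TabularLogger:
    """Hypothetical sketch of the set_names/append pattern shown above."""

    def __init__(self, fpath, resume=False):
        # Append when resuming so rows from earlier epochs are kept.
        self.file = open(fpath, 'a' if resume else 'w')

    def set_names(self, names):
        # Write the column headers once, tab-separated.
        self.file.write('\t'.join(names) + '\n')
        self.file.flush()

    def append(self, values):
        # Write one row of numbers aligned with the declared headers.
        self.file.write('\t'.join('{:.6f}'.format(v) for v in values) + '\n')
        self.file.flush()

    def close(self):
        self.file.close()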
Example #9
Source File: VAE_model.py From VAE-GMVAE with Apache License 2.0
def train(self, data_train, data_valid, enable_es=1):
    with tf.Session(graph=self.graph) as session:
        tf.set_random_seed(1234)
        logger = Logger(session, self.summary_dir)
        # Initialize the tensorflow saver that will be used to save checkpoints.
        # max_to_keep: defaults to keeping the 5 most recent checkpoints of your model.
        saver = tf.train.Saver()
        early_stopping = EarlyStopping()

        if self.restore == 1 and self.load(session, saver):
            num_epochs_trained = self.vae_graph.cur_epoch_tensor.eval(session)
            print('EPOCHS trained: ', num_epochs_trained)
        else:
            print('Initializing Variables ...')
            tf.global_variables_initializer().run()

        if self.vae_graph.cur_epoch_tensor.eval(session) == self.epochs:
            return

        for cur_epoch in range(self.vae_graph.cur_epoch_tensor.eval(session), self.epochs + 1, 1):
            print('EPOCH: ', cur_epoch)
            self.current_epoch = cur_epoch

            # beta = utils.sigmoid(cur_epoch - 50)
            beta = 1.
            loss_tr, recons_tr, cond_prior_tr, L2_loss = self.train_epoch(session, logger, data_train, beta=beta)
            if np.isnan(loss_tr):
                print('Encountered NaN, stopping training. Please check the learning_rate settings and the momentum.')
                print('Recons: ', recons_tr)
                print('KL: ', cond_prior_tr)
                sys.exit()

            loss_val, recons_val, cond_prior_val = self.valid_epoch(session, logger, data_valid, beta=beta)

            print('TRAIN | Loss: ', loss_tr, ' | Recons: ', recons_tr,
                  ' | KL: ', cond_prior_tr, ' | L2_loss: ', L2_loss)
            print('VALID | Loss: ', loss_val, ' | Recons: ', recons_val,
                  ' | KL: ', cond_prior_val)

            if cur_epoch > 0 and cur_epoch % 10 == 0:
                self.save(session, saver, self.vae_graph.global_step_tensor.eval(session))
                z_matrix = self.vae_graph.get_z_matrix(session, data_valid.random_batch(self.batch_size))
                np.savez(self.z_file, z_matrix)

            session.run(self.vae_graph.increment_cur_epoch_tensor)

            # early stopping
            if enable_es == 1 and early_stopping.stop(loss_val):
                print('Early Stopping!')
                break

        self.save(session, saver, self.vae_graph.global_step_tensor.eval(session))
        z_matrix = self.vae_graph.get_z_matrix(session, data_valid.random_batch(self.batch_size))
        np.savez(self.z_file, z_matrix)
        return
Example #10
Source File: main.py From pytorch-PyraNet with MIT License
def main():
    opt = opts().parse()
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir, now.isoformat())
    model, optimizer = getModel(opt)
    criterion = torch.nn.MSELoss().cuda()

    # if opt.GPU > -1:
    #     print('Using GPU {}'.format(opt.GPU))
    #     model = model.cuda(opt.GPU)
    #     criterion = criterion.cuda(opt.GPU)
    # dev = opt.device
    model = model.cuda()

    val_loader = torch.utils.data.DataLoader(
        MPII(opt, 'val'),
        batch_size=1,
        shuffle=False,
        num_workers=int(ref.nThreads)
    )

    if opt.test:
        log_dict_train, preds = val(0, opt, val_loader, model, criterion)
        sio.savemat(os.path.join(opt.saveDir, 'preds.mat'), mdict={'preds': preds})
        return

    # pretrain pyramidnet once: first define the training data loader for the generator
    train_loader = torch.utils.data.DataLoader(
        MPII(opt, 'train'),
        batch_size=opt.trainBatch,
        shuffle=True if opt.DEBUG == 0 else False,
        num_workers=int(ref.nThreads)
    )

    # call the train method
    for epoch in range(1, opt.nEpochs + 1):
        log_dict_train, _ = train(epoch, opt, train_loader, model, criterion, optimizer)
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if epoch % opt.valIntervals == 0:
            log_dict_val, preds = val(epoch, opt, val_loader, model, criterion)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            # saveModel(model, optimizer, os.path.join(opt.saveDir, 'model_{}.checkpoint'.format(epoch)))
            torch.save(model, os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            sio.savemat(os.path.join(opt.saveDir, 'preds_{}.mat'.format(epoch)), mdict={'preds': preds})
        logger.write('\n')
        if epoch % opt.dropLR == 0:
            lr = opt.LR * (0.1 ** (epoch // opt.dropLR))
            print('Drop LR to {}'.format(lr))
            adjust_learning_rate(optimizer, lr)

    logger.close()
    torch.save(model.cpu(), os.path.join(opt.saveDir, 'model_cpu.pth'))
Example #11
Source File: train.py From high-res-stereo with MIT License
def main():
    log = logger.Logger(args.savemodel, name=args.logname)
    total_iters = 0

    for epoch in range(1, args.epochs + 1):
        total_train_loss = 0
        adjust_learning_rate(optimizer, epoch)

        ## training ##
        for batch_idx, (imgL_crop, imgR_crop, disp_crop_L) in enumerate(TrainImgLoader):
            start_time = time.time()
            loss, vis = train(imgL_crop, imgR_crop, disp_crop_L)
            print('Iter %d training loss = %.3f , time = %.2f' % (batch_idx, loss, time.time() - start_time))
            total_train_loss += loss

            if total_iters % 10 == 0:
                log.scalar_summary('train/loss_batch', loss, total_iters)
            if total_iters % 100 == 0:
                log.image_summary('train/left', imgL_crop[0:1], total_iters)
                log.image_summary('train/right', imgR_crop[0:1], total_iters)
                log.image_summary('train/gt0', disp_crop_L[0:1], total_iters)
                log.image_summary('train/entropy', vis['entropy'][0:1], total_iters)
                log.histo_summary('train/disparity_hist', vis['output3'], total_iters)
                log.histo_summary('train/gt_hist', np.asarray(disp_crop_L), total_iters)
                log.image_summary('train/output3', vis['output3'][0:1], total_iters)
                log.image_summary('train/output4', vis['output4'][0:1], total_iters)
                log.image_summary('train/output5', vis['output5'][0:1], total_iters)
                log.image_summary('train/output6', vis['output6'][0:1], total_iters)
            total_iters += 1

            if (total_iters + 1) % 2000 == 0:
                # SAVE
                savefilename = args.savemodel + '/' + args.logname + '/finetune_' + str(total_iters) + '.tar'
                torch.save({
                    'iters': total_iters,
                    'state_dict': model.state_dict(),
                    'train_loss': total_train_loss / len(TrainImgLoader),
                }, savefilename)

        log.scalar_summary('train/loss', total_train_loss / len(TrainImgLoader), epoch)
        torch.cuda.empty_cache()