Python config.cfg.epoch() Examples
The following are three code examples of config.cfg.epoch(). You can vote up the examples you find useful or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module config.cfg, or try the search function.
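In all three examples, cfg is a configuration object imported from a config module (typically from config import cfg), and cfg.epoch holds the number of training epochs. A minimal sketch of how such a config might be defined with TensorFlow 1.x flags follows; the flag names and defaults here are assumptions for illustration, not the projects' actual config files.

import tensorflow as tf

flags = tf.app.flags
# Only a few of the settings the examples read from cfg are shown here.
flags.DEFINE_integer('epoch', 50, 'number of training epochs')
flags.DEFINE_integer('batch_size', 128, 'examples per training step')
flags.DEFINE_integer('train_sum_freq', 100, 'steps between training summaries')
flags.DEFINE_integer('val_sum_freq', 500, 'steps between validation summaries')
flags.DEFINE_integer('save_freq', 3, 'epochs between checkpoints')

cfg = tf.app.flags.FLAGS  # imported elsewhere as: from config import cfg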
Example #1
Source File: main.py, from DFN-tensorflow (Apache License 2.0) | 5 votes
def main(_):
    # get dataset info
    result = create_image_lists(cfg.images)
    max_iters = len(result["train"]) * cfg.epoch // cfg.batch_size

    tf.logging.info('Loading Graph...')
    model = DFN(max_iters,
                batch_size=cfg.batch_size,
                init_lr=cfg.init_lr,
                power=cfg.power,
                momentum=cfg.momentum,
                stddev=cfg.stddev,
                regularization_scale=cfg.regularization_scale,
                alpha=cfg.alpha,
                gamma=cfg.gamma,
                fl_weight=cfg.fl_weight)
    tf.logging.info('Graph loaded.')

    if cfg.is_training:
        # make sure the log and model directories exist and the log file is fresh
        if not tf.gfile.Exists(cfg.logdir):
            tf.gfile.MakeDirs(cfg.logdir)
        if not tf.gfile.Exists(cfg.models):
            tf.gfile.MakeDirs(cfg.models)
        if os.path.exists(cfg.log):
            os.remove(cfg.log)
        fd = open(cfg.log, "a")

        tf.logging.info('Start training...')
        fd.write('Start training...\n')
        train(result, model, cfg.logdir, cfg.train_sum_freq, cfg.val_sum_freq,
              cfg.save_freq, cfg.models, fd)
        tf.logging.info('Training done.')
        fd.write('Training done.')
        fd.close()
    else:
        if not tf.gfile.Exists(cfg.test_outputs):
            tf.gfile.MakeDirs(cfg.test_outputs)
        tf.logging.info('Start testing...')
        test(result, model, cfg.models, cfg.test_outputs)
        tf.logging.info('Testing done.')
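Here cfg.epoch is folded into a total iteration budget before the graph is built: the number of training images times the epoch count, integer-divided by the batch size. A standalone check of that arithmetic follows; the numbers are illustrative assumptions, not values from the DFN-tensorflow config.

# Illustrative values only; in the example they come from result["train"] and cfg.
num_train_images = 2975   # len(result["train"])
epoch = 50                # cfg.epoch
batch_size = 4            # cfg.batch_size

max_iters = num_train_images * epoch // batch_size
print(max_iters)          # 37187 optimizer steps in total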
Example #2
Source File: main.py, from capsule-networks (MIT License) | 4 votes
def train(model, supervisor, num_label):
    trX, trY, num_tr_batch, valX, valY, num_val_batch = load_data(cfg.dataset, cfg.batch_size, is_training=True)
    Y = valY[:num_val_batch * cfg.batch_size].reshape((-1, 1))

    fd_train_acc, fd_loss, fd_val_acc = save_to()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with supervisor.managed_session(config=config) as sess:
        print("\nNote: all of results will be saved to directory: " + cfg.results)
        for epoch in range(cfg.epoch):
            print('Training for epoch ' + str(epoch) + '/' + str(cfg.epoch) + ':')
            if supervisor.should_stop():
                print('supervisor stoped!')
                break
            for step in tqdm(range(num_tr_batch), total=num_tr_batch, ncols=70, leave=False, unit='b'):
                start = step * cfg.batch_size
                end = start + cfg.batch_size
                global_step = epoch * num_tr_batch + step

                if global_step % cfg.train_sum_freq == 0:
                    # run a training step with summaries and log loss/accuracy to CSV
                    _, loss, train_acc, summary_str = sess.run([model.train_op, model.total_loss, model.accuracy, model.train_summary])
                    assert not np.isnan(loss), 'Something wrong! loss is nan...'
                    supervisor.summary_writer.add_summary(summary_str, global_step)

                    fd_loss.write(str(global_step) + ',' + str(loss) + "\n")
                    fd_loss.flush()
                    fd_train_acc.write(str(global_step) + ',' + str(train_acc / cfg.batch_size) + "\n")
                    fd_train_acc.flush()
                else:
                    sess.run(model.train_op)

                if cfg.val_sum_freq != 0 and (global_step) % cfg.val_sum_freq == 0:
                    # evaluate accuracy over the whole validation set
                    val_acc = 0
                    for i in range(num_val_batch):
                        start = i * cfg.batch_size
                        end = start + cfg.batch_size
                        acc = sess.run(model.accuracy, {model.X: valX[start:end], model.labels: valY[start:end]})
                        val_acc += acc
                    val_acc = val_acc / (cfg.batch_size * num_val_batch)
                    fd_val_acc.write(str(global_step) + ',' + str(val_acc) + '\n')
                    fd_val_acc.flush()

            if (epoch + 1) % cfg.save_freq == 0:
                # checkpoint the model every save_freq epochs
                supervisor.saver.save(sess, cfg.logdir + '/model_epoch_%04d_step_%02d' % (epoch, global_step))

        fd_val_acc.close()
        fd_train_acc.close()
        fd_loss.close()
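In this training loop cfg.epoch only bounds the outer loop; progress is tracked with a flat global_step built from the epoch index and the batch index, and it is this step counter that the summary frequencies are compared against. A minimal sketch of that bookkeeping, using assumed sizes in place of load_data and cfg:

# Assumed sizes for illustration; the example gets them from load_data and cfg.
num_train_examples = 55000
batch_size = 128          # cfg.batch_size
epochs = 3                # cfg.epoch

num_tr_batch = num_train_examples // batch_size       # 429 steps per epoch
for epoch in range(epochs):
    for step in range(num_tr_batch):
        global_step = epoch * num_tr_batch + step      # runs 0 .. 1286
print(global_step)        # 1286 == epochs * num_tr_batch - 1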
Example #3
Source File: main.py, from CapsNet-Tensorflow (Apache License 2.0) | 4 votes
def train(model, supervisor, num_label):
    trX, trY, num_tr_batch, valX, valY, num_val_batch = load_data(cfg.dataset, cfg.batch_size, is_training=True)
    Y = valY[:num_val_batch * cfg.batch_size].reshape((-1, 1))

    fd_train_acc, fd_loss, fd_val_acc = save_to()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with supervisor.managed_session(config=config) as sess:
        print("\nNote: all of results will be saved to directory: " + cfg.results)
        for epoch in range(cfg.epoch):
            print("Training for epoch %d/%d:" % (epoch, cfg.epoch))
            if supervisor.should_stop():
                print('supervisor stoped!')
                break
            for step in tqdm(range(num_tr_batch), total=num_tr_batch, ncols=70, leave=False, unit='b'):
                start = step * cfg.batch_size
                end = start + cfg.batch_size
                global_step = epoch * num_tr_batch + step

                if global_step % cfg.train_sum_freq == 0:
                    # run a training step with summaries and log loss/accuracy to CSV
                    _, loss, train_acc, summary_str = sess.run([model.train_op, model.total_loss, model.accuracy, model.train_summary])
                    assert not np.isnan(loss), 'Something wrong! loss is nan...'
                    supervisor.summary_writer.add_summary(summary_str, global_step)

                    fd_loss.write(str(global_step) + ',' + str(loss) + "\n")
                    fd_loss.flush()
                    fd_train_acc.write(str(global_step) + ',' + str(train_acc / cfg.batch_size) + "\n")
                    fd_train_acc.flush()
                else:
                    sess.run(model.train_op)

                if cfg.val_sum_freq != 0 and (global_step) % cfg.val_sum_freq == 0:
                    # evaluate accuracy over the whole validation set
                    val_acc = 0
                    for i in range(num_val_batch):
                        start = i * cfg.batch_size
                        end = start + cfg.batch_size
                        acc = sess.run(model.accuracy, {model.X: valX[start:end], model.labels: valY[start:end]})
                        val_acc += acc
                    val_acc = val_acc / (cfg.batch_size * num_val_batch)
                    fd_val_acc.write(str(global_step) + ',' + str(val_acc) + '\n')
                    fd_val_acc.flush()

            if (epoch + 1) % cfg.save_freq == 0:
                # checkpoint the model every save_freq epochs
                supervisor.saver.save(sess, cfg.logdir + '/model_epoch_%04d_step_%02d' % (epoch, global_step))

        fd_val_acc.close()
        fd_train_acc.close()
        fd_loss.close()
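Example #3 uses the same training loop as Example #2; in both, checkpoints are tied to the epochs counted off by cfg.epoch rather than to individual steps, since a model is saved whenever (epoch + 1) % cfg.save_freq == 0. A quick sketch of which epochs that saves, with an assumed save_freq:

# Assumed values; in the example they come from cfg.
epochs = 10               # cfg.epoch
save_freq = 3             # cfg.save_freq

saved = [epoch for epoch in range(epochs) if (epoch + 1) % save_freq == 0]
print(saved)              # [2, 5, 8] -> a checkpoint after the 3rd, 6th and 9th epoch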