Python tensorflow.slim Examples

The following are 26 code examples of tensorflow.slim, the TF-Slim layer library that ships as tf.contrib.slim in TensorFlow 1.x. The original project and source file are listed above each example. Unless a snippet shows its own imports, assume import tensorflow as tf and slim = tf.contrib.slim.
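As a quick orientation, here is a minimal TF-Slim snippet in the style of the examples below (a sketch, assuming TensorFlow 1.x; under TensorFlow 2.x the same API lives on in the standalone tf_slim package):

import tensorflow as tf
slim = tf.contrib.slim  # TF 1.x; with TF 2.x, use: import tf_slim as slim

inputs = tf.placeholder(tf.float32, shape=[None, 16, 16, 3], name='input')
# One call creates the weights, bias, and ReLU activation; compare Example #1.
net = slim.conv2d(inputs, 8, [3, 3], scope='conv1')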
Example #1
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_stacked_conv2d(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_stacked_conv2d/input')
      with slim.arg_scope([slim.conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
          weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.conv2d(inputs, 2, [5, 5], scope='conv1')
        net = slim.conv2d(net, 4, [3, 3], padding='VALID', scope='conv2')
        net = slim.conv2d(net, 8, [3, 3], scope='conv3')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_stacked_conv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
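Note how slim.arg_scope behaves here: the padding, weights_initializer, and weights_regularizer become defaults for every slim.conv2d inside the block, while the explicit padding='VALID' on conv2 overrides the scoped default for that single call.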
Example #2
Source File: base_model.py    From hfnet with MIT License
def _checkpoint_var_search(self, checkpoint_path):
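        """Compare the model's variables against a checkpoint's contents.

        Returns a tuple of (restorable variables, sorted matched names,
        sorted names missing from the checkpoint, sorted shape conflicts).
        """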
        reader = tf.train.NewCheckpointReader(checkpoint_path)
        saved_shapes = reader.get_variable_to_shape_map()
        model_names = tf.model_variables()  # Used by tf.slim layers
        if not model_names:
            model_names = tf.global_variables()  # Fallback when slim is not used
        model_names = set([v.name.split(':')[0] for v in model_names])
        checkpoint_names = set(saved_shapes.keys())
        found_names = model_names & checkpoint_names
        missing_names = model_names - checkpoint_names
        shape_conflicts = set()
        restored = []
        with tf.variable_scope('', reuse=True):
            for name in found_names:
                var = tf.get_variable(name)
                var_shape = var.get_shape().as_list()
                if var_shape == saved_shapes[name]:
                    restored.append(var)
                else:
                    shape_conflicts.add(name)
        found_names -= shape_conflicts
        return (restored, sorted(found_names),
                sorted(missing_names), sorted(shape_conflicts)) 
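A typical use of the returned tuple, sketched here as an assumption since the calling code is not shown, is a partial restore that skips missing or shape-conflicting variables:

restored, found, missing, conflicts = self._checkpoint_var_search(checkpoint_path)
tf.train.Saver(var_list=restored).restore(sess, checkpoint_path)  # sess: an open tf.Session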
Example #3
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_custom_tile(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None, 8], name='input')
      with slim.arg_scope([slim.fully_connected],
                          weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
                          weights_regularizer=slim.l2_regularizer(0.0005)):
        y = slim.fully_connected(inputs, 10, scope='fc')
        y = slim.unit_norm(y, dim=1)

    output_name = [y.op.name]
    coreml_model = self._test_tf_model(graph,
                        {"input:0": [1, 8]},
                        output_name,
                        check_numerical_accuracy=False,
                        add_custom_layers=True)

    spec = coreml_model.get_spec()
    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[9].custom)
    self.assertEqual('Tile', layers[9].custom.className) 
Example #4
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_dilated_depthwise_conv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_separable_conv2d/input')
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        net = slim.separable_conv2d(inputs,
            num_outputs=None,
            stride=1,
            depth_multiplier=1,
            kernel_size=[3, 3],
            rate=2,
            scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_separable_conv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
Example #5
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_lenet(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,28,28,1],
          name='test_slim_lenet/input')
      net = slim.conv2d(inputs, 4, [5,5], scope='conv1')
      net = slim.avg_pool2d(net, [2,2], scope='pool1')
      net = slim.conv2d(net, 6, [5,5], scope='conv2')
      net = slim.max_pool2d(net, [2,2], scope='pool2')
      net = slim.flatten(net, scope='flatten3')
      net = slim.fully_connected(net, 10, scope='fc4')
      net = slim.fully_connected(net, 10, activation_fn=None, scope='fc5')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_lenet/input:0":[1,28,28,1]},
        output_name, delta=1e-2) 
Example #6
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_convnet(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,8,8,3],
          name='test_slim_convnet/input')
      with slim.arg_scope([slim.conv2d, slim.fully_connected],
          weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
          weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.conv2d(inputs, 2, [3, 3], scope='conv1')
        net = slim.flatten(net, scope='flatten3')
        net = slim.fully_connected(net, 6, scope='fc6')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_convnet/input:0":[1,8,8,3]},
        output_name, delta=1e-2) 
Example #7
Source File: base_model.py    From hierarchical_loc with BSD 3-Clause "New" or "Revised" License
def _checkpoint_var_search(self, checkpoint_path):
        reader = tf.train.NewCheckpointReader(checkpoint_path)
        saved_shapes = reader.get_variable_to_shape_map()
        model_names = tf.model_variables()  # Used by tf.slim layers
        if not model_names:
            model_names = tf.global_variables()  # Fallback when slim is not used
        model_names = set([v.name.split(':')[0] for v in model_names])
        checkpoint_names = set(saved_shapes.keys())
        found_names = model_names & checkpoint_names
        missing_names = model_names - checkpoint_names
        shape_conflicts = set()
        restored = []
        with tf.variable_scope('', reuse=True):
            for name in found_names:
                var = tf.get_variable(name)
                var_shape = var.get_shape().as_list()
                if var_shape == saved_shapes[name]:
                    restored.append(var)
                else:
                    shape_conflicts.add(name)
        found_names -= shape_conflicts
        return (restored, sorted(found_names),
                sorted(missing_names), sorted(shape_conflicts)) 
Example #8
Source File: launch.py    From in-silico-labeling with Apache License 2.0
def export(gitapp: controller.GetInputTargetAndPredictedParameters):
  g = tf.Graph()
  with g.as_default():
    assert FLAGS.metric == METRIC_STITCH

    controller.setup_stitch(gitapp)

    log_entry_points(g)

    signature_map = dict(
        [(o.name, o) for o in g.get_operations() if 'entry_point' in o.name])

    logging.info('Exporting checkpoint at %s to %s', FLAGS.restore_directory,
                 FLAGS.export_directory)
    slim.export_for_serving(
        g,
        checkpoint_dir=FLAGS.restore_directory,
        export_dir=FLAGS.export_directory,
        generic_signature_tensor_map=signature_map) 
Example #9
Source File: launch.py    From in-silico-labeling with Apache License 2.0
def eval_stitch(gitapp: controller.GetInputTargetAndPredictedParameters):
  g = tf.Graph()
  with g.as_default():
    controller.setup_stitch(gitapp)

    summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
    input_summary_op = next(
        x for x in summary_ops if 'input_error_panel' in x.name)
    target_summary_op = next(
        x for x in summary_ops if 'target_error_panel' in x.name)

    log_entry_points(g)

    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        num_evals=0,
        checkpoint_dir=train_directory(),
        logdir=output_directory(),
        # Merge the summaries to keep the graph state in sync.
        summary_op=tf.summary.merge([input_summary_op, target_summary_op]),
        eval_interval_secs=FLAGS.eval_interval_secs) 
Example #10
Source File: imagenet_train.py    From tensorflow-litterbox with Apache License 2.0
def main(_):
    util.check_tensorflow_version()

    dataset = ImagenetData(subset=FLAGS.subset)

    processor = ProcessorImagenet()
    processor.label_offset = FLAGS.label_offset

    feed = FeedImagesWithLabels(dataset=dataset, processor=processor)

    model_params = {
        'num_classes': feed.num_classes_for_network(),
        'network': FLAGS.network,
    }

    if FLAGS.my:
        # My variants of Resnet, Inception, and VGG networks
        model = ModelMySlim(params=model_params)
    else:
        # Google's tf.slim models
        model = ModelGoogleSlim(params=model_params)
        model.check_norm(processor.normalize)

    exec_train.train(feed=feed, model=model) 
Example #11
Source File: imagenet_eval.py    From tensorflow-litterbox with Apache License 2.0
def main(_):
    util.check_tensorflow_version()

    dataset = ImagenetData(subset=FLAGS.subset)

    processor = ProcessorImagenet()
    processor.label_offset = FLAGS.label_offset

    feed = FeedImagesWithLabels(dataset=dataset, processor=processor)

    model_params = {
        'num_classes': feed.num_classes_for_network(),
        'network': FLAGS.network,
    }

    if FLAGS.my:
        # My variants of Resnet, Inception, and VGG networks
        model = ModelMySlim(params=model_params)
    else:
        # Google's tf.slim models
        model = ModelGoogleSlim(params=model_params)
        model.check_norm(processor.normalize)

    exec_eval.evaluate(feed=feed, model=model) 
Example #12
Source File: base_network.py    From Table-Detection-using-Deep-learning with BSD 3-Clause "New" or "Revised" License
def _build(self, inputs, is_training=False):
        inputs = self.preprocess(inputs)
        with slim.arg_scope(self.arg_scope):
            net, end_points = self.network(is_training=is_training)(inputs)

            return {
                'net': net,
                'end_points': end_points,
            } 
Example #13
Source File: example_predict.py    From tensorflow-litterbox with Apache License 2.0
def main(_):
    util.check_tensorflow_version()

    dataset = ExampleData(subset='')

    processor = ProcessorImagenet()
    processor.output_offset = FLAGS.output_offset

    feed = FeedImagesWithLabels(dataset=dataset, processor=processor)

    model_params = {
        'num_classes': feed.num_classes_for_network(),
        'network': FLAGS.network,
    }
    if FLAGS.my:
        # My variants of Resnet, Inception, and VGG networks
        model = ModelMySlim(params=model_params)
    else:
        # Google's tf.slim models
        model = ModelGoogleSlim(params=model_params)
        model.check_norm(processor.normalize)

    output, num_entries = exec_predict.predict(feed, model)

    output_columns = ['Img']
    if FLAGS.output_prob:
        # Dump class probabilities to CSV file.
        class_labels = []
        for c in range(dataset.num_classes()):
            class_labels.append("c%s" % c)
        output_columns += class_labels
        output = np.vstack([np.column_stack([o[1], o[0]]) for o in output])
    else:
        # Dump class index to CSV file
        output_columns += ['Class']
        output = np.vstack([np.column_stack([o[1], np.argmax(o[0], axis=1)]) for o in output])

    df = pd.DataFrame(output, columns=output_columns)
    df.Img = df.Img.apply(lambda x: os.path.basename(x.decode()))
    df.to_csv('./output.csv', index=False) 
Example #14
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_unit_norm(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,8],
          name='test_slim_unit_norm/input')
      with slim.arg_scope([slim.fully_connected],
          weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
          weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.fully_connected(inputs, 10, scope='fc')
        net = slim.unit_norm(net, dim=1)
    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_unit_norm/input:0":[1,8]},
        output_name, delta=1e-2) 
Example #15
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_deconv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_deconv2d/input')
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        net = slim.conv2d_transpose(inputs, 2, [3, 3], scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_decconv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2)

  # TODO - this fails due to unsupported op "Tile" 
Example #16
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_separable_conv(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_separable_conv2d/input')
      with slim.arg_scope([slim.separable_conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3)):
        net = slim.separable_conv2d(inputs, 2, [5, 5], 2, scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_separable_conv2d/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
Example #17
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_conv_bn_no_beta(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_conv_bn_no_beta/input')
      with slim.arg_scope([slim.conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
          weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.conv2d(inputs, 2, [5, 5], scope='conv1')
        net = slim.batch_norm(net, center=False, scale=False, is_training=False)
    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_conv_bn_no_beta/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
Example #18
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_conv_bn(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_conv2d_bn/input')
      with slim.arg_scope([slim.conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
          weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.conv2d(inputs, 2, [5, 5], scope='conv1')
        net = slim.batch_norm(net, center=True, scale=True, is_training=False)
    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_conv2d_bn/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
Example #19
Source File: base_network.py    From Tabulo with BSD 3-Clause "New" or "Revised" License
def _build(self, inputs, is_training=False):
        inputs = self.preprocess(inputs)
        with slim.arg_scope(self.arg_scope):
            net, end_points = self.network(is_training=is_training)(inputs)

            return {
                'net': net,
                'end_points': end_points,
            } 
Example #20
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_fc(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,8],
          name='test_slim_vgg_fc/input')
      with slim.arg_scope([slim.fully_connected],
          weights_initializer=tf.truncated_normal_initializer(0.0, 0.2),
          weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.fully_connected(inputs, 10, scope='fc')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_vgg_fc/input:0":[1,8]},
        output_name, delta=1e-2) 
Example #21
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_slim_repeat(self):
    graph = tf.Graph()
    with graph.as_default() as g:
      inputs = tf.placeholder(tf.float32, shape=[None,16,16,3],
          name='test_slim_repeat/input')
      with slim.arg_scope([slim.conv2d], padding='SAME',
          weights_initializer=tf.truncated_normal_initializer(stddev=0.3),
          weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')

    output_name = [net.op.name]
    self._test_tf_model(graph,
        {"test_slim_repeat/input:0":[1,16,16,3]},
        output_name, delta=1e-2) 
Example #22
Source File: test.py    From taskonomy with MIT License
def setup_metrics( inputs, model, cfg ):
    # predictions = model[ 'model' ].
    # Choose the metrics to compute:
    # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map( {} )
    return  {}, {} 
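The stub above deliberately returns empty metric maps. For reference, a populated version would follow the standard TF-Slim pattern; a sketch, where predictions and targets stand in for tensors taken from model and inputs:

names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    'metrics/mse': slim.metrics.streaming_mean_squared_error(predictions, targets),
    'metrics/mae': slim.metrics.streaming_mean_absolute_error(predictions, targets),
})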
Example #23
Source File: base_network.py    From luminoth with BSD 3-Clause "New" or "Revised" License
def _build(self, inputs, is_training=False):
        inputs = self.preprocess(inputs)
        with slim.arg_scope(self.arg_scope):
            net, end_points = self.network(is_training=is_training)(inputs)

            return {
                'net': net,
                'end_points': end_points,
            } 
Example #24
Source File: launch.py    From in-silico-labeling with Apache License 2.0
def eval_loss(gitapp: controller.GetInputTargetAndPredictedParameters):
  g = tf.Graph()
  with g.as_default():
    total_loss_op, input_loss_lts, target_loss_lts = total_loss(gitapp)

    metric_names = ['total_loss']
    metric_values = [total_loss_op]
    for name, loss_lt in dict(input_loss_lts, **target_loss_lts).items():
      metric_names.append(name)
      metric_values.append(loss_lt.tensor)
    metric_names = ['metric/' + n for n in metric_names]
    metric_values = [metrics.streaming_mean(v) for v in metric_values]

    names_to_values, names_to_updates = metrics.aggregate_metric_map(
        dict(zip(metric_names, metric_values)))

    for name, value in names_to_values.items():
      slim.summaries.add_scalar_summary(value, name, print_summary=True)

    log_entry_points(g)

    num_batches = FLAGS.metric_num_examples // gitapp.bp.size

    slim.evaluation.evaluation_loop(
        master=FLAGS.master,
        checkpoint_dir=train_directory(),
        logdir=output_directory(),
        num_evals=num_batches,
        eval_op=list(names_to_updates.values()),
        eval_interval_secs=FLAGS.eval_interval_secs) 
Example #25
Source File: launch.py    From in-silico-labeling with Apache License 2.0
def train(gitapp: controller.GetInputTargetAndPredictedParameters):
  """Train a model."""
  g = tf.Graph()
  with g.as_default():
    total_loss_op, _, _ = total_loss(gitapp)

    if FLAGS.optimizer == OPTIMIZER_MOMENTUM:
      # TODO(ericmc): We may want to do weight decay with the other
      # optimizers, too.
      learning_rate = tf.train.exponential_decay(
          FLAGS.learning_rate,
          slim.variables.get_global_step(),
          FLAGS.learning_decay_steps,
          0.999,
          staircase=False)
      tf.summary.scalar('learning_rate', learning_rate)

      optimizer = tf.train.MomentumOptimizer(learning_rate, 0.875)
    elif FLAGS.optimizer == OPTIMIZER_ADAGRAD:
      optimizer = tf.train.AdagradOptimizer(FLAGS.learning_rate)
    elif FLAGS.optimizer == OPTIMIZER_ADAM:
      optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    else:
      raise NotImplementedError('Unsupported optimizer: %s' % FLAGS.optimizer)

    # Set up training.
    train_op = slim.learning.create_train_op(
        total_loss_op, optimizer, summarize_gradients=True)

    if FLAGS.restore_directory:
      init_fn = util.restore_model(FLAGS.restore_directory,
                                   FLAGS.restore_logits)

    else:
      logging.info('Training a new model.')
      init_fn = None

    total_variable_size, _ = slim.model_analyzer.analyze_vars(
        slim.get_variables(), print_info=True)
    logging.info('Total number of variables: %d', total_variable_size)

    log_entry_points(g)

    slim.learning.train(
        train_op=train_op,
        logdir=output_directory(),
        master=FLAGS.master,
        is_chief=FLAGS.task == 0,
        number_of_steps=None,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=init_fn,
        saver=tf.train.Saver(keep_checkpoint_every_n_hours=2.0)) 
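Stripped of the optimizer selection, restore logic, and summaries, the core slim training loop above reduces to three calls; a minimal sketch, assuming only a loss tensor and a log directory:

optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
train_op = slim.learning.create_train_op(total_loss_op, optimizer)
slim.learning.train(train_op, logdir='/tmp/model')  # runs until interrupted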
Example #26
Source File: test.py    From taskonomy with MIT License
def run_training( cfg ):
    # set up logging
    tf.logging.set_verbosity( tf.logging.INFO )

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = utils.setup_input( cfg, is_training=False, use_filename_queue=True )
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()

        # build model (and losses and train_op)
        model = setup_model( inputs, cfg, is_training=False )

        # set up metrics to evaluate
        names_to_values, names_to_updates = setup_metrics( inputs, model, cfg )

        # execute training 
        start_time = time.time()
        utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        training_runners = { 'sess': tf.Session(), 'coord': tf.train.Coordinator() }
        data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn( inputs, cfg, is_training=False, use_filename_queue=True )
        training_runners[ 'threads' ] = data_prefetch_init_fn( training_runners[ 'sess' ], training_runners[ 'coord' ] )
        try:
            # This just returns the input as output. It is for testing the
            # data input pipeline only.
            for step in range( inputs[ 'max_steps' ] ):
                input_batch, target_batch, data_idx = training_runners['sess'].run( [ 
                        model['input_batch'],  model['target_batch'], model[ 'data_idxs' ] ] )

                if training_runners['coord'].should_stop():
                    break
        finally:
            utils.request_data_loading_end( training_runners )
            utils.end_data_loading_and_sess( training_runners )
        # else: # Use tf.slim
        #     train_log_dir = os.path.join( cfg['log_dir'], 'slim-train' )

        #     # When ready to use a model, use the code below
        #     train(  model[ 'train_op' ],
        #             train_log_dir,
        #             get_data_prefetch_threads_init_fn( inputs, cfg ), 
        #             global_step=model[ 'global_step' ],
        #             number_of_steps=inputs[ 'max_steps' ],
        #             init_fn=model[ 'init_fn' ],
        #             save_summaries_secs=300,
        #             save_interval_secs=600,
        #             saver=model[ 'saver_op' ] ) 

        end_train_time = time.time() - start_time
        print('time to train %d epochs: %.3f hrs' % (cfg['num_epochs'], end_train_time/(60*60)))
        print('avg time per epoch: %.3f hrs' % ( (end_train_time/(60*60)) / cfg['num_epochs']) )