Python tensorflow.merge_all_summaries() Examples

The following are 30 code examples of tensorflow.merge_all_summaries(), drawn from open-source projects; the source file and originating project are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
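Before the examples, a quick orientation: tf.merge_all_summaries() merges every summary op registered in a graph collection (by default the SUMMARIES collection) into a single op whose output is a serialized Summary protocol buffer. It belongs to the TensorFlow 0.x API and was renamed to tf.summary.merge_all() in TensorFlow 1.0; the examples below mix both generations. A minimal sketch of the typical pattern using the 1.x names (the tensor and directory names here are illustrative, not taken from any example below):

import tensorflow as tf

x = tf.placeholder(tf.float32, name="x")
tf.summary.scalar("x_value", x)      # registered in the SUMMARIES collection
tf.summary.histogram("x_hist", x)

merged = tf.summary.merge_all()      # one op covering both summaries above

with tf.Session() as sess:
    writer = tf.summary.FileWriter("./logs", sess.graph)
    summary_str = sess.run(merged, feed_dict={x: 1.0})
    writer.add_summary(summary_str, global_step=0)
    writer.close()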
Example #1
Source File: pretrain_LSTM_D.py    From show-adapt-and-tell with MIT License
def train(self):
        self.train_op = self.optim.minimize(self.loss, global_step=self.global_step)
        self.writer = tf.train.SummaryWriter("./logs/D_pretrained", self.sess.graph)
        self.summary_op = tf.merge_all_summaries()
        tf.initialize_all_variables().run()
        self.saver = tf.train.Saver(var_list=self.D_params_dict, max_to_keep=self.max_to_keep)
        count = 0
        for idx in range(self.max_iter//3000):
            self.save(self.checkpoint_dir, count)
            self.evaluate('test', count)
            self.evaluate('train', count)
            for k in tqdm(range(3000)):
                right_images, right_text, _ = self.dataset.sequential_sample(self.batch_size)
                right_length = np.sum((right_text != self.NOT) + 0, 1)
                fake_images, fake_text, _ = self.negative_dataset.sequential_sample(self.batch_size)
                fake_length = np.sum((fake_text != self.NOT) + 0, 1)
                wrong_text = self.dataset.get_wrong_text(self.batch_size)
                wrong_length = np.sum((wrong_text != self.NOT) + 0, 1)
                feed_dict = {self.right_images: right_images, self.right_text: right_text, self.right_length: right_length,
                             self.fake_images: fake_images, self.fake_text: fake_text, self.fake_length: fake_length,
                             self.wrong_images: right_images, self.wrong_text: wrong_text, self.wrong_length: wrong_length}
                _, loss, summary_str = self.sess.run([self.train_op, self.loss, self.summary_op], feed_dict)
                self.writer.add_summary(summary_str, count)
                count += 1
Example #2
Source File: actor_learner.py    From async-deep-rl with Apache License 2.0
def setup_summaries(self):
        episode_reward = tf.Variable(0.)
        s1 = tf.scalar_summary("Episode Reward " + str(self.actor_id), episode_reward)
        if self.alg_type == "a3c":
            summary_vars = [episode_reward]
        else:
            episode_ave_max_q = tf.Variable(0.)
            s2 = tf.scalar_summary("Max Q Value " + str(self.actor_id), episode_ave_max_q)
            logged_epsilon = tf.Variable(0.)
            s3 = tf.scalar_summary("Epsilon " + str(self.actor_id), logged_epsilon)
            summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
        summary_placeholders = [tf.placeholder("float") for _ in range(len(summary_vars))]
        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
        with tf.control_dependencies(update_ops):
            summary_ops = tf.merge_all_summaries()
        return summary_placeholders, update_ops, summary_ops 
Example #3
Source File: Trainer.py    From MOTSFusion with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better do not merge ALL summaries, since otherwise we get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(self.train_network.summaries)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
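The comment in this example points at a real pitfall: merge_all() grabs every summary in the graph, so a merged train op can pull in test-network summaries and force parts of the test network to run during training. Besides merging explicit lists as the example does, a sketch of the alternative using per-network collection keys (the key and tensor names here are illustrative):

import tensorflow as tf

train_loss = tf.placeholder(tf.float32, name="train_loss")
test_loss = tf.placeholder(tf.float32, name="test_loss")

# Register each network's summaries under its own collection key instead of
# the default SUMMARIES collection.
tf.summary.scalar("train/loss", train_loss, collections=["train_summaries"])
tf.summary.scalar("test/loss", test_loss, collections=["test_summaries"])

# Each merge touches only its own collection, so running the train summary
# op never evaluates any test-network tensor.
train_summary_op = tf.summary.merge_all(key="train_summaries")
test_summary_op = tf.summary.merge_all(key="test_summaries")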
Example #4
Source File: selector.py    From dynamic-coattention-network with MIT License
def _add_train_op(self):
    params = self._params

    self._lr_rate = tf.maximum(
        params.min_lr,
        tf.train.exponential_decay(params.lr, self._global_step, 30000, 0.98))

    tvars = tf.trainable_variables()
    # use reserved gpu for gradient computation
    with tf.device(self._get_gpu(self._num_gpus-1)):
      grads, global_norm = tf.clip_by_global_norm(
          tf.gradients(self._loss, tvars), params.max_grad_norm)
    tf.scalar_summary('global_norm', global_norm)
    optimizer = tf.train.AdamOptimizer(self._lr_rate)
    tf.scalar_summary('learning rate', self._lr_rate)
    with tf.device(self._next_device()):
      self._train_op = optimizer.apply_gradients(
          zip(grads, tvars), global_step=self._global_step, name='train_step')
    self._summaries = tf.merge_all_summaries()

    return self._train_op, self._loss
Example #5
Source File: human_pose_nn.py    From gait-recognition with BSD 3-Clause "New" or "Revised" License
def _init_summaries(self):
        if self.is_train:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')

            self.summary_writer = tf.summary.FileWriter(logdir)
            self.summary_writer_by_points = [tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
                                             for i in range(16)]

            tf.scalar_summary('Average euclidean distance', self.euclidean_dist, collections = [KEY_SUMMARIES])

            for i in range(16):
                tf.scalar_summary('Joint euclidean distance', self.euclidean_dist_per_joint[i],
                                  collections = [KEY_SUMMARIES_PER_JOINT[i]])

            self.create_summary_from_weights()

            self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
            self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i]) for i in range(16)]
        else:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
            self.summary_writer = tf.summary.FileWriter(logdir) 
Example #6
Source File: summary_ops_test.py    From deep_image_model with Apache License 2.0
def testMergeAllSummaries(self):
    with tf.Graph().as_default():
      const = tf.constant(10.0)
      summ1 = tf.summary.histogram("h", const)
      summ2 = tf.summary.scalar("o", const, collections=["foo_key"])
      summ3 = tf.summary.scalar("c", const)
      merge = tf.summary.merge_all()
      self.assertEqual("MergeSummary", merge.op.type)
      self.assertEqual(2, len(merge.op.inputs))
      self.assertEqual(summ1, merge.op.inputs[0])
      self.assertEqual(summ3, merge.op.inputs[1])
      merge = tf.merge_all_summaries("foo_key")
      self.assertEqual("MergeSummary", merge.op.type)
      self.assertEqual(1, len(merge.op.inputs))
      self.assertEqual(summ2, merge.op.inputs[0])
      self.assertTrue(tf.merge_all_summaries("bar_key") is None) 
Example #7
Source File: atari_1step_qlearning.py    From FRU with MIT License
def build_summaries():
    episode_reward = tf.Variable(0.)
    scalar_summary("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    scalar_summary("Qmax Value", episode_ave_max_q)
    logged_epsilon = tf.Variable(0.)
    scalar_summary("Epsilon", logged_epsilon)
    # Threads shouldn't modify the main graph, so we use placeholders
    # to assign the value of every summary (instead of using assign method
    # in every thread, that would keep creating new ops in the graph)
    summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
    summary_placeholders = [tf.placeholder("float")
                            for i in range(len(summary_vars))]
    assign_ops = [summary_vars[i].assign(summary_placeholders[i])
                  for i in range(len(summary_vars))]
    summary_op = merge_all_summaries()
    return summary_placeholders, assign_ops, summary_op 
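The comment in the example captures the point of this pattern: worker threads feed episode statistics through fixed placeholders and pre-built assign ops instead of creating new graph ops every episode. A hedged sketch of how the returned triple is typically consumed at the end of an episode (sess, writer, and the statistic variables are illustrative names, not taken from the project):

summary_placeholders, assign_ops, summary_op = build_summaries()

# At the end of an episode: push the episode's statistics into the summary
# variables via the pre-built assign ops, then serialize the merged summary.
stats = [episode_reward_value, ave_max_q_value, epsilon_value]
for placeholder, assign_op, value in zip(summary_placeholders, assign_ops, stats):
    sess.run(assign_op, feed_dict={placeholder: value})
summary_str = sess.run(summary_op)
writer.add_summary(summary_str, global_step=episode_number)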
Example #8
Source File: Trainer.py    From PReMVOS with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.str("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None and len(self.train_network.summaries) > 0:
        # better do not merge ALL summaries, since otherwise we get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        # atm we only collect summaries from the train network
        if grad_norm is None:
          summary_op = tf.summary.merge(self.train_network.summaries)
        else:
          #grad_norm = tf.Print(grad_norm, [grad_norm], "grad_norm")
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          summary_op = tf.summary.merge(self.train_network.summaries + [grad_norm_summary])
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Example #9
Source File: Trainer.py    From PReMVOS with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better do not merge ALL summaries, since otherwise we get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(self.train_network.summaries)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Example #10
Source File: Trainer.py    From TrackR-CNN with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = None
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better do not merge ALL summaries, since otherwise we get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(train_summs)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Example #11
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0
def initialize_graph(self, input_dim):
        self.input_dim = input_dim
        self._setup_base_graph()
        with self.graph.as_default():
            self.sess = tf.Session()
            self.init_op = tf.initialize_all_variables()
            self.summary = tf.merge_all_summaries()
            self.sess.run(self.init_op)
        self.initialized = True 
Example #12
Source File: __init__.py    From iwcs2017-answer-selection with Apache License 2.0
def summary(self):
        if self.__summary is None:
            self.__summary = tf.merge_all_summaries(key='summaries')
        return self.__summary 
Example #13
Source File: base.py    From neural-el with Apache License 2.0
def initialize(self, log_dir="./logs"):
        self.merged_sum = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter(log_dir, self.sess.graph_def)

        tf.initialize_all_variables().run()
        self.load(self.checkpoint_dir)

        start_iter = self.step.eval() 
Example #14
Source File: inception_eval.py    From inception_v3 with Apache License 2.0
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels, _ = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)
    
    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
Example #15
Source File: actor_learner.py    From async-deep-rl with Apache License 2.0
def run(self):
        self.session = tf.Session()
#         self.session = tf.Session(config=tf.ConfigProto(
#              inter_op_parallelism_threads=1,
#              intra_op_parallelism_threads=1))

        if (self.actor_id==0):
            # Initialize TensorBoard summaries
            self.summary_op = tf.merge_all_summaries()
            self.summary_writer = tf.train.SummaryWriter(
                            "{}/{}".format(self.summ_base_dir, self.actor_id), self.session.graph_def) 

            # Initialize network parameters
            g_step = utils.restore_vars(self.saver, self.session, self.game, self.alg_type, self.max_local_steps)
            self.global_step.val.value = g_step
            self.last_saving_step = g_step   
            logger.debug("T{}: Initializing shared memory...".format(self.actor_id))
            self.init_shared_memory()

        # Wait until actor 0 finishes initializing shared memory
        self.barrier.wait()
        
        if self.actor_id > 0:
            logger.debug("T{}: Syncing with shared memory...".format(self.actor_id))
            self.sync_net_with_shared_memory(self.local_network, self.learning_vars)  
            if self.alg_type != "a3c":
                self.sync_net_with_shared_memory(self.target_network, self.target_vars)

        # Wait until all actors are ready to start 
        self.barrier.wait()
        
        # Introduce a different start delay for each actor so that they do not run in lockstep.
        # This is to avoid concurrent parameter updates as much as possible
        time.sleep(0.1877 * self.actor_id) 
Example #16
Source File: selector.py    From dynamic-coattention-network with MIT License
def build_graph(self):
    self._add_placeholders()
    self._build_encoder()
    self._build_decoder()
    if self._params.mode != 'decode':
      alpha_true, beta_true = tf.split(0, 2, self._answers)
      self._global_step = tf.Variable(0, name='global_step', trainable=False)
      self._loss = self._loss_multitask(self._alpha, alpha_true,
                                        self._beta, beta_true)
    if self._params.mode == 'train':
      self._add_train_op()
    self._summaries = tf.merge_all_summaries()
    tf.logging.info('graph built...') 
Example #17
Source File: evaluate.py    From iLID with MIT License
def evaluate():
  """Eval for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels for 10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels, keys = experiment.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = experiment.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        experiment.MOVING_AVERAGE_DECAY)
    variables_to_restore = {}
    for v in tf.all_variables():
      if v in tf.trainable_variables():
        restore_name = variable_averages.average_name(v)
      else:
        restore_name = v.op.name
      variables_to_restore[restore_name] = v

    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
Example #18
Source File: network.py    From iLID with MIT License
def train(self, batch_size, iterations, display_step = 100):
        self.set_activation_summary()

        init = tf.initialize_all_variables()
        self.merged_summary_op = tf.merge_all_summaries()

        with tf.Session() as sess:
            self.saver = tf.train.Saver()
            self.summary_writer = tf.train.SummaryWriter(self.log_path, sess.graph_def)
            sess.run(init)
            self.optimize(sess, batch_size, iterations, display_step)
            return self.evaluate(sess, batch_size) 
Example #19
Source File: neural_network.py    From sentiment-analysis-tensorflow with Apache License 2.0
def __init__(self, hidden_size, vocab_size, embedding_size, max_length, n_classes=2, learning_rate=0.01,
                 random_state=None):
        """
        Builds a TensorFlow LSTM model
        :param hidden_size: Array holding the number of units in the LSTM cell of each rnn layer
        :param vocab_size: Vocabulary size (number of possible words that may appear in a sample)
        :param embedding_size: Words will be encoded using a vector of this size
        :param max_length: Maximum length of an input tensor
        :param n_classes: Number of classification classes
        :param learning_rate: Learning rate of RMSProp algorithm
        :param random_state: Random state for dropout
        """
        # Build TensorFlow graph
        self.input = self.__input(max_length)
        self.seq_len = self.__seq_len()
        self.target = self.__target(n_classes)
        self.dropout_keep_prob = self.__dropout_keep_prob()
        self.word_embeddings = self.__word_embeddings(self.input, vocab_size, embedding_size, random_state)
        self.scores = self.__scores(self.word_embeddings, self.seq_len, hidden_size, n_classes, self.dropout_keep_prob,
                                    random_state)
        self.predict = self.__predict(self.scores)
        self.losses = self.__losses(self.scores, self.target)
        self.loss = self.__loss(self.losses)
        self.train_step = self.__train_step(learning_rate, self.loss)
        self.accuracy = self.__accuracy(self.predict, self.target)
        self.merged = tf.merge_all_summaries() 
Example #20
Source File: event_accumulator_test.py    From deep_image_model with Apache License 2.0
def testTFSummaryScalar(self):
    """Verify processing of tf.summary.scalar, which uses TensorSummary op."""
    event_sink = _EventGenerator(zero_out_timestamps=True)
    writer = SummaryToEventTransformer(event_sink)
    with self.test_session() as sess:
      ipt = tf.placeholder(tf.float32)
      tf.summary.scalar('scalar1', ipt)
      tf.summary.scalar('scalar2', ipt * ipt)
      merged = tf.merge_all_summaries()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged, feed_dict={ipt: i})
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    seq1 = [ea.ScalarEvent(wall_time=0, step=i, value=i) for i in xrange(10)]
    seq2 = [
        ea.ScalarEvent(
            wall_time=0, step=i, value=i * i) for i in xrange(10)
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.IMAGES: [],
        ea.AUDIO: [],
        ea.SCALARS: ['scalar1', 'scalar2'],
        ea.HISTOGRAMS: [],
        ea.COMPRESSED_HISTOGRAMS: [],
        ea.GRAPH: True,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    })

    self.assertEqual(accumulator.Scalars('scalar1'), seq1)
    self.assertEqual(accumulator.Scalars('scalar2'), seq2)
    first_value = accumulator.Scalars('scalar1')[0].value
    self.assertTrue(isinstance(first_value, float)) 
Example #21
Source File: event_accumulator_test.py    From deep_image_model with Apache License 2.0
def testTFSummaryImage(self):
    """Verify processing of tf.summary.image."""
    event_sink = _EventGenerator(zero_out_timestamps=True)
    writer = SummaryToEventTransformer(event_sink)
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here, because it would be tag reuse.
      # Using the tf node name instead allows argument re-use to the image
      # summary.
      with tf.name_scope('1'):
        tf.summary.image('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        tf.summary.image('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        tf.summary.image('images', ipt, max_outputs=3)
      merged = tf.merge_all_summaries()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)

    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()

    tags = [
        u'1/images/image', u'2/images/image/0', u'2/images/image/1',
        u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
    ]

    self.assertTagsEqual(accumulator.Tags(), {
        ea.IMAGES: tags,
        ea.AUDIO: [],
        ea.SCALARS: [],
        ea.HISTOGRAMS: [],
        ea.COMPRESSED_HISTOGRAMS: [],
        ea.GRAPH: True,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    }) 
Example #22
Source File: cifar10_eval.py    From deep_image_model with Apache License 2.0
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
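eval_once itself is not shown in this listing. As a hedged sketch only: in eval scripts of this family it usually restores the latest checkpoint, counts top-1 hits over a fixed number of batches, and writes both the graph's merged summaries and a hand-built precision summary. The FLAGS.num_eval_batches flag below is an assumption standing in for the usual num_examples / batch_size computation, and the global_step written is illustrative:

import numpy as np
import tensorflow as tf

def eval_once(saver, summary_writer, top_k_op, summary_op):
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if not (ckpt and ckpt.model_checkpoint_path):
      return
    saver.restore(sess, ckpt.model_checkpoint_path)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    true_count, total_count = 0, 0
    for _ in range(FLAGS.num_eval_batches):  # assumed flag, see lead-in
      predictions = sess.run(top_k_op)
      true_count += np.sum(predictions)
      total_count += predictions.shape[0]
    precision = true_count / float(total_count)

    # Write the graph's merged summaries plus a manually built scalar.
    summary = tf.Summary()
    summary.ParseFromString(sess.run(summary_op))
    summary.value.add(tag='precision', simple_value=precision)
    summary_writer.add_summary(summary, global_step=0)  # step is illustrative

    coord.request_stop()
    coord.join(threads)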
Example #23
Source File: eval.py    From web_page_classification with MIT License
def evaluate():
    """Eval CNN for a number of steps."""
    with tf.Graph().as_default() as g, tf.device("/cpu:0"):
        # Get sequences and labels
        sequences, labels = model.inputs_eval()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(sequences)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # # Restore the moving average version of the learned variables for eval.
        # variable_averages = tf.train.ExponentialMovingAverage(
        #     model.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        # saver = tf.train.Saver(variables_to_restore)
        saver = tf.train.Saver(tf.all_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        summary_writer = tf.train.SummaryWriter(EVAL_DIR, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                print("eval only once, stope eval")
                break
            print("sleep for {} seconds".format(FLAGS.eval_interval_secs))
            time.sleep(FLAGS.eval_interval_secs) 
Example #24
Source File: eval.py    From web_page_classification with MIT License
def evaluate():
    """Eval CNN for a number of steps."""
    with tf.Graph().as_default() as g, tf.device("/cpu:0"):
        # Get sequences and labels
        sequences, labels = model.inputs_eval()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(sequences)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # # Restore the moving average version of the learned variables for eval.
        # variable_averages = tf.train.ExponentialMovingAverage(
        #     model.MOVING_AVERAGE_DECAY)
        # variables_to_restore = variable_averages.variables_to_restore()
        # saver = tf.train.Saver(variables_to_restore)
        saver = tf.train.Saver(tf.all_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        summary_writer = tf.train.SummaryWriter(EVAL_DIR, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                print("eval only once, stope eval")
                break
            print("sleep for {} seconds".format(FLAGS.eval_interval_secs))
            time.sleep(FLAGS.eval_interval_secs) 
Example #25
Source File: base.py    From VDAIC2017 with MIT License
def _init_summary(self):
        if not hasattr(logger, 'LOG_DIR'):
            raise RuntimeError("Please use logger.set_logger_dir at the beginning of your script.")
        self.summary_writer = tf.train.SummaryWriter(
            logger.LOG_DIR, graph=self.sess.graph)
        self.summary_op = tf.merge_all_summaries()
        # create an empty StatHolder
        self.stat_holder = StatHolder(logger.LOG_DIR)
        # save global_step in stat.json, but don't print it
        self.stat_holder.add_blacklist_tag(['global_step']) 
Example #26
Source File: resnet_model.py    From deeplearning-benchmark with Apache License 2.0
def build_graph(self):
    """Build a whole graph for the model."""
    # This code has been modified to build Resnet 152
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    self._build_model()
    if self.mode == 'train':
      self._build_train_op()
    self.summaries = tf.merge_all_summaries() 
Example #27
Source File: inception_eval.py    From deeplearning-benchmark with Apache License 2.0
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
Example #28
Source File: GAN_models.py    From WassersteinGAN.tensorflow with MIT License
def initialize_network(self, logs_dir):
        print("Initializing network...")
        self.logs_dir = logs_dir
        self.sess = tf.Session()
        self.summary_op = tf.merge_all_summaries()
        self.saver = tf.train.Saver()
        self.summary_writer = tf.train.SummaryWriter(self.logs_dir, self.sess.graph)

        self.sess.run(tf.initialize_all_variables())
        ckpt = tf.train.get_checkpoint_state(self.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("Model restored...")
        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(self.sess, self.coord) 
Example #29
Source File: q_network.py    From agent-trainer with MIT License
def __init__(self,
                 screen_width,
                 screen_height,
                 num_channels,
                 num_actions,
                 metrics_directory,
                 batched_forward_pass_size,
                 hyperparameters=QNetworkHyperparameters()):
        self.logger = logging.getLogger(__name__)
        self.screen_width = screen_width
        self.screen_height = screen_height
        self.num_channels = num_channels
        self.num_actions = num_actions
        self.batched_forward_pass_size = batched_forward_pass_size
        self.hyperparameters = hyperparameters

        self.tf_graph = tf.Graph()
        self.tf_graph_forward_pass_bundle_single = self._build_graph_forward_pass_bundle(self.tf_graph, 1)
        self.tf_graph_forward_pass_bundle_batched = self._build_graph_forward_pass_bundle(self.tf_graph, batched_forward_pass_size)
        self.tf_graph_train_bundle = self._build_graph_train_bundle(self.tf_graph)

        self.tf_session = tf.Session(graph=self.tf_graph)

        with self.tf_graph.as_default():
            self.tf_all_summaries = tf.merge_all_summaries()
            self.tf_summary_writer = tf.train.SummaryWriter(logdir=metrics_directory, graph=self.tf_graph)
            self.tf_saver = tf.train.Saver()
            tf.initialize_all_variables().run(session=self.tf_session)

        self.assigns_train_to_forward_pass_variables = self._build_assigns_train_to_forward_pass_variables() 
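A detail this example relies on: tf.merge_all_summaries() reads the summary collection of the current default graph, which is why the call is placed inside the with self.tf_graph.as_default(): block. A minimal sketch of that per-graph behavior (graph names are illustrative):

import tensorflow as tf

g1, g2 = tf.Graph(), tf.Graph()

with g1.as_default():
    tf.summary.scalar("a", tf.constant(1.0))
    merged_g1 = tf.summary.merge_all()  # merges g1's summaries only

with g2.as_default():
    merged_g2 = tf.summary.merge_all()  # g2 has no summaries

assert merged_g1 is not None
assert merged_g2 is None                # merge_all returns None when empty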
Example #30
Source File: d_model_singleScale.py    From medSynthesis with MIT License
def build_model(self):
        self.inputMR=tf.placeholder(tf.float32, shape=[None, self.depth_MR, self.height_MR, self.width_MR, 1])
        self.CT_GT=tf.placeholder(tf.float32, shape=[None, self.depth_CT, self.height_CT, self.width_CT, 1])
        batch_size_tf = tf.shape(self.inputMR)[0]  #variable batchsize so we can test here
        self.train_phase = tf.placeholder(tf.bool, name='phase_train')
        self.G = self.generator(self.inputMR, batch_size_tf)
        print('shape output G ', self.G.get_shape())
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.g_loss = lp_loss(self.G, self.CT_GT, self.l_num, batch_size_tf)
        print('learning rate ', self.learning_rate)
        self.g_optim = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.g_loss)
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter("./summaries", self.sess.graph)
        self.saver = tf.train.Saver()