Python tensorflow.merge_all_summaries() Examples

The following are 30 code examples of tensorflow.merge_all_summaries(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
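tf.merge_all_summaries() merges every summary op registered in a graph collection (by default tf.GraphKeys.SUMMARIES) into a single op whose output can be written to an event file. In TensorFlow 1.0 it was renamed to tf.summary.merge_all, alongside tf.scalar_summary -> tf.summary.scalar and tf.train.SummaryWriter -> tf.summary.FileWriter, which is why the examples below mix both spellings. As a reference point, here is a minimal sketch of the usual pattern, assuming the pre-1.0 API; the log directory and the logged variable are placeholders:

import tensorflow as tf

with tf.Graph().as_default():
    x = tf.Variable(0.0, name="x")
    increment = tf.assign_add(x, 1.0)
    tf.scalar_summary("x", x)  # registered in the default SUMMARIES collection

    # A single op that serializes every registered summary when run.
    summary_op = tf.merge_all_summaries()

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        writer = tf.train.SummaryWriter("/tmp/merge_all_demo", sess.graph)
        for step in range(10):
            _, summary_str = sess.run([increment, summary_op])
            writer.add_summary(summary_str, step)  # inspect with TensorBoard
        writer.close()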
Example #1
Source File: summary_ops_test.py    From deep_image_model with Apache License 2.0
def testMergeAllSummaries(self):
    with tf.Graph().as_default():
      const = tf.constant(10.0)
      summ1 = tf.summary.histogram("h", const)
      summ2 = tf.summary.scalar("o", const, collections=["foo_key"])
      summ3 = tf.summary.scalar("c", const)
      merge = tf.summary.merge_all()
      self.assertEqual("MergeSummary", merge.op.type)
      self.assertEqual(2, len(merge.op.inputs))
      self.assertEqual(summ1, merge.op.inputs[0])
      self.assertEqual(summ3, merge.op.inputs[1])
      merge = tf.merge_all_summaries("foo_key")
      self.assertEqual("MergeSummary", merge.op.type)
      self.assertEqual(1, len(merge.op.inputs))
      self.assertEqual(summ2, merge.op.inputs[0])
      self.assertTrue(tf.merge_all_summaries("bar_key") is None) 
Example #2
Source File: Trainer.py    From PReMVOS with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better not to merge ALL summaries, since otherwise we would get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(self.train_network.summaries)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
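The comment in this example is the reason for merging an explicit list instead of calling tf.merge_all_summaries(): with a train and a test network in the same graph, the global SUMMARIES collection mixes ops from both networks, and fetching the merged op could execute parts of the test network during training. Example #1 demonstrates the other way to keep them apart, namely explicit collection keys; a small sketch of that approach applied here (the key names and loss tensors are placeholders):

# Register each network's summaries under its own collection key.
tf.summary.scalar("train/loss", train_loss, collections=["train_summaries"])
tf.summary.scalar("test/loss", test_loss, collections=["test_summaries"])

# Merging by key pulls in nothing from the other network.
summary_op = tf.summary.merge_all(key="train_summaries")
summary_op_test = tf.summary.merge_all(key="test_summaries")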
Example #3
Source File: Trainer.py    From PReMVOS with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.str("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None and len(self.train_network.summaries) > 0:
        # better not to merge ALL summaries, since otherwise we would get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        # atm we only collect summaries from the train network
        if grad_norm is None:
          summary_op = tf.summary.merge(self.train_network.summaries)
        else:
          #grad_norm = tf.Print(grad_norm, [grad_norm], "grad_norm")
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          summary_op = tf.summary.merge(self.train_network.summaries + [grad_norm_summary])
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Example #4
Source File: pretrain_LSTM_D.py    From show-adapt-and-tell with MIT License
def train(self):
        self.train_op = self.optim.minimize(self.loss, global_step=self.global_step)
        self.writer = tf.train.SummaryWriter("./logs/D_pretrained", self.sess.graph)
        self.summary_op = tf.merge_all_summaries()
        tf.initialize_all_variables().run()
        self.saver = tf.train.Saver(var_list=self.D_params_dict, max_to_keep=self.max_to_keep)
        count = 0
        for idx in range(self.max_iter//3000):
            self.save(self.checkpoint_dir, count)
            self.evaluate('test', count)
            self.evaluate('train', count)
            for k in tqdm(range(3000)):
                right_images, right_text, _ = self.dataset.sequential_sample(self.batch_size)
                right_length = np.sum((right_text != self.NOT) + 0, 1)
                fake_images, fake_text, _ = self.negative_dataset.sequential_sample(self.batch_size)
                fake_length = np.sum((fake_text != self.NOT) + 0, 1)
                wrong_text = self.dataset.get_wrong_text(self.batch_size)
                wrong_length = np.sum((wrong_text != self.NOT) + 0, 1)
                feed_dict = {self.right_images: right_images, self.right_text: right_text, self.right_length: right_length,
                             self.fake_images: fake_images, self.fake_text: fake_text, self.fake_length: fake_length,
                             self.wrong_images: right_images, self.wrong_text: wrong_text, self.wrong_length: wrong_length}
                _, loss, summary_str = self.sess.run([self.train_op, self.loss, self.summary_op], feed_dict)
                self.writer.add_summary(summary_str, count)
                count += 1
Example #5
Source File: human_pose_nn.py    From gait-recognition with BSD 3-Clause "New" or "Revised" License
def _init_summaries(self):
        if self.is_train:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')

            self.summary_writer = tf.summary.FileWriter(logdir)
            self.summary_writer_by_points = [tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
                                             for i in range(16)]

            tf.scalar_summary('Average euclidean distance', self.euclidean_dist, collections = [KEY_SUMMARIES])

            for i in range(16):
                tf.scalar_summary('Joint euclidean distance', self.euclidean_dist_per_joint[i],
                                  collections = [KEY_SUMMARIES_PER_JOINT[i]])

            self.create_summary_from_weights()

            self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
            self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i]) for i in range(16)]
        else:
            logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
            self.summary_writer = tf.summary.FileWriter(logdir) 
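A hedged sketch of how the merged ops and the per-joint writers built above might be consumed during training; the session, feed dict, and step counter are assumptions, while the attribute names follow the snippet:

all_summ, per_joint_summs = sess.run(
    [self.ALL_SUMMARIES, self.SUMMARIES_PER_JOINT], feed_dict=feed_dict)
self.summary_writer.add_summary(all_summ, global_step=step)

# Each joint's summary goes to its own writer, so TensorBoard shows the
# sixteen distances as separate runs (point_00 ... point_15).
for writer, summ in zip(self.summary_writer_by_points, per_joint_summs):
    writer.add_summary(summ, global_step=step)
    writer.flush()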
Example #6
Source File: Trainer.py    From TrackR-CNN with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = None
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better not to merge ALL summaries, since otherwise we would get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(train_summs)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Example #7
Source File: atari_1step_qlearning.py    From FRU with MIT License
def build_summaries():
    episode_reward = tf.Variable(0.)
    scalar_summary("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    scalar_summary("Qmax Value", episode_ave_max_q)
    logged_epsilon = tf.Variable(0.)
    scalar_summary("Epsilon", logged_epsilon)
    # Threads shouldn't modify the main graph, so we use placeholders
    # to assign the value of every summary (instead of calling the assign
    # method in every thread, which would keep creating new ops in the graph)
    summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
    summary_placeholders = [tf.placeholder("float")
                            for i in range(len(summary_vars))]
    assign_ops = [summary_vars[i].assign(summary_placeholders[i])
                  for i in range(len(summary_vars))]
    summary_op = merge_all_summaries()
    return summary_placeholders, assign_ops, summary_op 
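A sketch of how the three returned handles are typically driven from the training loop, assuming a live session and writer; the episode statistics are placeholders:

summary_placeholders, assign_ops, summary_op = build_summaries()
writer = tf.train.SummaryWriter(summary_dir, sess.graph)

# At the end of an episode, push the Python-side statistics into the
# summary variables first, then serialize them with a second run call.
stats = [episode_reward_value, episode_max_q_value, epsilon_value]
sess.run(assign_ops, feed_dict=dict(zip(summary_placeholders, stats)))
writer.add_summary(sess.run(summary_op), global_step=episode)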
Example #8
Source File: actor_learner.py    From async-deep-rl with Apache License 2.0
def setup_summaries(self):
        episode_reward = tf.Variable(0.)
        s1 = tf.scalar_summary("Episode Reward " + str(self.actor_id), episode_reward)
        if self.alg_type == "a3c":
            summary_vars = [episode_reward]
        else:
            episode_ave_max_q = tf.Variable(0.)
            s2 = tf.scalar_summary("Max Q Value " + str(self.actor_id), episode_ave_max_q)
            logged_epsilon = tf.Variable(0.)
            s3 = tf.scalar_summary("Epsilon " + str(self.actor_id), logged_epsilon)
            summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
        summary_placeholders = [tf.placeholder("float") for _ in range(len(summary_vars))]
        update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
        with tf.control_dependencies(update_ops):
            summary_ops = tf.merge_all_summaries()
        return summary_placeholders, update_ops, summary_ops 
Example #9
Source File: selector.py    From dynamic-coattention-network with MIT License
def _add_train_op(self):
    params = self._params

    self._lr_rate = tf.maximum(
        params.min_lr,
        tf.train.exponential_decay(params.lr, self._global_step, 30000, 0.98))

    tvars = tf.trainable_variables()
    # use reserved gpu for gradient computation
    with tf.device(self._get_gpu(self._num_gpus-1)):
      grads, global_norm = tf.clip_by_global_norm(
          tf.gradients(self._loss, tvars), params.max_grad_norm)
    tf.scalar_summary('global_norm', global_norm)
    optimizer = tf.train.AdamOptimizer(self._lr_rate)
    tf.scalar_summary('learning rate', self._lr_rate)
    with tf.device(self._next_device()):
      self._train_op = optimizer.apply_gradients(
          zip(grads, tvars), global_step=self._global_step, name='train_step')
    self._summaries = tf.merge_all_summaries()

    return self._train_op, self._loss
Example #10
Source File: Trainer.py    From MOTSFusion with MIT License
def init_summaries(self, config, grad_norm=None):
    summdir = config.dir("summary_dir", "summaries")
    model = config.string("model")
    summdir += model + "/"
    tf.gfile.MakeDirs(summdir)
    summary_writer = tf.summary.FileWriter(summdir, self.session.graph)
    summary_op = None
    summary_op_test = None
    if config.bool("write_summaries", True):
      if self.train_network is not None:
        train_summs = self.train_network.summaries
        if grad_norm is not None:
          grad_norm_summary = tf.summary.scalar("grad_norm", grad_norm)
          train_summs.append(grad_norm_summary)
        # better not to merge ALL summaries, since otherwise we would get summaries from different networks
        # and might execute (parts of) the test network while training
        # self.summary_op = tf.merge_all_summaries()
        if len(train_summs) > 0:
          summary_op = tf.summary.merge(self.train_network.summaries)
      if self.test_network is not None and len(self.test_network.summaries) > 0:
        summary_op_test = tf.summary.merge(self.test_network.summaries)
    return summary_writer, summary_op, summary_op_test 
Example #11
Source File: base.py    From ternarynet with Apache License 2.0
def _init_summary(self):
        if not hasattr(logger, 'LOG_DIR'):
            raise RuntimeError("Please use logger.set_logger_dir at the beginning of your script.")
        self.summary_writer = tf.train.SummaryWriter(
            logger.LOG_DIR, graph=self.sess.graph)
        self.summary_op = tf.merge_all_summaries()
        # create an empty StatHolder
        self.stat_holder = StatHolder(logger.LOG_DIR)
        # save global_step in stat.json, but don't print it
        self.stat_holder.add_blacklist_tag(['global_step']) 
Example #12
Source File: inception_eval.py    From Action_Recognition_Zoo with MIT License
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
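The loop above delegates to an _eval_once helper that is not shown in the snippet. A rough sketch of what such a helper usually does in these Inception-style eval scripts (restore the latest checkpoint, drive the input queues, count top-1/top-5 hits, and emit the merged summaries); the body below is an approximation, not the project's exact code:

import math
import numpy as np

def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if not (ckpt and ckpt.model_checkpoint_path):
            print('No checkpoint file found')
            return
        saver.restore(sess, ckpt.model_checkpoint_path)

        # Start the queue runners that feed image_processing.inputs().
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
            count_top_1, count_top_5, total = 0.0, 0.0, 0
            for _ in range(num_iter):
                top_1, top_5 = sess.run([top_1_op, top_5_op])
                count_top_1 += np.sum(top_1)
                count_top_5 += np.sum(top_5)
                total += FLAGS.batch_size
            print('precision @ 1 = %.4f, recall @ 5 = %.4f'
                  % (count_top_1 / total, count_top_5 / total))
            summary_writer.add_summary(sess.run(summary_op))
        finally:
            coord.request_stop()
            coord.join(threads, stop_grace_period_secs=10)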
Example #13
Source File: model.py    From bi-att-flow with Apache License 2.0
def __init__(self, config):
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W, H = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size, config.max_tree_height
        self.x = tf.placeholder('int32', [None, M, JX], name='x')
        self.cx = tf.placeholder('int32', [None, M, JX, W], name='cx')
        self.q = tf.placeholder('int32', [None, JQ], name='q')
        self.cq = tf.placeholder('int32', [None, JQ, W], name='cq')
        self.tx = tf.placeholder('int32', [None, M, H, JX], name='tx')
        self.tx_edge_mask = tf.placeholder('bool', [None, M, H, JX, JX], name='tx_edge_mask')
        self.y = tf.placeholder('bool', [None, M, H, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')

        # Define misc

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()

        self.ema_op = self._get_ema_op()
        self.summary = tf.merge_all_summaries() 
Example #14
Source File: model.py    From adversarial-squad with MIT License
def __init__(self, config):
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W, H = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size, config.max_tree_height
        self.x = tf.placeholder('int32', [None, M, JX], name='x')
        self.cx = tf.placeholder('int32', [None, M, JX, W], name='cx')
        self.q = tf.placeholder('int32', [None, JQ], name='q')
        self.cq = tf.placeholder('int32', [None, JQ, W], name='cq')
        self.tx = tf.placeholder('int32', [None, M, H, JX], name='tx')
        self.tx_edge_mask = tf.placeholder('bool', [None, M, H, JX, JX], name='tx_edge_mask')
        self.y = tf.placeholder('bool', [None, M, H, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')

        # Define misc

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()

        self.ema_op = self._get_ema_op()
        self.summary = tf.merge_all_summaries() 
Example #15
Source File: callbacks.py    From KerasNeuralFingerprint with MIT License
def _set_model(self, model):
        import tensorflow as tf
        import keras.backend.tensorflow_backend as KTF

        self.model = model
        self.sess = KTF.get_session()
        if self.histogram_freq and self.merged is None:
            layers = self.model.layers
            for layer in layers:
                if hasattr(layer, 'W'):
                    tf.histogram_summary('{}_W'.format(layer), layer.W)
                if hasattr(layer, 'b'):
                    tf.histogram_summary('{}_b'.format(layer), layer.b)
                if hasattr(layer, 'output'):
                    tf.histogram_summary('{}_out'.format(layer),
                                         layer.output)
        self.merged = tf.merge_all_summaries()
        if self.write_graph:
            if parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            self.writer = tf.train.SummaryWriter(self.log_dir) 
Example #16
Source File: didineuralmodel.py    From Supply-demand-forecasting with MIT License
def add_visualize_node(self):
        # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
        self.merged = tf.merge_all_summaries()
        self.train_writer = tf.train.SummaryWriter(self.summaries_dir+ '/train',
                                        self.graph)
        self.test_writer = tf.train.SummaryWriter(self.summaries_dir + '/test')

        return 
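A hedged sketch of how the merged op and the two writers are then used, mirroring the MNIST-with-summaries tutorial this pattern comes from; the feed dicts, train op, and step counter are assumptions:

# Training step: fetch the merged summaries together with the train op
# and log them under the 'train' run.
summary, _ = sess.run([self.merged, self.train_op], feed_dict=train_feed)
self.train_writer.add_summary(summary, step)

# Periodically evaluate the same merged op on held-out data and log it
# under the 'test' run; TensorBoard overlays the two curves.
if step % 10 == 0:
    summary = sess.run(self.merged, feed_dict=test_feed)
    self.test_writer.add_summary(summary, step)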
Example #17
Source File: model.py    From adversarial-squad with MIT License
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
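        # note: the next line immediately overrides this merge_all result,
        # restricting the merged summaries to those created under this model's scope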
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #18
Source File: model.py    From convai-bot-1337 with GNU General Public License v3.0
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #19
Source File: model.py    From convai-bot-1337 with GNU General Public License v3.0
def __init__(self, config):
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W, H = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size, config.max_tree_height
        self.x = tf.placeholder('int32', [None, M, JX], name='x')
        self.cx = tf.placeholder('int32', [None, M, JX, W], name='cx')
        self.q = tf.placeholder('int32', [None, JQ], name='q')
        self.cq = tf.placeholder('int32', [None, JQ, W], name='cq')
        self.tx = tf.placeholder('int32', [None, M, H, JX], name='tx')
        self.tx_edge_mask = tf.placeholder('bool', [None, M, H, JX, JX], name='tx_edge_mask')
        self.y = tf.placeholder('bool', [None, M, H, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')

        # Define misc

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()

        self.ema_op = self._get_ema_op()
        self.summary = tf.merge_all_summaries() 
Example #20
Source File: inception_eval.py    From AI_Reader with Apache License 2.0
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
Example #21
Source File: resnet_model.py    From Action_Recognition_Zoo with MIT License
def build_graph(self):
    """Build a whole graph for the model."""
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    self._build_model()
    if self.mode == 'train':
      self._build_train_op()
    self.summaries = tf.merge_all_summaries() 
Example #22
Source File: seq2seq_attention_model.py    From Action_Recognition_Zoo with MIT License
def build_graph(self):
    self._add_placeholders()
    self._add_seq2seq()
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    if self._hps.mode == 'train':
      self._add_train_op()
    self._summaries = tf.merge_all_summaries() 
Example #23
Source File: model.py    From bi-att-flow with Apache License 2.0
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #24
Source File: inception_eval.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels = image_processing.inputs(dataset)

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)

    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs) 
Example #25
Source File: tfbasemodel.py    From Supply-demand-forecasting with MIT License
def add_visualize_node(self):
        # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
        self.merged = tf.merge_all_summaries()
        self.train_writer = tf.train.SummaryWriter(self.summaries_dir+ '/train',
                                        self.graph)
        self.test_writer = tf.train.SummaryWriter(self.summaries_dir + '/test')

        return 
Example #26
Source File: tensorboard.py    From zipline-tensorboard with MIT License
def __init__(self, log_dir='./logs', max_queue=10, flush_secs=120):
        self.log_dir = log_dir
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter(self.log_dir,
                                             max_queue=max_queue,
                                             flush_secs=flush_secs,
                                             graph_def=None) 
Example #27
Source File: nn_q_table.py    From drivebot with MIT License
def setup_models(self, hidden_layer_size, summary_file):
        # set up the separate core and target networks
        self.core_state, self.core_q_values = build_model("core", self.state_size, self.num_actions, hidden_layer_size)
        self.target_state, self.target_q_values = build_model("target", self.state_size, self.num_actions, hidden_layer_size)

        # build the global copy op that will copy core network onto target
        self.clobber_target_net_op = copy_all_vars(from_namespace="core", to_namespace="target",
                                                   affine_coefficient=self.target_network_update_coeff)

        # left hand side of the bellman update; Q(s1, a)
        self.core_action_mask = tf.placeholder(dtype=tf.float32, shape=[None, self.num_actions],
                                               name="core_action_mask")
        self.core_q_value_for_action = tf.reduce_sum(self.core_q_values * self.core_action_mask)

        # right hand side of bellman update; reward + max_a Q(s2, a')
        self.reward = tf.placeholder(dtype=tf.float32, name="reward")
        self.discount_p = tf.placeholder(dtype=tf.float32, name="discount")
        self.max_target_q_value_plus_reward = self.reward + (self.discount_p * tf.stop_gradient(tf.reduce_max(self.target_q_values)))

        # for loss just use squared loss on the difference
        self.temporal_difference_loss = tf.reduce_mean(tf.pow(self.max_target_q_value_plus_reward - self.core_q_value_for_action, 2))
        self.learning_rate_p = tf.placeholder(dtype=tf.float32, name="learning_rate")
        optimizer = tf.train.GradientDescentOptimizer(self.learning_rate_p)
        #optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, decay=0.9)
        gradients = optimizer.compute_gradients(self.temporal_difference_loss)
        for i, (gradient, variable) in enumerate(gradients):
            if gradient is None:  # eg stop gradient cases
                continue
            gradients[i] = (tf.clip_by_norm(gradient, self.gradient_clip), variable)
            tf.histogram_summary(variable.name, variable)
            tf.histogram_summary(variable.name + '/gradients', gradient)
        tf.scalar_summary("temporal_difference_loss", self.temporal_difference_loss)
        self.train_op = optimizer.apply_gradients(gradients)

        # build session
        self.sess = tf.Session()
        self.sess.run(tf.initialize_all_variables())
        self.summaries = tf.merge_all_summaries()
        self.summary_writer = tf.train.SummaryWriter(summary_file, self.sess.graph_def) 
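A sketch of a single training update using the ops assembled above; the transition values, discount, and learning rate are placeholders:

feed_dict = {self.core_state: state,
             self.core_action_mask: action_mask,  # one-hot over the action taken
             self.reward: reward,
             self.target_state: next_state,
             self.discount_p: 0.99,
             self.learning_rate_p: 0.01}
_, summaries = self.sess.run([self.train_op, self.summaries], feed_dict=feed_dict)
self.summary_writer.add_summary(summaries, global_step=step)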
Example #28
Source File: g_model.py    From medSynthesis with MIT License
def build_model(self):
        self.inputMR=tf.placeholder(tf.float32, shape=[None, self.depth_MR, self.height_MR, self.width_MR, 1])
        self.CT_GT=tf.placeholder(tf.float32, shape=[None, self.depth_CT, self.height_CT, self.width_CT, 1])
        batch_size_tf = tf.shape(self.inputMR)[0]  #variable batchsize so we can test here
        self.train_phase = tf.placeholder(tf.bool, name='phase_train')
        self.G = self.generator(self.inputMR,batch_size_tf)
        print 'shape output G ',self.G.get_shape()
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.g_loss=lp_loss(self.G, self.CT_GT, self.l_num, batch_size_tf)
        print 'learning rate ',self.learning_rate
        #self.g_optim =tf.train.AdamOptimizer(self.learning_rate).minimize(self.g_loss)
        #tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.g_loss)
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter("./summaries", self.sess.graph)
        self.saver = tf.train.Saver() 
Example #29
Source File: d_model_singleScale.py    From medSynthesis with MIT License
def build_model(self):
        self.inputMR=tf.placeholder(tf.float32, shape=[None, self.depth_MR, self.height_MR, self.width_MR, 1])
        self.CT_GT=tf.placeholder(tf.float32, shape=[None, self.depth_CT, self.height_CT, self.width_CT, 1])
        batch_size_tf = tf.shape(self.inputMR)[0]  #variable batchsize so we can test here
        self.train_phase = tf.placeholder(tf.bool, name='phase_train')
        self.G = self.generator(self.inputMR,batch_size_tf)
        print 'shape output G ',self.G.get_shape()
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.g_loss=lp_loss(self.G, self.CT_GT, self.l_num, batch_size_tf)
        print 'learning rate ',self.learning_rate
        self.g_optim =tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.g_loss)
        self.merged = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter("./summaries", self.sess.graph)
        self.saver = tf.train.Saver() 
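A sketch of the training step this build_model supports; the batch arrays and step counter are placeholders:

_, loss, summary_str = self.sess.run(
    [self.g_optim, self.g_loss, self.merged],
    feed_dict={self.inputMR: mr_batch,
               self.CT_GT: ct_batch,
               self.train_phase: True})
self.writer.add_summary(summary_str, global_step=step)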
Example #30
Source File: cifar10_eval.py    From dlbench with MIT License
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10_input.inputs(eval_data, FLAGS.data_dir, FLAGS.batch_size)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    #variable_averages = tf.train.ExponentialMovingAverage(
    #    cifar10.MOVING_AVERAGE_DECAY)
    #variables_to_restore = variable_averages.variables_to_restore()
    #saver = tf.train.Saver(variables_to_restore)
    saver = tf.train.Saver()

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)