Python tensorflow.merge_summary() Examples

The following are 30 code examples of tensorflow.merge_summary(), collected from open-source projects; the source file and originating project are noted above each example. tf.merge_summary() takes a list of summary ops and returns a single op whose output is a Summary protobuf containing the union of their values. It belongs to the pre-1.0 API: from TensorFlow 1.0 onward the same function is spelled tf.summary.merge().
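As a quick orientation, here is a minimal, self-contained sketch of the pattern every example below follows. The loss and weights tensors are hypothetical stand-ins, and the commented lines show the equivalent TensorFlow 1.0+ spelling.

import tensorflow as tf

# Hypothetical values to monitor (stand-ins for real model tensors).
loss = tf.constant(0.5, name='loss')
weights = tf.random_normal([10, 10], name='weights')

# Create individual summaries, then merge them into a single op (TF 0.x API).
loss_sum = tf.scalar_summary('loss', loss)
weights_sum = tf.histogram_summary('weights', weights)
merged = tf.merge_summary([loss_sum, weights_sum])

# Equivalent spelling in TensorFlow 1.0 and later:
# merged = tf.summary.merge([tf.summary.scalar('loss', loss),
#                            tf.summary.histogram('weights', weights)])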
Example #1
Source File: trainer.py    From StackGAN with MIT License
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_g'):
                all_sum['hr_g'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_d'):
                all_sum['hr_d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
        self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
        self.hist_sum = tf.merge_summary(all_sum['hist']) 
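Each merged op above (self.g_sum, self.d_sum, and so on) evaluates to one serialized Summary protobuf covering its whole group, so related values can be fetched and logged in a single run call. A hypothetical consumer, assuming an existing session, feed dict, and tf.train.SummaryWriter, would look like:

summary_str = sess.run(self.g_sum, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)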
Example #2
Source File: trainer.py    From StackGAN with MIT License
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test]) 
Example #3
Source File: trainer.py    From how_to_convert_text_to_images with MIT License
def visualization(self, n):
        fake_sum_train, superimage_train =\
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test =\
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test])

        hr_fake_sum_train, hr_superimage_train =\
            self.visualize_one_superimage(self.hr_fake_images[:n * n],
                                          self.hr_images[:n * n, :, :, :],
                                          n, "hr_train")
        hr_fake_sum_test, hr_superimage_test =\
            self.visualize_one_superimage(self.hr_fake_images[n * n:2 * n * n],
                                          self.hr_images[n * n:2 * n * n],
                                          n, "hr_test")
        self.hr_superimages =\
            tf.concat(0, [hr_superimage_train, hr_superimage_test])
        self.hr_image_summary =\
            tf.merge_summary([hr_fake_sum_train, hr_fake_sum_test]) 
Example #4
Source File: sampling_ops_test.py    From deep_image_model with Apache License 2.0
def testCanBeCalledMultipleTimes(self):
    batch_size = 20
    val_input_batch = [tf.zeros([2, 3, 4])]
    lbl_input_batch = tf.ones([], dtype=tf.int32)
    probs = np.array([0, 1, 0, 0, 0])
    batches = tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    batches += tf.contrib.training.stratified_sample(
        val_input_batch, lbl_input_batch, probs, batch_size, init_probs=probs)
    summary_op = tf.merge_summary(tf.get_collection(tf.GraphKeys.SUMMARIES))

    with self.test_session() as sess:
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord)

      sess.run(batches + (summary_op,))

      coord.request_stop()
      coord.join(threads) 
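Merging everything registered under tf.GraphKeys.SUMMARIES, as this test does, is what the convenience wrapper tf.merge_all_summaries() does internally, so the following line should be interchangeable with the explicit merge above whenever the collection is non-empty (the wrapper returns None for an empty collection):

summary_op = tf.merge_all_summaries()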
Example #5
Source File: trainer.py    From how_to_convert_text_to_images with MIT License
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_g'):
                all_sum['hr_g'].append(tf.scalar_summary(k, v))
            elif k.startswith('hr_d'):
                all_sum['hr_d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
        self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
        self.hist_sum = tf.merge_summary(all_sum['hist']) 
Example #6
Source File: trainer.py    From FRU with MIT License
def create_summaries(self, verbose=2):
        """ Create summaries with `verbose` level """

        summ_collection = self.name + "_training_summaries"

        if verbose in [3]:
            # Summarize activations
            activations = tf.get_collection(tf.GraphKeys.ACTIVATIONS)
            summarize_activations(activations, summ_collection)
        if verbose in [2, 3]:
            # Summarize variable weights
            summarize_variables(self.train_vars, summ_collection)
        if verbose in [1, 2, 3]:
            # Summarize gradients
            summarize_gradients(self.grad, summ_collection)

        self.summ_op = merge_summary(tf.get_collection(summ_collection)) 
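Note that merge_summary is called unqualified here: the tflearn summarizer module binds it to a version-appropriate alias at import time rather than going through tf directly. Judging from the tf012 flag visible in Example #8, the alias is resolved roughly like this sketch (the exact import in the project may differ):

try:
    merge_summary = tf.summary.merge    # TF >= 0.12
    tf012 = True
except AttributeError:
    merge_summary = tf.merge_summary    # older releases
    tf012 = False

The verbose levels are cumulative: 1 adds gradient summaries, 2 adds variable weights on top of that, and 3 adds activations as well.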
Example #7
Source File: summarizer.py    From FRU with MIT License
def summarize_variables(train_vars=None, summary_collection="tflearn_summ"):
    """ summarize_variables.

    Arguments:
        train_vars: list of `Variable`. The variable weights to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    if not train_vars: train_vars = tf.trainable_variables()
    summaries.add_trainable_vars_summary(train_vars, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection)) 
Example #8
Source File: summarizer.py    From FRU with MIT License
def summarize(value, type, name, summary_collection="tflearn_summ"):
    """ summarize.

    A custom summarization op.

    Arguments:
        value: `Tensor`. The tensor value to monitor.
        type: `str` among 'histogram', 'scalar'. The data monitoring type.
        name: `str`. A name for this summary.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    if tf012:
        name = name.replace(':', '_')
    summaries.get_summary(type, name, value, summary_collection)
    return merge_summary(tf.get_collection(summary_collection)) 
Example #9
Source File: summarizer.py    From FRU with MIT License
def summarize_gradients(grads, summary_collection="tflearn_summ"):
    """ summarize_gradients.

    Arguments:
        grads: list of `Tensor`. The gradients to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    summaries.add_gradients_summary(grads, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection)) 
Example #10
Source File: model.py    From adversarial-squad with MIT License
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
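Note that the first assignment to self.summary is dead code: tf.merge_all_summaries() merges every summary in the graph, and the next line immediately overwrites it with a merge restricted to this model's scope, presumably so that several model instances sharing one graph do not pick up each other's summaries. The same two-line pattern appears verbatim in Examples #11, #12, and #26 through #28.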
Example #11
Source File: model.py    From bi-att-flow with Apache License 2.0
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #12
Source File: model.py    From convai-bot-1337 with GNU General Public License v3.0
def __init__(self, config, scope):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, M, None], name='x')
        self.cx = tf.placeholder('int32', [N, M, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, M, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, JQ], name='q')
        self.cq = tf.placeholder('int32', [N, JQ, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, JQ], name='q_mask')
        self.y = tf.placeholder('bool', [N, M, JX], name='y')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #13
Source File: trainer.py    From how_to_convert_text_to_images with MIT License
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test]) 
Example #14
Source File: trainer.py    From how_to_convert_text_to_images with MIT License
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hist_sum = tf.merge_summary(all_sum['hist']) 
Example #15
Source File: model.py    From personalized-dialog with MIT License
def _init_summaries(self):
        self.accuracy = tf.placeholder_with_default(0.0, shape=(), name='Accuracy')
        self.accuracy_summary = tf.scalar_summary('Accuracy summary', self.accuracy)

        self.f_pos_summary = tf.histogram_summary('f_pos', self.f_pos)
        self.f_neg_summary = tf.histogram_summary('f_neg', self.f_neg)

        self.loss_summary = tf.scalar_summary('Mini-batch loss', self.loss)
        self.summary_op = tf.merge_summary(
            [
                self.f_pos_summary,
                self.f_neg_summary,
                self.loss_summary
            ]
        ) 
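The accuracy summary is not part of the merged op; presumably it is written separately at evaluation time by feeding the placeholder. A hypothetical validation-time call (sess, writer, and step assumed):

acc_str = sess.run(model.accuracy_summary, feed_dict={model.accuracy: 0.87})
writer.add_summary(acc_str, step)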
Example #16
Source File: model.py    From jiji-with-tensorflow-example with MIT License
def __setup_ops(self):
        cross_entropy = -tf.reduce_sum(self.actual_class * tf.log(self.output))
        self.summary = tf.scalar_summary(self.label, cross_entropy)
        self.train_op = tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)
        self.merge_summaries = tf.merge_summary([self.summary])
        correct_prediction = tf.equal(tf.argmax(self.output,1), tf.argmax(self.actual_class,1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 
Example #17
Source File: icnn.py    From icnn with Apache License 2.0
def __init__(self, inputs, outputs, summary_ops=None, summary_writer=None, session=None):
        self._inputs = inputs if type(inputs) == list else [inputs]
        self._outputs = outputs
        # self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._session = session or tf.get_default_session()
        self._writer = summary_writer 
Example #18
Source File: summarizer.py    From FRU with MIT License
def summarize_activations(activations, summary_collection="tflearn_summ"):
    """ summarize_activations.

    Arguments:
        activations: list of `Tensor`. The activations to monitor.
        summary_collection: A collection to add this summary to and
            also used for returning a merged summary over all its elements.
            Default: 'tflearn_summ'.

    Returns:
        `Tensor`. Merge of all summaries in 'summary_collection'.

    """
    summaries.add_activations_summary(activations, "", "", summary_collection)
    return merge_summary(tf.get_collection(summary_collection)) 
Example #19
Source File: ddpg.py    From icnn with Apache License 2.0
def __init__(self, inputs, outputs, summary_ops=None, summary_writer=None, session=None):
        self._inputs = inputs if type(inputs) == list else [inputs]
        self._outputs = outputs
        self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._session = session or tf.get_default_session()
        self._writer = summary_writer 
Example #20
Source File: naf.py    From icnn with Apache License 2.0
def __init__(self, inputs, outputs, summary_ops=None, summary_writer=None, session=None):
        self._inputs = inputs if type(inputs) == list else [inputs]
        self._outputs = outputs
        self._summary_op = tf.merge_summary(summary_ops) if type(summary_ops) == list else summary_ops
        self._session = session or tf.get_default_session()
        self._writer = summary_writer 
Example #21
Source File: abstract_learning.py    From blocks with GNU General Public License v3.0
def __init__(self, model, loss, train_step, update_summaries):
        """ Creates constructor for an abstract learning setup """

        self.model = model
        self.loss = loss
        self.train_step = train_step
        self.update_summary = tf.merge_summary(update_summaries)
        self.update_iter = 0 
Example #22
Source File: trainer.py    From StackGAN with MIT License
def visualization(self, n):
        fake_sum_train, superimage_train = \
            self.visualize_one_superimage(self.fake_images[:n * n],
                                          self.images[:n * n],
                                          n, "train")
        fake_sum_test, superimage_test = \
            self.visualize_one_superimage(self.fake_images[n * n:2 * n * n],
                                          self.images[n * n:2 * n * n],
                                          n, "test")
        self.superimages = tf.concat(0, [superimage_train, superimage_test])
        self.image_summary = tf.merge_summary([fake_sum_train, fake_sum_test]) 
Example #23
Source File: trainer.py    From StackGAN with MIT License
def define_summaries(self):
        '''Helper function for init_opt'''
        all_sum = {'g': [], 'd': [], 'hist': []}
        for k, v in self.log_vars:
            if k.startswith('g'):
                all_sum['g'].append(tf.scalar_summary(k, v))
            elif k.startswith('d'):
                all_sum['d'].append(tf.scalar_summary(k, v))
            elif k.startswith('hist'):
                all_sum['hist'].append(tf.histogram_summary(k, v))

        self.g_sum = tf.merge_summary(all_sum['g'])
        self.d_sum = tf.merge_summary(all_sum['d'])
        self.hist_sum = tf.merge_summary(all_sum['hist']) 
Example #24
Source File: hooks.py    From pycodesuggest with MIT License
def __init__(self, summary_writer):
        self.summary_writer = summary_writer
        self.title_placeholder = tf.placeholder(tf.string)
        self.value_placeholder = tf.placeholder(tf.float64)
        cur_summary = tf.scalar_summary(self.title_placeholder, self.value_placeholder)
        self.merged_summary_op = tf.merge_summary([cur_summary]) 
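Because both the tag and the value are placeholders, this single merged op can report any named scalar at run time. A hypothetical call site (sess and step assumed):

summary_str = sess.run(self.merged_summary_op,
                       feed_dict={self.title_placeholder: 'dev_accuracy',
                                  self.value_placeholder: 0.93})
self.summary_writer.add_summary(summary_str, step)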
Example #25
Source File: train_rnn_classify.py    From RNN_Text_Classify with Apache License 2.0
def train_step():

    print("loading the dataset...")
    config = Config()
    eval_config=Config()
    eval_config.keep_prob=1.0

    train_data,valid_data,test_data=data_helper.load_data(FLAGS.max_len,batch_size=config.batch_size)

    print("begin training")

    # gpu_config=tf.ConfigProto()
    # gpu_config.gpu_options.allow_growth=True
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-1*FLAGS.init_scale,1*FLAGS.init_scale)
        with tf.variable_scope("model",reuse=None,initializer=initializer):
            model = RNN_Model(config=config,is_training=True)

        with tf.variable_scope("model",reuse=True,initializer=initializer):
            valid_model = RNN_Model(config=eval_config,is_training=False)
            test_model = RNN_Model(config=eval_config,is_training=False)

        #add summary
        # train_summary_op = tf.merge_summary([model.loss_summary,model.accuracy])
        train_summary_dir = os.path.join(config.out_dir,"summaries","train")
        train_summary_writer =  tf.train.SummaryWriter(train_summary_dir,session.graph)

        # dev_summary_op = tf.merge_summary([valid_model.loss_summary,valid_model.accuracy])
        dev_summary_dir = os.path.join(eval_config.out_dir,"summaries","dev")
        dev_summary_writer =  tf.train.SummaryWriter(dev_summary_dir,session.graph)

        #add checkpoint
        checkpoint_dir = os.path.abspath(os.path.join(config.out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.all_variables())


        tf.initialize_all_variables().run()
        global_steps=1
        begin_time=int(time.time())

        for i in range(config.num_epoch):
            print("the %d epoch training..."%(i+1))
            lr_decay = config.lr_decay ** max(i-config.max_decay_epoch,0.0)
            model.assign_new_lr(session,config.lr*lr_decay)
            global_steps=run_epoch(model,session,train_data,global_steps,valid_model,valid_data,train_summary_writer,dev_summary_writer)

            if i% config.checkpoint_every==0:
                path = saver.save(session,checkpoint_prefix,global_steps)
                print("Saved model chechpoint to{}\n".format(path))

        print("the train is finished")
        end_time=int(time.time())
        print("training takes %d seconds already\n"%(end_time-begin_time))
        test_accuracy=evaluate(test_model,session,test_data)
        print("the test data accuracy is %f"%test_accuracy)
        print("program end!") 
Example #26
Source File: model.py    From bi-att-flow with Apache License 2.0
def __init__(self, config, scope, rep=True):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, None, None], name='x')
        self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, None], name='q')
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.y = tf.placeholder('bool', [N, None, None], name='y')
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #27
Source File: model.py    From adversarial-squad with MIT License
def __init__(self, config, scope, rep=True):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, None, None], name='x')
        self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, None], name='q')
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.y = tf.placeholder('bool', [N, None, None], name='y')
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #28
Source File: model.py    From convai-bot-1337 with GNU General Public License v3.0
def __init__(self, config, scope, rep=True):
        self.scope = scope
        self.config = config
        self.global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                           initializer=tf.constant_initializer(0), trainable=False)

        # Define forward inputs here
        N, M, JX, JQ, VW, VC, W = \
            config.batch_size, config.max_num_sents, config.max_sent_size, \
            config.max_ques_size, config.word_vocab_size, config.char_vocab_size, config.max_word_size
        self.x = tf.placeholder('int32', [N, None, None], name='x')
        self.cx = tf.placeholder('int32', [N, None, None, W], name='cx')
        self.x_mask = tf.placeholder('bool', [N, None, None], name='x_mask')
        self.q = tf.placeholder('int32', [N, None], name='q')
        self.cq = tf.placeholder('int32', [N, None, W], name='cq')
        self.q_mask = tf.placeholder('bool', [N, None], name='q_mask')
        self.y = tf.placeholder('bool', [N, None, None], name='y')
        self.y2 = tf.placeholder('bool', [N, None, None], name='y2')
        self.is_train = tf.placeholder('bool', [], name='is_train')
        self.new_emb_mat = tf.placeholder('float', [None, config.word_emb_size], name='new_emb_mat')

        # Define misc
        self.tensor_dict = {}

        # Forward outputs / loss inputs
        self.logits = None
        self.yp = None
        self.var_list = None

        # Loss outputs
        self.loss = None

        self._build_forward()
        self._build_loss()
        self.var_ema = None
        if rep:
            self._build_var_ema()
        if config.mode == 'train':
            self._build_ema()

        self.summary = tf.merge_all_summaries()
        self.summary = tf.merge_summary(tf.get_collection("summaries", scope=self.scope)) 
Example #29
Source File: stacked_dae.py    From StackedDAE with Apache License 2.0
def pretrain_sdae(input_x, shape):
    with tf.Graph().as_default():# as g:
        sess = tf.Session()
        
        sdae = Stacked_DAE(net_shape=shape, session=sess, selfish_layers=False)

        for layer in sdae.get_layers[:-1]:
            with tf.variable_scope("pretrain_{0}".format(layer.which)):
                cost = layer.get_loss
                train_op, global_step = sdae.train(cost, layer=layer.which)

                summary_dir = pjoin(FLAGS.summary_dir, 'pretraining_{0}'.format(layer.which))
                summary_writer = tf.train.SummaryWriter(summary_dir, graph_def=sess.graph_def, flush_secs=FLAGS.flush_secs)
                summary_vars = [layer.get_w_b[0], layer.get_w_b[1]]
                        
                hist_summaries = [tf.histogram_summary(v.op.name, v) for v in summary_vars]
                hist_summaries.append(sdae.loss_summaries)
                summary_op = tf.merge_summary(hist_summaries)

                '''
                 You can get all the trainable variables using tf.trainable_variables(),
                 and exclude the variables which should be restored from the pretrained model.
                 Then you can initialize the other variables.
                '''

                layer.vars_to_init.append(global_step)
                sess.run(tf.initialize_variables(layer.vars_to_init))

                print("\n\n")
                print "|  Layer   |   Epoch    |   Step   |    Loss    |"
                
                for step in xrange(FLAGS.pretraining_epochs * input_x.train.num_examples):
                    feed_dict = fill_feed_dict_dae(input_x.train, sdae._x)
    
                    loss, _ = sess.run([cost, train_op], feed_dict=feed_dict)
                    
                    if step % 1000 == 0:
                        summary_str = sess.run(summary_op, feed_dict=feed_dict)
                        summary_writer.add_summary(summary_str, step)
                        
                        output = "| Layer {0}  | Epoch {1}    |  {2:>6}  | {3:10.4f} |"\
                                     .format(layer.which, step // input_x.train.num_examples + 1, step, loss)
                        print output
    
                # Note: Use this style if you are using the shelfish_layer choice.
                # This way you keep the activated data to be fed to the next layer.
                # next_dataset = sdae.genrate_next_dataset(from_dataset=input_x.all, layer=layer.which)
                # input_x = load_data_sets_pretraining(next_dataset, split_only=False)

        # Save Weights and Biases for all layers
        for n in xrange(len(shape) - 2):
            w = sdae.get_layers[n].get_w
            b = sdae.get_layers[n].get_b
            W, B = sess.run([w, b])

            np.savetxt(pjoin(FLAGS.output_dir, 'Layer_' + str(n) + '_Weights.txt'), np.asarray(W), delimiter='\t')
            np.savetxt(pjoin(FLAGS.output_dir, 'Layer_' + str(n) + '_Biases.txt'), np.asarray(B), delimiter='\t')
            make_heatmap(W, 'weights_'+ str(n))

    print "\nPretraining Finished...\n"
    return sdae 
Example #30
Source File: network.py    From dist-dqn with MIT License
def _init_network(self, config):
    # Placeholders
    self.x_placeholder = tf.placeholder(tf.float32, [None] + self.input_shape)
    self.q_placeholder = tf.placeholder(tf.float32, [None])
    self.action_placeholder = tf.placeholder(tf.float32, 
                                             [None, self.num_actions])

    summaries = []

    # Params and layers
    with tf.device(self.ps_device):
      params = self._init_params(
        config,
        input_shape=self.input_shape,
        output_size=self.num_actions,
        summaries=summaries,
      )
    self.q_output, reg_loss = self._init_layers(
      config,
      inputs=self.x_placeholder,
      params=params,
      summaries=summaries,
    )

    # Loss and training
    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    loss = self._init_loss(
      config,
      q=self.q_output,
      expected_q=self.q_placeholder,
      actions=self.action_placeholder,
      reg_loss=reg_loss,
      summaries=summaries,
    )
    self.train_op = self._init_optimizer(
      config,
      params=params,
      loss=loss,
      num_replicas=self.num_replicas,
      global_step=self.global_step,
      summaries=summaries,
    )

    # Target network
    self.target_q_output, self.target_update_ops = self._init_target_network(
      config,
      inputs=self.x_placeholder,
      input_shape=self.input_shape,
      output_size=self.num_actions,
      params=params,
      ps_device=self.ps_device,
      worker_device=self.worker_device,
      summaries=summaries,
    )

    # Merge all the summaries in this graph
    if summaries:
      self.summary_op = tf.merge_summary(summaries)
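The trailing if summaries: check is worth noting: tf.merge_summary expects a non-empty list of summaries, so the guard lets configurations that create no summaries skip the op entirely instead of failing at graph-construction time; callers then have to check for a missing summary_op before trying to run it.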