Python tensorflow.Session() Examples

The following code examples show how to use tensorflow.Session(). They are extracted from open-source Python projects.
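Nearly all of the examples follow one of two patterns: a session used as a context manager, which is closed automatically on exit, or a session created explicitly whose owner is responsible for calling close(). A minimal sketch of both patterns under the TensorFlow 1.x API (the constants here are illustrative only):

import tensorflow as tf

a = tf.constant(2)
b = tf.constant(3)

# Pattern 1: context manager; the session is closed automatically on exit.
with tf.Session() as sess:
    print(sess.run(a + b))  # 5

# Pattern 2: explicit lifecycle; the caller must close the session.
sess = tf.Session()
print(sess.run(a + b))  # 5
sess.close()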

Example 1
Project: variational-text-tensorflow   Author: carpedm20   File: main.py    (MIT License)
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  data_path = "./data/%s" % FLAGS.dataset
  reader = TextReader(data_path)

  with tf.Session() as sess:
    m = MODELS[FLAGS.model]
    model = m(sess, reader, dataset=FLAGS.dataset,
              embed_dim=FLAGS.embed_dim, h_dim=FLAGS.h_dim,
              learning_rate=FLAGS.learning_rate, max_iter=FLAGS.max_iter,
              checkpoint_dir=FLAGS.checkpoint_dir)

    if FLAGS.forward_only:
      model.load(FLAGS.checkpoint_dir)
    else:
      model.train(FLAGS)

    while True:
      text = input(" [*] Enter text to test: ")
      model.sample(5, text) 
Example 2
Project: RaspberryPi-Robot   Author: timestocome   File: label_cats.py    (MIT License)
def read_tensor_from_image_file(file_name='test.jpg', input_height=128, input_width=128,
                                input_mean=0, input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  image_reader = tf.image.decode_jpeg(file_reader, channels = 3, name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  with tf.Session() as sess:
    result = sess.run(normalized)

  return result 
Example 3
Project: deep-summarization   Author: harpribot   File: sequenceNet.py    (MIT License)
def _start_session(self):
        """
        Starts the TensorFlow session

        :return: None
        """
        self.sess.run(tf.global_variables_initializer())
        # initialize the saver node
        # print tf.GraphKeys.GLOBAL_VARIABLES
        self.saver = tf.train.Saver(tf.global_variables())
        # get the latest checkpoint
        last_checkpoint_path = self.checkpointer.get_last_checkpoint()
        if last_checkpoint_path is not None:
            print('Previous saved tensorflow objects found... Extracting...')
            # restore the tensorflow variables
            self.saver.restore(self.sess, last_checkpoint_path)
            print('Extraction Complete. Moving Forward....')
Example 4
Project: almond-nnparser   Author: Stanford-Mobisocial-IoT-Lab   File: run_server.py    (license)
def load_language(app, tokenizer_service, tag, model_dir):
    config = Config.load(['./default.conf', './default.' + tag + '.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)
    
    graph = tf.Graph()
    session = tf.Session(graph=graph)
    with graph.as_default():
        # Force everything to run on the CPU; we run on single inputs,
        # so there is little point in going through the GPU
        with tf.device('/cpu:0'):
            model.build()
            loader = tf.train.Saver()

        with session.as_default():
            loader.restore(session, os.path.join(model_dir, 'best'))
    tokenizer = Tokenizer(tokenizer_service, tag)
    app.add_language(tag, LanguageContext(tag, tokenizer, session, config, model))
    print('Loaded language ' + tag) 
Example 5
Project: tensorflow_qrnn   Author: icoxfog417   File: test_tf_qrnn_forward.py    (MIT License)
def test_qrnn_linear_forward(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_linear:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=1)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape) 
Example 6
Project: tensorflow_qrnn   Author: icoxfog417   File: test_tf_qrnn_forward.py    (MIT License)
def test_qrnn_with_previous(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_with_previous:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=2)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape) 
Example 7
Project: tensorflow_qrnn   Author: icoxfog417   File: test_tf_qrnn_forward.py    (MIT License)
def test_qrnn_convolution(self):
        batch_size = 100
        sentence_length = 5
        word_size = 10
        size = 5
        data = self.create_test_data(batch_size, sentence_length, word_size)

        with tf.Graph().as_default() as q_conv:
            qrnn = QRNN(in_size=word_size, size=size, conv_size=3)
            X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
            forward_graph = qrnn.forward(X)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                hidden = sess.run(forward_graph, feed_dict={X: data})
                self.assertEqual((batch_size, size), hidden.shape) 
Example 8
Project: yolo_tensorflow   Author: hizhangp   File: test.py    (MIT License)
def __init__(self, net, weight_file):
        self.net = net
        self.weights_file = weight_file

        self.classes = cfg.CLASSES
        self.num_class = len(self.classes)
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.boxes_per_cell = cfg.BOXES_PER_CELL
        self.threshold = cfg.THRESHOLD
        self.iou_threshold = cfg.IOU_THRESHOLD
        self.boundary1 = self.cell_size * self.cell_size * self.num_class
        self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        print('Restoring weights from: ' + self.weights_file)
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.weights_file) 
Example 9
Project: dl-classification   Author: matthieuo   File: inference.py    (GNU General Public License v3.0)
def __init__(self, check_):
        self.img_feed = tf.placeholder(tf.float32)

        self.output_logits = tf.nn.softmax(
            models.foodv_test(
                self.img_feed,
                reg_val=0.0,
                is_train=False,
                dropout_p=1.0))

        self.sess = tf.Session()

        self.checkpoint_name = check_

        saver = tf.train.Saver()
        print("loading model...")

        saver.restore(self.sess, self.checkpoint_name)

        print("Model loaded !") 
Example 10
Project: tf_rnnlm   Author: Ubiqus   File: loglikes.py    (Apache License 2.0)
def _run(self):
    with tf.Session() as session:
      self.io.restore_session(session)

      inputs = sys.stdin
      singsen = SingleSentenceData()
      scounter = SpeedCounter().start()
      while True:
        senlen = singsen.read_from_file(sys.stdin, self.io.w2id)
        if senlen is None:
          break
        if senlen < 2:
          print(-9999)
          continue

        o = run_epoch(session, self.test_model, singsen)
        scounter.next()
        if self.params.progress and scounter.val % 20 == 0:
          print("\rLoglikes per secs: %f" % scounter.speed, end="", file=sys.stderr)
        print("%f" % o) 
Example 11
Project: deep-learning   Author: ljanyst   File: test-embedding.py    (license)
def __init__(self, embedding):
        self.sess         = tf.Session()
        self.inputs       = tf.placeholder(tf.float32,
                                           [None, embedding.shape[1]],
                                           name='inputs')
        self.test_vec     = tf.placeholder(tf.float32, [1, embedding.shape[1]],
                                           name='test_vec')
        self.cos_distance = tf.matmul(self.inputs, tf.transpose(self.test_vec))

        #-----------------------------------------------------------------------
        # Compute normalized embedding matrix
        #-----------------------------------------------------------------------
        row_sum    = tf.reduce_sum(tf.square(self.inputs), axis=1,
                                   keep_dims=True)
        norm       = tf.sqrt(row_sum)
        self.normalized = self.inputs / norm
        self.embedding = self.sess.run(self.normalized,
                                       feed_dict={self.inputs: embedding})

    #--------------------------------------------------------------------------- 
Example 12
Project: vae-npvc   Author: JeremyCCHsu   File: wrapper.py    (license)
def configure_gpu_settings(gpu_cfg=None):
    session_conf = None
    if gpu_cfg:
        with open(gpu_cfg) as f:
            cfg = json.load(f)
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=cfg['per_process_gpu_memory_fraction'])
        session_conf = tf.ConfigProto(
            allow_soft_placement=cfg['allow_soft_placement'],
            log_device_placement=cfg['log_device_placement'],
            inter_op_parallelism_threads=cfg['inter_op_parallelism_threads'],
            intra_op_parallelism_threads=cfg['intra_op_parallelism_threads'],
            gpu_options=gpu_options)
        # Timeline
        # jit_level = 0
        # session_conf.graph_options.optimizer_options.global_jit_level = jit_level
    #     sess = tf.Session(
    #         config=session_conf)
    # else:
    #     sess = tf.Session()
    return session_conf 
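Note that configure_gpu_settings only builds the ConfigProto; creating the session is left to the caller, as the commented-out lines suggest. A minimal usage sketch (the gpu.json filename is illustrative):

session_conf = configure_gpu_settings('gpu.json')
sess = tf.Session(config=session_conf)  # config=None falls back to TF defaults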
Example 13
Project: HandDetection   Author: YunqiuXu   File: train_val.py    (license)
def train_net(network, imdb, roidb, valroidb, output_dir, tb_dir,
              pretrained_model=None,
              max_iters=40000):
  """Train a Faster R-CNN network."""
  roidb = filter_roidb(roidb)
  valroidb = filter_roidb(valroidb)

  tfconfig = tf.ConfigProto(allow_soft_placement=True)
  tfconfig.gpu_options.allow_growth = True

  with tf.Session(config=tfconfig) as sess:
    sw = SolverWrapper(sess, network, imdb, roidb, valroidb, output_dir, tb_dir,
                       pretrained_model=pretrained_model)
    print('Solving...')
    sw.train_model(sess, max_iters)
    print('done solving') 
Example 14
Project: cxflow-tensorflow   Author: Cognexa   File: sparse_test.py    (license)
def test_dense_to_sparse(self):
        """ Test if `dense_to_sparse` works properly."""

        with tf.Session().as_default():
            dense = tf.constant([[1., 2., 0.], [0., 0., 3.]], dtype=tf.float32)

            sparse = dense_to_sparse(dense)

            self.assertTrue(np.array_equal(sparse.indices.eval(), np.array([[0, 0], [0, 1], [1, 2]])))
            self.assertTrue(np.array_equal(sparse.values.eval(), np.array([1., 2., 3.])))

            mask = tf.constant([[0, 1, 0], [1, 0, 0]], dtype=tf.int32)

            masked = dense_to_sparse(dense, mask)
            self.assertTrue(np.array_equal(masked.indices.eval(), np.array([[0, 1], [1, 0]])))
            self.assertTrue(np.array_equal(masked.values.eval(), np.array([2., 0.]))) 
Example 15
Project: cxflow-tensorflow   Author: Cognexa   File: repeat_test.py    (license)
def test_repeat(self):
        """ Test if `repeat` works the same as np.repeat."""

        with tf.Session().as_default():
            # try different tensor types
            for npdtype, tfdtype in [(np.int32, tf.int32), (np.float32, tf.float32)]:
                for init_value in [np.array([0, 1, 2, 3], dtype=npdtype),
                                   np.array([[0, 1], [2, 3], [4, 5]], dtype=npdtype)]:
                    # and all their axes
                    for axis in range(len(init_value.shape)):
                        for repeats in [1, 2, 3, 11]:
                            tensor = tf.constant(init_value, dtype=tfdtype)

                            repeated_value = repeat(tensor, repeats=repeats, axis=axis).eval()
                            expected_value = np.repeat(init_value, repeats=repeats, axis=axis)

                            self.assertTrue(np.all(repeated_value == expected_value)) 
Example 16
Project: cxflow-tensorflow   Author: Cognexa   File: utils_test.py    (license)
def test_create_optimizer(self):
        """Test if create optimizer does work with tf optimizers."""

        optimizer_config = {'learning_rate': 0.1}

        # test missing required entry `class`
        self.assertRaises(AssertionError, create_optimizer, optimizer_config)

        optimizer_config['class'] = 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer'

        with tf.Session().as_default():
            # test if the optimizer is created correctly
            optimizer = create_optimizer(optimizer_config)
            self.assertIsInstance(optimizer, tf.train.GradientDescentOptimizer)

            # test if learning_rate variable is created with the correct value
            lr_tensor = tf.get_default_graph().get_tensor_by_name('learning_rate:0')
            tf.get_default_session().run(tf.global_variables_initializer())
            self.assertAlmostEqual(lr_tensor.eval(), 0.1)

        optimizer_config2 = {'learning_rate': 0.1, 'class': 'tensorflow.python.training.momentum.MomentumOptimizer'}

        # test missing required argument (momentum in this case)
        with tf.Graph().as_default():
            self.assertRaises(TypeError, create_optimizer, optimizer_config2) 
Example 17
Project: VAE-MF-TensorFlow   Author: arongdari   File: movielens_vae_test.py    (MIT License)
def cross_validation():
    M = read_dataset()
    n_fold = 10

    rating_idx = np.array(M.nonzero()).T
    kf = KFold(n_splits=n_fold, random_state=0)

    with tf.Session() as sess:
        model = VAEMF(sess, num_user, num_item,
                      hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                      latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param)

        for i, (train_idx, test_idx) in enumerate(kf.split(rating_idx)):
            print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(i,
                                                                             n_fold, train_idx.size, test_idx.size))
            model.train(M, train_idx=train_idx,
                        test_idx=test_idx, n_steps=n_steps) 
Example 18
Project: VAE-MF-TensorFlow   Author: arongdari   File: movielens_test.py    (MIT License)
def cross_validation():
    M = read_dataset()
    n_fold = 10

    rating_idx = np.array(M.nonzero()).T
    kf = KFold(n_splits=n_fold, random_state=0)

    with tf.Session() as sess:
        model = VAEMF(sess, num_user, num_item,
                      hidden_encoder_dim=hidden_encoder_dim, hidden_decoder_dim=hidden_decoder_dim,
                      latent_dim=latent_dim, output_dim=output_dim, learning_rate=learning_rate, batch_size=batch_size, reg_param=reg_param, one_hot=one_hot)

        for i, (train_idx, test_idx) in enumerate(kf.split(rating_idx)):
            print("{0}/{1} Fold start| Train size={2}, Test size={3}".format(i,
                                                                             n_fold, train_idx.size, test_idx.size))
            model.train(M, train_idx=train_idx,
                        test_idx=test_idx, n_steps=n_steps) 
Example 19
Project: lung-cancer-detector   Author: YichenGong   File: unet.py    (MIT License)
def predict(self, model_path, x_test):
        """
        Uses the model to create a prediction for the given data
        
        :param model_path: path to the model checkpoint to restore
        :param x_test: Data to predict on. Shape [n, nx, ny, channels]
        :returns prediction: The unet prediction Shape [n, px, py, labels] (px=nx-self.offset/2) 
        """
        
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            # Initialize variables
            sess.run(init)
        
            # Restore model weights from previously saved model
            self.restore(sess, model_path)
            
            y_dummy = np.empty((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))
            prediction = sess.run(self.predicter, feed_dict={self.x: x_test, self.y: y_dummy, self.keep_prob: 1.})
            
        return prediction 
Example 20
Project: lung-cancer-detector   Author: YichenGong   File: aggressive_multi_head_UNET_2d.py    (MIT License)
def start(self, restore=False):
		self._sess = tf.Session()
		self._init = tf.global_variables_initializer()
		self._saver = tf.train.Saver()

		self._summary = tf.summary.merge_all()
		self._summary_writer = tf.summary.FileWriter(self.config.model_save_path, graph=self._sess.graph)
		self._summary_writer.flush()

		self._sess.run(self._init)

		if restore:
			checkpoint = tf.train.get_checkpoint_state(self.config.model_save_path)
			if checkpoint and checkpoint.model_checkpoint_path:
				self._saver.restore(self._sess, checkpoint.model_checkpoint_path)

		self._started = True 
Example 21
Project: DeepWorks   Author: daigo0927   File: train.py    (license)
def __init__(self,
                 z_dim, image_size,
                 lr_d, lr_g):

        self.sess = tf.Session()

        self.z_dim = z_dim
        self.image_size = image_size

        self.gen = GeneratorDeconv(input_size = z_dim,
                                   image_size = image_size)
        self.disc = Discriminator()

        self._build_graph(lr_d = lr_d, lr_g = lr_g)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer()) 
Example 22
Project: DeepWorks   Author: daigo0927   File: train.py    (license)
def __init__(self,
                 label_size,
                 z_dim, image_size,
                 lr_d, lr_g):

        self.sess = tf.Session()

        self.label_size = label_size
        self.z_dim = z_dim
        self.image_size = image_size

        self.gen = GeneratorDeconv(input_size = z_dim+label_size,
                                   image_size = image_size)
        self.disc = Discriminator()

        self._build_graph(lr_d = lr_d, lr_g = lr_g)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer()) 
Example 23
Project: kaggle-review   Author: daxiongshu   File: BaseUnet.py    (license)
def predictPL(self):
        B = self.flags.batch_size
        W,H,C = self.flags.width, self.flags.height, self.flags.color
        inputs = tf.placeholder(dtype=tf.float32,shape=[None,H,W,C])

        #with open(self.flags.pred_path,'w') as f:
        #    pass

        self._build(inputs,resize=False)
        counter = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            for imgs,imgnames in self.DATA.test_generator():
                pred = sess.run(self.logit,feed_dict={inputs:imgs})
                np.save("%s/%d.npy"%(self.flags.pred_path,counter),{"pred":pred,"name":imgnames})
                counter+=len(imgs)
                if counter // B % 10 == 0:
                    print_mem_time("%d images predicted"%counter)

    # train with placeholders 
Example 24
Project: kaggle-review   Author: daxiongshu   File: BaseModel.py    (license)
def predict_from_placeholder(self,activation=None):
        self._build()
        self._get_summary()
        if activation is not None:
            self.logit = self._activate(self.logit,activation)
        with open(self.flags.pred_path,'w') as f:
            pass
        count = 0
        with tf.Session() as sess:
            self.sess = sess
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            if self.flags.log_path and self.flags.visualize is not None:
                summary_writer = tf.summary.FileWriter(self.flags.log_path, sess.graph)
            for batch in self._batch_gen_test():
                x,_,epoch = batch
                if self.flags.log_path and self.flags.visualize is not None:
                    summary,pred = sess.run([self.summ_op,self.logit],feed_dict={self.inputs:x,self.is_training:0})
                    summary_writer.add_summary(summary, count)
                else:
                    pred = sess.run(self.logit,feed_dict={self.inputs:x,self.is_training:0})
                count+=1
                if count%self.flags.verbosity == 0:
                    print_mem_time("Epoch %d Batch %d "%(epoch,count))
                self.write_pred(pred) 
Example 25
Project: text_classification   Author: brightmart   File: p71_TextRCNN_model.py    (license)
def test():
    # Below is a function test; to use this for text classification, first transform each
    # sentence into indices of the vocabulary, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1  # 0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1])  # [batch_size] labels
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test() 
Example 26
Project: text_classification   Author: brightmart   File: p5_fastTextB_model.py    (license)
def test():
    # Below is a function test; to use this for text classification, first transform each
    # sentence into indices of the vocabulary, then feed the data to the graph.
    num_classes=19
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1
    fastText=fastTextB(num_classes, learning_rate, batch_size, decay_steps, decay_rate,5,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length),dtype=np.int32) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1], dtype=np.int32)  # [batch_size] labels
            loss,acc,predict,_=sess.run([fastText.loss_val,fastText.accuracy,fastText.predictions,fastText.train_op],
                                        feed_dict={fastText.sentence:input_x,fastText.labels:input_y})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test() 
Example 27
Project: text_classification   Author: brightmart   File: p8_TextRNN_model.py    (license)
def test():
    # Below is a function test; to use this for text classification, first transform each
    # sentence into indices of the vocabulary, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1  # 0.5
    textRNN=TextRNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y = np.array([1, 0, 1, 1, 1, 2, 1, 1])  # [batch_size] labels
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict) 
Example 28
Project: tfutils   Author: neuroailab   File: test_base.py    (MIT License)
def custom_train_loop(sess, train_targets, **loop_params):
        """Define Custom training loop.

        Args:
            sess (tf.Session): Current tensorflow session.
            train_targets (list): Description.
            **loop_params: Optional kwargs needed to perform custom train loop.

        Returns:
            dict: A dictionary containing train targets evaluated by the session.

        """
        train_results = sess.run(train_targets)
        for i, result in enumerate(train_results):
            print('Model {} has loss {}'.format(i, result['loss']))
        return train_results 
Example 29
Project: tfutils   Author: neuroailab   File: test_dbinterface.py    (MIT License)
def setUp(self):
        """Set up class before _each_ test method is executed.

        Creates a tensorflow session and instantiates a dbinterface.

        """
        self.setup_model()
        self.sess = tf.Session(
            config=tf.ConfigProto(
                allow_soft_placement=True,
                gpu_options=tf.GPUOptions(allow_growth=True),
                log_device_placement=self.params['log_device_placement'],
                inter_op_parallelism_threads=self.params['inter_op_parallelism_threads']))

        # TODO: Determine whether this should be called here or
        # in dbinterface.initialize()
        self.sess.run(tf.global_variables_initializer())

        self.dbinterface = base.DBInterface(sess=self.sess,
                                            params=self.params,
                                            cache_dir=self.CACHE_DIR,
                                            save_params=self.save_params,
                                            load_params=self.load_params)

        self.step = 0 
Example 30
Project: tfutils   Author: neuroailab   File: test.py    (MIT License)
def custom_train_loop(sess, train_targets, **loop_params):
    """Define Custom training loop.

    Args:
        sess (tf.Session): Current tensorflow session.
        train_targets (list): Description.
        **loop_params: Optional kwargs needed to perform custom train loop.

    Returns:
        dict: A dictionary containing train targets evaluated by the session.

    """
    print('Calling custom training loop...')
    train_results = sess.run(train_targets)
    for i, result in enumerate(train_results):
        print('Model {} has loss {}'.format(i, result['loss']))
    return train_results 
Example 31
Project: tfutils   Author: neuroailab   File: test_data_tfrecords.py    (MIT License)
def test_ops():
    """Tests the basic init_ops funcions.
    """
    dp = d.TFRecordsParallelByFileProvider(source_paths,
                                           trans_dicts=trans_dicts,
                                           n_threads=4,
                                           batch_size=20,
                                           shuffle=False)
    sess = tf.Session()
    tf.train.start_queue_runners(sess=sess)

    N = 1000
    for i in range(N):
        res = sess.run([[fq.dequeue() for fq in fqs] for fqs in dp.file_queues])
        x, y = res[0]
        print('%d of %d' % (i, N))
        assert x.split('/')[-1] == y.split('/')[-1] 
Example 32
Project: tfutils   Author: neuroailab   File: data.py    (MIT License)
def get_input_op(self, fq, *args, **kwargs):
        """
        This is the main method that returns a tensorflow data reading operation.

        This method will get called n_threads * n_attrs times in the method init_ops (see above).
        Specifically, it is called once for each thread id and each attribute group.

        The arguments are:
             fq:  filename queue object.  When run in a tf session, this object will act
                  as a queue of filenames.  When fq.dequeue() is called in a tf.Session, it
                  will produce the next filename to begin reading from.   Note: it only makes
                  sense to dequeue from fq if the current file being read has been completed.
             *args: any positional arguments to the reader.  These are specified on a
                  per-attribute-group basis (e.g. across thread ids, calls for the same
                  attribute group will get the same args).
             **kwargs: any keyword arguments to the reader.  Like *args, these are
                  specified on a per-attribute-group basis.

        As an example of this method, see the TFRecordParallelByFileProvider.get_input_ops.
        """
        raise NotImplementedError() 
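The method body above is intentionally abstract. As a rough sketch of what a concrete override might look like, assuming TFRecord files (the docstring names TFRecordParallelByFileProvider.get_input_ops as the real implementation; the reader choice here is illustrative):

def get_input_op(self, fq, *args, **kwargs):
    # The reader dequeues a filename from fq only after it has exhausted the
    # current file, then produces (key, serialized_record) pairs one at a time.
    reader = tf.TFRecordReader(*args, **kwargs)
    key, value = reader.read(fq)
    return value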
Example 33
Project: DmsMsgRcg   Author: bshao001   File: convertmodel.py    (Apache License 2.0)
def s1_predict(config_file, model_dir, model_file, predict_file_list, out_dir):
    """
    This function serves as a test/validation tool during model development. It is not
    used as a final product in any part of the pipeline.
    """
    with open(config_file) as config_buffer:
        config = json.loads(config_buffer.read())

    with tf.Graph().as_default() as graph:
        converted_model = ConvertedModel(config, graph, 's1_keras', model_dir, model_file)

    with tf.Session(graph=graph) as sess:
        for img_file in predict_file_list:
            image = cv2.imread(img_file)
            boxes = converted_model.predict(sess, image)
            image = draw_boxes(image, boxes)

            _, filename = os.path.split(img_file)
            cv2.imwrite(os.path.join(out_dir, filename), image) 
Example 34
Project: neural-fonts   Author: periannath   File: train.py    (license)
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        model = UNet(args.experiment_dir, batch_size=args.batch_size, experiment_id=args.experiment_id,
                     input_width=args.image_size, output_width=args.image_size, embedding_num=args.embedding_num,
                     embedding_dim=args.embedding_dim, L1_penalty=args.L1_penalty, Lconst_penalty=args.Lconst_penalty,
                     Ltv_penalty=args.Ltv_penalty, Lcategory_penalty=args.Lcategory_penalty)
        model.register_session(sess)
        if args.flip_labels:
            model.build_model(is_training=True, inst_norm=args.inst_norm, no_target_source=True)
        else:
            model.build_model(is_training=True, inst_norm=args.inst_norm)
        fine_tune_list = None
        if args.fine_tune:
            ids = args.fine_tune.split(",")
            fine_tune_list = set([int(i) for i in ids])
        model.train(lr=args.lr, epoch=args.epoch, resume=args.resume,
                    schedule=args.schedule, freeze_encoder=args.freeze_encoder, fine_tune=fine_tune_list,
                    sample_steps=args.sample_steps, checkpoint_steps=args.checkpoint_steps,
                    flip_labels=args.flip_labels, no_val=args.no_val) 
Example 35
Project: tf-image-interpreter   Author: ThoughtWorksInc   File: test_feature.py    (MIT License)
def test_vgg():
  vgg = Vgg16()
  image_tensor = tf.placeholder(tf.float32)
  with tf.Session() as sess:
    vgg.build(image_tensor)
    init = tf.initialize_all_variables()
    sess.run(init)

    load_feature_layer_params('/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

    for v in tf.get_collection(tf.GraphKeys.VARIABLES):
      print_op = tf.Print(v, [v], message=v.name, first_n=10)
      sess.run(print_op)

    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    for i in range(10):
      image, scale, bboxes = batch_gen.next_batch()

      print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image})) 
Example 36
Project: tf-image-interpreter   Author: ThoughtWorksInc   File: test_minibatch.py    (MIT License)
def main():
  roidb = RoiDb('val.txt', 2007)
  batch_gen = BatchGenerator(roidb)

  image_tensor = tf.placeholder(dtype=tf.float32)
  scale_tensor = tf.placeholder(dtype=tf.float32)
  bboxes_tensor = tf.placeholder(dtype=tf.float32)
  p_op = tf.Print(image_tensor, [tf.shape(image_tensor), scale_tensor, bboxes_tensor])

  sess = tf.Session()
  init = tf.initialize_all_variables()
  sess.run(init)

  coord = tf.train.Coordinator()
  queue_threads = queue_runner.start_queue_runners(sess, coord=coord)

  for i in range(10):
    if coord.should_stop():
      break
    image, scale, bboxes = batch_gen.next_batch()

    sess.run([p_op], feed_dict={image_tensor: image, scale_tensor: scale, bboxes_tensor:bboxes})

  coord.request_stop()
  coord.join(queue_threads) 
Example 37
Project: tf-image-interpreter   Author: ThoughtWorksInc   File: test_rpn.py    (MIT License)
def test_rpn():
  vgg = Vgg16()
  rpn = RpnNet()
  image_tensor = tf.placeholder(tf.float32)
  with tf.Session() as sess:
    vgg.build(image_tensor)
    rpn.build(vgg.conv5_3, None)
    init = tf.initialize_all_variables()
    sess.run(init)

    load_feature_layer_params('/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    for i in range(10):
      image, scale, bboxes = batch_gen.next_batch()
      feature_shape = tf.shape(rpn.rpn_cls_score_reshape)
      print_feat_shape = tf.Print(feature_shape, [feature_shape], summarize=5)
      sess.run(print_feat_shape, feed_dict={image_tensor: image})

      # print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image})) 
Example 38
Project: speechless   Author: JuliusKunze   File: test_ctc_decoders.py    (MIT License)
def test(self):
        def decode_greedily(beam_search: bool, merge_repeated: bool):
            aa_ctc_blank_aa_logits = tf.constant(np.array([[[1.0, 0.0]], [[1.0, 0.0]], [[0.0, 1.0]],
                                                           [[1.0, 0.0]], [[1.0, 0.0]]], dtype=np.float32))
            sequence_length = tf.constant(np.array([5], dtype=np.int32))

            (decoded_list,), log_probabilities = \
                tf.nn.ctc_beam_search_decoder(inputs=aa_ctc_blank_aa_logits,
                                              sequence_length=sequence_length,
                                              merge_repeated=merge_repeated,
                                              beam_width=1) \
                    if beam_search else \
                    tf.nn.ctc_greedy_decoder(inputs=aa_ctc_blank_aa_logits,
                                             sequence_length=sequence_length,
                                             merge_repeated=merge_repeated)

            return list(tf.Session().run(tf.sparse_tensor_to_dense(decoded_list)[0]))

        self.assertEqual([0], decode_greedily(beam_search=True, merge_repeated=True))
        self.assertEqual([0, 0], decode_greedily(beam_search=True, merge_repeated=False))
        self.assertEqual([0, 0], decode_greedily(beam_search=False, merge_repeated=True))
        self.assertEqual([0, 0, 0, 0], decode_greedily(beam_search=False, merge_repeated=False)) 
Example 39
Project: DeepPath   Author: xwhan   File: policy_agent.py    (license)
def retrain():
	print('Start retraining')
	tf.reset_default_graph()
	policy_network = PolicyNetwork(scope = 'supervised_policy')

	f = open(relationPath)
	training_pairs = f.readlines()
	f.close()

	saver = tf.train.Saver()
	with tf.Session() as sess:
		saver.restore(sess, 'models/policy_supervised_' + relation)
		print "sl_policy restored"
		episodes = len(training_pairs)
		if episodes > 300:
			episodes = 300
		REINFORCE(training_pairs, policy_network, episodes)
		saver.save(sess, 'models/policy_retrained' + relation)
	print('Retrained model saved')
Example 40
Project: deep-summarization   Author: harpribot   File: sequenceNet.py    (MIT License)
def begin_session(self):
        """
        Begins the session

        :return: None
        """
        # reset the default graph and start a fresh TensorFlow session
        ops.reset_default_graph()
        self.sess = tf.Session()
Example 41
Project: almond-nnparser   Author: Stanford-Mobisocial-IoT-Lab   File: run_test.py    (license)
def run():
    if len(sys.argv) < 3:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)
    test_data = load_data(sys.argv[2], config.dictionary, config.grammar, config.max_length)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        with tf.device('/cpu:0'):
            model.build()
        
            test_eval = Seq2SeqEvaluator(model, config.grammar, test_data, 'test', config.reverse_dictionary, beam_size=config.beam_size, batch_size=config.batch_size)
            loader = tf.train.Saver()

            with tf.Session() as sess:
                loader.restore(sess, os.path.join(model_dir, 'best'))
                
                #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
                #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)

                test_eval.eval(sess, save_to_file=True) 
Example 42
Project: almond-nnparser   Author: Stanford-Mobisocial-IoT-Lab   File: eval_output_embeddings.py    (license)
def run():
    if len(sys.argv) < 4:
        print("** Usage: python3 " + sys.argv[0] + " <<Model Directory>> <<Everything Set>> <<Test Set>>")
        sys.exit(1)

    np.random.seed(42)
    model_dir = sys.argv[1]
    config = Config.load(['./default.conf', os.path.join(model_dir, 'model.conf')])
    model = create_model(config)

    everything_labels, everything_label_lengths = load_programs(config, sys.argv[2])
    test_labels, test_label_lengths = load_programs(config, sys.argv[3])
    #test_labels, test_label_lengths = sample(config.grammar, test_labels, test_label_lengths)
    print("unknown", unknown_tokens)

    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        model.build()
        loader = tf.train.Saver()

        train_bag_of_tokens = bag_of_tokens(config, everything_labels, everything_label_lengths)
        V, mean = pca_fit(train_bag_of_tokens, n_components=2)

        eval_bag_of_tokens = bag_of_tokens(config, test_labels, test_label_lengths)
        transformed = pca_transform(eval_bag_of_tokens, V, mean)

        with tf.Session() as sess:
            loader.restore(sess, os.path.join(model_dir, 'best'))
            transformed = transformed.eval(session=sess)
        
        programs = reconstruct_programs(test_labels, test_label_lengths, config.grammar.tokens)
        show_pca(transformed, programs) 
Example 43
Project: AVSR-Deep-Speech   Author: pandeydivesh15   File: exported_model.py    (GNU General Public License v2.0)
def __init__(self, export_dir, model_name, use_spell_check=False, use_visual_features=False):
		'''
		Args:
			export_dir(type = str):	Path to directory where trained model 
									has been exported (with trailing slash).
			model_name(type = str):	Name of the model exported.
		'''
		self.export_dir = export_dir
		self.session = tf.Session()
		self.name = model_name
		self.use_spell_check = use_spell_check

		self.use_visual_features = use_visual_features 
Example 44
Project: visual-search   Author: GYXie   File: visual_search.py    (MIT License)
def extract_feature(imgs):
    x, fc6 = initModel()
    # init = tf.initialize_all_variables()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        return sess.run(fc6, feed_dict={x: imgs})
Example 45
Project: human-rl   Author: gsastry   File: classifier_tf.py    (MIT License)
def __init__(self, checkpoint_file):

        checkpoint_dir = os.path.dirname(checkpoint_file)
        hparams_file = os.path.join(checkpoint_dir, "hparams.txt")
        hparams_dict = {}
        if os.path.isfile(hparams_file):
            with open(hparams_file) as f:
                hparams_dict = ast.literal_eval(f.read())
        self.hparams = TensorflowClassifierHparams(**hparams_dict)
        self.graph = tf.Graph()
        with self.graph.as_default():
            print("loading from file {}".format(checkpoint_file))
            config = tf.ConfigProto(
                device_count={'GPU': 0}, )
            config.gpu_options.visible_device_list = ""
            self.session = tf.Session(config=config)
            new_saver = tf.train.import_meta_graph(checkpoint_file + ".meta", clear_devices=True)
            new_saver.restore(self.session, checkpoint_file)

            self.features = {}

            if self.hparams.use_image:
                self.features["image"] = self.graph.get_tensor_by_name("image:0")
            if self.hparams.use_observation:
                self.features["observation"] = self.graph.get_tensor_by_name("observation:0")
            if self.hparams.use_action:
                self.features["action"] = self.graph.get_tensor_by_name("action:0")
            self.prediction = tf.get_collection('prediction')[0]
            self.loss = tf.get_collection('loss')[0]
            self.threshold = tf.get_collection('threshold')[0] 
Example 46
Project: distributional_perspective_on_RL   Author: Kiwoo   File: tf_util.py    (license)
def single_threaded_session():
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=1,
        intra_op_parallelism_threads=1)
    return tf.Session(config=tf_config)
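Because the returned object is an ordinary tf.Session, it also works as a context manager. A small usage sketch (the constant is illustrative):

with single_threaded_session() as sess:
    print(sess.run(tf.constant(42)))  # executes with one inter-op and one intra-op thread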