Python tensorflow.initialize_local_variables() Examples

The following are 19 code examples of tensorflow.initialize_local_variables(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
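
tf.initialize_local_variables() initializes the variables in the tf.GraphKeys.LOCAL_VARIABLES collection, such as the epoch counters created by queue-based input pipelines and the accumulators created by tf.metrics. It is a deprecated TF 1.x API: tf.local_variables_initializer() is the drop-in replacement, just as tf.global_variables_initializer() replaces the deprecated tf.initialize_all_variables(). A minimal sketch of the typical pattern, assuming TF 1.x graph mode and a hypothetical data.tfrecord file:

import tensorflow as tf

# string_input_producer(num_epochs=...) tracks its epoch count in a *local*
# variable, which must be initialized before the queue runners start.
filename_queue = tf.train.string_input_producer(["data.tfrecord"], num_epochs=1)

with tf.Session() as sess:
    sess.run(tf.initialize_local_variables())     # deprecated spelling used below
    # sess.run(tf.local_variables_initializer())  # modern TF 1.x equivalent
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    coord.request_stop()
    coord.join(threads)
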
Example #1
Source File: language_model_test.py    From lm with MIT License
def test_lm(self):
        hps = get_test_hparams()

        with tf.variable_scope("model"):
            model = LM(hps)

        with self.test_session() as sess:
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()

            loss = 1e5
            for i in range(50):
                x, y, w = simple_data_generator(hps.batch_size, hps.num_steps)
                loss, _ = sess.run([model.loss, model.train_op], {model.x: x, model.y: y, model.w: w})
                print("%d: %.3f %.3f" % (i, loss, np.exp(loss)))
                if np.isnan(loss):
                    print("NaN detected")
                    break

            self.assertLess(loss, 1.0) 
Example #2
Source File: language_model_test.py    From f-lm with MIT License
def test_lm(self):
        hps = get_test_hparams()

        with tf.variable_scope("model"):
            model = LM(hps)

        with self.test_session() as sess:
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()

            loss = 1e5
            for i in range(50):
                x, y, w = simple_data_generator(hps.batch_size, hps.num_steps)
                loss, _ = sess.run([model.loss, model.train_op], {model.x: x, model.y: y, model.w: w})
                print("%d: %.3f %.3f" % (i, loss, np.exp(loss)))
                if np.isnan(loss):
                    print("NaN detected")
                    break

            self.assertLess(loss, 1.0) 
Example #3
Source File: test_write_read_variable.py    From deep-koalarization with MIT License
def test_variable_size_record(self):
        # WRITING
        with VariableSizeTypesRecordWriter("variable.tfrecord", DIR_TFRECORDS) as writer:
            for i in range(2):
                writer.write_test()

        # READING
        reader = VariableSizeTypesRecordReader("variable.tfrecord", DIR_TFRECORDS)
        read_one_example = reader.read_operation

        with tf.Session() as sess:
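            # Global variables hold the graph's state; local variables back the
            # input queue's bookkeeping (e.g. epoch counters), so run both.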
            sess.run(
                [tf.global_variables_initializer(), tf.initialize_local_variables()]
            )

            # Coordinate the queue of tfrecord files.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            # Reading examples sequentially one by one
            for j in range(3):
                fetches = sess.run(read_one_example)
                print("Read:", fetches)

            # Finish off the queue coordinator.
            coord.request_stop()
            coord.join(threads) 
Example #4
Source File: print_clusterid_from_tfrecords.py    From ConvNetQuake with MIT License
def main(_):


    cfg = config.Config()
    cfg.batch_size = 1
    cfg.n_epochs = 1


    data_pipeline = dpp.DataPipeline(FLAGS.data_path,
                                     config=cfg,
                                     is_training=False)
    samples = data_pipeline.samples
    labels = data_pipeline.labels
    start_time = data_pipeline.start_time
    end_time = data_pipeline.end_time

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
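        # The pipeline's epoch counter (cfg.n_epochs = 1) lives in a local variable.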
        tf.initialize_local_variables().run()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            for i in range(FLAGS.windows):
                to_fetch = [samples, labels, start_time, end_time]
                sample, label, starttime, endtime = sess.run(to_fetch)
                # assert starttime < endtime
                print('starttime {}, endtime {}'.format(UTCDateTime(starttime),
                                                        UTCDateTime(endtime)))
                print("label", label[0])
                sample = np.squeeze(sample, axis=(0,))
                target = np.squeeze(label, axis=(0,))
        except tf.errors.OutOfRangeError:
            print('Evaluation completed ({} epochs).'.format(cfg.n_epochs))

        print("{} windows seen".format(i+1))
        coord.request_stop()
        coord.join(threads) 
Example #5
Source File: kpn_data_provider.py    From burst-denoising with Apache License 2.0
def load_tfrecord(filename):
  g = tf.Graph()
  with g.as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    mosaic, demosaic_truth, readvar, shotfactor = read_and_decode_single(filename)
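    # tf.group bundles the (deprecated) global and local initializer ops into one.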
    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    with tf.Session() as sess:
      sess.run(init_op)
      mosaic, demosaic_truth, readvar, shotfactor = \
        sess.run([mosaic, demosaic_truth, readvar, shotfactor])

      return mosaic, demosaic_truth, readvar, shotfactor 
Example #6
Source File: LSTM_eval.py    From AssociativeRetrieval with Apache License 2.0
def train(config):
  with tf.Graph().as_default():
    model = LSTM_model(config)
    inputs_seqs_batch, outputs_batch = model.reader.read(shuffle=False, num_epochs=1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    saver = tf.train.Saver(tf.all_variables())
    global_steps = 0

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    saver.restore(sess, "./save/LSTM/save-60000")

    correct_count = 0
    evaled_count = 0
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        probs = sess.run([model.probs], {model.input_data: input_data,
                                                          model.targets: targets})
        probs = np.array(probs).reshape([-1, config.vocab_size])
        targets = np.array([t[0] for t in targets])
        output = np.argmax(probs, axis=1)

        correct_count += np.sum(output == targets)
        evaled_count += len(output)

    except tf.errors.OutOfRangeError:
        pass
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    print("Accuracy: %f" % (float(correct_count) / evaled_count))
    coord.join(threads)
    sess.close() 
Example #7
Source File: FW_eval.py    From AssociativeRetrieval with Apache License 2.0
def train(config):
  with tf.Graph().as_default():
    model = FW_model(config)
    inputs_seqs_batch, outputs_batch = model.reader.read(shuffle=False, num_epochs=1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    saver = tf.train.Saver(tf.all_variables())
    global_steps = 0

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    saver.restore(sess, "./save/FW/save-60000")

    correct_count = 0
    evaled_count = 0
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        probs = sess.run([model.probs], {model.input_data: input_data,
                                                          model.targets: targets})
        probs = np.array(probs).reshape([-1, config.vocab_size])
        targets = np.array([t[0] for t in targets])
        output = np.argmax(probs, axis=1)

        correct_count += np.sum(output == targets)
        evaled_count += len(output)

    except tf.errors.OutOfRangeError:
        pass
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    print("Accuracy: %f" % (float(correct_count) / evaled_count))
    coord.join(threads)
    sess.close() 
Example #8
Source File: FW_model.py    From AssociativeRetrieval with Apache License 2.0
def load_validation(self):
    data_reader = utils.DataReader(data_filename="input_seqs_validation", batch_size=16)
    inputs_seqs_batch, outputs_batch = data_reader.read(False, 1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    self.validation_inputs = []
    self.validation_targets = []
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        self.validation_inputs.append(input_data)
        self.validation_targets.append(targets)
    except tf.errors.OutOfRangeError:
      pass
    finally:
      coord.request_stop()
    coord.join(threads)
    sess.close()

    self.validation_inputs = np.array(self.validation_inputs).reshape([-1, self.config.input_length])
    self.validation_targets = np.array(self.validation_targets).reshape([-1, 1]) 
Example #9
Source File: LSTM_model.py    From AssociativeRetrieval with Apache License 2.0
def load_validation(self):
    data_reader = utils.DataReader(data_filename="input_seqs_validation", batch_size=16)
    inputs_seqs_batch, outputs_batch = data_reader.read(False, 1)
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    self.validation_inputs = []
    self.validation_targets = []
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        self.validation_inputs.append(input_data)
        self.validation_targets.append(targets)
    except tf.errors.OutOfRangeError:
      pass
    finally:
      coord.request_stop()
    coord.join(threads)
    sess.close()

    self.validation_inputs = np.array(self.validation_inputs).reshape([-1, self.config.input_length])
    self.validation_targets = np.array(self.validation_targets).reshape([-1, 1]) 
Example #10
Source File: FW_train.py    From AssociativeRetrieval with Apache License 2.0
def train(config):
  with tf.Graph().as_default():
    model = FW_model(config)
    inputs_seqs_batch, outputs_batch = model.reader.read()
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())

    sess = tf.Session()
    sess.run(init_op)
    saver = tf.train.Saver(tf.all_variables())
    global_steps = 0

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    train_writer = tf.train.SummaryWriter("./log/FW/train", sess.graph)
    validation_writer = tf.train.SummaryWriter("./log/FW/validation", sess.graph)
    try:
      while not coord.should_stop():
        input_data, targets = sess.run([inputs_seqs_batch, outputs_batch])
        cost, _, summary= sess.run([model.cost, model.train_op, model.summary_all], {model.input_data: input_data,
                                                                                     model.targets: targets})
        print("Step %d: cost:%f" % (global_steps,  cost))
        train_writer.add_summary(summary, global_steps)

        global_steps += 1
        if global_steps % 1000 == 0:
          (accuracy, summary) = sess.run([model.accuracy, model.summary_accuracy], {model.input_data: model.validation_inputs,
                                                                                    model.targets: model.validation_targets})
          validation_writer.add_summary(summary, global_steps)
          print("Accuracy: %f" % accuracy)
          print(saver.save(sess, "./save/FW/save", global_step=global_steps))
        if global_steps > 60000:
          break
    except tf.errors.OutOfRangeError:
      print("Error")
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    coord.join(threads)
    sess.close() 
Example #11
Source File: readtf.py    From udacity-driving-reader with Apache License 2.0
def main():
    data_dir = '/output/combined'
    num_images = 1452601

    # Build graph and initialize variables
    read_op = create_read_graph(data_dir, 'combined')
    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    sess = tf.Session()
    sess.run(init_op)

    # Start input enqueue threads
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    read_count = 0
    try:
        while read_count < num_images and not coord.should_stop():
            images, timestamps, angles, _ = sess.run(read_op)
            for i in range(images.shape[0]):
                decoded_image = images[i]
                assert decoded_image.shape[2] == 3
                print(angles[i])
                read_count += 1
            if not read_count % 1000:
                print("Read %d examples" % read_count)

    except tf.errors.OutOfRangeError:
        print("Reading stopped by Queue")
    finally:
        # Ask the threads to stop.
        coord.request_stop()

    print("Done reading %d images" % read_count)

    # Wait for threads to finish.
    coord.join(threads)
    sess.close() 
Example #12
Source File: main.py    From gan-image-similarity with GNU General Public License v3.0
def export_intermediate(FLAGS, sess, dataset):
    # Models
    x = tf.placeholder(tf.float32, shape=[
        None, IMAGE_SIZE['resized'][0], IMAGE_SIZE['resized'][1], 3])
    dropout = tf.placeholder(tf.float32)
    feat_model = discriminator(x, reuse=False, dropout=dropout, int_feats=True)

    # Init
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    # Restore
    saver = tf.train.Saver()
    checkpoint = tf.train.latest_checkpoint(FLAGS.logdir)
    saver.restore(sess, checkpoint)

    # Run
    all_features = np.zeros((dataset['size'], feat_model.get_shape()[1]))
    all_paths = []
    for i in itertools.count():
        try:
            images, paths = sess.run(dataset['batch'])
        except tf.errors.OutOfRangeError:
            break
        if i % 10 == 0:
            print(i * FLAGS.batch_size, dataset['size'])
        im_features = sess.run(feat_model, feed_dict={x: images, dropout: 1, })
        all_features[FLAGS.batch_size * i:FLAGS.batch_size * i + im_features.shape[0]] = im_features
        all_paths += list(paths)

    # Finish off the filename queue coordinator.
    coord.request_stop()
    coord.join(threads)

    return all_features, all_paths 
Example #13
Source File: t2t_prune.py    From training_results_v0.5 with Apache License 2.0
def main(argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()


  if FLAGS.generate_data:
    t2t_trainer.generate_data()

  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  hparams = t2t_trainer.create_hparams()
  trainer_lib.add_problem_hparams(hparams, FLAGS.problem)
  pruning_params = create_pruning_params()
  pruning_strategy = create_pruning_strategy(pruning_params.strategy)

  config = t2t_trainer.create_run_config(hparams)
  params = {"batch_size": hparams.batch_size}

  # add "_rev" as a hack to avoid image standardization
  problem = registry.problem(FLAGS.problem)
  input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
                                             hparams)
  dataset = input_fn(params, config).repeat()
  features, labels = dataset.make_one_shot_iterator().get_next()

  sess = tf.Session()

  model_fn = t2t_model.T2TModel.make_estimator_model_fn(
      FLAGS.model, hparams, use_tpu=FLAGS.use_tpu)
  spec = model_fn(
      features,
      labels,
      tf.estimator.ModeKeys.EVAL,
      params=hparams,
      config=config)

  # Restore weights
  saver = tf.train.Saver()
  checkpoint_path = os.path.expanduser(FLAGS.output_dir or
                                       FLAGS.checkpoint_path)
  saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))

  def eval_model():
    preds = spec.predictions["predictions"]
    preds = tf.argmax(preds, -1, output_type=labels.dtype)
    _, acc_update_op = tf.metrics.accuracy(labels=labels, predictions=preds)
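    # tf.metrics.accuracy keeps its running total/count in local variables, so
    # initializing local variables resets the accuracy accumulators.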
    sess.run(tf.initialize_local_variables())
    for _ in range(FLAGS.eval_steps):
      acc = sess.run(acc_update_op)
    return acc

  pruning_utils.sparsify(sess, eval_model, pruning_strategy, pruning_params) 
Example #14
Source File: t2t_prune.py    From BERT with Apache License 2.0
def main(argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  trainer_lib.set_random_seed(FLAGS.random_seed)
  usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)
  t2t_trainer.maybe_log_registry_and_exit()


  if FLAGS.generate_data:
    t2t_trainer.generate_data()

  if argv:
    t2t_trainer.set_hparams_from_args(argv[1:])
  hparams = t2t_trainer.create_hparams()
  trainer_lib.add_problem_hparams(hparams, FLAGS.problem)
  pruning_params = create_pruning_params()
  pruning_strategy = create_pruning_strategy(pruning_params.strategy)

  config = t2t_trainer.create_run_config(hparams)
  params = {"batch_size": hparams.batch_size}

  # add "_rev" as a hack to avoid image standardization
  problem = registry.problem(FLAGS.problem)
  input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
                                             hparams)
  dataset = input_fn(params, config).repeat()
  features, labels = dataset.make_one_shot_iterator().get_next()

  sess = tf.Session()

  model_fn = t2t_model.T2TModel.make_estimator_model_fn(
      FLAGS.model, hparams, use_tpu=FLAGS.use_tpu)
  spec = model_fn(
      features,
      labels,
      tf.estimator.ModeKeys.EVAL,
      params=hparams,
      config=config)

  # Restore weights
  saver = tf.train.Saver()
  checkpoint_path = os.path.expanduser(FLAGS.output_dir or
                                       FLAGS.checkpoint_path)
  saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))

  def eval_model():
    preds = spec.predictions["predictions"]
    preds = tf.argmax(preds, -1, output_type=labels.dtype)
    _, acc_update_op = tf.metrics.accuracy(labels=labels, predictions=preds)
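    # Initializing local variables resets the running tf.metrics accumulators.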
    sess.run(tf.initialize_local_variables())
    for _ in range(FLAGS.eval_steps):
      acc = sess.run(acc_update_op)
    return acc

  pruning_utils.sparsify(sess, eval_model, pruning_strategy, pruning_params) 
Example #15
Source File: main.py    From gan-image-similarity with GNU General Public License v3.0
def similarity(FLAGS, sess, all_features, all_paths):
    def select_images(distances):
        indices = np.argsort(distances)
        images = []
        size = 40
        for i in range(size):
            images += [dict(path=all_paths[indices[i]],
                            index=indices[i],
                            distance=distances[indices[i]])]
        return images

    # Distance
    x1 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
    x2 = tf.placeholder(tf.float32, shape=[None, all_features.shape[1]])
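    # tf.sub and reduction_indices are the pre-TF-1.0 names for tf.subtract and axis.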
    l2diff = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1))

    # Init
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    sess.run(init_op)

    #
    clip = 1e-3
    np.clip(all_features, -clip, clip, all_features)

    # Get distances
    result = []
    bs = 100
    needles = [randint(0, all_features.shape[0]) for x in range(10)]
    for needle in needles:
        item_block = np.reshape(np.tile(all_features[needle], bs), [bs, -1])
        distances = np.zeros(all_features.shape[0])
        for i in range(0, all_features.shape[0], bs):
            if i + bs > all_features.shape[0]:
                bs = all_features.shape[0] - i
            distances[i:i + bs] = sess.run(
                l2diff, feed_dict={x1: item_block[:bs], x2: all_features[i:i + bs]})

        # Pick best matches
        result += [select_images(distances)]

    with open('logs/data.json', 'w') as f:
        json.dump(dict(data=result), f)
    return


########
# Main #
######## 
Example #16
Source File: pretrained.py    From SSD_tensorflow_VOC with Apache License 2.0
def use_fined_model(self):
        image_size = inception.inception_v4.default_image_size
        batch_size = 3
        flowers_data_dir = "../../data/flower"
        train_dir = '/tmp/inception_finetuned/'
        
        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            
            dataset = flowers.get_split('train', flowers_data_dir)
            images, images_raw, labels = self.load_batch(dataset, height=image_size, width=image_size)
            
            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(images, num_classes=dataset.num_classes, is_training=True)
        
            probabilities = tf.nn.softmax(logits)
            
            checkpoint_path = tf.train.latest_checkpoint(train_dir)
            init_fn = slim.assign_from_checkpoint_fn(
              checkpoint_path,
              slim.get_variables_to_restore())
            
            with tf.Session() as sess:
                with slim.queues.QueueRunners(sess):
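                    # slim.queues.QueueRunners starts the input queue threads;
                    # their bookkeeping state lives in local variables.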
                    sess.run(tf.initialize_local_variables())
                    init_fn(sess)
                    np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])
            
                    for i in range(batch_size): 
                        image = np_images_raw[i, :, :, :]
                        true_label = np_labels[i]
                        predicted_label = np.argmax(np_probabilities[i, :])
                        predicted_name = dataset.labels_to_names[predicted_label]
                        true_name = dataset.labels_to_names[true_label]
                        
                        plt.figure()
                        plt.imshow(image.astype(np.uint8))
                        plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
                        plt.axis('off')
                        plt.show()
                return 
Example #17
Source File: run_utils.py    From f-lm with MIT License
def run_eval(dataset, hps, logdir, mode, num_eval_steps):
    with tf.variable_scope("model"):
        hps.num_sampled = 0  # Always using full softmax at evaluation.
        hps.keep_prob = 1.0
        #model = LM(hps, "eval", "/cpu:0")
        model = LM(hps, "eval", "/gpu:0")

    if hps.average_params:
        print("Averaging parameters for evaluation.")
        saver = tf.train.Saver(model.avg_dict)
    else:
        saver = tf.train.Saver()

    # Use only 4 threads for the evaluation.
    #config = tf.ConfigProto(allow_soft_placement=True,
    #                        intra_op_parallelism_threads=20,
    #                        inter_op_parallelism_threads=1)
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    sw = tf.summary.FileWriter(logdir + "/" + mode, sess.graph)
    ckpt_loader = CheckpointLoader(saver, model.global_step, logdir + "/train")

    with sess.as_default():
        while ckpt_loader.load_checkpoint():
            global_step = ckpt_loader.last_global_step
            data_iterator = dataset.iterate_once(hps.batch_size * hps.num_gpus, hps.num_steps)
            #tf.initialize_local_variables().run()
            tf.local_variables_initializer().run()
            loss_nom = 0.0
            loss_den = 0.0
            #for i, (x, y, w) in enumerate(data_iterator):
            for i, (x, y) in enumerate(data_iterator):
                if i >= num_eval_steps and mode!="eval_full":
                    break

                #loss = sess.run(model.loss, {model.x: x, model.y: y, model.w: w})
                loss = sess.run(model.loss, {model.x: x, model.y: y})
                loss_nom += loss
                loss_den += 1 # ???
                #loss_den += w.mean()
                loss = loss_nom / loss_den
                sys.stdout.write("%d: %.3f (%.3f) ... " % (i, loss, np.exp(loss)))
                sys.stdout.flush()
            sys.stdout.write("\n")

            log_perplexity = loss_nom / loss_den
            print("Results at %d: log_perplexity = %.3f perplexity = %.3f" % (
                global_step, log_perplexity, np.exp(log_perplexity)))

            summary = tf.Summary()
            summary.value.add(tag='eval/log_perplexity', simple_value=log_perplexity)
            summary.value.add(tag='eval/perplexity', simple_value=np.exp(log_perplexity))
            sw.add_summary(summary, global_step)
            sw.flush()
            if mode == "eval_full":
                break #we don't need to wait for other checkpoints in this mode 
Example #18
Source File: parse_yt8m_v2_all.py    From Youtube-8M with Apache License 2.0
def main(files_pattern):
    data_files = gfile.Glob(files_pattern)
    filename_queue = tf.train.string_input_producer(
            data_files, num_epochs=1, shuffle=False)

    reader = YT8MFrameFeatureReader(feature_sizes=[1024, 128], feature_names=["rgb", "audio"]) 
    vals = reader.prepare_reader(filename_queue)

    with tf.Session() as sess:
        sess.run(tf.initialize_local_variables())
        sess.run(tf.initialize_all_variables())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        vid_num = 0
        all_data = []
        try:
            while not coord.should_stop():
                vid, features, audios, labels, nframes = sess.run(vals)
                label_index = np.where(labels==True)[0].tolist()
                vid_num += 1

                #print vid, features.shape, audios.shape, label_index, nframes
                #sys.exit()
 
                features_int = features.astype(np.uint8)
                audios_int = audios.astype(np.uint8)

                dd = {}
                dd['video']   = vid
                dd['feature'] = features_int
                dd['audio']   = audios_int
                dd['label']   = label_index
                dd['nframes'] = nframes
                all_data.append(dd)

        except tf.errors.OutOfRangeError:
            print('Finished extracting.')

        finally:
            coord.request_stop()
            coord.join(threads)

    print(vid_num)

    record_name = files_pattern.split('/')[-1].split('.')[0]
    outp = open('./validate_pkl_all/%s.pkl'%record_name, 'wb')
    cPickle.dump(all_data, outp, protocol=cPickle.HIGHEST_PROTOCOL)
    outp.close() 
Example #19
Source File: run_utils.py    From lm with MIT License
def run_eval(dataset, hps, logdir, mode, num_eval_steps):
    with tf.variable_scope("model"):
        hps.num_sampled = 0  # Always using full softmax at evaluation.
        hps.keep_prob = 1.0
        model = LM(hps, "eval", "/cpu:0")

    if hps.average_params:
        print("Averaging parameters for evaluation.")
        saver = tf.train.Saver(model.avg_dict)
    else:
        saver = tf.train.Saver()

    # Use only 4 threads for the evaluation.
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=20,
                            inter_op_parallelism_threads=1)
    sess = tf.Session(config=config)
    sw = tf.train.SummaryWriter(logdir + "/" + mode, sess.graph)
    ckpt_loader = CheckpointLoader(saver, model.global_step, logdir + "/train")

    with sess.as_default():
        while ckpt_loader.load_checkpoint():
            global_step = ckpt_loader.last_global_step
            data_iterator = dataset.iterate_once(hps.batch_size * hps.num_gpus, hps.num_steps)
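            # Reset local variables before evaluating each new checkpoint.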
            tf.initialize_local_variables().run()
            loss_nom = 0.0
            loss_den = 0.0
            for i, (x, y, w) in enumerate(data_iterator):
                if i >= num_eval_steps:
                    break

                loss = sess.run(model.loss, {model.x: x, model.y: y, model.w: w})
                loss_nom += loss
                loss_den += w.mean()
                loss = loss_nom / loss_den
                sys.stdout.write("%d: %.3f (%.3f) ... " % (i, loss, np.exp(loss)))
                sys.stdout.flush()
            sys.stdout.write("\n")

            log_perplexity = loss_nom / loss_den
            print("Results at %d: log_perplexity = %.3f perplexity = %.3f" % (
                global_step, log_perplexity, np.exp(log_perplexity)))

            summary = tf.Summary()
            summary.value.add(tag='eval/log_perplexity', simple_value=log_perplexity)
            summary.value.add(tag='eval/perplexity', simple_value=np.exp(log_perplexity))
            sw.add_summary(summary, global_step)
            sw.flush()