Python tensorflow.container() Examples

The following are 30 code examples of tensorflow.container(). In the TensorFlow 1.x API, tf.container(name) is a graph-level context manager: stateful ops created inside it (variables, queues, lookup tables) are placed in the named resource container rather than the default one. This matters mainly when running against a tf.train.Server, because tf.Session.reset() can then release all resources in selected containers without restarting the server. The project and source file for each example are noted above it.
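
Before the examples, here is a minimal sketch of these semantics (TF 1.x API; the server, variable, and container names are illustrative, not drawn from the examples below):

import tensorflow as tf

# An in-process server gives tf.Session.reset() a target to act on.
server = tf.train.Server.create_local_server()

# Stateful ops created under tf.container("demo") live in the "demo"
# resource container instead of the default one.
with tf.container("demo"):
    counter = tf.Variable(0, name="counter")

sess = tf.Session(server.target)
sess.run(tf.global_variables_initializer())
print(sess.run(counter))  # -> 0

# Releases every resource placed in "demo" on this target; sessions
# connected to it are aborted and must be recreated (see Example #2).
tf.Session.reset(server.target, ["demo"])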
Example #1
Source File: process.py    From DeepRNN with MIT License
def create_train_model(model_creator, hparams, data_dir):
    """Create train graph, model, and iterator."""
    train_data_path = []
    for root, _, name in os.walk(os.path.join(data_dir, 'train_data')):
        for x in name:
            if x.split('.')[-1] == 'mat':
                train_data_path.append(os.path.join(root, x))
    assert len(train_data_path) == 1
    train_data = scio.loadmat(*train_data_path)['data']
    assert hparams.src_len == hparams.tgt_len == train_data.shape[1]
    graph = tf.Graph()

    with graph.as_default(), tf.container("train"):
        # channels: [features, SBP, DBP, MBP]
        train_src_data = train_data[:, :, 0:hparams.src_feature_size]
        train_tgt_data = train_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(train_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(train_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=hparams.batch_size,
                                random_seed=hparams.random_seed, is_train=True)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.TRAIN)
    return TrainModel(graph=graph, model=model, iterator=iterator) 
Example #2
Source File: server_lib_test.py    From deep_image_model with Apache License 2.0
def testMultipleContainers(self):
    with tf.container("test0"):
      v0 = tf.Variable(1.0, name="v0")
    with tf.container("test1"):
      v1 = tf.Variable(2.0, name="v0")
    server = tf.train.Server.create_local_server()
    sess = tf.Session(server.target)
    sess.run(tf.global_variables_initializer())
    self.assertAllEqual(1.0, sess.run(v0))
    self.assertAllEqual(2.0, sess.run(v1))

    # Resets container. Session aborts.
    tf.Session.reset(server.target, ["test0"])
    with self.assertRaises(tf.errors.AbortedError):
      sess.run(v1)

    # Connects to the same target. Device memory for the v0 would have
    # been released, so it will be uninitialized. But v1 should still
    # be valid.
    sess = tf.Session(server.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
      sess.run(v0)
    self.assertAllEqual(2.0, sess.run(v1))

Example #3
Source File: fifo_queue_test.py    From deep_image_model with Apache License 2.0
def testConstructor(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
                       shared_name="foo", name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list {} } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: 'foo' } }
      """, q.queue_ref.op.node_def)
    self.assertEqual(["i", "j"], q.names) 
Example #4
Source File: fifo_queue_test.py    From deep_image_model with Apache License 2.0
def testConstructorWithShapes(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32),
                       shapes=(tf.TensorShape([1, 1, 2, 3]),
                               tf.TensorShape([5, 8])), name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list {
        shape { dim { size: 1 }
                dim { size: 1 }
                dim { size: 2 }
                dim { size: 3 } }
        shape { dim { size: 5 }
                dim { size: 8 } }
      } } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.queue_ref.op.node_def) 
Example #5
Source File: variables_test.py    From deep_image_model with Apache License 2.0
def testContainer(self):
    with tf.Graph().as_default():
      v0 = tf.Variable([0])
      with tf.container("l1"):
        v1 = tf.Variable([1])
        with tf.container("l2"):
          v2 = tf.Variable([2])
          special_v = gen_state_ops._variable(shape=[1], dtype=tf.float32, 
              name="VariableInL3", container="l3", shared_name="")
        v3 = tf.Variable([3])
      v4 = tf.Variable([4])
    self.assertEqual(tf.compat.as_bytes(""), v0.op.get_attr("container"))
    self.assertEqual(tf.compat.as_bytes("l1"), v1.op.get_attr("container"))
    self.assertEqual(tf.compat.as_bytes("l2"), v2.op.get_attr("container"))
    self.assertEqual(tf.compat.as_bytes("l3"),
                     special_v.op.get_attr("container"))
    self.assertEqual(tf.compat.as_bytes("l1"), v3.op.get_attr("container"))
    self.assertEqual(tf.compat.as_bytes(""), v4.op.get_attr("container")) 
Example #6
Source File: process.py    From DeepRNN with MIT License
def create_eval_model(model_creator, hparams, data_dir):
    """Create eval graph, model and iterator."""
    eval_data_path = []
    for root, _, name in os.walk(os.path.join(data_dir, 'eval_data')):
        for x in name:
            if x.split('.')[-1] == 'mat':
                eval_data_path.append(os.path.join(root, x))
    assert len(eval_data_path) == 1
    eval_data = scio.loadmat(*eval_data_path)['data']
    data_mean, data_std = load_data_mean_std(hparams, data_dir)
    batch_size = eval_data.shape[0]
    graph = tf.Graph()

    with graph.as_default(), tf.container("eval"):
        eval_src_data = eval_data[:, :, 0:hparams.src_feature_size]
        # channels: [features, SBP, DBP, MBP]
        eval_tgt_data = eval_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(eval_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(eval_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=batch_size,
                                random_seed=hparams.random_seed, is_train=False)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.EVAL)
    return EvalModel(graph=graph, model=model, iterator=iterator, data_mean=data_mean, data_std=data_std) 
Example #7
Source File: hred_helper.py    From THRED with MIT License
def create_eval_model(hparams, scope=None):
    """Create train graph, model, src/tgt file holders, and iterator."""

    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "eval"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)
        eval_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)

        eval_dataset = tf.data.TextLineDataset(eval_file_placeholder)
        iterator = hred_iterators.get_iterator(
            eval_dataset,
            vocab_table,
            hparams.batch_size,
            hparams.num_turns,
            hparams.num_buckets,
            hparams.src_max_len,
            hparams.tgt_max_len)

        model = HierarchichalSeq2SeqModel(mode=tf.contrib.learn.ModeKeys.EVAL,
                                          iterator=iterator,
                                          num_turns=hparams.num_turns,
                                          params=hparams,
                                          scope=scope,
                                          log_trainables=False)
    return EvalModel(
        graph=graph,
        model=model,
        eval_file_placeholder=eval_file_placeholder,
        iterator=iterator) 
Example #8
Source File: model_helper.py    From chinese_semantic_role_labeling with MIT License
def create_train_model(
        model_creator,
        hparams):

    graph = tf.Graph()
    with graph.as_default(), tf.container("train"):
        model = model_creator(
                hparams,
                tf.contrib.learn.ModeKeys.TRAIN,
                )
        return TrainModel(
            graph=graph,
            model=model,
        ) 
Example #9
Source File: model_helper.py    From LSTM-CNN-CWS with Apache License 2.0
def create_eval_model(hparams, model_creator):
  vocab_file = hparams.vocab_file
  index_file = hparams.index_file
  graph = tf.Graph()

  with graph.as_default(), tf.container("eval"):
    vocab_table = lookup_ops.index_table_from_file(
      vocab_file, default_value = UNK_ID)
    # for the labels
    index_table = lookup_ops.index_table_from_file(
      index_file, default_value = 0)

    # the file's name
    txt_file_placeholder = tf.placeholder(shape = (), dtype = tf.string)
    lb_file_placeholder = tf.placeholder(shape = (), dtype = tf.string)
    txt_dataset = tf.data.TextLineDataset(txt_file_placeholder)
    lb_dataset = tf.data.TextLineDataset(lb_file_placeholder)

    iterator = data_iterator.get_iterator(
        txt_dataset,
        lb_dataset,
        vocab_table,
        index_table,
        batch_size = hparams.batch_size,
        num_buckets = hparams.num_buckets,
        max_len = hparams.max_len)

    model = model_creator(
        hparams,
        iterator = iterator,
        mode = tf.contrib.learn.ModeKeys.EVAL,
        vocab_table = vocab_table)

  return EvalModel(
      graph = graph,
      model = model,
      txt_file_placeholder = txt_file_placeholder,
      lb_file_placeholder = lb_file_placeholder,
      iterator = iterator) 
Example #10
Source File: model_helper.py    From LSTM-CNN-CWS with Apache License 2.0
def create_infer_model(hparams, model_creator):
  """Create inference model."""
  graph = tf.Graph()
  vocab_file = hparams.vocab_file

  with graph.as_default(), tf.container("infer"):
    vocab_table = lookup_ops.index_table_from_file(
      vocab_file, default_value = UNK_ID)
    # for the labels
    '''
    Although this is nonsense for the inference procedure, this is to ensure
    the labels are not None when building the model graph.
    (refer to model.BasicModel._decode_layer)
    '''
    mapping_strings = tf.constant(['0'])
    index_table = tf.contrib.lookup.index_table_from_tensor(
        mapping = mapping_strings, default_value = 0)

    txt_placeholder = tf.placeholder(shape=[None], dtype = tf.string)
    batch_size_placeholder = tf.placeholder(shape = [], dtype = tf.int64)

    txt_dataset = tf.data.Dataset.from_tensor_slices(
        txt_placeholder)
    iterator = data_iterator.get_infer_iterator(
        txt_dataset,
        vocab_table,
        index_table,
        batch_size = batch_size_placeholder)

    model = model_creator(
        hparams,
        iterator = iterator,
        mode = tf.contrib.learn.ModeKeys.INFER,
        vocab_table = vocab_table)

  return InferModel(
      graph = graph,
      model = model,
      txt_placeholder = txt_placeholder,
      batch_size_placeholder = batch_size_placeholder,
      iterator = iterator) 
Example #11
Source File: model_helper.py    From inference with Apache License 2.0
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
  """Create inference model."""
  graph = tf.Graph()
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file

  with graph.as_default(), tf.container(scope or "infer"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)

    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

    src_dataset = tf.data.Dataset.from_tensor_slices(
        src_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        src_dataset,
        src_vocab_table,
        batch_size=batch_size_placeholder,
        eos=hparams.eos,
        src_max_len=hparams.src_max_len_infer,
        use_char_encode=hparams.use_char_encode)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return InferModel(
      graph=graph,
      model=model,
      src_placeholder=src_placeholder,
      batch_size_placeholder=batch_size_placeholder,
      iterator=iterator) 
Example #12
Source File: hred_helper.py    From THRED with MIT License
def create_train_model(hparams, scope=None, num_workers=1, jobid=0):
    """Create train graph, model, and iterator."""

    graph = tf.Graph()

    vocab.create_vocabulary(hparams.vocab_file, hparams.train_data, hparams.vocab_size)

    with graph.as_default(), tf.container(scope or "train"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)

        dataset = tf.data.TextLineDataset(hparams.train_data)
        skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)

        iterator = hred_iterators.get_iterator(
            dataset,
            vocab_table,
            hparams.batch_size,
            hparams.num_turns,
            hparams.num_buckets,
            hparams.src_max_len,
            hparams.tgt_max_len,
            skip_count=skip_count_placeholder,
            num_shards=num_workers,
            shard_index=jobid)

        # Note: One can set model_device_fn to
        # `tf.train.replica_device_setter(ps_tasks)` for distributed training.
        model_device_fn = None
        # if extra_args: model_device_fn = extra_args.model_device_fn
        with tf.device(model_device_fn):
            model = HierarchichalSeq2SeqModel(
                mode=tf.contrib.learn.ModeKeys.TRAIN,
                iterator=iterator,
                num_turns=hparams.num_turns,
                params=hparams,
                scope=scope)

    return TrainModel(graph=graph,
                      model=model,
                      iterator=iterator,
                      skip_count_placeholder=skip_count_placeholder) 
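
The model_device_fn note in this example (repeated in Example #18) refers to TF 1.x device placement for distributed training. A hedged sketch of what actually passing tf.train.replica_device_setter would look like (the ps_tasks count and variable name are illustrative):

# Sketch only: with ps_tasks > 0, variables created under this device
# function are assigned round-robin to parameter-server tasks, while
# the remaining ops stay on the worker.
model_device_fn = tf.train.replica_device_setter(ps_tasks=2)
with tf.device(model_device_fn):
    embedding = tf.get_variable("embedding", [1000, 128])  # lands on a PS task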
Example #13
Source File: model_helper.py    From nmt with Apache License 2.0
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
  """Create inference model."""
  graph = tf.Graph()
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file

  with graph.as_default(), tf.container(scope or "infer"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)

    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

    src_dataset = tf.data.Dataset.from_tensor_slices(
        src_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        src_dataset,
        src_vocab_table,
        batch_size=batch_size_placeholder,
        eos=hparams.eos,
        src_max_len=hparams.src_max_len_infer,
        use_char_encode=hparams.use_char_encode)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return InferModel(
      graph=graph,
      model=model,
      src_placeholder=src_placeholder,
      batch_size_placeholder=batch_size_placeholder,
      iterator=iterator) 
Example #14
Source File: hred_helper.py    From THRED with MIT License
def create_pretrain_model(hparams, scope=None, num_workers=1, jobid=0):
    """Create train graph, model, and iterator."""
    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "pretrain"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)

        iterator = hred_iterators.get_iterator(
            hparams.pretrain_data,
            vocab_table,
            hparams.batch_size,
            hparams.num_pretrain_turns,
            hparams.num_buckets,
            hparams.src_max_len,
            hparams.tgt_max_len,
            num_shards=num_workers,
            shard_index=jobid)

        model = HierarchichalSeq2SeqModel(mode=tf.contrib.learn.ModeKeys.TRAIN,
                                          iterator=iterator,
                                          num_turns=hparams.num_pretrain_turns,
                                          params=hparams,
                                          scope=scope,
                                          log_trainables=False)

    return TrainModel(
        graph=graph,
        model=model,
        iterator=iterator,
        skip_count_placeholder=None) 
Example #15
Source File: vanilla_helper.py    From THRED with MIT License
def create_eval_model(hparams, scope=None):
    """Create train graph, model, src/tgt file holders, and iterator."""
    vocab_file = hparams.vocab_file
    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "eval"):
        vocab_table = vocab.create_vocab_table(vocab_file)
        eval_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)

        eval_dataset = tf.data.TextLineDataset(eval_file_placeholder)
        iterator = vanilla_iterators.get_iterator(
            eval_dataset,
            vocab_table,
            hparams.batch_size,
            num_buckets=hparams.num_buckets,
            src_max_len=hparams.src_max_len,
            tgt_max_len=hparams.tgt_max_len)
        model = VanillaSeq2SeqModel(
            mode=tf.contrib.learn.ModeKeys.EVAL,
            iterator=iterator,
            params=hparams,
            scope=scope,
            log_trainables=False)
    return EvalModel(
        graph=graph,
        model=model,
        eval_file_placeholder=eval_file_placeholder,
        iterator=iterator) 
Example #16
Source File: vanilla_helper.py    From THRED with MIT License
def create_infer_model(hparams, scope=None):
    """Create inference model."""
    graph = tf.Graph()
    vocab_file = hparams.vocab_file

    with graph.as_default(), tf.container(scope or "infer"):
        vocab_table = vocab.create_vocab_table(vocab_file)
        reverse_vocab_table = vocab.create_rev_vocab_table(vocab_file)

        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

        src_dataset = tf.data.Dataset.from_tensor_slices(
            src_placeholder)
        iterator = vanilla_iterators.get_infer_iterator(
            src_dataset,
            vocab_table,
            batch_size=batch_size_placeholder,
            src_max_len=hparams.src_max_len)
        model = VanillaSeq2SeqModel(
            mode=tf.contrib.learn.ModeKeys.INFER,
            iterator=iterator,
            params=hparams,
            rev_vocab_table=reverse_vocab_table,
            scope=scope)
    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator) 
Example #17
Source File: hred_helper.py    From THRED with MIT License
def create_infer_model(hparams, scope=None):
    """Create inference model."""
    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "infer"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)
        reverse_vocab_table = vocab.create_rev_vocab_table(hparams.vocab_file)

        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

        src_dataset = tf.data.Dataset.from_tensor_slices(src_placeholder)

        iterator = hred_iterators.get_infer_iterator(
            src_dataset,
            vocab_table,
            batch_size=batch_size_placeholder,
            num_turns=hparams.num_turns,
            src_max_len=hparams.src_max_len)

        model = HierarchichalSeq2SeqModel(mode=tf.contrib.learn.ModeKeys.INFER,
                                          iterator=iterator,
                                          num_turns=hparams.num_turns,
                                          params=hparams,
                                          rev_vocab_table=reverse_vocab_table,
                                          scope=scope)
    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator) 
Example #18
Source File: taware_helper.py    From THRED with MIT License
def create_train_model(hparams, scope=None, num_workers=1, jobid=0, extra_args=None):
    """Create train graph, model, and iterator."""
    train_file = hparams.train_data

    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "train"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)

        dataset = tf.data.TextLineDataset(train_file)
        skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)

        iterator = taware_iterators.get_iterator(
            dataset,
            vocab_table,
            batch_size=hparams.batch_size,
            num_buckets=hparams.num_buckets,
            topic_words_per_utterance=hparams.topic_words_per_utterance,
            src_max_len=hparams.src_max_len,
            tgt_max_len=hparams.tgt_max_len,
            skip_count=skip_count_placeholder,
            num_shards=num_workers,
            shard_index=jobid)

        # Note: One can set model_device_fn to
        # `tf.train.replica_device_setter(ps_tasks)` for distributed training.
        model_device_fn = None
        # if extra_args: model_device_fn = extra_args.model_device_fn
        with tf.device(model_device_fn):
            model = TopicAwareSeq2SeqModel(
                mode=tf.contrib.learn.ModeKeys.TRAIN,
                iterator=iterator,
                params=hparams,
                scope=scope)

    return TrainModel(
        graph=graph,
        model=model,
        iterator=iterator,
        skip_count_placeholder=skip_count_placeholder) 
Example #19
Source File: taware_helper.py    From THRED with MIT License
def create_infer_model(hparams, scope=None):
    """Create inference model."""
    graph = tf.Graph()
    vocab_file = hparams.vocab_file

    with graph.as_default(), tf.container(scope or "infer"):
        vocab_table = vocab.create_vocab_table(vocab_file)
        reverse_vocab_table = vocab.create_rev_vocab_table(vocab_file)

        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

        src_dataset = tf.data.Dataset.from_tensor_slices(
            src_placeholder)
        iterator = taware_iterators.get_infer_iterator(
            src_dataset,
            vocab_table,
            batch_size=batch_size_placeholder,
            topic_words_per_utterance=hparams.topic_words_per_utterance,
            src_max_len=hparams.src_max_len)
        model = TopicAwareSeq2SeqModel(
            mode=tf.contrib.learn.ModeKeys.INFER,
            iterator=iterator,
            params=hparams,
            rev_vocab_table=reverse_vocab_table,
            scope=scope,
            log_trainables=False)
    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator) 
Example #20
Source File: thred_helper.py    From THRED with MIT License
def create_pretrain_model(hparams, scope=None, num_workers=1, jobid=0):
    """Create train graph, model, and iterator."""
    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "pretrain"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)

        iterator = get_iterator(
            hparams.pretrain_data,
            vocab_table,
            hparams.batch_size,
            hparams.num_pretrain_turns,
            hparams.num_buckets,
            hparams.topic_words_per_utterance,
            hparams.src_max_len,
            hparams.tgt_max_len,
            num_shards=num_workers,
            shard_index=jobid)

        model = TopicAwareHierarchicalSeq2SeqModel(
            mode=tf.contrib.learn.ModeKeys.TRAIN,
            iterator=iterator,
            num_turns=hparams.num_pretrain_turns,
            params=hparams,
            scope=scope,
            log_trainables=False)

    return TrainModel(
        graph=graph,
        model=model,
        iterator=iterator,
        skip_count_placeholder=None) 
Example #21
Source File: thred_helper.py    From THRED with MIT License
def create_eval_model(hparams, scope=None):
    """Create train graph, model, src/tgt file holders, and iterator."""

    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "eval"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)
        eval_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)

        eval_dataset = tf.data.TextLineDataset(eval_file_placeholder)
        iterator = get_iterator(
            eval_dataset,
            vocab_table,
            hparams.batch_size,
            hparams.num_turns,
            hparams.num_buckets,
            hparams.topic_words_per_utterance,
            hparams.src_max_len,
            hparams.tgt_max_len)

        model = TopicAwareHierarchicalSeq2SeqModel(
            mode=tf.contrib.learn.ModeKeys.EVAL,
            iterator=iterator,
            num_turns=hparams.num_turns,
            params=hparams,
            scope=scope,
            log_trainables=False)

    return EvalModel(
        graph=graph,
        model=model,
        eval_file_placeholder=eval_file_placeholder,
        iterator=iterator) 
Example #22
Source File: thred_helper.py    From THRED with MIT License
def create_infer_model(hparams, scope=None):
    """Create inference model."""
    graph = tf.Graph()

    with graph.as_default(), tf.container(scope or "infer"):
        vocab_table = vocab.create_vocab_table(hparams.vocab_file)
        reverse_vocab_table = vocab.create_rev_vocab_table(hparams.vocab_file)

        src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
        batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

        src_dataset = tf.data.Dataset.from_tensor_slices(src_placeholder)

        iterator = get_infer_iterator(
            src_dataset,
            vocab_table,
            batch_size=batch_size_placeholder,
            num_turns=hparams.num_turns,
            topic_words_per_utterance=hparams.topic_words_per_utterance,
            src_max_len=hparams.src_max_len)

        model = TopicAwareHierarchicalSeq2SeqModel(
            mode=tf.contrib.learn.ModeKeys.INFER,
            iterator=iterator,
            num_turns=hparams.num_turns,
            params=hparams,
            rev_vocab_table=reverse_vocab_table,
            scope=scope,
            log_trainables=False)

    return InferModel(
        graph=graph,
        model=model,
        src_placeholder=src_placeholder,
        batch_size_placeholder=batch_size_placeholder,
        iterator=iterator) 
Example #23
Source File: server_lib_test.py    From deep_image_model with Apache License 2.0
def testSameVariablesClear(self):
    server = tf.train.Server.create_local_server()

    # Creates a graph with 2 variables.
    v0 = tf.Variable([[2, 1]], name="v0")
    v1 = tf.Variable([[1], [2]], name="v1")
    v2 = tf.matmul(v0, v1)

    # Verifies that both sessions connecting to the same target return
    # the same results.
    sess_1 = tf.Session(server.target)
    sess_2 = tf.Session(server.target)
    sess_1.run(tf.global_variables_initializer())
    self.assertAllEqual([[4]], sess_1.run(v2))
    self.assertAllEqual([[4]], sess_2.run(v2))

    # Resets target. sessions abort. Use sess_2 to verify.
    tf.Session.reset(server.target)
    with self.assertRaises(tf.errors.AbortedError):
      self.assertAllEqual([[4]], sess_2.run(v2))

    # Connects to the same target. Device memory for the variables would have
    # been released, so they will be uninitialized.
    sess_2 = tf.Session(server.target)
    with self.assertRaises(tf.errors.FailedPreconditionError):
      sess_2.run(v2)
    # Reinitializes the variables.
    sess_2.run(tf.global_variables_initializer())
    self.assertAllEqual([[4]], sess_2.run(v2))
    sess_2.close()

Example #24
Source File: process.py    From DeepRNN with MIT License
def create_infer_model(model_creator, hparams, infer_data, batch_size):
    """Create inference model."""
    graph = tf.Graph()

    with graph.as_default(), tf.container("infer"):
        infer_src_data = infer_data[:, :, 0:hparams.src_feature_size]
        # channels: [features, SBP, DBP, MBP]
        infer_tgt_data = infer_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(infer_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(infer_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=batch_size,
                                random_seed=hparams.random_seed, is_train=False)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.INFER)
    return InferModel(graph=graph, model=model, iterator=iterator) 
Example #25
Source File: fifo_queue_test.py    From deep_image_model with Apache License 2.0
def testContainer(self):
    with tf.Graph().as_default():
      with tf.container("test"):
        q = tf.FIFOQueue(10, tf.float32)
    self.assertEqual(tf.compat.as_bytes("test"),
                     q.queue_ref.op.get_attr("container")) 
Example #26
Source File: fifo_queue_test.py    From deep_image_model with Apache License 2.0
def testDequeueWithTimeout(self):
    with self.test_session(
        config=tf.ConfigProto(operation_timeout_in_ms=20)) as sess:
      q = tf.FIFOQueue(10, tf.float32)
      self.assertEqual(tf.compat.as_bytes(""),
                       q.queue_ref.op.get_attr("container"))
      dequeued_t = q.dequeue()

      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                   "Timed out waiting for notification"):
        sess.run(dequeued_t) 
Example #27
Source File: fifo_queue_test.py    From deep_image_model with Apache License 2.0
def testMultiQueueConstructor(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list {} } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: 'foo' } }
      """, q.queue_ref.op.node_def) 
Example #28
Source File: fifo_queue_test.py    From deep_image_model with Apache License 2.0
def testConstructor(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(10, tf.float32, name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list { type: DT_FLOAT } } }
      attr { key: 'shapes' value { list {} } }
      attr { key: 'capacity' value { i: 10 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.queue_ref.op.node_def) 
Example #29
Source File: model_helper.py    From parallax with Apache License 2.0
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
  """Create inference model."""
  graph = tf.Graph()
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file

  with graph.as_default(), tf.container(scope or "infer"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
        tgt_vocab_file, default_value=vocab_utils.UNK)

    src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
    batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)

    src_dataset = tf.data.Dataset.from_tensor_slices(
        src_placeholder)
    iterator = iterator_utils.get_infer_iterator(
        src_dataset,
        src_vocab_table,
        batch_size=batch_size_placeholder,
        eos=hparams.eos,
        src_max_len=hparams.src_max_len_infer)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.INFER,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        reverse_target_vocab_table=reverse_tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return InferModel(
      graph=graph,
      model=model,
      src_placeholder=src_placeholder,
      batch_size_placeholder=batch_size_placeholder,
      iterator=iterator) 
Example #30
Source File: model_helper.py    From parallax with Apache License 2.0
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
  """Create train graph, model, src/tgt file holders, and iterator."""
  src_vocab_file = hparams.src_vocab_file
  tgt_vocab_file = hparams.tgt_vocab_file
  graph = tf.Graph()

  with graph.as_default(), tf.container(scope or "eval"):
    src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
    tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
    src_dataset = tf.data.TextLineDataset(src_file_placeholder)
    tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
    with tf.device('CPU:0'):
      iterator = iterator_utils.get_iterator(
        src_dataset,
        tgt_dataset,
        src_vocab_table,
        tgt_vocab_table,
        hparams.batch_size,
        sos=hparams.sos,
        eos=hparams.eos,
        random_seed=hparams.random_seed,
        num_buckets=hparams.num_buckets,
        src_max_len=hparams.src_max_len_infer,
        tgt_max_len=hparams.tgt_max_len_infer)
    model = model_creator(
        hparams,
        iterator=iterator,
        mode=tf.contrib.learn.ModeKeys.EVAL,
        source_vocab_table=src_vocab_table,
        target_vocab_table=tgt_vocab_table,
        scope=scope,
        extra_args=extra_args)
  return EvalModel(
      graph=graph,
      model=model,
      src_file_placeholder=src_file_placeholder,
      tgt_file_placeholder=tgt_file_placeholder,
      iterator=iterator)