Python tensorflow.placeholder_with_default() Examples

The following are 30 code examples of tensorflow.placeholder_with_default(), drawn from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
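As a quick orientation before the project examples, here is a minimal, self-contained sketch of the function's behaviour in TensorFlow 1.x (the constant 1.0, the name "x", and the doubling op are illustrative, not taken from any of the projects below): when nothing is fed, the default value is used; a feed_dict entry overrides it.

import tensorflow as tf

# A scalar placeholder that falls back to 1.0 when it is not fed.
x = tf.placeholder_with_default(tf.constant(1.0), shape=[], name="x")
y = x * 2.0

with tf.Session() as sess:
    print(sess.run(y))                      # no feed: uses the default -> 2.0
    print(sess.run(y, feed_dict={x: 5.0}))  # feed overrides the default -> 10.0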
Example #1
Source File: test_case.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def execute_cpu(self, graph_fn, inputs):
    """Constructs the graph, executes it on CPU and returns the result.

    Args:
      graph_fn: a callable that constructs the tensorflow graph to test. The
        arguments of this function should correspond to `inputs`.
      inputs: a list of numpy arrays to feed input to the computation graph.

    Returns:
      A list of numpy arrays or a scalar returned from executing the tensorflow
      graph.
    """
    with self.test_session(graph=tf.Graph()) as sess:
      placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
      results = graph_fn(*placeholders)
      sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
                tf.local_variables_initializer()])
      materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
                                                                  inputs)))
      if (len(materialized_results) == 1
          and (isinstance(materialized_results, list)
               or isinstance(materialized_results, tuple))):
        materialized_results = materialized_results[0]
    return materialized_results 
Example #2
Source File: lstm_test.py    From BERT with Apache License 2.0
def testLSTMSeq2SeqAttention(self):
    vocab_size = 9
    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
    hparams = lstm.lstm_attention()

    p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                     vocab_size,
                                                     hparams)
    x = tf.constant(x, dtype=tf.int32)
    x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])

    with self.test_session() as session:
      features = {
          "inputs": x,
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = lstm.LSTMSeq2seqAttention(
          hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) 
Example #3
Source File: Trainer.py    From MOTSFusion with MIT License
def __init__(self, config, train_network, test_network, global_step, session):
    self.opt_str = config.string("optimizer", "adam").lower()
    self.train_network = train_network
    self.test_network = test_network
    self.session = session
    self.global_step = global_step
    self.validation_step_number = 0
    self.gradient_clipping = config.float("gradient_clipping", -1.0)
    self.learning_rates = config.int_key_dict("learning_rates")
    self.curr_learning_rate = self.learning_rates[1]
    self.lr_var = tf.placeholder(tf.float32, shape=[], name="learning_rate")
    self.loss_scale_var = tf.placeholder_with_default(1.0, shape=[], name="loss_scale")
    self.opt, self.reset_opt_op = self.create_optimizer(config)

    grad_norm = None
    if train_network is not None:
      self._step_op, grad_norm = self.create_step_op_and_grad_norm()
      self._update_ops = self.train_network.update_ops
    else:
      self._step_op = None
      self._update_ops = None
    self.summary_writer, self.summary_op_train, self.summary_op_test = self.init_summaries(config, grad_norm) 
Example #4
Source File: simulate_test.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def test_reset_forced(self):
    reset = tf.placeholder_with_default(False, ())
    batch_env = self._create_test_batch_env((2, 4))
    algo = tools.MockAlgorithm(batch_env)
    done, _, _ = tools.simulate(batch_env, algo, False, reset)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(done)
      sess.run(done, {reset: True})
      sess.run(done)
      sess.run(done, {reset: True})
      sess.run(done)
      sess.run(done)
      sess.run(done)
    self.assertAllEqual([1, 2, 2, 2], batch_env[0].steps)
    self.assertAllEqual([1, 2, 4], batch_env[1].steps) 
Example #5
Source File: test_case.py    From Person-Detection-and-Tracking with MIT License
def execute_cpu(self, graph_fn, inputs):
    """Constructs the graph, executes it on CPU and returns the result.

    Args:
      graph_fn: a callable that constructs the tensorflow graph to test. The
        arguments of this function should correspond to `inputs`.
      inputs: a list of numpy arrays to feed input to the computation graph.

    Returns:
      A list of numpy arrays or a scalar returned from executing the tensorflow
      graph.
    """
    with self.test_session(graph=tf.Graph()) as sess:
      placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
      results = graph_fn(*placeholders)
      sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
                tf.local_variables_initializer()])
      materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
                                                                  inputs)))
      if (len(materialized_results) == 1
          and (isinstance(materialized_results, list)
               or isinstance(materialized_results, tuple))):
        materialized_results = materialized_results[0]
    return materialized_results 
Example #6
Source File: resnet18.py    From meta-transfer-learning with MIT License
def __init__(self):
        # Set the dimension number for the input feature maps
        self.dim_input = FLAGS.img_size * FLAGS.img_size * 3
        # Set the dimension number for the outputs
        self.dim_output = FLAGS.way_num
        # Load base learning rates from FLAGS
        self.update_lr = FLAGS.base_lr
        # Load the pre-train phase class number from FLAGS
        self.pretrain_class_num = FLAGS.pretrain_class_num
        # Set the initial meta learning rate
        self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
        # Set the initial pre-train learning rate
        self.pretrain_lr = tf.placeholder_with_default(FLAGS.pre_lr, ())

        # Set the default objective functions for meta-train and pre-train
        self.loss_func = xent
        self.pretrain_loss_func = softmaxloss

        # Set the default channel number to 3
        self.channels = 3
        # Load the image size from FLAGS
        self.img_size = FLAGS.img_size 
Example #7
Source File: resnet12.py    From meta-transfer-learning with MIT License
def __init__(self):
        # Set the dimension number for the input feature maps
        self.dim_input = FLAGS.img_size * FLAGS.img_size * 3
        # Set the dimension number for the outputs
        self.dim_output = FLAGS.way_num
        # Load base learning rates from FLAGS
        self.update_lr = FLAGS.base_lr
        # Load the pre-train phase class number from FLAGS
        self.pretrain_class_num = FLAGS.pretrain_class_num
        # Set the initial meta learning rate
        self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
        # Set the initial pre-train learning rate
        self.pretrain_lr = tf.placeholder_with_default(FLAGS.pre_lr, ())

        # Set the default objective functions for meta-train and pre-train
        self.loss_func = xent
        self.pretrain_loss_func = softmaxloss

        # Set the default channel number to 3
        self.channels = 3
        # Load the image size from FLAGS
        self.img_size = FLAGS.img_size 
Example #8
Source File: lstm_test.py    From fine-lm with MIT License
def testLSTMSeq2SeqAttention(self):
    vocab_size = 9
    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
    hparams = lstm.lstm_attention()

    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    x = tf.constant(x, dtype=tf.int32)
    x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])

    with self.test_session() as session:
      features = {
          "inputs": x,
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = lstm.LSTMSeq2seqAttention(
          hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) 
Example #9
Source File: lstm_test.py    From fine-lm with MIT License
def testLSTMSeq2seqAttentionBidirectionalEncoder(self):
    vocab_size = 9
    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
    hparams = lstm.lstm_attention()

    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    x = tf.constant(x, dtype=tf.int32)
    x = tf.placeholder_with_default(x, shape=[None, None, 1, 1])

    with self.test_session() as session:
      features = {
          "inputs": x,
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = lstm.LSTMSeq2seqAttentionBidirectionalEncoder(
          hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) 
Example #10
Source File: ml_100k.py    From recommender-tensorflow with MIT License
def serving_input_fn():
    feature_placeholders = {
        "user_id": tf.placeholder(tf.int32, [None]),
        "item_id": tf.placeholder(tf.int32, [None]),

        "age": tf.placeholder(tf.int32, [None]),
        "gender": tf.placeholder(tf.string, [None]),
        "occupation": tf.placeholder(tf.string, [None]),
        "zipcode": tf.placeholder(tf.string, [None]),

        "release_year": tf.placeholder(tf.int32, [None]),
    }
    feature_placeholders.update({
        col: tf.placeholder_with_default(tf.constant([0]), [None]) for col in GENRE
    })

    features = {
        key: tf.expand_dims(tensor, -1)
        for key, tensor in feature_placeholders.items()
    }

    return tf.estimator.export.ServingInputReceiver(
        features=features,
        receiver_tensors=feature_placeholders
    ) 
Example #11
Source File: qa_model.py    From cs224n-win18-squad with Apache License 2.0
def add_placeholders(self):
        """
        Add placeholders to the graph. Placeholders are used to feed in inputs.
        """
        # Add placeholders for inputs.
        # These are all batch-first: the None corresponds to batch_size and
        # allows you to run the same model with variable batch_size
        self.context_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len])
        self.context_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len])
        self.qn_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len])
        self.qn_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len])
        self.ans_span = tf.placeholder(tf.int32, shape=[None, 2])

        # Add a placeholder to feed in the keep probability (for dropout).
        # This is necessary so that we can instruct the model to use dropout when training, but not when testing
        self.keep_prob = tf.placeholder_with_default(1.0, shape=()) 
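A minimal, self-contained sketch (not taken from the project; the dropout layer, tensor shapes, and the keep probability of 0.5 are illustrative) of how such a keep-probability placeholder is typically driven: training feeds a value below 1.0 to enable dropout, while evaluation omits the feed and falls back to the default of 1.0.

import tensorflow as tf

keep_prob = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")
x = tf.ones([4, 8])
dropped = tf.nn.dropout(x, keep_prob=keep_prob)  # dropout strength controlled by the placeholder

with tf.Session() as sess:
    train_out = sess.run(dropped, feed_dict={keep_prob: 0.5})  # training: dropout active
    eval_out = sess.run(dropped)                               # testing: default 1.0, no dropout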
Example #12
Source File: layers.py    From PADME with MIT License
def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    try:
      shape = self._shape
    except NotImplementedError:
      shape = None
    if len(in_layers) > 0:
      queue = in_layers[0]
      placeholder = queue.out_tensors[self.get_pre_q_name()]
      self.out_tensor = tf.placeholder_with_default(placeholder, self._shape)
      return self.out_tensor
    out_tensor = tf.placeholder(dtype=self.dtype, shape=self._shape)
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor 
Example #13
Source File: model.py    From AdaIN-TF with MIT License
def build_model(self, vgg_weights):
        self.content_imgs = tf.placeholder(shape=(None, None, None, 3), name='content_imgs', dtype=tf.float32)
        self.style_imgs = tf.placeholder(shape=(None, None, None, 3), name='style_imgs', dtype=tf.float32)
        self.alpha = tf.placeholder_with_default(1., shape=[], name='alpha')

        ### Load shared VGG model up to relu4_1
        with tf.name_scope('encoder'):
            self.vgg_model = vgg_from_t7(vgg_weights, target_layer='relu4_1')
        print(self.vgg_model.summary())

        ### Build encoders for content layer
        with tf.name_scope('content_layer_encoder'):
            # Build content layer encoding model
            content_layer = self.vgg_model.get_layer('relu4_1').output
            self.content_encoder_model = Model(inputs=self.vgg_model.input, outputs=content_layer)

            # Setup content layer encodings for content/style images
            self.content_encoded = self.content_encoder_model(self.content_imgs)
            self.style_encoded = self.content_encoder_model(self.style_imgs)
            
            # Apply affine Adaptive Instance Norm transform
            self.adain_encoded = adain(self.content_encoded, self.style_encoded, self.alpha)

        ### Build decoder
        with tf.name_scope('decoder'):
            n_channels = self.adain_encoded.get_shape()[-1].value
            self.decoder_model = self.build_decoder(input_shape=(None, None, n_channels))

            # Setup a placeholder that defaults to the adain tensor but can be substituted with a feed_dict. Needed for interpolation.
            self.adain_encoded_pl = tf.placeholder_with_default(self.adain_encoded, shape=self.adain_encoded.get_shape())
            
            # Stylized/decoded output from AdaIN transformed encoding
            self.decoded = self.decoder_model(Lambda(lambda x: x)(self.adain_encoded_pl)) # Lambda converts TF tensor to Keras

        # Content layer encoding for stylized out
        self.decoded_encoded = self.content_encoder_model(self.decoded) 
Example #14
Source File: test_case.py    From Person-Detection-and-Tracking with MIT License
def execute_tpu(self, graph_fn, inputs):
    """Constructs the graph, executes it on TPU and returns the result.

    Args:
      graph_fn: a callable that constructs the tensorflow graph to test. The
        arguments of this function should correspond to `inputs`.
      inputs: a list of numpy arrays to feed input to the computation graph.

    Returns:
      A list of numpy arrays or a scalar returned from executing the tensorflow
      graph.
    """
    with self.test_session(graph=tf.Graph()) as sess:
      placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
      tpu_computation = tpu.rewrite(graph_fn, placeholders)
      sess.run(tpu.initialize_system())
      sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
                tf.local_variables_initializer()])
      materialized_results = sess.run(tpu_computation,
                                      feed_dict=dict(zip(placeholders, inputs)))
      sess.run(tpu.shutdown_system())
      if (len(materialized_results) == 1
          and (isinstance(materialized_results, list)
               or isinstance(materialized_results, tuple))):
        materialized_results = materialized_results[0]
    return materialized_results 
Example #15
Source File: model.py    From Activation-Visualization-Histogram with MIT License
def __init__(self, config,
                 debug_information=False,
                 is_train=True):
        self.debug = debug_information

        self.config = config
        self.batch_size = self.config.batch_size
        self.input_height = self.config.data_info[0]
        self.input_width = self.config.data_info[1]
        self.num_class = self.config.data_info[2]
        self.c_dim = self.config.data_info[3]
        self.visualize_shape = self.config.visualize_shape
        self.conv_info = self.config.conv_info
        self.activation_fn = {
            'selu': selu,
            'relu': tf.nn.relu,
            'lrelu': lrelu,
        }[self.config.activation]

        # create placeholders for the input
        self.image = tf.placeholder(
            name='image', dtype=tf.float32,
            shape=[self.batch_size, self.input_height, self.input_width, self.c_dim],
        )
        self.label = tf.placeholder(
            name='label', dtype=tf.float32, shape=[self.batch_size, self.num_class],
        )

        self.is_training = tf.placeholder_with_default(bool(is_train), [], name='is_training')

        self.build(is_train=is_train) 
Example #16
Source File: train_eval.py    From yolo_v2 with Apache License 2.0
def get_message_and_key(self):
    """Generate random pseudo-boolean key and message values."""

    batch_size = tf.placeholder_with_default(FLAGS.batch_size, shape=[])

    in_m = batch_of_random_bools(batch_size, TEXT_SIZE)
    in_k = batch_of_random_bools(batch_size, KEY_SIZE)
    return in_m, in_k 
Example #17
Source File: tf_model.py    From char-rnn-text-generation with MIT License
def build_train_graph(loss, learning_rate=0.001, clip_norm=5.0):
    """
    builds training graph
    """
    train_args = {"learning_rate": learning_rate, "clip_norm": clip_norm}
    logger.debug("building training graph: %s.", train_args)

    learning_rate = tf.placeholder_with_default(learning_rate, [], "learning_rate")
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = layers.optimize_loss(loss, global_step, learning_rate, "Adam",
                                    clip_gradients=clip_norm)

    model = {"global_step": global_step, "train_op": train_op,
             "learning_rate": learning_rate, "train_args": train_args}
    return model 
Example #18
Source File: speech_model.py    From speechT with Apache License 2.0
def add_decoding_ops(self, language_model: str = None, lm_weight: float = 0.8, word_count_weight: float = 0.0,
                       valid_word_count_weight: float = 2.3):
    """
    Add the ops for decoding.

    Args:
      language_model: the file path to the language model to use for beam search decoding or None
      word_count_weight: The weight added for each added word
      valid_word_count_weight: The weight added for each in vocabulary word
      lm_weight: The weight multiplied with the language model scoring
    """
    with tf.name_scope('decoding'):
      self.lm_weight = tf.placeholder_with_default(lm_weight, shape=(), name='language_model_weight')
      self.word_count_weight = tf.placeholder_with_default(word_count_weight, shape=(), name='word_count_weight')
      self.valid_word_count_weight = tf.placeholder_with_default(valid_word_count_weight, shape=(),
                                                                 name='valid_word_count_weight')

      if language_model:
        self.softmaxed = tf.log(tf.nn.softmax(self.logits, name='softmax') + 1e-8) / math.log(10)
        self.decoded, self.log_probabilities = tf.nn.ctc_beam_search_decoder(self.softmaxed,
                                                                             self.sequence_lengths // 2,
                                                                             kenlm_directory_path=language_model,
                                                                             kenlm_weight=self.lm_weight,
                                                                             word_count_weight=self.word_count_weight,
                                                                             valid_word_count_weight=self.valid_word_count_weight,
                                                                             beam_width=100,
                                                                             merge_repeated=False,
                                                                             top_paths=1)
      else:
        self.decoded, self.log_probabilities = tf.nn.ctc_greedy_decoder(self.logits,
                                                                        self.sequence_lengths // 2,
                                                                        merge_repeated=True) 
Example #19
Source File: network.py    From LIP_JPPNet with MIT License
def __init__(self, inputs, trainable=True, is_training=False, n_classes=20):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup(is_training, n_classes) 
Example #20
Source File: beam_search_test.py    From BERT with Apache License 2.0
def testStatesAfterLoop(self):
    batch_size = 1
    beam_size = 1
    vocab_size = 2
    decode_length = 3

    initial_ids = tf.constant([0] * batch_size)  # GO
    probabilities = tf.constant([[[0.7, 0.3]], [[0.4, 0.6]], [[0.5, 0.5]]])

    def symbols_to_logits(ids, _, states):
      pos = tf.shape(ids)[1] - 1
      logits = tf.to_float(tf.log(probabilities[pos, :]))
      states["state"] += 1
      return logits, states

    states = {
        "state": tf.zeros((batch_size, 1)),
    }
    states["state"] = tf.placeholder_with_default(
        states["state"], shape=(None, 1))

    _, _, final_states = beam_search.beam_search(
        symbols_to_logits,
        initial_ids,
        beam_size,
        decode_length,
        vocab_size,
        0.0,
        eos_id=1,
        states=states)

    with self.test_session() as sess:
      final_states = sess.run(final_states)
    self.assertAllEqual([[[2]]], final_states["state"]) 
Example #21
Source File: model_base.py    From EasyRL with Apache License 2.0
def add_extra_summary_op(self):
        """add extra summary op.

        Summary ops added in this function will be exported when `session.run` is called during the training stage.
        Note: be careful when adding summary ops; if any input of a summary op is missing, an error will be raised.
        """
        self.extra_episode_return = tf.placeholder_with_default(
            tf.constant([0.0], dtype=tf.float32),
            shape=[None],
            name="episode_return")
        self.summary_ops["extra"].extend([
            tf.summary.scalar(
                name="episode_return",
                tensor=tf.reduce_mean(self.extra_episode_return))
        ]) 
Example #22
Source File: network.py    From Siamese-RPN-tensorflow with MIT License
def __init__(self, inputs, trainable=True):
        self.k=5
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup() 
Example #23
Source File: maml.py    From maml with MIT License
def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):
        """ must call construct_model() after initializing MAML! """
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.update_lr = FLAGS.update_lr
        self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
        self.classification = False
        self.test_num_updates = test_num_updates
        if FLAGS.datasource == 'sinusoid':
            self.dim_hidden = [40, 40]
            self.loss_func = mse
            self.forward = self.forward_fc
            self.construct_weights = self.construct_fc_weights
        elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':
            self.loss_func = xent
            self.classification = True
            if FLAGS.conv:
                self.dim_hidden = FLAGS.num_filters
                self.forward = self.forward_conv
                self.construct_weights = self.construct_conv_weights
            else:
                self.dim_hidden = [256, 128, 64, 64]
                self.forward=self.forward_fc
                self.construct_weights = self.construct_fc_weights
            if FLAGS.datasource == 'miniimagenet':
                self.channels = 3
            else:
                self.channels = 1
            self.img_size = int(np.sqrt(self.dim_input/self.channels))
        else:
            raise ValueError('Unrecognized data source.') 
Example #24
Source File: test_case.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def execute_tpu(self, graph_fn, inputs):
    """Constructs the graph, executes it on TPU and returns the result.

    Args:
      graph_fn: a callable that constructs the tensorflow graph to test. The
        arguments of this function should correspond to `inputs`.
      inputs: a list of numpy arrays to feed input to the computation graph.

    Returns:
      A list of numpy arrays or a scalar returned from executing the tensorflow
      graph.
    """
    with self.test_session(graph=tf.Graph()) as sess:
      placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
      tpu_computation = tpu.rewrite(graph_fn, placeholders)
      sess.run(tpu.initialize_system())
      sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
                tf.local_variables_initializer()])
      materialized_results = sess.run(tpu_computation,
                                      feed_dict=dict(zip(placeholders, inputs)))
      sess.run(tpu.shutdown_system())
      if (len(materialized_results) == 1
          and (isinstance(materialized_results, list)
               or isinstance(materialized_results, tuple))):
        materialized_results = materialized_results[0]
    return materialized_results 
Example #25
Source File: baseop.py    From Traffic_sign_detection_YOLO with MIT License
def wrap_pholder(self, ph, feed):
        """wrap layer.h into placeholders"""
        phtype = type(self.lay.h[ph])
        if phtype is not dict: return

        sig = '{}/{}'.format(self.scope, ph)
        val = self.lay.h[ph]

        self.lay.h[ph] = tf.placeholder_with_default(
            val['dfault'], val['shape'], name = sig)
        feed[self.lay.h[ph]] = val['feed'] 
Example #26
Source File: models.py    From CausalGAN with MIT License
def __init__(self, N, hidden_size=10,z_dim=10):
        with tf.variable_scope('Gen') as scope:
            self.N=tf.placeholder_with_default(N,shape=[])
            self.hidden_size=hidden_size
            self.z_dim=z_dim
            self.build()
            self.tr_var = tf.contrib.framework.get_variables(scope)
            self.step=tf.Variable(0,name='step',trainable=False)
            self.var = tf.contrib.framework.get_variables(scope) 
Example #27
Source File: models.py    From CausalGAN with MIT License
def __init__(self,N):
        with tf.variable_scope('Arrow') as scope:
            self.N=tf.placeholder_with_default(N,shape=[])
            #self.N=tf.constant(N) #how many to sample at a time
            self.e1=tf.random_uniform([self.N,1],0,1)
            self.e2=tf.random_uniform([self.N,1],0,1)
            self.e3=tf.random_uniform([self.N,1],0,1)
            self.build()
            #WARN. some of these are not trainable: i.e. poly
            self.var = tf.contrib.framework.get_variables(scope) 
Example #28
Source File: network_base.py    From tf-pose with Apache License 2.0
def __init__(self, inputs, trainable=True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup() 
Example #29
Source File: test_layers.py    From aboleth with Apache License 2.0
def test_input_sample(make_data):
    """Test the input and tiling layer."""
    x, _, X = make_data
    n_samples = tf.placeholder_with_default(3, [])
    s = ab.InputLayer(name='myname', n_samples=n_samples)

    F, KL = s(myname=x)
    tc = tf.test.TestCase()
    with tc.test_session():
        f = F.eval()
        X_array = X.eval()
        assert KL == 0.0
        assert np.array_equal(f, X_array)
        for i in range(3):
            assert np.array_equal(f[i], x) 
Example #30
Source File: sample.py    From CausalGAN with MIT License
def interpret_dict( a_dict, model,n_times=1, on_logits=True):
    '''
    pass either a do_dict or a cond_dict.
    The rules for converting arguments to numpy arrays to pass
    to tensorflow are identical
    '''
    if a_dict is None:
        return {}
    elif len(a_dict)==0:
        return {}

    if n_times>1:
        token=tf.placeholder_with_default(2.22, shape=())  # shape is a required argument of placeholder_with_default
        a_dict[token]=-2.22

    p_a_dict=take_product(a_dict)

    ##Need divisible batch_size for most models
    if len(p_a_dict)>0:
        L=len(p_a_dict.values()[0])
    else:
        L=0
    print("L is " + str(L))
    print(p_a_dict)

    ##Check compatibility of batch_size and L
    if L>=model.batch_size:
        if not L % model.batch_size == 0:
            raise ValueError('a_dict must be divisible by batch_size\
                             but instead product of inputs was of length',L)
    elif model.batch_size % L == 0:
        p_a_dict = {key:np.repeat(value,model.batch_size/L,axis=0) for key,value in p_a_dict.items()}
    else:
        raise ValueError('No. of intervened values must divide batch_size.')
    return p_a_dict