Python tensorflow.constant() Examples

The following are 30 code examples of tensorflow.constant(). You can go to the original project or source file by following the links above each example. You may also want to check out the other available functions and classes of the tensorflow module.
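Before the project examples, here is a minimal sketch of tf.constant() itself (TF 1.x graph mode, matching the style of the examples below; the variable names are illustrative only):

import tensorflow as tf

# tf.constant embeds a fixed value in the graph; dtype and shape are inferred
# from the value unless given explicitly.
scalar = tf.constant(3.0)                               # float32 scalar
matrix = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)  # 2x2 int32 tensor
filled = tf.constant(7.0, shape=(2, 3))                 # scalar broadcast to a 2x3 tensor

with tf.Session() as sess:
    print(sess.run([scalar, matrix, filled]))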
Example #1
Source File: component.py    From DOTA_models with Apache License 2.0
def build_structured_training(self, state, network_states):
    """Builds a beam search based training loop for this component.

    The default implementation builds a dummy graph and raises a
    TensorFlow runtime exception to indicate that structured training
    is not implemented.

    Args:
      state: MasterState from the 'AdvanceMaster' op that advances the
        underlying master to this component.
      network_states: dictionary of component NetworkState objects.

    Returns:
      (handle, cost, correct, total) -- These are TF ops corresponding
      to the final handle after unrolling, the total cost, and the
      total number of actions. Since the number of correctly predicted
      actions is not applicable in the structured training setting, a
      dummy value should be returned.
    """
    del network_states  # Unused.
    with tf.control_dependencies([tf.Assert(False, ['Not implemented.'])]):
      handle = tf.identity(state.handle)
    cost = tf.constant(0.)
    correct, total = tf.constant(0), tf.constant(0)
    return handle, cost, correct, total 
Example #2
Source File: preview.py    From cwavegan with MIT License
def noise_input_fn(params):
    """Input function for generating samples for PREDICT mode.

  Generates a single Tensor of fixed random noise. Use tf.data.Dataset to
  signal to the estimator when to terminate the generator returned by
  predict().

  Args:
    params: param `dict` passed by TPUEstimator.

  Returns:
    1-element `dict` containing the randomly generated noise.
  """

    # random noise
    np.random.seed(0)
    noise_dataset = tf.data.Dataset.from_tensors(tf.constant(
        np.random.randn(params['batch_size'], FLAGS.noise_dim), dtype=tf.float32))
    noise = noise_dataset.make_one_shot_iterator().get_next()
    return {'random_noise': noise}, None 
Example #3
Source File: face_attack.py    From Adversarial-Face-Attack with GNU General Public License v3.0
def structure(self, input_tensor):
        """
        Args:
            input_tensor: NHWC
        """
        rnd = tf.random_uniform((), 135, 160, dtype=tf.int32)
        rescaled = tf.image.resize_images(
            input_tensor, [rnd, rnd], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        h_rem = 160 - rnd
        w_rem = 160 - rnd
        pad_left = tf.random_uniform((), 0, w_rem, dtype=tf.int32)
        pad_right = w_rem - pad_left
        pad_top = tf.random_uniform((), 0, h_rem, dtype=tf.int32)
        pad_bottom = h_rem - pad_top
        padded = tf.pad(rescaled, [[0, 0], [pad_top, pad_bottom], [
                        pad_left, pad_right], [0, 0]])
        padded.set_shape((input_tensor.shape[0], 160, 160, 3))
        output = tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(0.9),
                         lambda: padded, lambda: input_tensor)
        return output 
Example #4
Source File: face_attack.py    From Adversarial-Face-Attack with GNU General Public License v3.0
def build_pgd_attack(self, eps):
        victim_embeddings = tf.constant(self.victim_embeddings, dtype=tf.float32)

        def one_step_attack(image, grad):
            """
            core components of this attack are:
            (a) PGD adversarial attack (https://arxiv.org/pdf/1706.06083.pdf)
            (b) momentum (https://arxiv.org/pdf/1710.06081.pdf)
            (c) input diversity (https://arxiv.org/pdf/1803.06978.pdf)
            """
            orig_image = image
            image = self.structure(image)
            image = (image - 127.5) / 128.0
            image = image + tf.random_uniform(tf.shape(image), minval=-1e-2, maxval=1e-2)
            prelogits, _ = self.network.inference(image, 1.0, False, bottleneck_layer_size=512)
            embeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')

            embeddings = tf.reshape(embeddings[0], [512, 1])
            objective = tf.reduce_mean(tf.matmul(victim_embeddings, embeddings))  # to be maximized

            noise, = tf.gradients(objective, orig_image)

            noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
            noise = 0.9 * grad + noise

            adv = tf.clip_by_value(orig_image + tf.sign(noise) * 1.0, lower_bound, upper_bound)
            return adv, noise

        input = tf.to_float(self.image_batch)
        lower_bound = tf.clip_by_value(input - eps, 0, 255.)
        upper_bound = tf.clip_by_value(input + eps, 0, 255.)

        with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
            adv, _ = tf.while_loop(
                lambda _, __: True, one_step_attack,
                (input, tf.zeros_like(input)),
                back_prop=False,
                maximum_iterations=100,
                parallel_iterations=1)
        self.adv_image = adv
        return adv 
Example #5
Source File: test_utils_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_clip_eta_goldilocks(self):
        # Test that the clipping handles perturbations that are
        # too small, just right, and too big correctly
        eta = tf.constant([[2.], [3.], [4.]])
        assert eta.dtype == tf.float32, eta.dtype
        eps = 3.
        for ord_arg in [np.inf, 1, 2]:
            for sign in [-1., 1.]:
                clipped = clip_eta(eta * sign, ord_arg, eps)
                clipped_value = self.sess.run(clipped)
                gold = sign * np.array([[2.], [3.], [3.]])
                self.assertClose(clipped_value, gold)
                grad, = tf.gradients(clipped, eta)
                grad_value = self.sess.run(grad)
                # Note: the second 1. is debatable (the left-sided derivative
                # and the right-sided derivative do not match, so formally
                # the derivative is not defined). This test makes sure that
                # we at least handle this oddity consistently across all the
                # argument values we test
                gold = sign * np.array([[1.], [1.], [0.]])
                assert np.allclose(grad_value, gold) 
Example #6
Source File: test_dropout.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def test_drop():
    # Make sure dropout is activated successfully

    # We would like to configure the test to deterministically drop,
    # so that the test does not need to use multiple runs.
    # However, tf.nn.dropout divides by include_prob, so zero or
    # infinitesimal include_prob causes NaNs.
    # 1e-8 does not cause NaNs and shouldn't be a significant source
    # of test flakiness relative to dependency downloads failing, etc.
    model = MLP(input_shape=[1, 1], layers=[Dropout(name='output',
                                                    include_prob=1e-8)])
    x = tf.constant([[1]], dtype=tf.float32)
    y = model.get_layer(x, 'output', dropout=True)
    sess = tf.Session()
    y_value = sess.run(y)
    # Subject to very rare random failure because include_prob is not exact 0
    assert y_value == 0., y_value 
Example #7
Source File: tensor.py    From spleeter with MIT License
def check_tensor_shape(tensor_tf, target_shape):
    """ Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result 
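A hypothetical usage sketch (the tensor and target shape below are invented for illustration): because check_tensor_shape returns a TensorFlow boolean, it can be combined with tf.Assert or a dataset filter.

import tensorflow as tf

# Hypothetical check: batch of stereo waveforms, any batch size or length.
waveform = tf.random_uniform((8, 44100, 2))
is_valid = check_tensor_shape(waveform, (None, None, 2))
with tf.control_dependencies([tf.Assert(is_valid, [tf.shape(waveform)])]):
    waveform = tf.identity(waveform)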
Example #8
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateLogisticClassifier(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = LogisticClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 2)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'LogisticClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, 'GPU:0')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(update_ops, []) 
Example #9
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      clone = clones[0]
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, 'CPU:0')
        self.assertDeviceEqual(v.value().device, 'CPU:0')
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertEqual(clone.scope, '')
      self.assertDeviceEqual(clone.device, 'GPU:0')
      self.assertEqual(len(slim.losses.get_losses()), 1)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2) 
Example #10
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateOnecloneWithPS(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                    num_ps_tasks=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(clones), 1)
      clone = clones[0]
      self.assertEqual(clone.outputs.op.name,
                       'BatchNormClassifier/fully_connected/Sigmoid')
      self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
      self.assertEqual(clone.scope, '')
      self.assertEqual(len(slim.get_variables()), 5)
      for v in slim.get_variables():
        self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
        self.assertDeviceEqual(v.device, v.value().device) 
Example #11
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testCreateSingleclone(self):
    g = tf.Graph()
    with g.as_default():
      tf.set_random_seed(0)
      tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
      tf_labels = tf.constant(self._labels, dtype=tf.float32)

      model_fn = BatchNormClassifier
      clone_args = (tf_inputs, tf_labels)
      deploy_config = model_deploy.DeploymentConfig(num_clones=1)

      self.assertEqual(slim.get_variables(), [])
      clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
      self.assertEqual(len(slim.get_variables()), 5)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      self.assertEqual(len(update_ops), 2)

      optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
      total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
                                                                optimizer)
      self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
      self.assertEqual(total_loss.op.name, 'total_loss')
      for g, v in grads_and_vars:
        self.assertDeviceEqual(g.device, 'GPU:0')
        self.assertDeviceEqual(v.device, 'CPU:0') 
Example #12
Source File: model_deploy_test.py    From DOTA_models with Apache License 2.0
def testNoSummariesOnGPUForEvals(self):
    with tf.Graph().as_default():
      deploy_config = model_deploy.DeploymentConfig(num_clones=2)

      # clone function creates a fully_connected layer with a regularizer loss.
      def ModelFn():
        inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
        reg = tf.contrib.layers.l2_regularizer(0.001)
        tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)

      # No optimizer here, it's an eval.
      model = model_deploy.deploy(deploy_config, ModelFn)
      # The model summary op should have a few summary inputs and all of them
      # should be on the CPU.
      self.assertTrue(model.summary_op.op.inputs)
      for inp in model.summary_op.op.inputs:
        self.assertEqual('/device:CPU:0', inp.device) 
Example #13
Source File: layers.py    From DOTA_models with Apache License 2.0
def __init__(self,
               vocab_size,
               embedding_dim,
               normalize=False,
               vocab_freqs=None,
               keep_prob=1.,
               **kwargs):
    self.vocab_size = vocab_size
    self.embedding_dim = embedding_dim
    self.normalized = normalize
    self.keep_prob = keep_prob

    if normalize:
      assert vocab_freqs is not None
      self.vocab_freqs = tf.constant(
          vocab_freqs, dtype=tf.float32, shape=(vocab_size, 1))

    super(Embedding, self).__init__(**kwargs) 
Example #14
Source File: model_test.py    From DOTA_models with Apache License 2.0
def initialize_fakes(self):
    self.images_shape = (self.batch_size, self.image_height, self.image_width,
                         3)
    self.fake_images = tf.constant(
        self.rng.randint(low=0, high=255,
                         size=self.images_shape).astype('float32'),
        name='input_node')
    self.fake_conv_tower_np = self.rng.randn(
        *self.conv_tower_shape).astype('float32')
    self.fake_conv_tower = tf.constant(self.fake_conv_tower_np)
    self.fake_logits = tf.constant(
        self.rng.randn(*self.chars_logit_shape).astype('float32'))
    self.fake_labels = tf.constant(
        self.rng.randint(
            low=0,
            high=self.num_char_classes,
            size=(self.batch_size, self.seq_length)).astype('int64')) 
Example #15
Source File: vision_baseline_lstm.py    From DOTA_models with Apache License 2.0
def lstm_setup(name, x, batch_size, is_single_step, lstm_dim, lstm_out,
               num_steps, state_input_op):
  # returns state_name, state_init_op, updated_state_op, out_op 
  with tf.name_scope('reshape_'+name):
    sh = x.get_shape().as_list()
    x = tf.reshape(x, shape=[batch_size, -1, sh[-1]])

  with tf.variable_scope(name) as varscope:
    cell = tf.contrib.rnn.LSTMCell(
      num_units=lstm_dim, forget_bias=1.0, state_is_tuple=False,
      num_proj=lstm_out, use_peepholes=True,
      initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=0),
      cell_clip=None, proj_clip=None)

    sh = [batch_size, 1, lstm_dim+lstm_out]
    state_init_op = tf.constant(0., dtype=tf.float32, shape=sh)

    fn = lambda ns: lstm_online(cell, ns, x, state_input_op, varscope)
    out_op, updated_state_op = tf.cond(is_single_step, lambda: fn(1), lambda:
                                       fn(num_steps))

  return name, state_init_op, updated_state_op, out_op 
Example #16
Source File: resnet_model.py    From DOTA_models with Apache License 2.0
def _build_train_op(self):
    """Build training specific ops for the graph."""
    self.lrn_rate = tf.constant(self.hps.lrn_rate, tf.float32)
    tf.summary.scalar('learning_rate', self.lrn_rate)

    trainable_variables = tf.trainable_variables()
    grads = tf.gradients(self.cost, trainable_variables)

    if self.hps.optimizer == 'sgd':
      optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
    elif self.hps.optimizer == 'mom':
      optimizer = tf.train.MomentumOptimizer(self.lrn_rate, 0.9)

    apply_op = optimizer.apply_gradients(
        zip(grads, trainable_variables),
        global_step=self.global_step, name='train_step')

    train_ops = [apply_op] + self._extra_train_ops
    self.train_op = tf.group(*train_ops)

  # TODO(xpan): Consider batch_norm in contrib/layers/python/layers/layers.py 
Example #17
Source File: graph_builder.py    From DOTA_models with Apache License 2.0
def _create_learning_rate(hyperparams, step_var):
  """Creates learning rate var, with decay and switching for CompositeOptimizer.

  Args:
    hyperparams: a GridPoint proto containing optimizer spec, particularly
      learning_method to determine optimizer class to use.
    step_var: tf.Variable, global training step.

  Returns:
    a scalar `Tensor`, the learning rate based on current step and hyperparams.
  """
  if hyperparams.learning_method != 'composite':
    base_rate = hyperparams.learning_rate
  else:
    spec = hyperparams.composite_optimizer_spec
    switch = tf.less(step_var, spec.switch_after_steps)
    base_rate = tf.cond(switch, lambda: tf.constant(spec.method1.learning_rate),
                        lambda: tf.constant(spec.method2.learning_rate))
  return tf.train.exponential_decay(
      base_rate,
      step_var,
      hyperparams.decay_steps,
      hyperparams.decay_base,
      staircase=hyperparams.decay_staircase) 
Example #18
Source File: digraph_ops_test.py    From DOTA_models with Apache License 2.0
def testArcSourcePotentialsFromTokens(self):
    with self.test_session():
      tokens = tf.constant([[[4, 5, 6],
                             [5, 6, 7],
                             [6, 7, 8]],
                            [[6, 7, 8],
                             [5, 6, 7],
                             [4, 5, 6]]], tf.float32)
      weights = tf.constant([2, 3, 5], tf.float32)

      arcs = digraph_ops.ArcSourcePotentialsFromTokens(tokens, weights)

      self.assertAllEqual(arcs.eval(), [[[53, 53, 53],
                                         [63, 63, 63],
                                         [73, 73, 73]],
                                        [[73, 73, 73],
                                         [63, 63, 63],
                                         [53, 53, 53]]]) 
Example #19
Source File: digraph_ops_test.py    From DOTA_models with Apache License 2.0
def testRootPotentialsFromTokens(self):
    with self.test_session():
      root = tf.constant([1, 2], tf.float32)
      tokens = tf.constant([[[4, 5, 6],
                             [5, 6, 7],
                             [6, 7, 8]],
                            [[6, 7, 8],
                             [5, 6, 7],
                             [4, 5, 6]]], tf.float32)
      weights = tf.constant([[2, 3, 5],
                             [7, 11, 13]],
                            tf.float32)

      roots = digraph_ops.RootPotentialsFromTokens(root, tokens, weights)

      self.assertAllEqual(roots.eval(), [[375, 447, 519],
                                         [519, 447, 375]]) 
Example #20
Source File: train.py    From DOTA_models with Apache License 2.0
def _setup_learning_rate(config, global_step):
  """Sets up the learning rate with optional exponential decay.

  Args:
    config: Object containing learning rate configuration parameters.
    global_step: Tensor; the global step.

  Returns:
    learning_rate: Tensor; the learning rate with exponential decay.
  """
  if config.learning_rate_decay_factor > 0:
    learning_rate = tf.train.exponential_decay(
        learning_rate=float(config.learning_rate),
        global_step=global_step,
        decay_steps=config.learning_rate_decay_steps,
        decay_rate=config.learning_rate_decay_factor,
        staircase=False)
  else:
    learning_rate = tf.constant(config.learning_rate)
  return learning_rate 
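A hypothetical usage sketch: any object exposing the three config fields read above will do; the namedtuple name and the values here are invented.

import collections
import tensorflow as tf

LRConfig = collections.namedtuple(
    'LRConfig',
    ['learning_rate', 'learning_rate_decay_factor', 'learning_rate_decay_steps'])

# Decay the base rate by 0.5 every 10000 steps (made-up numbers).
config = LRConfig(learning_rate=0.001,
                  learning_rate_decay_factor=0.5,
                  learning_rate_decay_steps=10000)
global_step = tf.train.get_or_create_global_step()
learning_rate = _setup_learning_rate(config, global_step)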
Example #21
Source File: digraph_ops_test.py    From DOTA_models with Apache License 2.0
def testCombineArcAndRootPotentials(self):
    with self.test_session():
      arcs = tf.constant([[[1, 2, 3],
                           [2, 3, 4],
                           [3, 4, 5]],
                          [[3, 4, 5],
                           [2, 3, 4],
                           [1, 2, 3]]], tf.float32)
      roots = tf.constant([[6, 7, 8],
                           [8, 7, 6]], tf.float32)

      potentials = digraph_ops.CombineArcAndRootPotentials(arcs, roots)

      self.assertAllEqual(potentials.eval(), [[[6, 2, 3],
                                               [2, 7, 4],
                                               [3, 4, 8]],
                                              [[8, 4, 5],
                                               [2, 7, 4],
                                               [1, 2, 6]]]) 
Example #22
Source File: optimization_test.py    From BERT-Classification-Tutorial with Apache License 2.0
def test_adam(self):
        with self.test_session() as sess:
            w = tf.get_variable(
                "w",
                shape=[3],
                initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
            x = tf.constant([0.4, 0.2, -0.5])
            loss = tf.reduce_mean(tf.square(x - w))
            tvars = tf.trainable_variables()
            grads = tf.gradients(loss, tvars)
            global_step = tf.train.get_or_create_global_step()
            optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
            train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess.run(init_op)
            for _ in range(100):
                sess.run(train_op)
            w_np = sess.run(w)
            self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2) 
Example #23
Source File: digraph_ops_test.py    From DOTA_models with Apache License 2.0
def testLabelPotentialsFromTokens(self):
    with self.test_session():
      tokens = tf.constant([[[1, 2],
                             [3, 4],
                             [5, 6]],
                            [[6, 5],
                             [4, 3],
                             [2, 1]]], tf.float32)

      weights = tf.constant([[ 2,  3],
                             [ 5,  7],
                             [11, 13]], tf.float32)

      labels = digraph_ops.LabelPotentialsFromTokens(tokens, weights)

      self.assertAllEqual(labels.eval(),
                          [[[  8,  19,  37],
                            [ 18,  43,  85],
                            [ 28,  67, 133]],
                           [[ 27,  65, 131],
                            [ 17,  41,  83],
                            [  7,  17,  35]]]) 
Example #24
Source File: networks.py    From disentangling_conditional_gans with MIT License
def leaky_relu(x, alpha=0.2):
    with tf.name_scope('LeakyRelu'):
        alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
        return tf.maximum(x * alpha, x)

#----------------------------------------------------------------------------
# Nearest-neighbor upscaling layer. 
Example #25
Source File: metrics.py    From DOTA_models with Apache License 2.0
def sequence_accuracy(predictions, targets, rej_char, streaming=False):
  """Computes sequence level accuracy.

  Both input tensors should have the same shape: [batch_size x seq_length].

  Args:
    predictions: predicted character classes.
    targets: ground truth character classes.
    rej_char: the character id used to mark empty element (end of sequence).
    streaming: if True, uses the streaming mean from the slim.metric module.

  Returns:
    An update op for execution and a value tensor whose value on evaluation
    returns the total sequence accuracy.
  """

  with tf.variable_scope('SequenceAccuracy'):
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())

    targets = tf.to_int32(targets)
    const_rej_char = tf.constant(
        rej_char, shape=targets.get_shape(), dtype=tf.int32)
    include_mask = tf.not_equal(targets, const_rej_char)
    include_predictions = tf.to_int32(
        tf.where(include_mask, predictions,
                 tf.zeros_like(predictions) + rej_char))
    correct_chars = tf.to_float(tf.equal(include_predictions, targets))
    correct_chars_counts = tf.cast(
        tf.reduce_sum(correct_chars, reduction_indices=[1]), dtype=tf.int32)
    target_length = targets.get_shape().dims[1].value
    target_chars_counts = tf.constant(
        target_length, shape=correct_chars_counts.get_shape())
    accuracy_per_example = tf.to_float(
        tf.equal(correct_chars_counts, target_chars_counts))
    if streaming:
      return tf.contrib.metrics.streaming_mean(accuracy_per_example)
    else:
      return tf.reduce_mean(accuracy_per_example) 
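A hypothetical usage sketch with made-up predictions and targets; rej_char (99 here) marks the padding that follows the end of each sequence.

import tensorflow as tf

predictions = tf.constant([[3, 7, 99], [4, 4, 4]], dtype=tf.int64)
targets = tf.constant([[3, 7, 99], [4, 5, 99]], dtype=tf.int64)
accuracy = sequence_accuracy(predictions, targets, rej_char=99)

with tf.Session() as sess:
    print(sess.run(accuracy))  # 0.5: the first sequence matches, the second does not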
Example #26
Source File: sequence_layers.py    From DOTA_models with Apache License 2.0
def orthogonal_initializer(shape, dtype=tf.float32, *args, **kwargs):
  """Generates orthonormal matrices with random values.

  Orthonormal initialization is important for RNNs:
    http://arxiv.org/abs/1312.6120
    http://smerity.com/articles/2016/orthogonal_init.html

  For non-square shapes the returned matrix will be semi-orthonormal: if the
  number of columns exceeds the number of rows, then the rows are orthonormal
  vectors; but if the number of rows exceeds the number of columns, then the
  columns are orthonormal vectors.

  We use an SVD decomposition to generate an orthonormal matrix with random
  values, the same way it is done in the Lasagne library for Theano. Note
  that both u and v returned by the SVD are orthogonal and random. We just need
  to pick one with the right shape.

  Args:
    shape: a shape of the tensor matrix to initialize.
    dtype: a dtype of the initialized tensor.
    *args: not used.
    **kwargs: not used.

  Returns:
    An initialized tensor.
  """
  del args
  del kwargs
  flat_shape = (shape[0], np.prod(shape[1:]))
  w = np.random.randn(*flat_shape)
  u, _, v = np.linalg.svd(w, full_matrices=False)
  w = u if u.shape == flat_shape else v
  return tf.constant(w.reshape(shape), dtype=dtype) 
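A hypothetical usage sketch: since the function has the (shape, dtype) signature TensorFlow expects of an initializer callable, it can be passed directly to tf.get_variable (the variable name and shape below are invented).

import tensorflow as tf

recurrent_weights = tf.get_variable(
    'recurrent_weights', shape=[128, 128], initializer=orthogonal_initializer)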
Example #27
Source File: networks.py    From disentangling_conditional_gans with MIT License
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
    if fan_in is None: fan_in = np.prod(shape[:-1])
    std = gain / np.sqrt(fan_in) # He init
    if use_wscale:
        wscale = tf.constant(np.float32(std), name='wscale')
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
    else:
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))

#----------------------------------------------------------------------------
# Fully-connected layer. 
Example #28
Source File: model_test.py    From DOTA_models with Apache License 2.0
def encode_coordinates_alt(self, net):
    """An alternative implemenation for the encoding coordinates.

    Args:
      net: a tensor of shape=[batch_size, height, width, num_features]

    Returns:
      a list of tensors with encoded image coordinates in them.
    """
    batch_size, h, w, _ = net.shape.as_list()
    h_loc = [
      tf.tile(
          tf.reshape(
              tf.contrib.layers.one_hot_encoding(
                  tf.constant([i]), num_classes=h), [h, 1]), [1, w])
      for i in xrange(h)
    ]
    h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2)
    w_loc = [
      tf.tile(
          tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=w),
          [h, 1]) for i in xrange(w)
    ]
    w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2)
    loc = tf.concat([h_loc, w_loc], 2)
    loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
    return tf.concat([net, loc], 3) 
Example #29
Source File: model.py    From DOTA_models with Apache License 2.0
def __init__(self, charset, default_character='?'):
    """Creates a lookup table.

    Args:
      charset: a dictionary with id-to-character mapping.
      default_character: a character used for ids not covered by the charset.
    """
    mapping_strings = tf.constant(_dict_to_array(charset, default_character))
    self.table = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping=mapping_strings, default_value=default_character) 
Example #30
Source File: model_test.py    From DOTA_models with Apache License 2.0
def test_text_corresponds_to_ids(self):
    charset = create_fake_charset(36)
    ids = tf.constant(
        [[17, 14, 21, 21, 24], [32, 24, 27, 21, 13]], dtype=tf.int64)
    charset_mapper = model.CharsetMapper(charset)

    with self.test_session() as sess:
      tf.tables_initializer().run()
      text = sess.run(charset_mapper.get_text(ids))

    self.assertAllEqual(text, ['hello', 'world'])