Python tensorflow.compat.v1.one_hot() Examples

The following are 30 code examples of tensorflow.compat.v1.one_hot(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1, or try the search function.
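Before the project examples, here is a minimal standalone sketch of the call itself (the index and depth values are chosen purely for illustration), assuming TensorFlow 2.x with the v1 compatibility API:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Encode the class indices [0, 2, 1] as rows of a depth-4 one-hot matrix.
indices = tf.constant([0, 2, 1])
one_hot = tf.one_hot(indices, depth=4, on_value=1.0, off_value=0.0, dtype=tf.float32)

with tf.Session() as sess:
    print(sess.run(one_hot))
    # [[1. 0. 0. 0.]
    #  [0. 0. 1. 0.]
    #  [0. 1. 0. 0.]]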
Example #1
Source File: slate_decomp_q_agent.py    From recsim with Apache License 2.0    6 votes
def _build_select_slate_op(self):
    p_no_click = self._prob_no_click_ph
    p = self._doc_affinity_scores_ph
    q = self._net_outputs.q_values[0]
    with tf.name_scope('select_slate'):
      self._output_slate = self._select_slate_fn(self._slate_size, p_no_click,
                                                 p, q)

    self._output_slate = tf.Print(
        self._output_slate, [tf.constant('cp 1'), self._output_slate, p, q],
        summarize=10000)
    self._output_slate = tf.reshape(self._output_slate, (self._slate_size,))

    self._action_counts = tf.get_variable(
        'action_counts',
        shape=[self._num_candidates],
        initializer=tf.zeros_initializer())
    output_slate = tf.reshape(self._output_slate, [-1])
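    # One-hot encode each selected candidate index so per-candidate counts can be accumulated below.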
    output_one_hot = tf.one_hot(output_slate, self._num_candidates)
    update_ops = []
    for i in range(self._slate_size):
      update_ops.append(tf.assign_add(self._action_counts, output_one_hot[i]))
    self._select_action_update_op = tf.group(*update_ops) 
Example #2
Source File: neural_stack.py    From tensor2tensor with Apache License 2.0    6 votes
def initialize_write_strengths(self, batch_size):
    """Initialize write strengths which write in both directions.

    Unlike in Grefenstette et al., this writes out from the center of the
    memory so that the entire memory does not need to be shifted forward at
    each step.

    Args:
      batch_size: The size of the current batch.

    Returns:
      A tf.float32 tensor of shape [num_write_heads, memory_size, 1].
    """
    memory_center = self._memory_size // 2
    return tf.expand_dims(
        tf.concat([
            # The write strength for the deque bottom.
            # Should be shifted back at each timestep.
            tf.one_hot([[memory_center - 1]] * batch_size,
                       depth=self._memory_size, dtype=tf.float32),
            # The write strength for the deque top.
            # Should be shifted forward at each timestep.
            tf.one_hot([[memory_center]] * batch_size,
                       depth=self._memory_size, dtype=tf.float32)
        ], axis=1), axis=3) 
Example #3
Source File: metrics_test.py    From tensor2tensor with Apache License 2.0    6 votes
def testMultilabelMatch3(self):
    predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
    targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
    weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
    targets *= weights

    predictions_repeat = np.repeat(predictions, 10, axis=1)
    expected = (predictions_repeat == targets).astype(float)
    expected = np.sum(expected, axis=(1, 2, 3))
    expected = np.minimum(expected / 3.0, 1.)
    expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
    with self.test_session() as session:
      scores, weights_ = metrics.multilabel_accuracy_match3(
          tf.one_hot(predictions, depth=5, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a, a_op = tf.metrics.mean(scores, weights_)
      session.run(tf.local_variables_initializer())
      session.run(tf.global_variables_initializer())
      _ = session.run(a_op)
      actual = session.run(a)
    self.assertAlmostEqual(actual, expected, places=6) 
Example #4
Source File: metrics_test.py    From tensor2tensor with Apache License 2.0    6 votes
def testNegativeLogPerplexityMaskedAssert(self):
    predictions = np.random.randint(4, size=(12, 12, 12, 1))
    targets = np.random.randint(4, size=(12, 12, 12, 1))
    features = {}

    with self.assertRaisesRegexp(
        ValueError,
        'masked_neg_log_perplexity requires targets_mask feature'):
      with self.test_session() as session:
        scores, _ = metrics.padded_neg_log_perplexity_with_masking(
            tf.one_hot(predictions, depth=4, dtype=tf.float32),
            tf.constant(targets, dtype=tf.int32),
            features)
        a = tf.reduce_mean(scores)
        session.run(tf.global_variables_initializer())
        _ = session.run(a) 
Example #5
Source File: metrics_test.py    From tensor2tensor with Apache License 2.0    6 votes
def testSequenceEditDistanceMetric(self):
    predictions = np.array([[3, 4, 5, 1, 0, 0],
                            [2, 1, 3, 4, 0, 0],
                            [2, 1, 3, 4, 0, 0]])
    # Targets are just a bit different:
    #  - first sequence has a different prediction
    #  - second sequence has a different prediction and one extra step
    #  - third sequence is identical
    targets = np.array([[5, 4, 5, 1, 0, 0],
                        [2, 5, 3, 4, 1, 0],
                        [2, 1, 3, 4, 0, 0]])
    # Reshape to match expected input format by metric fns.
    predictions = np.reshape(predictions, [3, 6, 1, 1])
    targets = np.reshape(targets, [3, 6, 1, 1])
    with self.test_session() as session:
      scores, weight = metrics.sequence_edit_distance(
          tf.one_hot(predictions, depth=6, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      session.run(tf.global_variables_initializer())
      actual_scores, actual_weight = session.run([scores, weight])
    self.assertAlmostEqual(actual_scores, 3.0 / 13)
    self.assertEqual(actual_weight, 13) 
Example #6
Source File: metrics_test.py    From tensor2tensor with Apache License 2.0    6 votes
def testPrefixAccuracy(self):
    vocab_size = 10
    predictions = tf.one_hot(
        tf.constant([[[1], [2], [3], [4], [9], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [0]]]),
        vocab_size)
    labels = tf.expand_dims(
        tf.constant([[[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [0]]]),
        axis=-1)
    expected_accuracy = np.average([4.0 / 8.0,
                                    5.0 / 8.0,
                                    5.0 / 7.0])
    accuracy, _ = metrics.prefix_accuracy(predictions, labels)
    with self.test_session() as session:
      accuracy_value = session.run(accuracy)
      self.assertAlmostEqual(expected_accuracy, accuracy_value) 
Example #7
Source File: metrics_test.py    From tensor2tensor with Apache License 2.0    6 votes
def testAccuracyTopKMetric(self):
    predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
    targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
    expected = np.mean((predictions == targets).astype(float))
    with self.test_session() as session:
      predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
      scores1, _ = metrics.padded_accuracy_topk(
          predicted, tf.constant(targets, dtype=tf.int32), k=1)
      scores2, _ = metrics.padded_accuracy_topk(
          predicted, tf.constant(targets, dtype=tf.int32), k=7)
      a1 = tf.reduce_mean(scores1)
      a2 = tf.reduce_mean(scores2)
      session.run(tf.global_variables_initializer())
      actual1, actual2 = session.run([a1, a2])
    self.assertAlmostEqual(actual1, expected)
    self.assertAlmostEqual(actual2, 1.0) 
Example #8
Source File: metrics.py    From tensor2tensor with Apache License 2.0    6 votes
def sigmoid_recall_one_hot(logits, labels, weights_fn=None):
  """Calculate recall for a set, given one-hot labels and logits.

  Predictions are converted to one-hot by setting
  predictions[example][argmax(example)] = 1.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    recall (scalar), weights
  """
  with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, recall = tf.metrics.recall(labels=labels, predictions=predictions)
    return recall, tf.constant(1.0) 
Example #9
Source File: metrics.py    From tensor2tensor with Apache License 2.0    6 votes
def sigmoid_precision_one_hot(logits, labels, weights_fn=None):
  """Calculate precision for a set, given one-hot labels and logits.

  Predictions are converted to one-hot by setting
  predictions[example][argmax(example)] = 1.

  Args:
    logits: Tensor of size [batch-size, o=1, p=1, num-classes]
    labels: Tensor of size [batch-size, o=1, p=1, num-classes]
    weights_fn: Function that takes in labels and weighs examples (unused)
  Returns:
    precision (scalar), weights
  """
  with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]):
    del weights_fn
    num_classes = logits.shape[-1]
    predictions = tf.nn.sigmoid(logits)
    predictions = tf.argmax(predictions, -1)
    predictions = tf.one_hot(predictions, num_classes)
    _, precision = tf.metrics.precision(labels=labels, predictions=predictions)
    return precision, tf.constant(1.0) 
Example #10
Source File: metrics.py    From tensor2tensor with Apache License 2.0    6 votes
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Example #11
Source File: metrics.py    From tensor2tensor with Apache License 2.0    6 votes
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions: A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights 
Example #12
Source File: neural_stack.py    From tensor2tensor with Apache License 2.0    6 votes
def initialize_write_strengths(self, batch_size):
    """Initialize write strengths to write to the first memory address.

    This is exposed as its own function so that it can be overridden to provide
    alternate write addressing schemes.

    Args:
      batch_size: The size of the current batch.

    Returns:
      A tf.float32 tensor of shape [num_write_heads, memory_size, 1] where the
      first element in the second dimension is set to 1.0.
    """
    return tf.expand_dims(
        tf.one_hot([[0] * self._num_write_heads] * batch_size,
                   depth=self._memory_size, dtype=tf.float32), axis=3) 
Example #13
Source File: transformer_nat.py    From tensor2tensor with Apache License 2.0    6 votes
def vq_nearest_neighbor(x, hparams):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = 2**hparams.bottleneck_bits
  means = hparams.means
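  # Pairwise squared distances via the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2*x.m^T.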
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if hparams.bottleneck_kind == "em":
    x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=bottleneck_size)
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    x_means_idx = tf.argmax(-dist, axis=-1)
    x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size)
  x_means = tf.matmul(x_means_hot, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss 
Example #14
Source File: universal_transformer_util.py    From tensor2tensor with Apache License 2.0    6 votes
def fill_memory_slot(memory, value, index):
  """Fills the memory slot at a particular index with the given value.

  Args:
    memory: a 4-d tensor [memory_size, batch, length, channel] containing
      the state of all steps
    value: a 3-d tensor [batch, length, channel] as the state
    index: integer in [0, memory_size)

  Returns:
    filled memory

  """
  mask = tf.to_float(
      tf.one_hot(index,
                 tf.shape(memory)[0])[:, None, None, None])
  fill_memory = (1 - mask) * memory + mask * value[None, ...]
  return fill_memory 
Example #15
Source File: autoencoders.py    From tensor2tensor with Apache License 2.0    6 votes
def gumbel_sample(self, reconstr_gan):
    hparams = self.hparams
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    vocab_size = self._problem_hparams.vocab_size["targets"]
    if hasattr(self._hparams, "vocab_divisor"):
      vocab_size += (-vocab_size) % self._hparams.vocab_divisor
    reconstr_gan = tf.nn.log_softmax(reconstr_gan)
    if is_training and hparams.gumbel_temperature > 0.0:
      gumbel_samples = discretization.gumbel_sample(
          common_layers.shape_list(reconstr_gan))
      gumbel_samples *= hparams.gumbel_noise_factor
      reconstr_gan += gumbel_samples
      reconstr_sample = latent_layers.multinomial_sample(
          reconstr_gan, temperature=hparams.gumbel_temperature)
      reconstr_gan = tf.nn.softmax(reconstr_gan / hparams.gumbel_temperature)
    else:
      reconstr_sample = tf.argmax(reconstr_gan, axis=-1)
      reconstr_gan = tf.nn.softmax(reconstr_gan / 0.1)  # Sharpen a bit.
    # Use 1-hot forward, softmax backward.
    reconstr_hot = tf.one_hot(reconstr_sample, vocab_size)
    reconstr_gan += reconstr_hot - tf.stop_gradient(reconstr_gan)
    return reconstr_gan 
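Note that the last two lines of this example form a straight-through estimator: adding reconstr_hot - tf.stop_gradient(reconstr_gan) makes the forward value exactly one-hot while gradients still flow through the softmax, which is what the "1-hot forward, softmax backward" comment refers to.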
Example #16
Source File: modeling.py    From gpt2-ml with Apache License 2.0    6 votes
def lm_loss(self):
        """
        :return: Scalar cross-entropy loss averaged over non-padding target tokens.
        """
        target_ids_flat = tf.reshape(self.target_ids, [-1])

        # 1 if it's valid and 0 otherwise.
        label_weights = tf.cast(tf.not_equal(target_ids_flat, self.pad_token_id), dtype=self.logits_flat.dtype)

        # [batch_size * seq_length, vocab_size]
        one_hot_labels = tf.one_hot(target_ids_flat,
                                    depth=self.config.vocab_size,
                                    dtype=self.logits_flat.dtype)

        # [batch_size * seq_length, vocab_size]
        logprobs_flat = tf.nn.log_softmax(self.logits_flat, axis=-1)

        per_example_loss = -tf.reduce_sum(logprobs_flat * one_hot_labels, axis=[-1])

        # per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_flat, labels=target_ids_flat)

        numerator = tf.reduce_sum(label_weights * per_example_loss)
        denominator = tf.reduce_sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss 
Example #17
Source File: metrics.py    From tensor2tensor with Apache License 2.0    6 votes
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`].  Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`].  Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0) 
Example #18
Source File: loss.py    From interval-bound-propagation with Apache License 2.0    6 votes
def _build_nominal_loss(self, labels):
    """Build natural cross-entropy loss on clean data."""
    # Cross-entropy.
    nominal_logits = self._predictor.logits
    if self._label_smoothing > 0:
      num_classes = nominal_logits.shape[1].value
      one_hot_labels = tf.one_hot(labels, num_classes)
      smooth_positives = 1. - self._label_smoothing
      smooth_negatives = self._label_smoothing / num_classes
      one_hot_labels = one_hot_labels * smooth_positives + smooth_negatives
      nominal_cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          labels=one_hot_labels, logits=nominal_logits)
      self._one_hot_labels = one_hot_labels
    else:
      nominal_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=nominal_logits)
    self._cross_entropy = tf.reduce_mean(nominal_cross_entropy)
    # Accuracy.
    nominal_correct_examples = tf.equal(labels, tf.argmax(nominal_logits, 1))
    self._nominal_accuracy = tf.reduce_mean(
        tf.cast(nominal_correct_examples, tf.float32)) 
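As a quick check of the smoothing arithmetic above: with, say, 4 classes and label_smoothing = 0.1, smooth_positives is 0.9 and smooth_negatives is 0.025, so the true class ends up with 1.0 * 0.9 + 0.025 = 0.925, every other class gets 0.025, and each smoothed row still sums to 1.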
Example #19
Source File: crown.py    From interval-bound-propagation with Apache License 2.0    6 votes
def create_initial_backward_bounds(spec, modules):
  """Create the initial BackwardBounds according to specification."""
  last_bounds = bounds.IntervalBounds.convert(modules[-1].input_bounds)
  if isinstance(spec, specification_lib.ClassificationSpecification):
    c_correct = tf.expand_dims(
        tf.one_hot(spec.correct_idx[:, 1], spec.num_specifications + 1), 1)
    c_wrong = tf.one_hot(spec.wrong_idx[:, :, 1], spec.num_specifications + 1)
    c = c_wrong - c_correct
    b = tf.zeros(spec.num_specifications)
    lb = ub = fastlin.LinearExpression(w=c, b=b, lower=last_bounds.lower,
                                       upper=last_bounds.upper)
  elif isinstance(spec, specification_lib.LinearSpecification):
    b = spec.d if spec.d is not None else tf.zeros(spec.num_specifications)
    lb = ub = fastlin.LinearExpression(w=spec.c, b=b, lower=last_bounds.lower,
                                       upper=last_bounds.upper)
  else:
    raise ValueError('Unknown specification class type "{}"'.format(str(spec)))
  return BackwardBounds(lb, ub) 
Example #20
Source File: discrete.py    From tensor2robot with Apache License 2.0    6 votes
def GetDiscreteActionLoss(logits, action_labels, bin_centers, num_bins):
  """Convert labels to one-hot, compute cross-entropy loss, and return loss.

  Args:
    logits: Tensor corresponding to the predicted action logits, with size
      [batch_size, timesteps, action_dim*num_bins]
    action_labels: Tensor corresponding to the real valued action labels, with
      size [batch_size, 1, timesteps, action_dim]
    bin_centers: numpy array of size [num_bins, action_dim] corresponding to
      the centers of each bin for each dimension.
    num_bins: number of discrete bins.
  Returns:
    Scalar tensor, cross entropy loss between the predicted actions and labels.
  """
  action_labels = tf.expand_dims(action_labels, -2)
  bin_centers = tf.constant(bin_centers, dtype=tf.float32)
  while len(bin_centers.shape) < len(action_labels.shape):
    bin_centers = tf.expand_dims(bin_centers, 0)
  discrete_labels = tf.argmin((action_labels - bin_centers)**2, -2)
  onehot_labels = tf.one_hot(discrete_labels, num_bins)
  onehot_labels = tf.reshape(onehot_labels, (-1, num_bins))
  logits = tf.reshape(logits, (-1, num_bins))
  loss = tf.nn.softmax_cross_entropy_with_logits_v2(onehot_labels, logits)
  loss = tf.reduce_mean(loss)
  return loss 
Example #21
Source File: visualization.py    From tensor2robot with Apache License 2.0    6 votes
def plot_labels(labels, max_label=1, predictions=None, name=''):
  """Plots integer labels and optionally predictions as images.

  By default takes the first 3 in the batch.

  Args:
    labels: Batch x 1 size tensor of labels
    max_label:  An integer indicating the largest possible label
    predictions: Batch x max_label size tensor of predictions (range 0-1.0)
    name: string to name tensorflow summary
  """
  if max_label > 1:
    labels = tf.one_hot(
        labels, max_label, on_value=1.0, off_value=0.0, dtype=tf.float32)
  labels_image = tf.reshape(labels[:3], (1, 3, max_label, 1))
  empty_image = tf.zeros((1, 3, max_label, 1))
  image = tf.concat([labels_image, empty_image, empty_image], axis=-1)
  if predictions is not None:
    pred_image = tf.reshape(predictions[:3], (1, 3, 4, 1))
    image2 = tf.concat([empty_image, pred_image, empty_image], axis=-1)
    image = tf.concat([image, image2], axis=1)
  tf.summary.image('labels_' + six.ensure_str(name), image, max_outputs=1) 
Example #22
Source File: mdn_test.py    From tensor2robot with Apache License 2.0    6 votes
def test_gaussian_mixture_approximate_mode(self):
    sample_size = 10
    num_alphas = 5
    # Manually set alphas to 1 in zero-th column and 0 elsewhere, making the
    # first component the most likely.
    alphas = tf.one_hot(2 * [0], num_alphas)
    mus = tf.random.normal((2, num_alphas, sample_size))
    sigmas = tf.ones_like(mus)
    mix_dist = tfp.distributions.Categorical(logits=alphas)
    comp_dist = tfp.distributions.MultivariateNormalDiag(
        loc=mus, scale_diag=sigmas)
    gm = tfp.distributions.MixtureSameFamily(
        mixture_distribution=mix_dist, components_distribution=comp_dist)
    approximate_mode = mdn.gaussian_mixture_approximate_mode(gm)
    with self.test_session() as sess:
      approximate_mode_np, mus_np = sess.run([approximate_mode, mus])
      # The approximate mode should be the mean of the zero-th (most likely)
      # component.
      self.assertAllClose(approximate_mode_np, mus_np[:, 0, :]) 
Example #23
Source File: distri.py    From nni with MIT License    6 votes
def neglogp(self, x):
        """
        return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
        Note: we can't use sparse_softmax_cross_entropy_with_logits because
              the implementation does not allow second-order derivatives...
        """
        if x.dtype in {tf.uint8, tf.int32, tf.int64}:
            # one-hot encoding
            x_shape_list = x.shape.as_list()
            logits_shape_list = self.logits.get_shape().as_list()[:-1]
            for xs, ls in zip(x_shape_list, logits_shape_list):
                if xs is not None and ls is not None:
                    assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls)

            x = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
        else:
            # already encoded
            assert x.shape.as_list() == self.logits.shape.as_list()

        return tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.logits,
            labels=x) 
Example #24
Source File: mnist_benchmark.py    From autograph with Apache License 2.0    6 votes
def get_data_and_params():
  """Set up input dataset and variables."""
  (train_x, train_y), _ = tf.keras.datasets.mnist.load_data()
  tf.set_random_seed(0)
  hparams = contrib_training.HParams(
      batch_size=200,
      learning_rate=0.1,
      train_steps=101,
  )
  dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))
  dataset = dataset.repeat()
  dataset = dataset.shuffle(hparams.batch_size * 10)
  dataset = dataset.batch(hparams.batch_size)

  def reshape_ex(x, y):
    return (tf.to_float(tf.reshape(x, (-1, 28 * 28))) / 256.0,
            tf.one_hot(tf.squeeze(y), 10))

  dataset = dataset.map(reshape_ex)
  w = tf.get_variable('w0', (28 * 28, 10))
  b = tf.get_variable('b0', (10,), initializer=tf.zeros_initializer())
  opt = tf.train.GradientDescentOptimizer(hparams.learning_rate)
  return dataset, opt, hparams, w, b 
Example #25
Source File: beam_search.py    From language with Apache License 2.0    6 votes
def _one_hot_tensor_3d(x, index, total_length):
  """One-hot encodes a 2d Tensor in a 3d Tensor.

  This could potentially be implemented in a simpler way using tf.pad but
  this method is compatible with XLA's restriction on static shapes defined
  by constants.

  Args:
    x: A Tensor of shape [m, n].
    index: The "on" index of the inner dimension of the output Tensor.
    total_length: Total length of the output Tensor.

  Returns:
    A Tensor of shape [m, n, total_length].
  """
  m = x.get_shape()[0]
  n = x.get_shape()[1]
  x = tf.expand_dims(x, 2)
  index_tiled = tf.tile(tf.expand_dims(index, 0), [m * n])
  one_hot = tf.one_hot(index_tiled, total_length, on_value=1)
  one_hot = tf.cast(one_hot, dtype=x.dtype)
  one_hot = tf.reshape(one_hot, [m, n, total_length])
  return one_hot * x 
Example #26
Source File: model_fns.py    From language with Apache License 2.0    6 votes
def sparse_reduce(sp_tensor, rank, agg_fn="sum", axis=-1):
  """Reduce SparseTensor along the given axis.

  Args:
    sp_tensor: SparseTensor of arbitrary rank.
    rank: Integer rank of the sparse tensor.
    agg_fn: Reduce function for aggregation.
    axis: Integer specifying axis to sum over.

  Returns:
    sp_tensor: SparseTensor of one less rank.
  """
  if axis < 0:
    axis = rank + axis
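  # Boolean mask that is False at `axis` and True everywhere else, used to drop that axis.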
  axes_to_keep = tf.one_hot(
      axis, rank, on_value=False, off_value=True, dtype=tf.bool)
  indices_to_keep = tf.boolean_mask(sp_tensor.indices, axes_to_keep, axis=1)
  new_shape = tf.boolean_mask(sp_tensor.dense_shape, axes_to_keep)
  indices_to_keep.set_shape([None, rank - 1])
  indices, values = aggregate_sparse_indices(
      indices_to_keep, sp_tensor.values, new_shape, agg_fn=agg_fn)
  return tf.sparse.reorder(tf.SparseTensor(indices, values, new_shape)) 
Example #27
Source File: run_pretraining.py    From albert with Apache License 2.0    6 votes
def get_sentence_order_output(albert_config, input_tensor, labels):
  """Get loss and log probs for the next sentence prediction."""

  # Simple binary classification. Note that 0 is "next sentence" and 1 is
  # "random sentence". This weight matrix is not used after pre-training.
  with tf.variable_scope("cls/seq_relationship"):
    output_weights = tf.get_variable(
        "output_weights",
        shape=[2, albert_config.hidden_size],
        initializer=modeling.create_initializer(
            albert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[2], initializer=tf.zeros_initializer())

    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs) 
Example #28
Source File: metrics_test.py    From tensor2tensor with Apache License 2.0    5 votes
def testNegativeLogPerplexity(self):
    predictions = np.random.randint(4, size=(12, 12, 12, 1))
    targets = np.random.randint(4, size=(12, 12, 12, 1))
    with self.test_session() as session:
      scores, _ = metrics.padded_neg_log_perplexity(
          tf.one_hot(predictions, depth=4, dtype=tf.float32),
          tf.constant(targets, dtype=tf.int32))
      a = tf.reduce_mean(scores)
      session.run(tf.global_variables_initializer())
      actual = session.run(a)
    self.assertEqual(actual.shape, ()) 
Example #29
Source File: modalities.py    From tensor2tensor with Apache License 2.0    5 votes
def symbol_one_hot_bottom(x, model_hparams, vocab_size):
  del model_hparams  # unused arg
  return tf.one_hot(x, vocab_size) 
Example #30
Source File: modalities.py    From tensor2tensor with Apache License 2.0    5 votes
def image_targets_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for target images."""
  pixel_embedding_size = 64
  inputs = x
  with tf.variable_scope("image_modality"):
    if not tf.executing_eagerly():
      tf.summary.image(
          "targets_bottom",
          common_layers.tpu_safe_image_summary(inputs),
          max_outputs=1)
    inputs_shape = common_layers.shape_list(inputs)
    if len(inputs_shape) != 4:
      raise ValueError("Assuming images given as int tensors in the format "
                       "[batch, height, width, channels] (256 values).")
    # We embed each of 256=vocab_size possible pixel values.
    embedding_var = tf.get_variable(
        "pixel_embedding",
        [vocab_size, pixel_embedding_size])
    hot_inputs = tf.one_hot(tf.to_int32(inputs), vocab_size)
    hot_inputs = tf.reshape(hot_inputs, [-1, vocab_size])
    embedded = tf.matmul(hot_inputs, embedding_var)
    # Let's now merge all channels that were embedded into a single vector.
    merged_size = pixel_embedding_size * inputs_shape[3]
    embedded = tf.reshape(embedded, inputs_shape[:3] + [merged_size])
    merged = tf.layers.dense(
        embedded,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_channels")
    return merged