Python tensorflow.python.ops.math_ops.reduce_logsumexp() Examples

The following are 30 code examples of tensorflow.python.ops.math_ops.reduce_logsumexp(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module tensorflow.python.ops.math_ops, or try the search function.
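As a quick orientation before the examples, here is a framework-free NumPy sketch of what reduce_logsumexp computes: log(sum(exp(x))) evaluated with the usual max-subtraction trick so that large inputs do not overflow. The function name and code below are illustrative only and are not taken from any of the projects listed here.

import numpy as np

def logsumexp_reference(x, axis=None, keepdims=False):
    # log(sum(exp(x))) with the maximum subtracted first for numerical stability.
    x = np.asarray(x, dtype=np.float64)
    m = np.max(x, axis=axis, keepdims=True)
    out = np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True)) + m
    return out if keepdims else np.squeeze(out, axis=axis)

print(logsumexp_reference([1000.0, 1001.0, 1002.0, 1003.0]))  # ~1003.44, no overflow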
Example #1
Source File: bijector.py    From keras-lambda with MIT License
def _forward_log_det_jacobian(self, x):
    if self._static_event_ndims == 0:
      return x - 2. * nn_ops.softplus(x)
    else:
      # This code is similar to nn_ops.log_softmax but different because we have
      # an implicit zero column to handle. I.e., instead of:
      #   reduce_sum(logits - reduce_sum(exp(logits), dim))
      # we must do:
      #   log_normalization = 1 + reduce_sum(exp(logits))
      #   -log_normalization + reduce_sum(logits - log_normalization)
      log_normalization = nn_ops.softplus(
          math_ops.reduce_logsumexp(x, reduction_indices=-1, keep_dims=True))
      fldj = (-log_normalization +
              math_ops.reduce_sum(x - log_normalization,
                                  reduction_indices=-1,
                                  keep_dims=True))
      return array_ops.squeeze(fldj, squeeze_dims=-1) 
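The softplus-of-logsumexp step above uses the identity log(1 + sum(exp(x))) = softplus(logsumexp(x)), which is how the implicit zero column enters the normalization. A small NumPy/SciPy check of that identity (an illustrative sketch, not part of the original file):

import numpy as np
from scipy.special import logsumexp

def softplus(z):
    # Numerically stable log(1 + exp(z)).
    return np.logaddexp(0.0, z)

x = np.array([0.3, -1.2, 2.5])
lhs = np.log(1.0 + np.sum(np.exp(x)))  # normalizer including the implicit zero logit
rhs = softplus(logsumexp(x))           # what the bijector computes above
assert np.allclose(lhs, rhs)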
Example #2
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testOverflow(self):
    x = [1000, 1001, 1002, 1003]
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.array(x, dtype=dtype)
      max_np = np.max(x_np)
      with self.assertRaisesRegexp(RuntimeWarning,
                                   "overflow encountered in exp"):
        out = log(np.sum(exp(x_np)))
        if out == np.inf:
          raise RuntimeWarning("overflow encountered in exp")

      with self.test_session(use_gpu=True):
        x_tf = constant_op.constant(x_np, shape=x_np.shape)
        y_tf_np = math_ops.reduce_logsumexp(x_tf).eval()
        y_np = log(np.sum(exp(x_np - max_np))) + max_np
        self.assertAllClose(y_tf_np, y_np) 
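The same overflow is easy to reproduce outside the test harness; the sketch below assumes only NumPy and mirrors the stable reference computation used in the test.

import numpy as np

x = np.array([1000.0, 1001.0, 1002.0, 1003.0], dtype=np.float32)
naive = np.log(np.sum(np.exp(x)))           # exp(1000) overflows float32, so this is inf
m = np.max(x)
stable = np.log(np.sum(np.exp(x - m))) + m  # ~1003.44, what reduce_logsumexp returns
print(naive, stable)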
Example #3
Source File: softmax_centered_impl.py    From lambda-packs with MIT License
def _forward_log_det_jacobian(self, x):
    if self._static_event_ndims == 0:
      return x - 2. * nn_ops.softplus(x)
    else:
      # This code is similar to nn_ops.log_softmax but different because we have
      # an implicit zero column to handle. I.e., instead of:
      #   reduce_sum(logits - reduce_sum(exp(logits), dim))
      # we must do:
      #   log_normalization = 1 + reduce_sum(exp(logits))
      #   -log_normalization + reduce_sum(logits - log_normalization)
      log_normalization = nn_ops.softplus(
          math_ops.reduce_logsumexp(x, axis=-1, keep_dims=True))
      fldj = (-log_normalization +
              math_ops.reduce_sum(x - log_normalization,
                                  axis=-1,
                                  keep_dims=True))
      return array_ops.squeeze(fldj, squeeze_dims=-1) 
Example #4
Source File: backend.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def logsumexp(x, axis=None, keepdims=False):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  This function is more numerically stable than log(sum(exp(x))).
  It avoids overflows caused by taking the exp of large inputs and
  underflows caused by taking the log of small inputs.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to reduce over.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`, the reduced dimension is
          retained with length 1.

  Returns:
      The reduced tensor.
  """
  return math_ops.reduce_logsumexp(x, axis=axis, keep_dims=keepdims) 
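A minimal usage sketch for this backend wrapper; the import path below is an assumption that varies across TensorFlow/Keras versions, and the variable names are illustrative.

import numpy as np
from tensorflow.python.keras import backend as K  # import path differs by TF version

logits = K.constant(np.random.rand(4, 10))
per_row = K.eval(K.logsumexp(logits, axis=-1, keepdims=False))  # shape (4,)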
Example #5
Source File: bijector.py    From deep_image_model with Apache License 2.0
def _forward_log_det_jacobian(self, x):
    if self._static_event_ndims == 0:
      return x - 2. * nn_ops.softplus(x)
    else:
      # This code is similar to nn_ops.log_softmax but different because we have
      # an implicit zero column to handle. I.e., instead of:
      #   reduce_sum(logits - reduce_sum(exp(logits), dim))
      # we must do:
      #   log_normalization = 1 + reduce_sum(exp(logits))
      #   -log_normalization + reduce_sum(logits - log_normalization)
      log_normalization = nn_ops.softplus(
          math_ops.reduce_logsumexp(x, reduction_indices=-1, keep_dims=True))
      fldj = (-log_normalization +
              math_ops.reduce_sum(x - log_normalization,
                                  reduction_indices=-1,
                                  keep_dims=True))
      return array_ops.squeeze(fldj, squeeze_dims=-1) 
Example #6
Source File: bijector.py    From auto-alt-text-lambda-api with MIT License
def _forward_log_det_jacobian(self, x):
    if self._static_event_ndims == 0:
      return x - 2. * nn_ops.softplus(x)
    else:
      # This code is similar to nn_ops.log_softmax but different because we have
      # an implicit zero column to handle. I.e., instead of:
      #   reduce_sum(logits - reduce_sum(exp(logits), dim))
      # we must do:
      #   log_normalization = 1 + reduce_sum(exp(logits))
      #   -log_normalization + reduce_sum(logits - log_normalization)
      log_normalization = nn_ops.softplus(
          math_ops.reduce_logsumexp(x, reduction_indices=-1, keep_dims=True))
      fldj = (-log_normalization +
              math_ops.reduce_sum(x - log_normalization,
                                  reduction_indices=-1,
                                  keep_dims=True))
      return array_ops.squeeze(fldj, squeeze_dims=-1) 
Example #7
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testKeepDims(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf_np = math_ops.reduce_logsumexp(x_np, keep_dims=True).eval()
        self.assertEqual(y_tf_np.ndim, x_np.ndim)
        y_np = log(np.sum(exp(x_np), keepdims=True))
        self.assertAllClose(y_tf_np, y_np) 
Example #8
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testReductionIndices(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=[0])
        y_np = log(np.sum(exp(x_np), axis=0))
        self.assertShapeEqual(y_np, y_tf)
        y_tf_np = y_tf.eval()
        self.assertAllClose(y_tf_np, y_np) 
Example #9
Source File: crf.py    From deep_image_model with Apache License 2.0
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm 
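To make the role of reduce_logsumexp in the forward recursion concrete, here is a hedged NumPy/SciPy sketch of the same algorithm for a single unbatched sequence (no dynamic_rnn); all names are illustrative and not from the original file.

import numpy as np
from scipy.special import logsumexp

def crf_log_norm_reference(unary, transitions):
    # unary: [seq_len, num_tags] log potentials; transitions: [num_tags, num_tags].
    alphas = unary[0]                        # the first step initializes the alphas
    for scores in unary[1:]:
        # alphas[i] + transitions[i, j], summed over previous tag i in log space.
        alphas = scores + logsumexp(alphas[:, None] + transitions, axis=0)
    return logsumexp(alphas)                 # log partition function

print(crf_log_norm_reference(np.random.randn(5, 3), np.random.randn(3, 3)))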
Example #10
Source File: crf.py    From deep_image_model with Apache License 2.0
def __call__(self, inputs, state, scope=None):
    """Build the CrfForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous alpha
          values.
      scope: Unused variable scope of this cell.

    Returns:
      new_alphas, new_alphas: A pair of [batch_size, num_tags] matrices
          containing the new alpha values.
    """
    state = array_ops.expand_dims(state, 2)

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension. This performs the
    # multiplication of previous alpha values and the current binary potentials
    # in log space.
    transition_scores = state + self._transition_params
    new_alphas = inputs + math_ops.reduce_logsumexp(transition_scores, [1])

    # Both the state and the output of this RNN cell contain the alpha values.
    # The output value is currently unused and simply satisfies the RNN API.
    # This could be useful in the future if we need to compute marginal
    # probabilities, which would require the accumulated alpha values at every
    # time step.
    return new_alphas, new_alphas 
Example #11
Source File: mixture.py    From deep_image_model with Apache License 2.0
def _log_prob(self, x):
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      concat_log_probs = array_ops.pack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp 
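The pattern above is the standard log-domain mixture density, log p(x) = logsumexp_k(log pi_k + log p_k(x)). A hedged NumPy/SciPy sketch of the same computation, assuming Gaussian components purely for illustration:

import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

log_weights = np.log([0.2, 0.5, 0.3])                        # cat_log_probs
means, stds = np.array([-1.0, 0.0, 2.0]), np.array([0.5, 1.0, 1.5])
x = 0.7
component_log_probs = norm.logpdf(x, loc=means, scale=stds)  # distribution_log_probs
log_prob = logsumexp(log_weights + component_log_probs)      # reduce_logsumexp over axis 0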
Example #12
Source File: von_mises_fisher.py    From s-vae-tf with MIT License
def __sample_w3(self, n, seed):
        shape = array_ops.concat(([n], self.batch_shape_tensor()[:-1], [1]), 0)
        u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)
        self.__w = 1 + math_ops.reduce_logsumexp(
            [math_ops.log(u), math_ops.log(1 - u) - 2 * self.scale],
            axis=0) / self.scale
        return self.__w 
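Here the two-element reduce_logsumexp is just a numerically stable way to evaluate the closed form w = 1 + log(u + (1 - u) * exp(-2 * kappa)) / kappa used when sampling on the 2-sphere, with kappa playing the role of self.scale. A hedged NumPy check of the equivalence:

import numpy as np

kappa, u = 4.0, 0.37
direct = 1.0 + np.log(u + (1.0 - u) * np.exp(-2.0 * kappa)) / kappa
stable = 1.0 + np.logaddexp(np.log(u), np.log(1.0 - u) - 2.0 * kappa) / kappa
assert np.allclose(direct, stable)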
Example #13
Source File: transformed_distribution.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _log_prob(self, y):
    # For caching to work, it is imperative that the bijector is the first to
    # modify the input.
    x = self.bijector.inverse(y)
    ildj = self.bijector.inverse_log_det_jacobian(y)
    if self.bijector._is_injective:  # pylint: disable=protected-access
      return self._finish_log_prob_for_one_fiber(y, x, ildj)

    lp_on_fibers = [
        self._finish_log_prob_for_one_fiber(y, x_i, ildj_i)
        for x_i, ildj_i in zip(x, ildj)]
    return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0) 
Example #14
Source File: crf.py    From keras-lambda with MIT License
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm 
Example #15
Source File: crf.py    From keras-lambda with MIT License
def __call__(self, inputs, state, scope=None):
    """Build the CrfForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous alpha
          values.
      scope: Unused variable scope of this cell.

    Returns:
      new_alphas, new_alphas: A pair of [batch_size, num_tags] matrices
          containing the new alpha values.
    """
    state = array_ops.expand_dims(state, 2)

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension. This performs the
    # multiplication of previous alpha values and the current binary potentials
    # in log space.
    transition_scores = state + self._transition_params
    new_alphas = inputs + math_ops.reduce_logsumexp(transition_scores, [1])

    # Both the state and the output of this RNN cell contain the alpha values.
    # The output value is currently unused and simply satisfies the RNN API.
    # This could be useful in the future if we need to compute marginal
    # probabilities, which would require the accumulated alpha values at every
    # time step.
    return new_alphas, new_alphas 
Example #16
Source File: mixture.py    From keras-lambda with MIT License
def _log_prob(self, x):
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp 
Example #17
Source File: relaxed_onehot_categorical.py    From keras-lambda with MIT License
def _assert_valid_sample(self, x):
    if not self.validate_args: return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_non_positive(x),
        distribution_util.assert_close(
            array_ops.zeros((), dtype=self.dtype),
            math_ops.reduce_logsumexp(x, reduction_indices=[-1])),
    ], x) 
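The assertion above checks that each sample is a valid vector of log probabilities: exp(x) must sum to one, i.e. logsumexp(x) along the last axis must be zero. A hedged NumPy illustration:

import numpy as np
from scipy.special import logsumexp

probs = np.array([0.1, 0.6, 0.3])
x = np.log(probs)                               # a valid sample in log space
assert np.allclose(logsumexp(x, axis=-1), 0.0)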
Example #18
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testReductionIndices2(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=0)
        y_np = log(np.sum(exp(x_np), axis=0))
        self.assertShapeEqual(y_np, y_tf)
        y_tf_np = y_tf.eval()
        self.assertAllClose(y_tf_np, y_np) 
Example #19
Source File: gmm_ops.py    From lambda-packs with MIT License
def _define_prior_log_prob_operation(self, shard_id):
    """Computes the prior probability of all samples.

    Updates a vector where each item is the prior probability of an
    input example.

    Args:
      shard_id: id of current shard_id.
    """
    self._prior_probs[shard_id] = math_ops.reduce_logsumexp(
        self._probs[shard_id], axis=1, keep_dims=True) 
Example #20
Source File: math_ops_test.py    From deep_image_model with Apache License 2.0
def testReduceLogSumExp(self):
    for dtype in [np.float16, np.float32, np.double]:
      x_np = np.random.rand(5, 5).astype(dtype)
      with self.test_session(use_gpu=True):
        y_tf_np = math_ops.reduce_logsumexp(x_np).eval()
        y_np = log(np.sum(exp(x_np)))
        self.assertAllClose(y_tf_np, y_np) 
Example #21
Source File: crf.py    From tensorflow_nlp with Apache License 2.0
def __call__(self, inputs, state, scope=None):
    """Build the CrfForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous alpha
          values.
      scope: Unused variable scope of this cell.

    Returns:
      new_alphas, new_alphas: A pair of [batch_size, num_tags] matrices
          containing the new alpha values.
    """
    state = array_ops.expand_dims(state, 2)

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension. This performs the
    # multiplication of previous alpha values and the current binary potentials
    # in log space.
    transition_scores = state + self._transition_params
    new_alphas = inputs + math_ops.reduce_logsumexp(transition_scores, [1])

    # Both the state and the output of this RNN cell contain the alpha values.
    # The output value is currently unused and simply satisfies the RNN API.
    # This could be useful in the future if we need to compute marginal
    # probabilities, which would require the accumulated alpha values at every
    # time step.
    return new_alphas, new_alphas 
Example #22
Source File: crf.py    From tensorflow_nlp with Apache License 2.0
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm

# log-likelihood
Example #23
Source File: relaxed_onehot_categorical.py    From auto-alt-text-lambda-api with MIT License
def _assert_valid_sample(self, x):
    if not self.validate_args: return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_non_positive(x),
        distribution_util.assert_close(
            array_ops.zeros((), dtype=self.dtype),
            math_ops.reduce_logsumexp(x, reduction_indices=[-1])),
    ], x) 
Example #24
Source File: mixture.py    From auto-alt-text-lambda-api with MIT License
def _log_prob(self, x):
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp 
Example #25
Source File: crf.py    From auto-alt-text-lambda-api with MIT License
def __call__(self, inputs, state, scope=None):
    """Build the CrfForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous alpha
          values.
      scope: Unused variable scope of this cell.

    Returns:
      new_alphas, new_alphas: A pair of [batch_size, num_tags] matrices
          containing the new alpha values.
    """
    state = array_ops.expand_dims(state, 2)

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension. This performs the
    # multiplication of previous alpha values and the current binary potentials
    # in log space.
    transition_scores = state + self._transition_params
    new_alphas = inputs + math_ops.reduce_logsumexp(transition_scores, [1])

    # Both the state and the output of this RNN cell contain the alpha values.
    # The output value is currently unused and simply satisfies the RNN API.
    # This could be useful in the future if we need to compute marginal
    # probabilities, which would require the accumulated alpha values at every
    # time step.
    return new_alphas, new_alphas 
Example #26
Source File: crf.py    From auto-alt-text-lambda-api with MIT License
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
        to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.
  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])
  rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])

  # Compute the alpha values in the forward algorithm in order to get the
  # partition function.
  forward_cell = CrfForwardRnnCell(transition_params)
  _, alphas = rnn.dynamic_rnn(
      cell=forward_cell,
      inputs=rest_of_input,
      sequence_length=sequence_lengths - 1,
      initial_state=first_input,
      dtype=dtypes.float32)
  log_norm = math_ops.reduce_logsumexp(alphas, [1])
  return log_norm 
Example #27
Source File: relaxed_onehot_categorical.py    From lambda-packs with MIT License
def _assert_valid_sample(self, x):
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_non_positive(x),
        distribution_util.assert_close(
            array_ops.zeros([], dtype=self.dtype),
            math_ops.reduce_logsumexp(x, axis=[-1])),
    ], x) 
Example #28
Source File: mixture.py    From lambda-packs with MIT License
def _log_prob(self, x):
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp 
Example #29
Source File: onehot_categorical.py    From lambda-packs with MIT License
def _assert_valid_sample(self, x):
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_non_positive(x),
        distribution_util.assert_close(
            array_ops.zeros([], dtype=self.dtype),
            math_ops.reduce_logsumexp(x, axis=[-1])),
    ], x) 
Example #30
Source File: crf.py    From lambda-packs with MIT License
def __call__(self, inputs, state, scope=None):
    """Build the CrfForwardRnnCell.

    Args:
      inputs: A [batch_size, num_tags] matrix of unary potentials.
      state: A [batch_size, num_tags] matrix containing the previous alpha
          values.
      scope: Unused variable scope of this cell.

    Returns:
      new_alphas, new_alphas: A pair of [batch_size, num_tags] matrices
          containing the new alpha values.
    """
    state = array_ops.expand_dims(state, 2)

    # This addition op broadcasts self._transition_params along the zeroth
    # dimension and state along the second dimension. This performs the
    # multiplication of previous alpha values and the current binary potentials
    # in log space.
    transition_scores = state + self._transition_params
    new_alphas = inputs + math_ops.reduce_logsumexp(transition_scores, [1])

    # Both the state and the output of this RNN cell contain the alpha values.
    # The output value is currently unused and simply satisfies the RNN API.
    # This could be useful in the future if we need to compute marginal
    # probabilities, which would require the accumulated alpha values at every
    # time step.
    return new_alphas, new_alphas