Python tensorflow.logical_or() Examples

The following are code examples of tensorflow.logical_or(), gathered from open source projects. The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the tensorflow module, or try the search function.
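Before the project examples, here is a minimal, self-contained sketch of what tf.logical_or computes: an element-wise OR of boolean tensors, with the usual broadcasting rules (illustrative values, assuming TensorFlow 2.x eager execution).

import tensorflow as tf

a = tf.constant([True, True, False, False])
b = tf.constant([True, False, True, False])

print(tf.logical_or(a, b))                   # [ True  True  True False]
print(a | b)                                 # the | operator on bool tensors maps to tf.logical_or
print(tf.logical_or(a, tf.constant(False)))  # broadcasting a scalar: [ True  True False False]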
Example #1
Source File: beam_search_decoder.py    From training_results_v0.5 with Apache License 2.0
def _check_batch_beam(t, batch_size, beam_width):
  """Returns an Assert operation checking that the elements of the stacked
  TensorArray can be reshaped to [batch_size, beam_size, -1]. At this point,
  the TensorArray elements have a known rank of at least 1.
  """
  error_message = ("TensorArray reordering expects elements to be "
                   "reshapable to [batch_size, beam_size, -1] which is "
                   "incompatible with the dynamic shape of %s elements. "
                   "Consider setting reorder_tensor_arrays to False to disable "
                   "TensorArray reordering during the beam search."
                   % (t.name))
  rank = t.shape.ndims
  shape = tf.shape(t)
  if rank == 2:
    condition = tf.equal(shape[1], batch_size * beam_width)
  else:
    condition = tf.logical_or(
        tf.equal(shape[1], batch_size * beam_width),
        tf.logical_and(
            tf.equal(shape[1], batch_size),
            tf.equal(shape[2], beam_width)))
  return tf.Assert(condition, [error_message]) 
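To make the accepted layouts concrete, the following is a small illustrative sketch (toy shapes, TensorFlow 2.x eager) of the two cases the condition above allows for a rank-3 element: the beam dimension folded into axis 1, or batch and beam kept as separate axes.

import tensorflow as tf

batch_size, beam_width = 2, 3

def reshape_ok(t):
  shape = tf.shape(t)
  return tf.logical_or(
      tf.equal(shape[1], batch_size * beam_width),
      tf.logical_and(tf.equal(shape[1], batch_size),
                     tf.equal(shape[2], beam_width)))

flattened = tf.zeros([5, batch_size * beam_width, 7])  # [time, batch*beam, depth]
separate = tf.zeros([5, batch_size, beam_width])       # [time, batch, beam]
print(reshape_ok(flattened), reshape_ok(separate))     # True True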
Example #2
Source File: asserts.py    From graphics with Apache License 2.0
def assert_binary(tensor, name=None):
  """Asserts that all the values in the tensor are zeros or ones.

  Args:
    tensor: A tensor of shape `[A1, ..., An]` containing the values we want to
      check.
    name: A name for this op. Defaults to "assert_binary".

  Returns:
    The input tensor, with dependence on the assertion operator in the graph.

  Raises:
    tf.errors.InvalidArgumentError: If any of the values in the tensor is not
    zero or one.
  """
  if not FLAGS[tfg_flags.TFG_ADD_ASSERTS_TO_GRAPH].value:
    return tensor

  with tf.compat.v1.name_scope(name, 'assert_binary', [tensor]):
    tensor = tf.convert_to_tensor(value=tensor)
    condition = tf.reduce_all(
        input_tensor=tf.logical_or(tf.equal(tensor, 0), tf.equal(tensor, 1)))

    with tf.control_dependencies([tf.Assert(condition, data=[tensor])]):
      return tf.identity(tensor) 
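Stripped of the flag check and name scope, the core of the assertion is easy to see in isolation; a minimal sketch (TensorFlow 2.x eager) of the condition it builds:

import tensorflow as tf

def is_binary(tensor):
  tensor = tf.convert_to_tensor(tensor)
  return tf.reduce_all(tf.logical_or(tf.equal(tensor, 0), tf.equal(tensor, 1)))

print(is_binary([[0, 1], [1, 1]]))  # True
print(is_binary([[0, 2], [1, 1]]))  # False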
Example #3
Source File: tf_atari_wrappers.py    From fine-lm with MIT License
def simulate(self, action):
    with tf.name_scope("environment/simulate"):  # Do we need this?
      initializer = (tf.zeros_like(self._observ),
                     tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

      def not_done_step(a, _):
        reward, done = self._batch_env.simulate(action)
        with tf.control_dependencies([reward, done]):
          # TODO(piotrmilos): possibly ignore envs with done
          r0 = tf.maximum(a[0], self._batch_env.observ)
          r1 = tf.add(a[1], reward)
          r2 = tf.logical_or(a[2], done)

          return (r0, r1, r2)

      simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                             initializer=initializer, parallel_iterations=1,
                             infer_shape=False)
      simulate_ret = [ret[-1, ...] for ret in simulate_ret]

      with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
        return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2]) 
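The detail worth noting here is how tf.logical_or accumulates the done flags across the skipped frames inside tf.scan: once any step reports done, the environment stays marked as done. A standalone sketch of that accumulation with made-up per-step flags instead of a real batched environment (TensorFlow 2.x eager):

import tensorflow as tf

# done flags for 4 frame-skip steps over a batch of 3 environments
per_step_done = tf.constant([[False, False, True],
                             [False, False, False],
                             [False, True,  False],
                             [False, False, False]])

accumulated = tf.scan(
    lambda acc, step_done: tf.logical_or(acc, step_done),
    per_step_done,
    initializer=tf.fill([3], False))

print(accumulated[-1])  # [False  True  True]: an env stays done once any step reported done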
Example #4
Source File: tf_atari_wrappers.py    From fine-lm with MIT License
def simulate(self, action):
    with tf.name_scope("environment/simulate"):  # Do we need this?
      initializer = (tf.zeros(self.old_shape, dtype=tf.float32),
                     tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

      def not_done_step(a, _):
        reward, done = self._batch_env.simulate(action)
        with tf.control_dependencies([reward, done]):
          r0 = self._batch_env.observ + 0
          r1 = tf.add(a[1], reward)
          r2 = tf.logical_or(a[2], done)
          return (r0, r1, r2)

      simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                             initializer=initializer, parallel_iterations=1,
                             infer_shape=False)
      observations, rewards, dones = simulate_ret
      split_observations = tf.split(observations, self.skip, axis=0)
      split_observations = [tf.squeeze(o, axis=0) for o in split_observations]
      observation = tf.concat(split_observations, axis=-1)
      with tf.control_dependencies([self._observ.assign(observation)]):
        return tf.identity(rewards[-1, ...]), tf.identity(dones[-1, ...]) 
Example #5
Source File: uniform.py    From tensorprob with MIT License
def Uniform(name=None):
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        return tf.cond(
            tf.logical_or(
                tf.is_inf(tf.cast(lower, config.dtype)),
                tf.is_inf(tf.cast(upper, config.dtype))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )

    Distribution.integral = integral

    return X 
Example #6
Source File: uniform.py    From tensorprob with MIT License
def UniformInt(name=None):
    X = tf.placeholder(config.int_dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        val = tf.cond(
            tf.logical_or(
                tf.is_inf(tf.ceil(tf.cast(lower, config.dtype))),
                tf.is_inf(tf.floor(tf.cast(upper, config.dtype)))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )
        return val

    Distribution.integral = integral

    return X 
Example #7
Source File: path_context_reader.py    From code2vec with MIT License
def _filter_input_rows(self, *row_parts) -> tf.bool:
        row_parts = self.model_input_tensors_former.from_model_input_form(row_parts)

        #assert all(tensor.shape == (self.config.MAX_CONTEXTS,) for tensor in
        #           {row_parts.path_source_token_indices, row_parts.path_indices,
        #            row_parts.path_target_token_indices, row_parts.context_valid_mask})

        # FIXME: Does "valid" here mean just "no padding" or "neither padding nor OOV"? I assumed just "no padding".
        any_word_valid_mask_per_context_part = [
            tf.not_equal(tf.reduce_max(row_parts.path_source_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_target_token_indices, axis=0),
                         self.vocabs.token_vocab.word_to_index[self.vocabs.token_vocab.special_words.PAD]),
            tf.not_equal(tf.reduce_max(row_parts.path_indices, axis=0),
                         self.vocabs.path_vocab.word_to_index[self.vocabs.path_vocab.special_words.PAD])]
        any_contexts_is_valid = reduce(tf.logical_or, any_word_valid_mask_per_context_part)  # scalar

        if self.estimator_action.is_evaluate:
            cond = any_contexts_is_valid  # scalar
        else:  # training
            word_is_valid = tf.greater(
                row_parts.target_index, self.vocabs.target_vocab.word_to_index[self.vocabs.target_vocab.special_words.OOV])  # scalar
            cond = tf.logical_and(word_is_valid, any_contexts_is_valid)  # scalar

        return cond  # scalar 
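The reduce(tf.logical_or, ...) call above is simply a fold of tf.logical_or over a Python list of boolean masks. A minimal sketch of that idiom with made-up masks (TensorFlow 2.x eager):

from functools import reduce

import tensorflow as tf

source_valid = tf.constant([True, False, False])
target_valid = tf.constant([False, False, True])
path_valid = tf.constant([False, False, False])

any_part_valid = reduce(tf.logical_or, [source_valid, target_valid, path_valid])
print(any_part_valid)  # [ True False  True]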
Example #8
Source File: utils.py    From zhusuan with MIT License
def __ror__(self, other):
        return tf.logical_or(other, self) 
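Together with the matching __or__ override further down the list, this lets a wrapper type participate in Python's | operator by delegating to tf.logical_or. A minimal sketch of the pattern with a hypothetical wrapper class (not the zhusuan implementation):

import tensorflow as tf

class BoolWrapper:
    """Hypothetical wrapper whose | operator delegates to tf.logical_or."""

    def __init__(self, tensor):
        self.tensor = tf.convert_to_tensor(tensor)

    def __or__(self, other):   # wrapper | other
        return tf.logical_or(self.tensor, other)

    def __ror__(self, other):  # other | wrapper
        return tf.logical_or(other, self.tensor)

print(BoolWrapper([True, False]) | tf.constant([False, True]))  # __or__  -> [ True  True]
print([True, False] | BoolWrapper([False, True]))               # __ror__ -> [ True  True]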
Example #9
Source File: rnn_decoder_helpers.py    From Counterfactual-StoryRW with MIT License
def next_inputs(self, time, outputs, state, sample_ids, name=None,
                    reach_max_time=None):
        if self._use_finish:
            hard_ids = tf.argmax(sample_ids, axis=-1, output_type=tf.int32)
            finished = tf.equal(hard_ids, self._end_token)
        else:
            finished = tf.tile([False], [self._batch_size])
        all_finished = tf.reduce_all(finished)

        if reach_max_time is not None:
            all_finished = tf.logical_or(all_finished, reach_max_time)

        if self._stop_gradient:
            sample_ids = tf.stop_gradient(sample_ids)

        if self._embedding_args_cnt == 1:
            del time, outputs  # unused by next_inputs_fn
            next_inputs = tf.cond(
                all_finished,
                # If we're finished, the next_inputs value doesn't matter
                lambda: self._start_inputs,
                lambda: self._embedding_fn(soft_ids=sample_ids))
        elif self._embedding_args_cnt == 2:
            # Prepare the position embedding of the next step
            times = tf.ones(self._batch_size, dtype=tf.int32) * (time+1)
            next_inputs = tf.cond(
                all_finished,
                # If we're finished, the next_inputs value doesn't matter
                lambda: self._start_inputs,
                lambda: self._embedding_fn(soft_ids=sample_ids, times=times))

        return (finished, next_inputs, state) 
Example #10
Source File: balanced_positive_negative_sampler.py    From vehicle_counting_tensorflow with MIT License
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                     num_end_samples, total_num_samples):
    """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.float32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                  total_num_samples,
                                  dtype=tf.float32)
    return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                                one_hot_selector, axes=[0, 0]), tf.int32) 
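A small sketch of the position mask built above (toy values, TensorFlow 2.x eager): the logical_or of a "first num_start_samples positions" mask and a "last num_end_samples positions" mask selects both ends of the range.

import tensorflow as tf

input_length, num_start_samples, num_end_samples = 8, 2, 3

positions = tf.range(input_length)
start_positions = tf.less(positions, num_start_samples)
end_positions = tf.greater_equal(positions, input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)

print(selected_positions)
# [ True  True False False False  True  True  True]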
Example #11
Source File: balanced_positive_negative_sampler.py    From tpu_models with Apache License 2.0
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                     num_end_samples, total_num_samples):
    """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.float32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                  total_num_samples,
                                  dtype=tf.float32)
    return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                                one_hot_selector, axes=[0, 0]), tf.int32) 
Example #12
Source File: networks.py    From kss with Apache License 2.0
def Attention(Q, K, V, mononotic_attention=False, prev_max_attentions=None):
    '''
    Args:
      Q: Queries. (B, T/r, d)
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
      mononotic_attention: A boolean. At training, it is False.
      prev_max_attentions: (B,). At training, it is set to None.

    Returns:
      R: [Context Vectors; Q]. (B, T/r, 2d)
      alignments: (B, N, T/r)
      max_attentions: (B, T/r)
    '''
    A = tf.matmul(Q, K, transpose_b=True) * tf.rsqrt(tf.to_float(hp.d))
    if mononotic_attention:  # for inference
        key_masks = tf.sequence_mask(prev_max_attentions, hp.max_N)
        reverse_masks = tf.sequence_mask(hp.max_N - hp.attention_win_size - prev_max_attentions, hp.max_N)[:, ::-1]
        masks = tf.logical_or(key_masks, reverse_masks)
        masks = tf.tile(tf.expand_dims(masks, 1), [1, hp.max_T, 1])
        paddings = tf.ones_like(A) * (-2 ** 32 + 1)  # (B, T/r, N)
        A = tf.where(tf.equal(masks, False), A, paddings)
    A = tf.nn.softmax(A) # (B, T/r, N)
    max_attentions = tf.argmax(A, -1)  # (B, T/r)
    R = tf.matmul(A, V)
    R = tf.concat((R, Q), -1)

    alignments = tf.transpose(A, [0, 2, 1]) # (B, N, T/r)

    return R, alignments, max_attentions 
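The two sequence_mask calls above carve out a fixed attention window: one mask covers the keys up to the previously attended position, the other (reversed) covers everything beyond the window, and their logical_or marks the positions to be padded out. A standalone sketch with toy sizes standing in for hp.max_N and hp.attention_win_size (TensorFlow 2.x eager):

import tensorflow as tf

max_n, win = 8, 3                        # toy stand-ins for hp.max_N and hp.attention_win_size
prev_max_attentions = tf.constant([2])   # batch of 1; last attended key index

key_masks = tf.sequence_mask(prev_max_attentions, max_n)
reverse_masks = tf.sequence_mask(max_n - win - prev_max_attentions, max_n)[:, ::-1]
masks = tf.logical_or(key_masks, reverse_masks)  # True = position masked out

print(masks)
# [[ True  True False False False  True  True  True]]  -> only keys 2..4 stay attendable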
Example #13
Source File: beam_search_decoder.py    From addons with Apache License 2.0
def _check_batch_beam(t, batch_size, beam_width):
    """Returns an Assert operation checking that the elements of the stacked
    TensorArray can be reshaped to [batch_size, beam_size, -1].

    At this point, the TensorArray elements have a known rank of at
    least 1.
    """
    error_message = (
        "TensorArray reordering expects elements to be "
        "reshapable to [batch_size, beam_size, -1] which is "
        "incompatible with the dynamic shape of %s elements. "
        "Consider setting reorder_tensor_arrays to False to disable "
        "TensorArray reordering during the beam search."
        % (t if tf.executing_eagerly() else t.name)
    )
    rank = t.shape.ndims
    shape = tf.shape(t)
    if rank == 2:
        condition = tf.equal(shape[1], batch_size * beam_width)
    else:
        condition = tf.logical_or(
            tf.equal(shape[1], batch_size * beam_width),
            tf.logical_and(
                tf.equal(shape[1], batch_size), tf.equal(shape[2], beam_width)
            ),
        )
    return tf.Assert(condition, [error_message]) 
Example #14
Source File: utils.py    From zhusuan with MIT License
def __or__(self, other):
        return tf.logical_or(self, other) 
Example #15
Source File: hardshrink.py    From addons with Apache License 2.0
def _hardshrink_py(
    x: types.TensorLike, lower: Number = -0.5, upper: Number = 0.5
) -> tf.Tensor:
    if lower > upper:
        raise ValueError(
            "The value of lower is {} and should"
            " not be higher than the value "
            "variable upper, which is {} .".format(lower, upper)
        )
    mask_lower = x < lower
    mask_upper = upper < x
    mask = tf.logical_or(mask_lower, mask_upper)
    mask = tf.cast(mask, x.dtype)
    return x * mask 
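A brief usage sketch (TensorFlow 2.x eager): the logical_or of the two comparisons keeps values outside [lower, upper] and zeroes out everything inside, which is exactly the hard-shrink behaviour.

import tensorflow as tf

x = tf.constant([-1.0, -0.3, 0.0, 0.2, 0.7])
lower, upper = -0.5, 0.5

mask = tf.logical_or(x < lower, x > upper)
print(x * tf.cast(mask, x.dtype))  # [-1.   0.   0.   0.   0.7]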
Example #16
Source File: tf_helpers.py    From Counterfactual-StoryRW with MIT License
def next_inputs(self, time, outputs, state, sample_ids, name=None,
                    reach_max_time=None):
        """Gets the inputs for next step."""
        finished = math_ops.equal(sample_ids, self._end_token)
        all_finished = math_ops.reduce_all(finished)
        if reach_max_time is not None:
            all_finished = tf.logical_or(all_finished, reach_max_time)

        if self._embedding_args_cnt == 1:
            del time, outputs  # unused by next_inputs_fn
            next_inputs = control_flow_ops.cond(
                all_finished,
                # If we're finished, the next_inputs value doesn't matter
                lambda: self._start_inputs,
                lambda: self._embedding_fn(sample_ids))
        elif self._embedding_args_cnt == 2:
            del outputs
            # Prepare the position embedding of the next step
            times = tf.ones(self._batch_size, dtype=tf.int32) * (time+1)
            next_inputs = control_flow_ops.cond(
                all_finished,
                # If we're finished, the next_inputs value doesn't matter
                lambda: self._start_inputs,
                lambda: self._embedding_fn(sample_ids, times))

        return finished, next_inputs, state 
Example #17
Source File: text.py    From OpenNMT-tf with MIT License
def tokens_to_words(tokens, subword_token="■", is_spacer=None):
  """Converts a sequence of tokens to a sequence of words.

  Example:

    >>> opennmt.data.tokens_to_words(["He@@", "llo", "W@@", "orld", "@@!"], subword_token="@@")
    <tf.RaggedTensor [[b'He@@', b'llo'], [b'W@@', b'orld', b'@@!']]>

  Args:
    tokens: A 1D string ``tf.Tensor``.
    subword_token: The special token used by the subword tokenizer.
    is_spacer: Whether :obj:`subword_token` is used as a spacer (as in
      SentencePiece) or a joiner (as in BPE). If ``None``, will infer
      directly from :obj:`subword_token`.

  Returns:
    The words as a 2D string ``tf.RaggedTensor``.
  """
  if is_spacer is None:
    is_spacer = subword_token == "▁"
  if is_spacer:
    # First token implicitly starts with a spacer.
    left_and_single = tf.logical_or(
        tf.strings.regex_full_match(tokens, "%s.*" % subword_token),
        tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))
    right = tf.strings.regex_full_match(tokens, ".+%s" % subword_token)
    word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)
  else:
    right = tf.strings.regex_full_match(tokens, ".*%s" % subword_token)
    left = tf.strings.regex_full_match(tokens, "%s.*" % subword_token)
    subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)
    word_start = tf.logical_not(subword)
  start_indices = tf.squeeze(tf.where(word_start), -1)
  return tf.RaggedTensor.from_row_starts(tokens, start_indices) 
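The docstring demonstrates the joiner (BPE) case; below is a complementary sketch of the spacer case taken from the branch above, where the first logical_or also counts the very first token as a word start and the second propagates a trailing spacer to the next token (toy input, TensorFlow 2.x eager).

import tensorflow as tf

tokens = tf.constant(["▁He", "llo", "▁Wor", "ld", "!"])
subword_token = "▁"

left_and_single = tf.logical_or(
    tf.strings.regex_full_match(tokens, "%s.*" % subword_token),
    tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))
right = tf.strings.regex_full_match(tokens, ".+%s" % subword_token)
word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)

start_indices = tf.squeeze(tf.where(word_start), -1)
print(tf.RaggedTensor.from_row_starts(tokens, start_indices))
# a 2-row RaggedTensor of byte strings: [['▁He', 'llo'], ['▁Wor', 'ld', '!']]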
Example #18
Source File: balanced_positive_negative_sampler.py    From training_results_v0.5 with Apache License 2.0
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                     num_end_samples, total_num_samples):
    """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.float32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                  total_num_samples,
                                  dtype=tf.float32)
    return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                                one_hot_selector, axes=[0, 0]), tf.int32) 
Example #19
Source File: mask_rcnn_model.py    From training_results_v0.5 with Apache License 2.0
def _rpn_score_loss(score_outputs, score_targets, normalizer=1.0):
  """Computes score loss."""
  # score_targets has three values: (1) score_targets[i]=1, the anchor is a
  # positive sample. (2) score_targets[i]=0, negative. (3) score_targets[i]=-1,
  # the anchor is don't care (ignore).
  with tf.name_scope('rpn_score_loss'):
    mask = tf.logical_or(tf.equal(score_targets, 1), tf.equal(score_targets, 0))
    score_targets = tf.maximum(score_targets, tf.zeros_like(score_targets))
    # RPN score loss is sum over all except ignored samples.
    score_loss = tf.losses.sigmoid_cross_entropy(
        score_targets, score_outputs, weights=mask,
        reduction=tf.losses.Reduction.SUM)
    score_loss /= normalizer
    return score_loss 
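A small sketch of how the mask above drops the "don't care" anchors (toy targets, TensorFlow 2.x eager): only anchors labelled 0 or 1 get a weight, so the -1 entries contribute nothing to the loss, and tf.maximum turns the -1 labels into valid 0/1 targets.

import tensorflow as tf

score_targets = tf.constant([1, 0, -1, 1, -1])

mask = tf.logical_or(tf.equal(score_targets, 1), tf.equal(score_targets, 0))
score_targets = tf.maximum(score_targets, tf.zeros_like(score_targets))

print(mask)           # [ True  True False  True False]
print(score_targets)  # [1 0 0 1 0]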
Example #20
Source File: ops.py    From lsm with MIT License
def nearest3(grid, idx, clip=False):
    with tf.variable_scope('NearestInterp'):
        _, h, w, d, f = grid.get_shape().as_list()
        x, y, z = idx[:, 1], idx[:, 2], idx[:, 3]
        g_val = tf.gather_nd(grid, tf.cast(tf.round(idx), 'int32'))
        if clip:
            x_inv = tf.logical_or(x < 0, x > h - 1)
            y_inv = tf.logical_or(y < 0, y > w - 1)
            z_inv = tf.logical_or(z < 0, z > d - 1)
            valid_idx = 1 - \
                tf.to_float(tf.logical_or(tf.logical_or(x_inv, y_inv), z_inv))
            g_val = g_val * valid_idx[tf.newaxis, ...]
        return g_val 
Example #21
Source File: networks.py    From dc_tts with Apache License 2.0
def Attention(Q, K, V, mononotic_attention=False, prev_max_attentions=None):
    '''
    Args:
      Q: Queries. (B, T/r, d)
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
      mononotic_attention: A boolean. At training, it is False.
      prev_max_attentions: (B,). At training, it is set to None.

    Returns:
      R: [Context Vectors; Q]. (B, T/r, 2d)
      alignments: (B, N, T/r)
      max_attentions: (B, T/r)
    '''
    A = tf.matmul(Q, K, transpose_b=True) * tf.rsqrt(tf.to_float(hp.d))
    if mononotic_attention:  # for inference
        key_masks = tf.sequence_mask(prev_max_attentions, hp.max_N)
        reverse_masks = tf.sequence_mask(hp.max_N - hp.attention_win_size - prev_max_attentions, hp.max_N)[:, ::-1]
        masks = tf.logical_or(key_masks, reverse_masks)
        masks = tf.tile(tf.expand_dims(masks, 1), [1, hp.max_T, 1])
        paddings = tf.ones_like(A) * (-2 ** 32 + 1)  # (B, T/r, N)
        A = tf.where(tf.equal(masks, False), A, paddings)
    A = tf.nn.softmax(A) # (B, T/r, N)
    max_attentions = tf.argmax(A, -1)  # (B, T/r)
    R = tf.matmul(A, V)
    R = tf.concat((R, Q), -1)

    alignments = tf.transpose(A, [0, 2, 1]) # (B, N, T/r)

    return R, alignments, max_attentions 
Example #22
Source File: utilities.py    From tensorprob with MIT License
def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to the allowed values of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))

    if len(conditions) > 0:
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        logp = tf.select(
            is_inside_bounds,
            logp,
            tf.fill(tf.shape(X), config.dtype(-np.inf))
        )

    return logp 
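The same bounding idea expressed against current TensorFlow, as a hedged adaptation (tf.select has since been replaced by tf.where): the per-region conditions are combined with tf.logical_or, and the log probability is pushed to -inf outside every allowed region.

import numpy as np
import tensorflow as tf

X = tf.constant([-2.0, 0.5, 1.5, 3.5])
logp = tf.zeros_like(X)
bounds = [(0.0, 1.0), (3.0, 4.0)]  # two allowed (lower, upper) regions

conditions = [tf.logical_and(tf.greater(X, l), tf.less(X, u)) for l, u in bounds]
is_inside_bounds = conditions[0]
for condition in conditions[1:]:
    is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

logp = tf.where(is_inside_bounds, logp, tf.fill(tf.shape(X), -np.inf))
print(logp)  # [-inf   0. -inf   0.]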
Example #23
Source File: resize.py    From onnx-tensorflow with Apache License 2.0
def version_10(cls, node, **kwargs):
    x = kwargs["tensor_dict"][node.inputs[0]]
    x_shape = tf_shape(x)
    scales = kwargs["tensor_dict"][node.inputs[1]]

    n_in_scales_is_one = tf.equal(scales[0], 1)
    c_in_scales_is_one = tf.logical_or(tf.equal(scales[1], 1),
                                       tf.equal(scales[3], 1))
    assert_n_c_in_scales_are_ones = tf.Assert(
        tf.logical_and(n_in_scales_is_one, c_in_scales_is_one), [scales])

    with tf.control_dependencies([assert_n_c_in_scales_are_ones]):
      x_in_NCHW_format = tf.equal(scales[1], 1)
      h_w_scale = tf.where(x_in_NCHW_format, scales[2:], scales[1:3])
      h_w_shape = tf.where(x_in_NCHW_format, x_shape[2:], x_shape[1:3])
      new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, scales.dtype),
                              tf.int32)

      mode = node.attrs.get("mode", "nearest")
      if mode.lower() == "linear":
        mode = tf.image.ResizeMethod.BILINEAR
      else:
        mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR

      def process_NCHW_format(x):
        x_t = tf.transpose(x, perm=[0, 2, 3, 1])
        y = tf.image.resize(x_t, size=new_h_w_shape, method=mode)
        y_t = tf.transpose(y, perm=[0, 3, 1, 2])
        return y_t

      def process_NHWC_format(x):
        y = tf.image.resize(x, size=new_h_w_shape, method=mode)
        return y

      output = tf.cond(x_in_NCHW_format, lambda: process_NCHW_format(x),
                       lambda: process_NHWC_format(x))

      return [output] 
Example #24
Source File: nuelus_sampling_utils.py    From BERT with Apache License 2.0
def nucleus_sampling(logits, vocab_size, p=0.9, 
					input_ids=None, input_ori_ids=None,
					**kargs):
	input_shape_list = bert_utils.get_shape_list(logits, expected_rank=[2,3])
	if len(input_shape_list) == 3:
		logits = tf.reshape(logits, (-1, vocab_size))
	probs = tf.nn.softmax(logits, axis=-1)
	# [batch_size, seq, vocab_perm]
	# indices = tf.argsort(probs, direction='DESCENDING')
	indices = tf.contrib.framework.argsort(probs, direction='DESCENDING')

	cumulative_probabilities = tf.math.cumsum(tf.batch_gather(probs, indices), axis=-1, exclusive=False)
	
	# find the top pth index to cut off. careful we don't want to cutoff everything!
	# result will be [batch_size, seq, vocab_perm]
	exclude_mask = tf.logical_not(
	tf.logical_or(cumulative_probabilities < p, tf.range(vocab_size)[None] < 1))
	exclude_mask = tf.cast(exclude_mask, tf.float32)

	indices_v1 = tf.contrib.framework.argsort(indices)
	exclude_mask = reorder(exclude_mask, tf.cast(indices_v1, dtype=tf.int32))
	if len(input_shape_list) == 3:
		exclude_mask = tf.reshape(exclude_mask, input_shape_list)
		# logits = tf.reshape(logits, input_shape_list)

	if input_ids is not None and input_ori_ids is not None:
		exclude_mask, input_ori_ids = get_extra_mask(
								input_ids, input_ori_ids, 
								exclude_mask, vocab_size,
								**kargs)

		return [exclude_mask, input_ori_ids]
	else:
		return [exclude_mask] 
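A toy illustration of the cutoff logic above (TensorFlow 2.x eager): probabilities are sorted descending, the cumulative sum is compared against p, and the logical_or with range(vocab_size) < 1 guarantees that the single most probable token always survives, even for very small p.

import tensorflow as tf

probs = tf.constant([[0.5, 0.3, 0.15, 0.05]])  # one row, already sorted descending
p = 0.9

cumulative_probabilities = tf.math.cumsum(probs, axis=-1, exclusive=False)
keep = tf.logical_or(cumulative_probabilities < p, tf.range(4)[None] < 1)
exclude_mask = tf.logical_not(keep)

print(keep)          # [[ True  True False False]]
print(exclude_mask)  # [[False False  True  True]]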
Example #25
Source File: networks.py    From bangla-tts with GNU General Public License v3.0
def Attention(Q, K, V, mononotic_attention=False, prev_max_attentions=None):
    '''
    Args:
      Q: Queries. (B, T/r, d)
      K: Keys. (B, N, d)
      V: Values. (B, N, d)
      mononotic_attention: A boolean. At training, it is False.
      prev_max_attentions: (B,). At training, it is set to None.

    Returns:
      R: [Context Vectors; Q]. (B, T/r, 2d)
      alignments: (B, N, T/r)
      max_attentions: (B, T/r)
    '''
    A = tf.matmul(Q, K, transpose_b=True) * tf.rsqrt(tf.to_float(d))
    if mononotic_attention:  # for inference
        key_masks = tf.sequence_mask(prev_max_attentions, max_N)
        reverse_masks = tf.sequence_mask(max_N - attention_win_size - prev_max_attentions, max_N)[:, ::-1]
        masks = tf.logical_or(key_masks, reverse_masks)
        masks = tf.tile(tf.expand_dims(masks, 1), [1, max_T, 1])
        paddings = tf.ones_like(A) * (-2 ** 32 + 1)  # (B, T/r, N)
        A = tf.where(tf.equal(masks, False), A, paddings)
    A = tf.nn.softmax(A) # (B, T/r, N)
    max_attentions = tf.argmax(A, -1)  # (B, T/r)
    R = tf.matmul(A, V)
    R = tf.concat((R, Q), -1)

    alignments = tf.transpose(A, [0, 2, 1]) # (B, N, T/r)

    return R, alignments, max_attentions 