Python tensorflow.cumsum() Examples

The following are 30 code examples of tensorflow.cumsum(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
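Before diving in, a minimal sketch of what tf.cumsum computes, including the exclusive and reverse flags that several of the examples below rely on (assumes TF 2.x eager execution):

import tensorflow as tf

x = tf.constant([1, 2, 3, 4])
tf.cumsum(x)                  # [1, 3, 6, 10]  -- inclusive prefix sums
tf.cumsum(x, exclusive=True)  # [0, 1, 3, 6]   -- sum of strictly earlier elements
tf.cumsum(x, reverse=True)    # [10, 9, 7, 4]  -- suffix sums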
Example #1
Source File: sequence.py    From icme2019 with MIT License
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])

        position_j = 1. / \
            K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)

        position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        outputs = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)

        if self.mode == 'sum':
            if self.scale:
                outputs = outputs * (self.size ** 0.5)  # scale by sqrt(d_model), as in the Transformer
            return x + outputs
        elif self.mode == 'concat':
            return K.concatenate([outputs, x], 2) 
Example #2
Source File: attention_wrapper.py    From addons with Apache License 2.0
def safe_cumprod(x: TensorLike, *args, **kwargs) -> tf.Tensor:
    """Computes cumprod of x in logspace using cumsum to avoid underflow.

    The cumprod function and its gradient can result in numerical instabilities
    when its argument has very small and/or zero values.  As long as the
    argument is all positive, we can instead compute the cumulative product as
    exp(cumsum(log(x))).  This function can be called identically to
    tf.cumprod.

    Args:
      x: Tensor to take the cumulative product of.
      *args: Passed on to cumsum; these are identical to those in cumprod.
      **kwargs: Passed on to cumsum; these are identical to those in cumprod.
    Returns:
      Cumulative product of x.
    """
    with tf.name_scope("SafeCumprod"):
        x = tf.convert_to_tensor(x, name="x")
        tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
        return tf.exp(
            tf.cumsum(tf.math.log(tf.clip_by_value(x, tiny, 1)), *args, **kwargs)
        ) 
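A quick numeric check of the identity the docstring describes; for strictly positive inputs the log-space form agrees with tf.math.cumprod up to rounding (a sketch, assuming TF 2.x eager execution):

import tensorflow as tf

x = tf.constant([0.9, 0.5, 0.1, 0.8])
direct = tf.math.cumprod(x)                   # [0.9, 0.45, 0.045, 0.036]
logspace = tf.exp(tf.cumsum(tf.math.log(x)))  # same values up to rounding, but the
                                              # gradient stays usable as entries of
                                              # x approach zero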
Example #3
Source File: gnr.py    From GloballyNormalizedReader with Apache License 2.0
def slice_sentences(document_features, picks, sentence_lengths):
    """Extract selected sentence spans from the document features.

    Arguments:
        document_features:  A `[batch, length, features]` representation
                            of the documents.
        picks:              Sentences to extract, with shape
                            `[batch, selections]`.
        sentence_lengths:   Length of each sentence in the document with shape
                            `[batch, num_sentences]`.

    Returns extracted features for each selected sentence as a tensor with shape
        `[batch, selections, max_sentence_len, features]`
    """
    sentence_offsets = tf.cumsum(
        sentence_lengths, axis=1, exclusive=True)

    starts = ops.gather_from_rows(sentence_offsets, picks)
    lengths = ops.gather_from_rows(sentence_lengths, picks)
    sentence_embeddings = ops.slice_fragments(
        document_features, starts, lengths)
    return sentence_embeddings 
Example #4
Source File: generator_utils.py    From BERT with Apache License 2.0
def _compute_auxiliary_structure(self, contents_and_mask):
    """Compute segment and position metadata."""
    contents = contents_and_mask[:, :self._num_sequences]
    start_mask = tf.cast(contents_and_mask[:, self._num_sequences:],
                         dtype=INDEX_DTYPE)

    segment = tf.cumsum(start_mask, axis=0)
    uniform_count = tf.ones_like(segment[:, 0])
    position = []
    for i in range(self._num_sequences):
      segment_slice = segment[:, i]
      counts = tf.math.segment_sum(uniform_count, segment[:, i])
      position.append(tf.range(self._packed_length) - tf.cumsum(
          tf.gather(counts, segment_slice - 1) * start_mask[:, i]))
    position = tf.concat([i[:, tf.newaxis] for i in position], axis=1)

    # Correct for padding tokens.
    pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE)
    segment *= pad_mask
    position *= pad_mask

    return segment, position 
Example #5
Source File: ppo.py    From BERT with Apache License 2.0
def _distributional_to_value(value_d, size, subscale, threshold):
  """Get a scalar value out of a value distribution in distributional RL."""
  half = size // 2
  value_range = (tf.to_float(tf.range(-half, half)) + 0.5) * subscale
  probs = tf.nn.softmax(value_d)

  if threshold == 0.0:
    return tf.reduce_sum(probs * value_range, axis=-1)

  # accumulated_probs[..., i] is the sum of probabilities in buckets up to i,
  # so it is the probability that value <= i'th bucket value.
  accumulated_probs = tf.cumsum(probs, axis=-1)
  # New probs are 0 on all lower buckets, until the threshold
  probs = tf.where(accumulated_probs < threshold, tf.zeros_like(probs), probs)
  probs /= tf.reduce_sum(probs, axis=-1, keepdims=True)  # Re-normalize.
  return tf.reduce_sum(probs * value_range, axis=-1) 
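A toy worked case of the thresholding branch, in NumPy for brevity (numbers invented for illustration):

import numpy as np

probs = np.array([0.2, 0.3, 0.5])
acc = np.cumsum(probs)                  # [0.2, 0.5, 1.0]
kept = np.where(acc < 0.4, 0.0, probs)  # [0.0, 0.3, 0.5] -- first bucket zeroed
kept /= kept.sum()                      # [0.0, 0.375, 0.625] after re-normalizing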
Example #6
Source File: common_layers.py    From training_results_v0.5 with Apache License 2.0
def weights_multi_problem_all(labels, taskid=-1):
  """Assign weight 1.0 to only examples from the given task."""
  weights = tf.to_float(tf.not_equal(labels, 0))
  if taskid < 0:
    raise ValueError("Task ID must be non-negative.")

  past_taskid = tf.cumsum(tf.to_float(tf.equal(labels, taskid)), axis=1)
  # Additionally zero out the task id location
  past_taskid *= tf.to_float(tf.not_equal(labels, taskid))
  non_taskid = tf.to_float(labels)
  example_mask = tf.to_float(tf.not_equal(past_taskid * non_taskid, 0))
  example_mask = tf.reduce_sum(example_mask, axis=1)
  example_mask = tf.to_float(
      tf.greater(example_mask, tf.zeros_like(example_mask)))

  return weights * tf.expand_dims(example_mask, axis=-1) 
Example #7
Source File: rewards.py    From Counterfactual-StoryRW with MIT License
def _discount_reward_py_2d(reward, sequence_length=None,
                           discount=1., dtype=None):
    if sequence_length is not None:
        reward = mask_sequences(reward, sequence_length, dtype=dtype)

    dtype = dtype or reward.dtype

    if discount == 1.:
        disc_reward = np.cumsum(
            reward[:, ::-1], axis=1, dtype=dtype)[:, ::-1]
    else:
        disc_reward = np.copy(reward)
        for i in range(reward.shape[1]-2, -1, -1):
            disc_reward[:, i] += disc_reward[:, i+1] * discount

    return disc_reward 
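For intuition, the discount == 1 branch is just a suffix sum ("reward-to-go"); a minimal NumPy check:

import numpy as np

reward = np.array([[1., 2., 3.]])
np.cumsum(reward[:, ::-1], axis=1)[:, ::-1]  # [[6., 5., 3.]] -- each entry is the
                                             # sum of itself and all later rewards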
Example #8
Source File: rewards.py    From Counterfactual-StoryRW with MIT License
def _discount_reward_tensor_2d(reward, sequence_length=None,
                               discount=1., dtype=None):
    if sequence_length is not None:
        reward = mask_sequences(
            reward, sequence_length, dtype=dtype, tensor_rank=2)

    if discount == 1.:
        disc_reward = tf.cumsum(reward, axis=1, reverse=True)
    else:
        # [max_time, batch_size]
        rev_reward_T = tf.transpose(tf.reverse(reward, [1]), [1, 0])
        rev_reward_T_cum = tf.scan(
            fn=lambda acc, cur: cur + discount * acc,
            elems=rev_reward_T,
            initializer=tf.zeros_like(reward[:, 1]),
            back_prop=False)
        disc_reward = tf.reverse(
            tf.transpose(rev_reward_T_cum, [1, 0]), [1])

    return disc_reward 
Example #9
Source File: core.py    From lm-human-preferences with MIT License
def take_top_p_logits(logits, p):
    """Nucleus sampling"""
    batch, sequence, _ = logits.shape.as_list()
    sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
    cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    indices = tf.stack([
        tf.range(0, batch)[:, tf.newaxis],
        tf.range(0, sequence)[tf.newaxis, :],
        # number of indices to include
        tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
    ], axis=-1)
    min_values = tf.gather_nd(sorted_logits, indices)
    return tf.where(
        logits < min_values,
        tf.ones_like(logits) * -1e10,
        logits,
    ) 
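A hedged usage sketch (shapes and values invented for illustration; logits needs static batch and sequence dimensions, since the source calls shape.as_list()):

logits = tf.constant([[[2.0, 1.0, 0.5, -1.0]]])  # [batch=1, sequence=1, vocab=4]
filtered = take_top_p_logits(logits, p=0.9)
# Tokens outside the smallest set whose cumulative probability reaches p are
# pushed down to -1e10, so a subsequent softmax/sample effectively ignores them.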
Example #10
Source File: modes.py    From spektral with MIT License
def _vectorised_get_cum_graph_size(nodes, graph_sizes):
    """
    Takes a list of node ids and graph sizes ordered by segment ID and returns the
    number of nodes contained in graphs with smaller segment ID.

    :param nodes: List of node ids of shape (nodes)
    :param graph_sizes: List of graph sizes (i.e. tf.math.segment_sum(tf.ones_like(I), I) where I are the
    segment IDs).
    :return: A list of shape (nodes) where each entry corresponds to the number of nodes contained in graphs
    with smaller segment ID for each node.
    """
    def get_cum_graph_size(node):
        cum_graph_sizes = tf.cumsum(graph_sizes, exclusive=True)
        indicator_if_smaller = tf.cast(node - cum_graph_sizes >= 0, tf.int32)
        graph_id = tf.reduce_sum(indicator_if_smaller) - 1
        return tf.cumsum(graph_sizes, exclusive=True)[graph_id]

    return tf.map_fn(get_cum_graph_size, nodes) 
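A small hedged usage sketch (values invented): with two graphs of sizes 2 and 3, nodes belonging to the second graph map to a cumulative offset of 2:

graph_sizes = tf.constant([2, 3])
nodes = tf.constant([0, 1, 2, 3, 4])
_vectorised_get_cum_graph_size(nodes, graph_sizes)  # [0, 0, 2, 2, 2]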
Example #11
Source File: modes.py    From spektral with MIT License
def disjoint_signal_to_batch(X, I):
    """
    Converts a disjoint graph signal to batch mode by zero-padding.

    :param X: Tensor, node features of shape `(nodes, features)`.
    :param I: Tensor, graph IDs of shape `(nodes, )`.
    :return batch: Tensor, batched node features of shape `(batch, max_nodes, features)`.
    """
    I = tf.cast(I, tf.int32)
    num_nodes = tf.math.segment_sum(tf.ones_like(I), I)
    start_index = tf.cumsum(num_nodes, exclusive=True)
    n_graphs = tf.shape(num_nodes)[0]
    max_n_nodes = tf.reduce_max(num_nodes)
    batch_n_nodes = tf.shape(I)[0]
    feature_dim = tf.shape(X)[-1]

    index = tf.range(batch_n_nodes)
    index = (index - tf.gather(start_index, I)) + (I * max_n_nodes)
    dense = tf.zeros((n_graphs * max_n_nodes, feature_dim), dtype=X.dtype)
    dense = tf.tensor_scatter_nd_update(dense, index[..., None], X)

    batch = tf.reshape(dense, (n_graphs, max_n_nodes, feature_dim))

    return batch 
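A small hedged usage sketch of the zero-padding behavior (shapes invented for illustration):

X = tf.constant([[1.], [2.], [3.], [4.], [5.]])  # 5 nodes, 1 feature
I = tf.constant([0, 0, 1, 1, 1])                 # two graphs: sizes 2 and 3
batch = disjoint_signal_to_batch(X, I)
# batch.shape == (2, 3, 1); graph 0 gets one zero row of padding.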
Example #12
Source File: loss.py    From mobile-segmentation with Apache License 2.0
def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    gts = tf.reduce_sum(gt_sorted)
    intersection = gts - tf.cumsum(gt_sorted)
    union = gts + tf.cumsum(1. - gt_sorted)
    jaccard = 1. - intersection / union
    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
    return jaccard


# --------------------------- BINARY LOSSES --------------------------- 
Example #13
Source File: common_layers.py    From BERT with Apache License 2.0
def cumsum(x, axis=0, exclusive=False):
  """TPU hack for tf.cumsum.

  This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
  the axis dimension is very large.

  Args:
    x: a Tensor
    axis: an integer
    exclusive: a boolean

  Returns:
    Tensor of the same shape as x.
  """
  if not is_xla_compiled():
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  x_shape = shape_list(x)
  rank = len(x_shape)
  length = x_shape[axis]
  my_range = tf.range(length)
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  ret = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != rank - 1:
    ret = tf.transpose(
        ret,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return ret 
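To see why the tensordot formulation is a prefix sum, a minimal sketch of the inclusive mask for length 4 (plain TF, no XLA assumed):

my_range = tf.range(4)
mask = tf.cast(tf.less_equal(my_range[:, None], my_range[None, :]), tf.float32)
# mask[i, j] == 1 exactly when i <= j:
# [[1, 1, 1, 1],
#  [0, 1, 1, 1],
#  [0, 0, 1, 1],
#  [0, 0, 0, 1]]
x = tf.constant([1., 2., 3., 4.])
tf.tensordot(x, mask, axes=[[0], [0]])  # [1., 3., 6., 10.] == tf.cumsum(x)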
Example #14
Source File: common_layers.py    From BERT with Apache License 2.0
def gather(params, indices, dtype=tf.float32):
  """Version of tf.gather that works faster on tpu."""
  if not is_xla_compiled():
    return tf.gather(params, indices)
  vocab_size = params.get_shape().as_list()[0]
  indices_flat = tf.reshape(indices, [-1])
  out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
  out = reshape_like(out, tf.expand_dims(indices, -1))
  return out


# TODO(noam): remove this function after TPUs do cumsum faster. 
Example #15
Source File: common_layers.py    From BERT with Apache License 2.0
def weights_prepend_inputs_to_targets(labels):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all nonzero labels past the first zero.
  See prepend_mode in common_hparams.py

  Args:
    labels: A Tensor of int32s.

  Returns:
    A Tensor of floats.
  """
  past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1)
  nonzero = to_float(labels)
  return to_float(tf.not_equal(past_first_zero * nonzero, 0)) 
Example #16
Source File: normalisation.py    From DeepXi with Mozilla Public License 2.0
def SeqCausalLayerNorm(x, seq_len, centre=True, scale=True):
	'''
	Sequence-wise causal layer normalisation with sequence masking (causal layer norm version of https://arxiv.org/pdf/1510.01378.pdf). 

	Input/s:
		x - input.
		seq_len - length of each sequence. 
		centre - whether to learn an offset (beta) parameter.
		scale - whether to learn a scale (gamma) parameter.

	Output/s:
		normalised input.
	'''
	global count
	count += 1
	with tf.variable_scope('LayerNorm' + str(count)):
		input_size = x.get_shape().as_list()[-1]
		mask = tf.cast(tf.sequence_mask(seq_len), tf.float32) # convert mask to float.
		den = tf.multiply(tf.range(1.0, tf.add(tf.cast(tf.shape(mask)[-1], tf.float32), 1.0), dtype=tf.float32), input_size)
		mu = tf.expand_dims(tf.truediv(tf.cumsum(tf.reduce_sum(x, -1), -1), den), 2)
		sigma = tf.expand_dims(tf.truediv(tf.cumsum(tf.reduce_sum(tf.square(tf.subtract(x, 
			mu)), -1), -1), den),2)
		if centre: beta = tf.get_variable("beta", input_size, dtype=tf.float32,  
			initializer=tf.constant_initializer(0.0), trainable=True)
		else: beta = tf.constant(np.zeros(input_size), name="beta", dtype=tf.float32)
		if scale: gamma = tf.get_variable("Gamma", input_size, dtype=tf.float32,  
			initializer=tf.constant_initializer(1.0), trainable=True)
		else: gamma = tf.constant(np.ones(input_size), name="Gamma", dtype=tf.float32)
		return tf.multiply(tf.nn.batch_normalization(x, mu, sigma, offset=beta, scale=gamma, 
			variance_epsilon = 1e-12), tf.expand_dims(mask, 2)) 
Example #17
Source File: common_layers.py    From training_results_v0.5 with Apache License 2.0
def cumsum(x, axis=0, exclusive=False):
  """TPU hack for tf.cumsum.

  This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless
  the axis dimension is very large.

  Args:
    x: a Tensor
    axis: an integer
    exclusive: a boolean

  Returns:
    Tensor of the same shape as x.
  """
  if not is_xla_compiled():
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  x_shape = shape_list(x)
  rank = len(x_shape)
  length = x_shape[axis]
  my_range = tf.range(length)
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  ret = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != rank - 1:
    ret = tf.transpose(
        ret,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return ret 
Example #18
Source File: transformer_aan.py    From zero with BSD 3-Clause "New" or "Revised" License
def average_attention_strategy(strategy, x, mask, state, layer, params):
    strategy = strategy.lower()

    is_training = ('decoder' not in state)

    if strategy == "aan":
        if is_training:
            if params.aan_mask:
                aan_bias = func.attention_bias(mask, "aan")
                x_fwd = tf.matmul(aan_bias, x)
            else:
                aan_bias = tf.cumsum(mask, axis=1)
                aan_bias = tf.where(tf.less_equal(aan_bias, 0.),
                                    tf.ones_like(aan_bias), aan_bias)
                aan_bias = tf.expand_dims(dtype.tf_to_float(aan_bias), 2)

                x_fwd = tf.cumsum(x, axis=1) / aan_bias
        else:
            cache = state['decoder']['state']['layer_{}'.format(layer)]
            x_fwd = (x + cache['aan']) / dtype.tf_to_float(state['time'] + 1)
            cache['aan'] = x + cache['aan']

        return x_fwd

    else:
        raise NotImplementedError("Not supported: {}".format(strategy)) 
Example #19
Source File: common_layers.py    From training_results_v0.5 with Apache License 2.0
def gather(params, indices, dtype=tf.float32):
  """Version of tf.gather that works faster on tpu."""
  if not is_xla_compiled():
    return tf.gather(params, indices)
  vocab_size = params.get_shape().as_list()[0]
  indices_flat = tf.reshape(indices, [-1])
  out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params)
  out = reshape_like(out, tf.expand_dims(indices, -1))
  return out


# TODO(noam): remove this function after TPUs do cumsum faster. 
Example #20
Source File: common_layers.py    From BERT with Apache License 2.0
def weights_concatenated(labels):
  """Assign weight 1.0 to the "target" part of the concatenated labels.

  The labels look like:
    source English I love you . ID1 target French Je t'aime . ID1 source
      English the cat ID1 target French le chat ID1 source English ...

  We want to assign weight 1.0 to all words in the target text (including the
  ID1 end symbol), but not to the source text or the boilerplate.  In the
  above example, the target words that get positive weight are:
    Je t'aime . ID1 le chat ID1

  Args:
    labels: a Tensor
  Returns:
    a Tensor
  """
  eos_mask = tf.to_int32(tf.equal(labels, 1))
  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
  in_target = tf.equal(tf.mod(sentence_num, 2), 1)
  # first two tokens of each sentence are boilerplate.
  sentence_num_plus_one = sentence_num + 1
  shifted = tf.pad(sentence_num_plus_one,
                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
  ret = to_float(tf.logical_and(nonboilerplate, in_target))
  return ret 
Example #21
Source File: balanced_positive_negative_sampler.py    From training_results_v0.5 with Apache License 2.0
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                     num_end_samples, total_num_samples):
    """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.float32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                  total_num_samples,
                                  dtype=tf.float32)
    return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                                one_hot_selector, axes=[0, 0]), tf.int32) 
Example #22
Source File: common_layers.py    From training_results_v0.5 with Apache License 2.0
def weights_concatenated(labels):
  """Assign weight 1.0 to the "target" part of the concatenated labels.

  The labels look like:
    source English I love you . ID1 target French Je t'aime . ID1 source
      English the cat ID1 target French le chat ID1 source English ...

  We want to assign weight 1.0 to all words in the target text (including the
  ID1 end symbol), but not to the source text or the boilerplate.  In the
  above example, the target words that get positive weight are:
    Je t'aime . ID1 le chat ID1

  Args:
    labels: a Tensor
  Returns:
    a Tensor
  """
  eos_mask = tf.to_int32(tf.equal(labels, 1))
  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
  in_target = tf.equal(tf.mod(sentence_num, 2), 1)
  # first two tokens of each sentence are boilerplate.
  sentence_num_plus_one = sentence_num + 1
  shifted = tf.pad(sentence_num_plus_one,
                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
  ret = tf.to_float(tf.logical_and(nonboilerplate, in_target))
  return ret 
Example #23
Source File: beam_search.py    From seq2seq with Apache License 2.0
def get_weights(sequence, eos_id, include_first_eos=True):
    cumsum = tf.cumsum(tf.to_float(tf.not_equal(sequence, eos_id)), axis=1)
    range_ = tf.range(start=1, limit=tf.shape(sequence)[1] + 1)
    range_ = tf.tile(tf.expand_dims(range_, axis=0), [tf.shape(sequence)[0], 1])
    weights = tf.to_float(tf.equal(cumsum, tf.to_float(range_)))

    if include_first_eos:
        weights = weights[:,:-1]
        shape = [tf.shape(weights)[0], 1]
        weights = tf.concat([tf.ones(tf.stack(shape)), weights], axis=1)

    return tf.stop_gradient(weights) 
Example #24
Source File: mean_average_precision.py    From blueoil with Apache License 2.0
def _calc_precision_recall(tp, fp, num_gt_boxes):
    """Calculate precision and recall array.

    Args:
        tp(np.ndarray): sorted tp.
        fp(np.ndarray): sorted fp.
        num_gt_boxes(int): number of gt boxes

    Return:
        precision: detection boxes size precision array
        recall: detection boxes size recall array
        scalar_precision: precision
        scalar_recall: recall
    """
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)

    # When num_gt_boxes is zero, recall is defined as 100%.
    if num_gt_boxes == 0:
        recall = np.ones(tp.size)
    else:
        recall = tp / num_gt_boxes

    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)

    scalar_precision = precision[-1] if precision.size != 0 else 0
    scalar_recall = recall[-1] if recall.size != 0 else 0

    return precision, recall, scalar_precision, scalar_recall 
Example #25
Source File: common_attention.py    From training_results_v0.5 with Apache License 2.0
def attention_bias_prepend_inputs_full_attention(padding):
  """Create a bias tensor for prepend_mode="prepend_inputs_full_attention".

  See prepend_inputs in common_hparams.py.

  Produces a bias tensor to be used in self-attention.

  This bias tensor allows for full connectivity in the "inputs" part of
  the sequence and masked connectivity in the targets part.

  Args:
    padding: a float `Tensor` with shape [batch, length] with
      ones in positions corresponding to padding.  In each row, a single
      padding position separates the input part from the target part.

  Returns:
    a `Tensor` with shape [batch, 1, length, length].
  """
  # Everything past the first padding position is part of the target.
  # This Tensor has zeros for the source portion and separator,
  # and ones for the target portion.
  in_target = tf.cumsum(padding, axis=1, exclusive=True)
  # The position within the target, or 0 if part of the source.
  target_pos = tf.cumsum(in_target, axis=1)
  # A position with a lesser target_pos cannot see a position with greater
  # target_pos.
  illegal_connections = tf.greater(
      tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2))
  bias = tf.to_float(illegal_connections) * -1e9
  bias = tf.expand_dims(bias, 1)
  return bias 
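A worked toy row showing how the two stacked cumsums turn the single separator into a monotone target-position index (values invented):

# padding row:                   [0, 0, 1, 0, 0]  (separator at index 2)
# in_target  (exclusive cumsum): [0, 0, 0, 1, 1]
# target_pos (inclusive cumsum): [0, 0, 0, 1, 2]
# A position with target_pos t can attend only to positions with target_pos <= t,
# so the targets stay causal while the inputs stay fully connected.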
Example #26
Source File: balanced_positive_negative_sampler.py    From training_results_v0.5 with Apache License 2.0
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                     num_end_samples, total_num_samples):
    """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
    input_length = tf.shape(input_tensor)[0]
    start_positions = tf.less(tf.range(input_length), num_start_samples)
    end_positions = tf.greater_equal(
        tf.range(input_length), input_length - num_end_samples)
    selected_positions = tf.logical_or(start_positions, end_positions)
    selected_positions = tf.cast(selected_positions, tf.float32)
    indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                    selected_positions)
    one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                  total_num_samples,
                                  dtype=tf.float32)
    return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
                                one_hot_selector, axes=[0, 0]), tf.int32) 
Example #27
Source File: mean_average_precision.py    From blueoil with Apache License 2.0
def _cummax(x, reverse=False, name=None):
    """Compute the cumulative maximum of the tensor `x` along `axis`. This
    operation is similar to the more classic `cumsum`. Only support 1D Tensor
    for now.
    Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
       axis: A `Tensor` of type `int32` (default: 0).
       reverse: A `bool` (default: False).
       name: A name for the operation (optional).
    Returns:
    A `Tensor`. Has the same type as `x`.
    """
    with tf.compat.v1.name_scope(name, "Cummax", [x]) as name:
        # x = tf.convert_to_tensor(x, name="x")
        # Not very optimal: should directly integrate reverse into tf.scan.
        if reverse:
            x = tf.reverse(x, axis=[0])
        # 'Accumulating' maximum: ensure it is always increasing.
        cmax = tf.scan(lambda a, y: tf.maximum(a, y), x,
                       initializer=None, parallel_iterations=1,
                       back_prop=False, swap_memory=False)
        if reverse:
            cmax = tf.reverse(cmax, axis=[0])
    return cmax


# ===========================================================================
# Mean average precision computations on numpy
# =========================================================================== 
Example #28
Source File: mean_average_precision.py    From blueoil with Apache License 2.0
def _precision_recall(
        tp,
        fp,
        scores,
        num_gt_boxes,
        class_name,
        dtype=tf.float64,
        scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    default_name = 'precision_recall_{}'.format(class_name)
    # Sort by score.
    with tf.compat.v1.name_scope(scope, default_name, [num_gt_boxes, tp, fp, scores]):
        num_detections = tf.size(scores)
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=num_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)

        recall = _safe_div_ones(tp, tf.cast(num_gt_boxes, dtype), 'recall')
        precision = _safe_div_zeros(tp, tp + fp, 'precision')

        scalar_precision = tf.cond(
            tf.equal(tf.size(precision), 0),
            true_fn=lambda: tf.constant(0, dtype=dtype),
            false_fn=lambda: precision[-1],
            name="scalar_precision"
        )

        scalar_recall = tf.cond(
            tf.equal(tf.size(recall), 0),
            true_fn=lambda: tf.constant(0, dtype=dtype),
            false_fn=lambda: recall[-1],
            name="scalar_recall"
        )

        return tf.tuple([precision, recall, scalar_precision, scalar_recall]) 
Example #29
Source File: objective.py    From yolo_v2 with Apache License 2.0
def shift_values(values, discount, rollout, final_values=0.0):
  """Shift values up by some amount of time.

  Those values that shift from a value beyond the last value
  are calculated using final_values.

  """
  roll_range = tf.cumsum(tf.ones_like(values[:rollout, :]), 0,
                         exclusive=True, reverse=True)
  final_pad = tf.expand_dims(final_values, 0) * discount ** roll_range
  return tf.concat([discount ** rollout * values[rollout:, :],
                    final_pad], 0) 
Example #30
Source File: common_layers.py    From training_results_v0.5 with Apache License 2.0
def weights_prepend_inputs_to_targets(labels):
  """Assign weight 1.0 to only the "targets" portion of the labels.

  Weight 1.0 is assigned to all nonzero labels past the first zero.
  See prepend_mode in common_hparams.py

  Args:
    labels: A Tensor of int32s.

  Returns:
    A Tensor of floats.
  """
  past_first_zero = tf.cumsum(tf.to_float(tf.equal(labels, 0)), axis=1)
  nonzero = tf.to_float(labels)
  return tf.to_float(tf.not_equal(past_first_zero * nonzero, 0))