Python tensorflow.minimum() Examples

The following are 30 code examples of tensorflow.minimum(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the tensorflow module.
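As a quick orientation: tf.minimum(x, y) returns the elementwise minimum of two tensors and broadcasts scalars and compatible shapes, NumPy-style. A minimal sketch (assuming TensorFlow 2.x eager execution; under TF 1.x the tensors would need to be evaluated in a session):

import tensorflow as tf

a = tf.constant([1.0, 4.0, 3.0])
b = tf.constant([2.0, 2.0, 2.0])
print(tf.minimum(a, b))    # [1. 2. 2.] -- elementwise minimum
print(tf.minimum(a, 2.0))  # scalar broadcasts: [1. 2. 2.]
# Chained with tf.maximum it becomes a clamp, a pattern that recurs
# throughout the examples below:
print(tf.maximum(tf.minimum(a, 3.0), 2.0))  # clamp to [2, 3]: [2. 3. 3.]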
Example #1
Source File: memory.py    From DOTA_models with Apache License 2.0
def get_hint_pool_idxs(self, normalized_query):
    """Get small set of idxs to compute nearest neighbor queries on.

    This is an expensive look-up on the whole memory that is used to
    avoid more expensive operations later on.

    Args:
      normalized_query: A Tensor of shape [None, key_dim].

    Returns:
      A Tensor of shape [None, choose_k] of indices in memory
      that are closest to the queries.

    """
    # get hash of query vecs
    hash_slot_idxs = self.get_hash_slots(normalized_query)

    # grab mem idxs in the hash slots
    hint_pool_idxs = [
        tf.maximum(tf.minimum(
            tf.gather(self.hash_slots[i], idxs),
            self.memory_size - 1), 0)
        for i, idxs in enumerate(hash_slot_idxs)]

    return tf.concat(axis=1, values=hint_pool_idxs) 
Example #2
Source File: box_list_ops.py    From object_detector_app with MIT License
def matched_intersection(boxlist1, boxlist2, scope=None):
  """Compute intersection areas between corresponding boxes in two boxlists.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding N boxes
    scope: name scope.

  Returns:
    a tensor with shape [N] representing the intersection areas between corresponding boxes
  """
  with tf.name_scope(scope, 'MatchedIntersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    min_ymax = tf.minimum(y_max1, y_max2)
    max_ymin = tf.maximum(y_min1, y_min2)
    intersect_heights = tf.maximum(0.0, min_ymax - max_ymin)
    min_xmax = tf.minimum(x_max1, x_max2)
    max_xmin = tf.maximum(x_min1, x_min2)
    intersect_widths = tf.maximum(0.0, min_xmax - max_xmin)
    return tf.reshape(intersect_heights * intersect_widths, [-1]) 
Example #3
Source File: sac.py    From cs294-112_hws with MIT License
def _value_function_loss_for(self, policy, q_function, q_function2, value_function):
    ### Problem 1.2.A
    ### YOUR CODE HERE
    actions, log_pis = policy(self._observations_ph)
    if q_function2 is None:
        q_n = tf.squeeze(q_function((self._observations_ph, actions)), axis=1)
    else:
        q_n = tf.minimum(
            tf.squeeze(q_function((self._observations_ph, actions)), axis=1),
            tf.squeeze(q_function2((self._observations_ph, actions)), axis=1)
        )
    v_n = tf.squeeze(value_function(self._observations_ph), axis=1)
    value_function_loss = tf.losses.mean_squared_error(
        q_n - self._alpha * log_pis,
        v_n
    )
    return value_function_loss 
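The tf.minimum over the two critics above is the clipped double-Q technique (as in TD3/SAC): taking the elementwise minimum of two independent Q estimates counteracts overestimation bias. Just the min step, on hypothetical critic outputs (a sketch assuming eager execution):

import tensorflow as tf

# Hypothetical Q estimates from two critics for a batch of 3 states.
q1 = tf.constant([1.2, 0.8, 2.5])
q2 = tf.constant([1.0, 1.1, 2.9])

# Pessimistic target: elementwise minimum of the two estimates.
print(tf.minimum(q1, q2))  # [1.0, 0.8, 2.5]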
Example #4
Source File: box_list_ops.py    From object_detector_app with MIT License
def intersection(boxlist1, boxlist2, scope=None):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  with tf.name_scope(scope, 'Intersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
    all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths 
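Dividing these intersection areas by the pairwise union areas yields IoU; the min/max core of the computation can be checked on toy boxes in [y_min, x_min, y_max, x_max] form (a numeric sketch assuming eager execution):

import tensorflow as tf

boxes1 = tf.constant([[0.0, 0.0, 2.0, 2.0]])   # N = 1
boxes2 = tf.constant([[1.0, 1.0, 3.0, 3.0],
                      [4.0, 4.0, 5.0, 5.0]])   # M = 2

y_min1, x_min1, y_max1, x_max1 = tf.split(boxes1, 4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(boxes2, 4, axis=1)

heights = tf.maximum(0.0, tf.minimum(y_max1, tf.transpose(y_max2))
                     - tf.maximum(y_min1, tf.transpose(y_min2)))
widths = tf.maximum(0.0, tf.minimum(x_max1, tf.transpose(x_max2))
                    - tf.maximum(x_min1, tf.transpose(x_min2)))
print(heights * widths)  # [[1. 0.]] -- unit overlap with the first box only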
Example #5
Source File: metrics.py    From fine-lm with MIT License
def padded_accuracy_topk(predictions,
                         labels,
                         k,
                         weights_fn=common_layers.weights_nonzero):
  """Percentage of times that top-k predictions matches labels on non-0s."""
  with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)
    effective_k = tf.minimum(k,
                             common_layers.shape_list(padded_predictions)[-1])
    _, outputs = tf.nn.top_k(padded_predictions, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(padded_labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights 
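The effective_k clamp is what makes this metric safe for small vocabularies: tf.nn.top_k raises an error if k exceeds the size of the last dimension, so k is first reduced with tf.minimum. The guard in isolation, on hypothetical logits (a sketch assuming eager execution):

import tensorflow as tf

logits = tf.constant([[0.1, 0.7, 0.2]])            # last dimension has size 3
k = 5                                              # caller asks for top-5
effective_k = tf.minimum(k, tf.shape(logits)[-1])  # -> 3
_, indices = tf.nn.top_k(logits, k=effective_k)
print(indices)  # [[1 2 0]] -- top_k with k=5 would have failed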
Example #6
Source File: box_list_ops.py    From DOTA_models with Apache License 2.0
def matched_intersection(boxlist1, boxlist2, scope=None):
  """Compute intersection areas between corresponding boxes in two boxlists.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding N boxes
    scope: name scope.

  Returns:
    a tensor with shape [N] representing the intersection areas between corresponding boxes
  """
  with tf.name_scope(scope, 'MatchedIntersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    min_ymax = tf.minimum(y_max1, y_max2)
    max_ymin = tf.maximum(y_min1, y_min2)
    intersect_heights = tf.maximum(0.0, min_ymax - max_ymin)
    min_xmax = tf.minimum(x_max1, x_max2)
    max_xmin = tf.maximum(x_min1, x_min2)
    intersect_widths = tf.maximum(0.0, min_xmax - max_xmin)
    return tf.reshape(intersect_heights * intersect_widths, [-1]) 
Example #7
Source File: box_list_ops.py    From DOTA_models with Apache License 2.0
def intersection(boxlist1, boxlist2, scope=None):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  with tf.name_scope(scope, 'Intersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
    all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths 
Example #8
Source File: diet.py    From fine-lm with MIT License
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q 
Example #9
Source File: pwl_calibration_lib.py    From lattice with Apache License 2.0
def compute_interpolation_weights(inputs, keypoints, lengths):
  """Computes weights for PWL calibration.

  Args:
    inputs: Tensor of shape: `(D0, D1, ..., DN, 1)` which represents inputs to
      the pwl function. A typical shape is: `(batch_size, 1)`.
    keypoints: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
      the left keypoints of the pieces of the piecewise linear function along
      the X axis.
    lengths: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
      the lengths of the pieces of the piecewise linear function along the X
      axis.

  Returns:
    Interpolation weights tensor of shape: `(D0, D1, ..., DN, num_keypoints)`.
  """
  weights = (inputs - keypoints) / lengths
  weights = tf.minimum(weights, 1.0)
  weights = tf.maximum(weights, 0.0)
  # Prepend 1.0 at the beginning to add bias unconditionally.
  return tf.concat([tf.ones_like(inputs), weights], axis=-1) 
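A numeric walk-through may help: with keypoints at 0, 1, and 3, the left keypoints are [0, 1] and the lengths are [1, 2]; an input of 2 saturates the first piece and is halfway through the second (a sketch assuming eager execution):

import tensorflow as tf

keypoints = tf.constant([0.0, 1.0])  # left keypoints of the two pieces
lengths = tf.constant([1.0, 2.0])    # piece lengths (keypoints at 0, 1, 3)
inputs = tf.constant([[2.0]])        # shape (batch_size, 1)

weights = tf.maximum(tf.minimum((inputs - keypoints) / lengths, 1.0), 0.0)
print(tf.concat([tf.ones_like(inputs), weights], axis=-1))
# [[1.0, 1.0, 0.5]] -- bias term, saturated first piece, half of the second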
Example #10
Source File: learning_rate.py    From fine-lm with MIT License
def learning_rate_factor(name, step_num, hparams):
  """Compute the designated learning rate factor from hparams."""
  if name == "constant":
    tf.logging.info("Base learning rate: %f", hparams.learning_rate_constant)
    return hparams.learning_rate_constant
  elif name == "linear_warmup":
    return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps)
  elif name == "linear_decay":
    ret = (hparams.train_steps - step_num) / hparams.learning_rate_decay_steps
    return tf.minimum(1.0, tf.maximum(0.0, ret))
  elif name == "rsqrt_decay":
    return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps))
  elif name == "rsqrt_hidden_size":
    return hparams.hidden_size ** -0.5
  elif name == "legacy":
    return legacy_learning_rate_schedule(hparams)
  else:
    raise ValueError("unknown learning rate factor %s" % name) 
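For instance, the linear_warmup factor rises linearly from 0 to 1 over the warmup period, and tf.minimum then holds it at 1. A standalone check with a hypothetical warmup_steps value (a sketch assuming eager execution):

import tensorflow as tf

warmup_steps = 1000.0
for step_num in [100.0, 500.0, 1000.0, 5000.0]:
    factor = tf.minimum(1.0, step_num / warmup_steps)
    print(step_num, float(factor))  # 0.1, 0.5, 1.0, 1.0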
Example #11
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
    """
    TensorFlow implementation for applying perturbations to input features
    based on saliency maps
    :param i: index of first selected feature
    :param j: index of second selected feature
    :param X: a matrix containing our input features for our sample
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :param theta: delta for each feature adjustment
    :param clip_min: minimum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    :return: a perturbed input feature matrix for a target class
    """

    # perturb our input sample
    if increase:
        X[0, i] = np.minimum(clip_max, X[0, i] + theta)
        X[0, j] = np.minimum(clip_max, X[0, j] + theta)
    else:
        X[0, i] = np.maximum(clip_min, X[0, i] - theta)
        X[0, j] = np.maximum(clip_min, X[0, j] - theta)

    return X 
Example #12
Source File: networks.py    From disentangling_conditional_gans with MIT License
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])   # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                              # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)           # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1,2,3], keep_dims=True)      # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                 # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.

#----------------------------------------------------------------------------
# Generator network used in the paper. 
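The tf.minimum on the first line is defensive: if the actual batch is smaller than the requested group size, the group shrinks to match, so the reshape into [G, M, C, H, W] remains valid. The guard in isolation (a sketch assuming eager execution):

import tensorflow as tf

x = tf.zeros([3, 8, 4, 4])                  # batch of only 3 samples
group_size = tf.minimum(4, tf.shape(x)[0])  # requested 4, clamped to 3
print(int(group_size))                      # 3 -- the reshape now succeeds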
Example #13
Source File: internal_utils.py    From lattice with Apache License 2.0
def _min_projection(weights, sorted_indices, key_less_than_values, step):
  """Returns an approximate partial min projection with the given step_size.

  Args:
    weights: A list of tensors of shape `(units,)` to be approximately projected
      based on the monotonicity constraints.
    sorted_indices: Topologically sorted list of indices based on the
      monotonicity constraints.
    key_less_than_values: A defaultdict from index to a list of indices, such
      that for `j` in `key_less_than_values[i]` we must have
      `weight[i] <= weight[j]`.
    step: A value defining if we should apply a full projection (`step == 1`) or
      a partial projection (`step < 1`).

  Returns:
    Projected list of tensors.
  """
  projected_weights = list(weights)  # copy
  for i in sorted_indices[::-1]:
    if key_less_than_values[i]:
      min_projection = projected_weights[i]
      for j in key_less_than_values[i]:
        min_projection = tf.minimum(min_projection, projected_weights[j])
      if step == 1:
        projected_weights[i] = min_projection
      else:
        projected_weights[i] = (
            step * min_projection + (1 - step) * projected_weights[i])
  return projected_weights 
Example #14
Source File: pwl_calibration_lib.py    From lattice with Apache License 2.0
def _approximately_project_convexity(heights, lengths, convexity):
  """Strictly projects convexity, but is not exact with respect to the L2 norm.

  Projects by iterating over pieces of piecewise linear function left to right
  and aligning current slope with previous one if it violates convexity.

  Args:
    heights: `(num_heights, units)`-shape tensor which represents heights.
    lengths: `(num_heights)`-shape tensor which represents lengths of segments
      which correspond to heights.
    convexity: -1, 0 or 1, where 1 stands for a convex function, -1 for a
      concave one, and 0 for no convexity constraint.

  Returns:
    Projected heights.
  """
  if convexity == 0:
    return heights
  heights = tf.unstack(heights, axis=0)
  lengths = tf.unstack(lengths, axis=0)
  for i in range(1, len(heights)):
    temp = heights[i - 1] * (lengths[i] / lengths[i - 1])
    if convexity == 1:
      heights[i] = tf.maximum(heights[i], temp)
    else:
      heights[i] = tf.minimum(heights[i], temp)

  return tf.stack(heights, axis=0) 
Example #15
Source File: pwl_calibration_lib.py    From lattice with Apache License 2.0
def _project_monotonicity(heights, monotonicity):
  """Projects into monotonic function."""
  if monotonicity == 0:
    return heights
  elif monotonicity == 1:
    return tf.maximum(heights, 0.0)
  else:
    return tf.minimum(heights, 0.0) 
Example #16
Source File: inception_v2.py    From STORK with MIT License
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time, this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                      tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
Example #17
Source File: sequence_classifier.py    From knowledge-net with MIT License
def encode(self, positions):
    positions = tf.minimum(positions, self.maximum_position)
    #positions = tf.maximum(positions, -1*self.maximum_position)
    #c = tf.constant(self.maximum_position)
    #indexes = tf.math.add(positions, c)
    return tf.nn.embedding_lookup(self.embedding, positions) 
Example #18
Source File: util.py    From exposure with MIT License
def leaky_clamp(x, lower, upper, leak=0.1, name="leaky_clamp"):
  with tf.variable_scope(name):
    x = (x - lower) / (upper - lower)
    return tf.minimum(tf.maximum(leak * x, x), leak * x -
                      (leak - 1)) * (upper - lower) + lower 
Example #19
Source File: util.py    From exposure with MIT License
def double_lrelu(x, leak=0.1, name="double_lrelu"):
  with tf.variable_scope(name):
    return tf.minimum(tf.maximum(leak * x, x), leak * x - (leak - 1))


# clamp to lower, upper; leak is RELATIVE 
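double_lrelu is the identity on [0, 1] and has slope `leak` outside that interval; evaluating it at one point per region makes the three pieces visible (a sketch assuming eager execution):

import tensorflow as tf

leak = 0.1
x = tf.constant([-1.0, 0.5, 2.0])
y = tf.minimum(tf.maximum(leak * x, x), leak * x - (leak - 1))
print(y)  # [-0.1  0.5  1.1] -- slope 0.1 below 0, identity on [0, 1], slope 0.1 above 1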
Example #20
Source File: inception_v3.py    From STORK with MIT License
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time, this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                      tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
Example #21
Source File: common_layers.py    From fine-lm with MIT License
def saturating_sigmoid(x):
  """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1]."""
  with tf.name_scope("saturating_sigmoid", values=[x]):
    y = tf.sigmoid(x)
    return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) 
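Unlike an ordinary sigmoid, the stretched-and-clipped version reaches exactly 0 and 1 at finite inputs, which is the point of the construction. A quick numeric check (a sketch assuming eager execution):

import tensorflow as tf

x = tf.constant([-5.0, 0.0, 5.0])
y = tf.minimum(1.0, tf.maximum(0.0, 1.2 * tf.sigmoid(x) - 0.1))
print(y)  # [0.0, 0.5, 1.0] -- saturates exactly, unlike tf.sigmoid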
Example #22
Source File: preprocessor.py    From object_detector_app with MIT License
def _random_integer(minval, maxval, seed):
  """Returns a random 0-D tensor between minval and maxval.

  Args:
    minval: minimum value of the random tensor.
    maxval: maximum value of the random tensor.
    seed: random seed.

  Returns:
    A random 0-D tensor between minval and maxval.
  """
  return tf.random_uniform(
      [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed) 
Example #23
Source File: box_list_ops.py    From object_detector_app with MIT License
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
  """Clip bounding boxes to a window.

  This op clips any input bounding boxes (represented by bounding box
  corners) to a window, optionally filtering out boxes that do not
  overlap at all with the window.

  Args:
    boxlist: BoxList holding M_in boxes
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window to which the op should clip boxes.
    filter_nonoverlapping: whether to filter out boxes that do not overlap at
      all with the window.
    scope: name scope.

  Returns:
    a BoxList holding M_out boxes where M_out <= M_in
  """
  with tf.name_scope(scope, 'ClipToWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
    y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)
    x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)
    x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
    clipped = box_list.BoxList(
        tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
                  1))
    clipped = _copy_extra_fields(clipped, boxlist)
    if filter_nonoverlapping:
      areas = area(clipped)
      nonzero_area_indices = tf.cast(
          tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
      clipped = gather(clipped, nonzero_area_indices)
    return clipped 
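The nested tf.maximum(tf.minimum(...)) calls clamp each coordinate into the window. The same clamp on scalar coordinates, against a unit window (a sketch assuming eager execution):

import tensorflow as tf

# A box that sticks out of the [0.0, 0.0, 1.0, 1.0] window on two sides.
y_min, x_max = -0.2, 1.4
win_y_min, win_y_max = 0.0, 1.0
win_x_min, win_x_max = 0.0, 1.0

y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
print(float(y_min_clipped), float(x_max_clipped))  # 0.0 1.0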
Example #24
Source File: minibatch_sampler.py    From object_detector_app with MIT License
def subsample_indicator(indicator, num_samples):
    """Subsample indicator vector.

    Given a boolean indicator vector with M elements set to `True`, the function
    assigns all but `num_samples` of these previously `True` elements to
    `False`. If `num_samples` is greater than M, the original indicator vector
    is returned.

    Args:
      indicator: a 1-dimensional boolean tensor indicating which elements
        are allowed to be sampled and which are not.
      num_samples: int32 scalar tensor

    Returns:
      a boolean tensor with the same shape as input (indicator) tensor
    """
    indices = tf.where(indicator)
    indices = tf.random_shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices,
                                                     tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1) 
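Here tf.minimum caps the requested sample count at the number of available indices, so tf.slice never reads past the end of the shuffled list. The guard in isolation (a sketch assuming eager execution):

import tensorflow as tf

indices = tf.constant([3, 7, 9])               # only 3 candidates available
num_samples = tf.minimum(tf.size(indices), 5)  # request 5, clamped to 3
print(tf.slice(indices, [0], tf.reshape(num_samples, [1])))  # [3 7 9]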
Example #25
Source File: acer.py    From HardRLWithYoutube with MIT License
def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma):
    """
    Calculates q_retrace targets

    :param R: Rewards
    :param D: Dones
    :param q_i: Q values for actions taken
    :param v: V values
    :param rho_i: Importance weight for each action
    :param nenvs: number of parallel environments
    :param nsteps: number of rollout steps per environment
    :param gamma: discount factor
    :return: Q_retrace values
    """
    rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True)  # list of len steps, shape [nenvs]
    rs = batch_to_seq(R, nenvs, nsteps, True)  # list of len steps, shape [nenvs]
    ds = batch_to_seq(D, nenvs, nsteps, True)  # list of len steps, shape [nenvs]
    q_is = batch_to_seq(q_i, nenvs, nsteps, True)
    vs = batch_to_seq(v, nenvs, nsteps + 1, True)
    v_final = vs[-1]
    qret = v_final
    qrets = []
    for i in range(nsteps - 1, -1, -1):
        check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * 6)
        qret = rs[i] + gamma * qret * (1.0 - ds[i])
        qrets.append(qret)
        qret = (rho_bar[i] * (qret - q_is[i])) + vs[i]
    qrets = qrets[::-1]
    qret = seq_to_batch(qrets, flat=True)
    return qret

# For ACER with PPO clipping instead of trust region
# def clip(ratio, eps_clip):
#     # assume 0 <= eps_clip <= 1
#     return tf.minimum(1 + eps_clip, tf.maximum(1 - eps_clip, ratio)) 
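The tf.minimum(1.0, rho_i) on the first line is the Retrace truncation: importance ratios are capped at 1 so the off-policy correction stays low-variance. Just that step, on hypothetical ratios (a sketch assuming eager execution):

import tensorflow as tf

rho_i = tf.constant([0.3, 1.7, 5.0])  # hypothetical importance ratios
print(tf.minimum(1.0, rho_i))         # [0.3 1.  1. ] -- truncated at 1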
Example #26
Source File: utils.py    From kss with Apache License 2.0
def learning_rate_decay(init_lr, global_step, warmup_steps = 4000.0):
    '''Noam scheme from tensor2tensor'''
    step = tf.to_float(global_step + 1)
    return init_lr * warmup_steps**0.5 * tf.minimum(step * warmup_steps**-1.5, step**-0.5) 
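tf.minimum is what switches this schedule from warmup to decay: step * warmup_steps**-1.5 is the smaller branch while step < warmup_steps, and step**-0.5 takes over afterwards, with the two branches meeting exactly at step == warmup_steps. A quick check (a sketch assuming eager execution):

import tensorflow as tf

init_lr, warmup_steps = 0.001, 4000.0
for step in [1.0, 4000.0, 100000.0]:
    lr = init_lr * warmup_steps**0.5 * tf.minimum(step * warmup_steps**-1.5,
                                                  step**-0.5)
    print(step, float(lr))  # rises linearly to 0.001 at step 4000, then decays as 1/sqrt(step)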
Example #27
Source File: acer_simple.py    From lirpg with MIT License
def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma):
    """
    Calculates q_retrace targets

    :param R: Rewards
    :param D: Dones
    :param q_i: Q values for actions taken
    :param v: V values
    :param rho_i: Importance weight for each action
    :param nenvs: number of parallel environments
    :param nsteps: number of rollout steps per environment
    :param gamma: discount factor
    :return: Q_retrace values
    """
    rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True)  # list of len steps, shape [nenvs]
    rs = batch_to_seq(R, nenvs, nsteps, True)  # list of len steps, shape [nenvs]
    ds = batch_to_seq(D, nenvs, nsteps, True)  # list of len steps, shape [nenvs]
    q_is = batch_to_seq(q_i, nenvs, nsteps, True)
    vs = batch_to_seq(v, nenvs, nsteps + 1, True)
    v_final = vs[-1]
    qret = v_final
    qrets = []
    for i in range(nsteps - 1, -1, -1):
        check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], [[nenvs]] * 6)
        qret = rs[i] + gamma * qret * (1.0 - ds[i])
        qrets.append(qret)
        qret = (rho_bar[i] * (qret - q_is[i])) + vs[i]
    qrets = qrets[::-1]
    qret = seq_to_batch(qrets, flat=True)
    return qret

# For ACER with PPO clipping instead of trust region
# def clip(ratio, eps_clip):
#     # assume 0 <= eps_clip <= 1
#     return tf.minimum(1 + eps_clip, tf.maximum(1 - eps_clip, ratio)) 
Example #28
Source File: common_layers.py    From fine-lm with MIT License
def inverse_lin_decay(max_step, min_value=0.01):
  """Inverse-decay linearly from 0.01 to 1.0 reached at max_step."""
  step = tf.to_float(tf.train.get_global_step())
  progress = tf.minimum(step / float(max_step), 1.0)
  return progress * (1.0 - min_value) + min_value 
Example #29
Source File: common_layers.py    From fine-lm with MIT License
def hard_sigmoid(x, saturation_limit=0.9):
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  x_shifted = 0.5 * x + 0.5
  return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost 
Example #30
Source File: utils.py    From dc_tts with Apache License 2.0
def learning_rate_decay(init_lr, global_step, warmup_steps = 4000.0):
    '''Noam scheme from tensor2tensor'''
    step = tf.to_float(global_step + 1)
    return init_lr * warmup_steps**0.5 * tf.minimum(step * warmup_steps**-1.5, step**-0.5)