Python tensorflow.reduce_min() Examples

The following are 30 code examples of tensorflow.reduce_min(), collected from open-source projects. Each example notes its source file, the project it comes from, and that project's license.
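Before digging into the project code, here is a minimal orientation sketch of what tf.reduce_min() computes (written for TF 2.x eager mode; note that several examples below use TF 1.x spellings such as keep_dims):

import tensorflow as tf

x = tf.constant([[3.0, 1.0, 4.0],
                 [1.0, 5.0, 9.0]])

tf.reduce_min(x)                         # scalar 1.0: the global minimum
tf.reduce_min(x, axis=0)                 # per-column minima: [1.0, 1.0, 4.0]
tf.reduce_min(x, axis=1, keepdims=True)  # per-row minima: [[1.0], [1.0]]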
Example #1
Source File: shape_utils.py    From vehicle_counting_tensorflow with MIT License
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    tf.errors.InvalidArgumentError: At run time, if the input box tensor is
      not normalized when the assert op executes.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes]) 
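A typical way to wire such an assert op into a TF 1.x graph is through control dependencies, so the check runs whenever the boxes are consumed. This is a sketch, not code from the project itself:

# Sketch: assumes TF 1.x graph mode and the assert_box_normalized() above;
# `boxes` is any [N, 4] tensor of normalized coordinates.
assert_op = assert_box_normalized(boxes)
with tf.control_dependencies([assert_op]):
    boxes = tf.identity(boxes)  # downstream uses of boxes now run the check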
Example #2
Source File: tensor.py    From spleeter with MIT License
def from_float32_to_uint8(
        tensor,
        tensor_key='tensor',
        min_key='min',
        max_key='max'):
    """

    :param tensor:
    :param tensor_key:
    :param min_key:
    :param max_key:
    :returns:
    """
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    return {
        tensor_key: tf.cast(
            (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
            * 255.9999, dtype=tf.uint8),
        min_key: tensor_min,
        max_key: tensor_max
    } 
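The returned min and max make the quantization invertible. A plausible inverse is sketched below; spleeter ships its own decoder, which may differ in detail:

def from_uint8_to_float32(tensor_uint8, tensor_min, tensor_max):
    # Map [0, 255] back onto the original [min, max] range. Because of the
    # uint8 cast in the encoder, this is an approximate inverse.
    return (tf.cast(tensor_uint8, tf.float32) / 255.9999
            * (tensor_max - tensor_min + 1e-16) + tensor_min)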
Example #3
Source File: common.py    From HyperGAN with MIT License
def distribution_accuracy(a, b):
    """
    Each point of a is measured against the closest point in b, and those
    per-coordinate minimum distances are summed.

    This works best on a large batch of small inputs."""
    tiled_a = a
    tiled_a = tf.reshape(tiled_a, [int(tiled_a.get_shape()[0]), 1, int(tiled_a.get_shape()[1])])

    tiled_a = tf.tile(tiled_a, [1, int(tiled_a.get_shape()[0]), 1])

    tiled_b = b
    tiled_b = tf.reshape(tiled_b, [1, int(tiled_b.get_shape()[0]), int(tiled_b.get_shape()[1])])
    # After the reshape, dimension 0 of tiled_b is 1, so these tile multiples
    # are all 1 and the op is a no-op; the subtraction below instead relies on
    # broadcasting [N, N, d] against [1, N, d].
    tiled_b = tf.tile(tiled_b, [int(tiled_b.get_shape()[0]), 1, 1])

    difference = tf.abs(tiled_a-tiled_b)
    difference = tf.reduce_min(difference, axis=1)
    difference = tf.reduce_sum(difference, axis=1)
    return tf.reduce_sum(difference, axis=0) 
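Because the subtraction broadcasts [N, N, d] against [1, N, d] anyway, the same computation can be written without any tiling. A sketch, assuming a and b are both [N, d]:

def distribution_accuracy_broadcast(a, b):
    # a -> [N, 1, d] and b -> [1, N, d]; subtraction broadcasts to [N, N, d].
    difference = tf.abs(tf.expand_dims(a, 1) - tf.expand_dims(b, 0))
    difference = tf.reduce_min(difference, axis=1)  # closest value in b, per coordinate
    difference = tf.reduce_sum(difference, axis=1)  # sum over coordinates
    return tf.reduce_sum(difference, axis=0)        # sum over the batch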
Example #4
Source File: test_utils_test.py    From model-optimization with Apache License 2.0
def test_get_tensor_with_random_shape(self):
    x = test_utils.get_tensor_with_random_shape()
    self.assertIsInstance(x, tf.Tensor)
    self.assertFalse(x.shape.is_fully_defined())
    # Rank of the Tensor should be known, even though the dimension is not.
    self.assertEqual(1, x.shape.ndims)

    # Assert that unknown shape corresponds to a value of actually random shape
    # at execution time.
    samples = [self.evaluate(x) for _ in range(10)]
    self.assertGreater(len(set([len(s) for s in samples])), 1)

    # Test that source_fn has effect on the output values.
    x_uniform = test_utils.get_tensor_with_random_shape(
        expected_num_elements=50, source_fn=tf.random.uniform)
    x_normal = test_utils.get_tensor_with_random_shape(
        expected_num_elements=50, source_fn=tf.random.normal)
    self.assertGreaterEqual(self.evaluate(tf.reduce_min(x_uniform)), 0.0)
    self.assertLess(self.evaluate(tf.reduce_min(x_normal)), 0.0) 
Example #5
Source File: tf_variable_summaries.py    From fcn8s_tensorflow with GNU General Public License v3.0
def add_variable_summaries(variable, scope):
  '''
  Attach some summaries to a tensor for TensorBoard visualization, namely
  mean, standard deviation, minimum, maximum, and histogram.

  Arguments:
    variable (TensorFlow Variable): A TensorFlow Variable of any shape to
        which to add summary operations. Must be a numerical data type.
    scope (str): The name scope under which to group the summaries.
  '''
  with tf.name_scope(scope):
    mean = tf.reduce_mean(variable)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(variable))
    tf.summary.scalar('min', tf.reduce_min(variable))
    tf.summary.histogram('histogram', variable) 
Example #6
Source File: network_blocks.py    From KPConv with MIT License
def ind_max_pool(x, inds):
    """
    This TensorFlow operation computes a max pooling according to the list of indices 'inds'.
    > x = [n1, d] features matrix
    > inds = [n2, max_num] each row of this tensor is a list of indices of features to be pooled together
    >> output = [n2, d] pooled features matrix
    """

    # Add a last row with minimum features for shadow pools
    x = tf.concat([x, tf.reduce_min(x, axis=0, keep_dims=True)], axis=0)

    # Get features for each pooling cell [n2, max_num, d]
    pool_features = tf.gather(x, inds, axis=0)

    # Pool the maximum
    return tf.reduce_max(pool_features, axis=1) 
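The appended row acts as a "shadow" feature: any index in inds equal to n1 (one past the last real row) gathers the per-column minimum, which can never win the subsequent max pool. A small eager sketch with made-up numbers (keepdims is the TF 2.x spelling of keep_dims):

x = tf.constant([[1.0, 8.0],
                 [5.0, 2.0],
                 [3.0, 4.0]])   # n1 = 3 real feature rows
inds = tf.constant([[0, 1],     # pool rows 0 and 1 together
                    [2, 3]])    # index 3 is the shadow slot

x = tf.concat([x, tf.reduce_min(x, axis=0, keepdims=True)], axis=0)
pooled = tf.reduce_max(tf.gather(x, inds, axis=0), axis=1)
# pooled -> [[5., 8.], [3., 4.]]; the shadow row [1., 2.] never wins.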
Example #7
Source File: kfac_utils.py    From stable-baselines with MIT License
def detect_min_val(input_mat, var, threshold=1e-6, name='', debug=False):
    """
    If debug is not set, runs clipout_neg and returns the clipped tensor.
    Otherwise, additionally prints the eigenvalues when their ratio looks odd.

    :param input_mat: (TensorFlow Tensor)
    :param var: (TensorFlow Tensor) variable
    :param threshold: (float) the cutoff threshold
    :param name: (str) the name of the variable
    :param debug: (bool) debug function
    :return: (TensorFlow Tensor) clipped tensor
    """
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipout_neg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
                                    lambda: input_mat_clipped, lambda: tf.Print(
                input_mat_clipped,
                [tf.convert_to_tensor('odd ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name),
                 eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped 
Example #8
Source File: tfops.py    From glow with MIT License
def print_act_stats(x, _str=""):
    if not do_print_act_stats:
        return x
    if hvd.rank() != 0:
        return x
    if len(x.get_shape()) == 1:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 2:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 4:
        x_mean, x_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
    stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),
             tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)), tf.reduce_max(tf.sqrt(x_var))]
    return tf.Print(x, stats, "["+_str+"] "+x.name)

Example #9
Source File: sparse_weights.py    From nupic.tensorflow with GNU Affero General Public License v3.0
def _build(self, input_shape, dtype=tf.float32):
        """
        Called on the first iteration once the input shape is known
        :param input_shape: Input shape including batch size
        """
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            non_zeros = int(round(input_shape[-1].value * self.percent_on))

            # Create random mask with k elements set to 1, all other elements set to 0
            values = tf.random_uniform(input_shape)
            top_k, _ = tf.math.top_k(input=values, k=non_zeros, sorted=False)
            kth = tf.reduce_min(top_k, axis=1, keepdims=True)
            mask = tf.cast(tf.greater_equal(values, kth), dtype=dtype)
            self.mask = tf.get_variable(
                self.name,
                initializer=mask,
                trainable=False,
                synchronization=tf.VariableSynchronization.NONE,
            )
            keras.backend.track_variable(self.mask)
            self._built = True 
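The core trick — keep the k largest of a set of random values by thresholding at the minimum of the top k — also works as a standalone TF 2.x sketch:

values = tf.random.uniform([2, 6])                 # one row per sample
k = int(round(6 * 0.5))                            # 50% of units stay on
top_k, _ = tf.math.top_k(values, k=k, sorted=False)
kth = tf.reduce_min(top_k, axis=1, keepdims=True)  # k-th largest per row
mask = tf.cast(values >= kth, tf.float32)          # exactly k ones per row
# Ties among random floats are vanishingly unlikely, so "exactly k" holds.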
Example #10
Source File: summary.py    From CapsLayer with Apache License 2.0
def tensor_stats(name, tensor, verbose=True, collections=None, family=None):
    """
    Args:
        tensor: A non-scalar tensor.
    """
    if verbose:
        with tf.name_scope(name):
            mean = tf.reduce_mean(tensor)
            tf.summary.scalar('mean', mean, collections=collections, family=family)

            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(tensor - mean)))
            tf.summary.scalar('stddev', stddev, collections=collections, family=family)
            tf.summary.scalar('max', tf.reduce_max(tensor), collections=collections, family=family)
            tf.summary.scalar('min', tf.reduce_min(tensor), collections=collections, family=family)
            tf.summary.histogram('histogram', tensor, collections=collections, family=family)
Example #11
Source File: discretization.py    From fine-lm with MIT License
def top_k_softmax(x, k):
  """Calculate softmax(x), select top-k and rescale to sum to 1.

  Args:
    x: Input to softmax over.
    k: Number of top-k to select.

  Returns:
    softmax(x) and maximum item.
  """
  x = tf.nn.softmax(x)
  top_x, _ = tf.nn.top_k(x, k=k + 1)
  min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True)
  x = tf.nn.relu((x - min_top) + 1e-12)
  x /= tf.reduce_sum(x, axis=-1, keep_dims=True)
  return x, tf.reduce_max(top_x, axis=-1) 
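For intuition, take x = [2.0, 1.0, 0.1] and k = 1: softmax(x) ≈ [0.66, 0.24, 0.10], and the top k + 1 = 2 values are [0.66, 0.24], so min_top ≈ 0.24. After the shifted relu only 0.66 - 0.24 ≈ 0.42 survives (the runner-up keeps just the 1e-12 epsilon), and renormalizing yields approximately [1.0, 0.0, 0.0] together with the maximum item 0.66.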
Example #12
Source File: util.py    From R-Net with MIT License
def get_batch_dataset(record_file, parser, config):
    num_threads = tf.constant(config.num_threads, dtype=tf.int32)
    dataset = tf.data.TFRecordDataset(record_file).map(
        parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()
    if config.is_bucket:
        buckets = [tf.constant(num) for num in range(*config.bucket_range)]

        def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):
            c_len = tf.reduce_sum(
                tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))
            buckets_min = [np.iinfo(np.int32).min] + buckets
            buckets_max = buckets + [np.iinfo(np.int32).max]
            conditions_c = tf.logical_and(
                tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))
            bucket_id = tf.reduce_min(tf.where(conditions_c))
            return bucket_id

        def reduce_func(key, elements):
            return elements.batch(config.batch_size)

        dataset = dataset.apply(tf.contrib.data.group_by_window(
            key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)
    else:
        dataset = dataset.batch(config.batch_size)
    return dataset 
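The bucketing line is worth unpacking: conditions_c marks every bucket whose range contains c_len, tf.where lists the indices of the matches, and tf.reduce_min keeps the first one. A toy eager sketch with hypothetical bucket boundaries:

import numpy as np

buckets = [10, 20, 30]
c_len = 15
buckets_min = tf.constant([np.iinfo(np.int32).min] + buckets)
buckets_max = tf.constant(buckets + [np.iinfo(np.int32).max])
conditions = tf.logical_and(tf.less(buckets_min, c_len),
                            tf.less_equal(c_len, buckets_max))
bucket_id = tf.reduce_min(tf.where(conditions))  # -> 1, the (10, 20] bucket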
Example #13
Source File: normalize.py    From rlgraph with Apache License 2.0
def _graph_fn_call(self, inputs):
        min_value = inputs
        max_value = inputs

        if get_backend() == "tf":
            # Iteratively reduce dimensionality across all axes to get the min/max values for each sample in the batch.
            for axis in self.axes:
                min_value = tf.reduce_min(input_tensor=min_value, axis=axis, keep_dims=True)
                max_value = tf.reduce_max(input_tensor=max_value, axis=axis, keep_dims=True)
        elif get_backend() == "pytorch":
            for axis in self.axes:
                # torch.min/max with a dim argument return (values, indices)
                # tuples, so take element [0] and keep the reduced dim so the
                # final broadcast matches the TF branch.
                min_value = torch.min(min_value, dim=axis, keepdim=True)[0]
                max_value = torch.max(max_value, dim=axis, keepdim=True)[0]

        # Add some small constant to never let the range be zero.
        return (inputs - min_value) / (max_value - min_value + SMALL_NUMBER) 
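For instance, with axes = [1, 2] this rescales every image in a batch into [0, 1] independently of the others. An eager sketch of the TF branch, passing both axes at once (equivalent to the component's one-axis-at-a-time loop):

SMALL_NUMBER = 1e-6  # stand-in for the library's epsilon constant
images = tf.random.uniform([4, 32, 32], minval=-3.0, maxval=7.0)
min_v = tf.reduce_min(images, axis=[1, 2], keepdims=True)  # shape [4, 1, 1]
max_v = tf.reduce_max(images, axis=[1, 2], keepdims=True)
normalized = (images - min_v) / (max_v - min_v + SMALL_NUMBER)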
Example #14
Source File: sac_loss_function.py    From rlgraph with Apache License 2.0
def _graph_fn_critic_loss(self, log_probs_next_sampled, q_values_next_sampled, q_values, rewards, terminals, alpha):
        # In case log_probs come in as shape=(), expand last rank to 1.
        if log_probs_next_sampled.shape.as_list()[-1] is None:
            log_probs_next_sampled = tf.expand_dims(log_probs_next_sampled, axis=-1)

        log_probs_next_sampled = tf.reduce_sum(log_probs_next_sampled, axis=1, keepdims=True)
        rewards = tf.expand_dims(rewards, axis=-1)
        terminals = tf.expand_dims(terminals, axis=-1)

        q_min_next = tf.reduce_min(tf.concat(q_values_next_sampled, axis=1), axis=1, keepdims=True)
        assert q_min_next.shape.as_list() == [None, 1]
        soft_state_value = q_min_next - alpha * log_probs_next_sampled
        q_target = rewards + self.discount * (1.0 - tf.cast(terminals, tf.float32)) * soft_state_value
        total_loss = 0.0
        if self.num_q_functions < 2:
            q_values = [q_values]
        for i, q_value in enumerate(q_values):
            loss = 0.5 * (q_value - tf.stop_gradient(q_target)) ** 2
            loss = tf.identity(loss, "critic_loss_per_item_{}".format(i + 1))
            total_loss += loss
        return tf.squeeze(total_loss, axis=1) 
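The tf.reduce_min over the concatenated Q heads is the "clipped double-Q" trick from TD3/SAC: taking the smaller of the critic estimates damps overestimation bias. In isolation, with hypothetical values:

q1 = tf.constant([[1.0], [4.0]])  # critic 1, shape [batch, 1]
q2 = tf.constant([[2.0], [3.0]])  # critic 2
q_min = tf.reduce_min(tf.concat([q1, q2], axis=1), axis=1, keepdims=True)
# q_min -> [[1.0], [3.0]]; for two heads this equals tf.minimum(q1, q2)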
Example #15
Source File: box_list_ops.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def get_minimal_coverage_box(boxlist,
                             default_box=None,
                             scope=None):
  """Creates a single bounding box which covers all boxes in the boxlist.

  Args:
    boxlist: A Boxlist.
    default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
      this default box will be returned. If None, will use a default box of
      [[0., 0., 1., 1.]].
    scope: Name scope.

  Returns:
    A [1, 4] float32 tensor with a bounding box that tightly covers all the
    boxes in the box list. If the boxlist does not contain any boxes, the
    default box is returned.
  """
  with tf.name_scope(scope, 'CreateCoverageBox'):
    num_boxes = boxlist.num_boxes()

    def coverage_box(bboxes):
      y_min, x_min, y_max, x_max = tf.split(
          value=bboxes, num_or_size_splits=4, axis=1)
      y_min_coverage = tf.reduce_min(y_min, axis=0)
      x_min_coverage = tf.reduce_min(x_min, axis=0)
      y_max_coverage = tf.reduce_max(y_max, axis=0)
      x_max_coverage = tf.reduce_max(x_max, axis=0)
      return tf.stack(
          [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
          axis=1)

    default_box = default_box or tf.constant([[0., 0., 1., 1.]])
    return tf.cond(
        tf.greater_equal(num_boxes, 1),
        true_fn=lambda: coverage_box(boxlist.get()),
        false_fn=lambda: default_box) 
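On concrete boxes the per-coordinate reductions read as follows (an eager sketch of the inner coverage_box logic):

boxes = tf.constant([[0.1, 0.2, 0.5, 0.6],
                     [0.3, 0.1, 0.9, 0.4]])  # rows of [y_min, x_min, y_max, x_max]
y_min, x_min, y_max, x_max = tf.split(boxes, num_or_size_splits=4, axis=1)
coverage = tf.stack([tf.reduce_min(y_min, axis=0),
                     tf.reduce_min(x_min, axis=0),
                     tf.reduce_max(y_max, axis=0),
                     tf.reduce_max(x_max, axis=0)], axis=1)
# coverage -> [[0.1, 0.1, 0.9, 0.6]], the tightest box around both inputs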
Example #16
Source File: kfac_utils.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(
            input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped 
Example #17
Source File: layers.py    From MobileNet with Apache License 2.0
def __variable_summaries(var):
    """
    Attach a lot of summaries to a Tensor (for TensorBoard visualization).
    :param var: variable to be summarized
    :return: None
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var) 
Example #18
Source File: clip.py    From onnx-tensorflow with Apache License 2.0
def _common(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    x_dtype = x.dtype

    if cls.SINCE_VERSION < 11:
      # min/max were required and passed as attributes
      clip_value_min = node.attrs.get("min", tf.reduce_min(x))
      clip_value_max = node.attrs.get("max", tf.reduce_max(x))
    else:
      # min/max are optional and passed as inputs
      clip_value_min = tensor_dict[node.inputs[1]] if len(
          node.inputs) > 1 and node.inputs[1] != "" else x_dtype.min
      clip_value_max = tensor_dict[node.inputs[2]] if len(
          node.inputs) > 2 and node.inputs[2] != "" else x_dtype.max

    # tf.clip_by_value doesn't support uint8, uint16, uint32, int8 and int16
    # dtype for x, therefore need to upcast it to tf.int32 or tf.int64
    if x_dtype in [tf.uint8, tf.uint16, tf.uint32, tf.int8, tf.int16]:
      cast_to = tf.int64 if x_dtype == tf.uint32 else tf.int32
      x = tf.cast(x, cast_to)
      clip_value_min = tf.cast(clip_value_min, cast_to)
      clip_value_max = tf.cast(clip_value_max, cast_to)
      y = tf.clip_by_value(x, clip_value_min, clip_value_max)
      y = tf.cast(y, x_dtype)
    else:
      y = tf.clip_by_value(x, clip_value_min, clip_value_max)

    return [y] 
Example #19
Source File: dqn.py    From RLSeq2Seq with MIT License
def variable_summaries(self, var_name, var):
        """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
        with tf.name_scope('summaries_{}'.format(var_name)):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
            tf.summary.histogram('histogram', var) 
Example #20
Source File: model.py    From RLSeq2Seq with MIT License
def variable_summaries(self, var_name, var):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
    with tf.name_scope('summaries_{}'.format(var_name)):
      mean = tf.reduce_mean(var)
      tf.summary.scalar('mean', mean)
      with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
      tf.summary.scalar('stddev', stddev)
      tf.summary.scalar('max', tf.reduce_max(var))
      tf.summary.scalar('min', tf.reduce_min(var))
      tf.summary.histogram('histogram', var) 
Example #21
Source File: tensor.py    From dgl with Apache License 2.0
def reduce_min(input):
    return tf.reduce_min(input) 
Example #22
Source File: gather_encoder_test.py    From model-optimization with Apache License 2.0
def _aggregate_one(values, mode):
  if mode == encoding_stage.StateAggregationMode.SUM:
    return tf.reduce_sum(tf.stack(values), axis=0)
  elif mode == encoding_stage.StateAggregationMode.MIN:
    return tf.reduce_min(tf.stack(values), axis=0)
  elif mode == encoding_stage.StateAggregationMode.MAX:
    return tf.reduce_max(tf.stack(values), axis=0)
  elif mode == encoding_stage.StateAggregationMode.STACK:
    return tf.stack(values) 
Example #23
Source File: test_utils.py    From model-optimization with Apache License 2.0
def update_state(self, state, state_update_tensors):
    """See base class."""
    del state  # Unused.
    return {
        self.LAST_SUM_STATE_KEY:
            tf.reduce_sum(state_update_tensors[self.SUM_STATE_UPDATE_KEY]),
        self.LAST_MIN_STATE_KEY:
            tf.reduce_min(state_update_tensors[self.MIN_STATE_UPDATE_KEY]),
        self.LAST_MAX_STATE_KEY:
            tf.reduce_max(state_update_tensors[self.MAX_STATE_UPDATE_KEY]),
        self.LAST_COUNT_STATE_KEY:
            tf.reduce_prod(
                tf.shape(state_update_tensors[self.STACK_STATE_UPDATE_KEY]))
    } 
Example #24
Source File: stages_impl.py    From model-optimization with Apache License 2.0
def encode(self, x, encode_params):
    """See base class."""
    if self.MIN_MAX_VALUES_KEY in encode_params:
      min_max = tf.cast(encode_params[self.MIN_MAX_VALUES_KEY], x.dtype)
      min_x, max_x = min_max[0], min_max[1]
      x = tf.clip_by_value(x, min_x, max_x)
    else:
      min_x = tf.reduce_min(x)
      max_x = tf.reduce_max(x)

    max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY], x.dtype)
    # Shift the values to range [0, max_value].
    # In the case of min_x == max_x, this will return all zeros.
    x = tf.compat.v1.div_no_nan(x - min_x, max_x - min_x) * max_value
    if self._stochastic:  # Randomized rounding.
      floored_x = tf.floor(x)
      bernoulli = tf.random.uniform(tf.shape(x), dtype=x.dtype)
      bernoulli = bernoulli < (x - floored_x)
      quantized_x = floored_x + tf.cast(bernoulli, x.dtype)
    else:  # Deterministic rounding.
      quantized_x = tf.round(x)

    encoded_tensors = {self.ENCODED_VALUES_KEY: quantized_x}
    if self.MIN_MAX_VALUES_KEY not in encode_params:
      encoded_tensors[self.MIN_MAX_VALUES_KEY] = tf.stack([min_x, max_x])
    return encoded_tensors 
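Decoding reverses the shift: divide by max_value and map back onto [min_x, max_x]. A minimal sketch of that inverse (the library's own decode method may differ in detail):

def decode_uniform_quantization(quantized_x, min_x, max_x, max_value):
    # Quantization error aside, this recovers values in [min_x, max_x].
    return quantized_x / max_value * (max_x - min_x) + min_x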
Example #25
Source File: quantization.py    From model-optimization with Apache License 2.0
def encode(self, x, encode_params):
    """See base class."""
    dim = tf.shape(x)[-1]
    x = tf.reshape(x, [-1, dim])

    # Per-channel min and max.
    min_x = tf.reduce_min(x, axis=0)
    max_x = tf.reduce_max(x, axis=0)

    max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY], x.dtype)
    # Shift the values to range [0, max_value].
    # In the case of min_x == max_x, this will return all zeros.
    x = tf.compat.v1.div_no_nan(x - min_x, max_x - min_x) * max_value
    if self._stochastic:  # Randomized rounding.
      floored_x = tf.floor(x)
      bernoulli = tf.random.uniform(tf.shape(x), dtype=x.dtype)
      bernoulli = bernoulli < (x - floored_x)
      quantized_x = floored_x + tf.cast(bernoulli, x.dtype)
    else:  # Deterministic rounding.
      quantized_x = tf.round(x)

    encoded_tensors = {
        self.ENCODED_VALUES_KEY: quantized_x,
        self.MIN_MAX_VALUES_KEY: tf.stack([min_x, max_x])
    }

    return encoded_tensors 
Example #26
Source File: quantization.py    From model-optimization with Apache License 2.0
def encode(self, x, encode_params):
    """See base class."""
    min_x = tf.reduce_min(x)
    max_x = tf.reduce_max(x)

    max_value = tf.cast(encode_params[self.MAX_INT_VALUE_PARAMS_KEY], x.dtype)
    # Shift the values to range [0, max_value].
    # In the case of min_x == max_x, this will return all zeros.
    x = tf.compat.v1.div_no_nan(x - min_x, max_x - min_x) * max_value

    # Randomized rounding.
    floored_x = tf.floor(x)
    random_seed = tf.random.uniform((2,), maxval=tf.int64.max, dtype=tf.int64)
    num_elements = tf.reduce_prod(tf.shape(x))
    rounding_floats = tf.reshape(
        self._random_floats(num_elements, random_seed, x.dtype), tf.shape(x))

    bernoulli = rounding_floats < (x - floored_x)
    quantized_x = floored_x + tf.cast(bernoulli, x.dtype)

    # Include the random seed in the encoded tensors so that it can be used to
    # generate the same random sequence in the decode method.
    encoded_tensors = {
        self.ENCODED_VALUES_KEY: quantized_x,
        self.SEED_PARAMS_KEY: random_seed,
        self.MIN_MAX_VALUES_KEY: tf.stack([min_x, max_x])
    }

    return encoded_tensors 
Example #27
Source File: clustering_centroids.py    From model-optimization with Apache License 2.0
def get_cluster_centroids(self):
    weight_min = tf.reduce_min(self.weights)
    weight_max = tf.reduce_max(self.weights)
    cluster_centroids = tf.random.uniform(shape=(self.number_of_clusters,),
                                          minval=weight_min,
                                          maxval=weight_max,
                                          dtype=self.weights.dtype)
    return cluster_centroids 
Example #28
Source File: clustering_centroids.py    From model-optimization with Apache License 2.0
def get_cluster_centroids(self):
    weight_min = tf.reduce_min(self.weights)
    weight_max = tf.reduce_max(self.weights)
    cluster_centroids = tf.linspace(weight_min,
                                    weight_max,
                                    self.number_of_clusters)
    return cluster_centroids 
Example #29
Source File: common.py    From HyperGAN with MIT License
def batch_accuracy(a, b):
    "Difference from a to b.  Meant for reconstruction measurements."
    difference = tf.abs(a-b)
    difference = tf.reduce_min(difference, axis=1)
    difference = tf.reduce_sum(difference, axis=1)
    return tf.reduce_sum( tf.reduce_sum(difference, axis=0) , axis=0) 
Example #30
Source File: model.py    From rgn with MIT License
def _accumulate_loss(config, numerator, denominator, name_prefix=''):
    """ Constructs ops to accumulate and reduce loss and maintain a memory of lowest loss achieved """

    if config['num_evaluation_invocations'] == 1:
        # return simple loss
        accumulated_loss = tf.divide(numerator, denominator, name=name_prefix)
        update_op = reduce_op = tf.no_op()
    else:
        # create accumulator variables. note that tf.Variable uses name_scope (not variable_scope) for naming, which is what's desired in this instance
        numerator_accumulator   = tf.Variable(initial_value=0., trainable=False, name=name_prefix + '_numerator_accumulator')
        denominator_accumulator = tf.Variable(initial_value=0., trainable=False, name=name_prefix + '_denominator_accumulator')

        # accumulate
        with tf.control_dependencies([numerator, denominator, numerator_accumulator, denominator_accumulator]):
            accumulate_numerator   = tf.assign_add(numerator_accumulator, numerator)
            accumulate_denominator = tf.assign_add(denominator_accumulator, denominator)
            update_op = tf.group(accumulate_numerator, accumulate_denominator, name=name_prefix + '_accumulate_op')

        # divide to get final quotient
        with tf.control_dependencies([update_op]):
            accumulated_loss = tf.divide(numerator_accumulator, denominator_accumulator, name=name_prefix + '_accumulated')

        # zero accumulators
        with tf.control_dependencies([accumulated_loss]):
            zero_numerator   = tf.assign(numerator_accumulator,   0.)
            zero_denominator = tf.assign(denominator_accumulator, 0.)
            reduce_op = tf.group(zero_numerator, zero_denominator, name=name_prefix + '_reduce_op')

    min_loss_achieved = tf.Variable(initial_value=float('inf'), trainable=False, name='min_' + name_prefix + '_achieved')
    min_loss_op = tf.assign(min_loss_achieved, tf.reduce_min([min_loss_achieved, accumulated_loss]), name='min_' + name_prefix + '_achieved_op')
    with tf.control_dependencies([min_loss_op]):
        min_loss_achieved = tf.identity(min_loss_achieved)

    return accumulated_loss, min_loss_achieved, min_loss_op, update_op, reduce_op
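The min_loss_op line is a compact running-minimum update: tf.reduce_min over the stacked pair is equivalent to tf.minimum(min_loss_achieved, accumulated_loss), and assigning the result back keeps the lowest loss seen so far. The same pattern in a TF 2.x eager sketch:

min_loss = tf.Variable(float('inf'), trainable=False)
for loss in [3.0, 1.5, 2.0]:
    min_loss.assign(tf.reduce_min([min_loss, loss]))
# min_loss -> 1.5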