Python tensorflow.broadcast_to() Examples

The following are 29 code examples of tensorflow.broadcast_to(), drawn from open-source projects. Each example lists its source file, originating project, and license.
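Before the project examples, a minimal sketch of the basic call: tf.broadcast_to(input, shape) expands a tensor to the given shape following NumPy-style broadcasting rules (each aligned dimension must match or be 1).

import tensorflow as tf

x = tf.constant([1, 2, 3])      # shape (3,)
y = tf.broadcast_to(x, [2, 3])  # shape (2, 3)
# y == [[1, 2, 3],
#       [1, 2, 3]]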
Example #1
Source File: utils.py    From tf2-yolo3 with Apache License 2.0
def broadcast_iou(box_1, box_2):
    # box_1: (..., (x1, y1, x2, y2))
    # box_2: (N, (x1, y1, x2, y2))

    # broadcast boxes
    box_1 = tf.expand_dims(box_1, -2)
    box_2 = tf.expand_dims(box_2, 0)
    # new_shape: (..., N, (x1, y1, x2, y2))
    new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
    box_1 = tf.broadcast_to(box_1, new_shape)
    box_2 = tf.broadcast_to(box_2, new_shape)

    int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) - tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
    int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) - tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
    int_area = int_w * int_h
    box_1_area = (box_1[..., 2] - box_1[..., 0]) * (box_1[..., 3] - box_1[..., 1])
    box_2_area = (box_2[..., 2] - box_2[..., 0]) * (box_2[..., 3] - box_2[..., 1])
    return int_area / (box_1_area + box_2_area - int_area) 
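A sketch of how this helper might be called; the shapes are illustrative, not taken from the project:

# Hypothetical shapes: 8 images, 100 predicted boxes each, 5 ground-truth boxes.
pred_boxes = tf.random.uniform([8, 100, 4])  # (..., (x1, y1, x2, y2))
gt_boxes = tf.random.uniform([5, 4])         # (N, (x1, y1, x2, y2))
iou = broadcast_iou(pred_boxes, gt_boxes)    # shape (8, 100, 5): each prediction vs. each ground truth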
Example #2
Source File: utils.py    From DirectML with MIT License
def broadcast_iou(box_1, box_2):
    # box_1: (..., (x1, y1, x2, y2))
    # box_2: (N, (x1, y1, x2, y2))

    # broadcast boxes
    box_1 = tf.expand_dims(box_1, -2)
    box_2 = tf.expand_dims(box_2, 0)
    # new_shape: (..., N, (x1, y1, x2, y2))
    new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
    box_1 = tf.broadcast_to(box_1, new_shape)
    box_2 = tf.broadcast_to(box_2, new_shape)

    int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
                       tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
    int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
                       tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
    int_area = int_w * int_h
    box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
        (box_1[..., 3] - box_1[..., 1])
    box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
        (box_2[..., 3] - box_2[..., 1])
    return int_area / (box_1_area + box_2_area - int_area) 
Example #3
Source File: keras_layer_test.py    From hub with Apache License 2.0
def _save_model_with_obscurely_shaped_list_output(export_dir):
  """Writes SavedModel with hard-to-predict output shapes."""
  def broadcast_obscurely_to(input, shape):
    """Like tf.broadcast_to(), but hostile to static shape propagation."""
    obscured_shape = tf.cast(tf.cast(shape, tf.float32)
                             # Add small random noise that gets rounded away.
                             + 0.1*tf.sin(tf.random.uniform((), -3, +3)) + 0.3,
                             tf.int32)
    return tf.broadcast_to(input, obscured_shape)

  @tf.function(
      input_signature=[tf.TensorSpec(shape=(None, 1), dtype=tf.float32)])
  def call_fn(x):
    # For each batch element x, the three outputs are
    #   value x with shape (1)
    #   value 2*x broadcast to shape (2,2)
    #   value 3*x broadcast to shape (3,3,3)
    batch_size = tf.shape(x)[0]
    return [broadcast_obscurely_to(tf.reshape(i*x, [batch_size] + [1]*i),
                                   tf.concat([[batch_size], [i]*i], axis=0))
            for i in range(1, 4)]

  obj = tf.train.Checkpoint()
  obj.__call__ = call_fn
  tf.saved_model.save(obj, export_dir) 
Example #4
Source File: distributions.py    From probflow with MIT License
def __call__(self):
        """Get the distribution object from the backend"""
        if get_backend() == 'pytorch':
            import torch.distributions as tod
            raise NotImplementedError
        else:
            import tensorflow as tf
            from tensorflow_probability import distributions as tfd

            # Convert to tensorflow distributions if probflow distributions
            if isinstance(self.distributions, BaseDistribution):
                self.distributions = self.distributions()

            # Broadcast probs/logits
            shape = self.distributions.batch_shape
            args = {'logits': None, 'probs': None}
            if self.logits is not None:
                args['logits'] = tf.broadcast_to(self['logits'], shape)
            else:
                args['probs'] = tf.broadcast_to(self['probs'], shape)

            # Return TFP distribution object
            return tfd.MixtureSameFamily(
                    tfd.Categorical(**args),
                    self.distributions) 
Example #5
Source File: agents_test.py    From seed_rl with Apache License 2.0
def test_agent_is_checkpointable(self):
    agent = networks.ImpalaDeep(9)
    output0 = _run_actor(agent)

    checkpoint_dir = '/tmp/training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, 'model.ckpt')
    ckpt = tf.train.Checkpoint(agent=agent)

    ckpt.save(file_prefix=checkpoint_prefix)

    for v in agent.trainable_variables:
      v.assign_add(tf.broadcast_to(1., v.shape))

    output1 = _run_actor(agent)

    ckpt_path = tf.train.latest_checkpoint(checkpoint_dir)
    ckpt.restore(ckpt_path).assert_consumed()

    output2 = _run_actor(agent)

    self.assertEqual(len(agent.trainable_variables), 39)
    self.assertAllEqual(output0[0].policy_logits, output2[0].policy_logits)
    self.assertNotAllEqual(output0[0].policy_logits, output1[0].policy_logits) 
Example #6
Source File: tf_utils.py    From transform with Apache License 2.0
def _broadcast_to_x_shape(x, y):
  """Broadcasts y to same shape as x as needed.

  Args:
    x: An input feature.
    y: A feature that is either the same shape as x or has the same outer
      dimensions as x. If the latter, y is broadcast to the same shape as x.

  Returns:
    A Tensor that contains the broadcasted feature, y.
  """
  # The batch dimension of x and y must be the same, and y must be 1D.
  x_shape = tf.shape(input=x)
  y_shape = tf.shape(input=y)
  assert_eq = tf.compat.v1.assert_equal(x_shape[0], y_shape[0])
  with tf.control_dependencies([assert_eq]):
    y = tf.identity(y)
  rank_delta = tf.rank(x) - tf.rank(y)
  target_shape = tf.concat(
      [tf.shape(y), tf.ones(rank_delta, dtype=tf.int32)], axis=0)
  matched_rank = tf.reshape(y, target_shape)
  return tf.broadcast_to(matched_rank, x_shape) 
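A small usage sketch (values assumed for illustration): y holds one value per batch element of x and is stretched across the remaining dimensions:

x = tf.zeros([4, 3, 2])            # batch of 4 features
y = tf.constant([1., 2., 3., 4.])  # one value per batch element
y_b = _broadcast_to_x_shape(x, y)  # shape (4, 3, 2); y_b[i] is filled with y[i]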
Example #7
Source File: as_conformal_as_possible_test.py    From graphics with Apache License 2.0
def test_energy_identity(self):
    """Checks that energy evaluated between the rest pose and itself is zero."""
    number_vertices = np.random.randint(3, 10)
    batch_size = np.random.randint(3)
    batch_shape = np.random.randint(1, 10, size=(batch_size)).tolist()
    vertices_rest_pose = np.random.uniform(size=(number_vertices, 3))
    vertices_deformed_pose = tf.broadcast_to(
        vertices_rest_pose, shape=batch_shape + [number_vertices, 3])
    quaternions = quaternion.from_euler(
        np.zeros(shape=batch_shape + [number_vertices, 3]))
    num_edges = int(round(number_vertices / 2))
    edges = np.zeros(shape=(num_edges, 2), dtype=np.int32)
    edges[..., 0] = np.linspace(
        0, number_vertices / 2 - 1, num_edges, dtype=np.int32)
    edges[..., 1] = np.linspace(
        number_vertices / 2, number_vertices - 1, num_edges, dtype=np.int32)

    energy = as_conformal_as_possible.energy(
        vertices_rest_pose=vertices_rest_pose,
        vertices_deformed_pose=vertices_deformed_pose,
        quaternions=quaternions,
        edges=edges,
        conformal_energy=False)

    self.assertAllClose(energy, tf.zeros_like(energy)) 
Example #8
Source File: top_utils.py    From bert-multitask-learning with MIT License
def create_seq_smooth_label(params, labels, num_classes):
    # Since the CRF does not accept smoothed labels, fall back to
    # 'hard' smoothing: sample a tag based on the smoothing factor.
    if params.label_smoothing > 0:

        true_labels = tf.stack(
            [labels]*int(num_classes/params.label_smoothing), axis=-1)
        single_label_set = tf.stack([tf.range(
            num_classes)]*params.max_seq_len, axis=0)
        batch_size_this_turn = tf.shape(true_labels)[0]
        label_set = tf.broadcast_to(
            input=single_label_set, shape=[
                batch_size_this_turn,
                single_label_set.shape.as_list()[
                    0],
                single_label_set.shape.as_list()[1]])
        sample_set = tf.concat([true_labels, label_set], axis=-1)

        dims = tf.shape(sample_set)
        sample_set = tf.reshape(sample_set, shape=[-1, dims[-1]])

        samples_index = tf.random_uniform(
            shape=[tf.shape(sample_set)[0], 1], minval=0, maxval=tf.shape(sample_set)[1], dtype=tf.int32)
        flat_offsets = tf.reshape(
            tf.range(0, tf.shape(sample_set)[0], dtype=tf.int32) * tf.shape(sample_set)[1], [-1, 1])
        flat_index = tf.reshape(samples_index+flat_offsets, [-1])
        sampled_label = tf.gather(
            tf.reshape(sample_set, [-1]), flat_index)
        sampled_label = tf.reshape(sampled_label, dims[:-1])
    else:
        sampled_label = labels
    return sampled_label 
Example #9
Source File: imagenet_preprocessing.py    From models with Apache License 2.0
def _mean_image_subtraction(image, means, num_channels):
  """Subtracts the given means from each image channel.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
    num_channels: number of color channels in the image that will be distorted.

  Returns:
    the centered image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')

  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # We have a 1-D tensor of means; convert to 3-D.
  # Note(b/130245863): we explicitly call `broadcast` instead of simply
  # expanding dimensions for better performance.
  means = tf.broadcast_to(means, tf.shape(image))

  return image - means 
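A usage sketch with the means from the docstring (the random image is illustrative):

image = tf.random.uniform([224, 224, 3], maxval=255.0)
means = [123.68, 116.779, 103.939]
centered = _mean_image_subtraction(image, means, num_channels=3)  # shape (224, 224, 3)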
Example #10
Source File: tensorflow_computation_factory.py    From federated with Apache License 2.0
def create_broadcast_scalar_to_shape(scalar_type: tf.DType,
                                     shape: tf.TensorShape) -> pb.Computation:
  """Returns a tensorflow computation returning the result of `tf.broadcast_to`.

  The returned computation has the type signature `(T -> U)`, where
  `T` is `scalar_type` and `U` is a `tff.TensorType` with dtype
  `scalar_type` and shape `shape`.

  Args:
    scalar_type: A `tf.DType`, the type of the scalar to broadcast.
    shape: A `tf.TensorShape` to broadcast to. Must be fully defined.

  Raises:
    TypeError: If `scalar_type` is not a `tf.DType` or if `shape` is not a
      `tf.TensorShape`.
    ValueError: If `shape` is not fully defined.
  """
  py_typecheck.check_type(scalar_type, tf.DType)
  py_typecheck.check_type(shape, tf.TensorShape)
  shape.assert_is_fully_defined()
  parameter_type = computation_types.TensorType(scalar_type, shape=())

  with tf.Graph().as_default() as graph:
    parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', parameter_type, graph)
    result = tf.broadcast_to(parameter_value, shape)
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        result, graph)

  type_signature = computation_types.FunctionType(parameter_type, result_type)
  tensorflow = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
      parameter=parameter_binding,
      result=result_binding)
  return pb.Computation(
      type=type_serialization.serialize_type(type_signature),
      tensorflow=tensorflow) 
Example #11
Source File: inputs.py    From GPT2 with MIT License
def gpt2_pred_input(params, text=None):
    from models.gpt2 import encoder
    enc = encoder.get_encoder(params["encoder_path"])
    tokens = enc.encode(text)
    if len(tokens) > 1024:
        tokens = tokens[:1024]
    t = tf.broadcast_to(tokens, [params["batch_size"], len(tokens)])
    dataset = tf.data.Dataset.from_tensors(t)
    return dataset 
Example #12
Source File: xlnet_modeling.py    From models with Apache License 2.0
def call(self, q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,
           r_w_bias, r_r_bias, r_s_bias, attn_mask):
    """Implements call() for the layer."""

    # content based attention score
    ac = tf.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h)

    # position based attention score
    bd = tf.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r)
    bd = rel_shift(bd, klen=tf.shape(ac)[1])

    # segment-based attention score
    if seg_mat is None:
      ef = 0
    else:
      ef = tf.einsum('ibnd,snd->isbn', q_head + r_s_bias, seg_embed)
      tgt_shape = tf.shape(bd)
      ef = tf.where(
          tf.broadcast_to(tf.expand_dims(seg_mat, 3), tgt_shape),
          tf.broadcast_to(ef[:, 1:, :, :], tgt_shape),
          tf.broadcast_to(ef[:, :1, :, :], tgt_shape))

    # merges attention scores and performs masking
    attn_score = (ac + bd + ef) * self.scale
    if attn_mask is not None:
      attn_score = attn_score - 1e30 * attn_mask

    # attention probability
    attn_prob = tf.nn.softmax(attn_score, 1)
    attn_prob = self.attention_probs_dropout(attn_prob)

    # attention output
    attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)

    return attn_vec 
Example #13
Source File: position_embedding.py    From models with Apache License 2.0
def call(self, inputs):
    """Implements call() for the layer."""
    input_shape = tf_utils.get_shape_list(inputs, expected_rank=3)
    if self._use_dynamic_slicing:
      position_embeddings = self._position_embeddings[:input_shape[1], :]
    else:
      position_embeddings = self._position_embeddings

    return tf.broadcast_to(position_embeddings, input_shape) 
Example #14
Source File: regularizers.py    From BERT with Apache License 2.0
def __call__(self, x):
    """Computes regularization using an unbiased Monte Carlo estimate."""
    prior = ed.Independent(
        ed.HalfCauchy(
            loc=tf.broadcast_to(self.loc, x.distribution.event_shape),
            scale=tf.broadcast_to(self.scale, x.distribution.event_shape)
        ).distribution,
        reinterpreted_batch_ndims=len(x.distribution.event_shape))
    negative_entropy = x.distribution.log_prob(x)
    cross_entropy = -prior.distribution.log_prob(x)
    return negative_entropy + cross_entropy 
Example #15
Source File: data.py    From zoo with Apache License 2.0
def _normalize(image, mean_rgb=MEAN_RGB, stddev_rgb=STDDEV_RGB):
    """Normalizes images to variance 1 and mean 0 over the whole dataset"""

    image -= tf.broadcast_to(mean_rgb, tf.shape(image))
    image /= tf.broadcast_to(stddev_rgb, tf.shape(image))

    return image 
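MEAN_RGB and STDDEV_RGB are module-level constants not shown here; the values below are the common ImageNet statistics, assumed purely for illustration:

MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]    # assumed values
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]  # assumed values

image = tf.random.uniform([224, 224, 3], maxval=255.0)
normalized = _normalize(image)  # per-channel (image - mean) / stddev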
Example #16
Source File: pairwise_distance.py    From neural-structured-learning with Apache License 2.0
def _replicate_sources(self, sources, targets):
    """Replicates `sources` to match the shape of `targets`.

    `targets` should either have an additional neighborhood size dimension at
    axis -2 or be of the same rank as `sources`. If `targets` has an additional
    dimension and `sources` has rank k, the first k - 1 dimensions and last
    dimension of `sources` and `targets` should match. If `sources` and
    `targets` have the same rank, the last k - 1 dimensions should match and the
    first dimension of `targets` should be a multiple of the first dimension of
    `sources`. This multiple represents the fixed neighborhood size of each
    sample.

    Args:
      sources: Tensor with shape [..., feature_size] from which distance will be
        calculated.
      targets: Either a tensor with shape [..., neighborhood_size, feature_size]
        or [sources.shape[0] * neighborhood_size] + sources.shape[1:].

    Returns:
      `sources` replicated to be shape-compatible with `targets`.
    """
    # Depending on the rank of `sources` and `targets`, decide to broadcast
    # first, or replicate directly.
    if (sources.shape.ndims is not None and targets.shape.ndims is not None and
        sources.shape.ndims + 1 == targets.shape.ndims):
      return tf.broadcast_to(
          tf.expand_dims(sources, axis=-2), tf.shape(targets))

    return utils.replicate_embeddings(
        sources,
        tf.shape(targets)[0] // tf.shape(sources)[0]) 
Example #17
Source File: transformer_test.py    From OpenNMT-tf with MIT License
def testMultiHeadAttentionMask(self):
    attention = transformer.MultiHeadAttention(4, 20, return_attention=True)
    queries = tf.random.uniform([4, 5, 10])
    memory = tf.random.uniform([4, 3, 10])
    mask = tf.sequence_mask([1, 3, 2, 2])
    _, _, attention = attention(queries, memory=memory, mask=mask)
    attention = tf.reshape(attention, [4, -1, 3])
    mask = tf.broadcast_to(tf.expand_dims(mask, 1), attention.shape)
    padding = tf.boolean_mask(attention, tf.logical_not(mask))
    self.assertAllEqual(tf.reduce_sum(padding), 0) 
Example #18
Source File: utils.py    From seed_rl with Apache License 2.0
def tpu_decode(ts, structure=None):
  """Decodes a nest of Tensors encoded with tpu_encode.

  Args:
    ts: A nest of Tensors or TPUEncodedUInt8 composite tensors.
    structure: If not None, a nest of Tensors or TPUEncodedUInt8 composite
      tensors (possibly within PerReplica's) used only to recreate the
      structure of `ts`, which should then be a list without composite tensors.

  Returns:
    A nest of decoded tensors packed as `structure` if available, otherwise
    packed as `ts`.
  """
  def visit(t, s):  
    s = s.values[0] if isinstance(s, values_lib.PerReplica) else s
    if isinstance(s, TPUEncodedUInt8):
      x = t.encoded if isinstance(t, TPUEncodedUInt8) else t
      x = tf.reshape(x, [-1, 32, 1])
      x = tf.broadcast_to(x, x.shape[:-1] + [4])
      x = tf.reshape(x, [-1, 128])
      x = tf.bitwise.bitwise_and(x, [0xFF, 0xFF00, 0xFF0000, 0xFF000000] * 32)
      x = tf.bitwise.right_shift(x, [0, 8, 16, 24] * 32)
      rank = s.original_shape.rank
      perm = [rank - 1] + list(range(rank - 1))
      inverted_shape = np.array(s.original_shape)[np.argsort(perm)]
      x = tf.reshape(x, inverted_shape)
      x = tf.transpose(x, perm)
      return x
    elif isinstance(s, TPUEncodedF32):
      x = t.encoded if isinstance(t, TPUEncodedF32) else t
      x = tf.reshape(x, s.original_shape)
      return x
    else:
      return t

  return tf.nest.map_structure(visit, ts, structure or ts) 
Example #19
Source File: networks.py    From auto_yolo with MIT License
def _call(self, input_signal, input_locs, output_locs, is_training):
        if not self.is_built:
            self.value_func = self.build_mlp(scope="value_func")
            self.after_func = self.build_mlp(scope="after")

            if self.do_object_wise:
                self.object_wise_func = self.build_object_wise(scope="object_wise")

            self.is_built = True

        batch_size, n_inp, _ = tf_shape(input_signal)
        loc_dim = tf_shape(input_locs)[-1]
        n_outp = tf_shape(output_locs)[-2]
        input_locs = tf.broadcast_to(input_locs, (batch_size, n_inp, loc_dim))
        output_locs = tf.broadcast_to(output_locs, (batch_size, n_outp, loc_dim))

        dist = output_locs[:, :, None, :] - input_locs[:, None, :, :]
        proximity = tf.exp(-0.5 * tf.reduce_sum((dist / self.kernel_std)**2, axis=3))
        proximity = proximity / (2 * np.pi)**(0.5 * loc_dim) / self.kernel_std**loc_dim

        V = apply_object_wise(
            self.value_func, input_signal,
            output_size=self.n_hidden, is_training=is_training)  # (batch_size, n_inp, value_dim)

        result = tf.matmul(proximity, V)  # (batch_size, n_outp, value_dim)

        # `after_func` is applied to the attention result, followed by dropout
        # and layer norm. If `do_object_wise` is True, `object_wise_func` is then
        # applied object-wise in a ResNet-style (residual) manner.

        output = apply_object_wise(self.after_func, result, output_size=self.n_hidden, is_training=is_training)
        output = tf.layers.dropout(output, self.p_dropout, training=is_training)
        signal = tf.contrib.layers.layer_norm(output)

        if self.do_object_wise:
            output = apply_object_wise(self.object_wise_func, signal, output_size=self.n_hidden, is_training=is_training)
            output = tf.layers.dropout(output, self.p_dropout, training=is_training)
            signal = tf.contrib.layers.layer_norm(signal + output)

        return signal 
Example #20
Source File: imagenet.py    From rethinking-bnn-optimization with Apache License 2.0
def _normalize(image, mean_rgb=MEAN_RGB, stddev_rgb=STDDEV_RGB):
    """Normalizes images to variance 1 and mean 0 over the whole dataset"""
    # TODO: Evaluate if it makes sense to use this as a first layer
    # and do the computation on the GPU instead

    image -= tf.broadcast_to(mean_rgb, tf.shape(image))
    image /= tf.broadcast_to(stddev_rgb, tf.shape(image))

    return image 
Example #21
Source File: tensorflow.py    From deepx with MIT License
def dot(self, x, y):
        if len(x.get_shape()) != len(y.get_shape()):
            len_y = len(y.get_shape())
            new_y_shape = tf.concat([tf.shape(x)[:-len_y], tf.shape(y)], 0)
            y = tf.broadcast_to(y, new_y_shape)
        return tf.matmul(x, y) 
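The same idea as a standalone sketch (shapes are illustrative): the unbatched matrix y is broadcast across x's leading batch dimensions before the matmul:

x = tf.random.uniform([2, 5, 3, 4])                          # leading batch dims (2, 5)
y = tf.random.uniform([4, 6])                                # unbatched matrix
new_y_shape = tf.concat([tf.shape(x)[:-2], tf.shape(y)], 0)  # (2, 5, 4, 6)
z = tf.matmul(x, tf.broadcast_to(y, new_y_shape))            # shape (2, 5, 3, 6)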
Example #22
Source File: imagenet_preprocessing.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def _mean_image_subtraction(image, means, num_channels):
  """Subtracts the given means from each image channel.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
    num_channels: number of color channels in the image that will be distorted.

  Returns:
    the centered image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')

  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # We have a 1-D tensor of means; convert to 3-D.
  # Note(b/130245863): we explicitly call `broadcast` instead of simply
  # expanding dimensions for better performance.
  means = tf.broadcast_to(means, tf.shape(image))

  return image - means 
Example #23
Source File: models_iic.py    From IIC with MIT License
def __iic_loss(self, pi_x, pi_gx):

        # up-sample non-perturbed to match the number of repeat samples
        pi_x = tf.tile(pi_x, [self.num_repeats] + [1] * len(pi_x.shape.as_list()[1:]))

        # get K
        k = pi_x.shape.as_list()[1]

        # compute P
        p = tf.transpose(pi_x) @ pi_gx

        # enforce symmetry
        p = (p + tf.transpose(p)) / 2

        # enforce minimum value
        p = tf.clip_by_value(p, clip_value_min=1e-6, clip_value_max=tf.float32.max)

        # normalize
        p /= tf.reduce_sum(p)

        # get marginals
        pi = tf.broadcast_to(tf.reshape(tf.reduce_sum(p, axis=0), (k, 1)), (k, k))
        pj = tf.broadcast_to(tf.reshape(tf.reduce_sum(p, axis=1), (1, k)), (k, k))

        # complete the loss
        loss = -tf.reduce_sum(p * (tf.math.log(p) - tf.math.log(pi) - tf.math.log(pj)))

        return loss 
Example #24
Source File: reformer_utils.py    From BERT with Apache License 2.0
def sort_key_val(t1, t2, dim=-1):
    values = tf.sort(t1, axis=dim)
    t2 = tf.broadcast_to(t2, t1.shape)
    return values, tf.gather(t2, tf.argsort(t1, axis=dim), axis=dim) 
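A quick sketch with 1-D inputs: the values in t2 are reordered to follow the sort order of their keys in t1:

keys = tf.constant([3., 1., 2.])
vals = tf.constant([30., 10., 20.])
sorted_keys, sorted_vals = sort_key_val(keys, vals)
# sorted_keys == [1., 2., 3.]; sorted_vals == [10., 20., 30.]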
Example #25
Source File: noise.py    From OpenNMT-tf with MIT License
def _apply(self, words):
    if self.probability == 0:
      return tf.identity(words)
    shape = tf.shape(words)
    replace_mask = random_mask(shape[:1], self.probability)
    filler = tf.fill([shape[0], 1], self.filler)
    filler = tf.pad(filler, [[0, 0], [0, shape[-1] - 1]])
    return tf.where(
        tf.broadcast_to(tf.expand_dims(replace_mask, -1), tf.shape(words)),
        x=filler,
        y=words) 
Example #26
Source File: parameter.py    From garage with MIT License
def parameter(input_var,
              length,
              initializer=tf.zeros_initializer(),
              dtype=tf.float32,
              trainable=True,
              name='parameter'):
    """Parameter layer.

    Creates a variable that is broadcast to a given shape so that it
    matches the input variable during training.

    For recurrent usage, use garage.tf.models.recurrent_parameter().

    Example: a trainable parameter variable with shape (2,) is broadcast
    to (32, 2) when applied to a batch of size 32.

    Args:
        input_var (tf.Tensor): Input tf.Tensor.
        length (int): Integer dimension of the variable.
        initializer (callable): Initializer of the variable. The function
            should return a tf.Tensor.
        dtype: Data type of the variable (default is tf.float32).
        trainable (bool): Whether the variable is trainable.
        name (str): Variable scope of the variable.

    Returns:
        A tensor containing the broadcast variable.
    """
    with tf.compat.v1.variable_scope(name):
        p = tf.compat.v1.get_variable('parameter',
                                      shape=(length, ),
                                      dtype=dtype,
                                      initializer=initializer,
                                      trainable=trainable)
        batch_dim = tf.shape(input_var)[0]
        broadcast_shape = tf.concat(axis=0, values=[[batch_dim], [length]])
        p_broadcast = tf.broadcast_to(p, shape=broadcast_shape)
        return p_broadcast 
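A graph-mode usage sketch (the layer relies on tf.compat.v1 variable scopes; the name 'log_std' is hypothetical):

tf.compat.v1.disable_eager_execution()
input_var = tf.compat.v1.placeholder(tf.float32, shape=(None, 8))
p = parameter(input_var, length=2, name='log_std')  # shape (batch_size, 2)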
Example #27
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _test_broadcast_to_from_tensor(in_shape):
    """ One iteration of broadcast_to with unknown shape at graph build"""

    data = np.random.uniform(size=in_shape).astype('float32')

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(
            shape=[None], dtype=data.dtype)

        shape_data = tf.multiply(tf.shape(in_data), 32)
        tf.broadcast_to(in_data, shape_data)

        compare_tf_with_tvm(data, 'Placeholder:0', 'BroadcastTo:0') 
Example #28
Source File: preprocessing.py    From models with Apache License 2.0
def eval_image(image, height, width, resize_method,
               central_fraction=0.875, scope=None):

  with tf.compat.v1.name_scope('eval_image'):
    if resize_method == 'crop':
      shape = tf.shape(input=image)
      image = tf.cond(pred=tf.less(shape[0], shape[1]),
                      true_fn=lambda: tf.image.resize(image,
                                                     tf.convert_to_tensor(value=[256, 256 * shape[1] / shape[0]],
                                                                          dtype=tf.int32)),
                      false_fn=lambda: tf.image.resize(image,
                                                     tf.convert_to_tensor(value=[256 * shape[0] / shape[1], 256],
                                                                          dtype=tf.int32)))

      shape = tf.shape(input=image)
      y0 = (shape[0] - height) // 2
      x0 = (shape[1] - width) // 2
      distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, width)
      distorted_image.set_shape([height, width, 3])
      means = tf.broadcast_to([123.68, 116.78, 103.94], tf.shape(input=distorted_image))
      return distorted_image - means
    else:  # bilinear
      if image.dtype != tf.float32:
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
      # Crop the central region of the image with an area containing 87.5% of
      # the original image.
      if central_fraction:
        image = tf.image.central_crop(image, central_fraction=central_fraction)

      if height and width:
        # Resize the image to the specified height and width.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize(image, [height, width],
                                         method=tf.image.ResizeMethod.BILINEAR)
        image = tf.squeeze(image, [0])
      image = tf.subtract(image, 0.5)
      image = tf.multiply(image, 2.0)
      return image 
Example #29
Source File: preprocessing_benchmark.py    From models with Apache License 2.0
def eval_image(image, height, width, resize_method,
               central_fraction=0.875, scope=None):
  with tf.compat.v1.name_scope('eval_image'):
    if resize_method == 'crop':
      shape = tf.shape(input=image)
      image = tf.cond(pred=tf.less(shape[0], shape[1]),
                      true_fn=lambda: tf.image.resize(image,
                                                     tf.convert_to_tensor(value=[256, 256 * shape[1] / shape[0]],
                                                                          dtype=tf.int32)),
                      false_fn=lambda: tf.image.resize(image,
                                                     tf.convert_to_tensor(value=[256 * shape[0] / shape[1], 256],
                                                                          dtype=tf.int32)))
      shape = tf.shape(input=image)
      y0 = (shape[0] - height) // 2
      x0 = (shape[1] - width) // 2
      distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, width)
      distorted_image.set_shape([height, width, 3])
      means = tf.broadcast_to([123.68, 116.78, 103.94], tf.shape(input=distorted_image))
      return distorted_image - means
    else:  # bilinear
      if image.dtype != tf.float32:
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
      # Crop the central region of the image with an area containing 87.5% of
      # the original image.
      if central_fraction:
        image = tf.image.central_crop(image, central_fraction=central_fraction)

      if height and width:
        # Resize the image to the specified height and width.
        image = tf.expand_dims(image, 0)
        image = tf.image.resize(image, [height, width],
                                         method=tf.image.ResizeMethod.BILINEAR)
        image = tf.squeeze(image, [0])
      image = tf.subtract(image, 0.5)
      image = tf.multiply(image, 2.0)
      return image