Python tensorflow.roll() Examples

The following are 6 code examples of tensorflow.roll(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
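For reference, tf.roll(input, shift, axis) circularly shifts the elements of a tensor along the given axis, wrapping elements that move past one end around to the other. A minimal sketch (assuming TF 2.x eager execution so the results print directly):

import tensorflow as tf

x = tf.constant([1, 2, 3, 4, 5])
print(tf.roll(x, shift=2, axis=0))   # [4 5 1 2 3] -- positive shift moves elements toward higher indices
print(tf.roll(x, shift=-2, axis=0))  # [3 4 5 1 2] -- negative shift moves them toward lower indices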
Example #1
Source File: net.py    From gcdn with MIT License
def compute_graph(self, h):

		id_mat = 2*tf.eye(self.N)

		# Work in float64: the norm expansion below is prone to cancellation in float32.
		h = tf.cast(h, tf.float64)

		# Squared pairwise distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2<a, b>.
		sq_norms = tf.reduce_sum(h*h, 2) # (B, N)
		D = tf.abs(tf.expand_dims(sq_norms, 2) + tf.expand_dims(sq_norms, 1) - 2*tf.matmul(h, h, transpose_b=True)) # (B, N, N)
		D = tf.cast(D, tf.float32)
		# Mask to the local neighborhood, then subtract 2*I: the diagonal
		# (self-distance) entries, which are ~0, become negative.
		D = tf.multiply(D, self.local_mask)
		D = D - id_mat

		h = tf.cast(h, tf.float32) # cast back; h is not used after this point

		return D


	# same as new tf.roll but only for 3D input and axis=2 
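The trailing comment above refers to a helper that this snippet truncates. A minimal sketch of what a roll restricted to 3D input and axis=2 could look like (hypothetical code, not from the gcdn repo; assumes shift is a plain Python int and the last axis has a static size):

def roll_axis2(x, shift):
    # Circular shift of a (B, N, C) tensor along axis 2, mirroring
    # tf.roll(x, shift, axis=2) for a non-negative effective shift.
    size = x.get_shape().as_list()[2]
    shift = shift % size
    if shift == 0:
        return x
    return tf.concat([x[:, :, -shift:], x[:, :, :-shift]], axis=2)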
Example #2
Source File: reversible_layers.py    From BERT with Apache License 2.0
def one_hot_minus(inputs, shift):
  """Performs (inputs - shift) % vocab_size in the one-hot space.

  Args:
    inputs: Tensor of shape `[..., vocab_size]`. Typically a soft/hard one-hot
      Tensor.
    shift: Tensor of shape `[..., vocab_size]`. Typically a soft/hard one-hot
      Tensor specifying how much to shift the corresponding one-hot vector in
      inputs. Soft values perform a "weighted shift": for example,
      shift=[0.2, 0.3, 0.5] performs a linear combination of 0.2 * shifting by
      zero; 0.3 * shifting by one; and 0.5 * shifting by two.

  Returns:
    Tensor of same shape and dtype as inputs.
  """
  # TODO(trandustin): Implement with circular conv1d.
  inputs = tf.convert_to_tensor(inputs)
  shift = tf.cast(shift, inputs.dtype)
  vocab_size = inputs.shape[-1].value  # TF1-style; in TF 2.x use inputs.shape[-1]
  # Form a [..., vocab_size, vocab_size] matrix. Each batch element of
  # inputs will vector-matrix multiply the vocab_size x vocab_size matrix. This
  # "shifts" the inputs batch element by the corresponding shift batch element.
  shift_matrix = tf.stack([tf.roll(shift, i, axis=-1)
                           for i in range(vocab_size)], axis=-2)
  outputs = tf.einsum('...v,...uv->...u', inputs, shift_matrix)
  return outputs 
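A quick NumPy sanity check of the shift semantics (my own sketch, not from the BERT repo): a one-hot input at index 3 combined with a one-hot shift at index 1 should land at (3 - 1) % 5 == 2.

import numpy as np

V = 5
inputs = np.eye(V)[3]   # one-hot encoding of the value 3
shift = np.eye(V)[1]    # one-hot encoding of a shift by 1
shift_matrix = np.stack([np.roll(shift, i) for i in range(V)], axis=-2)
out = np.einsum('v,uv->u', inputs, shift_matrix)
print(out.argmax())     # 2, i.e. (3 - 1) % V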
Example #3
Source File: net_conv2.py    From gcdn with MIT License
def create_bn_variables(self, name, Nfeat):

		self.scale['bn_scale_'+name] = tf.get_variable('bn_scale_'+name, [Nfeat], initializer=tf.ones_initializer())
		self.beta['bn_beta_'+name]  = tf.get_variable('bn_beta_'+name , [Nfeat], initializer=tf.constant_initializer(0.0))
		self.pop_mean['bn_pop_mean_'+name] = tf.get_variable('bn_pop_mean_'+name, [Nfeat], initializer=tf.constant_initializer(0.0), trainable=False)
		self.pop_var['bn_pop_var_'+name ]  = tf.get_variable('bn_pop_var_'+name , [Nfeat], initializer=tf.ones_initializer(), trainable=False)
		self.dn_vars = self.dn_vars + [self.scale['bn_scale_'+name], self.beta['bn_beta_'+name]]
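These are the standard TF1 batch-norm parameters: a learned scale (gamma) and offset (beta), plus non-trainable population statistics maintained for inference. A hedged sketch of how such variables are typically consumed at test time (my illustration, not the repo's code):

import tensorflow as tf

def bn_inference(x, pop_mean, pop_var, beta, scale, eps=1e-5):
    # Normalize with fixed population statistics, as done at inference time.
    return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, scale, eps)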


Example #4
Source File: shapes.py    From Counterfactual-StoryRW with MIT License
def varlength_concat_py(x, y, x_length, dtype=None):
    """Concatenates rows of `x` and `y` where each row of
    `x` has a variable length.

    The function has the same semantics as :func:`varlength_concat`,
    except that this function operates on NumPy arrays instead of TF tensors.

    Both `x` and `y` are of numeric dtypes, such as `int32` and `float32`,
    with mask value `0`. The two arrays must be of the same dtype.

    Args:
        x: An array of shape `[batch_size, x_dim_2, other_dims]`.
        y: An array of shape `[batch_size, y_dim_2, other_dims]`.
            All dimensions except the 2nd dimension must match
            those of `x`.
        x_length: A 1D int array of shape `[batch_size]` containing
            the length of each `x` row.
            Elements beyond the respective lengths will be
            made zero.
        dtype: Type of :attr:`x`. If `None`, inferred from
            :attr:`x` automatically.

    Returns:
        An array of shape `[batch_size, x_dim_2 + y_dim_2, other_dims]`.

    Example:
        .. code-block:: python

            x = np.asarray([[1, 1, 0, 0],
                            [1, 1, 1, 0]])
            x_length = [2, 3]
            y = np.asarray([[2, 2, 0],
                            [2, 2, 2]])

            out = varlength_concat_py(x, y, x_length)
            # out = [[1, 1, 2, 2, 0, 0, 0]
            #        [1, 1, 1, 2, 2, 2, 0]]
    """
    x = np.asarray(x, dtype=dtype)
    y = np.asarray(y, dtype=dtype)

    x_masked = mask_sequences(x, x_length, dtype=dtype)
    zeros_y = np.zeros_like(y)
    x_aug = np.concatenate([x_masked, zeros_y], axis=1)

    zeros_x = np.zeros_like(x)
    y_aug = np.concatenate([zeros_x, y], axis=1)

    # Now, x_aug.shape == y_aug.shape

    max_length_x = x.shape[1]
    batch_size = x.shape[0]

    for index in np.arange(batch_size):
        y_aug_i_rolled = np.roll(
            a=y_aug[index],
            shift=x_length[index] - max_length_x,
            axis=0)
        x_aug[index] += y_aug_i_rolled

    return x_aug 
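The np.roll call is what aligns each row of `y` directly after the valid prefix of the corresponding `x` row: `y_aug` places `y` after `max_length_x` zeros, and rolling left by `max_length_x - x_length[i]` slides it back to position `x_length[i]`. Tracing row 0 of the docstring example:

# x_length[0] == 2, max_length_x == 4
# x_aug[0]  = [1, 1, 0, 0, 0, 0, 0]
# y_aug[0]  = [0, 0, 0, 0, 2, 2, 0]
# np.roll(y_aug[0], 2 - 4, axis=0) = [0, 0, 2, 2, 0, 0, 0]
# x_aug[0] + rolled                = [1, 1, 2, 2, 0, 0, 0]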
Example #5
Source File: shapes.py    From texar with Apache License 2.0
def varlength_concat_py(x, y, x_length, dtype=None):
    """Identical to Example #4 above; the full listing is not repeated here."""
Example #6
Source File: utility.py    From dreamer with Apache License 2.0
def compute_cpc_loss(pred, features, config):
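  # InfoNCE-style contrastive objective: the positive term scores the true
  # features under the prediction, while negatives are manufactured by
  # circularly shifting (tf.roll-ing) the features along the batch and/or
  # time axes. Note that a shift of 0 is included, so the positive pair also
  # appears in the log-sum-exp denominator.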
  if config.cpc_contrast == 'batch':
    ta = tf.TensorArray(tf.float32, 0, True, element_shape=[None, None])
    _, _, ta = tf.while_loop(
        lambda i, f, ta: tf.less(i, tf.shape(f)[0]),
        lambda i, f, ta: (
            i + 1, f, ta.write(ta.size(), pred.log_prob(tf.roll(f, i, 0)))),
        (0, features, ta), back_prop=True, swap_memory=True)
    positive = pred.log_prob(features)
    negative = tf.reduce_logsumexp(ta.stack(), 0)
    return positive - negative
  elif config.cpc_contrast == 'time':
    ta = tf.TensorArray(tf.float32, 0, True, element_shape=[None, None])
    _, _, ta = tf.while_loop(
        lambda i, f, ta: tf.less(i, tf.shape(f)[1]),
        lambda i, f, ta: (
            i + 1, f, ta.write(ta.size(), pred.log_prob(tf.roll(f, i, 1)))),
        (0, features, ta), back_prop=True, swap_memory=True)
    positive = pred.log_prob(features)
    negative = tf.reduce_logsumexp(ta.stack(), 0)
    return positive - negative
  elif config.cpc_contrast == 'window':
    assert config.cpc_batch_amount <= config.batch_shape[0]
    assert config.cpc_time_amount <= config.batch_shape[1]
    total_amount = config.cpc_batch_amount * config.cpc_time_amount
    ta = tf.TensorArray(tf.float32, 0, True, element_shape=[None, None])
    def compute_negatives(index, ta):
      batch_shift = tf.math.floordiv(index, config.cpc_time_amount)
      time_shift = tf.mod(index, config.cpc_time_amount)
      batch_shift -= config.cpc_batch_amount // 2
      time_shift -= config.cpc_time_amount // 2
      rolled = tf.roll(tf.roll(features, batch_shift, 0), time_shift, 1)
      return ta.write(ta.size(), pred.log_prob(rolled))
    _, ta = tf.while_loop(
        lambda index, ta: tf.less(index, total_amount),
        lambda index, ta: (index + 1, compute_negatives(index, ta)),
        (0, ta), back_prop=True, swap_memory=True)
    positive = pred.log_prob(features)
    negative = tf.reduce_logsumexp(ta.stack(), 0)
    return positive - negative
  else:
    raise NotImplementedError(config.cpc_contrast)
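A compact NumPy sketch of what the 'batch' branch computes (my own illustration; `log_prob_fn` stands in for `pred.log_prob`):

import numpy as np

def cpc_loss_batch(log_prob_fn, features):
    # features: [batch, dim]; log_prob_fn maps [batch, dim] -> per-example
    # scores [batch]. Each roll pairs every prediction with another element
    # of the batch; i == 0 reproduces the positive pair.
    batch = features.shape[0]
    scores = np.stack([log_prob_fn(np.roll(features, i, axis=0))
                       for i in range(batch)])          # [batch, batch]
    positive = log_prob_fn(features)
    # Numerically stable log-sum-exp over the shift dimension.
    m = scores.max(axis=0)
    negative = m + np.log(np.exp(scores - m).sum(axis=0))
    return positive - negative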