Python tensorflow.compat.v1.AUTO_REUSE Examples

The following are 30 code examples of tensorflow.compat.v1.AUTO_REUSE(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1, or try the search function.
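For context, tf.AUTO_REUSE tells a variable scope to create variables on the first call and silently reuse them on every later call, which is the pattern all the examples below rely on. A minimal sketch of that behavior (names are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def dense_block(x):
  # First call creates "dense_block/w"; later calls reuse it instead of
  # raising a "variable already exists" error.
  with tf.variable_scope("dense_block", reuse=tf.AUTO_REUSE):
    w = tf.get_variable("w", shape=[4, 4])
    return tf.matmul(x, w)

x = tf.placeholder(tf.float32, [None, 4])
y1 = dense_block(x)
y2 = dense_block(x)  # shares the same "w" as y1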
Example #1
Source File: modalities.py    From tensor2tensor with Apache License 2.0
def video_bitwise_targets_bottom(x, model_hparams, vocab_size):
  """Bottom transformation for embedding target video bitwise."""
  pixel_embedding_size = 64
  inputs = x
  with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
    common_layers.summarize_video(inputs, "targets_bottom")
    # Embed bitwise.
    assert vocab_size == 256
    embedded = discretization.int_to_bit_embed(inputs, 8,
                                               pixel_embedding_size)
    # Transpose and project.
    transposed = common_layers.time_to_channels(embedded)
    return tf.layers.dense(
        transposed,
        model_hparams.hidden_size,
        name="merge_pixel_embedded_frames") 
Example #2
Source File: glow_ops.py    From tensor2tensor with Apache License 2.0
def actnorm_3d(name, x, logscale_factor=3.):
  """Applies actnorm to each time-step independently.

  There are a total of 2*n_channels*n_steps parameters learnt.

  Args:
    name: variable scope.
    x: 5-D Tensor, (NTHWC)
    logscale_factor: Increases the learning rate of the scale by
                     logscale_factor.
  Returns:
    x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x = tf.unstack(x, axis=1)
    x_normed = []
    for ind, x_step in enumerate(x):
      x_step, _ = actnorm("actnorm_%d" % ind, x_step,
                          logscale_factor=logscale_factor)
      x_normed.append(x_step)
    return tf.stack(x_normed, axis=1), None 
Example #3
Source File: networks.py    From tensor2robot with Apache License 2.0
def Embedding(image, mode, params, reuse=tf.AUTO_REUSE, scope='scene'):
  """Implements scene or goal embedding.

  Args:
    image: Batch of images corresponding to scene or goal.
    mode: Mode is tf.estimator.ModeKeys.EVAL, TRAIN, or PREDICT.
    params: Hyperparameters for the network (unused).
    reuse: Reuse parameter for variable scope.
    scope: The variable_scope to use for the variables.
  Returns:
    A tuple (batch of summed embeddings, batch of embedding maps).
  """
  del params
  is_training = mode == tf.estimator.ModeKeys.TRAIN
  with tf.variable_scope(scope, reuse=reuse):
    scene = resnet.get_resnet50_spatial(image, is_training)
    scene = tf.nn.relu(scene)
    summed_scene = tf.reduce_mean(scene, axis=[1, 2])
  return summed_scene, scene 
Example #4
Source File: maml_inner_loop_test.py    From tensor2robot with Apache License 2.0
def learned_model_train_fn(features,
                           labels,
                           inference_outputs,
                           mode=None,
                           config=None,
                           params=None):
  """A model_train_fn where the loss function itself is learned."""
  del features, labels, mode, config, params
  with tf.variable_scope('learned_loss', reuse=tf.AUTO_REUSE):
    learned_label = tf.get_variable(
        'learned_label',
        shape=(1,),
        dtype=tf.float32,
        initializer=tf.constant_initializer([1.0], dtype=tf.float32))
  return tf.losses.mean_squared_error(
      labels=learned_label, predictions=inference_outputs['prediction']) 
Example #5
Source File: lstm_models.py    From magenta with Apache License 2.0
def build(self, hparams, output_depth, is_training=True):
    self.hparams = hparams
    self._output_depth = output_depth
    self._total_length = hparams.max_seq_len
    if self._total_length != np.prod(self._level_lengths):
      raise ValueError(
          'The product of the HierarchicalLstmDecoder level lengths (%d) must '
          'equal the padded input sequence length (%d).' % (
              np.prod(self._level_lengths), self._total_length))
    tf.logging.info('\nHierarchical Decoder:\n'
                    '  input length: %d\n'
                    '  level output lengths: %s\n',
                    self._total_length,
                    self._level_lengths)

    self._hier_cells = [
        lstm_utils.rnn_cell(
            hparams.dec_rnn_size,
            dropout_keep_prob=hparams.dropout_keep_prob,
            residual=hparams.residual_decoder)
        # Subtract 1 for the core decoder level
        for _ in range(len(self._level_lengths) - 1)]

    with tf.variable_scope('core_decoder', reuse=tf.AUTO_REUSE):
      self._core_decoder.build(hparams, output_depth, is_training) 
Example #6
Source File: lstm_models.py    From magenta with Apache License 2.0
def build(self, hparams, is_training=True):
    self._total_length = hparams.max_seq_len
    if self._total_length != np.prod(self._level_lengths):
      raise ValueError(
          'The product of the HierarchicalLstmEncoder level lengths (%d) must '
          'equal the padded input sequence length (%d).' % (
              np.prod(self._level_lengths), self._total_length))
    tf.logging.info('\nHierarchical Encoder:\n'
                    '  input length: %d\n'
                    '  level lengths: %s\n',
                    self._total_length,
                    self._level_lengths)
    self._hierarchical_encoders = []
    num_splits = int(np.prod(self._level_lengths))
    for i, l in enumerate(self._level_lengths):
      num_splits //= l
      tf.logging.info('Level %d splits: %d', i, num_splits)
      h_encoder = self._core_encoder_cls()
      h_encoder.build(
          hparams, is_training,
          name_or_scope=tf.VariableScope(
              tf.AUTO_REUSE, 'encoder/hierarchical_level_%d' % i))
      self._hierarchical_encoders.append((num_splits, h_encoder)) 
Example #7
Source File: glow_ops.py    From tensor2tensor with Apache License 2.0
def single_conv_dist(name, x, output_channels=None):
  """A 3x3 convolution mapping x to a standard normal distribution at init.

  Args:
    name: variable scope.
    x: 4-D Tensor.
    output_channels: number of channels of the mean and std.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_shape = common_layers.shape_list(x)
    if output_channels is None:
      output_channels = x_shape[-1]
    mean_log_scale = conv("conv2d", x, output_channels=2*output_channels,
                          conv_init="zeros", apply_actnorm=False)
    mean = mean_log_scale[:, :, :, 0::2]
    log_scale = mean_log_scale[:, :, :, 1::2]
    return tf.distributions.Normal(mean, tf.exp(log_scale)) 
Example #8
Source File: adv_attack_utils.py    From tensor2tensor with Apache License 2.0
def fprop(self, x):
    if x.name in self._logits_dict:
      return self._logits_dict[x.name]

    x = tf.map_fn(tf.image.per_image_standardization, x)
    self._additional_features['inputs'] = x

    if self._scope is None:
      scope = tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE)
    else:
      scope = tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE)

    with scope:
      logits = self._model_fn(
          self._additional_features,
          None,
          'attack',
          params=self._params,
          config=self._config)
    self._logits_dict[x.name] = logits

    return {model.Model.O_LOGITS: tf.reshape(logits, [-1, logits.shape[-1]])} 
Example #9
Source File: glow_ops.py    From tensor2tensor with Apache License 2.0
def revnet(name, x, hparams, reverse=True):
  """'hparams.depth' steps of generative flow.

  Args:
    name: variable scope for the revnet block.
    x: 4-D Tensor, shape=(NHWC).
    hparams: HParams.
    reverse: bool, forward or backward pass.
  Returns:
    x: 4-D Tensor, shape=(NHWC).
    objective: float.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    steps = np.arange(hparams.depth)
    if reverse:
      steps = steps[::-1]

    objective = 0.0
    for step in steps:
      x, curr_obj = revnet_step(
          "revnet_step_%d" % step, x, hparams, reverse=reverse)
      objective += curr_obj
    return x, objective 
Example #10
Source File: glow_ops.py    From tensor2tensor with Apache License 2.0
def scale_gaussian_prior(name, z, logscale_factor=3.0, trainable=True):
  """Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component.

  s^i is a learnable parameter with identity initialization.
  std^i is optionally learnable with identity initialization.

  Args:
    name: variable scope.
    z: input_tensor
    logscale_factor: equivalent to scaling up the learning_rate by a factor
                     of logscale_factor.
    trainable: Whether or not std^i is learnt.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    z_shape = common_layers.shape_list(z)
    latent_multiplier = tf.get_variable(
        "latent_multiplier", shape=z_shape, dtype=tf.float32,
        initializer=tf.ones_initializer())
    log_scale = tf.get_variable(
        "log_scale_latent", shape=z_shape, dtype=tf.float32,
        initializer=tf.zeros_initializer(), trainable=trainable)
    log_scale = log_scale * logscale_factor
    return tfp.distributions.Normal(
        loc=latent_multiplier * z, scale=tf.exp(log_scale)) 
Example #11
Source File: svg_decoder.py    From magenta with Apache License 2.0
def pretrained_visual_encoder(self, features, hparams):
    # we want the exact hparams used for training this VAE
    vae_hparams = trainer_lib.create_hparams(
        hparams.vae_hparam_set, hparams.vae_hparams,
        data_dir=hparams.vae_data_dir, problem_name=hparams.vae_problem)

    # go back to root variable scope
    with tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, ''),
                           reuse=tf.AUTO_REUSE, auxiliary_name_scope=False):
      vae = image_vae.ImageVAE(vae_hparams, mode=self._hparams.mode,
                               problem_hparams=vae_hparams.problem_hparams)
      # the real input to vae will be features['rendered_targets']
      vae_features = copy.copy(features)
      vae_features['inputs'] = tf.reshape(vae_features['targets_psr'][:, -1, :],
                                          [-1, 64, 64, 1])
      vae_features['targets'] = vae_features['inputs']
      # we want vae to return bottleneck
      vae_features['bottleneck'] = tf.zeros((0, 128))
      sampled_bottleneck, _ = vae(vae_features)
      vae.initialize_from_ckpt(hparams.vae_ckpt_dir)

      if tf.executing_eagerly():
        sampled_bottleneck, _ = vae(vae_features)

    return sampled_bottleneck 
Example #12
Source File: transformer_vae_flow_prior_ops.py    From tensor2tensor with Apache License 2.0
def transformer_decoder_layers(name,
                               n_layers,
                               decoder_input,
                               **kwargs):
  """A transformation block composed of transformer decoder layers."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    hparams = kwargs["hparams"]
    outputs = decoder_input
    with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
      for layer_idx in range(n_layers):
        outputs = transformer_decoder_layer(
            decoder_input=outputs,
            layer_idx=layer_idx,
            **kwargs)
      outputs = common_layers.layer_preprocess(outputs, hparams)
    return outputs 
Example #13
Source File: transformer_vae_flow_prior_ops.py    From tensor2tensor with Apache License 2.0
def posterior(
    name, hparams, targets, targets_mask, decoder_self_attention_bias,
    **kwargs):
  """Compute mu and sigma for diagonal normal posterior q(z|x,y)."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    decoder_input = drop_2d(targets, hparams.mode, hparams.posterior_2d_dropout)
    decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    decoder_input = tf.nn.dropout(decoder_input,
                                  rate=hparams.layer_prepostprocess_dropout)
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_posterior_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    decoder_output = gops.dense_weightnorm(
        "h2o_out", decoder_output, hparams.latent_size * 2, targets_mask,
        init_scale=0.0, init=False)
    return decoder_output 
Example #14
Source File: residual_shuffle_exchange.py    From tensor2tensor with Apache License 2.0
def residual_shuffle_network(inputs, hparams):
  """Residual Shuffle-Exchange network with weight sharing.

  Args:
    inputs: inputs to the Shuffle-Exchange network. The sequence length should
      be a power of 2.
    hparams: Model configuration.

  Returns:
    tf.Tensor: Outputs of the Shuffle-Exchange last layer
  """
  input_shape = tf.shape(inputs)
  n_bits = tf.log(tf.cast(input_shape[1] - 1, tf.float32)) / tf.log(2.0)
  n_bits = tf.cast(n_bits, tf.int32) + 1

  block_out = inputs

  for k in range(hparams.num_hidden_layers):
    with tf.variable_scope("benes_block_" + str(k), reuse=tf.AUTO_REUSE):
      forward_output = forward_part(block_out, hparams, n_bits)
      block_out = reverse_part(forward_output, hparams, n_bits)

  return RSU("last_layer", hparams.dropout, hparams.mode)(block_out) 
Example #15
Source File: transformer_vae_flow_prior_ops.py    From tensor2tensor with Apache License 2.0
def decoder(name, latents, hparams, decoder_self_attention_bias, **kwargs):
  """Compute final hidden states for p(y|z,x)."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    decoder_input = drop_2d(latents, hparams.mode, hparams.decoder_2d_dropout)
    if hparams.pos_attn:
      decoder_input = gops.positional_attention(
          "pos_attn", decoder_input, decoder_self_attention_bias, hparams)
    else:
      decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    if common_layers.shape_list(latents)[-1] != hparams.hidden_size:
      decoder_input = gops.dense("lat2hid", latents, hparams.hidden_size)
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_decoder_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    batch_size, targets_length = common_layers.shape_list(decoder_output)[:2]
    decoder_output = tf.reshape(
        decoder_output, [batch_size, targets_length, 1, hparams.hidden_size])
    # Expand since t2t expects 4d tensors.
    return decoder_output 
Example #16
Source File: batch_norm_source_op_handler_test.py    From morph-net with Apache License 2.0
def testCreateRegularizer_Sliced(self):
    # Call handler to create regularizer.
    handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
        _GAMMA_THRESHOLD)
    batch_norm_op_slice = orm.OpSlice(self.batch_norm_op, orm.Slice(0, 3))
    regularizer = handler.create_regularizer(batch_norm_op_slice)

    # Verify regularizer is the gamma tensor.
    with self.cached_session():
      # Initialize the gamma tensor to check value equality.
      with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        gamma_tensor = tf.get_variable('conv1/BatchNorm/gamma')
      init = tf.variables_initializer([gamma_tensor])
      init.run()

      # Verify regularizer is the sliced gamma tensor.
      self.assertAllEqual(gamma_tensor.eval()[0:3],
                          regularizer._gamma.eval()) 
Example #17
Source File: transformer_memory.py    From tensor2tensor with Apache License 2.0
def post_attention(self, token, x):
    """Called after self-attention. The memory can be updated here.

    Args:
      token: Data returned by pre_attention, which can be used to carry over
        state related to the current memory operation.
      x: a Tensor of data after self-attention and feed-forward
    Returns:
      a (possibly modified) version of the input x
    """
    with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE):
      depth = common_layers.shape_list(x)[-1]
      actual_batch_size = common_layers.shape_list(x)[0]
      memory_output = tf.gather(token["retrieved_mem"],
                                tf.range(actual_batch_size))
      output = tf.add(tf.layers.dense(x, depth, use_bias=False),
                      tf.layers.dense(memory_output, depth))
      with tf.control_dependencies([output]):
        with tf.control_dependencies([
            self.write(token["x"], token["access_logits"])]):
          return tf.identity(output) 
Example #18
Source File: transformer_glow_layers_ops.py    From tensor2tensor with Apache License 2.0
def dense_weightnorm(
    name, x, n_out, x_mask, init_scale, init, dtype=tf.float32):
  """Dense layer with weight normalization."""
  n_in = common_layers.shape_list(x)[2]
  eps = tf.keras.backend.epsilon()
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    v = tf.get_variable(
        "v", [n_in, n_out], dtype,
        initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
    v = v / tf.norm(v, axis=0, keepdims=True)
    t = tf.matmul(x, v)  # [B, L, n_out]
    mean, var = moments_over_bl(t, x_mask)
    g_init = init_scale / (tf.sqrt(var) + eps)
    g = get_variable_ddi(
        "g", [n_out], g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    b = get_variable_ddi(
        "b", [n_out], -mean*g_init, init,
        initializer=tf.zeros_initializer, dtype=dtype, trainable=True)
    w = g * v
    y = tf.matmul(x, w) + b
    tf.summary.histogram("_g", g)
    return y 
Example #19
Source File: vqa_self_attention.py    From tensor2tensor with Apache License 2.0
def iterative_encoder_decoder(encoder_input,
                              encoder_self_attention_bias,
                              encoder_decoder_attention_bias,
                              query,
                              hparams):
  """Iterative encoder decoder."""
  for _ in range(hparams.num_rec_steps):
    with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
      encoder_output = image_question_encoder(
          encoder_input,
          encoder_self_attention_bias,
          hparams,
          query)

      decoder_output = decoder(
          query,
          encoder_output,
          None,
          encoder_decoder_attention_bias,
          hparams)

      encoder_input = encoder_output
      query = decoder_output

  return decoder_output
Example #20
Source File: transformer_glow_layers.py    From tensor2tensor with Apache License 2.0
def flow_step_glow(name, x, x_mask, split_dims, inverse, init, dtype, **kwargs):
  """One step of flow."""
  conv_fn = multihead_invertible_1x1_conv_np
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    reversible_ops = []
    for _, split_dim in enumerate(split_dims):
      identity_first = True
      reversible_ops += [functools.partial(actnorm, name="actnorm", init=init)]
      if split_dim in "ca":
        multihead_split = "a" if split_dim == "c" else "c"
        reversible_ops += [functools.partial(
            conv_fn, name="conv_{}".format(multihead_split),
            multihead_split=multihead_split, dtype=dtype)]
      reversible_ops += [functools.partial(
          coupling, name="coupling_{}".format(split_dim),
          split_dim=split_dim, identity_first=identity_first, init=init,
          **kwargs)]
    if inverse:
      reversible_ops = reversible_ops[::-1]

    logabsdets = tf.constant(0.0, dtype=dtype)
    for reversible_op in reversible_ops:
      x, logabsdet = reversible_op(x=x, x_mask=x_mask, inverse=inverse)
      logabsdets += logabsdet
    return x, logabsdets 
Example #21
Source File: transformer_glow_layers.py    From tensor2tensor with Apache License 2.0
def flow_level(
    name, x, x_mask, depth, split_dims, prior, inverse, init, dtype, **kwargs):
  """One level of flow."""
  flow_step_fn = flow_step_glow
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    reversible_ops = []
    for step in np.arange(depth):
      reversible_ops += [functools.partial(
          flow_step_fn, name="{}_step".format(step), split_dims=split_dims,
          init=init, dtype=dtype, **kwargs)]
    if prior:
      reversible_ops += [functools.partial(
          coupling, name="{}_prior".format(depth), split_dim="c",
          identity_first=True, init=init, **kwargs)]
    if inverse:
      reversible_ops = reversible_ops[::-1]

    logabsdets = tf.constant(0.0, dtype=dtype)
    for reversible_op in reversible_ops:
      x, logabsdet = reversible_op(x=x, x_mask=x_mask, inverse=inverse)
      logabsdets += logabsdet
    return x, logabsdets 
Example #22
Source File: transformer_glow_layers_ops.py    From tensor2tensor with Apache License 2.0
def dense(name, x, n_out, dtype=tf.float32, init_w=0.05):
  """Dense layer."""
  n_in = common_layers.shape_list(x)[2]
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    w = tf.get_variable(
        "w", [n_in, n_out], dtype,
        initializer=tf.random_normal_initializer(0.0, init_w), trainable=True)
    b = tf.get_variable(
        "b", [n_out,], dtype, initializer=tf.zeros_initializer, trainable=True)
    x = tf.matmul(x, w) + b
    return x 
Example #23
Source File: discretization_test.py    From tensor2tensor with Apache License 2.0
def testDiscreteBottleneckVQ(self):
    hidden_size = 60
    z_size = 4
    x = tf.zeros(shape=[100, 1, hidden_size], dtype=tf.float32)
    with tf.variable_scope("test", reuse=tf.AUTO_REUSE):
      means = tf.get_variable("means",
                              shape=[1, 1, 2**z_size, hidden_size],
                              initializer=tf.constant_initializer(0.),
                              dtype=tf.float32)
      ema_count = []
      ema_count_i = tf.get_variable(
          "ema_count",
          [1, 2**z_size],
          initializer=tf.constant_initializer(0),
          trainable=False)
      ema_count.append(ema_count_i)
      ema_means = []
      with tf.colocate_with(means):
        ema_means_i = tf.get_variable("ema_means",
                                      initializer=means.initialized_value()[0],
                                      trainable=False)
        ema_means.append(ema_means_i)
      x_means_dense, x_means_hot, _, _, _ = discretization.discrete_bottleneck(
          x, hidden_size, z_size, 32, means=means, num_blocks=1,
          ema_means=ema_means, ema_count=ema_count, name="test")
      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        x_means_dense_eval, x_means_hot_eval = sess.run(
            [x_means_dense, x_means_hot])
        means_eval = sess.run(means)
      self.assertEqual(x_means_dense_eval.shape, (100, 1, hidden_size))
      self.assertEqual(x_means_hot_eval.shape, (100, 1))
      self.assertTrue(np.all(means_eval == np.zeros(
          (1, 1, 2**z_size, hidden_size)))) 
Example #24
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, it is
  reshaped into a 2-D matrix whose last dimension is the number of channels.

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]

  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))

  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))

  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))

  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))

  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op 
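The docstring above says the returned assign_op must run after every step so the power-iteration vector "u" keeps tracking the leading singular direction. A minimal usage sketch (illustrative shapes, assuming apply_spectral_norm from above and a tf.compat.v1-style tf are in scope):

kernel = tf.get_variable("kernel", shape=[3, 3, 16, 32])
kernel_sn, u_update = apply_spectral_norm(kernel)
loss = tf.reduce_sum(tf.square(kernel_sn))
opt_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Group the optimizer step with the "u" update so both run each iteration.
train_op = tf.group(opt_step, u_update)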
Example #25
Source File: critic_model.py    From tensor2robot with Apache License 2.0
def q_func(self,
             features,
             scope,
             mode,
             config=None,
             params=None,
             reuse=tf.AUTO_REUSE):
    """Q(state, action) value function.

    We only need to define the q_func and loss_fn to have a proper model.
    For more specialization please overwrite inference_network_fn, model_*_fn.

    Args:
      features: This is the first item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      scope: String specifying variable scope.
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
      config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
        receive what is passed to Estimator in config parameter, or the default
        config (tf.estimator.RunConfig). Allows updating things in your model_fn
        based on  configuration such as num_ps_replicas, or model_dir.
      params: An optional dict of hyper parameters that will be passed into
        input_fn and model_fn. Keys are names of parameters, values are basic
        python types. There are reserved keys for TPUEstimator, including
        'batch_size'.
      reuse: Whether or not to reuse variables under variable scope 'scope'.

    Returns:
      outputs: A {key: Tensor} mapping. The key 'q_predicted' is required.
    """ 
Example #26
Source File: transformer_glow_layers_ops.py    From tensor2tensor with Apache License 2.0
def standard_normal(x, name="normal"):
  """Return standard normal distribution with same shape as x."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    dist = tfp.distributions.Normal(
        loc=tf.zeros_like(x),
        scale=tf.ones_like(x),
        allow_nan_stats=False)
    return dist 
Example #27
Source File: transformer_glow_layers_ops.py    From tensor2tensor with Apache License 2.0
def diagonal_normal(outputs, name="normal"):
  """Split outputs into mu and log_sigma and return z."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    loc, log_scale = tf.split(outputs, 2, axis=-1)
    scale = tf.exp(log_scale)
    dist = tfp.distributions.Normal(
        loc=loc,
        scale=scale + tf.keras.backend.epsilon(),
        allow_nan_stats=False)
    return dist 
Example #28
Source File: transformer_memory.py    From tensor2tensor with Apache License 2.0
def pre_attention(self, segment_number, query_antecedent,
                    memory_antecedent, bias):
    """Called prior to self-attention, to incorporate memory items.

    Args:
      segment_number: an integer Tensor with shape [batch]
      query_antecedent: a Tensor with shape [batch, length_q, channels]
      memory_antecedent: must be None. Attention normally allows this to be a
        Tensor with shape [batch, length_m, channels], but we currently only
        support memory for decoder-side self-attention.
      bias: bias Tensor (see attention_bias())
    Returns:
      (data, new_query_antecedent, new_memory_antecedent, new_bias)
    """
    with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE):
      assert memory_antecedent is None, "We only support language modeling"
      with tf.control_dependencies([
          tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]):
        difference = self.batch_size - tf.size(segment_number)
        segment_number = tf.pad(segment_number, [[0, difference]])
        reset_op = self.reset(tf.reshape(tf.where(
            tf.less(segment_number, self.segment_number)), [-1]))
      memory_results = {}
      with tf.control_dependencies([reset_op]):
        with tf.control_dependencies([
            self.update_segment_number(segment_number)]):
          x = tf.pad(query_antecedent, [
              [0, difference], [0, 0], [0, 0]])
          access_logits, retrieved_mem = self.read(x)
      memory_results["x"] = x
      memory_results["access_logits"] = access_logits
      memory_results["retrieved_mem"] = retrieved_mem
      return memory_results, query_antecedent, memory_antecedent, bias 
Example #29
Source File: common_video.py    From tensor2tensor with Apache License 2.0
def basic_lstm(inputs, state, num_units, name=None):
  """Basic LSTM."""
  input_shape = common_layers.shape_list(inputs)
  # reuse parameters across time-steps.
  cell = tf.nn.rnn_cell.BasicLSTMCell(
      num_units, name=name, reuse=tf.AUTO_REUSE)
  if state is None:
    state = cell.zero_state(input_shape[0], tf.float32)
  outputs, new_state = cell(inputs, state)
  return outputs, new_state 
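Because the cell is built with reuse=tf.AUTO_REUSE, calling basic_lstm once per time-step with the same name resolves to a single set of LSTM weights, which is what the inline comment means by reusing parameters across time-steps. A minimal unrolling sketch (illustrative shapes, assuming basic_lstm from above is in scope):

frames = tf.zeros([8, 10, 32])  # (batch, time, features)
state = None
outputs = []
for t in range(10):
  # Each call constructs a new cell object, but AUTO_REUSE maps it onto
  # the same underlying variables under the "lstm" name.
  out, state = basic_lstm(frames[:, t, :], state, num_units=64, name="lstm")
  outputs.append(out)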
Example #30
Source File: regression_model.py    From tensor2robot with Apache License 2.0
def a_func(self,
             features,
             scope,
             mode,
             config=None,
             params=None,
             reuse=tf.AUTO_REUSE):
    """A(state) regression function.

    This function can return a stochastic or a deterministic tensor.

    We only need to define the a_func and loss_fn to have a proper model.
    For more specialization please overwrite inference_network_fn, model_*_fn.

    Args:
      features: This is the first item returned from the input_fn and parsed by
        tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
        requirements of the self.get_feature_specification.
      scope: String specifying variable scope.
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
      config: Optional configuration object. Will receive what is passed to
        Estimator in config parameter, or the default config. Allows updating
        things in your model_fn based on configuration such as num_ps_replicas,
        or model_dir.
      params: An optional dict of hyper parameters that will be passed into
        input_fn and model_fn. Keys are names of parameters, values are basic
        python types. There are reserved keys for TPUEstimator, including
        'batch_size'.
      reuse: Whether or not to reuse variables under variable scope 'scope'.

    Returns:
      outputs: A {key: Tensor} mapping. The key 'inference_output' is required.
    """