Python tensorflow.compat.v1.add_to_collection() Examples

The following are 19 code examples of tensorflow.compat.v1.add_to_collection(), collected from open-source projects. The originating project and source file are listed above each example. You may also want to check out all available functions/classes of the module tensorflow.compat.v1.
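In graph-mode TensorFlow 1.x, tf.compat.v1.add_to_collection(name, value) appends a value to the named collection on the default graph, and tf.compat.v1.get_collection(name) returns everything stored under that key, in insertion order. A minimal, self-contained sketch (the collection name 'my_losses' and the constant tensors are illustrative only):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # collections are a graph-mode (TF1) concept

a = tf.constant(1.0, name='loss_a')
b = tf.constant(2.0, name='loss_b')

# Append tensors to a named collection on the default graph.
tf.add_to_collection('my_losses', a)
tf.add_to_collection('my_losses', b)

# Retrieve everything stored under that key and sum it.
total = tf.add_n(tf.get_collection('my_losses'))

with tf.Session() as sess:
  print(sess.run(total))  # 3.0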
Example #1
Source File: data_helpers.py    From magenta with Apache License 2.0
def provide_data(self, batch_size):
    """Returns a batch of data and one-hot labels."""
    with tf.name_scope('inputs'):
      with tf.device('/cpu:0'):
        dataset = self.dataset.provide_dataset()
        dataset = dataset.shuffle(buffer_size=1000)
        dataset = dataset.map(self._map_fn, num_parallel_calls=4)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(1)

        iterator = dataset.make_initializable_iterator()
        tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                             iterator.initializer)

        data, one_hot_labels = iterator.get_next()
        data.set_shape([batch_size, None, None, None])
        one_hot_labels.set_shape([batch_size, None])
        return data, one_hot_labels 
Example #2
Source File: deep_cnn.py    From privacy with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
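Example #8 below shows the matching consumer of this 'losses' collection: the weight-decay terms registered here are summed together with the cross-entropy loss via tf.add_n(tf.get_collection('losses')).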
Example #3
Source File: utils.py    From Object_Detection_Tracking with Apache License 2.0
def call(self, *args, **kwargs):
    outputs = super(BatchNormalization, self).call(*args, **kwargs)
    # A temporary hack for tf1 compatibility with keras batch norm.
    for u in self.updates:
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)
    return outputs 
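Ops registered under tf.GraphKeys.UPDATE_OPS (such as the batch-norm moving-average updates above) are conventionally forced to run before each training step. A hedged sketch of that consumption pattern, with a toy loss standing in for a real model:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 4])
w = tf.get_variable('w', shape=[4, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

# Fetch whatever has been added to UPDATE_OPS (batch-norm updates, etc.) ...
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# ... and make the train op depend on them so they run every step.
with tf.control_dependencies(update_ops):
  train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)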
Example #4
Source File: dataset_builder_test.py    From models with Apache License 2.0
def get_iterator_next_for_testing(dataset, is_tf2):
  iterator = dataset.make_initializable_iterator()
  if not is_tf2:
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator.get_next() 
Example #5
Source File: dataset_builder.py    From models with Apache License 2.0
def make_initializable_iterator(dataset):
  """Creates an iterator, and initializes tables.

  This is useful in cases where make_one_shot_iterator wouldn't work because
  the graph contains a hash table that needs to be initialized.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.data.Iterator`.
  """
  iterator = dataset.make_initializable_iterator()
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator 
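The helper above only registers the initializer; downstream code typically runs it through tf.compat.v1.tables_initializer(), which groups every op in the TABLE_INITIALIZERS collection. A minimal sketch of that consumption side (not from the models repository):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
next_element = iterator.get_next()

with tf.Session() as sess:
  # tables_initializer() groups every op in TABLE_INITIALIZERS, so the
  # iterator registered above is initialized here.
  sess.run(tf.tables_initializer())
  print(sess.run(next_element))  # 1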
Example #6
Source File: model_lib_tf1_test.py    From models with Apache License 2.0
def _make_initializable_iterator(dataset):
  """Creates an iterator, and initializes tables.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.data.Iterator`.
  """
  iterator = tf.data.make_initializable_iterator(dataset)
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator 
Example #7
Source File: inputs_test.py    From models with Apache License 2.0
def _make_initializable_iterator(dataset):
  """Creates an iterator, and initializes tables.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.data.Iterator`.
  """
  iterator = tf.data.make_initializable_iterator(dataset)
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator 
Example #8
Source File: deep_cnn.py    From privacy with Apache License 2.0
def loss_fun(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """

  # Calculate the cross entropy between labels and predictions
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='cross_entropy_per_example')

  # Calculate the average cross entropy loss across the batch.
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

  # Add to TF collection for losses
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss') 
Example #9
Source File: abstract_model.py    From tensor2robot with Apache License 2.0
def create_optimizer(self):
    """Create the optimizer used for training.

    This function optionally wraps the base optimizer with SyncReplicasOptimizer
    (to aggregate gradients across worker replicas).

    Returns:
      An instance of `tf.train.Optimizer`.
    """
    config = self.get_run_config()
    optimizer = self._create_optimizer_fn()
    if self._use_avg_model_params:
      optimizer = optimizers.create_moving_average_optimizer(optimizer)

      def create_swapping_saver_scaffold(saver=None):
        saver = optimizers.create_swapping_saver(optimizer)
        tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
        return tf.train.Scaffold(saver=saver)

      self._scaffold_fn = create_swapping_saver_scaffold
    if (self._use_sync_replicas_optimizer and (not self.is_device_tpu) and
        config is not None and config.num_worker_replicas > 1):
      optimizer = tf.train.SyncReplicasOptimizer(
          optimizer,
          replicas_to_aggregate=config.num_worker_replicas - 1,
          total_num_replicas=config.num_worker_replicas)
      self._sync_replicas_optimizer = optimizer
    return optimizer 
Example #10
Source File: t2r_models.py    From tensor2robot with Apache License 2.0
def create_optimizer(self):
    """Create the optimizer and scaffold used for training."""
    config = self.get_run_config()
    original_optimizer = self._create_optimizer_fn()

    # Override self.scaffold_fn with a custom scaffold_fn that uses the
    # swapping saver required for MovingAverageOptimizer.
    use_avg_model_params = self.hparams.use_avg_model_params

    def scaffold_fn():
      """Create a scaffold object."""
      # MovingAverageOptimizer requires Swapping Saver.
      scaffold = tf.train.Scaffold()
      if use_avg_model_params:
        saver = original_optimizer.swapping_saver(
            keep_checkpoint_every_n_hours=1)
      else:
        saver = None
      scaffold = tf.train.Scaffold(saver=saver, copy_from_scaffold=scaffold)
      # The saver needs to be added to the graph for td3 hooks.
      tf.add_to_collection(tf.GraphKeys.SAVERS, scaffold.saver)
      return scaffold

    self._scaffold_fn = scaffold_fn
    optimizer = original_optimizer
    if (self._use_sync_replicas_optimizer and
        config is not None and config.num_worker_replicas > 1):
      optimizer = tf.train.SyncReplicasOptimizer(
          optimizer,
          replicas_to_aggregate=config.num_worker_replicas - 1,
          total_num_replicas=config.num_worker_replicas)
    if self.is_device_gpu:
      optimizer = replicate_model_fn.TowerOptimizer(optimizer)
    return optimizer 
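Both tensor2robot examples register their saver under tf.GraphKeys.SAVERS so that checkpointing machinery can find it later. A hedged sketch of that lookup, assuming a single toy variable; tf.train.CheckpointSaverHook, for instance, falls back to this collection when given neither a saver nor a scaffold and expects exactly one entry there:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

v = tf.get_variable('v', shape=[], initializer=tf.zeros_initializer())
saver = tf.train.Saver()
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)

# Consumers look the saver up by collection key rather than by reference.
savers = tf.get_collection(tf.GraphKeys.SAVERS)
assert len(savers) == 1 and savers[0] is saver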
Example #11
Source File: utils.py    From Object_Detection_Tracking with Apache License 2.0
def scalar(name, tensor):
  """Stores a (name, Tensor) tuple in a custom collection."""
  logging.info('Adding summary {}'.format(Pair(name, tensor)))
  tf.add_to_collection('edsummaries', Pair(name, tf.reduce_mean(tensor))) 
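Pair here is a small (name, value) namedtuple defined elsewhere in that utils module. A hedged sketch of how such a custom collection could later be drained into regular scalar summaries; everything below except the collection name is illustrative:

import collections

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

Pair = collections.namedtuple('Pair', ['name', 'value'])

tf.add_to_collection('edsummaries', Pair('train/loss', tf.constant(0.5)))

# Later (e.g. in a summary host call), emit one scalar summary per entry.
for pair in tf.get_collection('edsummaries'):
  tf.summary.scalar(pair.name, pair.value)
merged = tf.summary.merge_all()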
Example #12
Source File: preprocessing.py    From benchmarks with Apache License 2.0
def build_multi_device_iterator(self, batch_size, num_splits, cpu_device,
                                  params, gpu_devices, dataset, doing_eval):
    """Creates a MultiDeviceIterator."""
    assert self.supports_datasets()
    assert num_splits == len(gpu_devices)
    with tf.name_scope('batch_processing'):
      if doing_eval:
        subset = 'validation'
      else:
        subset = 'train'
      batch_size_per_split = batch_size // num_splits
      ds = self.create_dataset(
          batch_size,
          num_splits,
          batch_size_per_split,
          dataset,
          subset,
          train=(not doing_eval),
          datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
          num_threads=params.datasets_num_private_threads,
          datasets_use_caching=params.datasets_use_caching,
          datasets_parallel_interleave_cycle_length=(
              params.datasets_parallel_interleave_cycle_length),
          datasets_sloppy_parallel_interleave=(
              params.datasets_sloppy_parallel_interleave),
          datasets_parallel_interleave_prefetch=(
              params.datasets_parallel_interleave_prefetch))
      multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
          ds,
          gpu_devices,
          source_device=cpu_device,
          max_buffer_size=params.multi_device_iterator_max_buffer_size)
      tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                           multi_device_iterator.initializer)
      return multi_device_iterator 
Example #13
Source File: utils.py    From Object_Detection_Tracking with Apache License 2.0
def call(self, *args, **kwargs):
    outputs = super(TpuBatchNormalization, self).call(*args, **kwargs)
    # A temporary hack for tf1 compatibility with keras batch norm.
    for u in self.updates:
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)
    return outputs 
Example #14
Source File: discretization_test.py    From tensor2tensor with Apache License 2.0
def testGumbelSoftmaxDiscreteBottleneck(self):
    x = tf.constant([[0, 0.9, 0], [0.8, 0., 0.]], dtype=tf.float32)
    tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, tf.constant(1))
    x_means_hot, _ = discretization.gumbel_softmax_discrete_bottleneck(
        x, bottleneck_bits=2)
    self.evaluate(tf.global_variables_initializer())
    x_means_hot_eval = self.evaluate(x_means_hot)
    self.assertEqual(np.shape(x_means_hot_eval), (2, 4)) 
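For context, a minimal sketch (not from tensor2tensor) of why injecting a constant into the GLOBAL_STEP collection works: tf.compat.v1.train.get_global_step() resolves the step tensor by looking it up in that collection:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, tf.constant(1, dtype=tf.int64))

# get_global_step() finds the tensor via the GLOBAL_STEP collection.
step = tf.train.get_global_step()
with tf.Session() as sess:
  print(sess.run(step))  # 1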
Example #15
Source File: savp.py    From tensor2tensor with Apache License 2.0
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
                       scope):
    """Pad, apply 3-D convolution and leaky relu."""
    padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]

    # tf.nn.conv3d accepts a list of 5 values for strides
    # with first and last value equal to 1
    if isinstance(strides, numbers.Integral):
      strides = [strides] * 3
    strides = [1] + strides + [1]

    # Filter_shape = [K, K, K, num_input, num_output]
    filter_shape = (
        [kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])

    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
      conv_filter = tf.get_variable(
          "conv_filter", shape=filter_shape,
          initializer=tf.truncated_normal_initializer(stddev=0.02))

      if self.hparams.use_spectral_norm:
        conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
        if self.is_training:
          tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)

      padded = tf.pad(activations, padding)
      convolved = tf.nn.conv3d(
          padded, conv_filter, strides=strides, padding="VALID")
      rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
    return rectified 
Example #16
Source File: glow.py    From tensor2tensor with Apache License 2.0
def body(self, features):
    exp_coupling = ["affine", "additive"]
    if self.hparams.coupling not in exp_coupling:
      raise ValueError("Expected hparams.coupling to be in %s, got %s" %
                       (exp_coupling, self.hparams.coupling))
    if self.is_training:
      init_features = self.create_init_batch(features)
      init_op = self.objective_tower(init_features, init=True)
      init_op = tf.Print(
          init_op, [init_op], message="Triggering data-dependent init.",
          first_n=20)
      tf.add_to_collection("glow_init_op", init_op)
    train_op = self.objective_tower(features, init=False)
    return tf.zeros_like(features["targets"]), {"training": train_op} 
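A hedged sketch (not part of the glow model) of how a training loop might consume such a one-off op stored under a custom key, with tf.print standing in for the real data-dependent init:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Stand-in for the data-dependent init op registered above.
init_op = tf.print('Triggering data-dependent init.')
tf.add_to_collection('glow_init_op', init_op)

with tf.Session() as sess:
  # Run everything stored under the custom key exactly once before training.
  sess.run(tf.get_collection('glow_init_op'))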
Example #17
Source File: convnet_builder.py    From benchmarks with Apache License 2.0
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    # We make this function as similar as possible to the
    # tf.contrib.layers.batch_norm, to minimize the differences between using
    # layers and not using layers.
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,
                             initializer=tf.zeros_initializer())
    if use_scale:
      gamma = self.get_variable('gamma', [num_channels], tf.float32,
                                tf.float32, initializer=tf.ones_initializer())
    else:
      gamma = tf.constant(1.0, tf.float32, [num_channels])
    # For moving variables, we use tf.get_variable instead of self.get_variable,
    # since self.get_variable returns the result of tf.cast which we cannot
    # assign to.
    moving_mean = tf.get_variable('moving_mean', [num_channels],
                                  tf.float32,
                                  initializer=tf.zeros_initializer(),
                                  trainable=False)
    moving_variance = tf.get_variable('moving_variance', [num_channels],
                                      tf.float32,
                                      initializer=tf.ones_initializer(),
                                      trainable=False)
    if self.phase_train:
      bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, epsilon=epsilon,
          data_format=self.data_format, is_training=True)
      mean_update = moving_averages.assign_moving_average(
          moving_mean, batch_mean, decay=decay, zero_debias=False)
      variance_update = moving_averages.assign_moving_average(
          moving_variance, batch_variance, decay=decay, zero_debias=False)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
      bn, _, _ = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, mean=moving_mean,
          variance=moving_variance, epsilon=epsilon,
          data_format=self.data_format, is_training=False)
    return bn 
Example #18
Source File: preprocessing.py    From benchmarks with Apache License 2.0
def create_iterator(self, ds):
    ds_iterator = tf.data.make_initializable_iterator(ds)
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                         ds_iterator.initializer)
    return ds_iterator 
Example #19
Source File: ae.py    From magenta with Apache License 2.0
def train_op(batch, hparams, config_name):
  """Define a training op, including summaries and optimization.

  Args:
    batch: Dictionary produced by NSynthDataset.
    hparams: Hyperparameters dictionary.
    config_name: Name of config module.

  Returns:
    train_op: A complete iteration of training with summaries.
  """
  config = utils.get_module("baseline.models.ae_configs.%s" % config_name)

  if hparams.raw_audio:
    x = batch["audio"]
    # Add height and channel dims
    x = tf.expand_dims(tf.expand_dims(x, 1), -1)
  else:
    x = batch["spectrogram"]

  # Define the model
  with tf.name_scope("Model"):
    z = config.encode(x, hparams)
    xhat = config.decode(z, batch, hparams)

  # For interpolation
  tf.add_to_collection("x", x)
  tf.add_to_collection("pitch", batch["pitch"])
  tf.add_to_collection("z", z)
  tf.add_to_collection("xhat", xhat)

  # Compute losses
  total_loss = compute_mse_loss(x, xhat, hparams)

  # Apply optimizer
  with tf.name_scope("Optimizer"):
    global_step = tf.get_variable(
        "global_step", [],
        tf.int64,
        initializer=tf.constant_initializer(0),
        trainable=False)
    optimizer = tf.train.AdamOptimizer(hparams.learning_rate, hparams.adam_beta)
    train_step = slim.learning.create_train_op(total_loss,
                                               optimizer,
                                               global_step=global_step)

  return train_step
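A minimal sketch (not from magenta) of how tensors registered "for interpolation" like this can be recovered later by key, for example in a separate evaluation script after rebuilding or importing the graph:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

z = tf.zeros([1, 16], name='z')
tf.add_to_collection('z', z)

# After tf.train.import_meta_graph() (or in the same graph, as here), the
# tensor is retrieved from the collection rather than by Python reference.
z_restored = tf.get_collection('z')[0]
assert z_restored is z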