Python tensorflow.add_to_collection() Examples

The following are 30 code examples of tensorflow.add_to_collection(), drawn from open-source projects. The original project and source file for each example are listed above it. You may also want to check out the other available functions and classes of the tensorflow module.
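In TensorFlow 1.x, tf.add_to_collection(name, value) appends a value to the graph collection identified by name, and tf.get_collection(name) returns everything registered under that key. Most of the examples below use this pattern to accumulate per-layer loss terms and sum them into a total loss. Here is a minimal, self-contained sketch of that pattern; the collection name 'losses' and the constant tensors are purely illustrative and not taken from any example below.

import tensorflow as tf  # TensorFlow 1.x graph-mode API

# Register two loss terms under the same graph collection key.
cross_entropy = tf.constant(0.7, name='cross_entropy')
weight_decay = tf.constant(0.1, name='weight_decay')
tf.add_to_collection('losses', cross_entropy)
tf.add_to_collection('losses', weight_decay)

# Retrieve every term registered under that key and sum them into a total loss.
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

with tf.Session() as sess:
    print(sess.run(total_loss))  # prints 0.8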
Example #1
Source File: cifar10.py    From ml with Apache License 2.0
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss') 
Example #2
Source File: cifar10_reusable.py    From blackbox-attacks with MIT License
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #3
Source File: cifar10_reusable.py    From blackbox-attacks with MIT License
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss') 
Example #4
Source File: rat_spn.py    From supair with MIT License
def variable_with_weight_decay(name, shape, stddev, wd, mean=0.0, values=None):
    """Get a TF variable with optional l2-loss attached."""
    if values is None:
        initializer = tf.truncated_normal_initializer(mean=mean, stddev=stddev, dtype=tf.float32)
    else:
        initializer = tf.constant_initializer(values)
    var = tf.get_variable(
        name,
        shape,
        initializer=initializer,
        dtype=tf.float32)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
        tf.add_to_collection('weight_losses', weight_decay)

    return var 
Example #5
Source File: utils.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        assert (len(tf.get_variable_scope().name.split('/')) == 2)

        w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
        b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
        weight_decay_fc = 3e-4

        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            weight_loss_dict[w] = weight_decay_fc
            weight_loss_dict[b] = 0.0

            tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)

        return tf.nn.bias_add(tf.matmul(x, w), b) 
Example #6
Source File: model.py    From rgn with MIT License
def _drmsds(config, coordinates, targets, weights):
    """ Computes reduced weighted dRMSD loss (as specified by weights) 
        between predicted tertiary structures and targets. """

    # lose end residues if desired
    if config['num_edge_residues'] > 0:
        coordinates = coordinates[:-(config['num_edge_residues'] * NUM_DIHEDRALS)]

    # if only c_alpha atoms are requested then subsample
    if config['atoms'] == 'c_alpha': # starts at 1 because c_alpha atoms are the second atoms
        coordinates = coordinates[1::NUM_DIHEDRALS] # [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]
        targets     =     targets[1::NUM_DIHEDRALS] # [NUM_STEPS - NUM_EDGE_RESIDUES, BATCH_SIZE, NUM_DIMENSIONS]
                  
    # compute per structure dRMSDs
    drmsds = drmsd(coordinates, targets, weights, name='drmsds') # [BATCH_SIZE]

    # add to relevant collections for summaries, etc.
    if config['log_model_summaries']: tf.add_to_collection(config['name'] + '_drmsdss', drmsds)

    return drmsds 
Example #7
Source File: utils.py    From HardRLWithYoutube with MIT License
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        assert (len(tf.get_variable_scope().name.split('/')) == 2)

        w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
        b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
        weight_decay_fc = 3e-4

        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            weight_loss_dict[w] = weight_decay_fc
            weight_loss_dict[b] = 0.0

            tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)

        return tf.nn.bias_add(tf.matmul(x, w), b) 
Example #8
Source File: utils.py    From lirpg with MIT License
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        assert (len(tf.get_variable_scope().name.split('/')) == 2)

        w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
        b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
        weight_decay_fc = 3e-4

        if weight_loss_dict is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
            weight_loss_dict[w] = weight_decay_fc
            weight_loss_dict[b] = 0.0

            tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)

        return tf.nn.bias_add(tf.matmul(x, w), b) 
Example #9
Source File: model.py    From rgn with MIT License
def _alphabet(mode, config):
    """ Creates alphabet for alphabetized dihedral prediction. """

    # prepare initializer
    if config['alphabet'] is not None:
        alphabet_initializer = tf.constant_initializer(config['alphabet']) # user-defined alphabet
    else:
        alphabet_initializer = dict_to_init(config['alphabet_init'], config['alphabet_seed']) # random initialization

    # alphabet variable, possibly trainable
    alphabet = tf.get_variable(name='alphabet',
                               shape=[config['alphabet_size'], NUM_DIHEDRALS],
                               initializer=alphabet_initializer,
                               trainable=config['alphabet_trainable']) # [OUTPUT_SIZE, NUM_DIHEDRALS]
    if mode == 'training' and config['alphabet_trainable']: 
        tf.add_to_collection(tf.GraphKeys.WEIGHTS, alphabet) # add to WEIGHTS collection if trainable

    return alphabet 
Example #10
Source File: cifar10.py    From ml with Apache License 2.0
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss') 
Example #11
Source File: variables.py    From DOTA_models with Apache License 2.0
def add_variable(var, restore=True):
  """Adds a variable to the MODEL_VARIABLES collection.

    Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
  Args:
    var: a variable.
    restore: whether the variable should be added to the
      VARIABLES_TO_RESTORE collection.

  """
  collections = [MODEL_VARIABLES]
  if restore:
    collections.append(VARIABLES_TO_RESTORE)
  for collection in collections:
    if var not in tf.get_collection(collection):
      tf.add_to_collection(collection, var) 
Example #12
Source File: losses.py    From DOTA_models with Apache License 2.0
def l2_loss(tensor, weight=1.0, scope=None):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for name_scope.

  Returns:
    the L2 loss op.
  """
  with tf.name_scope(scope, 'L2Loss', [tensor]):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss 
Example #13
Source File: model_io.py    From BERT with Apache License 2.0
def get_ema_hooks(self, train_op, var_list, params_moving_average_decay, scope, mode,
                  **kargs):
    self.ema = model_io_utils.track_params_averages(
        params_moving_average_decay,
        scope,
        **kargs)
    if mode == tf.estimator.ModeKeys.TRAIN:
        with tf.control_dependencies([train_op]):
            if not var_list:
                tvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            else:
                tvars = var_list
            params_averages_op = self.ema.apply(tvars)
        return params_averages_op, None
        # tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, tf.group(params_averages_op))
    elif mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):
        hooks = model_io_utils.RestoreParametersAverageValues(self.ema)
        return None, hooks
    else:
        return None, None 
Example #14
Source File: cifar10.py    From ml with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #15
Source File: cifar10.py    From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #16
Source File: cifar10.py    From DOTA_models with Apache License 2.0
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss') 
Example #17
Source File: cifar10.py    From ml with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #18
Source File: deep_cnn.py    From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #19
Source File: repo_distillation_utils.py    From BERT with Apache License 2.0
def MHGD_embedding(student_feature, teacher_feature):
	with tf.variable_scope('MHGD'):
		with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected], trainable = True,
											weights_regularizer=None, variables_collections = [tf.GraphKeys.GLOBAL_VARIABLES,'MHA']):
			with tf.contrib.framework.arg_scope([tf.contrib.layers.batch_norm], activation_fn=None, trainable = True,
												param_regularizers = None, variables_collections=[tf.GraphKeys.GLOBAL_VARIABLES,'MHA']):
				V_T = teacher_feature
				V_S = student_feature
				B, D2 = student_feature.get_shape().as_list()
				G_T = Attention_head(V_T, V_T, D2, num_head, 'Attention', is_training = True)
				V_T_ = Estimator(V_T, G_T, D, num_head, 'Estimator')
				tf.add_to_collection('MHA_loss', tf.reduce_mean(1-tf.reduce_sum(V_T_*V_T, -1)) )
				
				G_T = Attention_head(V_T, V_T, D2, num_head, 'Attention', reuse = True)
				G_S = Attention_head(V_S, V_S, D2, num_head, 'Attention', reuse = True)

				mean = tf.reduce_mean(G_T, -1, keepdims=True)
				G_T = tf.tanh(G_T-mean)
				G_S = tf.tanh(G_S-mean)
		   
				GNN_losses = kld_loss(G_S, G_T)
		return GNN_losses 
Example #20
Source File: layers.py    From MobileNet with Apache License 2.0
def __variable_with_weight_decay(kernel_shape, initializer, wd):
    """
    Create a variable with L2 Regularization (Weight Decay)
    :param kernel_shape: the size of the convolving weight kernel.
    :param initializer: The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param wd:(weight decay) L2 regularization parameter.
    :return: The initialized kernel weights. The L2 loss, if specified, is added to the regularization loss collection.
    """
    w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer)

    collection_name = tf.GraphKeys.REGULARIZATION_LOSSES
    if wd and (not tf.get_variable_scope().reuse):
        weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss')
        tf.add_to_collection(collection_name, weight_decay)
    return w


# Summaries for variables 
Example #21
Source File: sn.py    From HyperGAN with MIT License
def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
  # Usually num_iters = 1 will be enough
  W_shape = W.shape.as_list()
  W_reshaped = tf.reshape(W, [-1, W_shape[-1]])
  if u is None:
    u = tf.get_variable("u", [1, W_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)
  def power_iteration(i, u_i, v_i):
    v_ip1 = _l2normalize(tf.matmul(u_i, tf.transpose(W_reshaped)))
    u_ip1 = _l2normalize(tf.matmul(v_ip1, W_reshaped))
    return i + 1, u_ip1, v_ip1
  _, u_final, v_final = tf.while_loop(
    cond=lambda i, _1, _2: i < num_iters,
    body=power_iteration,
    loop_vars=(tf.constant(0, dtype=tf.int32),
               u, tf.zeros(dtype=tf.float32, shape=[1, W_reshaped.shape.as_list()[0]]))
  )
  if update_collection is None:
    warnings.warn('Setting update_collection to None will cause u to be updated on every execution of W. '
                  'This may be undesirable. Please consider using an update collection instead.')
    sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
    # sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
    W_bar = W_reshaped / sigma
    with tf.control_dependencies([u.assign(u_final)]):
      W_bar = tf.reshape(W_bar, W_shape)
  else:
    sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
    # sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
    W_bar = W_reshaped / sigma
    W_bar = tf.reshape(W_bar, W_shape)
    # Put NO_OPS to not update any collection. This is useful for the second call of discriminator if the update_op
    # has already been collected on the first call.
    if update_collection != NO_OPS:
      tf.add_to_collection(update_collection, u.assign(u_final))
  if with_sigma:
    return W_bar, sigma
  else:
    return W_bar 
Example #22
Source File: model.py    From cloudml-samples with Apache License 2.0
def build_prediction_graph(self):
    """Builds prediction graph and registers appropriate endpoints."""
    examples = tf.placeholder(tf.string, shape=(None,))
    features = {
        'image': tf.FixedLenFeature(
            shape=[IMAGE_PIXELS], dtype=tf.float32),
        'key': tf.FixedLenFeature(
            shape=[], dtype=tf.string),
    }

    parsed = tf.parse_example(examples, features)
    images = parsed['image']
    keys = parsed['key']

    # Build a Graph that computes predictions from the inference model.
    logits = inference(images, self.hidden1, self.hidden2)
    softmax = tf.nn.softmax(logits)
    prediction = tf.argmax(softmax, 1)

    # Mark the inputs and the outputs
    # Marking the input tensor with an alias with suffix _bytes. This is to
    # indicate that this tensor value is raw bytes and will be base64 encoded
    # over HTTP.
    # Note that any output tensor marked with an alias with suffix _bytes, shall
    # be base64 encoded in the HTTP response. To get the binary value, it
    # should be base64 decoded.
    tf.add_to_collection('inputs',
                         json.dumps({'examples_bytes': examples.name}))
    tf.add_to_collection('outputs',
                         json.dumps({
                             'key': keys.name,
                             'prediction': prediction.name,
                             'scores': softmax.name
                         })) 
Example #23
Source File: rat_spn.py    From supair with MIT License
def __init__(self, prod_vectors, num_sums, args, dropout_op=None, name="", given_weights=None):
        super().__init__(name)
        self.inputs = prod_vectors
        self.size = num_sums

        self.scope = self.inputs[0].scope

        for inp in self.inputs:
            assert set(inp.scope) == set(self.scope)

        self.dropout_op = dropout_op
        self.args = args
        num_inputs = sum([v.size for v in prod_vectors])
        self.params = variable_with_weight_decay(name + '_weights',
                                                 shape=[1, num_inputs, num_sums],
                                                 stddev=5e-1,
                                                 wd=None,
                                                 values=given_weights)
        if args.linear_sum_weights:
            if args.normalized_sums:
                self.weights = tf.nn.softmax(self.params, 1)
            else:
                self.weights = self.params ** 2
        else:
            if args.normalized_sums:
                self.weights = tf.nn.log_softmax(self.params, 1)
                if args.sum_weight_l2:
                    exp_weights = tf.exp(self.weights)
                    weight_decay = tf.multiply(tf.nn.l2_loss(exp_weights), args.sum_weight_l2)
                    tf.add_to_collection('losses', weight_decay)
                    tf.add_to_collection('weight_losses', weight_decay)
            else:
                self.weights = self.params

        self.max_child_idx = None 
Example #24
Source File: layers.py    From variance-networks with Apache License 2.0
def pt_conv_2d(input_tensor, filter_shape, input_channels, output_channels, padding, name, stochastic=True,
               with_bias=True, reuse=False):
    with tf.variable_scope(name) as scope:
        kernel = tf.get_variable('kernel', [filter_shape[0], filter_shape[1], input_channels, output_channels],
                                 initializer=tf.contrib.layers.xavier_initializer(seed=322), dtype=tf.float32,
                                 trainable=True)
        log_alpha = tf.get_variable('log_alpha', [], initializer=tf.constant_initializer(-10.0), dtype=tf.float32,
                                    trainable=True)
        log_alpha = tf.clip_by_value(log_alpha, -20.0, 20.0)

        if not reuse:
            # computing reg
            k1, k2, k3 = 0.63576, 1.8732, 1.48695
            C = -k1
            mdkl = k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C
            kl = -tf.reduce_sum(mdkl) * tf.reduce_prod(tf.cast(kernel.get_shape(), tf.float32))
            tf.add_to_collection('kl_loss', kl)

        # computing output
        conved_mu = tf.nn.conv2d(input_tensor, kernel, [1, 1, 1, 1], padding=padding)
        conved_si = tf.sqrt(tf.nn.conv2d(input_tensor * input_tensor,
                                         tf.exp(log_alpha) * kernel * kernel,
                                         [1, 1, 1, 1], padding=padding)+1e-16)
        output = conved_mu
        if stochastic:
            output += tf.random_normal(conved_mu.shape, mean=0, stddev=1) * conved_si
        if with_bias:
            biases = tf.get_variable('biases', output_channels, tf.float32, tf.constant_initializer(0.0))
            output = tf.nn.bias_add(output, biases)

        # summaries
        if not reuse:
            if with_bias:
                error = 0.5*(1.0+tf.erf((-conved_mu-biases)/tf.sqrt(2.0)/conved_si))
            else:
                error = 0.5*(1.0+tf.erf((-conved_mu)/tf.sqrt(2.0)/conved_si))
            tf.summary.scalar('error', tf.reduce_sum(error))
            tf.summary.scalar('log_alpha', log_alpha)
            tf.add_to_collection('log_alpha', log_alpha)

    return output 
Example #25
Source File: rnn.py    From glas with Apache License 2.0
def collect_named_outputs(self, tensor, outputs_collections=None):
        """ Wrapper for collect_named_outputs """
        for outputs_collection in outputs_collections or []:
            tf.add_to_collection(outputs_collection, tensor)

        return tensor 
Example #26
Source File: reader.py    From glas with Apache License 2.0
def initialize_latent_weights(config, dataset):
    """ Initialize the latent loss weights """
    latent_weights = tf.minimum(1.0 - exponential_decay(
        config.batch_size, config.latent_weights_growth_step,
        1.0 - config.latent_weights, 1.0 - config.latent_weights_growth_rate,
        dataset, staircase=False), config.latent_weights_maximum, name='latent_weights')
    tf.add_to_collection(graph_utils.GraphKeys.TRAINING_PARAMETERS, latent_weights)

    return latent_weights 
Example #27
Source File: sample.py    From glas with Apache License 2.0
def attend(self, tensor):
        """ Use attention over the latent space """
        if self.attention is not None and not isinstance(self.attention, attentions.NoAttention):
            focus = self.attention.read(self.latent_space, tensor)
            tf.add_to_collection(graph_utils.GraphKeys.RNN_OUTPUTS, focus)

            return focus

        return tensor 
Example #28
Source File: sn.py    From generative_adversary with GNU General Public License v3.0
def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
    # Usually num_iters = 1 will be enough
    W_shape = W.shape.as_list()
    W_reshaped = tf.reshape(W, [-1, W_shape[-1]])
    if u is None:
        u = tf.get_variable("u", [1, W_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)

    def power_iteration(i, u_i, v_i):
        v_ip1 = _l2normalize(tf.matmul(u_i, tf.transpose(W_reshaped)))
        u_ip1 = _l2normalize(tf.matmul(v_ip1, W_reshaped))
        return i + 1, u_ip1, v_ip1

    _, u_final, v_final = tf.while_loop(
        cond=lambda i, _1, _2: i < num_iters,
        body=power_iteration,
        loop_vars=(tf.constant(0, dtype=tf.int32),
                   u, tf.zeros(dtype=tf.float32, shape=[1, W_reshaped.shape.as_list()[0]]))
    )
    sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[0, 0]
    # sigma = tf.reduce_sum(tf.matmul(u_final, tf.transpose(W_reshaped)) * v_final)
    W_bar = W_reshaped / sigma
    W_bar = tf.reshape(W_bar, W_shape)
    # Put NO_OPS to not update any collection. This is useful for the second call of discriminator if the update_op
    # has already been collected on the first call.
    if update_collection != NO_OPS:
        tf.add_to_collection(update_collection, u.assign(u_final))

    if with_sigma:
        return W_bar, sigma
    else:
        return W_bar 
Example #29
Source File: tfops.py    From glow with MIT License
def add_edge_padding(x, filter_size):
    assert filter_size[0] % 2 == 1
    if filter_size[0] == 1 and filter_size[1] == 1:
        return x
    a = (filter_size[0] - 1) // 2  # vertical padding size
    b = (filter_size[1] - 1) // 2  # horizontal padding size
    if True:
        x = tf.pad(x, [[0, 0], [a, a], [b, b], [0, 0]])
        name = "_".join([str(dim) for dim in [a, b, *int_shape(x)[1:3]]])
        pads = tf.get_collection(name)
        if not pads:
            if hvd.rank() == 0:
                print("Creating pad", name)
            pad = np.zeros([1] + int_shape(x)[1:3] + [1], dtype='float32')
            pad[:, :a, :, 0] = 1.
            pad[:, -a:, :, 0] = 1.
            pad[:, :, :b, 0] = 1.
            pad[:, :, -b:, 0] = 1.
            pad = tf.convert_to_tensor(pad)
            tf.add_to_collection(name, pad)
        else:
            pad = pads[0]
        pad = tf.tile(pad, [tf.shape(x)[0], 1, 1, 1])
        x = tf.concat([x, pad], axis=3)
    else:
        pad = tf.pad(tf.zeros_like(x[:, :, :, :1]) - 1,
                     [[0, 0], [a, a], [b, b], [0, 0]]) + 1
        x = tf.pad(x, [[0, 0], [a, a], [b, b], [0, 0]])
        x = tf.concat([x, pad], axis=3)
    return x 
Example #30
Source File: model.py    From glow with MIT License
def checkpoint(z, logdet):
    zshape = Z.int_shape(z)
    z = tf.reshape(z, [-1, zshape[1]*zshape[2]*zshape[3]])
    logdet = tf.reshape(logdet, [-1, 1])
    combined = tf.concat([z, logdet], axis=1)
    tf.add_to_collection('checkpoints', combined)
    logdet = combined[:, -1]
    z = tf.reshape(combined[:, :-1], [-1, zshape[1], zshape[2], zshape[3]])
    return z, logdet