Python tensorflow.python.training.moving_averages.assign_moving_average() Examples

The following are 24 code examples of tensorflow.python.training.moving_averages.assign_moving_average(), collected from open-source projects; the source file and project are listed above each example. The function returns an op that updates a variable toward a new value with an exponential moving average, variable = variable * decay + value * (1 - decay); with zero_debias=True the estimate is additionally corrected for its zero initialization.
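A minimal sketch of the basic call, in TF 1.x graph mode (the variable name, decay value, and session loop here are illustrative, not taken from any example below):

import tensorflow as tf
from tensorflow.python.training import moving_averages

# A scalar EMA of a changing value, with decay 0.99.
ema_var = tf.get_variable('loss_ema', shape=(), trainable=False,
                          initializer=tf.zeros_initializer())
current_loss = tf.placeholder(tf.float32, shape=())

# Returns an op that assigns ema_var * 0.99 + current_loss * 0.01 to ema_var.
update_ema = moving_averages.assign_moving_average(
    ema_var, current_loss, decay=0.99, zero_debias=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for loss_value in [4.0, 3.0, 2.5]:
        print(sess.run(update_ema, feed_dict={current_loss: loss_value}))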
Example #1
Source File: tensorflow_backend.py    From GraphicDesignPatternByPython with MIT License
def moving_average_update(x, value, momentum):
    """Compute the moving average of a variable.

    # Arguments
        x: A `Variable`.
        value: A tensor with the same shape as `x`.
        momentum: The moving average momentum.

    # Returns
        An operation to update the variable.
    """
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=True)
Example #2
Source File: tensorflow_backend.py    From DeepLearning_Wavelet-LSTM with MIT License
def moving_average_update(x, value, momentum):
    """Compute the moving average of a variable.

    # Arguments
        x: A `Variable`.
        value: A tensor with the same shape as `x`.
        momentum: The moving average momentum.

    # Returns
        An operation to update the variable.
    """
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=True)
Example #3
Source File: _old_batch_norm.py    From ADL with MIT License
def update_bn_ema(xn, batch_mean, batch_var,
                  moving_mean, moving_var, decay, internal_update):
    update_op1 = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay, zero_debias=False,
        name='mean_ema_op')
    update_op2 = moving_averages.assign_moving_average(
        moving_var, batch_var, decay, zero_debias=False,
        name='var_ema_op')

    if internal_update:
        with tf.control_dependencies([update_op1, update_op2]):
            return tf.identity(xn, name='output')
    else:
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op1)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op2)
        return tf.identity(xn, name='output') 
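Example #3 shows the two standard ways to wire these update ops into a TF 1.x graph: force them with a control dependency on the layer output (internal_update=True), or register them in tf.GraphKeys.UPDATE_OPS and run them alongside the training op. A minimal sketch of consuming the collection (the toy variable and loss are illustrative):

import tensorflow as tf

x = tf.get_variable('x', shape=(), initializer=tf.ones_initializer())
loss = tf.square(x)  # stand-in loss; a real model builds this from the network

# EMA update ops registered by layers such as update_bn_ema above
# (with internal_update=False) land in this collection and must be
# attached to the training op explicitly:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)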
Example #4
Source File: ssd300.py    From SSD_for_Tensorflow with Apache License 2.0
def batch_normalization(self, input, name):
        with tf.variable_scope(name):
            bn_input_shape = input.get_shape() 
            moving_mean = tf.get_variable(name+'_mean', bn_input_shape[-1:] , initializer=tf.zeros_initializer, trainable=False)
            moving_variance = tf.get_variable(name+'_variance', bn_input_shape[-1:] , initializer=tf.ones_initializer, trainable=False)
            def mean_var_with_update():
                mean, variance = tf.nn.moments(input, list(range(len(bn_input_shape) - 1)), name=name+'_moments')
                with tf.control_dependencies([assign_moving_average(moving_mean, mean, self.conv_bn_decay),assign_moving_average(moving_variance, variance, self.conv_bn_decay)]):
                    return tf.identity(mean), tf.identity(variance)
            # NOTE: the training switch is hard-coded to True here, so batch
            # statistics are always used; the commented line preserves the
            # intended self.isTraining condition.
            #mean, variance = tf.cond(tf.cast(self.isTraining, tf.bool), mean_var_with_update, lambda: (moving_mean, moving_variance))
            mean, variance = tf.cond(tf.cast(True, tf.bool), mean_var_with_update, lambda: (moving_mean, moving_variance))
            beta = tf.get_variable(name+'_beta', bn_input_shape[-1:] , initializer=tf.zeros_initializer)
            gamma = tf.get_variable(name+'_gamma', bn_input_shape[-1:] , initializer=tf.ones_initializer)
            return tf.nn.batch_normalization(input, mean, variance, beta, gamma, self.conv_bn_epsilon, name+'_bn_opt')
Example #5
Source File: tensorflow_backend.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def moving_average_update(x, value, momentum):
    """Compute the moving average of a variable.

    # Arguments
        x: A `Variable`.
        value: A tensor with the same shape as `x`.
        momentum: The moving average momentum.

    # Returns
        An operation to update the variable.
    """
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=False)
Example #6
Source File: layers.py    From Conditional_Density_Estimation with MIT License
def get_output_for(self, input, phase='train', **kwargs):
        if phase == 'train':
            # Calculate the moments based on the individual batch.
            mean, variance = tf.nn.moments(input, self.axis, shift=self.moving_mean)
            # Update the moving_mean and moving_variance moments.
            update_moving_mean = moving_averages.assign_moving_average(
                self.moving_mean, mean, self.decay)
            update_moving_variance = moving_averages.assign_moving_average(
                self.moving_variance, variance, self.decay)
            # Make sure the updates are computed here.
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                output = tf.nn.batch_normalization(
                    input, mean, variance, self.beta, self.gamma, self.epsilon)
        else:
            output = tf.nn.batch_normalization(
                input, self.moving_mean, self.moving_variance, self.beta, self.gamma, self.epsilon)
        output.set_shape(self.input_shape)
        return output 
Example #7
Source File: _old_batch_norm.py    From petridishnn with MIT License
def update_bn_ema(xn, batch_mean, batch_var,
                  moving_mean, moving_var, decay, internal_update):
    update_op1 = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay, zero_debias=False,
        name='mean_ema_op')
    update_op2 = moving_averages.assign_moving_average(
        moving_var, batch_var, decay, zero_debias=False,
        name='var_ema_op')

    if internal_update:
        with tf.control_dependencies([update_op1, update_op2]):
            return tf.identity(xn, name='output')
    else:
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op1)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op2)
        return tf.identity(xn, name='output') 
Example #8
Source File: L_Resnet_E_IR.py    From MobileFaceNet_Tensorflow with Apache License 2.0
def batch_normalization(input, trainable, name, **kwargs):
    input_shape = input.get_shape()
    shape = input_shape.as_list()[-1::]
    axis = list(range(len(input_shape) - 1))
    moving_mean = tf.get_variable(shape=shape, initializer=tf.zeros_initializer, trainable=trainable, name=name + "_mean")
    moving_variance = tf.get_variable(shape=shape, initializer=tf.ones_initializer, trainable=trainable, name=name + "_var")
    offset = tf.get_variable(shape=shape, initializer=tf.zeros_initializer, trainable=trainable, name=name + "_bias")
    scale = tf.get_variable(shape=shape, initializer=tf.ones_initializer, trainable=trainable, name=name + "_scale") if name != 'fc1' else None

    mean, variance = tf.nn.moments(input, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    is_training = tf.convert_to_tensor(trainable, dtype='bool', name='is_training')
    mean, variance = control_flow_ops.cond(is_training,
        lambda: (mean, variance),
        lambda: (moving_mean, moving_variance))

    return tf.nn.batch_normalization(input, mean, variance, offset, scale, name=name, **kwargs) 
Example #9
Source File: nn_ops.py    From time-series-machine-learning with Apache License 2.0
def batch_normalization(incoming, is_training, beta=0.0, gamma=1.0, epsilon=1e-5, decay=0.9):
  shape = incoming.get_shape()
  dimensions_num = len(shape)
  axis = list(range(dimensions_num - 1))

  with tf.variable_scope('batchnorm'):
    beta = tf.Variable(initial_value=tf.ones(shape=[shape[-1]]) * beta, name='beta')
    gamma = tf.Variable(initial_value=tf.ones(shape=[shape[-1]]) * gamma, name='gamma')

    moving_mean = tf.Variable(initial_value=tf.zeros(shape=shape[-1:]), trainable=False, name='moving_mean')
    # Note: moving_variance starts at zeros here; ones is the more common
    # initialization for a variance estimate.
    moving_variance = tf.Variable(initial_value=tf.zeros(shape=shape[-1:]), trainable=False, name='moving_variance')

  def update_mean_var():
    mean, variance = tf.nn.moments(incoming, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
    with tf.control_dependencies([update_moving_mean, update_moving_variance]):
      return tf.identity(mean), tf.identity(variance)

  mean, var = tf.cond(is_training, update_mean_var, lambda: (moving_mean, moving_variance))
  inference = tf.nn.batch_normalization(incoming, mean, var, beta, gamma, epsilon)
  inference.set_shape(shape)
  return inference 
Example #10
Source File: Layer.py    From MOTSFusion with MIT License
def create_and_apply_batch_norm(self, inp, n_features, decay, tower_setup, scope_name="bn"):
    beta, gamma, moving_mean, moving_var = create_batch_norm_vars(n_features, tower_setup, scope_name)
    self.n_params += 2 * n_features
    if tower_setup.is_main_train_tower:
      assert tower_setup.is_training
    if tower_setup.is_training and not tower_setup.freeze_batchnorm:
      xn, batch_mean, batch_var = tf.nn.fused_batch_norm(inp, gamma, beta, epsilon=Layer.BATCH_NORM_EPSILON,
                                                         is_training=True)
      if tower_setup.is_main_train_tower:
        update_op1 = moving_averages.assign_moving_average(
          moving_mean, batch_mean, decay, zero_debias=False, name='mean_ema_op')
        update_op2 = moving_averages.assign_moving_average(
          moving_var, batch_var, decay, zero_debias=False, name='var_ema_op')
        self.update_ops.append(update_op1)
        self.update_ops.append(update_op2)
      return xn
    else:
      xn = tf.nn.batch_normalization(inp, moving_mean, moving_var, beta, gamma, Layer.BATCH_NORM_EPSILON)
      return xn 
Example #11
Source File: optimizers.py    From deep_image_model with Apache License 2.0
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name, shape=value.get_shape(), dtype=value.dtype,
          initializer=init_ops.zeros_initializer, trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor*std)
    return max_norms, mean 
Example #12
Source File: image_ops.py    From NAS-Benchmark with GNU General Public License v3.0
def batch_norm(x, is_training, name="bn", decay=0.9, epsilon=1e-5,
               data_format="NHWC"):
  if data_format == "NHWC":
    shape = [x.get_shape()[3]]
  elif data_format == "NCHW":
    shape = [x.get_shape()[1]]
  else:
    raise NotImplementedError("Unknown data_format {}".format(data_format))

  with tf.variable_scope(name, reuse=None if is_training else True):
    offset = tf.get_variable(
      "offset", shape,
      initializer=tf.constant_initializer(0.0, dtype=tf.float32))
    scale = tf.get_variable(
      "scale", shape,
      initializer=tf.constant_initializer(1.0, dtype=tf.float32))
    moving_mean = tf.get_variable(
      "moving_mean", shape, trainable=False,
      initializer=tf.constant_initializer(0.0, dtype=tf.float32))
    moving_variance = tf.get_variable(
      "moving_variance", shape, trainable=False,
      initializer=tf.constant_initializer(1.0, dtype=tf.float32))

    if is_training:
      x, mean, variance = tf.nn.fused_batch_norm(
        x, scale, offset, epsilon=epsilon, data_format=data_format,
        is_training=True)
      update_mean = moving_averages.assign_moving_average(
        moving_mean, mean, decay)
      update_variance = moving_averages.assign_moving_average(
        moving_variance, variance, decay)
      with tf.control_dependencies([update_mean, update_variance]):
        x = tf.identity(x)
    else:
      x, _, _ = tf.nn.fused_batch_norm(x, scale, offset, mean=moving_mean,
                                       variance=moving_variance,
                                       epsilon=epsilon, data_format=data_format,
                                       is_training=False)
  return x 
Example #13
Source File: transformer_nat.py    From fine-lm with MIT License
def vq_discrete_bottleneck(x, hparams):
  """Simple vector quantized discrete bottleneck."""
  tf.logging.info("Using EMA with beta = {}".format(hparams.beta))
  bottleneck_size = 2**hparams.bottleneck_bits
  x_shape = common_layers.shape_list(x)
  x = tf.reshape(x, [-1, hparams.hidden_size])
  x_means_hot, e_loss = vq_nearest_neighbor(
      x, hparams)
  means, ema_means, ema_count = (hparams.means, hparams.ema_means,
                                 hparams.ema_count)

  # Update the ema variables
  updated_ema_count = moving_averages.assign_moving_average(
      ema_count,
      tf.reduce_sum(x_means_hot, axis=0),
      hparams.decay,
      zero_debias=False)

  dw = tf.matmul(x_means_hot, x, transpose_a=True)
  updated_ema_means = moving_averages.assign_moving_average(
      ema_means, dw, hparams.decay, zero_debias=False)
  n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True)
  updated_ema_count = (
      (updated_ema_count + hparams.epsilon) /
      (n + bottleneck_size * hparams.epsilon) * n)
  # pylint: disable=g-no-augmented-assignment
  updated_ema_means = updated_ema_means / tf.expand_dims(
      updated_ema_count, axis=-1)
  # pylint: enable=g-no-augmented-assignment
  with tf.control_dependencies([e_loss]):
    update_means = tf.assign(means, updated_ema_means)
    with tf.control_dependencies([update_means]):
      loss = hparams.beta * e_loss

  discrete = tf.reshape(x_means_hot, x_shape[:-1] + [bottleneck_size])
  return discrete, loss 
Example #14
Source File: network.py    From GC-Net with GNU General Public License v3.0
def bn(x, c):
  x_shape = x.get_shape()
  params_shape = x_shape[-1:]

  axis = list(range(len(x_shape) - 1))

  beta = _get_variable('beta',
                       params_shape,
                       initializer=tf.zeros_initializer())
                       #tf.constant_initializer(0.00, dtype='float')
  gamma = _get_variable('gamma',
                        params_shape,
                        initializer=tf.ones_initializer())

  moving_mean = _get_variable('moving_mean',
                              params_shape,
                              initializer=tf.zeros_initializer(),
                              trainable=False)
  moving_variance = _get_variable('moving_variance',
                                  params_shape,
                                  initializer=tf.ones_initializer(),
                                  trainable=False)

  # These ops will only be performed when training.
  mean, variance = tf.nn.moments(x, axis)
  update_moving_mean = moving_averages.assign_moving_average(moving_mean,
                                                             mean, BN_DECAY)
  update_moving_variance = moving_averages.assign_moving_average(
                                        moving_variance, variance, BN_DECAY)
  tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
  tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

  mean, variance = control_flow_ops.cond(
    c['is_training'], lambda: (mean, variance),
    lambda: (moving_mean, moving_variance))

  x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)

  return x
Example #15
Source File: batch_norm.py    From ADL with MIT License
def internal_update_bn_ema(xn, batch_mean, batch_var,
                           moving_mean, moving_var, decay):
    update_op1 = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay, zero_debias=False,
        name='mean_ema_op')
    update_op2 = moving_averages.assign_moving_average(
        moving_var, batch_var, decay, zero_debias=False,
        name='var_ema_op')

    # When sync_statistics is True, always enable internal_update.
    # Otherwise the update ops (only executed on main tower)
    # will hang when some BatchNorm layers are unused (https://github.com/tensorpack/tensorpack/issues/1078)
    with tf.control_dependencies([update_op1, update_op2]):
        return tf.identity(xn, name='output') 
Example #16
Source File: batch_norm.py    From petridishnn with MIT License
def update_bn_ema(xn, batch_mean, batch_var,
                  moving_mean, moving_var, decay):
    update_op1 = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay, zero_debias=False,
        name='mean_ema_op')
    update_op2 = moving_averages.assign_moving_average(
        moving_var, batch_var, decay, zero_debias=False,
        name='var_ema_op')

    # When sync_statistics is True, always enable internal_update.
    # Otherwise the update ops (only executed on main tower)
    # will hang when some BatchNorm layers are unused (https://github.com/tensorpack/tensorpack/issues/1078)
    with tf.control_dependencies([update_op1, update_op2]):
        return tf.identity(xn, name='output') 
Example #17
Source File: convnet_builder.py    From tf-imagenet with Apache License 2.0
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
    """Batch normalization on `input_layer` without tf.layers."""
    # We make this function as similar as possible to the
    # tf.contrib.layers.batch_norm, to minimize the differences between using
    # layers and not using layers.
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
    beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,
                             initializer=tf.zeros_initializer())
    if use_scale:
      gamma = self.get_variable('gamma', [num_channels], tf.float32,
                                tf.float32, initializer=tf.ones_initializer())
    else:
      gamma = tf.constant(1.0, tf.float32, [num_channels])
    # For moving variables, we use tf.get_variable instead of self.get_variable,
    # since self.get_variable returns the result of tf.cast which we cannot
    # assign to.
    moving_mean = tf.get_variable('moving_mean', [num_channels],
                                  tf.float32,
                                  initializer=tf.zeros_initializer(),
                                  trainable=False)
    moving_variance = tf.get_variable('moving_variance', [num_channels],
                                      tf.float32,
                                      initializer=tf.ones_initializer(),
                                      trainable=False)
    if self.phase_train:
      bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, epsilon=epsilon,
          data_format=self.data_format, is_training=True)
      mean_update = moving_averages.assign_moving_average(
          moving_mean, batch_mean, decay=decay, zero_debias=False)
      variance_update = moving_averages.assign_moving_average(
          moving_variance, batch_variance, decay=decay, zero_debias=False)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
      bn, _, _ = tf.nn.fused_batch_norm(
          input_layer, gamma, beta, mean=moving_mean,
          variance=moving_variance, epsilon=epsilon,
          data_format=self.data_format, is_training=False)
    return bn 
Example #18
Source File: utils.py    From PFNL with MIT License
def BatchNorm(input, is_train, decay=0.999, name='BatchNorm'):
    '''
    https://github.com/zsdonghao/tensorlayer/blob/master/tensorlayer/layers.py
    https://github.com/ry/tensorflow-resnet/blob/master/resnet.py
    http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow
    '''
    from tensorflow.python.training import moving_averages
    from tensorflow.python.ops import control_flow_ops
    
    axis = list(range(len(input.get_shape()) - 1))
    fdim = input.get_shape()[-1:]
    
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', fdim, initializer=tf.constant_initializer(value=0.0))
        gamma = tf.get_variable('gamma', fdim, initializer=tf.constant_initializer(value=1.0))
        moving_mean = tf.get_variable('moving_mean', fdim, initializer=tf.constant_initializer(value=0.0), trainable=False)
        moving_variance = tf.get_variable('moving_variance', fdim, initializer=tf.constant_initializer(value=0.0), trainable=False)
  
        def mean_var_with_update():
            batch_mean, batch_variance = tf.nn.moments(input, axis)
            update_moving_mean = moving_averages.assign_moving_average(moving_mean, batch_mean, decay, zero_debias=True)
            update_moving_variance = moving_averages.assign_moving_average(moving_variance, batch_variance, decay, zero_debias=True)
            with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                return tf.identity(batch_mean), tf.identity(batch_variance)

        mean, variance = control_flow_ops.cond(is_train, mean_var_with_update, lambda: (moving_mean, moving_variance))

    return tf.nn.batch_normalization(input, mean, variance, beta, gamma, 1e-3) #, tf.stack([mean[0], variance[0], beta[0], gamma[0]]) 
Example #19
Source File: TfEnas.py    From rafiki with Apache License 2.0
def _add_batch_norm(self, X, in_ch, decay=0.9, epsilon=1e-5, offset=None, scale=None, is_train=False, 
                        no_moving_average=False):
        with tf.variable_scope('batch_norm'):
            if offset is None:
                offset = self._make_var('offset', (in_ch,), init_constant=0)
            if scale is None:
                scale = self._make_var('scale', (in_ch,), init_constant=1)

            if not no_moving_average:
                moving_mean = self._make_var('moving_mean', (in_ch,), trainable=False, init_constant=0)
                moving_variance = self._make_var('moving_variance', (in_ch,), trainable=False, init_constant=1)

                if is_train:
                    # For training, do batch norm with batch mean & variance
                    # Update moving averages if training
                    (X, mean, variance) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)
                    update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
                    update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)
                    with tf.control_dependencies([update_mean, update_variance]):
                        X = tf.identity(X)
                else:
                    # For prediction, do batch norm with computed moving mean & variance from training
                    # Don't update moving averages if predicting
                    (X, _, _) =  tf.nn.fused_batch_norm(X, scale, offset, mean=moving_mean, variance=moving_variance,
                                                            epsilon=epsilon, is_training=False)
            else:
                (X, _, _) =  tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)

            return X 
Example #20
Source File: custom_ops.py    From how_to_convert_text_to_images with MIT License
def __call__(self, input_layer, epsilon=1e-5, decay=0.9, name="batch_norm",
                 in_dim=None, phase=Phase.train):
        shape = input_layer.shape
        shp = in_dim or shape[-1]
        with tf.variable_scope(name) as scope:
            self.mean = self.variable('mean', [shp], init=tf.constant_initializer(0.), train=False)
            self.variance = self.variable('variance', [shp], init=tf.constant_initializer(1.0), train=False)

            self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
            self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))

            if phase == Phase.train:
                mean, variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
                mean.set_shape((shp,))
                variance.set_shape((shp,))

                update_moving_mean = moving_averages.assign_moving_average(self.mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(self.variance, variance, decay)

                with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                    normalized_x = tf.nn.batch_norm_with_global_normalization(
                        input_layer.tensor, mean, variance, self.beta, self.gamma, epsilon,
                        scale_after_normalization=True)
            else:
                normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, self.mean, self.variance,
                    self.beta, self.gamma, epsilon,
                    scale_after_normalization=True)
            return input_layer.with_tensor(normalized_x, parameters=self.vars) 
Example #21
Source File: moving_averages_test.py    From deep_image_model with Apache License 2.0
def testAssignMovingAverageWithZeroDebias(self):
    with self.test_session():
      var = tf.Variable([0.0, 0.0])
      val = tf.constant([1.0, 2.0], tf.float32)
      decay = 0.25
      assign = moving_averages.assign_moving_average(
          var, val, decay, zero_debias=True)
      tf.global_variables_initializer().run()
      self.assertAllClose([0.0, 0.0], var.eval())
      assign.op.run()
      self.assertAllClose([1.0 * (1.0 - 0.25) / (1 - 0.25 ** 2),
                           2.0 * (1.0 - 0.25) / (1 - 0.25 ** 2)],
                          var.eval()) 
Example #22
Source File: moving_averages_test.py    From deep_image_model with Apache License 2.0
def testAssignMovingAverage(self):
    with self.test_session():
      var = tf.Variable([10.0, 11.0])
      val = tf.constant([1.0, 2.0], tf.float32)
      decay = 0.25
      assign = moving_averages.assign_moving_average(var, val, decay)
      tf.global_variables_initializer().run()
      self.assertAllClose([10.0, 11.0], var.eval())
      assign.op.run()
      self.assertAllClose([10.0 * 0.25 + 1.0 * (1.0 - 0.25),
                           11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
                          var.eval()) 
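The two tests above pin down the arithmetic. Without zero-debiasing, the update is var * decay + val * (1 - decay); with zero_debias=True the raw average is further divided by a bias-correction factor, (1 - decay ** 2) after the first update per the expected values in Example #21. A plain-Python check of both expectations:

decay = 0.25

# Plain moving average (Example #22): var starts at [10.0, 11.0], val is [1.0, 2.0].
plain = [10.0 * decay + 1.0 * (1 - decay),   # 3.25
         11.0 * decay + 2.0 * (1 - decay)]   # 4.25

# Zero-debiased (Example #21): var starts at zero, so the raw average
# val * (1 - decay) is divided by (1 - decay ** 2), matching the test.
debiased = [1.0 * (1 - decay) / (1 - decay ** 2),   # 0.8
            2.0 * (1 - decay) / (1 - decay ** 2)]   # 1.6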
Example #23
Source File: optimizers.py    From tf-slim with Apache License 2.0
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.cast(global_step, dtypes.float32)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean 
Example #24
Source File: utils.py    From VSR-DUF-Reimplement with Apache License 2.0
def BatchNorm(input, is_train, decay=0.999, name='BatchNorm'):
    '''
    https://github.com/zsdonghao/tensorlayer/blob/master/tensorlayer/layers.py
    https://github.com/ry/tensorflow-resnet/blob/master/resnet.py
    http://stackoverflow.com/questions/38312668/how-does-one-do-inference-with-batch-normalization-with-tensor-flow
    '''
    from tensorflow.python.training import moving_averages
    from tensorflow.python.ops import control_flow_ops
    
    axis = list(range(len(input.get_shape()) - 1))
    fdim = input.get_shape()[-1:]
    
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', fdim, initializer=tf.constant_initializer(value=0.0))
        gamma = tf.get_variable('gamma', fdim, initializer=tf.constant_initializer(value=1.0))
        moving_mean = tf.get_variable('moving_mean', fdim, initializer=tf.constant_initializer(value=0.0), trainable=False)
        moving_variance = tf.get_variable('moving_variance', fdim, initializer=tf.constant_initializer(value=0.0), trainable=False)
  
        def mean_var_with_update():
            batch_mean, batch_variance = tf.nn.moments(input, axis)
            update_moving_mean = moving_averages.assign_moving_average(moving_mean, batch_mean, decay, zero_debias=True)
            update_moving_variance = moving_averages.assign_moving_average(moving_variance, batch_variance, decay, zero_debias=True)
            with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                return tf.identity(batch_mean), tf.identity(batch_variance)

        mean, variance = control_flow_ops.cond(is_train, mean_var_with_update, lambda: (moving_mean, moving_variance))

    return tf.nn.batch_normalization(input, mean, variance, beta, gamma, 1e-3) #, tf.stack([mean[0], variance[0], beta[0], gamma[0]])