Python tensorflow.python.ops.nn_ops.relu() Examples

The following are 30 code examples of tensorflow.python.ops.nn_ops.relu(), drawn from the open-source projects named above each example. You may also want to check out all other available functions/classes of the module tensorflow.python.ops.nn_ops.
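Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below, and assuming a TF 1.x graph-mode environment like the examples use) of calling nn_ops.relu directly:

import tensorflow as tf
from tensorflow.python.ops import nn_ops

x = tf.constant([-1.0, 0.0, 2.0])
y = nn_ops.relu(x)  # elementwise max(x, 0)

with tf.Session() as sess:
    print(sess.run(y))  # [0. 0. 2.]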
Example #1
Source File: vgg.py    From tf-slim with Apache License 2.0
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
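The function only builds and returns the scope; a hypothetical usage sketch (not part of vgg.py, assuming `import tensorflow as tf` plus the same tf-slim imports as the file above, in a TF 1.x graph) showing how layers defined inside it pick up the relu activation, L2 regularization and zero bias initialization:

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])  # hypothetical image batch
with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
    # every layers.conv2d / layers_lib.fully_connected call in here inherits
    # activation_fn=nn_ops.relu, the L2 regularizer and the zero bias initializer
    net = layers.conv2d(inputs, 64, [3, 3], scope='conv1_1')
    net = layers.conv2d(net, 64, [3, 3], scope='conv1_2')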
Example #2
Source File: vgg.py    From lambda-packs with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #3
Source File: FastWeightsRNN.py    From AssociativeRetrieval with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, reuse_norm=False,
               input_size=None, activation=nn_ops.relu,
               layer_norm=True, norm_gain=1.0, norm_shift=0.0,
               loop_steps=1, decay_rate=0.9, learning_rate=0.5,
               dropout_keep_prob=1.0, dropout_prob_seed=None):

    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)

    self._num_units = num_units
    self._activation = activation
    self._forget_bias = forget_bias
    self._reuse_norm = reuse_norm
    self._keep_prob = dropout_keep_prob
    self._seed = dropout_prob_seed
    self._layer_norm = layer_norm
    self._S = loop_steps
    self._eta = learning_rate
    self._lambda = decay_rate
    self._g = norm_gain
    self._b = norm_shift 
Example #4
Source File: nn_impl.py    From lambda-packs with MIT License
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(xw_plus_b, name=name) 
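A small hypothetical usage sketch of relu_layer (not from the source file), using the public alias tf.nn.relu_layer in a TF 1.x session:

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(4, 3), dtype=tf.float32)  # batch=4, in_units=3
w = tf.constant(np.random.randn(3, 5), dtype=tf.float32)  # in_units=3, out_units=5
b = tf.zeros([5])                                          # out_units=5
y = tf.nn.relu_layer(x, w, b)                              # relu(matmul(x, w) + b)

with tf.Session() as sess:
    out = sess.run(y)
    print(out.shape)         # (4, 5)
    print((out >= 0).all())  # True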
Example #5
Source File: truncated_vgg.py    From Tabulo with BSD 3-Clause "New" or "Revised" License
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        biases_initializer=init_ops.zeros_initializer()
    ):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc 
Example #6
Source File: network.py    From sense_classification with Apache License 2.0
def network_arg_scope(is_training=True,
                      weight_decay=cfg.train.weight_decay,
                      batch_norm_decay=0.997,
                      batch_norm_epsilon=1e-5,
                      batch_norm_scale=True):
    batch_norm_params = {
        'is_training': is_training, 'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale,
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
        #'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
        'trainable': cfg.train.bn_training,
    }

    with slim.arg_scope(
            [slim.conv2d, slim.separable_convolution2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu6,
            #activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params,
            padding='SAME'):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc 
Example #7
Source File: vgg16.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.

  Args:
    weight_decay: The l2 regularization coefficient.

  Returns:
    An arg_scope.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
      return arg_sc 
Example #8
Source File: loss_ops.py    From deep_image_model with Apache License 2.0
def hinge_loss(logits, labels=None, scope=None, target=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.
    target: Deprecated alias for `labels`.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss values
      across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  labels = _labels(labels, target)
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.sub(2 * labels, all_ones)
    return nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits))) 
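After the {0, 1} labels are mapped to {-1, 1}, the returned value is the standard hinge loss relu(1 - y * logits). A small NumPy check of the same arithmetic (illustrative, not from the project):

import numpy as np

logits = np.array([0.5, -2.0, 3.0])
labels = np.array([1.0, 0.0, 1.0])        # {0, 1} ground truth
y = 2.0 * labels - 1.0                    # -> {-1, 1}
loss = np.maximum(0.0, 1.0 - y * logits)  # relu(1 - y * logits)
print(loss)                               # [0.5 0.  0. ]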
Example #9
Source File: nn.py    From deep_image_model with Apache License 2.0
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(xw_plus_b, name=name) 
Example #10
Source File: truncated_vgg.py    From Table-Detection-using-Deep-learning with BSD 3-Clause "New" or "Revised" License
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        biases_initializer=init_ops.zeros_initializer()
    ):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc 
Example #11
Source File: loss_ops.py    From lambda-packs with MIT License
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits))) 
Example #12
Source File: nn_impl.py    From auto-alt-text-lambda-api with MIT License
def relu_layer(x, weights, biases, name=None):
  """Computes Relu(x * weight + biases).

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "nn_relu_layer" is used.

  Returns:
    A 2-D Tensor computing relu(matmul(x, weights) + biases).
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "relu_layer", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
    return nn_ops.relu(xw_plus_b, name=name) 
Example #13
Source File: layers_test.py    From tf-slim with Apache License 2.0
def _fully_connected_basic_use(self, x, num_output_units, expected_shape):
    output = _layers.legacy_fully_connected(
        x, num_output_units, activation_fn=nn_ops.relu)

    with session.Session() as sess:
      with self.assertRaises(errors_impl.FailedPreconditionError):
        sess.run(output)

      variables_lib.global_variables_initializer().run()
      out_value, shape_value = sess.run([output, array_ops.shape(output)])

    self.assertAllClose(shape_value, expected_shape)
    self.assertEqual(output.get_shape().as_list(), expected_shape)
    self.assertTrue(np.all(out_value >= 0), 'Relu should have all values >= 0.')

    self.assertEqual(2,
                     len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)))
    self.assertEqual(
        0, len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES))) 
Example #14
Source File: nn_test.py    From auto-alt-text-lambda-api with MIT License
def test_unary_ops(self):
    ops = [
        ('relu', nn_ops.relu, nn.relu),
        ('relu6', nn_ops.relu6, nn.relu6),
        ('crelu', nn_ops.crelu, nn.crelu),
        ('elu', nn_ops.elu, nn.elu),
        ('softplus', nn_ops.softplus, nn.softplus),
        ('l2_loss', nn_ops.l2_loss, nn.l2_loss),
        ('softmax', nn_ops.softmax, nn.softmax),
        ('log_softmax', nn_ops.log_softmax, nn.log_softmax),
    ]
    for op_name, tf_op, lt_op in ops:
      golden_tensor = tf_op(self.original_lt.tensor)
      golden_lt = core.LabeledTensor(golden_tensor, self.axes)
      actual_lt = lt_op(self.original_lt)
      self.assertIn(op_name, actual_lt.name)
      self.assertLabeledTensorsEqual(golden_lt, actual_lt) 
Example #15
Source File: loss_ops.py    From auto-alt-text-lambda-api with MIT License
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits))) 
Example #16
Source File: customlayers.py    From AiGEM_TeamHeidelberg2017 with MIT License
def Pseudo1dDeconvBundle(input, n_out=128, kernel_size=[3, 3], name='deconv'):
    """
    Pseudo 1d deconv layer. We apply padding on the input to exactly fit the kernel without setting the
    padding in the deconv layer to 'SAME'. We do not want to pad in the deconv as we want to keep the width dimension constant

    - Kernel_height has to equal to the inout AND output sizes
    - Kernel_width has to be an odd number.

    :param input:
    :param name:
    :param input_shape: [height, width] 2d tensor
    :param kernel_size: [height, width]
    :return:
    """
    with tf.variable_scope(name) as vs:
        input_shape = input.get_shape().as_list() # should be shape [batchsize, height, width, channels]

        assert kernel_size[1] % 2 != 0
        assert kernel_size[0] == input_shape[1]

        padding_pos = math.floor(kernel_size[1])

        paddings = [[0, 0], [0, 0], [padding_pos, padding_pos], [0, 0]]

        pad_input = PadLayer(input, paddings=paddings, mode='CONSTANT', name='pad_layer')

        deconv = tl.layers.DeConv2dLayer(pad_input,
                                         act=tf.nn.relu, # TODO: make PRELU great again?
                                         # shape is selected such that the length dim stays the same:
                                         # This means:
                                         shape=[kernel_size[0], kernel_size[1], n_out, input_shape[-1]],
                                         output_shape=[input_shape[0], input_shape[1], input_shape[2]-2*padding_pos, n_out],
                                         # TODO: compare this with the 1d output
                                         #output_shape=[input_shape[0], input_shape[1], 1, n_out],
                                         strides=[1, 5, 1, 1],
                                         padding='VALID',
                                         W_init=tf.truncated_normal_initializer(stddev=0.02), #TODO check if this is too much
                                         b_init=tf.constant_initializer(value=0.01),
                                         W_init_args={}, b_init_args={},
                                         name='decnn2d_layer')
    return deconv 
Example #17
Source File: network.py    From sense_classification with Apache License 2.0
def dense_block(inputs, depth, depth_bottleneck, stride, name, rate=1):
    depth_in = inputs.get_shape()[3]
    if depth == depth_in:
        if stride == 1:
            shortcut = inputs
        else:
            shortcut = layers.max_pool2d(inputs, [1, 1], stride=stride, scope=name+'_shortcut')
    else:
        shortcut = layers.conv2d(
            inputs,
            depth, [1, 1],
            stride=stride,
            activation_fn=None,
            scope=name+'_shortcut')
    if PRINT_LAYER_LOG:
        print(name+'_shortcut', shortcut.get_shape())

    residual = layers.conv2d(
        inputs, depth_bottleneck, [1, 1], stride=1, scope=name+'_conv1')
    if PRINT_LAYER_LOG:
        print(name+'_conv1', residual.get_shape())
    residual = resnet_utils.conv2d_same(
        residual, depth_bottleneck, 3, stride, rate=rate, scope=name+'_conv2')
    if PRINT_LAYER_LOG:
        print(name+'_conv2', residual.get_shape())
    residual = layers.conv2d(
        residual, depth, [1, 1], stride=1, activation_fn=None, scope=name+'_conv3')
    if PRINT_LAYER_LOG:
        print(name+'_conv3', residual.get_shape())
    output = nn_ops.relu(shortcut + residual)
    return output 
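Note that the final 1x1 conv uses activation_fn=None and the relu is applied only after the shortcut and the residual branch have been summed (the post-activation residual pattern). A minimal numeric sketch of that last step, not taken from the project:

import numpy as np

shortcut = np.array([-2.0, 0.5, 3.0])
residual = np.array([1.0, -1.0, -0.5])
output = np.maximum(shortcut + residual, 0.0)  # relu(shortcut + residual)
print(output)  # [0.  0.  2.5]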
Example #18
Source File: init_ops.py    From deep_image_model with Apache License 2.0
def _random_walk(shape, nonlinearity, dtype=dtypes.float32, seed=None,
                 name="random_walk"):
  """Create a random tensor such that backprop neither vanishes nor explodes.

  Args:
    shape: a python array of int or a 1-d tensor. Sizes of the Tensor.
    nonlinearity: the brain python function for implementing the
      nonlinearity in tensor flow.
    dtype: The type of the output.
    seed: A Python integer. Used to create random seeds. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: string.  Optional name for the op.

  Returns:
    A Tensor of the specified sizes filled with random values.
  """
  assert len(shape) == 2, "Random Walk initialization only supports 2D tensors."
  num_inputs = shape[0]
  if nonlinearity == math_ops.tanh:
    # No real formula for this case yet, but this works well for many
    # layer widths.
    rwg = 1.13
  elif nonlinearity == array_ops.identity:
    rwg = math.exp(1.0 / (2.0 * num_inputs))
  elif nonlinearity == nn_ops.relu:
    rwg = math.sqrt(2.0) * math.exp(1.2 / (max(num_inputs, 6) - 2.4))
  else:
    assert False, "Unsupported nonlinearity for Random Walk initialization."

  mean = 0.0
  stddev = rwg / math.sqrt(float(num_inputs))

  return random_ops.random_normal(shape, mean=mean, stddev=stddev, dtype=dtype,
                                  seed=seed, name=name)


# TODO(vrv): Unhide when we are ready to expose this publicly. 
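A small worked example (hypothetical values, not from the file) of the scale this initializer uses for nn_ops.relu, following the formulas above: rwg = sqrt(2) * exp(1.2 / (max(num_inputs, 6) - 2.4)) and stddev = rwg / sqrt(num_inputs):

import math

shape = [256, 128]  # [num_inputs, num_outputs]
num_inputs = shape[0]
rwg = math.sqrt(2.0) * math.exp(1.2 / (max(num_inputs, 6) - 2.4))
stddev = rwg / math.sqrt(float(num_inputs))
print(round(rwg, 4), round(stddev, 4))  # 1.4209 0.0888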
Example #19
Source File: basemodel.py    From tf-cpn with MIT License
def resnet_arg_scope(bn_is_training,
                     bn_trainable,
                     trainable=True,
                     weight_decay=cfg.weight_decay,
                     batch_norm_decay=0.99,
                     batch_norm_epsilon=1e-9,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': bn_is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': bn_trainable,
        'updates_collections': ops.GraphKeys.UPDATE_OPS
    }

    with arg_scope(
            [slim.conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=trainable,
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc 
Example #20
Source File: modrelu.py    From rotational-unit-of-memory with MIT License
def modrelu(z, b, comp):
    if comp:
        z_norm = math_ops.sqrt(math_ops.square(math_ops.real(z)) + math_ops.square(math_ops.imag(z))) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = math_ops.complex(nn_ops.relu(step1), array_ops.zeros_like(z_norm))
        step3 = z/math_ops.complex(z_norm, array_ops.zeros_like(z_norm))
    else:
        z_norm = math_ops.abs(z) + 0.00001
        step1 = nn_ops.bias_add(z_norm, b)
        step2 = nn_ops.relu(step1)
        step3 = math_ops.sign(z)
       
    return math_ops.multiply(step3, step2) 
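For the real-valued branch (comp=False) the expression reduces to sign(z) * relu(|z| + b). A minimal NumPy check of that reduction (illustrative, not from the project):

import numpy as np

z = np.array([-3.0, 0.5, 2.0])
b = np.array([-1.0, -1.0, -1.0])     # per-unit bias, broadcast like bias_add
z_norm = np.abs(z) + 0.00001
step2 = np.maximum(z_norm + b, 0.0)  # relu(|z| + b)
out = np.sign(z) * step2
print(out)                           # [-2.00001  0.       1.00001]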
Example #21
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testStackRelu(self):
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform(
          (5, height * width * 3), seed=1, name='images')
      output = _layers.stack(images, layers_lib.relu, [10, 20, 30])
      self.assertEqual(output.op.name, 'Stack/fully_connected_3/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, 30]) 
Example #22
Source File: customlayers.py    From AiGEM_TeamHeidelberg2017 with MIT License
def focal_loss_alpha(labels=[], logits=[], pos_weights=[], gamma=2., clips=[], name='focal_loss'):
    """
    Add focal loss weigths to the wigthted sigmoid cross entropy
    :return:
    """
    batchsize = labels.get_shape().as_list()[0]
    n_classes = labels.get_shape().as_list()[1]

    with tf.variable_scope(name) as vs:
        # first get a sigmoid to determine the focal loss weights:
        sigmoid_logits = tf.nn.sigmoid(logits)
        # determine the focal loss weights:
        labels = math_ops.to_float(labels)
        sigmoid_logits.get_shape().assert_is_compatible_with(labels.get_shape())
        preds = array_ops.where(math_ops.equal(labels, 1.), sigmoid_logits, 1. - sigmoid_logits)
        focal_weights = (math_ops.subtract(1., preds)) ** gamma
        print(focal_weights)

        # clip the weights at E-3 and E3
        up_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[1])
        low_clip = math_ops.multiply(tf.ones([batchsize, n_classes]), clips[0])
        focal_weights = array_ops.where(math_ops.greater(focal_weights, clips[1]), up_clip, focal_weights)
        focal_weights = array_ops.where(math_ops.less(focal_weights, clips[0]), low_clip, focal_weights)
        log_weight = 1. + (pos_weights - 1.) * labels

        # now put them into a weighted sigmoid cross entropy:
        loss = math_ops.multiply(math_ops.add((1. - labels) * logits,
                         log_weight * (math_ops.log1p(math_ops.exp(-math_ops.abs(logits))) + nn_ops.relu(-logits))),
                                 focal_weights, name='sc_entropy')
        return loss 
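A small numeric sketch (not from the project) of the focal weights computed above: with p the sigmoid probability assigned to the true class, the weight is (1 - p) ** gamma, then clipped to [clips[0], clips[1]]:

import numpy as np

gamma = 2.0
clips = [1e-3, 1e3]
sigmoid_logits = np.array([0.9, 0.6, 0.02])  # p(class == 1)
labels = np.array([1.0, 1.0, 0.0])
preds = np.where(labels == 1.0, sigmoid_logits, 1.0 - sigmoid_logits)  # prob of the true class
focal_weights = np.clip((1.0 - preds) ** gamma, clips[0], clips[1])
print(focal_weights)  # [0.01  0.16  0.001]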
Example #23
Source File: layers_test.py    From tf-slim with Apache License 2.0
def testCreateWithActivation(self):
    height, width = 3, 3
    with self.cached_session():
      images = random_ops.random_uniform((5, height, width, 3), seed=1)
      output = _layers.bias_add(images, activation_fn=nn_ops.relu)
      self.assertEqual(output.op.name, 'BiasAdd/Relu')
      self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3]) 
Example #24
Source File: rev_block_lib_test.py    From tf-slim with Apache License 2.0
def testMultipleFns(self):

    def f1(x):
      return core_layers.dense(x, self.CHANNELS // 2)

    def f2(x):
      return core_layers.dense(x, self.CHANNELS // 2, activation=nn_ops.relu)

    self._testRevBlock(f=[f1, f2, f1, f2]) 
Example #25
Source File: basemodel.py    From lighttrack with MIT License
def resnet_arg_scope(bn_is_training,
                     bn_trainable,
                     trainable=True,
                     weight_decay=cfg.weight_decay,
                     batch_norm_decay=0.99,
                     batch_norm_epsilon=1e-9,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': bn_is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': bn_trainable,
        'updates_collections': ops.GraphKeys.UPDATE_OPS
    }

    with arg_scope(
            [slim.conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=trainable,
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc 
Example #26
Source File: alexnet.py    From tf-slim with Apache License 2.0
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      biases_initializer=init_ops.constant_initializer(0.1),
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc 
Example #27
Source File: inception_v2.py    From Chinese-Character-and-Calligraphic-Image-Processing with MIT License
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Example #28
Source File: inception_v2.py    From tf-slim with Apache License 2.0
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Example #29
Source File: rnn_cell.py    From Multiview2Novelview with MIT License
def __init__(self, num_units, num_in_proj=None,
               initializer=None, forget_bias=1.0,
               y_activation=nn_ops.relu, reuse=None):
    """Initialize the parameters for an +RNN cell.
    Args:
      num_units: int, The number of units in the +RNN cell
      num_in_proj: (optional) int, The input dimensionality for the RNN.
        If creating the first layer of an +RNN, this should be set to
        `num_units`. Otherwise, this should be set to `None` (default).
        If `None`, dimensionality of `inputs` should be equal to `num_units`,
        otherwise ValueError is thrown.
      initializer: (optional) The initializer to use for the weight matrices.
      forget_bias: (optional) float, default 1.0, The initial bias of the
        forget gates, used to reduce the scale of forgetting at the beginning
        of the training.
      y_activation: (optional) Activation function of the states passed
        through depth. Default is `tf.nn.relu`.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
    """
    super(IntersectionRNNCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._initializer = initializer
    self._forget_bias = forget_bias
    self._num_input_proj = num_in_proj
    self._y_activation = y_activation
    self._reuse = reuse
    self._linear1 = None
    self._linear2 = None 
Example #30
Source File: loss_ops.py    From tf-slim with Apache License 2.0
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor. Note that logits are assumed to be
      unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive
      (resp. negative) binary prediction.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0. Internally
      the {0,1} labels are converted to {-1,1} when calculating the hinge loss.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    An unweighted `Tensor` of same shape as `logits` and `labels` representing
      the loss values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.cast(labels, dtypes.float32)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))