Python tensorflow.contrib.framework.arg_scope() Examples

The following are 30 code examples of tensorflow.contrib.framework.arg_scope(), drawn from open-source projects. The originating project and source file are listed above each example. You may also want to check out all available functions and classes of the tensorflow.contrib.framework module.
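As a quick orientation before the examples: arg_scope lets you declare default keyword arguments once and have every decorated op created inside the scope pick them up, while call sites can still override individual arguments. A minimal sketch, assuming TensorFlow 1.x with tf.contrib available (the layer sizes and hyperparameters here are illustrative):

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])

# Every layers.conv2d created inside the scope inherits these keyword defaults.
with arg_scope([layers.conv2d],
               kernel_size=3,
               padding='SAME',
               activation_fn=tf.nn.relu,
               weights_regularizer=layers.l2_regularizer(1e-4)):
    net = layers.conv2d(inputs, num_outputs=32, scope='conv1')          # all defaults apply
    net = layers.conv2d(net, num_outputs=64, stride=2, scope='conv2')   # defaults apply, stride added
    net = layers.conv2d(net, num_outputs=64, activation_fn=None,
                        scope='conv3')                                  # explicit override wins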
Example #1
Source File: bidirectional_rnn.py    From aster with MIT License
def predict(self, inputs, scope=None):
    with tf.variable_scope(scope, 'BidirectionalRnn', [inputs]) as scope:
      (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        self._fw_cell, self._bw_cell, inputs, time_major=False, dtype=tf.float32)
      rnn_outputs = tf.concat([output_fw, output_bw], axis=2)

      filter_weights = lambda weights: [w for w in weights if w.op.name.endswith('kernel')]
      tf.contrib.layers.apply_regularization(
        self._rnn_regularizer,
        filter_weights(self._fw_cell.trainable_weights))
      tf.contrib.layers.apply_regularization(
        self._rnn_regularizer,
        filter_weights(self._bw_cell.trainable_weights))

      if self._num_output_units > 0:
        with arg_scope(self._fc_hyperparams):
          rnn_outputs = fully_connected(rnn_outputs, self._num_output_units, activation_fn=tf.nn.relu)

    if self._summarize_activations:
      max_time = rnn_outputs.get_shape()[1].value
      for t in range(max_time):
        activation_t = rnn_outputs[:,t,:]
        tf.summary.histogram('Activations/{}/Step_{}'.format(scope.name, t), activation_t)

    return rnn_outputs 
Example #2
Source File: reader.py    From glas with Apache License 2.0
def __call__(self, outputs_collections=None):
        """ Execute the next time step of the GLAS model """
        # Get the current output and decoded output (or zeros if they do not exist yet)
        output = self.output if self.step > 0 else 0.0
        decoded = self.decoder.output if self.step > 0 else tf.zeros((1, self.decoder.output_size))

        with framework.arg_scope(
            [self.decoder.next, self.sampler.random_sample],
            outputs_collections=outputs_collections):

            # sample from the approximate posterior
            sample = self.sampler.random_sample()

            # decode from the latent space
            decoded = self.decoder(sample)

            written = self.attention.write(decoded)

        return output + written, None 
Example #3
Source File: rnn.py    From glas with Apache License 2.0
def step_fn(wrapped_fn):
        """ Wrap an RNN class method's step function to implement basic expected behavior """
        @functools.wraps(wrapped_fn)
        def wrapper(self, *args, **kwargs):
            """ Determine scope reuse and keep track of states and outputs """
            with framework.arg_scope(
                [self.collect_named_outputs],
                outputs_collections=kwargs.get('outputs_collections')):

                with tf.variable_scope(self.variable_scope, reuse=self.reuse):
                    output, state = wrapped_fn(self, *args, **kwargs)
                    output = tf.identity(output, name='rnn_output')

                    self.outputs.append(output)
                    self.states.append(state)

                    return self.collect_named_outputs(output)

        return wrapper 
Example #4
Source File: utils.py    From tensornets with MIT License
def set_args(largs, conv_bias=True, weights_regularizer=None):
    from .layers import conv2d
    from .layers import fc
    from .layers import sconv2d

    def real_set_args(func):
        def wrapper(*args, **kwargs):
            is_training = kwargs.get('is_training', False)
            layers = sum([x for (x, y) in largs(is_training)], [])
            layers_args = [arg_scope(x, **y) for (x, y) in largs(is_training)]
            if not conv_bias:
                layers_args += [arg_scope([conv2d], biases_initializer=None)]
            if weights_regularizer is not None:
                layers_args += [arg_scope(
                    [conv2d, fc, sconv2d],
                    weights_regularizer=weights_regularizer)]
            with arg_scope(layers, outputs_collections=__outputs__):
                with arg_scopes(layers_args):
                    x = func(*args, **kwargs)
                    x.model_name = func.__name__
                    return x
        return wrapper
    return real_set_args 
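The arg_scopes helper used above is a tensornets utility that enters a whole list of scopes at once. A minimal sketch of the same stacking idea using only the standard library (the helper name stacked_arg_scopes and the chosen defaults are illustrative, not part of the original project):

import contextlib

from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

@contextlib.contextmanager
def stacked_arg_scopes(scopes):
    """Enter a list of arg_scope context managers as one combined context."""
    with contextlib.ExitStack() as stack:
        for scope in scopes:
            stack.enter_context(scope)
        yield

scopes = [
    arg_scope([layers.conv2d], padding='SAME'),
    arg_scope([layers.conv2d, layers.fully_connected],
              weights_regularizer=layers.l2_regularizer(1e-4)),
]
with stacked_arg_scopes(scopes):
    pass  # build the network here; both sets of defaults are in effect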
Example #5
Source File: configurable_ops_test.py    From morph-net with Apache License 2.0
def testShareParams(self):
    # Tests reuse option.
    first_outputs = 2
    alternate_num_outputs = 12
    parameterization = {'first/Conv2D': first_outputs}
    decorator = ops.ConfigurableOps(parameterization=parameterization)
    explicit = layers.conv2d(
        self.inputs, first_outputs, 3, scope='first')
    with arg_scope([layers.conv2d], reuse=True):
      decorated = decorator.conv2d(
          self.inputs,
          num_outputs=alternate_num_outputs,
          kernel_size=3,
          scope='first')
    with self.cached_session():
      tf.global_variables_initializer().run()
      # verifies that parameters are shared.
      self.assertAllClose(explicit.eval(), decorated.eval())
    conv_ops = sorted([
        op.name
        for op in tf.get_default_graph().get_operations()
        if op.type == 'Conv2D'
    ])
    self.assertAllEqual(['first/Conv2D', 'first_1/Conv2D'], conv_ops) 
Example #6
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testConcatOpGetRegularizer(self, use_batch_norm, use_partitioner):
    sc = self._batch_norm_scope() if use_batch_norm else []
    partitioner = tf.fixed_size_partitioner(2) if use_partitioner else None
    model_stub = add_concat_model_stub
    with arg_scope(sc):
      with tf.variable_scope(tf.get_variable_scope(), partitioner=partitioner):
        final_op = add_concat_model_stub.build_model()

    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['FusedBatchNormV3'] = StubBatchNormSourceOpHandler(
        model_stub)
    if not use_batch_norm:
      op_handler_dict['Conv2D'] = StubConvSourceOpHandler(model_stub)
    op_reg_manager = orm.OpRegularizerManager([final_op], op_handler_dict)

    expected_alive = model_stub.expected_alive()
    expected = np.logical_or(expected_alive['conv4'], expected_alive['concat'])
    conv_reg = op_reg_manager.get_regularizer(_get_op('conv4/Conv2D'))
    self.assertAllEqual(expected, conv_reg.alive_vector)

    relu_reg = op_reg_manager.get_regularizer(_get_op('conv4/Relu'))
    self.assertAllEqual(expected, relu_reg.alive_vector) 
Example #7
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGroupConcatOpGetRegularizerValues(self, op_name, short_name):
    model_stub = grouping_concat_model_stub
    with arg_scope(self._batch_norm_scope()):
      with tf.variable_scope(tf.get_variable_scope()):
        final_op = model_stub.build_model()

    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['FusedBatchNormV3'] = StubBatchNormSourceOpHandler(
        model_stub)

    op_reg_manager = orm.OpRegularizerManager([final_op], op_handler_dict)

    expected_alive = model_stub.expected_alive()
    expected_reg = model_stub.expected_regularization()

    reg = op_reg_manager.get_regularizer(_get_op(op_name))
    self.assertAllEqual(expected_alive[short_name], reg.alive_vector)
    self.assertAllClose(expected_reg[short_name], reg.regularization_vector) 
Example #8
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGroupConcatOpGetRegularizerObjects(self):
    model_stub = grouping_concat_model_stub
    with arg_scope(self._batch_norm_scope()):
      with tf.variable_scope(tf.get_variable_scope()):
        final_op = model_stub.build_model()

    # Instantiate OpRegularizerManager.
    op_handler_dict = self._default_op_handler_dict
    op_handler_dict['FusedBatchNormV3'] = StubBatchNormSourceOpHandler(
        model_stub)

    op_reg_manager = orm.OpRegularizerManager([final_op], op_handler_dict)
    self.assertEqual(
        op_reg_manager.get_regularizer(_get_op('conv1/Conv2D')),
        op_reg_manager.get_regularizer(_get_op('conv2/Conv2D')))
    self.assertEqual(
        op_reg_manager.get_regularizer(_get_op('conv3/Conv2D')),
        op_reg_manager.get_regularizer(_get_op('conv4/Conv2D'))) 
Example #9
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testGather(self):
    gather_index = [5, 6, 7, 8, 9, 0, 1, 2, 3, 4]
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      gather = tf.gather(c1, gather_index, axis=3)

    manager = orm.OpRegularizerManager(
        [gather.op], self._default_op_handler_dict)

    c1_reg = manager.get_regularizer(_get_op('conv1/Conv2D'))
    gather_reg = manager.get_regularizer(_get_op('GatherV2'))

    # Check regularizer indices.
    self.assertAllEqual(list(range(10)), c1_reg.regularization_vector)
    # This fails due to gather not being supported.  Once gather is supported,
    # this test can be enabled to verify that the regularization vector is
    # gathered in the same ordering as the tensor.
    # self.assertAllEqual(
    #     gather_index, gather_reg.regularization_vector)

    # This test shows that gather is not supported.  The regularization vector
    # has the same initial ordering after the gather op scrambled the
    # channels.  Remove this once gather is supported.
    self.assertAllEqual(list(range(10)), gather_reg.regularization_vector) 
Example #10
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testInit_AddConcat_AllOps(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
      add = c1 + c2
      c3 = layers.conv2d(add, num_outputs=10, kernel_size=3, scope='conv3')
      out = tf.identity(c3)
      concat = tf.concat([c1, c2], axis=3)
      c4 = layers.conv2d(concat, num_outputs=10, kernel_size=3, scope='conv4')

    manager = orm.OpRegularizerManager(
        [out.op], self._default_op_handler_dict, SumGroupingRegularizer)

    # Op c4 is not in the DFS path of out.  Verify that OpRegularizerManager
    # does not process c4.
    self.assertNotIn(c4.op, manager.ops)
    self.assertNotIn(concat.op, manager.ops) 
Example #11
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testInit_Blacklist(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=3, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(c1, num_outputs=4, kernel_size=3, scope='conv2')
      c3 = layers.conv2d(c2, num_outputs=5, kernel_size=3, scope='conv3')

    # Verify c2 has a regularizer.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer)
    self.assertIsNotNone(manager.get_regularizer(c2.op))

    # Verify c2 has None regularizer after blacklisting.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer,
        regularizer_blacklist=['conv2'])
    self.assertIsNone(manager.get_regularizer(c2.op)) 
Example #12
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testInit_BlacklistGroup(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
      add = c1 + c2
      c3 = layers.conv2d(add, num_outputs=10, kernel_size=3, scope='conv3')

    # Verify c2 has a regularizer.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer)
    self.assertIsNotNone(manager.get_regularizer(c2.op))

    # Verify c2 has None regularizer after blacklisting c1 which is grouped.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer,
        regularizer_blacklist=['conv1'])
    self.assertIsNone(manager.get_regularizer(c2.op)) 
Example #13
Source File: model.py    From simulated-unsupervised-tensorflow with Apache License 2.0
def _build_model(self):
    with arg_scope([resnet_block, conv2d, max_pool2d, tanh],
                   layer_dict=self.layer_dict):
      self.R_x = self._build_refiner(self.normalized_x)
      self.denormalized_R_x = denormalize(self.R_x)

      self.D_y, self.D_y_logits = \
          self._build_discrim(self.normalized_y, name="D_y")
      self.D_R_x, self.D_R_x_logits = \
          self._build_discrim(self.R_x, name="D_R_x", reuse=True)
      self.D_R_x_history, self.D_R_x_history_logits = \
          self._build_discrim(self.R_x_history,
                              name="D_R_x_history", reuse=True)

      #self.estimate_outputs = self._build_estimation_network()
    self._build_loss() 
Example #14
Source File: spatial_transformer.py    From aster with MIT License
def _localize(self, preprocessed_images):
    k = self._num_control_points
    conv_output = self._convnet.extract_features(preprocessed_images)[-1]
    batch_size = shape_utils.combined_static_and_dynamic_shape(conv_output)[0]
    conv_output = tf.reshape(conv_output, [batch_size, -1])
    with arg_scope(self._fc_hyperparams):
      fc1 = fully_connected(conv_output, 512)
      fc2_weights_initializer = tf.zeros_initializer()
      fc2_biases_initializer = tf.constant_initializer(self._init_bias)
      fc2 = fully_connected(0.1 * fc1, 2 * k,
        weights_initializer=fc2_weights_initializer,
        biases_initializer=fc2_biases_initializer,
        activation_fn=None,
        normalizer_fn=None)
      if self._summarize_activations:
        tf.summary.histogram('fc1', fc1)
        tf.summary.histogram('fc2', fc2)
    if self._activation == 'sigmoid':
      ctrl_pts = tf.sigmoid(fc2)
    elif self._activation == 'none':
      ctrl_pts = fc2
    else:
      raise ValueError('Unknown activation: {}'.format(self._activation))
    ctrl_pts = tf.reshape(ctrl_pts, [batch_size, k, 2])
    return ctrl_pts 
Example #15
Source File: sphere.py    From TF_Face_Toolbox with Apache License 2.0
def backbone(self, inputs, is_training=False, reuse=None):
    with tf.variable_scope(self.name, reuse=reuse):
        with arg_scope([layers.conv2d], activation_fn=self.prelu, 
                                        weights_regularizer=layers.l2_regularizer(self.weight_decay),
                                        data_format=self.data_format):

          if self.data_format == 'NCHW':
            inputs = tf.transpose(inputs, [0, 3, 1, 2])

          with tf.variable_scope('conv1'):
            net = layers.conv2d(inputs, num_outputs=self.num_outputs[0], kernel_size=3, stride=2) # 64*64*64
            net = self.resBlock(net, num_outputs=self.num_outputs[0]) # 64*64*64

          with tf.variable_scope('conv2'):
            net = layers.conv2d(net, num_outputs=self.num_outputs[1], kernel_size=3, stride=2) # 32*32*128
            net = layers.repeat(net, 2, self.resBlock, self.num_outputs[1]) # 32*32*128 x2

          with tf.variable_scope('conv3'):
            net = layers.conv2d(net, num_outputs=self.num_outputs[2], kernel_size=3, stride=2) # 16*16*256
            net = layers.repeat(net, 4, self.resBlock, self.num_outputs[2]) # 16*16*256 x4

          with tf.variable_scope('conv4'):
            net = layers.conv2d(net, num_outputs=self.num_outputs[3], kernel_size=3, stride=2) # 8*8*512
            net = self.resBlock(net, num_outputs=self.num_outputs[3]) # 8*8*512

          net = tf.reshape(net, [-1, net.get_shape().as_list()[1]*net.get_shape().as_list()[2]*net.get_shape().as_list()[3]])
          net = layers.fully_connected(net, num_outputs=512, activation_fn=None, 
                                       weights_regularizer=layers.l2_regularizer(self.weight_decay)) # 512

    return net 
Example #16
Source File: pose_env_models.py    From tensor2robot with Apache License 2.0
def _q_features(self, state, action, is_training=True, reuse=tf.AUTO_REUSE):
    """Compute feature representation of state and action.

    This method is reused for computing partial context features for the RL^2
    algorithm.

    Args:
      state: Image observation tensor.
      action: Continuous-valued 1-D action tensor.
      is_training: Bool specifying whether this graph is being constructed for
        training or not.
      reuse: Bool or TF re-use ENUM.

    Returns:
      Q values of shape (num_batch, feature_size) where feature_size is
        h * w * 32.
    """
    # State-action embedding module.
    net = state
    channels = 32
    with tf.variable_scope('q_features', reuse=reuse):
      with framework.arg_scope(tf_modules.argscope(is_training=is_training)):
        for layer_index in range(3):
          net = layers.conv2d(net, channels, kernel_size=3)
          logging.info('conv%d %s', layer_index, net.get_shape())
      action_context = layers.fully_connected(action, channels)
      _, h, w, _ = net.shape.as_list()
      num_batch_net = tf.shape(net)[0]
      num_batch_context = tf.shape(action_context)[0]
      # assume num_batch_context >= num_batch_net
      net = tf.tile(net, [num_batch_context // num_batch_net, 1, 1, 1])
      action_context = tf.reshape(action_context,
                                  [num_batch_context, 1, 1, channels])
      action_context = tf.tile(action_context, [1, h, w, 1])
      net += action_context
      net = tf.layers.flatten(net)
    return net 
Example #17
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_a_reduce(net, endpoints, k=192, l=224, m=256, n=384, scope='BlockReduceA'):
    # 35 x 35 -> 17 x 17 reduce
    # inception-v4: k=192, l=224, m=256, n=384
    # inception-resnet-v1: k=192, l=192, m=256, n=384
    # inception-resnet-v2: k=256, l=256, m=384, n=384
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
                # 17 x 17 x input
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, n, [3, 3], stride=2, scope='Conv1_3x3/2')
                # 17 x 17 x n
            with tf.variable_scope('Br3_3x3Dbl'):
                br3 = layers.conv2d(net, k, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, l, [3, 3], padding='SAME', scope='Conv2_3x3')
                br3 = layers.conv2d(br3, m, [3, 3], stride=2, scope='Conv3_3x3/2')
                # 17 x 17 x m
            net = tf.concat([br1, br2, br3], 3, name='Concat1')
            # 17 x 17 x input + n + m
            # 1024 for v4 (384 + 384 + 256)
            # 896 for res-v1 (256 + 384 +256)
            # 1152 for res-v2 (384 + 384 + 384)
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net 
Example #18
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def vgg_arg_scope(
        weight_decay=0.0005,
        use_batch_norm=False):
    """"""
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.9997,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
    }
    normalizer_fn = layers.batch_norm if use_batch_norm else None
    normalizer_params = batch_norm_params if use_batch_norm else None
    l2_regularizer = layers.l2_regularizer(weight_decay)  # 0.00004

    with arg_scope(
            [layers.fully_connected],
            biases_initializer=tf.constant_initializer(0.1),
            weights_initializer=layers.variance_scaling_initializer(factor=1.0),
            weights_regularizer=l2_regularizer,
            activation_fn=tf.nn.relu):
        with arg_scope(
                [layers.conv2d],
                normalizer_fn=normalizer_fn,
                normalizer_params=normalizer_params,
                weights_initializer=layers.variance_scaling_initializer(factor=1.0),
                weights_regularizer=l2_regularizer,
                activation_fn=tf.nn.relu) as arg_sc:
            return arg_sc 
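The function above returns the captured scope rather than entering it, so callers can re-apply the same defaults around any model-building code. A minimal usage sketch (images is assumed to be an image batch tensor; _build_vgg19 is the builder shown in a later example from the same project):

with arg_scope(vgg_arg_scope(weight_decay=0.0005, use_batch_norm=True)):
    logits, endpoints = _build_vgg19(images, num_classes=1000, is_training=True)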
Example #19
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def _build_vgg19(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg19', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):

                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_c(net, endpoints, d=256, scope='Scale3')
                net = _block_c(net, endpoints, d=512, scope='Scale4')
                net = _block_c(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)

                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints 
Example #20
Source File: test_gpu.py    From tensorbayes with MIT License
def classifier(x, phase, reuse=None):
    with tf.variable_scope('class', reuse=reuse):
        with arg_scope([conv2d, dense], bn=True, phase=phase, activation=tf.nn.relu):
            for i in range(4):
                x = conv2d(x, 64 + 64 * i, 3, 2)
                x = conv2d(x, 64 + 64 * i, 3, 1)

            x = dense(x, 500)
            x = dense(x, 10, activation=None)

    return x 
Example #21
Source File: build_vgg.py    From tensorflow-litterbox with Apache License 2.0
def _build_vgg16(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg16', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d], 
                    stride=1,
                    padding='SAME'):

                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_b(net, endpoints, d=256, scope='Scale3')
                net = _block_b(net, endpoints, d=512, scope='Scale4')
                net = _block_b(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)

                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints 
Example #22
Source File: Densenet_Cifar10.py    From Densenet-Tensorflow with MIT License
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True) :
        return tf.cond(training,
                       lambda : batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda : batch_norm(inputs=x, is_training=training, reuse=True)) 
Example #23
Source File: Densenet_MNIST.py    From Densenet-Tensorflow with MIT License
def Batch_Normalization(x, training, scope):
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True) :
        return tf.cond(training,
                       lambda : batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda : batch_norm(inputs=x, is_training=training, reuse=True)) 
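As a side note on the design, tf.contrib.layers.batch_norm also accepts a boolean tensor for is_training and branches internally, so the explicit tf.cond with a reuse=True second call can be avoided. A sketch of that variant (illustrative only, not the original project's code):

def Batch_Normalization(x, training, scope):
    # `training` may be a Python bool or a scalar bool tensor; batch_norm
    # handles the conditional update of the moving statistics itself.
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True):
        return batch_norm(inputs=x, is_training=training)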
Example #24
Source File: vae.py    From TensorFlow-VAE-GAN-DRAW with Apache License 2.0
def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(
            tf.float32, [None, 28 * 28])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=tf.nn.elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model") as scope:
                encoded = encoder(self.input_tensor, hidden_size * 2)

                mean = encoded[:, :hidden_size]
                stddev = tf.sqrt(tf.exp(encoded[:, hidden_size:]))

                epsilon = tf.random_normal([tf.shape(mean)[0], hidden_size])
                input_sample = mean + epsilon * stddev

                output_tensor = decoder(input_sample)

            with tf.variable_scope("model", reuse=True) as scope:
                self.sampled_tensor = decoder(tf.random_normal(
                    [batch_size, hidden_size]))

        vae_loss = self.__get_vae_cost(mean, stddev)
        rec_loss = self.__get_reconstruction_cost(
            output_tensor, self.input_tensor)

        loss = vae_loss + rec_loss
        self.train = layers.optimize_loss(
            loss, tf.contrib.framework.get_or_create_global_step(),
            learning_rate=learning_rate, optimizer='Adam', update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer()) 
Example #25
Source File: gan.py    From TensorFlow-VAE-GAN-DRAW with Apache License 2.0
def __init__(self, hidden_size, batch_size, learning_rate):
        self.input_tensor = tf.placeholder(tf.float32, [None, 28 * 28])

        with arg_scope([layers.conv2d, layers.conv2d_transpose],
                       activation_fn=concat_elu,
                       normalizer_fn=layers.batch_norm,
                       normalizer_params={'scale': True}):
            with tf.variable_scope("model"):
                D1 = discriminator(self.input_tensor)  # positive examples
                D_params_num = len(tf.trainable_variables())
                G = decoder(tf.random_normal([batch_size, hidden_size]))
                self.sampled_tensor = G

            with tf.variable_scope("model", reuse=True):
                D2 = discriminator(G)  # generated examples

        D_loss = self.__get_discrinator_loss(D1, D2)
        G_loss = self.__get_generator_loss(D2)

        params = tf.trainable_variables()
        D_params = params[:D_params_num]
        G_params = params[D_params_num:]
        #    train_discrimator = optimizer.minimize(loss=D_loss, var_list=D_params)
        # train_generator = optimizer.minimize(loss=G_loss, var_list=G_params)
        global_step = tf.contrib.framework.get_or_create_global_step()
        self.train_discrimator = layers.optimize_loss(
            D_loss, global_step, learning_rate / 10, 'Adam', variables=D_params, update_ops=[])
        self.train_generator = layers.optimize_loss(
            G_loss, global_step, learning_rate, 'Adam', variables=G_params, update_ops=[])

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer()) 
Example #26
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testDfsForSourceOps(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
      tmp = c1 + c2
      c3 = layers.conv2d(tmp, num_outputs=10, kernel_size=3, scope='conv3')
      out = tf.identity(c3)
      # Extra branch that is not a dependency of out.
      concat = tf.concat([c1, c2], axis=3)
      layers.conv2d(concat, num_outputs=10, kernel_size=3, scope='conv4')

    manager = orm.OpRegularizerManager([], self._default_op_handler_dict)
    manager._dfs_for_source_ops([out.op])

    # Verify source ops were found.
    expected_queue = collections.deque([
        _get_op('conv3/BatchNorm/FusedBatchNormV3'),
        _get_op('conv2/BatchNorm/FusedBatchNormV3'),
        _get_op('conv1/BatchNorm/FusedBatchNormV3')
    ])
    self.assertEqual(expected_queue, manager._op_deque)

    # Verify extra branch was not included.
    self.assertNotIn(
        _get_op('conv4/BatchNorm/FusedBatchNormV3'), manager._op_deque) 
Example #27
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testInit_BlacklistGroup_NoMatch(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv2')
      add = c1 + c2
      c3 = layers.conv2d(add, num_outputs=10, kernel_size=3, scope='conv3')

    # Verify blacklist regex without match raises ValueError
    self.assertRaisesWithLiteralMatch(
        ValueError,
        'Blacklist regex never used: \'oops\'.',
        orm.OpRegularizerManager, [c3.op], self._default_op_handler_dict,
        SumGroupingRegularizer, regularizer_blacklist=['oops']) 
Example #28
Source File: nasnet_model.py    From benchmarks with Apache License 2.0
def nasnet_large_arg_scope(weight_decay=5e-5,
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=1e-3):
  """Defines the default arg scope for the NASNet-A Large ImageNet model.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
  Returns:
    An `arg_scope` to use for the NASNet Large Model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      'scale': True,
      'fused': True,
  }
  weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
  weights_initializer = contrib_layers.variance_scaling_initializer(
      mode='FAN_OUT')
  with arg_scope(
      [slim.fully_connected, slim.conv2d, slim.separable_conv2d],
      weights_regularizer=weights_regularizer,
      weights_initializer=weights_initializer):
    with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
      with arg_scope(
          [slim.conv2d, slim.separable_conv2d],
          activation_fn=None,
          biases_initializer=None):
        with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
          return sc 
Example #29
Source File: op_regularizer_manager_test.py    From morph-net with Apache License 2.0
def testInit_ForceGroup(self):
    with arg_scope(self._batch_norm_scope()):
      inputs = tf.zeros([2, 4, 4, 3])
      c1 = layers.conv2d(inputs, num_outputs=10, kernel_size=3, scope='conv1')
      c2 = layers.conv2d(c1, num_outputs=10, kernel_size=3, scope='conv2')
      c3 = layers.conv2d(c2, num_outputs=10, kernel_size=3, scope='conv3')

    # Initialize OpRegularizerManager with no force-grouping.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer)

    # Verify that c2 is not grouped with c3.
    c2_op_slices = manager.get_op_slices(c2.op)
    self.assertLen(c2_op_slices, 1)
    c2_op_slice = c2_op_slices[0]
    c2_group = manager.get_op_group(c2_op_slice)
    c3_op_slices = manager.get_op_slices(c3.op)
    self.assertLen(c3_op_slices, 1)
    c3_op_slice = c3_op_slices[0]
    self.assertNotIn(c3_op_slice, c2_group.op_slices)

    # Force-group c2 and c3.
    manager = orm.OpRegularizerManager(
        [c3.op], self._default_op_handler_dict, SumGroupingRegularizer,
        force_group=['conv2|conv3'])

    # Verify that c2 is grouped with c3.
    c2_op_slices = manager.get_op_slices(c2.op)
    self.assertLen(c2_op_slices, 1)
    c2_op_slice = c2_op_slices[0]
    c2_group = manager.get_op_group(c2_op_slice)
    c3_op_slices = manager.get_op_slices(c3.op)
    self.assertLen(c3_op_slices, 1)
    c3_op_slice = c3_op_slices[0]
    self.assertIn(c3_op_slice, c2_group.op_slices) 
Example #30
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_stem_res(net, endpoints, scope='Stem'):
    # Simpler _stem for inception-resnet-v1 network
    # NOTE observe endpoints of first 3 layers
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            # 299 x 299 x 3
            net = layers.conv2d(net, 32, [3, 3], stride=2, scope='Conv1_3x3/2')
            endpoints[scope + '/Conv1'] = net
            # 149 x 149 x 32
            net = layers.conv2d(net, 32, [3, 3], scope='Conv2_3x3')
            endpoints[scope + '/Conv2'] = net
            # 147 x 147 x 32
            net = layers.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv3_3x3')
            endpoints[scope + '/Conv3'] = net
            # 147 x 147 x 64
            net = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            # 73 x 73 x 64
            net = layers.conv2d(net, 80, [1, 1], padding='SAME', scope='Conv4_1x1')
            # 73 x 73 x 80
            net = layers.conv2d(net, 192, [3, 3], scope='Conv5_3x3')
            # 71 x 71 x 192
            net = layers.conv2d(net, 256, [3, 3], stride=2, scope='Conv6_3x3/2')
            # 35 x 35 x 256
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net