Python tensorflow.contrib.slim.conv2d_transpose() Examples

The following are 30 code examples of tensorflow.contrib.slim.conv2d_transpose(), taken from open-source projects. The source file and originating project are listed above each example. You may also want to check out the other available functions and classes of the tensorflow.contrib.slim module.
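Before the examples, a minimal sketch of the call itself (assuming the usual import tensorflow as tf and import tensorflow.contrib.slim as slim): slim.conv2d_transpose takes an input tensor, the number of output channels, and a kernel size, and with the default 'SAME' padding a stride of 2 doubles the spatial dimensions. The placeholder shape below is purely illustrative.

import tensorflow as tf
import tensorflow.contrib.slim as slim

# Illustrative input: a batch of 8x8 feature maps with 64 channels (NHWC).
x = tf.placeholder(tf.float32, [None, 8, 8, 64])

# Stride 2 with 'SAME' padding doubles H and W: (?, 8, 8, 64) -> (?, 16, 16, 32).
y = slim.conv2d_transpose(x, 32, [5, 5], stride=2, padding='SAME',
                          activation_fn=tf.nn.relu, scope='deconv_demo')
print(y.get_shape())  # (?, 16, 16, 32)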
Example #1
Source File: dcgan4_nobn_cf.py    From TheNumericsOfGANs with MIT License
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, output_size//16 * output_size//16 * f_dim, activation_fn=None)
    net = tf.reshape(net, [-1, output_size//16, output_size//16, f_dim])
    net = lrelu(net)

    conv2d_trp_argscope = slim.arg_scope(
        [slim.conv2d_transpose], kernel_size=[5, 5], stride=[2, 2], activation_fn=lrelu
    )

    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
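A usage sketch for this generator (lrelu is a leaky-ReLU helper defined elsewhere in the project; the latent size and dimensions below are illustrative): the four stride-2 transposed convolutions upsample the reshaped features from output_size//16 back up to output_size.

z = tf.placeholder(tf.float32, [None, 128])                # illustrative latent batch
# With f_dim=64, output_size=64, c_dim=3: 4x4x64 features -> 8 -> 16 -> 32 -> 64.
images = generator(z, f_dim=64, output_size=64, c_dim=3)   # (?, 64, 64, 3), in [-1, 1] via tanh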
Example #2
Source File: monodepth_model.py    From Semantic-Mono-Depth with MIT License
def build_model(self):
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], activation_fn=tf.nn.elu):
            with tf.variable_scope('model', reuse=self.reuse_variables):

                self.left_pyramid  = self.scale_pyramid(self.left,  4)
                if self.mode == 'train':
                    self.right_pyramid = self.scale_pyramid(self.right, 4)

                self.model_input = self.left

                #build model
                if self.params.encoder == 'vgg':
                    self.build_vgg(self.model_input)
                elif self.params.encoder == 'resnet50':
                    self.build_resnet50()
                else:
                    return None 
Example #3
Source File: pyramid_network.py    From Master-R-CNN with Apache License 2.0
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
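The returned arg_sc is an ordinary arg_scope dictionary, so callers re-enter it with slim.arg_scope to apply the shared defaults to a whole block. A usage sketch (the input tensor below is illustrative):

feature_map = tf.placeholder(tf.float32, [None, 16, 16, 256])   # illustrative NHWC feature map
with slim.arg_scope(_extra_conv_arg_scope(weight_decay=1e-5, activation_fn=tf.nn.relu)):
    # Inherits padding='SAME', the truncated-normal initializer and the L2 regularizer.
    up = slim.conv2d_transpose(feature_map, 128, [4, 4], stride=2, scope='up_demo')  # (?, 32, 32, 128)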
Example #4
Source File: model.py    From domain-transfer-network with MIT License
def generator(self, inputs, reuse=False):
        # inputs: (batch, 1, 1, 128)
        with tf.variable_scope('generator', reuse=reuse):
            with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,           
                                 stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
                with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True, 
                                     activation_fn=tf.nn.relu, is_training=(self.mode=='train')):

                    net = slim.conv2d_transpose(inputs, 512, [4, 4], padding='VALID', scope='conv_transpose1')   # (batch_size, 4, 4, 512)
                    net = slim.batch_norm(net, scope='bn1')
                    net = slim.conv2d_transpose(net, 256, [3, 3], scope='conv_transpose2')  # (batch_size, 8, 8, 256)
                    net = slim.batch_norm(net, scope='bn2')
                    net = slim.conv2d_transpose(net, 128, [3, 3], scope='conv_transpose3')  # (batch_size, 16, 16, 128)
                    net = slim.batch_norm(net, scope='bn3')
                    net = slim.conv2d_transpose(net, 1, [3, 3], activation_fn=tf.nn.tanh, scope='conv_transpose4')   # (batch_size, 32, 32, 1)
                    return net 
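The first layer overrides the arg_scope's 'SAME' padding with 'VALID' so that the 1x1 input grows to 4x4 in a single step; each remaining stride-2 'SAME' layer then doubles the spatial size. A small helper (illustrative, not from the original project) makes the size rule explicit:

def deconv_out_size(in_size, kernel, stride, padding):
    # Output size of a 2D transposed convolution along one spatial dimension.
    if padding == 'SAME':
        return in_size * stride
    return in_size * stride + max(kernel - stride, 0)   # 'VALID'

print(deconv_out_size(1, 4, 2, 'VALID'))  # 4 -> (batch, 4, 4, 512) after conv_transpose1
print(deconv_out_size(4, 3, 2, 'SAME'))   # 8 -> (batch, 8, 8, 256), and so on up to 32x32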
Example #5
Source File: dcgan3.py    From TheNumericsOfGANs with MIT License
def generator(z, f_dim, output_size, c_dim, is_training=True):
    bn_kwargs = {
        'is_training': is_training, 'updates_collections': None
    }

    # Network
    net = slim.fully_connected(z, output_size//8 * output_size//8 * 4*f_dim,
        activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=bn_kwargs
        )
    net = tf.reshape(net, [-1, output_size//8, output_size//8, 4*f_dim])

    conv2d_trp_argscope =  slim.arg_scope([slim.conv2d_transpose],
        kernel_size=[5,5], stride=[2,2], activation_fn=tf.nn.relu, normalizer_params=bn_kwargs,
    )
    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, 2*f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Example #6
Source File: utils.py    From EDSR-Tensorflow with MIT License
def upsample(x,scale=2,features=64,activation=tf.nn.relu):
	assert scale in [2,3,4]
	x = slim.conv2d(x,features,[3,3],activation_fn=activation)
	if scale == 2:
		ps_features = 3*(scale**2)
		x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
		#x = slim.conv2d_transpose(x,ps_features,6,stride=1,activation_fn=activation)
		x = PS(x,2,color=True)
	elif scale == 3:
		ps_features =3*(scale**2)
		x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
		#x = slim.conv2d_transpose(x,ps_features,9,stride=1,activation_fn=activation)
		x = PS(x,3,color=True)
	elif scale == 4:
		ps_features = 3*(2**2)
		for i in range(2):
			x = slim.conv2d(x,ps_features,[3,3],activation_fn=activation)
			#x = slim.conv2d_transpose(x,ps_features,6,stride=1,activation_fn=activation)
			x = PS(x,2,color=True)
	return x 
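PS is the project's pixel-shuffle (sub-pixel) upsampling helper, used here in place of the commented-out conv2d_transpose calls. A minimal sketch of the same idea with tf.depth_to_space, assuming NHWC input whose channel count is divisible by scale**2:

def pixel_shuffle(x, scale):
    # Rearranges (B, H, W, C * scale**2) into (B, H * scale, W * scale, C).
    return tf.depth_to_space(x, scale)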
Example #7
Source File: conv4.py    From TheNumericsOfGANs with MIT License
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, 512, activation_fn=lrelu)
    net = slim.fully_connected(net, output_size//16 * output_size//16 * f_dim, activation_fn=lrelu)
    net = tf.reshape(net, [-1, output_size//16, output_size//16, f_dim])

    conv2dtrp_argscope = slim.arg_scope(
        [slim.conv2d_transpose], kernel_size=[5, 5], stride=[2, 2], activation_fn=lrelu)

    with conv2dtrp_argscope:
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Example #8
Source File: dcgan3_nobn_cf.py    From TheNumericsOfGANs with MIT License
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, output_size//8 * output_size//8 * f_dim, activation_fn=None)
    net = tf.reshape(net, [-1, output_size//8, output_size//8, f_dim])
    net = lrelu(net)

    conv2d_trp_argscope = slim.arg_scope(
        [slim.conv2d_transpose], kernel_size=[5, 5], stride=[2, 2], activation_fn=lrelu
    )

    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Example #9
Source File: dcgan4.py    From TheNumericsOfGANs with MIT License
def generator(z, f_dim, output_size, c_dim, is_training=True):
    bn_kwargs = {
        'is_training': is_training, 'updates_collections': None
    }

    # Network
    net = slim.fully_connected(z, output_size//16 * output_size//16 * 8*f_dim,
        activation_fn=None, normalizer_fn=None
        )
    net = tf.reshape(net, [-1, output_size//16, output_size//16, 8*f_dim])
    net = lrelu(slim.batch_norm(net, **bn_kwargs))

    conv2d_trp_argscope =  slim.arg_scope([slim.conv2d_transpose],
        kernel_size=[5,5], stride=[2,2], activation_fn=lrelu, normalizer_params=bn_kwargs,
    )
    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, 4*f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, 2*f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, f_dim, normalizer_fn=slim.batch_norm)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Example #10
Source File: dcgan3_nobn.py    From TheNumericsOfGANs with MIT License
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, output_size//8 * output_size//8 * 4*f_dim, activation_fn=tf.nn.relu)
    net = tf.reshape(net, [-1, output_size//8, output_size//8, 4*f_dim])

    conv2d_trp_argscope =  slim.arg_scope([slim.conv2d_transpose],
        kernel_size=[5,5], stride=[2,2], activation_fn=tf.nn.relu,
    )
    with conv2d_trp_argscope:
        net = slim.conv2d_transpose(net, 2*f_dim)
        net = slim.conv2d_transpose(net, f_dim)
        net = slim.conv2d_transpose(net, c_dim, activation_fn=None)

    out = tf.nn.tanh(net)

    return out 
Example #11
Source File: pyramid_network.py    From FastMaskRCNN with Apache License 2.0
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):

  with slim.arg_scope(
      [slim.conv2d, slim.conv2d_transpose],
      padding='SAME',
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,) as arg_sc:
    with slim.arg_scope(
        [slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn) as arg_sc:
      return arg_sc
Example #12
Source File: UNet.py    From unsupervised-image-segmentation-by-WNet-with-NormalizedCut with MIT License
def upconv(cls, image, index_module):
        """
        upconvolute by a 2x2 kernel.
        """
        in_ch = image.get_shape()[-1].value
        out_ch = in_ch // 2
        name = 'upconv%d'%index_module
        upconv = slim.conv2d_transpose(image,out_ch,2,stride=2,
                                    weights_initializer=tf.keras.initializers.he_normal(),
                                    padding='SAME',activation_fn=None,scope=name)
        return upconv 
Example #13
Source File: flownet.py    From DF-Net with MIT License
def flownet_c(conv3_a, conv3_b, conv2_a, channel_mult=1, full_res=False):
    """Given two images, returns flow predictions in decreasing resolution.

    Uses FlowNetCorr.
    """
    m = channel_mult

    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                        data_format='NCHW',
                        weights_regularizer=slim.l2_regularizer(0.0004),
                        weights_initializer=layers.variance_scaling_initializer(),
                        activation_fn=_leaky_relu):
        corr = correlation(conv3_a, conv3_b,
                           pad=20, kernel_size=1, max_displacement=20, stride_1=1, stride_2=2)

        conv_redir = slim.conv2d(conv3_a, int(32 * m), 1, stride=1, scope='conv_redir')

        conv3_1 = slim.conv2d(tf.concat([conv_redir, corr], 1), int(256 * m), 3,
                              stride=1, scope='conv3_1')
        conv4 = slim.conv2d(conv3_1, int(512 * m), 3, stride=2, scope='conv4')
        conv4_1 = slim.conv2d(conv4, int(512 * m), 3, stride=1, scope='conv4_1')
        conv5 = slim.conv2d(conv4_1, int(512 * m), 3, stride=2, scope='conv5')
        conv5_1 = slim.conv2d(conv5, int(512 * m), 3, stride=1, scope='conv5_1')
        conv6 = slim.conv2d(conv5_1, int(1024 * m), 3, stride=2, scope='conv6')
        conv6_1 = slim.conv2d(conv6, int(1024 * m), 3, stride=1, scope='conv6_1')

        res = _flownet_upconv(conv6_1, conv5_1, conv4_1, conv3_1, conv2_a,
                              channel_mult=channel_mult, full_res=full_res)
        return nchw_to_nhwc(res) 
Example #14
Source File: vaegan.py    From tf-vaegan with MIT License
def _generator(self, z, is_training):
        subnet = self.arch['generator']
        n_layer = len(subnet['output'])
        h, w, c = subnet['hwc']
        with slim.arg_scope(
            [slim.batch_norm],
            scale=True,
            updates_collections=None,
            decay=0.9, epsilon=1e-5,
            is_training=is_training,
            scope='BN'):

            x = slim.fully_connected(
                z,
                h * w * c,
                normalizer_fn=slim.batch_norm,
                activation_fn=tf.nn.relu)
            x = tf.reshape(x, [-1, h, w, c])

            with slim.arg_scope(
                    [slim.conv2d_transpose],
                    weights_regularizer=slim.l2_regularizer(subnet['l2-reg']),
                    normalizer_fn=slim.batch_norm,
                    activation_fn=tf.nn.relu):

                for i in range(n_layer -1):
                    x = slim.conv2d_transpose(
                        x,
                        subnet['output'][i],
                        subnet['kernel'][i],
                        subnet['stride'][i])

                # Don't apply BN for the last layer of G
                x = slim.conv2d_transpose(
                    x,
                    subnet['output'][-1],
                    subnet['kernel'][-1],
                    subnet['stride'][-1],
                    normalizer_fn=None,
                    activation_fn=tf.nn.tanh)
        return x 
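The layer stack is driven entirely by the arch['generator'] sub-config. A plausible shape for that dictionary (illustrative values only, not the project's actual configuration):

subnet = {
    'hwc': [4, 4, 256],                   # feature map produced by the fully connected layer
    'output': [128, 64, 3],               # channels for each conv2d_transpose layer
    'kernel': [[5, 5], [5, 5], [5, 5]],   # kernel sizes per layer
    'stride': [2, 2, 2],                  # stride 2 doubles the resolution at every layer
    'l2-reg': 1e-4,
}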
Example #15
Source File: basemodel.py    From PoseFix_RELEASE with MIT License
def resnet_arg_scope(bn_is_training,
                     bn_trainable,
                     trainable=True,
                     weight_decay=cfg.weight_decay,
                     weight_init = initializers.variance_scaling_initializer(),
                     batch_norm_decay=0.99,
                     batch_norm_epsilon=1e-9,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': bn_is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': bn_trainable,
        'updates_collections': ops.GraphKeys.UPDATE_OPS
    }

    with arg_scope(
            [slim.conv2d, slim.conv2d_transpose],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=weight_init,
            trainable=trainable,
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc 
Example #16
Source File: heatmap.py    From dataset_agnostic_segmentation with MIT License
def heatmap(x, scope, output_size=2, L2_reg=0.0, reuse=None, train_mode=True, linear_output=True, act_func=tf.nn.relu, **kwargs):
    def _args_scope():
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            activation_fn=act_func,
                            weights_regularizer=slim.l2_regularizer(L2_reg)):
            with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
                return arg_sc

    with slim.arg_scope(_args_scope()):
        with tf.variable_scope(scope, 'hmap', [x], reuse=reuse) as sc:
            end_points_collection = sc.name + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d, slim.conv2d_transpose],
                                outputs_collections=end_points_collection):
                # Allow smoother downsampling of channels
                net = slim.conv2d(x, 128, [3, 3], scope='conv1')
                net = slim.batch_norm(net, is_training=train_mode, scope='bn1')
                net = slim.conv2d_transpose(net, 64, kernel_size=[3, 3], stride=[2, 2], scope='upconv2')
                net = slim.batch_norm(net, is_training=train_mode, scope='bn2')
                net = slim.conv2d_transpose(net, 32, kernel_size=[3, 3], stride=[2, 2], scope='upconv3')
                net = slim.batch_norm(net, is_training=train_mode, scope='bn3')

                trim_hmap = pad_features(net, size=16, scope='conv4')
                trim_hmap = slim.batch_norm(trim_hmap, is_training=train_mode, scope='bn4')
                trim_hmap = slim.conv2d(trim_hmap, 8, [3, 3], scope='conv5')
                trim_hmap = slim.batch_norm(trim_hmap, is_training=train_mode, scope='bn5')
                trim_hmap = slim.conv2d(trim_hmap, 4, [3, 3], scope='conv6')
                trim_hmap = slim.conv2d(trim_hmap, output_size, kernel_size=[3, 3], scope='conv7')
                if linear_output:
                    trim_hmap = slim.conv2d(trim_hmap, output_size, kernel_size=[1, 1], activation_fn=None, scope='conv8')

    return trim_hmap 
Example #17
Source File: cmp.py    From object_detection_kitti with Apache License 2.0
def readout_general(multi_scale_belief, num_neurons, strides, layers_per_block,
                    kernel_size, batch_norm_is_training_op, wt_decay):
  multi_scale_belief = tf.stop_gradient(multi_scale_belief)
  with tf.variable_scope('readout_maps_deconv'):
    x, outs = deconv(multi_scale_belief, batch_norm_is_training_op,
                     wt_decay=wt_decay, neurons=num_neurons, strides=strides,
                     layers_per_block=layers_per_block, kernel_size=kernel_size,
                     conv_fn=slim.conv2d_transpose, offset=0,
                     name='readout_maps_deconv')
    probs = tf.sigmoid(x)
  return x, probs 
Example #18
Source File: slim_net.py    From automatic-portrait-tf with GNU General Public License v3.0
def fcn8s_arg_scope(weight_decay=0.0005):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d], padding="SAME"):
            with slim.arg_scope([slim.conv2d_transpose], padding="VALID",
                                biases_initializer=None) as arg_sc:
                return arg_sc 
Example #19
Source File: ops.py    From TecoGAN with Apache License 2.0
def conv2_tran(batch_input, kernel=3, output_channel=64, stride=1, use_bias=True, scope='conv'):
    # kernel: An integer specifying the width and height of the 2D convolution window
    with tf.variable_scope(scope):
        if use_bias:
            return slim.conv2d_transpose(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NHWC',
                            activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer())
        else:
            return slim.conv2d_transpose(batch_input, output_channel, [kernel, kernel], stride, 'SAME', data_format='NHWC',
                            activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer(),
                            biases_initializer=None)
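
A usage sketch (the input tensor is illustrative): with kernel=3 and stride=2, the 'SAME'-padded transposed convolution doubles the spatial resolution while producing output_channel feature maps.

feat = tf.placeholder(tf.float32, [None, 32, 32, 64])
up = conv2_tran(feat, kernel=3, output_channel=64, stride=2, scope='up1')  # (?, 64, 64, 64)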

# Define the convolution building block 
Example #20
Source File: heatmap.py    From dataset_agnostic_segmentation with MIT License
def pad_features(inputs, size, scope, pad_x=4, pad_y=0, kernel=None, reuse=None):
    """
    Legacy, Kept for reproducibility
    """
    if kernel is None:
        kernel = [3, 3]
    with tf.variable_scope(scope, 'bottlneck', [inputs], reuse=reuse) as sc:
        bottle = slim.conv2d_transpose(inputs, size, kernel_size=kernel, stride=[2, 2], scope='upconv_bottleneck')
        bottle = tf.pad(bottle, paddings=[[0, 0], [0, 0], [0, pad_x], [0, pad_y]], mode='CONSTANT', name='pad_bottleneck')
    return bottle 
Example #21
Source File: pose_netmulti.py    From DeepLabCut with GNU Lesser General Public License v3.0
def prediction_layer(cfg, input, name, num_outputs):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose],
        padding="SAME",
        activation_fn=None,
        normalizer_fn=None,
        weights_regularizer=slim.l2_regularizer(cfg.weight_decay),
    ):
        with tf.variable_scope(name):
            pred = slim.conv2d_transpose(
                input, num_outputs, kernel_size=[3, 3], stride=2, scope="block4"
            )
            return pred 
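A usage sketch (the configuration object and backbone features below are illustrative stand-ins for the project's own objects): the single stride-2 transposed convolution upsamples the backbone output by a factor of two with num_outputs channels.

class _Cfg(object):
    weight_decay = 1e-4   # minimal stand-in for the project's configuration object

features = tf.placeholder(tf.float32, [None, 28, 28, 2048])                 # e.g. backbone feature maps
scores = prediction_layer(_Cfg(), features, 'part_pred', num_outputs=17)   # (?, 56, 56, 17)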
Example #22
Source File: pose_net.py    From DeepLabCut with GNU Lesser General Public License v3.0
def prediction_layer(cfg, input, name, num_outputs):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose],
        padding="SAME",
        activation_fn=None,
        normalizer_fn=None,
        weights_regularizer=slim.l2_regularizer(cfg.weight_decay),
    ):
        with tf.variable_scope(name):
            pred = slim.conv2d_transpose(
                input, num_outputs, kernel_size=[3, 3], stride=2, scope="block4"
            )
            return pred 
Example #23
Source File: resnet_cf.py    From TheNumericsOfGANs with MIT License
def generator(z, f_dim, output_size, c_dim, is_training=True):
    # Network
    net = slim.fully_connected(z, output_size//16 * output_size//16 * f_dim, activation_fn=None)
    net = tf.reshape(net, [-1, output_size//16, output_size//16, f_dim])

    argscope_conv2d_trp = slim.arg_scope(
        [slim.conv2d_transpose], kernel_size=[5, 5], stride=[2, 2], activation_fn=None)
    argscope_conv2d = slim.arg_scope(
        [slim.conv2d], kernel_size=[3, 3], stride=[1, 1], activation_fn=None)

    with argscope_conv2d, argscope_conv2d_trp:
        net = slim.conv2d_transpose(lrelu(net), f_dim)
        dnet = slim.conv2d(lrelu(net), f_dim//2)
        net += 1e-1 * slim.conv2d(lrelu(dnet), f_dim)

        net = slim.conv2d_transpose(lrelu(net), f_dim)
        dnet = slim.conv2d(lrelu(net), f_dim//2)
        net += 1e-1 * slim.conv2d(lrelu(dnet), f_dim) 

        net = slim.conv2d_transpose(lrelu(net), f_dim)
        dnet = slim.conv2d(lrelu(net), f_dim//2)
        net += 1e-1 * slim.conv2d(lrelu(dnet), f_dim)

        net = slim.conv2d_transpose(lrelu(net), c_dim)

    out = tf.nn.tanh(net)

    return out 
Example #24
Source File: transfer_models.py    From taskonomy with MIT License
def decoder_tiny_transfer_4( encoder_output, is_training, num_output_channels=3, dropout_keep_prob=None, activation_fn=tf.nn.relu,
            batch_norm_decay=0.9, batch_norm_epsilon=1e-5, batch_norm_scale=True, batch_norm_center=True, 
            weight_decay=0.0001, scope='decoder', reuse=None ):
    print('\tbuilding decoder')
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'center': batch_norm_center,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope( [slim.conv2d, slim.conv2d_transpose,
                    slim.fully_connected],
                    outputs_collections=end_points_collection):
            with slim.arg_scope([slim.conv2d, slim.conv2d_transpose,
                        slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params,
                        weights_regularizer=slim.l2_regularizer(weight_decay)):

                with slim.arg_scope([slim.conv2d_transpose], stride=2):
                    encoder_shape = [ int( x ) for x in encoder_output.get_shape() ]
                    print('\t\tinput', encoder_shape )
                    net = encoder_output
                    num_layers = 4
                    num_conv_deconv = num_layers - 4
                    num_deconv_only = 4 - num_conv_deconv
                    for i in range(num_conv_deconv):
                        net = add_conv_layer( net, 64, [3,3], stride=1, scope='conv_{i}'.format(i=i) )
                        net = add_conv_transpose_layer( net, 64, [3, 3], scope='deconv_{i}'.format(i=i) )
                    for i in range(num_deconv_only):
                        net = add_conv_transpose_layer( net, 64, [3, 3], scope='deconv_{i}'.format(i=i+num_conv_deconv) )

            net = add_conv_layer( net, num_output_channels, [3, 3], stride=1, 
                    normalizer_fn=None, activation_fn=tf.tanh, scope='decoder_output' )
        end_points = convert_collection_to_dict(end_points_collection)
        return net, end_points 
Example #25
Source File: transfer_models.py    From taskonomy with MIT License
def decoder_tiny_transfer_2( encoder_output, is_training, num_output_channels=3, dropout_keep_prob=None, activation_fn=tf.nn.relu,
            batch_norm_decay=0.9, batch_norm_epsilon=1e-5, batch_norm_scale=True, batch_norm_center=True, 
            weight_decay=0.0001, scope='decoder', reuse=None ):
    print('\tbuilding decoder')
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'center': batch_norm_center,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope( [slim.conv2d, slim.conv2d_transpose,
                    slim.fully_connected],
                    outputs_collections=end_points_collection):
            with slim.arg_scope([slim.conv2d, slim.conv2d_transpose,
                        slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params,
                        weights_regularizer=slim.l2_regularizer(weight_decay)):

                with slim.arg_scope([slim.conv2d_transpose], stride=2):
                    encoder_shape = [ int( x ) for x in encoder_output.get_shape() ]
                    print('\t\tinput', encoder_shape )
                    net = encoder_output
                    net = add_conv_transpose_layer( net, 64, [4, 4], stride=4, scope='deconv_0' )
                    net = add_conv_transpose_layer( net, 64, [4, 4], stride=4, scope='deconv_1' )
            net = add_conv_layer( net, num_output_channels, [3, 3], stride=1, 
                    normalizer_fn=None, activation_fn=tf.tanh, scope='decoder_output' )
        end_points = convert_collection_to_dict(end_points_collection)
        return net, end_points 
Example #26
Source File: transfer_models.py    From taskonomy with MIT License
def decoder_tiny_transfer_2_upsample( encoder_output, is_training, num_output_channels=3, dropout_keep_prob=None, activation_fn=tf.nn.relu,
            batch_norm_decay=0.9, batch_norm_epsilon=1e-5, batch_norm_scale=True, batch_norm_center=True, 
            weight_decay=0.0001, scope='decoder', reuse=None ):
    print('\tbuilding decoder')
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'center': batch_norm_center,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope( [slim.conv2d, slim.conv2d_transpose,
                    slim.fully_connected],
                    outputs_collections=end_points_collection):
            with slim.arg_scope([slim.conv2d, slim.conv2d_transpose,
                        slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params,
                        weights_regularizer=slim.l2_regularizer(weight_decay)):

                with slim.arg_scope([slim.conv2d_transpose], stride=2):
                    encoder_shape = [ int( x ) for x in encoder_output.get_shape() ]
                    print('\t\tinput', encoder_shape )
                    net = tf.image.resize_images(
                        encoder_output,
                        (64, 64),
                        align_corners=False
                    )
                    net = add_conv_transpose_layer( net, 64, [3, 3], scope='deconv_0' )
                    net = add_conv_transpose_layer( net, 64, [3, 3], scope='deconv_1' )
            net = add_conv_layer( net, num_output_channels, [3, 3], stride=1, 
                    normalizer_fn=None, activation_fn=tf.tanh, scope='decoder_output' )
        end_points = convert_collection_to_dict(end_points_collection)
        return net, end_points 
Example #27
Source File: transfer_models.py    From taskonomy with MIT License
def decoder_tiny_transfer_1_upsample( encoder_output, is_training, num_output_channels=3, dropout_keep_prob=None, activation_fn=tf.nn.relu,
            batch_norm_decay=0.9, batch_norm_epsilon=1e-5, batch_norm_scale=True, batch_norm_center=True, 
            weight_decay=0.0001, scope='decoder', reuse=None ):
    print('\tbuilding decoder')
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'center': batch_norm_center,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope( [slim.conv2d, slim.conv2d_transpose,
                    slim.fully_connected],
                    outputs_collections=end_points_collection):
            with slim.arg_scope([slim.conv2d, slim.conv2d_transpose,
                        slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params,
                        weights_regularizer=slim.l2_regularizer(weight_decay)):

                with slim.arg_scope([slim.conv2d_transpose], stride=2):
                    encoder_shape = [ int( x ) for x in encoder_output.get_shape() ]
                    print('\t\tinput', encoder_shape )
                    net = tf.image.resize_images(
                        encoder_output,
                        (128,128),
                        align_corners=False
                    )
                    net = add_conv_transpose_layer( net, 64, [3, 3], scope='deconv_0' )
            net = add_conv_layer( net, num_output_channels, [3, 3], stride=1, 
                    normalizer_fn=None, activation_fn=tf.tanh, scope='decoder_output' )
        end_points = convert_collection_to_dict(end_points_collection)
        return net, end_points 
Example #28
Source File: transfer_models.py    From taskonomy with MIT License
def decoder_tiny_transfer_1( encoder_output, is_training, num_output_channels=3, dropout_keep_prob=None, activation_fn=tf.nn.relu,
            batch_norm_decay=0.9, batch_norm_epsilon=1e-5, batch_norm_scale=True, batch_norm_center=True, 
            weight_decay=0.0001, scope='decoder', reuse=None ):
    print('\tbuilding decoder')
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'center': batch_norm_center,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with tf.variable_scope(scope, reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope( [slim.conv2d, slim.conv2d_transpose,
                    slim.fully_connected],
                    outputs_collections=end_points_collection):
            with slim.arg_scope([slim.conv2d, slim.conv2d_transpose,
                        slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params,
                        weights_regularizer=slim.l2_regularizer(weight_decay)):

                with slim.arg_scope([slim.conv2d_transpose], stride=2):
                    encoder_shape = [ int( x ) for x in encoder_output.get_shape() ]
                    print('\t\tinput', encoder_shape )
                    net = encoder_output
                    net = add_conv_transpose_layer( net, 64, [16, 16], stride=16, scope='deconv_0' )

            net = add_conv_layer( net, num_output_channels, [3, 3], stride=1, 
                    normalizer_fn=None, activation_fn=tf.tanh, scope='decoder_output' )
        end_points = convert_collection_to_dict(end_points_collection)
        return net, end_points 
Example #29
Source File: utils.py    From taskonomy with MIT License
def add_conv_transpose_layer( *args, **kwargs ):
    net = slim.conv2d_transpose( *args, **kwargs )
    tf.add_to_collection( tf.GraphKeys.ACTIVATIONS, net )
    if 'scope' in kwargs:
        print( '\t\t{scope}'.format( scope=kwargs['scope'] ), net.get_shape() )
    return net 
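add_conv_layer, used throughout the taskonomy decoders above, is presumably the slim.conv2d counterpart of this helper; a sketch under that assumption:

def add_conv_layer(*args, **kwargs):
    # Assumed counterpart of add_conv_transpose_layer, wrapping slim.conv2d instead.
    net = slim.conv2d(*args, **kwargs)
    tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, net)
    if 'scope' in kwargs:
        print('\t\t{scope}'.format(scope=kwargs['scope']), net.get_shape())
    return net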
Example #30
Source File: utils.py    From taskonomy with MIT License
def add_conv_transpose_layer( *args, **kwargs ):
    net = slim.conv2d_transpose( *args, **kwargs )
    tf.add_to_collection( tf.GraphKeys.ACTIVATIONS, net )
    if 'scope' in kwargs:
        print( '\t\t{scope}'.format( scope=kwargs['scope'] ), net.get_shape() )
    return net