Python keras.layers.UpSampling2D() Examples

The following are 30 code examples of keras.layers.UpSampling2D(), collected from open-source projects. The note above each example gives the original source file, project, and license. You may also want to check out the other available functions and classes of the keras.layers module.
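As a quick orientation before the project examples: UpSampling2D simply repeats rows and columns by the given size factor (nearest-neighbour by default), so it changes the spatial dimensions but has no trainable weights. A minimal standalone sketch, assuming the default channels-last data format:

from keras.layers import Input, UpSampling2D
from keras.models import Model

inp = Input(shape=(8, 8, 3))           # channels-last input
out = UpSampling2D(size=(2, 2))(inp)   # repeat rows and columns 2x (nearest-neighbour)
print(Model(inp, out).output_shape)    # (None, 16, 16, 3)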
Example #1
Source File: model.py    From ocsvm-anomaly-detection with MIT License
def build_cae_model(height=32, width=32, channel=3):
    """
    build convolutional autoencoder model
    """
    input_img = Input(shape=(height, width, channel))

    # encoder
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(net)
    encoded = MaxPooling2D((2, 2), padding='same', name='enc')(net)

    # decoder
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(encoded)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    decoded = Conv2D(channel, (3, 3), activation='sigmoid', padding='same')(net)

    return Model(input_img, decoded) 
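A hedged usage sketch for the autoencoder above. The optimizer, loss, batch size, and the random stand-in data are illustrative assumptions, not taken from the original project; the point is that the bottleneck can be reused as a feature extractor through the layer named 'enc'.

import numpy as np
from keras.models import Model

cae = build_cae_model(height=32, width=32, channel=3)
cae.compile(optimizer='adam', loss='mse')                    # illustrative settings
x_train = np.random.rand(256, 32, 32, 3).astype('float32')   # stand-in for images scaled to [0, 1]
cae.fit(x_train, x_train, epochs=1, batch_size=32)

# reuse the bottleneck as a feature extractor via the layer named 'enc'
encoder = Model(inputs=cae.input, outputs=cae.get_layer('enc').output)
features = encoder.predict(x_train)                          # shape (256, 4, 4, 4)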
Example #2
Source File: colorize.py    From faceai with MIT License
def build_model():
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    # model.compile(optimizer='rmsprop', loss='mse')
    model.compile(optimizer='adam', loss='mse')
    return model


# training data
Example #3
Source File: models.py    From cyclegan_keras with The Unlicense
def mnist_generator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)
    x = Conv2D(int(128*scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64*scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale*2, num_id=3)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(int(1024*scale), (1, 1))(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)
    return Model(x0, x) 
Example #4
Source File: blocks.py    From dfc2019 with MIT License
def Conv2DUpsample(filters,
                   upsample_rate,
                   kernel_size=(3,3),
                   up_name='up',
                   conv_name='conv',
                   **kwargs):

    def layer(input_tensor):
        x = UpSampling2D(upsample_rate, name=up_name)(input_tensor)
        x = Conv2D(filters,
                   kernel_size,
                   padding='same',
                   name=conv_name,
                   **kwargs)(x)
        return x
    return layer 
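Conv2DUpsample returns a closure rather than a tensor, so it slots into the functional API like a layer. A short usage sketch, assuming the definition above and its Keras imports are in scope; the shapes and names are illustrative.

from keras.layers import Input
from keras.models import Model

feat = Input(shape=(16, 16, 64))
x = Conv2DUpsample(32, upsample_rate=(2, 2), up_name='up1', conv_name='conv1')(feat)
print(Model(feat, x).output_shape)   # (None, 32, 32, 32): upsample 2x, then 3x3 conv to 32 filters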
Example #5
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_upsample_layer_params(self):
        options = dict(size=[(2, 2), (3, 3), (4, 4), (5, 5)])

        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 1)
        X = np.random.rand(1, *input_shape)

        # Define a function that tests a model
        def build_model(x):
            kwargs = dict(zip(options.keys(), x))
            model = Sequential()
            model.add(Conv2D(filters=5, kernel_size=(7, 7), input_shape=input_shape))
            model.add(UpSampling2D(**kwargs))
            return x, model

        # Iterate through all combinations
        product = itertools.product(*options.values())
        args = [build_model(p) for p in product]

        # Test the cases
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            self._run_test(model, param) 
Example #6
Source File: blocks.py    From dfc2019 with MIT License
def Upsample2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                     use_batchnorm=False, skip=None):

    def layer(input_tensor):

        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1', relu_name=relu_name + '1')(x)

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer 
Example #7
Source File: models.py    From DiscriminativeActiveLearning with MIT License
def get_autoencoder_model(input_shape, labels=10):
    """
    An autoencoder for MNIST to be used in the DAL implementation.
    """

    image = Input(shape=input_shape)
    encoder = Conv2D(32, (3, 3), activation='relu', padding='same')(image)
    encoder = MaxPooling2D((2, 2), padding='same')(encoder)
    encoder = Conv2D(8, (3, 3), activation='relu', padding='same')(encoder)
    encoder = Conv2D(4, (3, 3), activation='relu', padding='same')(encoder)
    encoder = MaxPooling2D((2, 2), padding='same')(encoder)

    decoder = UpSampling2D((2, 2), name='embedding')(encoder)
    decoder = Conv2D(4, (3, 3), activation='relu', padding='same')(decoder)
    decoder = Conv2D(8, (3, 3), activation='relu', padding='same')(decoder)
    decoder = UpSampling2D((2, 2))(decoder)
    decoder = Conv2D(32, (3, 3), activation='relu', padding='same')(decoder)
    decoder = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoder)

    autoencoder = Model(image, decoder)
    return autoencoder 
Example #8
Source File: models.py    From AnomalyDetectionUsingAutoencoder with MIT License
def convolutional_autoencoder():

    input_shape=(28,28,1)
    n_channels = input_shape[-1]
    model = Sequential()
    model.add(Conv2D(32, (3,3), activation='relu', padding='same', input_shape=input_shape))
    model.add(MaxPool2D(padding='same'))
    model.add(Conv2D(16, (3,3), activation='relu', padding='same'))
    model.add(MaxPool2D(padding='same'))
    model.add(Conv2D(8, (3,3), activation='relu', padding='same'))
    model.add(UpSampling2D())
    model.add(Conv2D(16, (3,3), activation='relu', padding='same'))
    model.add(UpSampling2D())
    model.add(Conv2D(32, (3,3), activation='relu', padding='same'))
    model.add(Conv2D(n_channels, (3,3), activation='sigmoid', padding='same'))
    return model 
Example #9
Source File: blocks.py    From dfc2019 with MIT License
def Upsample2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                     use_batchnorm=False, skip=None):

    def layer(input_tensor):

        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1', relu_name=relu_name + '1')(x)

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer 
Example #10
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License
def test_tiny_conv_upsample_random(self):
        np.random.seed(1988)
        input_dim = 10
        input_shape = (input_dim, input_dim, 1)
        num_kernels = 3
        kernel_height = 5
        kernel_width = 5

        # Define a model
        model = Sequential()
        model.add(
            Conv2D(
                input_shape=input_shape,
                filters=num_kernels,
                kernel_size=(kernel_height, kernel_width),
            )
        )
        model.add(UpSampling2D(size=2))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        # Test the keras model
        self._test_model(model) 
Example #11
Source File: blocks.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def Conv2DUpsample(filters,
                   upsample_rate,
                   kernel_size=(3,3),
                   up_name='up',
                   conv_name='conv',
                   **kwargs):

    def layer(input_tensor):
        x = UpSampling2D(upsample_rate, name=up_name)(input_tensor)
        x = Conv2D(filters,
                   kernel_size,
                   padding='same',
                   name=conv_name,
                   **kwargs)(x)
        return x
    return layer 
Example #12
Source File: blocks.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def Upsample2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                     use_batchnorm=False, skip=None):

    def layer(input_tensor):

        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)

        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)

        if skip is not None:
            x = Concatenate()([x, skip])

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1', bn_name=bn_name + '1', relu_name=relu_name + '1')(x)

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)

        return x
    return layer 
Example #13
Source File: train_res.py    From u-net with MIT License
def _up_block(block, mrge, nb_filters):
    # legacy Keras 1 API: Convolution2D, border_mode and merge(mode='concat', concat_axis=1)
    up = merge([Convolution2D(2*nb_filters, 2, 2, border_mode='same')(UpSampling2D(size=(2, 2))(block)), mrge], mode='concat', concat_axis=1)
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(up)
    conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)

    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    
    # conv = Convolution2D(4*nb_filters, 1, 1, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 3, 3, activation='relu', border_mode='same')(conv)
    # conv = Convolution2D(nb_filters, 1, 1, activation='relu', border_mode='same')(conv)

    return conv


# http://arxiv.org/pdf/1512.03385v1.pdf
# 50 Layer resnet 
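The _up_block above uses the legacy Keras 1 API (Convolution2D, border_mode, the merge function). A hedged Keras 2 translation, keeping the original channels-first concatenation axis; the function name is my own:

from keras.layers import Conv2D, UpSampling2D, concatenate

def up_block_keras2(block, mrge, nb_filters):
    # Keras 2 rewrite of _up_block above; axis=1 keeps the original channels-first concat
    up = UpSampling2D(size=(2, 2))(block)
    up = Conv2D(2 * nb_filters, (2, 2), padding='same')(up)
    up = concatenate([up, mrge], axis=1)
    conv = Conv2D(nb_filters, (3, 3), activation='relu', padding='same')(up)
    conv = Conv2D(nb_filters, (3, 3), activation='relu', padding='same')(conv)
    return conv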
Example #14
Source File: models.py    From ImageAI with MIT License
def yolo_main(input, num_anchors, num_classes):

    darknet_network = Model(input, darknet(input))

    network, network_1 = last_layers(darknet_network.output, 512, num_anchors * (num_classes + 5), layer_name="last1")

    network = NetworkConv2D_BN_Leaky( input=network, channels=256, kernel_size=(1,1))
    network = UpSampling2D(2)(network)
    network = Concatenate()([network, darknet_network.layers[152].output])

    network, network_2 = last_layers(network,  256,  num_anchors * (num_classes + 5), layer_name="last2")

    network = NetworkConv2D_BN_Leaky(input=network, channels=128, kernel_size=(1, 1))
    network = UpSampling2D(2)(network)
    network = Concatenate()([network, darknet_network.layers[92].output])

    network, network_3 = last_layers(network, 128, num_anchors * (num_classes + 5), layer_name="last3")

    return Model(input, [network_1, network_2, network_3]) 
Example #15
Source File: unets.py    From dsb2018_topcoders with MIT License
def inception_resnet_v2_fpn(input_shape, channels=1, activation="sigmoid"):
    inceresv2 = InceptionResNetV2Same(input_shape=input_shape, include_top=False)
    conv1, conv2, conv3, conv4, conv5 = inceresv2.output

    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate(
        [
            prediction_fpn_block(P5, "P5", (8, 8)),
            prediction_fpn_block(P4, "P4", (4, 4)),
            prediction_fpn_block(P3, "P3", (2, 2)),
            prediction_fpn_block(P2, "P2"),
        ]
    )
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(inceresv2.input, x)
    return model 
Example #16
Source File: network.py    From Unified-Gesture-and-Fingertip-Detection with MIT License
def model():
    model = VGG16(include_top=False, input_shape=(128, 128, 3))
    x = model.output

    y = x
    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    probability = Dense(5, activation='sigmoid', name='probabilistic_output')(x)

    y = UpSampling2D((3, 3))(y)                    # 4x4 VGG16 feature map -> 12x12
    y = Activation('relu')(y)
    y = Conv2D(1, (3, 3), activation='linear')(y)  # valid padding: 12x12 -> 10x10
    position = Reshape(target_shape=(10, 10), name='positional_output')(y)
    model = Model(inputs=model.input, outputs=[probability, position])
    return model 
Example #17
Source File: model.py    From n2n-watermark-remove with MIT License
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
         dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    def _conv_block(m, dim, acti, bn, res, do=0):
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n

        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        if depth > 0:
            n = _conv_block(m, dim, acti, bn, res)
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])
            m = _conv_block(n, dim, acti, bn, res)
        else:
            m = _conv_block(m, dim, acti, bn, res, do)

        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)
    model = Model(inputs=i, outputs=o)

    return model 
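A short usage sketch for the U-Net builder above; the optimizer and loss are illustrative assumptions. With upconv=True the decoder takes the UpSampling2D-plus-Conv2D path, otherwise it falls back to Conv2DTranspose.

model = get_unet_model(input_channel_num=3, out_ch=3, depth=4, upconv=True)
model.compile(optimizer='adam', loss='mse')   # illustrative settings
model.summary()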
Example #18
Source File: bigan.py    From Keras-BiGAN with MIT License
def g_block(inp, fil, u = True):

    if u:
        out = UpSampling2D(interpolation = 'bilinear')(inp)
    else:
        out = Activation('linear')(inp)

    skip = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    return out 
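A hedged sketch of how g_block is typically stacked to grow a small seed tensor, assuming the imports of the original file are in scope; the latent size, filter counts, and output channels are illustrative.

from keras.layers import Input, Dense, Reshape, Conv2D
from keras.models import Model

latent = Input(shape=(64,))
x = Dense(4 * 4 * 128)(latent)
x = Reshape((4, 4, 128))(x)
x = g_block(x, 128)                                           # 4x4 -> 8x8 (bilinear upsample inside)
x = g_block(x, 64)                                            # 8x8 -> 16x16
rgb = Conv2D(3, 1, padding='same', activation='sigmoid')(x)
generator = Model(latent, rgb)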
Example #19
Source File: unets.py    From dsb2018_topcoders with MIT License
def densenet_fpn(input_shape, channels=1, activation="sigmoid"):
    densenet = DenseNet169(input_shape=input_shape, include_top=False)
    conv1 = densenet.get_layer("conv1/relu").output
    conv2 = densenet.get_layer("pool2_relu").output
    conv3 = densenet.get_layer("pool3_relu").output
    conv4 = densenet.get_layer("pool4_relu").output
    conv5 = densenet.get_layer("bn").output
    conv5 = Activation("relu", name="conv5_relu")(conv5)

    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate(
        [
            prediction_fpn_block(P5, "P5", (8, 8)),
            prediction_fpn_block(P4, "P4", (4, 4)),
            prediction_fpn_block(P3, "P3", (2, 2)),
            prediction_fpn_block(P2, "P2"),
        ]
    )
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(densenet.input, x)
    return model 
Example #20
Source File: unets.py    From dsb2018_topcoders with MIT License
def xception_fpn(input_shape, channels=1, activation="sigmoid"):
    xception = Xception(input_shape=input_shape, include_top=False)
    conv1 = xception.get_layer("block1_conv2_act").output
    conv2 = xception.get_layer("block3_sepconv2_bn").output
    conv3 = xception.get_layer("block4_sepconv2_bn").output
    conv3 = Activation("relu")(conv3)
    conv4 = xception.get_layer("block13_sepconv2_bn").output
    conv4 = Activation("relu")(conv4)
    conv5 = xception.get_layer("block14_sepconv2_act").output

    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate(
        [
            prediction_fpn_block(P5, "P5", (8, 8)),
            prediction_fpn_block(P4, "P4", (4, 4)),
            prediction_fpn_block(P3, "P3", (2, 2)),
            prediction_fpn_block(P2, "P2"),
        ]
    )
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(xception.input, x)
    return model 
Example #21
Source File: mixed-stylegan.py    From StyleGAN-Keras with MIT License
def g_block(inp, style, noise, fil, u = True):
    
    b = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'ones')(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'zeros', bias_initializer = 'zeros')(noise)
    
    if u:
        out = UpSampling2D(interpolation = 'bilinear')(inp)
        out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal', bias_initializer = 'zeros')(out)
    else:
        out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp)

    out = add([out, n])
    out = AdaInstanceNormalization()([out, b, g])
    out = LeakyReLU(0.01)(out)
    
    b = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'ones')(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'zeros', bias_initializer = 'zeros')(noise)
    
    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal', bias_initializer = 'zeros')(out)
    out = add([out, n])
    out = AdaInstanceNormalization()([out, b, g])
    out = LeakyReLU(0.01)(out)
    
    return out

#Convolution, Activation, Pooling, Convolution, Activation 
Example #22
Source File: unets.py    From dsb2018_topcoders with MIT License
def resnet101_fpn(input_shape, channels=1, activation="softmax"):
    img_input = Input(input_shape)
    resnet_base = ResNet101(img_input, include_top=True)
    resnet_base.load_weights(download_resnet_imagenet("resnet101"))
    conv1 = resnet_base.get_layer("conv1_relu").output
    conv2 = resnet_base.get_layer("res2c_relu").output
    conv3 = resnet_base.get_layer("res3b3_relu").output
    conv4 = resnet_base.get_layer("res4b22_relu").output
    conv5 = resnet_base.get_layer("res5c_relu").output
    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate(
        [
            prediction_fpn_block(P5, "P5", (8, 8)),
            prediction_fpn_block(P4, "P4", (4, 4)),
            prediction_fpn_block(P3, "P3", (2, 2)),
            prediction_fpn_block(P2, "P2"),
        ]
    )
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(img_input, x)
    return model 
Example #23
Source File: unets.py    From dsb2018_topcoders with MIT License
def decoder_block_no_bn(input, filters, skip, block_name, activation='relu'):
    x = UpSampling2D()(input)
    x = conv_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv1', activation=activation)
    x = concatenate([x, skip], axis=-1, name=block_name + '_concat')
    x = conv_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv2', activation=activation)
    return x 
Example #24
Source File: unets.py    From dsb2018_topcoders with MIT License
def decoder_block(input, filters, skip, block_name):
    x = UpSampling2D()(input)
    x = conv_bn_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv1')
    x = concatenate([x, skip], axis=-1, name=block_name + '_concat')
    x = conv_bn_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv2')
    return x 
Example #25
Source File: dlight.py    From faceswap with GNU General Public License v3.0
def decoder_a(self):
        """ DeLight Decoder A(old face) Network """
        input_ = Input(shape=(4, 4, 1024))
        decoder_a_complexity = 256
        mask_complexity = 128

        var_xy = input_
        var_xy = UpSampling2D(self.upscale_ratio, interpolation='bilinear')(var_xy)

        var_x = var_xy
        var_x = self.blocks.upscale2x(var_x, decoder_a_complexity, fast=False)
        var_x = self.blocks.upscale2x(var_x, decoder_a_complexity // 2, fast=False)
        var_x = self.blocks.upscale2x(var_x, decoder_a_complexity // 4, fast=False)
        var_x = self.blocks.upscale2x(var_x, decoder_a_complexity // 8, fast=False)

        var_x = self.blocks.conv2d(var_x, 3, kernel_size=5, padding="same",
                                   activation="sigmoid", name="face_out")

        outputs = [var_x]

        if self.config.get("learn_mask", False):
            var_y = var_xy  # mask decoder
            var_y = self.blocks.upscale2x(var_y, mask_complexity, fast=False)
            var_y = self.blocks.upscale2x(var_y, mask_complexity // 2, fast=False)
            var_y = self.blocks.upscale2x(var_y, mask_complexity // 4, fast=False)
            var_y = self.blocks.upscale2x(var_y, mask_complexity // 8, fast=False)

            var_y = self.blocks.conv2d(var_y, 1, kernel_size=5, padding="same",
                                       activation="sigmoid", name="mask_out")

            outputs.append(var_y)

        return KerasModel([input_], outputs=outputs) 
Example #26
Source File: stylegan.py    From StyleGAN-Keras with MIT License
def g_block(inp, style, noise, fil, u = True):
    
    b = Dense(fil)(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil)(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'he_normal')(noise)
    
    if u:
        out = UpSampling2D(interpolation = 'bilinear')(inp)
        out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    else:
        out = Activation('linear')(inp)
    
    out = AdaInstanceNormalization()([out, b, g])
    out = add([out, n])
    out = LeakyReLU(0.01)(out)
    
    b = Dense(fil)(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil)(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'he_normal')(noise)
    
    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = AdaInstanceNormalization()([out, b, g])
    out = add([out, n])
    out = LeakyReLU(0.01)(out)
    
    return out

#Convolution, Activation, Pooling, Convolution, Activation 
Example #27
Source File: unets.py    From dsb2018_topcoders with MIT License
def resnet50_fpn(input_shape, channels=1, activation="softmax"):
    img_input = Input(input_shape)
    resnet_base = ResNet50(img_input, include_top=True)
    resnet_base.load_weights(download_resnet_imagenet("resnet50"))
    conv1 = resnet_base.get_layer("conv1_relu").output
    conv2 = resnet_base.get_layer("res2c_relu").output
    conv3 = resnet_base.get_layer("res3d_relu").output
    conv4 = resnet_base.get_layer("res4f_relu").output
    conv5 = resnet_base.get_layer("res5c_relu").output
    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
    x = concatenate(
        [
            prediction_fpn_block(P5, "P5", (8, 8)),
            prediction_fpn_block(P4, "P4", (4, 4)),
            prediction_fpn_block(P3, "P3", (2, 2)),
            prediction_fpn_block(P2, "P2"),
        ]
    )
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(img_input, x)
    return model 
Example #28
Source File: blocks.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def pyramid_block(pyramid_filters=256, segmentation_filters=128, upsample_rate=2,
                  use_batchnorm=False):
    """
    Pyramid block according to:
        http://presentations.cocodataset.org/COCO17-Stuff-FAIR.pdf

    This block generate `M` and `P` blocks.

    Args:
        pyramid_filters: integer, filters in `M` block of top-down FPN branch
        segmentation_filters: integer, number of filters in segmentation head,
            basically filters in convolution layers between `M` and `P` blocks
        upsample_rate: integer, uspsample rate for `M` block of top-down FPN branch
        use_batchnorm: bool, include batchnorm in convolution blocks

    Returns:
        Pyramid block function (as Keras layers functional API)
    """
    def layer(c, m=None):

        x = Conv2D(pyramid_filters, (1, 1))(c)

        if m is not None:
            up = UpSampling2D((upsample_rate, upsample_rate))(m)
            x = Add()([x, up])

        # segmentation head
        p = Conv(segmentation_filters, (3, 3), padding='same', use_batchnorm=use_batchnorm)(x)
        p = Conv(segmentation_filters, (3, 3), padding='same', use_batchnorm=use_batchnorm)(p)
        m = x

        return m, p
    return layer 
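A usage sketch of the top-down chain the docstring describes, assuming backbone feature maps c2..c5 and the project's Conv helper are available; the filter counts follow the defaults.

# top of the pyramid: nothing coarser to merge, so m defaults to None
m5, p5 = pyramid_block(256, 128)(c5)
# each lower level adds its 1x1 lateral conv to the upsampled coarser M block
m4, p4 = pyramid_block(256, 128)(c4, m5)
m3, p3 = pyramid_block(256, 128)(c3, m4)
m2, p2 = pyramid_block(256, 128)(c2, m3)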
Example #29
Source File: network.py    From AdvancedEAST with MIT License
def g(self, i):
        # i+diff in cfg.feature_layers_range
        assert i + self.diff in cfg.feature_layers_range, \
            ('i=%d+diff=%d not in ' % (i, self.diff)) + \
            str(cfg.feature_layers_range)
        if i == cfg.feature_layers_num:
            bn = BatchNormalization()(self.h(i))
            return Conv2D(32, 3, activation='relu', padding='same')(bn)
        else:
            return UpSampling2D((2, 2))(self.h(i)) 
Example #30
Source File: model.py    From noise2noise with MIT License
def get_unet_model(input_channel_num=3, out_ch=3, start_ch=64, depth=4, inc_rate=2., activation='relu',
         dropout=0.5, batchnorm=False, maxpool=True, upconv=True, residual=False):
    def _conv_block(m, dim, acti, bn, res, do=0):
        n = Conv2D(dim, 3, activation=acti, padding='same')(m)
        n = BatchNormalization()(n) if bn else n
        n = Dropout(do)(n) if do else n
        n = Conv2D(dim, 3, activation=acti, padding='same')(n)
        n = BatchNormalization()(n) if bn else n

        return Concatenate()([m, n]) if res else n

    def _level_block(m, dim, depth, inc, acti, do, bn, mp, up, res):
        if depth > 0:
            n = _conv_block(m, dim, acti, bn, res)
            m = MaxPooling2D()(n) if mp else Conv2D(dim, 3, strides=2, padding='same')(n)
            m = _level_block(m, int(inc * dim), depth - 1, inc, acti, do, bn, mp, up, res)
            if up:
                m = UpSampling2D()(m)
                m = Conv2D(dim, 2, activation=acti, padding='same')(m)
            else:
                m = Conv2DTranspose(dim, 3, strides=2, activation=acti, padding='same')(m)
            n = Concatenate()([n, m])
            m = _conv_block(n, dim, acti, bn, res)
        else:
            m = _conv_block(m, dim, acti, bn, res, do)

        return m

    i = Input(shape=(None, None, input_channel_num))
    o = _level_block(i, start_ch, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv, residual)
    o = Conv2D(out_ch, 1)(o)
    model = Model(inputs=i, outputs=o)

    return model
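Several of the examples above (the U-Net builders and the GAN blocks) choose between UpSampling2D followed by a convolution and a single Conv2DTranspose. A small standalone sketch comparing the two paths: both double the spatial size, but UpSampling2D itself has no trainable weights, while Conv2DTranspose learns its upsampling kernel.

from keras.layers import Input, Conv2D, Conv2DTranspose, UpSampling2D
from keras.models import Model

inp = Input(shape=(16, 16, 32))

# path 1: parameter-free resize, then a learned 3x3 convolution
a = UpSampling2D(size=(2, 2))(inp)
a = Conv2D(32, (3, 3), padding='same', activation='relu')(a)

# path 2: a single learned transposed convolution with stride 2
b = Conv2DTranspose(32, (3, 3), strides=2, padding='same', activation='relu')(inp)

print(Model(inp, a).output_shape)   # (None, 32, 32, 32)
print(Model(inp, b).output_shape)   # (None, 32, 32, 32)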