Python keras.layers.LeakyReLU() Examples

The following are 30 code examples of keras.layers.LeakyReLU(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
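Before diving into the examples, here is a minimal, hypothetical sketch of typical usage (the layer and input sizes are illustrative, not taken from any example below): LeakyReLU is applied as its own layer after a linear layer, and alpha sets the slope for negative inputs (the Keras default is 0.3).

from keras.layers import Input, Dense, LeakyReLU
from keras.models import Model

inp = Input(shape=(32,))      # illustrative input size
h = Dense(64)(inp)            # linear by default; the activation is added as a separate layer
h = LeakyReLU(alpha=0.2)(h)   # f(x) = x for x >= 0, alpha * x otherwise
model = Model(inp, h)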
Example #1
Source File: bigan.py    From Keras-BiGAN with MIT License
def d_block(inp, fil, p = True):

    skip = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(inp)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(inp)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    if p:
        out = AveragePooling2D()(out)

    return out 
Example #2
Source File: mnist_model.py    From keras-tqdm with MIT License
def build_model():
    x = Input((28 * 28,), name="x")
    hidden_dim = 512
    h = x
    h = Dense(hidden_dim)(h)
    h = BatchNormalization()(h)
    h = LeakyReLU(0.2)(h)
    h = Dropout(0.5)(h)
    h = Dense(hidden_dim // 2)(h)  # integer division; Dense expects an int number of units
    h = BatchNormalization()(h)
    h = LeakyReLU(0.2)(h)
    h = Dropout(0.5)(h)
    h = Dense(10)(h)
    h = Activation('softmax')(h)
    m = Model(x, h)
    m.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
    return m 
Example #3
Source File: neural_network.py    From ReinforcementLearning with Apache License 2.0
def residual_layer(self, x, filters, kernel_size):
        conv_1 = self.conv_layer(x, filters, kernel_size)
        conv_2 = Conv2D(
            filters = filters,
            kernel_size = kernel_size,
            strides = (1, 1),
            padding = 'same',
            data_format = 'channels_first',
            use_bias = False,
            activation = 'linear',
            kernel_regularizer = regularizers.l2(self.reg_const)
            )(conv_1)
        bn = BatchNormalization(axis=1)(conv_2)
        merge_layer = add([x, bn])
        lrelu = LeakyReLU()(merge_layer)
        return lrelu 
Example #4
Source File: object_detection.py    From Traffic-Signal-Violation-Detection-System with GNU General Public License v3.0
def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding, as darknet prefers left and top
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding, as darknet prefers left and top
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if skip else x 
Example #5
Source File: yolo.py    From ImageAI with MIT License
def _conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # unlike tensorflow, darknet prefers left and top padding
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # unlike tensorflow, darknet prefers left and top padding
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if do_skip else x 
Example #6
Source File: yolo.py    From keras-yolo3 with MIT License
def _conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # unlike tensorflow, darknet prefers left and top padding
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # unlike tensorflow, darknet prefers left and top padding
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if do_skip else x 
Example #7
Source File: yolo3_one_file_to_detect_them_all.py    From keras-yolo3 with MIT License
def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding, as darknet prefers left and top
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding, as darknet prefers left and top
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if skip else x 
Example #8
Source File: neural_network.py    From ReinforcementLearning with Apache License 2.0
def value_head(self, x):
        x = self.conv_layer(x, 1, (1, 1))
        x = Flatten()(x)
        x = Dense(
            self.value_head_hidden_layer_size,
            use_bias = False,
            activation = 'linear',
            kernel_regularizer = regularizers.l2(self.reg_const)
            )(x)
        x = LeakyReLU()(x)
        x = Dense(
            1,
            use_bias = False,
            activation = 'tanh',
            kernel_regularizer = regularizers.l2(self.reg_const),
            name = 'value_head'
            )(x)
        return x 
Example #9
Source File: yolov3_weights_to_keras.py    From ai-platform with MIT License
def _conv_block(inp, convs, skip=True):
  x = inp
  count = 0
  len_convs = len(convs)
  for conv in convs:
    if count == (len_convs - 2) and skip:
      skip_connection = x
    count += 1
    if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding, as darknet prefers left and top
    x = Conv2D(conv['filter'],
           conv['kernel'],
           strides=conv['stride'],
           padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding, as darknet prefers left and top
           name='conv_' + str(conv['layer_idx']),
           use_bias=False if conv['bnorm'] else True)(x)
    if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
    if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
  return add([skip_connection, x]) if skip else x


# The SPP block uses three pooling layers of sizes [5, 9, 13] with stride 1;
# their outputs, together with the input, are concatenated and fed to the FC block.
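For reference, below is a minimal sketch of such an SPP block. This is a hypothetical helper, not part of the original file; max pooling with 'same' padding is assumed here, as in YOLOv3-SPP.

from keras.layers import MaxPooling2D, concatenate

def _spp_block(inp):
    # three parallel pooling branches; stride 1 with 'same' padding keeps the spatial size
    pool_5 = MaxPooling2D(pool_size=5, strides=1, padding='same')(inp)
    pool_9 = MaxPooling2D(pool_size=9, strides=1, padding='same')(inp)
    pool_13 = MaxPooling2D(pool_size=13, strides=1, padding='same')(inp)
    # concatenate the input with all pooled outputs along the channel axis
    return concatenate([inp, pool_5, pool_9, pool_13])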
Example #10
Source File: bigan.py    From Keras-BiGAN with MIT License
def encoder(self):

        if self.E:
            return self.E

        inp = Input(shape = [im_size, im_size, 3])

        x = d_block(inp, 1 * cha)   #64
        x = d_block(x, 2 * cha)   #32
        x = d_block(x, 3 * cha)   #16
        x = d_block(x, 4 * cha)  #8
        x = d_block(x, 8 * cha)  #4
        x = d_block(x, 16 * cha, p = False)  #4

        x = Flatten()(x)

        x = Dense(16 * cha, kernel_initializer = 'he_normal')(x)
        x = LeakyReLU(0.2)(x)

        x = Dense(latent_size, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x)

        self.E = Model(inputs = inp, outputs = x)

        return self.E 
Example #11
Source File: bigan.py    From Keras-BiGAN with MIT License
def g_block(inp, fil, u = True):

    if u:
        out = UpSampling2D(interpolation = 'bilinear')(inp)
    else:
        out = Activation('linear')(inp)

    skip = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    return out 
Example #12
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateConv3DTranspose(filters, strides, kernel_size=(4, 4, 2), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        conv3d = Conv3DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)

        return LeakyReLU()(conv3d)
    return f 
Example #13
Source File: stylegan.py    From StyleGAN-Keras with MIT License
def d_block(inp, fil, p = True):
    
    route2 = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(inp)
    route2 = LeakyReLU(0.01)(route2)
    if p:
        route2 = AveragePooling2D()(route2)
    route2 = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(route2)
    out = LeakyReLU(0.01)(route2)
    
    return out

# This object holds the models
Example #14
Source File: stylegan.py    From StyleGAN-Keras with MIT License
def g_block(inp, style, noise, fil, u = True):
    
    b = Dense(fil)(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil)(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'he_normal')(noise)
    
    if u:
        out = UpSampling2D(interpolation = 'bilinear')(inp)
        out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    else:
        out = Activation('linear')(inp)
    
    out = AdaInstanceNormalization()([out, b, g])
    out = add([out, n])
    out = LeakyReLU(0.01)(out)
    
    b = Dense(fil)(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil)(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'he_normal')(noise)
    
    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = AdaInstanceNormalization()([out, b, g])
    out = add([out, n])
    out = LeakyReLU(0.01)(out)
    
    return out

# Convolution, Activation, Pooling, Convolution, Activation
Example #15
Source File: nn.py    From rl-teacher with MIT License
def __init__(self, obs_shape, act_shape, h_size=64):
        input_dim = np.prod(obs_shape) + np.prod(act_shape)

        self.model = Sequential()
        self.model.add(Dense(h_size, input_dim=input_dim))
        self.model.add(LeakyReLU())

        self.model.add(Dropout(0.5))
        self.model.add(Dense(h_size))
        self.model.add(LeakyReLU())

        self.model.add(Dropout(0.5))
        self.model.add(Dense(1)) 
Example #16
Source File: DCGAN.py    From DCGAN-Keras with MIT License
def build_discriminator(self):

        img_shape = (self.img_size[0], self.img_size[1], self.channels)

        model = Sequential()

        model.add(Conv2D(32, kernel_size=self.kernel_size, strides=2, input_shape=img_shape, padding="same"))  # 192x256 -> 96x128
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, kernel_size=self.kernel_size, strides=2, padding="same"))  # 96x128 -> 48x64
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Conv2D(128, kernel_size=self.kernel_size, strides=2, padding="same"))  # 48x64 -> 24x32
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))

        model.add(Conv2D(256, kernel_size=self.kernel_size, strides=1, padding="same"))  # stride 1: spatial size unchanged
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))

        model.add(Conv2D(512, kernel_size=self.kernel_size, strides=1, padding="same"))  # stride 1: spatial size unchanged
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=img_shape)
        validity = model(img)

        return Model(img, validity) 
Example #17
Source File: _vae_keras.py    From scgen with GNU General Public License v3.0
def _decoder(self):
        """
            Constructs the decoder sub-network of VAE. This function implements the
            decoder part of Variational Auto-encoder. It will transform constructed
            latent space to the previous space of data with n_dimensions = n_vars.
            Parameters
            ----------
            No parameters are needed.
            Returns
            -------
            h: Tensor
                A Tensor for last dense layer with the shape of [n_vars, ] to reconstruct data.
        """
        h = Dense(800, kernel_initializer=self.init_w, use_bias=False)(self.z)
        h = BatchNormalization(axis=1)(h)
        h = LeakyReLU()(h)
        h = Dropout(self.dropout_rate)(h)
        h = Dense(800, kernel_initializer=self.init_w, use_bias=False)(h)
        h = BatchNormalization(axis=1)(h)
        h = LeakyReLU()(h)
        h = Dropout(self.dropout_rate)(h)
        # h = Dense(768, kernel_initializer=self.init_w, use_bias=False)(h)
        # h = BatchNormalization()(h)
        # h = LeakyReLU()(h)
        # h = Dropout(self.dropout_rate)(h)
        # h = Dense(1024, kernel_initializer=self.init_w, use_bias=False)(h)
        # h = BatchNormalization()(h)
        # h = LeakyReLU()(h)
        # h = Dropout(self.dropout_rate)(h)
        h = Dense(self.x_dim, kernel_initializer=self.init_w, use_bias=True)(h)

        self.decoder_model = Model(inputs=self.z, outputs=h, name="decoder")
        return h 
Example #18
Source File: videograph.py    From videograph with GNU General Public License v3.0
def node_attention(x, n, n_channels_in, activation_type='softmax'):
    activation_types = ['relu', 'softmax', 'sigmoid']
    assert activation_type in activation_types, 'Sorry, unknown activation type: %s' % (activation_type)

    # expand for multiplication
    n = ExpandDimsLayer(axis=0)(n)

    # phi path (Q) or (x)
    x = BatchNormalization()(x)
    phi = x  # (None, 64, 1024)

    # theta path (K) or (c)
    theta = BatchNormalization()(n)  # (1, 100, 1024)
    theta = Conv1D(n_channels_in, 1, padding='same', name='node_embedding')(theta)  # (1, 100, 1024)

    # f path (theta and phi) or (Q and K)
    f = Lambda(__tensor_product)([phi, theta])  # (None, 7, 7, 100, 64)
    f = TransposeLayer((0, 1, 2, 4, 3))(f)  # (None, 7, 7, 64, 100)
    f = BatchNormalization()(f)
    if activation_type == 'relu':
        f = LeakyReLU(alpha=0.2, name='node_attention')(f)
        f = BatchNormalization()(f)
    elif activation_type == 'softmax':
        f = Activation('softmax', name='node_attention')(f)
    elif activation_type == 'sigmoid':
        f = Activation('sigmoid', name='node_attention')(f)
    else:
        raise Exception('sorry, unknown activation type')
    f = TransposeLayer((0, 1, 2, 4, 3))(f)  # (None, 7, 7, 100, 64)

    # g path (V)
    g = BatchNormalization()(n)

    y = Lambda(__tensor_multiplication, name='node_attenative')([f, g])  # (N, 100, 64, 7, 7, 1024)
    y = BatchNormalization()(y)
    y = LeakyReLU(alpha=0.2)(y)
    return y 
Example #19
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateLeakyReluBNConv2D(filters, kernel_size=(3, 3), strides=(1, 1), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        output = BatchNormalization(axis=1)(output)

        return LeakyReLU()(output)
    return f 
Example #20
Source File: motion_VAEGAN2D.py    From CNNArt with Apache License 2.0
def build_discriminator(patchSize):
    def d_block(layer_input, filters, strides=1, bn=True):
        """Discriminator layer"""
        d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8, axis=1)(d)
        return d

    # define number of filters
    df = 32

    # Input image
    d0 = Input(shape=(1, patchSize[0], patchSize[1]))

    d1 = d_block(d0, df, bn=False)
    d2 = d_block(d1, df, strides=2)
    d3 = d_block(d2, df*2)
    d4 = d_block(d3, df*2, strides=2)
    d5 = d_block(d4, df*4)
    d6 = d_block(d5, df*4, strides=2)
    flat = Flatten()(d6)
    d7 = Dense(df*8)(flat)
    d8 = LeakyReLU(alpha=0.2)(d7)
    validity = Dense(1, activation='sigmoid')(d8)

    return Model(d0, validity) 
Example #21
Source File: deep_residual_learning_blocks.py    From CNNArt with Apache License 2.0
def identity_block_3D(input_tensor, filters, kernel_size=(3, 3, 3), stage=0, block=0, se_enabled=False, se_ratio=16):

    numFilters1, numFilters2 = filters

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    x = Conv3D(filters=numFilters1,
               kernel_size=kernel_size,
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)

    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Conv3D(filters=numFilters2,
               kernel_size=kernel_size,
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)

    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block_3D(x, ratio=se_ratio)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    x = Add()([x, input_tensor])

    x = LeakyReLU(alpha=0.01)(x)

    return x
Example #22
Source File: deep_residual_learning_blocks.py    From CNNArt with Apache License 2.0
def identity_block_3D(input_tensor, filters, kernel_size=(3, 3, 3), stage=0, block=0, se_enabled=False, se_ratio=16):
    numFilters1, numFilters2 = filters

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    x = Conv3D(filters=numFilters1,
               kernel_size=kernel_size,
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)

    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = LeakyReLU(alpha=0.01)(x)

    x = Conv3D(filters=numFilters2,
               kernel_size=kernel_size,
               strides=(1, 1, 1),
               padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)

    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block_3D(x, ratio=se_ratio)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    x = Add()([x, input_tensor])

    x = LeakyReLU(alpha=0.01)(x)

    return x 
Example #23
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateConv2D_ResBlock(filters, kernel_size=(3, 3), strides=(2, 2), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        skip = LeakyReLU()(output)

        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(skip)
        output = LeakyReLU()(output)

        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(output)
        output = LeakyReLU()(output)

        output = add([skip, output])
        return output
    return f 
Example #24
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateConv2DBNTranspose(filters, strides, kernel_size=(3, 3), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        output = Conv2DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)

        output = BatchNormalization(axis=1)(output)
        return LeakyReLU()(output)
    return f 
Example #25
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateConv2DTranspose(filters, strides, kernel_size=(3, 3), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        conv2d = Conv2DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)

        return LeakyReLU()(conv2d)
    return f 
Example #26
Source File: network.py    From CNNArt with Apache License 2.0 5 votes vote down vote up
def fCreateConv3DTranspose_ResBlock(filters, kernel_size=(3, 3, 1), strides=(2, 2, 1), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        output = Conv3DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        skip = LeakyReLU()(output)

        output = Conv3DTranspose(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(skip)
        output = LeakyReLU()(output)

        output = Conv3DTranspose(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(output)
        output = LeakyReLU()(output)

        output = add([skip, output])

        return output
    return f 
Example #27
Source File: mixed-stylegan.py    From StyleGAN-Keras with MIT License
def g_block(inp, style, noise, fil, u = True):
    
    b = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'ones')(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'zeros', bias_initializer = 'zeros')(noise)
    
    if u:
        out = UpSampling2D(interpolation = 'bilinear')(inp)
        out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal', bias_initializer = 'zeros')(out)
    else:
        out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp)

    out = add([out, n])
    out = AdaInstanceNormalization()([out, b, g])
    out = LeakyReLU(0.01)(out)
    
    b = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'ones')(style)
    b = Reshape([1, 1, fil])(b)
    g = Dense(fil, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(style)
    g = Reshape([1, 1, fil])(g)

    n = Conv2D(filters = fil, kernel_size = 1, padding = 'same', kernel_initializer = 'zeros', bias_initializer = 'zeros')(noise)
    
    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal', bias_initializer = 'zeros')(out)
    out = add([out, n])
    out = AdaInstanceNormalization()([out, b, g])
    out = LeakyReLU(0.01)(out)
    
    return out

# Convolution, Activation, Pooling, Convolution, Activation
Example #28
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateLeakyReluConv3D(filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        conv3d = Conv3D(filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        return LeakyReLU()(conv3d)
    return f 
Example #29
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateLeakyReluBNConv3D(filters, kernel_size, strides, padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        conv3d = Conv3D(filters,
                        kernel_size=kernel_size,
                        strides=strides,
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        return BatchNormalization(axis=1)(LeakyReLU()(conv3d))
    return f 
Example #30
Source File: network.py    From CNNArt with Apache License 2.0
def fCreateConv2DTranspose_ResBlock(filters, kernel_size=(3, 3), strides=(2, 2), padding='same'):
    l1_reg = 0
    l2_reg = 1e-6

    def f(inputs):
        output = Conv2DTranspose(filters=filters,
                                 kernel_size=kernel_size,
                                 strides=strides,
                                 padding=padding,
                                 kernel_regularizer=l1_l2(l1_reg, l2_reg))(inputs)
        skip = LeakyReLU()(output)

        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(skip)
        output = LeakyReLU()(output)

        output = Conv2D(filters,
                        kernel_size=kernel_size,
                        strides=(1, 1),
                        padding=padding,
                        kernel_regularizer=l1_l2(l1_reg, l2_reg))(output)
        output = LeakyReLU()(output)

        output = add([skip, output])

        return output
    return f