Python keras.layers.LeakyReLU() Examples

The following are 30 code examples of keras.layers.LeakyReLU(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers.
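Before the project examples, here is a minimal, self-contained sketch of the layer itself. LeakyReLU(alpha) computes f(x) = x for x >= 0 and f(x) = alpha * x otherwise; the toy model below is illustrative only and not taken from any of the projects.

from keras.models import Sequential
from keras.layers import Dense, LeakyReLU

model = Sequential()
# LeakyReLU is added as its own layer rather than via Dense(activation=...),
# because it takes a configurable negative slope `alpha`.
model.add(Dense(64, input_dim=20))
model.add(LeakyReLU(alpha=0.2))  # f(x) = x if x >= 0 else 0.2 * x
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')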
Example #1
Source Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    License: MIT License
def g_block(inp, fil, u = True):

    if u:
        out = UpSampling2D(interpolation = 'bilinear')(inp)
    else:
        out = Activation('linear')(inp)

    skip = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    return out 
Example #2
Source Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    License: MIT License
def d_block(inp, fil, p = True):

    skip = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(inp)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(inp)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(filters = fil, kernel_size = 3, padding = 'same', kernel_initializer = 'he_normal')(out)
    out = LeakyReLU(0.2)(out)

    out = Conv2D(fil, 1, padding = 'same', kernel_initializer = 'he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    if p:
        out = AveragePooling2D()(out)

    return out 
Example #3
Source Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    License: MIT License
def encoder(self):

        if self.E:
            return self.E

        inp = Input(shape = [im_size, im_size, 3])

        x = d_block(inp, 1 * cha)   #64
        x = d_block(x, 2 * cha)   #32
        x = d_block(x, 3 * cha)   #16
        x = d_block(x, 4 * cha)  #8
        x = d_block(x, 8 * cha)  #4
        x = d_block(x, 16 * cha, p = False)  #4

        x = Flatten()(x)

        x = Dense(16 * cha, kernel_initializer = 'he_normal')(x)
        x = LeakyReLU(0.2)(x)

        x = Dense(latent_size, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x)

        self.E = Model(inputs = inp, outputs = x)

        return self.E 
Example #4
Source Project: ai-platform   Author: produvia   File: yolov3_weights_to_keras.py    License: MIT License
def _conv_block(inp, convs, skip=True):
  x = inp
  count = 0
  len_convs = len(convs)
  for conv in convs:
    if count == (len_convs - 2) and skip:
      skip_connection = x
    count += 1
    if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding, as darknet prefers left and top
    x = Conv2D(conv['filter'],
           conv['kernel'],
           strides=conv['stride'],
           padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding, as darknet prefers left and top
           name='conv_' + str(conv['layer_idx']),
           use_bias=False if conv['bnorm'] else True)(x)
    if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
    if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
  return add([skip_connection, x]) if skip else x


# The SPP block uses three pooling layers of sizes [5, 9, 13] with stride one;
# all outputs, together with the input, are concatenated and fed to the FC block.
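A minimal sketch of such an SPP block, assuming a 4-D feature tensor x; the helper name _spp_block is hypothetical, not code from this project:

from keras.layers import MaxPooling2D, concatenate

def _spp_block(x):
    # Stride-1 pooling with 'same' padding preserves the spatial dimensions,
    # so all three branches can be concatenated with the input along channels.
    pools = [MaxPooling2D(pool_size=k, strides=1, padding='same')(x)
             for k in (5, 9, 13)]
    return concatenate(pools + [x])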
Example #5
Source Project: ImageAI   Author: OlafenwaMoses   File: yolo.py    License: MIT License
def _conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # unlike TensorFlow, darknet prefers left and top padding
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # unlike TensorFlow, darknet prefers left and top padding
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if do_skip else x 
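The ZeroPadding2D(((1,0),(1,0))) call in these blocks pads one row on top and one column on the left only. A quick illustrative shape check (the 416×416 input is an assumption, matching the common YOLOv3 input size):

from keras.layers import Input, ZeroPadding2D, Conv2D

x = Input(shape=(416, 416, 3))
# ((1, 0), (1, 0)) means (top, bottom) = (1, 0) rows and (left, right) = (1, 0) columns.
y = ZeroPadding2D(((1, 0), (1, 0)))(x)            # -> (None, 417, 417, 3)
y = Conv2D(32, 3, strides=2, padding='valid')(y)  # -> (None, 208, 208, 32)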
Example #6
Source Project: StarGAN-Keras   Author: hoangthang1607   File: StarGAN.py    License: MIT License
def build_discriminator(self):
        """Discriminator network with PatchGAN."""
        inp_img = Input(shape = (self.image_size, self.image_size, 3))
        x = ZeroPadding2D(padding = 1)(inp_img)
        x = Conv2D(filters = self.d_conv_dim, kernel_size = 4, strides = 2, padding = 'valid', use_bias = False)(x)
        x = LeakyReLU(0.01)(x)
    
        curr_dim = self.d_conv_dim
        for i in range(1, self.d_repeat_num):
            x = ZeroPadding2D(padding = 1)(x)
            x = Conv2D(filters = curr_dim*2, kernel_size = 4, strides = 2, padding = 'valid')(x)
            x = LeakyReLU(0.01)(x)
            curr_dim = curr_dim * 2
    
        kernel_size = int(self.image_size / np.power(2, self.d_repeat_num))
    
        out_src = ZeroPadding2D(padding = 1)(x)
        out_src = Conv2D(filters = 1, kernel_size = 3, strides = 1, padding = 'valid', use_bias = False)(out_src)
    
        out_cls = Conv2D(filters = self.c_dim, kernel_size = kernel_size, strides = 1, padding = 'valid', use_bias = False)(x)
        out_cls = Reshape((self.c_dim, ))(out_cls)
    
        return Model(inp_img, [out_src, out_cls]) 
Example #7
Source Project: keras-tqdm   Author: bstriner   File: mnist_model.py    License: MIT License
def build_model():
    x = Input((28 * 28,), name="x")
    hidden_dim = 512
    h = x
    h = Dense(hidden_dim)(h)
    h = BatchNormalization()(h)
    h = LeakyReLU(0.2)(h)
    h = Dropout(0.5)(h)
    h = Dense(hidden_dim // 2)(h)  # integer division: Dense expects an int number of units
    h = BatchNormalization()(h)
    h = LeakyReLU(0.2)(h)
    h = Dropout(0.5)(h)
    h = Dense(10)(h)
    h = Activation('softmax')(h)
    m = Model(x, h)
    m.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])
    return m 
Example #8
Source Project: ReinforcementLearning   Author: Urinx   File: neural_network.py    License: Apache License 2.0
def residual_layer(self, x, filters, kernel_size):
        conv_1 = self.conv_layer(x, filters, kernel_size)
        conv_2 = Conv2D(
            filters = filters,
            kernel_size = kernel_size,
            strides = (1, 1),
            padding = 'same',
            data_format = 'channels_first',
            use_bias = False,
            activation = 'linear',
            kernel_regularizer = regularizers.l2(self.reg_const)
            )(conv_1)
        bn = BatchNormalization(axis=1)(conv_2)
        merge_layer = add([x, bn])
        lrelu = LeakyReLU()(merge_layer)
        return lrelu 
Example #9
Source Project: ReinforcementLearning   Author: Urinx   File: neural_network.py    License: Apache License 2.0
def value_head(self, x):
        x = self.conv_layer(x, 1, (1, 1))
        x = Flatten()(x)
        x = Dense(
            self.value_head_hidden_layer_size,
            use_bias = False,
            activation = 'linear',
            kernel_regularizer = regularizers.l2(self.reg_const)
            )(x)
        x = LeakyReLU()(x)
        x = Dense(
            1,
            use_bias = False,
            activation = 'tanh',
            kernel_regularizer = regularizers.l2(self.reg_const),
            name = 'value_head'
            )(x)
        return x 
Example #10
def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding, as darknet prefers left and top
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding, as darknet prefers left and top
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if skip else x 
Example #11
Source Project: keras-yolo3   Author: experiencor   File: yolo.py    License: MIT License
def _conv_block(inp, convs, do_skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and do_skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # unlike TensorFlow, darknet prefers left and top padding
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # unlike TensorFlow, darknet prefers left and top padding
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if do_skip else x 
Example #12
Source Project: keras-yolo3   Author: experiencor   File: yolo3_one_file_to_detect_them_all.py    License: MIT License
def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0
    
    for conv in convs:
        if count == (len(convs) - 2) and skip:
            skip_connection = x
        count += 1
        
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding, as darknet prefers left and top
        x = Conv2D(conv['filter'], 
                   conv['kernel'], 
                   strides=conv['stride'], 
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding, as darknet prefers left and top
                   name='conv_' + str(conv['layer_idx']), 
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)

    return add([skip_connection, x]) if skip else x 
Example #13
Source Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    License: MIT License
def discriminator(self):

        if self.D:
            return self.D

        inp = Input(shape = [im_size, im_size, 3])
        inpl = Input(shape = [latent_size])

        #Latent input
        l = Dense(512, kernel_initializer = 'he_normal')(inpl)
        l = LeakyReLU(0.2)(l)
        l = Dense(512, kernel_initializer = 'he_normal')(l)
        l = LeakyReLU(0.2)(l)
        l = Dense(512, kernel_initializer = 'he_normal')(l)
        l = LeakyReLU(0.2)(l)

        x = d_block(inp, 1 * cha)   #64
        x = d_block(x, 2 * cha)   #32
        x = d_block(x, 3 * cha)   #16
        x = d_block(x, 4 * cha)  #8
        x = d_block(x, 8 * cha)  #4
        x = d_block(x, 16 * cha, p = False)  #4

        x = Flatten()(x)

        x = concatenate([x, l])

        x = Dense(16 * cha, kernel_initializer = 'he_normal')(x)
        x = LeakyReLU(0.2)(x)

        x = Dense(1, kernel_initializer = 'he_normal')(x)

        self.D = Model(inputs = [inp, inpl], outputs = x)

        return self.D 
Example #14
Source Project: MesoNet   Author: DariusAf   File: classifiers.py    License: Apache License 2.0
def init_model(self): 
        x = Input(shape = (IMGWIDTH, IMGWIDTH, 3))
        
        x1 = Conv2D(8, (3, 3), padding='same', activation = 'relu')(x)
        x1 = BatchNormalization()(x1)
        x1 = MaxPooling2D(pool_size=(2, 2), padding='same')(x1)
        
        x2 = Conv2D(8, (5, 5), padding='same', activation = 'relu')(x1)
        x2 = BatchNormalization()(x2)
        x2 = MaxPooling2D(pool_size=(2, 2), padding='same')(x2)
        
        x3 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x2)
        x3 = BatchNormalization()(x3)
        x3 = MaxPooling2D(pool_size=(2, 2), padding='same')(x3)
        
        x4 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x3)
        x4 = BatchNormalization()(x4)
        x4 = MaxPooling2D(pool_size=(4, 4), padding='same')(x4)
        
        y = Flatten()(x4)
        y = Dropout(0.5)(y)
        y = Dense(16)(y)
        y = LeakyReLU(alpha=0.1)(y)
        y = Dropout(0.5)(y)
        y = Dense(1, activation = 'sigmoid')(y)

        return KerasModel(inputs = x, outputs = y) 
Example #15
Source Project: MesoNet   Author: DariusAf   File: classifiers.py    License: Apache License 2.0
def init_model(self):
        x = Input(shape = (IMGWIDTH, IMGWIDTH, 3))
        
        x1 = self.InceptionLayer(1, 4, 4, 2)(x)
        x1 = BatchNormalization()(x1)
        x1 = MaxPooling2D(pool_size=(2, 2), padding='same')(x1)
        
        x2 = self.InceptionLayer(2, 4, 4, 2)(x1)
        x2 = BatchNormalization()(x2)
        x2 = MaxPooling2D(pool_size=(2, 2), padding='same')(x2)        
        
        x3 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x2)
        x3 = BatchNormalization()(x3)
        x3 = MaxPooling2D(pool_size=(2, 2), padding='same')(x3)
        
        x4 = Conv2D(16, (5, 5), padding='same', activation = 'relu')(x3)
        x4 = BatchNormalization()(x4)
        x4 = MaxPooling2D(pool_size=(4, 4), padding='same')(x4)
        
        y = Flatten()(x4)
        y = Dropout(0.5)(y)
        y = Dense(16)(y)
        y = LeakyReLU(alpha=0.1)(y)
        y = Dropout(0.5)(y)
        y = Dense(1, activation = 'sigmoid')(y)

        return KerasModel(inputs = x, outputs = y) 
Example #16
Source Project: sesemi   Author: vuptran   File: wrn.py    License: MIT License
def initial_conv(input):
    x = Conv2D(16, (3, 3), padding='same', **conv_params)(input)
    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)
    return x 
Example #17
Source Project: sesemi   Author: vuptran   File: wrn.py    License: MIT License
def expand_conv(init, base, k, strides=(1, 1)):
    x = Conv2D(base * k, (3, 3), padding='same',
               strides=strides, **conv_params)(init)
    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)

    x = Conv2D(base * k, (3, 3), padding='same', **conv_params)(x)

    skip = Conv2D(base * k, (1, 1), padding='same',
                  strides=strides, **conv_params)(init)

    m = Add()([x, skip])
    return m 
Example #18
Source Project: sesemi   Author: vuptran   File: wrn.py    License: MIT License
def conv1_block(input, k=1, dropout=0.0):
    init = input
    
    x = BatchNormalization(**bn_params)(input)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(16 * k, (3, 3), padding='same', **conv_params)(x)

    if dropout > 0.0: x = Dropout(dropout)(x)
    
    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(16 * k, (3, 3), padding='same', **conv_params)(x)

    m = Add()([init, x])
    return m 
Example #19
Source Project: sesemi   Author: vuptran   File: wrn.py    License: MIT License
def conv3_block(input, k=1, dropout=0.0):
    init = input

    x = BatchNormalization(**bn_params)(input)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(64 * k, (3, 3), padding='same', **conv_params)(x)

    if dropout > 0.0: x = Dropout(dropout)(x)

    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(64 * k, (3, 3), padding='same', **conv_params)(x)

    m = Add()([init, x])
    return m 
Example #20
Source Project: posewarp-cvpr2018   Author: balakg   File: networks.py    License: MIT License
def my_conv(x_in, nf, ks=3, strides=1, activation='lrelu', name=None):
    x_out = Conv2D(nf, kernel_size=ks, padding='same', strides=strides)(x_in)

    if activation == 'lrelu':
        x_out = LeakyReLU(0.2, name=name)(x_out)
    elif activation != 'none':
        x_out = Activation(activation, name=name)(x_out)

    return x_out 
Example #21
Source Project: voxelmorph   Author: voxelmorph   File: networks.py    License: GNU General Public License v3.0
def conv_block(x_in, nf, strides=1):
    """
    Specific convolution module: a convolution followed by LeakyReLU.
    """
    ndims = len(x_in.get_shape()) - 2
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    Conv = getattr(KL, 'Conv%dD' % ndims)
    x_out = Conv(nf, kernel_size=3, padding='same',
                 kernel_initializer='he_normal', strides=strides)(x_in)
    x_out = LeakyReLU(0.2)(x_out)
    return x_out 
Example #22
Source Project: MMdnn   Author: microsoft   File: keras2_emitter.py    License: MIT License
def emit_LeakyRelu(self, IR_node, in_scope=False):
        code = "{:<15} = layers.LeakyReLU(name='{}', alpha = {})({})".format(
            IR_node.variable_name,
            IR_node.name,
            IR_node.get_attr('alpha'),
            self.parent_variable_name(IR_node))
        return code 
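For a concrete sense of the output, an IR node whose variable name is leaky_3, with alpha 0.1 and parent variable conv_3 (hypothetical names), would yield a line like the following in the generated model file, where layers is keras.layers:

leaky_3         = layers.LeakyReLU(name='leaky_3', alpha = 0.1)(conv_3)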
Example #23
Source Project: CROWN-IBP   Author: huanzhang12   File: mnist_cifar_models.py    License: BSD 2-Clause "Simplified" License
def get_model_meta(filename):
    print("Loading model " + filename)
    global use_tf_keras
    global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    try:
        from keras.models import load_model as load_model_keras
        ret = get_model_meta_real(filename, load_model_keras)
        # model is successfully loaded. Import layers from keras
        from keras.models import Sequential
        from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import LeakyReLU
        from keras import regularizers
        from keras import backend as K
        print("Model imported using keras")
    except (KeyboardInterrupt, SystemExit, SyntaxError, NameError, IndentationError):
        raise
    except:
        print("Failed to load model with keras. Trying tf.keras...")
        use_tf_keras = True
        from tensorflow.keras.models import load_model as load_model_tf
        ret = get_model_meta_real(filename, load_model_tf)
        # model is successfully loaded. Import layers from tensorflow.keras
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from tensorflow.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.keras.layers import LeakyReLU
        from tensorflow.keras import regularizers
        from tensorflow.keras import backend as K
        print("Model imported using tensorflow.keras")
    # put imported functions in global
    Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K = \
        Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    return ret 
Example #24
Source Project: CROWN-IBP   Author: huanzhang12   File: mnist_cifar_models.py    License: BSD 2-Clause "Simplified" License
def get_model_meta_real(filename, model_loader):
    model = model_loader(filename, custom_objects = {"fn": lambda y_true, y_pred: y_pred, "tf": tf})
    json_string = model.to_json()
    model_meta = json.loads(json_string)
    weight_dims = []
    activations = set()
    activation_param = None
    input_dim = []
    # print(model_meta)
    try:
        # for keras
        model_layers = model_meta['config']['layers']
    except (KeyError, TypeError):
        # for tensorflow.keras
        model_layers = model_meta['config']
    for i, layer in enumerate(model_layers):
        if i == 0 and layer['class_name'] == "Flatten":
            input_dim = layer['config']['batch_input_shape']
        if layer['class_name'] == "Dense":
            units = layer['config']['units']
            weight_dims.append(units)
            activation = layer['config']['activation']
            if activation != 'linear':
                activations.add(activation)
        elif layer['class_name'] == "Activation":
            activation = layer['config']['activation']
            activations.add(activation)
        elif layer['class_name'] == "LeakyReLU":
            activation_param = layer['config']['alpha']
            activations.add("leaky")
        elif layer['class_name'] == "Lambda":
            if "arctan" in layer['config']["name"]:
                activation = "arctan"
                activations.add("arctan")
    assert len(activations) == 1, "only one activation is supported," + str(activations)
    return weight_dims, list(activations)[0], activation_param, input_dim 
Example #25
Source Project: Attendance-using-Face   Author: satinder147   File: modelArch.py    License: MIT License
def arch(self):
        self.model.add(Dense(64,input_dim=128))
        self.model.add(LeakyReLU(alpha=0.1))
        self.model.add(Dense(32))
        self.model.add(LeakyReLU(alpha=0.1))
        self.model.add(Dense(16))
        self.model.add(LeakyReLU(alpha=0.1))
        self.model.add(Dense(self.classes))
        self.model.add(Activation('softmax'))

        return self.model 
Example #26
Source Project: ReinforcementLearning   Author: Urinx   File: neural_network.py    License: Apache License 2.0
def build_model(self):
        """Construct a convolutional neural network with Resnet-style skip connections.

        Network Diagram:                                                                        [value head]
                              |---------------------------------|                   /---C---B---R---F---D---R---D---T
        I-----C-----B-----R---o---C-----B-----R-----C-----B-----M-----R--- ..... ---|
              \___________/     \___________________________________/               \---C---B---R---F---D---S [policy head]
           [Convolutional layer]          [Residual layer]

        I - input
        B - BatchNormalization
        R - Rectifier non-linearity, LeakyReLU
        T - tanh
        C - Conv2D
        F - Flatten
        D - Dense
        M - merge, add
        S - Softmax
        O - output
        """
        main_input = Input(shape=self.input_dim, name='main_input')

        x = self.conv_layer(main_input, self.conv_layer_filters, self.conv_layer_kernel_size)
        for _ in range(self.residual_layer_num):
            x = self.residual_layer(x, self.conv_layer_filters, self.conv_layer_kernel_size)

        vh = self.value_head(x)
        ph = self.policy_head(x)

        model = Model(inputs=main_input, outputs=[vh, ph])
        model.compile(
            loss=['mean_squared_error', 'categorical_crossentropy'],
            optimizer=SGD(lr=self.learning_rate, momentum=self.momentum)
            )

        return model 
Example #27
Source Project: ReinforcementLearning   Author: Urinx   File: neural_network.py    License: Apache License 2.0
def conv_layer(self, x, filters, kernel_size):
        conv = Conv2D(
            filters = filters,
            kernel_size = kernel_size,
            strides = (1, 1),
            padding = 'same',
            data_format = 'channels_first',
            use_bias = False,
            activation = 'linear',
            kernel_regularizer = regularizers.l2(self.reg_const)
            )(x)
        bn = BatchNormalization(axis=1)(conv)
        lrelu = LeakyReLU()(bn)
        return lrelu 
Example #28
Source Project: research   Author: commaai   File: autoencoder.py    License: BSD 3-Clause "New" or "Revised" License
def encoder(batch_size, df_dim, ch, rows, cols):

    X = Input(batch_shape=(batch_size, rows[-1], cols[-1], ch))
    model = Convolution2D(df_dim, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h0_conv", dim_ordering="tf", init=normal)(X)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*2, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h1_conv", dim_ordering="tf")(model)
    model = BN(mode=2, axis=3, name="e_bn1", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*4, 5, 5, subsample=(2, 2), name="e_h2_conv", border_mode="same",
                          dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="e_bn2", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*8, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h3_conv", dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="e_bn3", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)
    model = Flatten()(model)

    mean = Dense(z_dim, name="e_h3_lin", init=normal)(model)
    logsigma = Dense(z_dim, name="e_h4_lin", activation="tanh", init=normal)(model)
    meansigma = Model([X], [mean, logsigma])
    return meansigma 
Example #29
Source Project: research   Author: commaai   File: autoencoder.py    License: BSD 3-Clause "New" or "Revised" License
def discriminator(batch_size, df_dim, ch, rows, cols):
    X = Input(batch_shape=(batch_size, rows[-1], cols[-1], ch))
    model = Convolution2D(df_dim, 5, 5, subsample=(2, 2), border_mode="same",
                          batch_input_shape=(batch_size, rows[-1], cols[-1], ch),
                          name="d_h0_conv", dim_ordering="tf", init=normal)(X)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*2, 5, 5, subsample=(2, 2), border_mode="same",
                          name="d_h1_conv", dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="d_bn1", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*4, 5, 5, subsample=(2, 2), border_mode="same",
                          name="d_h2_conv", dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="d_bn2", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*8, 5, 5, subsample=(2, 2), border_mode="same",
                          name="d_h3_conv", dim_ordering="tf", init=normal)(model)

    dec = BN(mode=2, axis=3, name="d_bn3", gamma_init=mean_normal, epsilon=1e-5)(model)
    dec = LeakyReLU(.2)(dec)
    dec = Flatten()(dec)
    dec = Dense(1, name="d_h3_lin", init=normal)(dec)

    output = Model([X], [dec, model])

    return output 
Example #30
Source Project: DeepFMPO   Author: stan-his   File: models.py    License: MIT License
def build_models(inp_shape):

    # Build the actor
    inp = Input(inp_shape)
    hidden_inp = LeakyReLU(0.1)(TimeDistributed(Dense(N_DENSE, activation="linear"))(inp))
    hidden = LSTM(N_LSTM, return_sequences=True)(hidden_inp)
    hidden = Flatten()(hidden)

    hidden2 = LSTM(N_LSTM, return_sequences=True, go_backwards=True)(hidden_inp)
    hidden2 = Flatten()(hidden2)

    inp2 = Input((1,))
    hidden = Concatenate()([hidden, hidden2, inp2])

    hidden = LeakyReLU(0.1)(Dense(N_DENSE2, activation="linear")(hidden))
    out = Dense(n_actions, activation="softmax", activity_regularizer=l2(0.001))(hidden)

    actor = Model([inp,inp2], out)
    actor.compile(loss=maximization, optimizer=Adam(0.0005))


    # Build the critic
    inp = Input(inp_shape)
    hidden = LeakyReLU(0.1)(TimeDistributed(Dense(N_DENSE, activation="linear"))(inp))
    hidden = Bidirectional(LSTM(2*N_LSTM))(hidden)

    inp2 = Input((1,))
    hidden = Concatenate()([hidden, inp2])
    hidden = LeakyReLU(0.1)(Dense(N_DENSE2, activation="linear")(hidden))
    out = Dense(1, activation="linear")(hidden)

    critic = Model([inp,inp2], out)
    critic.compile(loss="MSE", optimizer=Adam(0.0001))


    return actor, critic