Python keras.layers.PReLU() Examples

The following are 18 code examples of keras.layers.PReLU(), drawn from open-source projects; the project, author, file, and license are listed above each example. You may also want to check out the other functions and classes available in the module keras.layers.
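Before the project examples, a minimal self-contained sketch (not taken from any project below) may help: PReLU computes f(x) = x for x >= 0 and f(x) = alpha * x for x < 0, where alpha is a weight learned per input unit unless tied across dimensions with the shared_axes argument. Shapes and values here are illustrative only.

import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, PReLU

# Tiny illustrative model with a learnable negative slope.
inputs = Input(shape=(4,))
x = Dense(8)(inputs)
x = PReLU()(x)  # one learnable alpha per unit; PReLU(shared_axes=[1]) would share it
outputs = Dense(2, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam')
print(model.predict(np.zeros((1, 4))).shape)  # (1, 2)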
Example #1
Source Project: DigiX_HuaWei_Population_Age_Attribution_Predict   Author: WeavingWong   File: models.py    License: MIT License
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
#     fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Example #2
Source Project: DigiX_HuaWei_Population_Age_Attribution_Predict   Author: WeavingWong   File: models.py    License: MIT License
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(
        num_capsule=n_capsule, dim_capsule=capsule_dim,
        routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
#     fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Example #3
Source Project: faceswap   Author: deepfakes   File: mtcnn.py    License: GNU General Public License v3.0
def model_definition():
        """ Keras RNetwork for MTCNN """
        input_ = Input(shape=(24, 24, 3))
        var_x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input_)
        var_x = PReLU(shared_axes=[1, 2], name='prelu1')(var_x)
        var_x = MaxPool2D(pool_size=3, strides=2, padding='same')(var_x)

        var_x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
        var_x = PReLU(shared_axes=[1, 2], name='prelu2')(var_x)
        var_x = MaxPool2D(pool_size=3, strides=2)(var_x)

        var_x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(var_x)
        var_x = PReLU(shared_axes=[1, 2], name='prelu3')(var_x)
        var_x = Permute((3, 2, 1))(var_x)
        var_x = Flatten()(var_x)
        var_x = Dense(128, name='conv4')(var_x)
        var_x = PReLU(name='prelu4')(var_x)
        classifier = Dense(2, activation='softmax', name='conv5-1')(var_x)
        bbox_regress = Dense(4, name='conv5-2')(var_x)
        return [input_], [classifier, bbox_regress] 
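Note that model_definition returns lists of input and output tensors rather than a compiled model; faceswap wires them up elsewhere. A minimal, hypothetical way to turn them into a usable Keras model would be:

inputs, outputs = model_definition()
rnet = Model(inputs=inputs, outputs=outputs)  # outputs: face/no-face classifier and bounding-box regression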
Example #4
Source Project: faceswap   Author: deepfakes   File: mtcnn.py    License: GNU General Public License v3.0
def model_definition():
        """ Keras ONetwork for MTCNN """
        input_ = Input(shape=(48, 48, 3))
        var_x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input_)
        var_x = PReLU(shared_axes=[1, 2], name='prelu1')(var_x)
        var_x = MaxPool2D(pool_size=3, strides=2, padding='same')(var_x)
        var_x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
        var_x = PReLU(shared_axes=[1, 2], name='prelu2')(var_x)
        var_x = MaxPool2D(pool_size=3, strides=2)(var_x)
        var_x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(var_x)
        var_x = PReLU(shared_axes=[1, 2], name='prelu3')(var_x)
        var_x = MaxPool2D(pool_size=2)(var_x)
        var_x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(var_x)
        var_x = PReLU(shared_axes=[1, 2], name='prelu4')(var_x)
        var_x = Permute((3, 2, 1))(var_x)
        var_x = Flatten()(var_x)
        var_x = Dense(256, name='conv5')(var_x)
        var_x = PReLU(name='prelu5')(var_x)

        classifier = Dense(2, activation='softmax', name='conv6-1')(var_x)
        bbox_regress = Dense(4, name='conv6-2')(var_x)
        landmark_regress = Dense(10, name='conv6-3')(var_x)
        return [input_], [classifier, bbox_regress, landmark_regress] 
Example #5
Source Project: n2n-watermark-remove   Author: zxq2233   File: model.py    License: MIT License
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    def _residual_block(inputs):
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        m = Add()([x, inputs])

        return m

    inputs = Input(shape=(None, None, input_channel_num))
    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
    x = PReLU(shared_axes=[1, 2])(x)
    x0 = x

    for i in range(resunit_num):
        x = _residual_block(x)

    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Add()([x, x0])
    x = Conv2D(input_channel_num, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    model = Model(inputs=inputs, outputs=x)

    return model


# UNet: code from https://github.com/pietz/unet-keras 
Example #6
Source Project: MMdnn   Author: microsoft   File: keras2_emitter.py    License: MIT License
def emit_PRelu(self, IR_node, in_scope=False):
        if in_scope:
            raise NotImplementedError
        else:
            code = "{:<15} = layers.PReLU(name='{}')({})".format(
                IR_node.variable_name,
                IR_node.name,
                self.parent_variable_name(IR_node)
            )
            return code 
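As an illustration of the format string above, a hypothetical IR node whose variable name and name are both prelu1, with parent variable conv1, would be emitted as:

prelu1          = layers.PReLU(name='prelu1')(conv1)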
Example #7
Source Project: Keras-TextClassification   Author: yongzhuo   File: graph.py    License: MIT License
def ResCNN(self, x):
        """
            repeat of two conv
        :param x: tensor, input shape
        :return: tensor, result of two conv of resnet
        """
        # pre-activation
        # x = PReLU()(x)
        x = Conv1D(self.filters_num,
                   kernel_size=1,
                   padding='same',
                   kernel_regularizer=l2(self.l2),
                   bias_regularizer=l2(self.l2),
                   activation=self.activation_conv)(x)
        x = BatchNormalization()(x)
        # x = PReLU()(x)
        x = Conv1D(self.filters_num,
                   kernel_size=1,
                   padding='same',
                   kernel_regularizer=l2(self.l2),
                   bias_regularizer=l2(self.l2),
                   activation=self.activation_conv)(x)
        x = BatchNormalization()(x)
        # x = Dropout(self.dropout)(x)
        x = PReLU()(x)
        return x 
Example #8
Source Project: nn-transfer   Author: gzuidhof   File: test_layers.py    License: MIT License
def __init__(self):
        super(PReLUNet, self).__init__()
        self.prelu = nn.PReLU(3) 
Example #9
Source Project: nn-transfer   Author: gzuidhof   File: test_layers.py    License: MIT License
def test_prelu(self):
        keras_model = Sequential()
        keras_model.add(PReLU(input_shape=(3, 32, 32), shared_axes=(2, 3),
                              name='prelu'))
        keras_model.compile(loss=keras.losses.categorical_crossentropy,
                            optimizer=keras.optimizers.SGD())

        pytorch_model = PReLUNet()

        self.transfer(keras_model, pytorch_model)
        self.assertEqualPrediction(keras_model, pytorch_model, self.test_data) 
Example #10
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: advanced_activations_test.py    License: MIT License
def test_prelu():
    layer_test(layers.PReLU, kwargs={},
               input_shape=(2, 3, 4)) 
Example #11
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: advanced_activations_test.py    License: MIT License
def test_prelu_share():
    layer_test(layers.PReLU, kwargs={'shared_axes': 1},
               input_shape=(2, 3, 4)) 
Example #12
Source Project: faceswap   Author: deepfakes   File: mtcnn.py    License: GNU General Public License v3.0
def model_definition():
        """ Keras PNetwork for MTCNN """
        input_ = Input(shape=(None, None, 3))
        var_x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input_)
        var_x = PReLU(shared_axes=[1, 2], name='PReLU1')(var_x)
        var_x = MaxPool2D(pool_size=2)(var_x)
        var_x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
        var_x = PReLU(shared_axes=[1, 2], name='PReLU2')(var_x)
        var_x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(var_x)
        var_x = PReLU(shared_axes=[1, 2], name='PReLU3')(var_x)
        classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(var_x)
        bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(var_x)
        return [input_], [classifier, bbox_regress] 
Example #13
Source Project: noise2noise   Author: yu4u   File: model.py    License: MIT License
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    def _residual_block(inputs):
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        m = Add()([x, inputs])

        return m

    inputs = Input(shape=(None, None, input_channel_num))
    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
    x = PReLU(shared_axes=[1, 2])(x)
    x0 = x

    for i in range(resunit_num):
        x = _residual_block(x)

    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Add()([x, x0])
    x = Conv2D(input_channel_num, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    model = Model(inputs=inputs, outputs=x)

    return model


# UNet: code from https://github.com/pietz/unet-keras 
Example #14
Source Project: ImageEnhancer   Author: CongBao   File: enhancer_gan.py    License: MIT License
def activate(self, layer):
        """ activate layer with given activation function
            :param layer: the input layer
            :return: the layer after activation
        """
        if self.activ == 'lrelu':
            return layers.LeakyReLU(0.2)(layer)
        elif self.activ == 'prelu':
            return layers.PReLU()(layer)
        else:
            return Activation(self.activ)(layer) 
Example #15
Source Project: ImageEnhancer   Author: CongBao   File: enhancer.py    License: MIT License
def activate(self, layer):
        """ activate layer with given activation function
            :param layer: the input layer
            :return: the layer after activation
        """
        if self.activ == 'lrelu':
            return layers.LeakyReLU()(layer)
        elif self.activ == 'prelu':
            return layers.PReLU()(layer)
        else:
            return Activation(self.activ)(layer) 
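As a usage sketch (the call site is hypothetical; in the ImageEnhancer classes self.activ is set in the constructor to 'lrelu', 'prelu', or a built-in activation name such as 'relu'):

var_x = Conv2D(64, (3, 3), padding='same')(var_x)
var_x = self.activate(var_x)  # dispatches to LeakyReLU, PReLU, or Activation(self.activ)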
Example #16
Source Project: DigiX_HuaWei_Population_Age_Attribution_Predict   Author: WeavingWong   File: models.py    License: MIT License
def RnnVersion1(n_recurrent=50, n_filters=30, dropout_rate=0.2, l2_penalty=0.0001,
                n_capsule=10, n_routings=5, capsule_dim=16):
    K.clear_session()
    def conv_block(x, n, kernel_size):
        x = Conv1D(n, kernel_size, activation='relu')(x)
        x = Conv1D(n_filters, kernel_size, activation='relu')(x)
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])  
    def att_max_avg_pooling(x):
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    inputs = Input(shape=(170,))
    emb = Embedding(21099, 300, trainable=True)(inputs)

    # model 0
    x0 = BatchNormalization()(emb)
    x0 = SpatialDropout1D(dropout_rate)(x0)
    
    x0 = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x0)
    x0 = Conv1D(n_filters, kernel_size=3)(x0)
    x0 = PReLU()(x0)
#     x0 = Dropout(dropout_rate)(x0)
    x0 = att_max_avg_pooling(x0)

    # model 1
    x1 = SpatialDropout1D(dropout_rate)(emb)
    x1 = Bidirectional(
        CuDNNGRU(2*n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x1)
    x1 = Conv1D(2*n_filters, kernel_size=2)(x1)
    x1 = PReLU()(x1)
#     x1 = Dropout(dropout_rate)(x1)
    x1 = att_max_avg_pooling(x1)

    x = concatenate([x0, x1], name='concatenate')
    
#     fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)  # optionally add kernel_regularizer=l2(l2_penalty), activity_regularizer=l2(l2_penalty)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Example #17
Source Project: DigiX_HuaWei_Population_Age_Attribution_Predict   Author: WeavingWong   File: rnn_feature.py    License: MIT License
def RnnVersion1(n_recurrent=50, n_filters=30, dropout_rate=0.2, l2_penalty=0.0001,
                n_capsule=10, n_routings=5, capsule_dim=16):
    K.clear_session()
    def conv_block(x, n, kernel_size):
        x = Conv1D(n, kernel_size, activation='relu')(x)
        x = Conv1D(n_filters, kernel_size, activation='relu')(x)
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])  
    def att_max_avg_pooling(x):
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    inputs = Input(shape=(100,))
    emb = Embedding(9399, 300, trainable=True)(inputs)

    # model 0
    x0 = BatchNormalization()(emb)
    x0 = SpatialDropout1D(dropout_rate)(x0)
    
    x0 = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x0)
    x0 = Conv1D(n_filters, kernel_size=3)(x0)
    x0 = PReLU()(x0)
#     x0 = Dropout(dropout_rate)(x0)
    x0 = att_max_avg_pooling(x0)

    # model 1
    x1 = SpatialDropout1D(dropout_rate)(emb)
    x1 = Bidirectional(
        CuDNNGRU(2*n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x1)
    x1 = Conv1D(2*n_filters, kernel_size=2)(x1)
    x1 = PReLU()(x1)
#     x1 = Dropout(dropout_rate)(x1)
    x1 = att_max_avg_pooling(x1)

    x = concatenate([x0, x1], name='concatenate')
    
    fc = Dense(128, activation='relu')(x)
    outputs = Dense(6, activation='softmax')(fc)  # optionally add kernel_regularizer=l2(l2_penalty), activity_regularizer=l2(l2_penalty)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
    return model 
Example #18
Source Project: MMdnn   Author: microsoft   File: keras2_emitter.py    License: MIT License
def header_code(self):
        return """import keras
from keras.models import Model
from keras import layers
import keras.backend as K
import numpy as np
from keras.layers.core import Lambda
import tensorflow as tf


weights_dict = dict()
def load_weights_from_file(weight_file):
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except:
        weights_dict = np.load(weight_file, allow_pickle=True, encoding='bytes').item()

    return weights_dict


def set_layer_weights(model, weights_dict):
    for layer in model.layers:
        if layer.name in weights_dict:
            cur_dict = weights_dict[layer.name]
            current_layer_parameters = list()
            if layer.__class__.__name__ == "BatchNormalization":
                if 'scale' in cur_dict:
                    current_layer_parameters.append(cur_dict['scale'])
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
                current_layer_parameters.extend([cur_dict['mean'], cur_dict['var']])
            elif layer.__class__.__name__ == "Scale":
                if 'scale' in cur_dict:
                    current_layer_parameters.append(cur_dict['scale'])
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            elif layer.__class__.__name__ == "SeparableConv2D":
                current_layer_parameters = [cur_dict['depthwise_filter'], cur_dict['pointwise_filter']]
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            elif layer.__class__.__name__ == "Embedding":
                current_layer_parameters.append(cur_dict['weights'])
            elif layer.__class__.__name__ == "PReLU":
                gamma = np.ones(list(layer.input_shape[1:])) * cur_dict['gamma']
                current_layer_parameters.append(gamma)
            else:
                # rot 
                if 'weights' in cur_dict:
                    current_layer_parameters = [cur_dict['weights']]
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            model.get_layer(layer.name).set_weights(current_layer_parameters)

    return model


def KitModel(weight_file = None):
    global weights_dict
    weights_dict = load_weights_from_file(weight_file) if weight_file is not None else None
        """