Python keras.layers.PReLU() Examples
The following are 18 code examples of keras.layers.PReLU(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
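Before the examples, a quick note on the layer itself: PReLU is a learnable variant of LeakyReLU whose negative-side slope (alpha) is trained along with the other weights, either per feature or shared across the axes listed in shared_axes. A minimal usage sketch, assuming a standard Keras installation (the model and layer names here are illustrative only, not from any example below):

    from keras.layers import Conv2D, Input, PReLU
    from keras.models import Model

    inputs = Input(shape=(32, 32, 3))
    x = Conv2D(16, (3, 3), padding='same')(inputs)
    # Sharing alpha across the two spatial axes leaves one learnable
    # slope per channel (16 parameters) instead of one per activation.
    x = PReLU(shared_axes=[1, 2])(x)
    model = Model(inputs=inputs, outputs=x)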
Example #1
Source File: models.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License | 6 votes |
def CapsuleNet(n_capsule=10, n_routings=5, capsule_dim=16,
               n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(170,))
    x = Embedding(21099, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(num_capsule=n_capsule, dim_capsule=capsule_dim,
                routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam',
                  metrics=['accuracy'])
    return model
Example #2
Source File: models.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License | 6 votes |
def CapsuleNet_v2(n_capsule=10, n_routings=5, capsule_dim=16,
                  n_recurrent=100, dropout_rate=0.2, l2_penalty=0.0001):
    K.clear_session()

    inputs = Input(shape=(200,))
    x = Embedding(20000, 300, trainable=True)(inputs)
    x = SpatialDropout1D(dropout_rate)(x)
    x = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x)
    x = PReLU()(x)
    x = Capsule(num_capsule=n_capsule, dim_capsule=capsule_dim,
                routings=n_routings, share_weights=True)(x)
    x = Flatten(name='concatenate')(x)
    x = Dropout(dropout_rate)(x)
    # fc = Dense(128, activation='sigmoid')(x)
    outputs = Dense(6, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='nadam',
                  metrics=['accuracy'])
    return model
Example #3
Source File: mtcnn.py From faceswap with GNU General Public License v3.0 | 6 votes |
def model_definition():
    """ Keras ONetwork for MTCNN """
    input_ = Input(shape=(48, 48, 3))
    var_x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input_)
    var_x = PReLU(shared_axes=[1, 2], name='prelu1')(var_x)
    var_x = MaxPool2D(pool_size=3, strides=2, padding='same')(var_x)
    var_x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='prelu2')(var_x)
    var_x = MaxPool2D(pool_size=3, strides=2)(var_x)
    var_x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='prelu3')(var_x)
    var_x = MaxPool2D(pool_size=2)(var_x)
    var_x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='prelu4')(var_x)
    var_x = Permute((3, 2, 1))(var_x)
    var_x = Flatten()(var_x)
    var_x = Dense(256, name='conv5')(var_x)
    var_x = PReLU(name='prelu5')(var_x)
    classifier = Dense(2, activation='softmax', name='conv6-1')(var_x)
    bbox_regress = Dense(4, name='conv6-2')(var_x)
    landmark_regress = Dense(10, name='conv6-3')(var_x)
    return [input_], [classifier, bbox_regress, landmark_regress]
Example #4
Source File: mtcnn.py From faceswap with GNU General Public License v3.0 | 6 votes |
def model_definition():
    """ Keras RNetwork for MTCNN """
    input_ = Input(shape=(24, 24, 3))
    var_x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input_)
    var_x = PReLU(shared_axes=[1, 2], name='prelu1')(var_x)
    var_x = MaxPool2D(pool_size=3, strides=2, padding='same')(var_x)
    var_x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='prelu2')(var_x)
    var_x = MaxPool2D(pool_size=3, strides=2)(var_x)
    var_x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='prelu3')(var_x)
    var_x = Permute((3, 2, 1))(var_x)
    var_x = Flatten()(var_x)
    var_x = Dense(128, name='conv4')(var_x)
    var_x = PReLU(name='prelu4')(var_x)
    classifier = Dense(2, activation='softmax', name='conv5-1')(var_x)
    bbox_regress = Dense(4, name='conv5-2')(var_x)
    return [input_], [classifier, bbox_regress]
Example #5
Source File: mtcnn.py From faceswap with GNU General Public License v3.0 | 5 votes |
def model_definition():
    """ Keras PNetwork for MTCNN """
    input_ = Input(shape=(None, None, 3))
    var_x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input_)
    var_x = PReLU(shared_axes=[1, 2], name='PReLU1')(var_x)
    var_x = MaxPool2D(pool_size=2)(var_x)
    var_x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='PReLU2')(var_x)
    var_x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(var_x)
    var_x = PReLU(shared_axes=[1, 2], name='PReLU3')(var_x)
    classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(var_x)
    bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(var_x)
    return [input_], [classifier, bbox_regress]
Example #6
Source File: advanced_activations_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_prelu_share():
    layer_test(layers.PReLU, kwargs={'shared_axes': 1},
               input_shape=(2, 3, 4))
Example #7
Source File: advanced_activations_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes |
def test_prelu():
    layer_test(layers.PReLU, kwargs={},
               input_shape=(2, 3, 4))
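These two tests cover both parameterisations of the layer. Note that shared_axes counts from 1, since axis 0 is the batch axis: with input_shape=(2, 3, 4) the default layer learns a separate alpha for each of the 3 x 4 features, while shared_axes=1 shares alpha along the second dimension, leaving 4 parameters. A small sketch to confirm the counts (this model is illustrative, not taken from the test suite):

    from keras import layers
    from keras.models import Sequential

    dense = Sequential([layers.PReLU(input_shape=(3, 4))])
    shared = Sequential([layers.PReLU(input_shape=(3, 4), shared_axes=1)])
    print(dense.count_params())   # 12 -> one alpha per feature
    print(shared.count_params())  # 4  -> alpha shared along axis 1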
Example #8
Source File: model.py From noise2noise with MIT License | 5 votes |
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    def _residual_block(inputs):
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        m = Add()([x, inputs])
        return m

    inputs = Input(shape=(None, None, input_channel_num))
    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
    x = PReLU(shared_axes=[1, 2])(x)
    x0 = x

    for i in range(resunit_num):
        x = _residual_block(x)

    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Add()([x, x0])
    x = Conv2D(input_channel_num, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    model = Model(inputs=inputs, outputs=x)

    return model


# UNet: code from https://github.com/pietz/unet-keras
Example #9
Source File: enhancer_gan.py From ImageEnhancer with MIT License | 5 votes |
def activate(self, layer):
    """ activate layer with given activation function
    :param layer: the input layer
    :return: the layer after activation
    """
    if self.activ == 'lrelu':
        return layers.LeakyReLU(0.2)(layer)
    elif self.activ == 'prelu':
        return layers.PReLU()(layer)
    else:
        return Activation(self.activ)(layer)
Example #10
Source File: enhancer.py From ImageEnhancer with MIT License | 5 votes |
def activate(self, layer):
    """ activate layer with given activation function
    :param layer: the input layer
    :return: the layer after activation
    """
    if self.activ == 'lrelu':
        return layers.LeakyReLU()(layer)
    elif self.activ == 'prelu':
        return layers.PReLU()(layer)
    else:
        return Activation(self.activ)(layer)
Example #11
Source File: model.py From n2n-watermark-remove with MIT License | 5 votes |
def get_srresnet_model(input_channel_num=3, feature_dim=64, resunit_num=16):
    def _residual_block(inputs):
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
        x = BatchNormalization()(x)
        x = PReLU(shared_axes=[1, 2])(x)
        x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
        x = BatchNormalization()(x)
        m = Add()([x, inputs])
        return m

    inputs = Input(shape=(None, None, input_channel_num))
    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(inputs)
    x = PReLU(shared_axes=[1, 2])(x)
    x0 = x

    for i in range(resunit_num):
        x = _residual_block(x)

    x = Conv2D(feature_dim, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Add()([x, x0])
    x = Conv2D(input_channel_num, (3, 3), padding="same", kernel_initializer="he_normal")(x)
    model = Model(inputs=inputs, outputs=x)

    return model


# UNet: code from https://github.com/pietz/unet-keras
Example #12
Source File: test_layers.py From nn-transfer with MIT License | 5 votes |
def test_prelu(self):
    keras_model = Sequential()
    keras_model.add(PReLU(input_shape=(3, 32, 32),
                          shared_axes=(2, 3), name='prelu'))
    keras_model.compile(loss=keras.losses.categorical_crossentropy,
                        optimizer=keras.optimizers.SGD())

    pytorch_model = PReLUNet()

    self.transfer(keras_model, pytorch_model)
    self.assertEqualPrediction(keras_model, pytorch_model, self.test_data)
Example #13
Source File: test_layers.py From nn-transfer with MIT License | 5 votes |
def __init__(self):
    super(PReLUNet, self).__init__()
    self.prelu = nn.PReLU(3)
Example #14
Source File: graph.py From Keras-TextClassification with MIT License | 5 votes |
def ResCNN(self, x):
    """ repeat of two conv
    :param x: tensor, input shape
    :return: tensor, result of two conv of resnet
    """
    # pre-activation
    # x = PReLU()(x)
    x = Conv1D(self.filters_num,
               kernel_size=1,
               padding='SAME',
               kernel_regularizer=l2(self.l2),
               bias_regularizer=l2(self.l2),
               activation=self.activation_conv,
               )(x)
    x = BatchNormalization()(x)
    # x = PReLU()(x)
    x = Conv1D(self.filters_num,
               kernel_size=1,
               padding='SAME',
               kernel_regularizer=l2(self.l2),
               bias_regularizer=l2(self.l2),
               activation=self.activation_conv,
               )(x)
    x = BatchNormalization()(x)
    # x = Dropout(self.dropout)(x)
    x = PReLU()(x)
    return x
Example #15
Source File: keras2_emitter.py From MMdnn with MIT License | 5 votes |
def emit_PRelu(self, IR_node, in_scope=False):
    if in_scope:
        raise NotImplementedError
    else:
        code = "{:<15} = layers.PReLU(name='{}')({})".format(
            IR_node.variable_name,
            IR_node.name,
            self.parent_variable_name(IR_node))
        return code
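For illustration, with hypothetical IR node names (variable_name and name both "prelu1", parent variable "conv1"), this emitter would return a code line such as:

    prelu1          = layers.PReLU(name='prelu1')(conv1)

with the variable name left-aligned in a 15-character field by the {:<15} format specifier.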
Example #16
Source File: graph.py From Keras-TextClassification with MIT License | 4 votes |
def create_model(self, hyper_parameters):
    """ Build the neural network; see (in Chinese)
        https://blog.csdn.net/dqcfkyqdxym3f8rb0/article/details/86662906
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(embedding_output)

    # First, the region embedding layer
    conv_1 = Conv1D(self.filters_num,
                    kernel_size=1,
                    padding='SAME',
                    kernel_regularizer=l2(self.l2),
                    bias_regularizer=l2(self.l2),
                    activation=self.activation_conv,
                    )(embedding_output_spatial)
    conv_1_prelu = PReLU()(conv_1)
    block = None
    layer_curr = 0
    for i in range(self.layer_repeats):
        if i == 0:  # the first block takes the embedding output as its input
            block = self.ResCNN(embedding_output_spatial)
            block_add = Add()([block, conv_1_prelu])
            block = MaxPooling1D(pool_size=self.pooling_size_strides[0],
                                 strides=self.pooling_size_strides[1])(block_add)
        elif self.layer_repeats - 1 == i or layer_curr == 1:  # the last repeat uses GlobalMaxPooling1D
            block_last = self.ResCNN(block)
            # ResNet shortcut (skip/residual) connection; an identity mapping, block + f(block)
            block_add = Add()([block_last, block])
            block = GlobalMaxPooling1D()(block_add)
            break
        else:  # intermediate repeats
            if K.int_shape(block)[1] // 2 < 8:  # guard against pooling when the sequence is too short, i.e. keep size >= 2
                layer_curr = 1
            block_mid = self.ResCNN(block)
            block_add = Add()([block_mid, block])
            block = MaxPooling1D(pool_size=self.pooling_size_strides[0],
                                 strides=self.pooling_size_strides[1])(block_add)

    # fully connected layers
    output = Dense(self.full_connect_unit, activation='linear')(block)
    output = BatchNormalization()(output)
    # output = PReLU()(output)
    output = Dropout(self.dropout)(output)
    output = Dense(self.label, activation=self.activate_classify)(output)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Example #17
Source File: keras2_emitter.py From MMdnn with MIT License | 4 votes |
def header_code(self):
    return """import keras
from keras.models import Model
from keras import layers
import keras.backend as K
import numpy as np
from keras.layers.core import Lambda
import tensorflow as tf


weights_dict = dict()


def load_weights_from_file(weight_file):
    try:
        weights_dict = np.load(weight_file, allow_pickle=True).item()
    except:
        weights_dict = np.load(weight_file, allow_pickle=True, encoding='bytes').item()
    return weights_dict


def set_layer_weights(model, weights_dict):
    for layer in model.layers:
        if layer.name in weights_dict:
            cur_dict = weights_dict[layer.name]
            current_layer_parameters = list()
            if layer.__class__.__name__ == "BatchNormalization":
                if 'scale' in cur_dict:
                    current_layer_parameters.append(cur_dict['scale'])
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
                current_layer_parameters.extend([cur_dict['mean'], cur_dict['var']])
            elif layer.__class__.__name__ == "Scale":
                if 'scale' in cur_dict:
                    current_layer_parameters.append(cur_dict['scale'])
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            elif layer.__class__.__name__ == "SeparableConv2D":
                current_layer_parameters = [cur_dict['depthwise_filter'], cur_dict['pointwise_filter']]
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            elif layer.__class__.__name__ == "Embedding":
                current_layer_parameters.append(cur_dict['weights'])
            elif layer.__class__.__name__ == "PReLU":
                gamma = np.ones(list(layer.input_shape[1:])) * cur_dict['gamma']
                current_layer_parameters.append(gamma)
            else:
                # rot
                if 'weights' in cur_dict:
                    current_layer_parameters = [cur_dict['weights']]
                if 'bias' in cur_dict:
                    current_layer_parameters.append(cur_dict['bias'])
            model.get_layer(layer.name).set_weights(current_layer_parameters)
    return model


def KitModel(weight_file = None):
    global weights_dict
    weights_dict = load_weights_from_file(weight_file) if not weight_file == None else None
"""
Example #18
Source File: rnn_feature.py From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License | 4 votes |
def RnnVersion1(n_recurrent=50, n_filters=30, dropout_rate=0.2, l2_penalty=0.0001,
                n_capsule=10, n_routings=5, capsule_dim=16):
    K.clear_session()

    def conv_block(x, n, kernel_size):
        x = Conv1D(n, kernel_size, activation='relu')(x)
        x = Conv1D(n_filters, kernel_size, activation='relu')(x)
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    def att_max_avg_pooling(x):
        x_att = AttentionWithContext()(x)
        x_avg = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        return concatenate([x_att, x_avg, x_max])

    inputs = Input(shape=(100,))
    emb = Embedding(9399, 300, trainable=True)(inputs)

    # model 0
    x0 = BatchNormalization()(emb)
    x0 = SpatialDropout1D(dropout_rate)(x0)
    x0 = Bidirectional(
        CuDNNGRU(n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x0)
    x0 = Conv1D(n_filters, kernel_size=3)(x0)
    x0 = PReLU()(x0)
    # x0 = Dropout(dropout_rate)(x0)
    x0 = att_max_avg_pooling(x0)

    # model 1
    x1 = SpatialDropout1D(dropout_rate)(emb)
    x1 = Bidirectional(
        CuDNNGRU(2 * n_recurrent, return_sequences=True,
                 kernel_regularizer=l2(l2_penalty),
                 recurrent_regularizer=l2(l2_penalty)))(x1)
    x1 = Conv1D(2 * n_filters, kernel_size=2)(x1)
    x1 = PReLU()(x1)
    # x1 = Dropout(dropout_rate)(x1)
    x1 = att_max_avg_pooling(x1)

    x = concatenate([x0, x1], name='concatenate')

    fc = Dense(128, activation='relu')(x)
    outputs = Dense(6, activation='softmax')(fc)  # , kernel_regularizer=l2(l2_penalty), activity_regularizer=l2(l2_penalty)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    return model