Python keras.layers.AveragePooling1D() Examples

The following are 19 code examples of keras.layers.AveragePooling1D(), drawn from open-source projects. You can go to the original project or source file noted above each example. You may also want to check out the other available functions and classes of the module keras.layers.
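Before diving into the project examples, here is a minimal standalone sketch (shapes are made up for illustration) of what AveragePooling1D does: it slides a window along the temporal axis and averages within it, shrinking the number of steps while leaving the feature axis untouched.

from keras.models import Sequential
from keras.layers import AveragePooling1D

# pool_size=2 with the default strides (strides = pool_size) halves the
# steps axis; the feature axis is unchanged: (None, 8, 4) -> (None, 4, 4)
model = Sequential([AveragePooling1D(pool_size=2, input_shape=(8, 4))])
print(model.output_shape)  # (None, 4, 4)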
Example #1
Source File: models.py    From delft with Apache License 2.0
def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a,x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', 
        optimizer='adam', 
        metrics=['accuracy'])
    return model


# bidirectional LSTM with attention layer 
Example #2
Source File: gatings.py    From NNCF with MIT License
def get_contextual_spatial_gated_input(X, conf_dict):
    # X: input to be gated, (None, steps, x_dim)
    # return X' = X * sigmoid(Dense(Average(f(X)))), f is a non-linear function.
    assert len(X._keras_shape) == 3, [X._keras_shape]
    seq_len, x_dim = X._keras_shape[1], X._keras_shape[2]
    gating_hidden_dim = conf_dict['gating_hidden_dim']
    gating_hidden_actv = conf_dict['gating_hidden_actv']

    Xp = ReshapeBatchAdhoc()(X)
    Xp = Dense(gating_hidden_dim, activation=gating_hidden_actv)(Xp)
    #Xp = Lambda(lambda x: x * 0)(Xp)
    Xp = ReshapeBatchAdhoc(mid_dim=seq_len)(Xp)
    Xp = AveragePooling1D(seq_len)(Xp)  # (None, 1, x_dim)
    Xp = Reshape((Xp._keras_shape[-1], ))(Xp)
    Xp = Dense(x_dim, activation='sigmoid')(Xp)
    Xp = Reshape((1, x_dim))(Xp)
    X = DotMergeAdhoc()([X, Xp])
    return X 
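The gating above is a squeeze-and-excitation-style mechanism: average a non-linear transform of X over time, derive a per-feature sigmoid gate from that average, and rescale X with it. A numpy sketch of the math, ignoring the custom ReshapeBatchAdhoc/DotMergeAdhoc plumbing and using made-up dimensions and stand-in weights:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

steps, x_dim, hidden = 5, 8, 16
X = np.random.randn(steps, x_dim)       # one sample: (steps, x_dim)
W1 = np.random.randn(x_dim, hidden)     # stand-in for the hidden Dense layer
W2 = np.random.randn(hidden, x_dim)     # stand-in for the gate Dense layer

avg = np.tanh(X @ W1).mean(axis=0)      # Average(f(X)): the AveragePooling1D step, shape (hidden,)
gate = sigmoid(avg @ W2)                # per-feature gate, shape (x_dim,)
X_gated = X * gate                      # broadcast over the steps axis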
Example #3
Source File: CPSC_model.py    From CPSC_Scheme with MIT License
def __backbone(inp, C=0.001, initial='he_normal'):
        """
        # 用于信号片段特征学习的卷积层组合
        :param inp:  keras tensor, 单个信号切片输入
        :param C:   double, 正则化系数, 默认0.001
        :param initial:  str, 初始化方式, 默认he_normal
        :return: keras tensor, 单个信号切片经过卷积层后的输出
        """
        net = Conv1D(4, 31, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(inp)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
        net = AveragePooling1D(5, 5)(net)

        net = Conv1D(8, 11, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
        net = AveragePooling1D(5, 5)(net)

        net = Conv1D(8, 7, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
        net = AveragePooling1D(5, 5)(net)

        net = Conv1D(16, 5, padding='same', kernel_initializer=initial, kernel_regularizer=regularizers.l2(C))(net)
        net = BatchNormalization()(net)
        net = Activation('relu')(net)
        net = AveragePooling1D(int(net.shape[1]), int(net.shape[1]))(net)

        return net 
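Two details are worth noting here: AveragePooling1D(5, 5) passes pool_size and strides positionally, so each block downsamples the sequence five-fold, and the final call pools over the entire remaining length, emulating a global average pool while keeping a steps axis of size 1. An explicit keyword spelling of the same calls, as a sketch with an illustrative input length:

from keras.layers import Input, AveragePooling1D

inp = Input(shape=(1000, 4))                               # illustrative shape
net = AveragePooling1D(pool_size=5, strides=5)(inp)        # (None, 200, 4)
# Pooling across the full remaining length acts like a global average pool
# that keeps a steps axis of size 1:
net = AveragePooling1D(pool_size=200, strides=200)(net)    # (None, 1, 4)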
Example #4
Source File: models.py    From delft with Apache License 2.0
def lstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix],
    #              trainable=False)(inp)
    x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate)(input_layer)
    #x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
    x = Dropout(dropout_rate)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a,x_b])
    x = Dense(dense_size, activation="relu")(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()
    model.compile(loss='binary_crossentropy', 
                optimizer='adam', 
                metrics=['accuracy'])
    return model


# bidirectional LSTM 
Example #5
Source File: malwaresnet.py    From youarespecial with MIT License
def ResidualBlock1D_helper(layers, kernel_size, filters, final_stride=1):
    def f(_input):
        basic = _input
        for ln in range(layers):
            #basic = BatchNormalization()( basic ) # triggers known keras bug w/ TimeDistributed: https://github.com/fchollet/keras/issues/5221
            basic = ELU()(basic)  
            basic = Conv1D(filters, kernel_size, kernel_initializer='he_normal',
                           kernel_regularizer=l2(1.e-4), padding='same')(basic)

        # note: with pool_size=1 this layer performs no averaging, only strided subsampling
        return AveragePooling1D(pool_size=1, strides=final_stride)(Add()([_input, basic]))

    return f 
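As the in-code note says, AveragePooling1D with pool_size=1 averages over a single timestep, so no actual averaging happens; paired with strides=final_stride it is plain strided subsampling of the residual sum. A minimal sketch with illustrative shapes:

from keras.layers import Input, AveragePooling1D

inp = Input(shape=(100, 16))
# Each "average" covers exactly one timestep (a no-op); strides=2 then
# keeps every second timestep: (None, 100, 16) -> (None, 50, 16)
out = AveragePooling1D(pool_size=1, strides=2)(inp)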
Example #6
Source File: gatings.py    From NNCF with MIT License
def get_contextual_temporal_gated_input(X, conf_dict):
    # X: input to be gated, (None, steps, x_dim)
    # return X' = X * c * softmax(X.Average(f(X))), f is a non-linear function.
    assert len(X._keras_shape) == 3, [X._keras_shape]
    seq_len, x_dim = X._keras_shape[1], X._keras_shape[2]
    gating_hidden_dim = conf_dict['gating_hidden_dim']
    gating_hidden_actv = conf_dict['gating_hidden_actv']
    scale = conf_dict['scale']
    nl_choice = conf_dict['nl_choice']

    Xp = ReshapeBatchAdhoc()(X)
    Xp = Dense(gating_hidden_dim, activation=gating_hidden_actv)(Xp)
    Xp = ReshapeBatchAdhoc(mid_dim=seq_len)(Xp)
    Xp = AveragePooling1D(seq_len)(Xp)  # (None, 1, x_dim)
    Xp = Reshape((Xp._keras_shape[-1], ))(Xp)
    if nl_choice == 'nl':
        Xp = Dense(x_dim, activation='relu', bias=True)(Xp)
    elif nl_choice == 'bn+nl':
        Xp = BatchNormalization()(Xp)
        Xp = Dense(x_dim, activation='relu', bias=True)(Xp)
    elif nl_choice == 'bn+l':
        Xp = BatchNormalization()(Xp)
        Xp = Dense(x_dim, activation='linear', bias=True)(Xp)
    else:
        assert False, 'unknown nl_choice: %s' % nl_choice
    Xp = Reshape((1, x_dim))(Xp)  # (None, 1, x_dim)
    Xp = DotSumMergeAdhoc()([X, Xp])  # (None, steps, 1)
    if True:  # debug
        Xp = Activation('sigmoid')(Xp)  # (None, steps, 1)
    else:
        # the following can be uncommented to replace sigmoid with softmax
        Xp = Reshape((Xp._keras_shape[1], ))(Xp)  # (None, steps)
        Xp = Activation('softmax')(Xp)  # (None, steps)
        Xp = Reshape((Xp._keras_shape[-1], 1))(Xp)  # (None, steps, 1)
    X = DotMergeAdhoc(scale=scale)([X, Xp]) # (None, steps, x_dim)
    return X 
Example #7
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_GAP_f5_3_1fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=768, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
	X = Dropout(dropout_rate)(X)
	
	#-- Global average pooling + 1 FC layer
	X = GlobalAveragePooling1D()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_GAP_f5_3_1fd')	
	
#----------------------------------------------------------------------- 
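This and the following temporalCNN variants all interleave AveragePooling1D(pool_size=2, strides=2, padding='valid') between convolution blocks, halving the temporal length at each stage before a global average pool collapses it entirely. A standalone sketch of the length arithmetic, with a made-up input shape:

from keras.layers import Input, AveragePooling1D, GlobalAveragePooling1D

x = Input(shape=(24, 10))                                         # (None, 24, 10)
x = AveragePooling1D(pool_size=2, strides=2, padding='valid')(x)  # (None, 12, 10)
x = AveragePooling1D(pool_size=2, strides=2, padding='valid')(x)  # (None, 6, 10)
x = GlobalAveragePooling1D()(x)                                   # (None, 10)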
Example #8
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_GAP_f9_5_3fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=512, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	
	#-- Global average pooling + 1 FC layer
	X = GlobalAveragePooling1D()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_GAP_f9_5_3fd')	
	
#----------------------------------------------------------------------- 
Example #9
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_GAP_f17_9_5fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=256, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=512, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=512, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	
	#-- Global average pooling + 1 FC layer
	X = GlobalAveragePooling1D()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_GAP_f17_9_5fd')	


#----------------------------------------------------------------------- 
Example #10
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_f3_1_1fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_conv = 128 #-- will be doubled
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=nbunits_conv, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=nbunits_conv, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
	X = Dropout(dropout_rate)(X)
	
	#-- Flatten + 1 FC layer
	X = Flatten()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_f3_1_1fd')	
	

#----------------------------------------------------------------------- 
Example #11
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_f5_3_1fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_conv = 128 #-- will be doubled
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
	#~ X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	
	#-- Flatten + 1 FC layer
	X = Flatten()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_f5_3_1fd')	
	
#----------------------------------------------------------------------- 
Example #12
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_f9_5_3fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_conv = 128 #-- will be doubled
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=nbunits_conv, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=nbunits_conv*2**1, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=nbunits_conv*2**2, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	
	#-- Flatten + 1 FC layer
	X = Flatten()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_f9_5_3fd')	


#----------------------------------------------------------------------- 
Example #13
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_f17_9_5fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=128, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=256, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=384, kernel_size=5, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	
	#-- Flatten + 1 FC layer
	X = Flatten()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_f17_9_5fd')	


#----------------------------------------------------------------------- 
Example #14
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_f33_17_9fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=128, kernel_size=33, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=192, kernel_size=17, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=256, kernel_size=9, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	
	#-- Flatten + 1 FC layer
	X = Flatten()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_f33_17_9fd')	


#----------------------------------------------------------------------- 
Example #15
Source File: models.py    From delft with Apache License 2.0
def gru_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size,
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate))(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a,x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  #optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  optimizer='adam',
                  metrics=['accuracy'])
    return model


# 1 layer bid GRU 
Example #16
Source File: models.py    From delft with Apache License 2.0
def gru(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #input_layer = Input(shape=(maxlen,))
    input_layer = Input(shape=(maxlen, embed_size), )
    #embedding_layer = Embedding(max_features, embed_size, 
    #                            weights=[embedding_matrix], trainable=False)(input_layer)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate))(input_layer)
    x = Dropout(dropout_rate)(x)
    x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate))(x)
    #x = AttentionWeightedAverage(maxlen)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a,x_b], axis=1)
    #x = Dense(dense_size, activation="relu")(x)
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    output_layer = Dense(nb_classes, activation="sigmoid")(x)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSprop(clipvalue=1, clipnorm=1),
                  #optimizer='adam',
                  metrics=['accuracy'])
    return model 
Example #17
Source File: models.py    From delft with Apache License 2.0
def cnn3(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
    #inp = Input(shape=(maxlen, ))
    input_layer = Input(shape=(maxlen, embed_size), )
    #x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
                           recurrent_dropout=recurrent_dropout_rate)(input_layer)
    #x = Dropout(dropout_rate)(x) 

    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
    x = MaxPooling1D(pool_size=2)(x)
    x_a = GlobalMaxPool1D()(x)
    x_b = GlobalAveragePooling1D()(x)
    #x_c = AttentionWeightedAverage()(x)
    #x_a = MaxPooling1D(pool_size=2)(x)
    #x_b = AveragePooling1D(pool_size=2)(x)
    x = concatenate([x_a,x_b])
    #x = Dropout(dropout_rate)(x)
    x = Dense(dense_size, activation="relu")(x)
    x = Dense(nb_classes, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=x)
    model.summary()  
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model 
Example #18
Source File: architecture_pooling.py    From temporalCNN with GNU General Public License v3.0
def Archi_3CONV2AP_1FC256_GAP_f3_1_1fd(X, nbclasses):
	
	#-- get the input sizes
	m, L, depth = X.shape
	input_shape = (L,depth)
	
	#-- parameters of the architecture
	l2_rate = 1.e-6
	dropout_rate = 0.5
	nb_conv = 3
	nb_fc= 1
	nbunits_fc = 256 #-- will be doubled
	
	# Define the input placeholder.
	X_input = Input(input_shape)
		
	#-- nb_conv CONV layers
	X = conv_bn_relu(X_input, nbunits=768, kernel_size=3, kernel_regularizer=l2(l2_rate), padding='same')
	X = AveragePooling1D(pool_size=2, strides=2, padding='valid')(X)
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
	X = Dropout(dropout_rate)(X)
	X = conv_bn_relu(X, nbunits=1024, kernel_size=1, kernel_regularizer=l2(l2_rate), padding='same')
	X = Dropout(dropout_rate)(X)
	
	#-- Global average pooling + 1 FC layer
	X = GlobalAveragePooling1D()(X)
	for add in range(nb_fc):	
		X = fc_bn_relu_drop(X, nbunits=nbunits_fc, kernel_regularizer=l2(l2_rate), dropout_rate=dropout_rate)
		
	#-- SOFTMAX layer
	out = softmax(X, nbclasses, kernel_regularizer=l2(l2_rate))
		
	# Create model.
	return Model(inputs = X_input, outputs = out, name='Archi_3CONV2AP_1FC256_GAP_f3_1_1fd')		
	
	

#-----------------------------------------------------------------------		
#-----------------------------------------------------------------------

#--------------------- Switcher for running the architectures 
Example #19
Source File: layers_export.py    From Fabrik with GNU General Public License v3.0
def pooling(layer, layer_in, layerId, tensor=True):
    poolMap = {
        ('1D', 'MAX'): MaxPooling1D,
        ('2D', 'MAX'): MaxPooling2D,
        ('3D', 'MAX'): MaxPooling3D,
        ('1D', 'AVE'): AveragePooling1D,
        ('2D', 'AVE'): AveragePooling2D,
        ('3D', 'AVE'): AveragePooling3D,
    }
    out = {}
    layer_type = layer['params']['layer_type']
    pool_type = layer['params']['pool']
    padding = get_padding(layer)
    if (layer_type == '1D'):
        strides = layer['params']['stride_w']
        kernel = layer['params']['kernel_w']
        if (padding == 'custom'):
            p_w = layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding1D(padding=p_w)(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    elif (layer_type == '2D'):
        strides = (layer['params']['stride_h'], layer['params']['stride_w'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'])
        if (padding == 'custom'):
            p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
            out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    else:
        strides = (layer['params']['stride_h'], layer['params']['stride_w'],
                   layer['params']['stride_d'])
        kernel = (layer['params']['kernel_h'], layer['params']['kernel_w'],
                  layer['params']['kernel_d'])
        if (padding == 'custom'):
            p_h, p_w, p_d = layer['params']['pad_h'], layer['params']['pad_w'],\
                layer['params']['pad_d']
            out[layerId +
                'Pad'] = ZeroPadding3D(padding=(p_h, p_w, p_d))(*layer_in)
            padding = 'valid'
            layer_in = [out[layerId + 'Pad']]
    # Note - figure out a permanent fix for padding calculation of layers
    # in case padding is given in layer attributes
    # if ('padding' in layer['params']):
    #    padding = layer['params']['padding']
    out[layerId] = poolMap[(layer_type, pool_type)](
        pool_size=kernel, strides=strides, padding=padding)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out
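For orientation, here is a hypothetical layer specification in the shape this exporter appears to expect; the field names are inferred from the keys read above, not taken from Fabrik's documentation, and get_padding plus the caller's plumbing are assumed to exist:

# Hypothetical 1D average-pooling spec -- field names inferred from the code above:
layer = {
    'params': {
        'layer_type': '1D',   # selects AveragePooling1D from poolMap
        'pool': 'AVE',
        'stride_w': 2,        # becomes strides
        'kernel_w': 2,        # becomes pool_size
    }
}
# pooling(layer, [prev_tensor], 'pool1') would then return
# {'pool1': AveragePooling1D(pool_size=2, strides=2, padding=...)(prev_tensor)}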


# ********** Locally-connected Layers **********