Python keras.layers.convolutional.Conv1D() Examples

The following are 26 code examples of keras.layers.convolutional.Conv1D(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module keras.layers.convolutional, or try the search function.
Example #1
Source File: models.py    From very-deep-convnets-raw-waveforms with Apache License 2.0 6 votes vote down vote up
def m_rec(num_classes=10):
    """Strided Conv1D front end over raw audio feeding two stacked LSTMs,
    finished by a dense layer and a softmax classifier over num_classes."""
    from keras.layers.recurrent import LSTM
    print('Using Model LSTM 1')
    model = Sequential()
    # Wide 80-sample kernel with stride 4 shortens the sequence before the RNN.
    model.add(Conv1D(64,
                     kernel_size=80,
                     strides=4,
                     padding='same',
                     kernel_initializer='glorot_uniform',
                     kernel_regularizer=regularizers.l2(l=0.0001),
                     input_shape=[AUDIO_LENGTH, 1]))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling1D(pool_size=4, strides=None))
    # First LSTM keeps the time axis; the second collapses it to one vector.
    for is_last in (False, True):
        model.add(LSTM(32,
                       dropout=0.2,
                       return_sequences=not is_last,
                       kernel_regularizer=regularizers.l2(l=0.0001)))
    model.add(Dense(32))
    model.add(Dense(num_classes, activation='softmax'))
    return model
Example #2
Source File: rnn_text.py    From EventForecast with GNU Lesser General Public License v3.0 5 votes vote down vote up
def model_cnn(vocab, weights, dataPath, batchn, epoch):
    """Train a 1-D CNN binary text classifier on batches streamed from
    numbered data files ('<dataPath><index>', indices wrapping at 2528).

    Args:
        vocab: vocabulary, passed through to build_dataset.
        weights: embedding weights, passed through to build_dataset.
        dataPath: path prefix for the numbered data files.
        batchn: number of consecutive files merged into one training round.
        epoch: stop once the running file index exceeds this value.

    Returns:
        The trained Keras model.
    """
    global LEN
    global DIM
    global BATCH
    testx, testy = build_dataset('%s%d'%(dataPath, 2528), vocab, weights=weights)
    testx = np.array(testx, dtype=np.float64)
    # BUG FIX: labels were built from testx (copy-paste error), which made the
    # validation data meaningless; convert the actual testy instead.
    testy = np.array(testy, dtype=np.float64)
    model = Sequential()
    #model.add(Embedding(400001, 50, input_length=LEN, mask_zero=False,weights=[embedModel]))
    model.add(Conv1D(input_shape=(LEN, DIM), filters=32, kernel_size=30, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    # NOTE(review): softmax on a hidden layer is unusual — presumably 'relu'
    # was intended; left unchanged to avoid altering training dynamics.
    model.add(Dense(250, activation='softmax'))
    # BUG FIX: softmax over a single unit always outputs 1.0, so the model
    # could never learn with binary_crossentropy; a single-unit binary head
    # must use sigmoid.
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    index = 0
    while True:
        # Merge batchn files into one in-memory training chunk.
        data, result = build_dataset('%s%d'%(dataPath, index%2528), vocab, weights)
        for i in range(1, batchn):
            index += 1
            newData, newResult = build_dataset('%s%d'%(dataPath, index), vocab, weights)
            data.extend(newData)
            result.extend(newResult)
        model.fit(np.array(data, dtype=np.float64), np.array(result, dtype=np.float64), epochs=10, batch_size=BATCH, verbose=2, validation_data = (testx,testy))
        # Checkpoint after every chunk so progress survives interruption.
        model.save('hotnews_c_%d_%d.h5'%(BATCH, index))
        predict = model.predict(testx)
        for i in range(testy.shape[0]):
            print(testy[i], predict[i])
        index += 1
        if index > epoch:
            return model
Example #3
Source File: multiclass.py    From intent_classifier with Apache License 2.0 5 votes vote down vote up
def cnn_model(self, params):
        """
        Method builds uncompiled intent_model of shallow-and-wide CNN.

        Args:
            params: dictionary of parameters for NN

        Returns:
            Uncompiled intent_model
        """
        # Allow kernel sizes to arrive as a space-separated string, e.g. "1 2 3".
        # NOTE(review): this normalizes self.opt['kernel_sizes_cnn'] but the
        # graph below reads params['kernel_sizes_cnn'] — presumably they alias;
        # confirm against the caller.
        if type(self.opt['kernel_sizes_cnn']) is str:
            self.opt['kernel_sizes_cnn'] = [int(x) for x in
                                            self.opt['kernel_sizes_cnn'].split(' ')]

        inp = Input(shape=(params['text_size'], params['embedding_size']))

        # One parallel conv branch per kernel size (the "wide" part of the net),
        # each reduced to a fixed-size vector by global max pooling.
        outputs = []
        for i in range(len(params['kernel_sizes_cnn'])):
            output_i = Conv1D(params['filters_cnn'], kernel_size=params['kernel_sizes_cnn'][i],
                              activation=None,
                              kernel_regularizer=l2(params['coef_reg_cnn']),
                              padding='same')(inp)
            output_i = BatchNormalization()(output_i)
            output_i = Activation('relu')(output_i)
            output_i = GlobalMaxPooling1D()(output_i)
            outputs.append(output_i)

        # Concatenate the per-kernel-size feature vectors into one representation.
        output = concatenate(outputs, axis=1)

        # Dense classifier head: dropout -> dense/BN/relu -> dropout -> dense/BN.
        output = Dropout(rate=params['dropout_rate'])(output)
        output = Dense(params['dense_size'], activation=None,
                       kernel_regularizer=l2(params['coef_reg_den']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=params['dropout_rate'])(output)
        output = Dense(self.n_classes, activation=None,
                       kernel_regularizer=l2(params['coef_reg_den']))(output)
        output = BatchNormalization()(output)
        # Sigmoid (not softmax): one independent probability per class.
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=inp, outputs=act_output)
        return model
Example #4
Source File: models.py    From very-deep-convnets-raw-waveforms with Apache License 2.0 5 votes vote down vote up
def m5(num_classes=10):
    """Build the 'M5' raw-waveform CNN: a wide strided stem conv followed by
    three conv/BN/ReLU/pool blocks with growing filter counts, global average
    pooling over time, and a softmax head.

    Args:
        num_classes: size of the softmax output layer.

    Returns:
        Uncompiled Keras Sequential model.
    """
    print('Using Model M5')
    m = Sequential()
    # Stem: 80-sample kernel with stride 4 over raw audio.
    m.add(Conv1D(128,
                 input_shape=[AUDIO_LENGTH, 1],
                 kernel_size=80,
                 strides=4,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=regularizers.l2(l=0.0001)))
    m.add(BatchNormalization())
    m.add(Activation('relu'))
    m.add(MaxPooling1D(pool_size=4, strides=None))
    # Three identical conv blocks, differing only in filter count; a loop
    # replaces the previous triplicated copy-paste.
    for filters in (128, 256, 512):
        m.add(Conv1D(filters,
                     kernel_size=3,
                     strides=1,
                     padding='same',
                     kernel_initializer='glorot_uniform',
                     kernel_regularizer=regularizers.l2(l=0.0001)))
        m.add(BatchNormalization())
        m.add(Activation('relu'))
        m.add(MaxPooling1D(pool_size=4, strides=None))
    m.add(Lambda(lambda x: K.mean(x, axis=1))) # Same as GAP for 1D Conv Layer
    m.add(Dense(num_classes, activation='softmax'))
    return m
Example #5
Source File: models.py    From very-deep-convnets-raw-waveforms with Apache License 2.0 5 votes vote down vote up
def m3(num_classes=10):
    """M3 baseline: two 256-filter conv blocks over raw audio, global average
    pooling over time, and a softmax classifier."""
    print('Using Model M3')
    net = Sequential()
    # Block 1: wide strided kernel over the raw waveform.
    net.add(Conv1D(256,
                   kernel_size=80,
                   strides=4,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001),
                   input_shape=[AUDIO_LENGTH, 1]))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(MaxPooling1D(pool_size=4, strides=None))
    # Block 2: narrow kernel refinement at the same width.
    net.add(Conv1D(256,
                   kernel_size=3,
                   strides=1,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001)))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(MaxPooling1D(pool_size=4, strides=None))
    # Mean over the time axis == global average pooling for 1-D feature maps.
    net.add(Lambda(lambda x: K.mean(x, axis=1)))
    net.add(Dense(num_classes, activation='softmax'))
    return net
Example #6
Source File: cnn.py    From keras-english-resume-parser-and-analyzer with MIT License 5 votes vote down vote up
def define_model(self, length, vocab_size):
        """Build and compile a three-channel CNN text classifier.

        The three channels are identical (Embedding -> Conv1D -> Dropout ->
        MaxPooling1D -> Flatten) except for the conv kernel size (4, 6, 8),
        so they are built in a loop instead of three copy-pasted branches.

        Args:
            length: input sequence length (tokens per document).
            vocab_size: size of the embedding vocabulary.

        Returns:
            Compiled Keras Model taking three identical inputs.
        """
        embedding_size = 100
        cnn_filter_size = 32

        inputs = []
        flats = []
        for kernel_size in (4, 6, 8):
            channel_input = Input(shape=(length,))
            embedding = Embedding(vocab_size, embedding_size)(channel_input)
            conv = Conv1D(filters=cnn_filter_size, kernel_size=kernel_size,
                          activation='relu')(embedding)
            drop = Dropout(0.5)(conv)
            pool = MaxPooling1D(pool_size=2)(drop)
            inputs.append(channel_input)
            flats.append(Flatten()(pool))

        merged = concatenate(flats)
        # interpretation
        dense1 = Dense(10, activation='relu')(merged)

        outputs = Dense(units=len(self.labels), activation='softmax')(dense1)

        model = Model(inputs=inputs, outputs=outputs)
        # compile
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        # summarize
        print(model.summary())
        return model
Example #7
Source File: cnn.py    From keras-english-resume-parser-and-analyzer with MIT License 5 votes vote down vote up
def create_model(self):
        """Build and compile a single-channel Conv1D text classifier,
        storing it on self.model."""
        embedding_size = 100
        self.model = Sequential()
        # Pipeline: embedding -> spatial dropout -> conv -> global max pool -> softmax.
        layer_stack = (
            Embedding(input_dim=self.vocab_size, input_length=self.max_len, output_dim=embedding_size),
            SpatialDropout1D(0.2),
            Conv1D(filters=256, kernel_size=5, padding='same', activation='relu'),
            GlobalMaxPool1D(),
            Dense(units=len(self.labels), activation='softmax'),
        )
        for layer in layer_stack:
            self.model.add(layer)

        self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
Example #8
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License 4 votes vote down vote up
def test_causal_dilated_conv():
    """Golden-value checks for causal and dilated Conv1D padding modes."""
    # Causal: left-pads with zeros so output t sees only inputs <= t.
    # With all-ones kernel of size 2: y[t] = x[t-1] + x[t], x[-1] treated as 0.
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=[[[0], [1], [3], [5]]]
               )

    # Non-causal: 'valid' padding shortens the output by kernel_size - 1 instead.
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'valid',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=[[[1], [3], [5]]]
               )

    # Causal dilated with larger kernel size:
    # dilation 2, kernel 3 -> y[t] = x[t-4] + x[t-2] + x[t] (zeros before t=0).
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(10, dtype='float32'), (1, 10, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 3,
                   'dilation_rate': 2,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]])
               )
Example #9
Source File: multiclass.py    From intent_classifier with Apache License 2.0 4 votes vote down vote up
def dcnn_model(self, params):
        """
        Method builds uncompiled intent_model of deep CNN.

        Args:
            params: dictionary of parameters for NN

        Returns:
            Uncompiled intent_model
        """
        # Allow kernel sizes / filter counts to arrive as space-separated strings.
        # NOTE(review): this normalizes self.opt[...] but the graph below reads
        # params[...] — presumably they alias; confirm against the caller.
        if type(self.opt['kernel_sizes_cnn']) is str:
            self.opt['kernel_sizes_cnn'] = [int(x) for x in
                                            self.opt['kernel_sizes_cnn'].split(' ')]

        if type(self.opt['filters_cnn']) is str:
            self.opt['filters_cnn'] = [int(x) for x in
                                       self.opt['filters_cnn'].split(' ')]

        inp = Input(shape=(params['text_size'], params['embedding_size']))

        output = inp

        # Stacked ("deep") conv blocks: conv -> BN -> ReLU -> max pool per layer.
        for i in range(len(params['kernel_sizes_cnn'])):
            output = Conv1D(params['filters_cnn'][i], kernel_size=params['kernel_sizes_cnn'][i],
                            activation=None,
                            kernel_regularizer=l2(params['coef_reg_cnn']),
                            padding='same')(output)
            output = BatchNormalization()(output)
            output = Activation('relu')(output)
            output = MaxPooling1D()(output)

        # Collapse the remaining time axis to a fixed-size vector.
        output = GlobalMaxPooling1D()(output)
        output = Dropout(rate=params['dropout_rate'])(output)
        output = Dense(params['dense_size'], activation=None,
                       kernel_regularizer=l2(params['coef_reg_den']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=params['dropout_rate'])(output)
        output = Dense(self.n_classes, activation=None,
                       kernel_regularizer=l2(params['coef_reg_den']))(output)
        output = BatchNormalization()(output)
        # Sigmoid (not softmax): one independent probability per class.
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=inp, outputs=act_output)
        return model
Example #10
Source File: kerasClassify.py    From emailinsight with MIT License 4 votes vote down vote up
def evaluate_conv_model(dataset, num_classes, maxlen=125,embedding_dims=250,max_features=5000,nb_filter=300,filter_length=3,num_hidden=250,dropout=0.25,verbose=True,pool_length=2,with_lstm=False):
    """Train and evaluate a word-level Conv1D text classifier.

    Args:
        dataset: ((X_train, Y_train), (X_test, Y_test)); X are integer token
            sequences, Y are categorical labels (presumably one-hot, given
            categorical_crossentropy — TODO confirm against caller).
        num_classes: number of output classes.
        maxlen: sequences are padded/truncated to this length.
        embedding_dims: embedding vector size.
        max_features: vocabulary size for the embedding.
        nb_filter, filter_length: Conv1D filter count and kernel size.
        num_hidden: hidden dense layer size (dense path only).
        dropout: dropout rate after embedding and hidden layer.
        verbose: print progress and scores.
        pool_length: truthy -> add MaxPooling1D(pool_size=2) after the conv.
        with_lstm: replace the Flatten/Dense path with an LSTM(125).

    Returns:
        (predictions, test_accuracy) tuple.
    """
    (X_train, Y_train), (X_test, Y_test) = dataset
    
    batch_size = 32
    nb_epoch = 7

    if verbose:
        print('Loading data...')
        print(len(X_train), 'train sequences')
        print(len(X_test), 'test sequences')
        print('Pad sequences (samples x time)')
    
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)

    if verbose:
        print('X_train shape:', X_train.shape)
        print('X_test shape:', X_test.shape)
        print('Build model...')

    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
    model.add(Dropout(dropout))

    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Conv1D(activation="relu", filters=nb_filter, kernel_size=filter_length, strides=1, padding="valid"))
    if pool_length:
        # we use standard max pooling (halving the output of the previous layer):
        # NOTE(review): pool_size is hard-coded to 2; the pool_length value
        # itself is only used as an on/off flag here.
        model.add(MaxPooling1D(pool_size=2))
    if with_lstm:
        model.add(LSTM(125))
    else:
        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())

        #We add a vanilla hidden layer:
        model.add(Dense(num_hidden))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

    # We project onto a single unit output layer, and squash it with a sigmoid:
    # NOTE(review): the comment above is stale — the head is actually
    # num_classes units with softmax.
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',optimizer='adam',  metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size=batch_size,epochs=nb_epoch, validation_split=0.1)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=1 if verbose else 0)
    if verbose:
        print('Test score:',score[0])
        print('Test accuracy:', score[1])
    predictions = model.predict_classes(X_test,verbose=1 if verbose else 0)
    return predictions,score[1]
Example #11
Source File: models.py    From very-deep-convnets-raw-waveforms with Apache License 2.0 4 votes vote down vote up
def m18(num_classes=10):
    """Build the 'M18' deep raw-waveform CNN: a wide strided stem conv, then
    four stages of four conv/BN/ReLU blocks with doubling filter widths,
    global average pooling over time, and a softmax head.

    Args:
        num_classes: size of the softmax output layer.

    Returns:
        Uncompiled Keras Sequential model.
    """
    print('Using Model M18')
    m = Sequential()
    # Stem: 80-sample kernel with stride 4 over raw audio.
    m.add(Conv1D(64,
                 input_shape=[AUDIO_LENGTH, 1],
                 kernel_size=80,
                 strides=4,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=regularizers.l2(l=0.0001)))
    m.add(BatchNormalization())
    m.add(Activation('relu'))
    m.add(MaxPooling1D(pool_size=4, strides=None))

    # Four stages of four identical conv blocks each; filter width doubles per
    # stage. Pooling follows every stage except the last — global average
    # pooling below collapses the time axis instead. Replaces four
    # copy-pasted loops with one data-driven loop.
    stage_filters = (64, 128, 256, 512)
    for stage_idx, filters in enumerate(stage_filters):
        for _ in range(4):
            m.add(Conv1D(filters,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer='glorot_uniform',
                         kernel_regularizer=regularizers.l2(l=0.0001)))
            m.add(BatchNormalization())
            m.add(Activation('relu'))
        if stage_idx < len(stage_filters) - 1:
            m.add(MaxPooling1D(pool_size=4, strides=None))

    m.add(Lambda(lambda x: K.mean(x, axis=1))) # Same as GAP for 1D Conv Layer
    m.add(Dense(num_classes, activation='softmax'))
    return m
Example #12
Source File: models.py    From very-deep-convnets-raw-waveforms with Apache License 2.0 4 votes vote down vote up
def m11(num_classes=10):
    """Build the 'M11' raw-waveform CNN: a wide strided stem conv, then four
    stages of (2, 2, 3, 2) conv/BN/ReLU blocks with doubling filter widths,
    global average pooling over time, and a softmax head.

    Args:
        num_classes: size of the softmax output layer.

    Returns:
        Uncompiled Keras Sequential model.
    """
    print('Using Model M11')
    m = Sequential()
    # Stem: 80-sample kernel with stride 4 over raw audio.
    m.add(Conv1D(64,
                 input_shape=[AUDIO_LENGTH, 1],
                 kernel_size=80,
                 strides=4,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=regularizers.l2(l=0.0001)))
    m.add(BatchNormalization())
    m.add(Activation('relu'))
    m.add(MaxPooling1D(pool_size=4, strides=None))

    # Stage table: (filter count, number of conv blocks). Pooling follows every
    # stage except the last — global average pooling below collapses the time
    # axis instead. Replaces four copy-pasted loops with one data-driven loop.
    stages = ((64, 2), (128, 2), (256, 3), (512, 2))
    for stage_idx, (filters, repeats) in enumerate(stages):
        for _ in range(repeats):
            m.add(Conv1D(filters,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         kernel_initializer='glorot_uniform',
                         kernel_regularizer=regularizers.l2(l=0.0001)))
            m.add(BatchNormalization())
            m.add(Activation('relu'))
        if stage_idx < len(stages) - 1:
            m.add(MaxPooling1D(pool_size=4, strides=None))

    m.add(Lambda(lambda x: K.mean(x, axis=1))) # Same as GAP for 1D Conv Layer
    m.add(Dense(num_classes, activation='softmax'))
    return m
Example #13
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License 4 votes vote down vote up
def test_conv_1d():
    """Shape/serialization smoke tests for Conv1D over padding and stride combos."""
    # Small fixed geometry keeps the layer_test forward passes cheap.
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    for padding in _convolution_paddings:
        for strides in [1, 2]:
            # Keras does not support strides > 1 with 'same' padding for Conv1D.
            if padding == 'same' and strides != 1:
                continue
            layer_test(convolutional.Conv1D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'strides': strides},
                       input_shape=(batch_size, steps, input_dim))

            # Same geometry, additionally exercising regularizer/constraint
            # deserialization from their string identifiers.
            layer_test(convolutional.Conv1D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'kernel_regularizer': 'l2',
                               'bias_regularizer': 'l2',
                               'activity_regularizer': 'l2',
                               'kernel_constraint': 'max_norm',
                               'bias_constraint': 'max_norm',
                               'strides': strides},
                       input_shape=(batch_size, steps, input_dim))

    # Test dilation
    # NOTE(review): 'padding' here is the leftover value from the exhausted
    # loop above — fragile; confirm against the upstream Keras test suite.
    layer_test(convolutional.Conv1D,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': padding,
                       'dilation_rate': 2,
                       'activation': None},
               input_shape=(batch_size, steps, input_dim))

    # Direct construction only checks argument validation, no forward pass.
    # NOTE(review): input_shape=(input_dim,) lacks the steps axis — presumably
    # acceptable because the layer is never built here.
    convolutional.Conv1D(filters=filters,
                         kernel_size=kernel_size,
                         padding=padding,
                         input_shape=(input_dim,))
Example #14
Source File: Unet.py    From ECG_UNet with MIT License 4 votes vote down vote up
def Unet(nClasses, optimizer=None, input_length=1800, nChannels=1):
    """1-D U-Net producing a per-timestep softmax over nClasses.

    Args:
        nClasses: number of output classes per timestep.
        optimizer: if given, the model is compiled with it; otherwise the
            uncompiled model is returned.
        input_length: length of the input sequence (must be divisible by 8
            for the three pool/upsample stages to realign — TODO confirm).
        nChannels: number of input channels per timestep.

    Returns:
        Keras Model mapping (input_length, nChannels) -> (input_length, nClasses).
    """
    inputs = Input((input_length, nChannels))
    # Encoder level 1: 16 filters, then downsample by 2.
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling1D(pool_size=2)(conv1)

    # Encoder level 2: 32 filters with dropout, then downsample.
    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling1D(pool_size=2)(conv2)
    
    # Encoder level 3: 64 filters, then downsample.
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling1D(pool_size=2)(conv3)

    # Bottleneck: 128 filters with heavier dropout.
    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Dropout(0.5)(conv4)
    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)

    # Decoder level 1: upsample, skip-connect with conv3, refine.
    up1 = Conv1D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv4))
    merge1 = concatenate([up1, conv3], axis=-1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    
    # Decoder level 2: upsample, skip-connect with conv2, refine.
    up2 = Conv1D(32, 2, activation='relu', padding='same', kernel_initializer = 'he_normal')(UpSampling1D(size=2)(conv5))
    merge2 = concatenate([up2, conv2], axis=-1)
    conv6 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer = 'he_normal')(merge2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Conv1D(32, 32, activation='relu', padding='same')(conv6)
    
    # Decoder level 3: upsample back to input resolution, skip-connect with conv1.
    up3 = Conv1D(16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv6))
    merge3 = concatenate([up3, conv1], axis=-1)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    
    # 1x1 conv to nClasses, then reshape/permute so softmax runs per timestep.
    conv8 = Conv1D(nClasses, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv8 = core.Reshape((nClasses, input_length))(conv8)
    conv8 = core.Permute((2, 1))(conv8)

    conv9 = core.Activation('softmax')(conv8)

    model = Model(inputs=inputs, outputs=conv9)
    # Compile only when an optimizer was supplied.
    if not optimizer is None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Example #15
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License 4 votes vote down vote up
def test_conv_1d():
    """Shape/serialization smoke tests for Conv1D over padding and stride combos."""
    # Small fixed geometry keeps the layer_test forward passes cheap.
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    for padding in _convolution_paddings:
        for strides in [1, 2]:
            # Keras does not support strides > 1 with 'same' padding for Conv1D.
            if padding == 'same' and strides != 1:
                continue
            layer_test(convolutional.Conv1D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'strides': strides},
                       input_shape=(batch_size, steps, input_dim))

            # Same geometry, additionally exercising regularizer/constraint
            # deserialization from their string identifiers.
            layer_test(convolutional.Conv1D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'kernel_regularizer': 'l2',
                               'bias_regularizer': 'l2',
                               'activity_regularizer': 'l2',
                               'kernel_constraint': 'max_norm',
                               'bias_constraint': 'max_norm',
                               'strides': strides},
                       input_shape=(batch_size, steps, input_dim))

    # Test dilation
    # NOTE(review): 'padding' here is the leftover value from the exhausted
    # loop above — fragile; confirm against the upstream Keras test suite.
    layer_test(convolutional.Conv1D,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': padding,
                       'dilation_rate': 2,
                       'activation': None},
               input_shape=(batch_size, steps, input_dim))

    # Direct construction only checks argument validation, no forward pass.
    convolutional.Conv1D(filters=filters,
                         kernel_size=kernel_size,
                         padding=padding,
                         input_shape=(input_dim,))
Example #16
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License 4 votes vote down vote up
def test_causal_dilated_conv():
    """Golden-value checks for causal and dilated Conv1D padding modes."""
    # Causal: left-pads with zeros so output t sees only inputs <= t.
    # With all-ones kernel of size 2: y[t] = x[t-1] + x[t], x[-1] treated as 0.
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=[[[0], [1], [3], [5]]]
               )

    # Non-causal: 'valid' padding shortens the output by kernel_size - 1 instead.
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'valid',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=[[[1], [3], [5]]]
               )

    # Causal dilated with larger kernel size:
    # dilation 2, kernel 3 -> y[t] = x[t-4] + x[t-2] + x[t] (zeros before t=0).
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(10, dtype='float32'), (1, 10, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 3,
                   'dilation_rate': 2,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]])
               )
Example #17
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License 4 votes vote down vote up
def test_causal_dilated_conv():
    """Golden-value checks for causal and dilated Conv1D padding modes."""
    # Causal: left-pads with zeros so output t sees only inputs <= t.
    # With all-ones kernel of size 2: y[t] = x[t-1] + x[t], x[-1] treated as 0.
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=[[[0], [1], [3], [5]]]
               )

    # Non-causal: 'valid' padding shortens the output by kernel_size - 1 instead.
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(4, dtype='float32'), (1, 4, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 2,
                   'dilation_rate': 1,
                   'padding': 'valid',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=[[[1], [3], [5]]]
               )

    # Causal dilated with larger kernel size:
    # dilation 2, kernel 3 -> y[t] = x[t-4] + x[t-2] + x[t] (zeros before t=0).
    layer_test(convolutional.Conv1D,
               input_data=np.reshape(np.arange(10, dtype='float32'), (1, 10, 1)),
               kwargs={
                   'filters': 1,
                   'kernel_size': 3,
                   'dilation_rate': 2,
                   'padding': 'causal',
                   'kernel_initializer': 'ones',
                   'use_bias': False,
               },
               expected_output=np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]])
               )
Example #18
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License 4 votes vote down vote up
def test_conv_1d():
    """Shape/serialization smoke tests for Conv1D over padding and stride combos."""
    # Small fixed geometry keeps the layer_test forward passes cheap.
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    for padding in _convolution_paddings:
        for strides in [1, 2]:
            # Keras does not support strides > 1 with 'same' padding for Conv1D.
            if padding == 'same' and strides != 1:
                continue
            layer_test(convolutional.Conv1D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'strides': strides},
                       input_shape=(batch_size, steps, input_dim))

            # Same geometry, additionally exercising regularizer/constraint
            # deserialization from their string identifiers.
            layer_test(convolutional.Conv1D,
                       kwargs={'filters': filters,
                               'kernel_size': kernel_size,
                               'padding': padding,
                               'kernel_regularizer': 'l2',
                               'bias_regularizer': 'l2',
                               'activity_regularizer': 'l2',
                               'kernel_constraint': 'max_norm',
                               'bias_constraint': 'max_norm',
                               'strides': strides},
                       input_shape=(batch_size, steps, input_dim))

    # Test dilation
    # NOTE(review): 'padding' here is the leftover value from the exhausted
    # loop above — fragile; confirm against the upstream Keras test suite.
    layer_test(convolutional.Conv1D,
               kwargs={'filters': filters,
                       'kernel_size': kernel_size,
                       'padding': padding,
                       'dilation_rate': 2,
                       'activation': None},
               input_shape=(batch_size, steps, input_dim))

    # Direct construction only checks argument validation, no forward pass.
    convolutional.Conv1D(filters=filters,
                         kernel_size=kernel_size,
                         padding=padding,
                         input_shape=(input_dim,))
Example #19
Source File: convolutional_test.py    From DeepLearning_Wavelet-LSTM with MIT License 4 votes vote down vote up
def test_causal_dilated_conv():
    """Check causal vs. valid padding outputs for an all-ones, bias-free Conv1D."""

    def check(length, expected, **overrides):
        # Build a ramp input 0..length-1 shaped (batch=1, steps=length, channels=1)
        # and run it through a deterministic (ones-initialized, no-bias) Conv1D.
        kwargs = {'filters': 1,
                  'kernel_size': 2,
                  'dilation_rate': 1,
                  'kernel_initializer': 'ones',
                  'use_bias': False}
        kwargs.update(overrides)
        ramp = np.reshape(np.arange(length, dtype='float32'), (1, length, 1))
        layer_test(convolutional.Conv1D,
                   input_data=ramp,
                   kwargs=kwargs,
                   expected_output=expected)

    # Causal: left zero-padding keeps the output length equal to the input's,
    # and step t only sees inputs at positions <= t.
    check(4, [[[0], [1], [3], [5]]], padding='causal')

    # Non-causal 'valid': no padding, so the output shrinks by kernel_size - 1.
    check(4, [[[1], [3], [5]]], padding='valid')

    # Causal, dilated, larger kernel: kernel 3 with dilation 2 spans 5 steps.
    check(10,
          np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]]),
          padding='causal', kernel_size=3, dilation_rate=2)
Example #20
Source File: convolutional_test.py — from the DeepLearning_Wavelet-LSTM project (MIT License)
def test_conv_1d():
    """Smoke-test Conv1D across padding/stride combinations, regularizers/constraints, and dilation."""
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    shape = (batch_size, steps, input_dim)
    base_kwargs = {'filters': filters, 'kernel_size': kernel_size}

    for padding in _convolution_paddings:
        for strides in (1, 2):
            # Keras only supports unit strides with 'same' padding.
            if strides != 1 and padding == 'same':
                continue
            layer_test(convolutional.Conv1D,
                       kwargs=dict(base_kwargs,
                                   padding=padding,
                                   strides=strides),
                       input_shape=shape)

            # Same configuration, with regularizers and constraints attached.
            layer_test(convolutional.Conv1D,
                       kwargs=dict(base_kwargs,
                                   padding=padding,
                                   strides=strides,
                                   kernel_regularizer='l2',
                                   bias_regularizer='l2',
                                   activity_regularizer='l2',
                                   kernel_constraint='max_norm',
                                   bias_constraint='max_norm'),
                       input_shape=shape)

    # Test dilation. NOTE(review): `padding` leaks out of the loop above, so
    # this exercises whichever entry of _convolution_paddings came last.
    layer_test(convolutional.Conv1D,
               kwargs=dict(base_kwargs,
                           padding=padding,
                           dilation_rate=2,
                           activation=None),
               input_shape=shape)

    # Constructing a layer with input_shape must not raise.
    convolutional.Conv1D(input_shape=(input_dim,),
                         **dict(base_kwargs, padding=padding))
Example #21
Source File: convolutional_test.py — from the DeepLearning_Wavelet-LSTM project (MIT License)
def test_causal_dilated_conv():
    """Check causal vs. valid padding outputs for an all-ones, bias-free Conv1D."""

    def check(length, expected, **overrides):
        # Build a ramp input 0..length-1 shaped (batch=1, steps=length, channels=1)
        # and run it through a deterministic (ones-initialized, no-bias) Conv1D.
        kwargs = {'filters': 1,
                  'kernel_size': 2,
                  'dilation_rate': 1,
                  'kernel_initializer': 'ones',
                  'use_bias': False}
        kwargs.update(overrides)
        ramp = np.reshape(np.arange(length, dtype='float32'), (1, length, 1))
        layer_test(convolutional.Conv1D,
                   input_data=ramp,
                   kwargs=kwargs,
                   expected_output=expected)

    # Causal: left zero-padding keeps the output length equal to the input's,
    # and step t only sees inputs at positions <= t.
    check(4, [[[0], [1], [3], [5]]], padding='causal')

    # Non-causal 'valid': no padding, so the output shrinks by kernel_size - 1.
    check(4, [[[1], [3], [5]]], padding='valid')

    # Causal, dilated, larger kernel: kernel 3 with dilation 2 spans 5 steps.
    check(10,
          np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]]),
          padding='causal', kernel_size=3, dilation_rate=2)
Example #22
Source File: convolutional_test.py — from the DeepLearning_Wavelet-LSTM project (MIT License)
def test_causal_dilated_conv():
    """Check causal vs. valid padding outputs for an all-ones, bias-free Conv1D."""

    def check(length, expected, **overrides):
        # Build a ramp input 0..length-1 shaped (batch=1, steps=length, channels=1)
        # and run it through a deterministic (ones-initialized, no-bias) Conv1D.
        kwargs = {'filters': 1,
                  'kernel_size': 2,
                  'dilation_rate': 1,
                  'kernel_initializer': 'ones',
                  'use_bias': False}
        kwargs.update(overrides)
        ramp = np.reshape(np.arange(length, dtype='float32'), (1, length, 1))
        layer_test(convolutional.Conv1D,
                   input_data=ramp,
                   kwargs=kwargs,
                   expected_output=expected)

    # Causal: left zero-padding keeps the output length equal to the input's,
    # and step t only sees inputs at positions <= t.
    check(4, [[[0], [1], [3], [5]]], padding='causal')

    # Non-causal 'valid': no padding, so the output shrinks by kernel_size - 1.
    check(4, [[[1], [3], [5]]], padding='valid')

    # Causal, dilated, larger kernel: kernel 3 with dilation 2 spans 5 steps.
    check(10,
          np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]]),
          padding='causal', kernel_size=3, dilation_rate=2)
Example #23
Source File: convolutional_test.py — from the DeepLearning_Wavelet-LSTM project (MIT License)
def test_conv_1d():
    """Smoke-test Conv1D across padding/stride combinations, regularizers/constraints, and dilation."""
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    shape = (batch_size, steps, input_dim)
    base_kwargs = {'filters': filters, 'kernel_size': kernel_size}

    for padding in _convolution_paddings:
        for strides in (1, 2):
            # Keras only supports unit strides with 'same' padding.
            if strides != 1 and padding == 'same':
                continue
            layer_test(convolutional.Conv1D,
                       kwargs=dict(base_kwargs,
                                   padding=padding,
                                   strides=strides),
                       input_shape=shape)

            # Same configuration, with regularizers and constraints attached.
            layer_test(convolutional.Conv1D,
                       kwargs=dict(base_kwargs,
                                   padding=padding,
                                   strides=strides,
                                   kernel_regularizer='l2',
                                   bias_regularizer='l2',
                                   activity_regularizer='l2',
                                   kernel_constraint='max_norm',
                                   bias_constraint='max_norm'),
                       input_shape=shape)

    # Test dilation. NOTE(review): `padding` leaks out of the loop above, so
    # this exercises whichever entry of _convolution_paddings came last.
    layer_test(convolutional.Conv1D,
               kwargs=dict(base_kwargs,
                           padding=padding,
                           dilation_rate=2,
                           activation=None),
               input_shape=shape)

    # Constructing a layer with input_shape must not raise.
    convolutional.Conv1D(input_shape=(input_dim,),
                         **dict(base_kwargs, padding=padding))
Example #24
Source File: convolutional_test.py — from the DeepLearning_Wavelet-LSTM project (MIT License)
def test_causal_dilated_conv():
    """Check causal vs. valid padding outputs for an all-ones, bias-free Conv1D."""

    def check(length, expected, **overrides):
        # Build a ramp input 0..length-1 shaped (batch=1, steps=length, channels=1)
        # and run it through a deterministic (ones-initialized, no-bias) Conv1D.
        kwargs = {'filters': 1,
                  'kernel_size': 2,
                  'dilation_rate': 1,
                  'kernel_initializer': 'ones',
                  'use_bias': False}
        kwargs.update(overrides)
        ramp = np.reshape(np.arange(length, dtype='float32'), (1, length, 1))
        layer_test(convolutional.Conv1D,
                   input_data=ramp,
                   kwargs=kwargs,
                   expected_output=expected)

    # Causal: left zero-padding keeps the output length equal to the input's,
    # and step t only sees inputs at positions <= t.
    check(4, [[[0], [1], [3], [5]]], padding='causal')

    # Non-causal 'valid': no padding, so the output shrinks by kernel_size - 1.
    check(4, [[[1], [3], [5]]], padding='valid')

    # Causal, dilated, larger kernel: kernel 3 with dilation 2 spans 5 steps.
    check(10,
          np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]]),
          padding='causal', kernel_size=3, dilation_rate=2)
Example #25
Source File: convolutional_test.py — from the DeepLearning_Wavelet-LSTM project (MIT License)
def test_conv_1d():
    """Smoke-test Conv1D across padding/stride combinations, regularizers/constraints, and dilation."""
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    shape = (batch_size, steps, input_dim)
    base_kwargs = {'filters': filters, 'kernel_size': kernel_size}

    for padding in _convolution_paddings:
        for strides in (1, 2):
            # Keras only supports unit strides with 'same' padding.
            if strides != 1 and padding == 'same':
                continue
            layer_test(convolutional.Conv1D,
                       kwargs=dict(base_kwargs,
                                   padding=padding,
                                   strides=strides),
                       input_shape=shape)

            # Same configuration, with regularizers and constraints attached.
            layer_test(convolutional.Conv1D,
                       kwargs=dict(base_kwargs,
                                   padding=padding,
                                   strides=strides,
                                   kernel_regularizer='l2',
                                   bias_regularizer='l2',
                                   activity_regularizer='l2',
                                   kernel_constraint='max_norm',
                                   bias_constraint='max_norm'),
                       input_shape=shape)

    # Test dilation. NOTE(review): `padding` leaks out of the loop above, so
    # this exercises whichever entry of _convolution_paddings came last.
    layer_test(convolutional.Conv1D,
               kwargs=dict(base_kwargs,
                           padding=padding,
                           dilation_rate=2,
                           activation=None),
               input_shape=shape)

    # Constructing a layer with input_shape must not raise.
    convolutional.Conv1D(input_shape=(input_dim,),
                         **dict(base_kwargs, padding=padding))
Example #26
Source File: convolutional_test.py — from the DeepLearning_Wavelet-LSTM project (MIT License)
def test_causal_dilated_conv():
    """Check causal vs. valid padding outputs for an all-ones, bias-free Conv1D."""

    def check(length, expected, **overrides):
        # Build a ramp input 0..length-1 shaped (batch=1, steps=length, channels=1)
        # and run it through a deterministic (ones-initialized, no-bias) Conv1D.
        kwargs = {'filters': 1,
                  'kernel_size': 2,
                  'dilation_rate': 1,
                  'kernel_initializer': 'ones',
                  'use_bias': False}
        kwargs.update(overrides)
        ramp = np.reshape(np.arange(length, dtype='float32'), (1, length, 1))
        layer_test(convolutional.Conv1D,
                   input_data=ramp,
                   kwargs=kwargs,
                   expected_output=expected)

    # Causal: left zero-padding keeps the output length equal to the input's,
    # and step t only sees inputs at positions <= t.
    check(4, [[[0], [1], [3], [5]]], padding='causal')

    # Non-causal 'valid': no padding, so the output shrinks by kernel_size - 1.
    check(4, [[[1], [3], [5]]], padding='valid')

    # Causal, dilated, larger kernel: kernel 3 with dilation 2 spans 5 steps.
    check(10,
          np.float32([[[0], [1], [2], [4], [6], [9], [12], [15], [18], [21]]]),
          padding='causal', kernel_size=3, dilation_rate=2)