Python keras.layers.core.Merge() Examples

The following are 17 code examples of keras.layers.core.Merge(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or browse the other available functions and classes of the keras.layers.core module. Note that this capital-M Merge layer belongs to the legacy Keras 0.x/1.x Sequential API; it was deprecated in Keras 2 and later removed in favour of the functional merge layers (Add, Concatenate, Dot, ...), so the examples below target old Keras versions.
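Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: two Sequential branches joined by a Merge layer, trained by passing one input array per branch. It assumes a legacy Keras install where Merge is importable from keras.layers.core and Dense takes output_dim first (the oldest 0.x test examples below instead use Dense(input_dim, output_dim)); X1, X2 and y are hypothetical stand-ins.

import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Merge

# two branches whose output shapes match, as mode='sum' requires
left = Sequential()
left.add(Dense(16, input_dim=8, activation='relu'))

right = Sequential()
right.add(Dense(16, input_dim=8, activation='relu'))

# element-wise sum of the branch outputs; 'concat', 'ave', 'max', 'mul'
# and 'dot' are the other modes that appear in the examples below
model = Sequential()
model.add(Merge([left, right], mode='sum'))
model.add(Dense(4))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

# a merged model expects one input array per branch, passed as a list
X1 = np.random.random((32, 8))
X2 = np.random.random((32, 8))
y = np.eye(4)[np.random.randint(0, 4, size=32)]  # one-hot targets
model.fit([X1, X2], y, nb_epoch=1, batch_size=16)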
Example #1
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        self.textual_embedding(self, mask_zero=False)
        #self.textual_embedding(self, mask_zero=True)
        self.add(MaskedConvolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=self._config.language_cnn_filter_length,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(self)
        #self.add(DropMask())
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax')) 
Example #2
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        model_list = [None] * self._config.language_cnn_views
        for j in xrange(1,self._config.language_cnn_views+1):
            current_view = Sequential()
            self.textual_embedding(current_view, mask_zero=True)
            current_view.add(Convolution1D(
                nb_filter=self._config.language_cnn_filters,
                filter_length=j,
                border_mode='valid',
                activation=self._config.language_cnn_activation,
                subsample_length=1))
            self.temporal_pooling(current_view)
            model_list[j-1] = current_view

        self.add(Merge(model_list, mode='concat'))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax')) 
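Since the Merge above concatenates language_cnn_views parallel branches, the compiled model expects that many input arrays at fit time; every view reads the same token sequence, so the same array would simply be passed once per view. A hedged sketch, where model, X_text and y are hypothetical stand-ins:

n_views = 3  # e.g. self._config.language_cnn_views
model.fit([X_text] * n_views, y, nb_epoch=10, batch_size=32)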
Example #3
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):

        assert self._config.textual_embedding_dim == 0, \
                'Embedding cannot be learnt but must be fixed'

        language_forward = Sequential()
        language_forward.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, return_sequences=False,
            input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
        self.language_forward = language_forward

        language_backward = Sequential()
        language_backward.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, return_sequences=False,
            go_backwards=True,
            input_shape=(self._config.max_input_time_steps, self._config.input_dim)))
        self.language_backward = language_backward

        self.add(Merge([language_forward, language_backward]))
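        # no mode is given here: Merge defaults to mode='sum', so the forward
        # and backward sentence encodings are added element-wise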
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax')) 
Example #4
Source File: test_core.py    From CAPTCHA-breaking with MIT License
def test_merge(self):
        layer_1 = core.Layer()
        layer_2 = core.Layer()
        layer = core.Merge([layer_1, layer_2])
        self._runner(layer) 
Example #5
Source File: Segmentation_Models.py    From brain_segmentation with MIT License
def comp_double(self):
        '''
        Double model. Similar to two-pathway, except it takes in a 4x33x33 patch and its center 4x5x5 patch, and merges the paths at the flatten layer.
        '''
        print 'Compiling double model...'
        single = Sequential()
        single.add(Convolution2D(64, 7, 7, border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01), input_shape=(4,33,33)))
        single.add(Activation('relu'))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=128, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=256, nb_row=5, nb_col=5, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(BatchNormalization(mode=0, axis=1))
        single.add(MaxPooling2D(pool_size=(2,2), strides=(1,1)))
        single.add(Dropout(0.5))
        single.add(Convolution2D(nb_filter=128, nb_row=3, nb_col=3, activation='relu', border_mode='valid', W_regularizer=l1l2(l1=0.01, l2=0.01)))
        single.add(Dropout(0.25))
        single.add(Flatten())

        # add small patch to train on
        five = Sequential()
        five.add(Reshape((100,1), input_shape = (4,5,5)))
        five.add(Flatten())
        five.add(MaxoutDense(128, nb_feature=5))
        five.add(Dropout(0.5))

        model = Sequential()
        # merge both paths
        model.add(Merge([five, single], mode='concat', concat_axis=1))
        model.add(Dense(5))
        model.add(Activation('softmax'))

        sgd = SGD(lr=0.001, decay=0.01, momentum=0.9)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        print 'Done.'
        return model 
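Because comp_double merges the five branch before the single branch, the compiled model expects its two input arrays in that same order. A sketch under that assumption, with hypothetical arrays X_five of shape (n, 4, 5, 5), X_single of shape (n, 4, 33, 33) and one-hot labels y:

model = self.comp_double()
model.fit([X_five, X_single], y, batch_size=128, nb_epoch=10)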
Example #6
Source File: customlayers.py    From deep-mil-for-whole-mammogram-classification with MIT License
def convolution2Dgroup(n_group, nb_filter, nb_row, nb_col, **kwargs):
    def f(input):
        return Merge([
            Convolution2D(nb_filter//n_group,nb_row,nb_col)(
                splittensor(axis=1,
                            ratio_split=n_group,
                            id_split=i)(input))
            for i in range(n_group)
        ],mode='concat',concat_axis=1)

    return f 
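convolution2Dgroup reproduces AlexNet-style grouped convolution: splittensor slices the input along the channel axis into n_group pieces, each piece gets its own Convolution2D with nb_filter // n_group filters, and the group outputs are concatenated back along the channels. It is written in the functional style, so it would be applied to a tensor roughly as follows (x is a hypothetical 4D tensor, channels first):

# 256 filters computed as two independent groups of 128 each
x = convolution2Dgroup(n_group=2, nb_filter=256, nb_row=5, nb_col=5)(x)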
Example #7
Source File: customlayers.py    From convnets-keras with MIT License
def convolution2Dgroup(n_group, nb_filter, nb_row, nb_col, **kwargs):
    def f(input):
        return Merge([
                         Convolution2D(nb_filter // n_group, nb_row, nb_col)(
                             splittensor(axis=1,
                                         ratio_split=n_group,
                                         id_split=i)(input))
                         for i in range(n_group)
                         ], mode='concat', concat_axis=1)

    return f 
Example #8
Source File: test_sequential_model.py    From CAPTCHA-breaking with MIT License
def test_merge_overlap(self):
        print('Test merge overlap')
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, left], mode='sum'))
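        # merging a branch with itself: the model keeps a single input,
        # hence the plain X_train (not a list) passed to fit() below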

        model.add(Dense(nb_hidden, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict(X_test, verbose=0)
        classes = model.predict_classes(X_test, verbose=0)
        probas = model.predict_proba(X_test, verbose=0)
        print(model.get_config(verbose=1))

        model.save_weights('temp.h5', overwrite=True)
        model.load_weights('temp.h5')

        nloss = model.evaluate(X_train, y_train, verbose=0)
        print(nloss)
        assert(loss == nloss) 
Example #9
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.stacked_RNN(language_model)
        language_model.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.language_model = language_model

        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        self.visual_model = visual_model

        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(Dropout(0.5))
        self.add(Dense(self._config.output_dim))

        self.add(RepeatVector(self._config.max_output_time_steps))
        self.add(self._config.recurrent_decoder(
                self._config.hidden_state_dim, return_sequences=True))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.add(Activation('softmax'))


###
# Graph-based models
### 
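A note on the 'dot' branch above: mode='dot' takes a batched dot product of the two branch outputs over the axes listed in dot_axes, combining the language and visual encodings multiplicatively rather than by stacking. A minimal sketch of the same pattern with hypothetical dimensions:

from keras.models import Sequential
from keras.layers.core import Dense, Merge

lang = Sequential()
lang.add(Dense(128, input_dim=300))  # e.g. a 300-d text encoding
vis = Sequential()
vis.add(Dense(128, input_dim=4096))  # e.g. a 4096-d CNN feature

joint = Sequential()
# contracting axis 1 of two (None, 128) outputs gives one scalar per sample
joint.add(Merge([lang, vis], mode='dot', dot_axes=[(1,), (1,)]))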
Example #10
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.language_model = language_model


        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        self.visual_model = visual_model
        visual_model.add(RepeatVector(self._config.max_input_time_steps))

        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax')) 
Example #11
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.language_model = language_model

        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        # the below should contain all zeros
        zero_model = Sequential()
        zero_model.add(RepeatVector(self._config.max_input_time_steps - 1))
        visual_model.add(Merge([visual_model, zero_model], mode='concat'))
        self.visual_model = visual_model

        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax')) 
Example #12
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.stacked_RNN(language_model)
        language_model.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.language_model = language_model


        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        self.visual_model = visual_model

        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax')) 
Example #13
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        unigram = Sequential() 
        self.textual_embedding(unigram, mask_zero=True)
        unigram.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=1,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(unigram)

        bigram = Sequential()
        self.textual_embedding(bigram, mask_zero=True)
        bigram.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=2,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(bigram)

        trigram = Sequential()
        self.textual_embedding(trigram, mask_zero=True)
        trigram.add(Convolution1D(
            nb_filter=self._config.language_cnn_filters,
            filter_length=3,
            border_mode='valid',
            activation=self._config.language_cnn_activation,
            subsample_length=1))
        self.temporal_pooling(trigram)

        self.add(Merge([unigram, bigram, trigram], mode='concat'))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax')) 
Example #14
Source File: model_zoo.py    From visual_turing_test-tutorial with MIT License
def create(self):
        assert self._config.merge_mode in ['max', 'ave', 'sum'], \
                'Merge mode of this model is either max, ave or sum'

        self.textual_embedding(self, mask_zero=False)
        self.stacked_RNN(self)
        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=True,
            go_backwards=self._config.go_backwards))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.temporal_pooling(self)
        self.add(Activation('softmax')) 
Example #15
Source File: test_sequential_model.py    From CAPTCHA-breaking with MIT License
def test_merge_sum(self):
        print('Test merge: sum')
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='sum'))

        model.add(Dense(nb_hidden, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        preds = model.predict([X_test, X_test], verbose=0)
        classes = model.predict_classes([X_test, X_test], verbose=0)
        probas = model.predict_proba([X_test, X_test], verbose=0)
        print(model.get_config(verbose=1))

        print('test weight saving')
        model.save_weights('temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))
        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))
        model = Sequential()
        model.add(Merge([left, right], mode='sum'))
        model.add(Dense(nb_hidden, nb_class))
        model.add(Activation('softmax'))
        model.load_weights('temp.h5')
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss) 
Example #16
Source File: test_sequential_model.py    From CAPTCHA-breaking with MIT License
def test_merge_concat(self):
        print('Test merge: concat')
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict([X_test, X_test], verbose=0)
        classes = model.predict_classes([X_test, X_test], verbose=0)
        probas = model.predict_proba([X_test, X_test], verbose=0)
        print(model.get_config(verbose=1))

        print('test weight saving')
        model.save_weights('temp.h5', overwrite=True)
        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_hidden * 2, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights('temp.h5')

        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss) 
Example #17
Source File: test_sequential_model.py    From CAPTCHA-breaking with MIT License
def test_merge_recursivity(self):
        print('Test merge recursivity')

        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        righter = Sequential()
        righter.add(Dense(input_dim, nb_hidden))
        righter.add(Activation('relu'))

        intermediate = Sequential()
        intermediate.add(Merge([left, right], mode='sum'))
        intermediate.add(Dense(nb_hidden, nb_hidden))
        intermediate.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([intermediate, righter], mode='sum'))

        model.add(Dense(nb_hidden, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict([X_test, X_test, X_test], verbose=0)
        classes = model.predict_classes([X_test, X_test, X_test], verbose=0)
        probas = model.predict_proba([X_test, X_test, X_test], verbose=0)
        print(model.get_config(verbose=1))

        model.save_weights('temp.h5', overwrite=True)
        model.load_weights('temp.h5')

        nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)