Python keras.layers.average() Examples

The following are 15 code examples of keras.layers.average(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module keras.layers, or try the search function.
Example #1
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_dense_elementwise_params(self):
        """Exercise the converter numeric test over every elementwise merge mode."""
        options = dict(modes=[add, multiply, concatenate, average, maximum])

        def build_model(mode):
            # Two 3-d inputs, each passed through its own Dense(4), merged by `mode`.
            left = Input(shape=(3,))
            right = Input(shape=(3,))
            merged = mode([Dense(4)(left), Dense(4)(right)])
            return mode, Model([left, right], merged)

        combos = itertools.product(*options.values())
        cases = [build_model(combo[0]) for combo in combos]
        print("Testing a total of %s cases. This could take a while" % len(cases))
        for param, model in cases:
            self._run_test(model, param)
Example #2
Source File: merge_test.py    From DeepLearning_Wavelet-LSTM with MIT License 6 votes vote down vote up
def test_merge_average():
    """Check both the functional ``layers.average`` and the ``Average`` layer class."""
    # Two identically-shaped symbolic inputs.
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))

    # Functional API: merged tensor keeps the (batch, 4, 5) shape.
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    # Layer-class API: its output shape must agree with the functional form.
    layer = layers.Average()
    layer([in_a, in_b])
    assert layer.output_shape == (None, 4, 5)

    # Numeric check: the merged output is the element-wise mean of the inputs.
    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = net.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Example #3
Source File: merge_test.py    From DeepLearning_Wavelet-LSTM with MIT License 6 votes vote down vote up
def test_merge_average():
    """Check both the functional ``layers.average`` and the ``Average`` layer class."""
    # Two identically-shaped symbolic inputs.
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))

    # Functional API: merged tensor keeps the (batch, 4, 5) shape.
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    # Layer-class API: its output shape must agree with the functional form.
    layer = layers.Average()
    layer([in_a, in_b])
    assert layer.output_shape == (None, 4, 5)

    # Numeric check: the merged output is the element-wise mean of the inputs.
    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = net.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Example #4
Source File: merge_test.py    From DeepLearning_Wavelet-LSTM with MIT License 6 votes vote down vote up
def test_merge_average():
    """Check both the functional ``layers.average`` and the ``Average`` layer class."""
    # Two identically-shaped symbolic inputs.
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))

    # Functional API: merged tensor keeps the (batch, 4, 5) shape.
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    # Layer-class API: its output shape must agree with the functional form.
    layer = layers.Average()
    layer([in_a, in_b])
    assert layer.output_shape == (None, 4, 5)

    # Numeric check: the merged output is the element-wise mean of the inputs.
    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = net.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Example #5
Source File: merge_test.py    From DeepLearning_Wavelet-LSTM with MIT License 6 votes vote down vote up
def test_merge_average():
    """Check both the functional ``layers.average`` and the ``Average`` layer class."""
    # Two identically-shaped symbolic inputs.
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))

    # Functional API: merged tensor keeps the (batch, 4, 5) shape.
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    # Layer-class API: its output shape must agree with the functional form.
    layer = layers.Average()
    layer([in_a, in_b])
    assert layer.output_shape == (None, 4, 5)

    # Numeric check: the merged output is the element-wise mean of the inputs.
    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = net.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Example #6
Source File: merge_test.py    From DeepLearning_Wavelet-LSTM with MIT License 6 votes vote down vote up
def test_merge_average():
    """Check both the functional ``layers.average`` and the ``Average`` layer class."""
    # Two identically-shaped symbolic inputs.
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))

    # Functional API: merged tensor keeps the (batch, 4, 5) shape.
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    # Layer-class API: its output shape must agree with the functional form.
    layer = layers.Average()
    layer([in_a, in_b])
    assert layer.output_shape == (None, 4, 5)

    # Numeric check: the merged output is the element-wise mean of the inputs.
    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = net.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Example #7
Source File: merge_test.py    From DeepLearning_Wavelet-LSTM with MIT License 6 votes vote down vote up
def test_merge_average():
    """Check both the functional ``layers.average`` and the ``Average`` layer class."""
    # Two identically-shaped symbolic inputs.
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))

    # Functional API: merged tensor keeps the (batch, 4, 5) shape.
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    # Layer-class API: its output shape must agree with the functional form.
    layer = layers.Average()
    layer([in_a, in_b])
    assert layer.output_shape == (None, 4, 5)

    # Numeric check: the merged output is the element-wise mean of the inputs.
    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = net.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Example #8
Source File: merge_test.py    From DeepLearning_Wavelet-LSTM with MIT License 6 votes vote down vote up
def test_merge_average():
    """Check both the functional ``layers.average`` and the ``Average`` layer class."""
    # Two identically-shaped symbolic inputs.
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))

    # Functional API: merged tensor keeps the (batch, 4, 5) shape.
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    # Layer-class API: its output shape must agree with the functional form.
    layer = layers.Average()
    layer([in_a, in_b])
    assert layer.output_shape == (None, 4, 5)

    # Numeric check: the merged output is the element-wise mean of the inputs.
    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = net.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Example #9
Source File: test_keras2_numeric.py    From coremltools with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_imdb_fasttext_first_2(self):
        """Embedding followed by AveragePooling1D — first slice of the fasttext IMDB net."""
        vocab_size = 10
        seq_len = 6
        embed_dim = 4
        pool = 2

        model = Sequential()
        model.add(Embedding(vocab_size, embed_dim, input_length=seq_len))
        # AveragePooling1D averages neighbouring embedding vectors along the
        # sequence axis (pool windows of `pool` timesteps).
        model.add(AveragePooling1D(pool_size=pool))

        self._test_model(model, one_dim_seq_flags=[True])
Example #10
Source File: MSnetworks.py    From CNNArt with Apache License 2.0 5 votes vote down vote up
def fconcatenate(path_orig, path_down):
    """Average two feature paths, center-cropping `path_down` to match if shapes differ."""
    if path_orig._keras_shape == path_down._keras_shape:
        trimmed = path_down
    else:
        # Per spatial axis (2, 3, 4): split the size difference into a
        # (low, high) crop pair, with the larger (ceil) half on the high side.
        def _crop_pair(axis):
            diff = path_down._keras_shape[axis] - path_orig._keras_shape[axis]
            high = int(np.ceil(diff / 2))
            return (diff - high, high)

        crops = (_crop_pair(2), _crop_pair(3), _crop_pair(4))
        trimmed = Cropping3D(cropping=crops)(path_down)
    return average([path_orig, trimmed])
Example #11
Source File: fuse_validate_model.py    From two-stream-action-recognition-keras with MIT License 5 votes vote down vote up
def two_stream_fuse(self):
        """Fuse the frozen spatial and temporal streams by averaging their outputs."""
        spatial = self.cnn_spatial_multi()
        temporal = self.cnn_temporal_multi()

        # Late fusion: element-wise mean of the two streams' predictions.
        fused = average([spatial.output, temporal.output])

        return Model([spatial.input, temporal.input], fused)

    # CNN model for the temporal stream with multiple inputs 
Example #12
Source File: fuse_validate_model.py    From two-stream-action-recognition-keras with MIT License 5 votes vote down vote up
def cnn_spatial(self):
        """InceptionV3 backbone with a GAP + Dense head for `self.nb_classes` classes."""
        backbone = InceptionV3(weights='imagenet', include_top=False)

        # Global average pooling collapses the spatial dims of the conv features.
        features = GlobalAveragePooling2D()(backbone.output)
        # Fully-connected layer on top of the pooled features.
        hidden = Dense(1024, activation='relu')(features)
        # Softmax classification layer.
        scores = Dense(self.nb_classes, activation='softmax')(hidden)

        return Model(inputs=backbone.input, outputs=scores)

    # CNN model for the temporal stream 
Example #13
Source File: layers_export.py    From Fabrik with GNU General Public License v3.0 5 votes vote down vote up
def eltwise(layer, layer_in, layerId):
    """Map an eltwise layer spec onto the matching keras merge function.

    Returns a dict with `layerId` mapped to the merged tensor.
    """
    # Inputs are reversed ([::-1]) to handle the visualization's ordering.
    inputs = layer_in[::-1]
    op_name = layer['params']['layer_type']
    if (op_name == 'Multiply'):
        merged = multiply(inputs)
    elif (op_name == 'Sum'):
        merged = add(inputs)
    elif (op_name == 'Average'):
        merged = average(inputs)
    elif (op_name == 'Dot'):
        merged = dot(inputs, -1)
    else:
        # Any other type falls back to element-wise maximum.
        merged = maximum(inputs)
    return {layerId: merged}
Example #14
Source File: MSnetworks.py    From CNNArt with Apache License 2.0 4 votes vote down vote up
def fCreateModel_SPP_MultiPath(patchSize, patchSize2, dr_rate=0.0, iPReLU=0, l2_reg=1e-6):
    """Build a two-pathway 3-D classifier whose pathway outputs are averaged.

    Each pathway runs conv -> down-conv -> conv -> down-conv -> conv, then an
    SPP module; the two SPP feature vectors are averaged and fed to a
    Dropout + Dense(2) + softmax head.

    Args:
        patchSize: 3-element spatial size (x, y, z) of the first input patch.
        patchSize2: 3-element spatial size of the second input patch.
        dr_rate: dropout rate forwarded to the down-conv blocks and used before
            the dense head.
        iPReLU: activation selector forwarded to the down-conv blocks.
        l2_reg: L2 weight-regularization factor passed to all building blocks.

    Returns:
        keras ``Model`` mapping [inp1, inp2] -> 2-class softmax prediction.
    """
    # Total params: 2,057,510
    # There are 2 pathway, whose receptive fields are in multiple relation.
    # Their outputs are averaged as the final prediction
    # The third down sampling convolutional layer in each pathway is replaced by the SPP module
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()
    
    # NOTE(review): these "shared*" names are plain aliases of the builder
    # *functions*, not shared layer instances — each call below creates fresh
    # layers, so the two pathways do NOT share weights despite the naming.
    # Confirm whether weight sharing was actually intended.
    sharedConv1 = fCreateVNet_Block
    sharedDown1 = fCreateVNet_DownConv_Block
    sharedConv2 = fCreateVNet_Block
    sharedDown2 = fCreateVNet_DownConv_Block
    sharedConv3 = fCreateVNet_Block
    sharedSPP = fSPP
    
    # Pathway 1: operates on the first patch size; channels-first input (1, x, y, z).
    inp1 = Input(shape=(1, patchSize[0], patchSize[1], patchSize[2]))
    inp1_Conv_1 = sharedConv1(inp1, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp1_DownConv_1 = sharedDown1(inp1_Conv_1, inp1_Conv_1._keras_shape[1], Strides[0],
                                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp1_Conv_2 = sharedConv2(inp1_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp1_DownConv_2 = sharedDown2(inp1_Conv_2, inp1_Conv_2._keras_shape[1], Strides[1],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp1_Conv_3 = sharedConv3(inp1_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp1_SPP = sharedSPP(inp1_Conv_3, level=3)
    
    # Pathway 2: identical topology applied to the second patch size.
    inp2 = Input(shape=(1, patchSize2[0], patchSize2[1], patchSize2[2]))
    inp2_Conv_1 = sharedConv1(inp2, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp2_DownConv_1 = sharedDown1(inp2_Conv_1, inp2_Conv_1._keras_shape[1], Strides[0],
                                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp2_Conv_2 = sharedConv2(inp2_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp2_DownConv_2 = sharedDown2(inp2_Conv_2, inp2_Conv_2._keras_shape[1], Strides[1],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    inp2_Conv_3 = sharedConv3(inp2_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    inp2_SPP = sharedSPP(inp2_Conv_3, level=3)    
    # Element-wise mean of the two pathways' SPP feature vectors.
    SPP_aver = average([inp1_SPP, inp2_SPP])
    
    # Classification head: dropout -> Dense(2) -> softmax.
    dropout_out = Dropout(dr_rate)(SPP_aver)
    dense_out = Dense(units=2,
                          kernel_initializer='normal',
                          kernel_regularizer=l2(l2_reg))(dropout_out)
    output_fc = Activation('softmax')(dense_out)
    model_shared = Model(inputs=[inp1, inp2], outputs = output_fc)    
    return model_shared 
Example #15
Source File: MSnetworks.py    From CNNArt with Apache License 2.0 4 votes vote down vote up
def fCreateModel_FCN_MultiFM(patchSize, dr_rate=0.0, iPReLU=0,l1_reg=0, l2_reg=1e-6):
    """Build a fully-convolutional 3-D classifier that averages two predictions.

    One prediction comes from a 1x1x1 Conv3D on the deepest feature map; the
    other from a 1x1x1 Conv3D on the (upsampled level-3 + level-2) sum. Both
    are globally average-pooled, averaged together, and passed through softmax.

    Args:
        patchSize: 3-element spatial size (x, y, z) of the input patch.
        dr_rate: dropout rate forwarded to the down-conv blocks and used on
            the deepest feature map.
        iPReLU: activation selector forwarded to the down-conv blocks.
        l1_reg: L1 factor for the two 1x1x1 classification convolutions.
        l2_reg: L2 weight-regularization factor for all layers.

    Returns:
        keras ``Model`` mapping the single input patch -> 2-class softmax.
    """
    # Total params: 1,420,549
    # The dense layer is repleced by a convolutional layer with filters=2 for the two classes
    # The FM from the third down scaled convolutional layer is upsempled by deconvolution and
    # added with the FM from the second down scaled convolutional layer.
    # The combined FM goes through a convolutional layer with filters=2 for the two classes
    # The two predictions are averages as the final result.
    Strides = fgetStrides()
    kernelnumber = fgetKernelNumber()
    # Channels-first input: (1, x, y, z).
    inp = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    # Encoder: three conv + down-conv stages.
    after_Conv_1 = fCreateVNet_Block(inp, kernelnumber[0], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_1 = fCreateVNet_DownConv_Block(after_Conv_1, after_Conv_1._keras_shape[1], Strides[0],
                                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_2 = fCreateVNet_Block(after_DownConv_1, kernelnumber[1], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_2 = fCreateVNet_DownConv_Block(after_Conv_2, after_Conv_2._keras_shape[1], Strides[1],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_Conv_3 = fCreateVNet_Block(after_DownConv_2, kernelnumber[2], type=fgetLayerNumConv(), l2_reg=l2_reg)
    after_DownConv_3 = fCreateVNet_DownConv_Block(after_Conv_3, after_Conv_3._keras_shape[1], Strides[2],
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    # fully convolution over the FM from the deepest level
    dropout_out1 = Dropout(dr_rate)(after_DownConv_3)
    fclayer1 = Conv3D(2,
                       kernel_size=(1,1,1),
                       kernel_initializer='he_normal',
                       weights=None,
                       padding='valid',
                       strides=(1, 1, 1),
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       )(dropout_out1)
    # Collapse spatial dims -> per-class score vector (prediction #1).
    fclayer1 = GlobalAveragePooling3D()(fclayer1)
    
    # Upsample FM from the deepest level, add with FM from level 2, 
    # NOTE(review): filters=97 and the (3,3,1)/(2,2,1) deconv geometry are
    # hard-coded to make the upsampled map match after_DownConv_2 — this only
    # holds for the patch sizes this network was designed for; confirm.
    UpedFM_Level3 = Conv3DTranspose(filters=97, kernel_size=(3,3,1), strides=(2,2,1), padding='same')(after_DownConv_3)
    conbined_FM_Level23 = add([UpedFM_Level3, after_DownConv_2])    
    fclayer2 = Conv3D(2,
                       kernel_size=(1,1,1),
                       kernel_initializer='he_normal',
                       weights=None,
                       padding='valid',
                       strides=(1, 1, 1),
                       kernel_regularizer=l1_l2(l1_reg, l2_reg),
                       )(conbined_FM_Level23)
    # Collapse spatial dims -> per-class score vector (prediction #2).
    fclayer2 = GlobalAveragePooling3D()(fclayer2)

    # combine the two predictions using average
    fcl_aver = average([fclayer1, fclayer2])
    predict = Activation('softmax')(fcl_aver)
    cnn_fcl_msfm = Model(inputs=inp, outputs=predict)
    return cnn_fcl_msfm