Python keras.layers.MaxPooling3D() Examples

The following are 30 code examples of keras.layers.MaxPooling3D(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
Example #1
Source File: LRCN_keras.py    From ActionRecognition with MIT License 14 votes vote down vote up
def load_model():
    """Build and compile an LRCN: a ConvLSTM2D feature extractor over
    frame sequences followed by an LSTM classifier, trained with SGD.
    """
    input_shape = (SequenceLength, IMSIZE[0], IMSIZE[1], 3)

    model = Sequential()
    model.add(ConvLSTM2D(32, kernel_size=(7, 7), padding='valid',
                         return_sequences=True, input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(ConvLSTM2D(64, kernel_size=(5, 5), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    # two 96-filter ConvLSTM blocks with ReLU, then a third feeding the pool
    for _ in range(2):
        model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
        model.add(Activation('relu'))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(Dense(320))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    # collapse the spatial dims so each timestep becomes a flat feature vector
    _, _, rows, cols, feats = model.output_shape
    model.add(Reshape((SequenceLength, rows * cols * feats)))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    # model structure summary
    print(model.summary())

    return model
Example #2
Source File: livenessmodel.py    From Intelegent_Lock with MIT License 6 votes vote down vote up
def get_liveness_model():
    """3D-CNN liveness classifier over 24-frame 100x100 grayscale clips.

    Returns an uncompiled Sequential model with a 2-way softmax head.
    """
    net = Sequential()
    net.add(Conv3D(32, kernel_size=(3, 3, 3),
                   activation='relu',
                   input_shape=(24,100,100,1)))
    # three identical 64-filter conv + 2x2x2 max-pool stages
    for _ in range(3):
        net.add(Conv3D(64, (3, 3, 3), activation='relu'))
        net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    net.add(Dropout(0.25))
    net.add(Flatten())
    net.add(Dense(128, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(2, activation='softmax'))

    return net
Example #3
Source File: subdivide.py    From aitom with GNU General Public License v3.0 6 votes vote down vote up
def dsrff3D(image_size, num_labels):
    """Small VGG19-style 3D CNN classifier for cubic single-channel volumes.

    Two conv-conv-pool stages (32 then 64 filters) followed by two
    512-unit fully-connected layers and a softmax over `num_labels`.
    """
    n_channels = 1
    inp = Input(shape=(image_size, image_size, image_size, n_channels))

    x = inp
    # modified VGG19 architecture: two conv/conv/pool stages
    for n_filters in (32, 64):
        x = Convolution3D(n_filters, 3, 3, 3, activation='relu', border_mode='same')(x)
        x = Convolution3D(n_filters, 3, 3, 3, activation='relu', border_mode='same')(x)
        x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(x)

    x = Flatten(name='flatten')(x)
    x = Dense(512, activation='relu', name='fc1')(x)
    x = Dense(512, activation='relu', name='fc2')(x)
    x = Dense(num_labels, activation='softmax')(x)

    return KM.Model(inputs=inp, outputs=x)
Example #4
Source File: transfer_learning.py    From hyperspectral_deeplearning_review with GNU General Public License v3.0 6 votes vote down vote up
def get_model_compiled(args, inputshape, num_class):
    """Return a compiled Sequential model for the architecture in args.arch.

    Supported: "CNN1D", any name containing "CNN2D", and "CNN3D".
    Compiled with categorical cross-entropy and Adam(args.lr1).
    """
    model = Sequential()
    if args.arch == "CNN1D":
        # single 1D conv over the spectral axis
        model.add(Conv1D(20, (24), activation='relu', input_shape=inputshape))
        model.add(MaxPooling1D(pool_size=5))
        model.add(Flatten())
        model.add(Dense(100))
    elif "CNN2D" in args.arch:
        # two 2D conv blocks, one spatial max-pool
        model.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        model.add(Activation('relu'))
        model.add(Conv2D(100, (5, 5)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(300 - 200))  # 100 units, matching the 1D head size
    elif args.arch == "CNN3D":
        # two 3D conv blocks with batch norm, spectral-aware pooling
        model.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Conv3D(64, (5, 5, 16)))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling3D(pool_size=(2, 2, 1)))
        model.add(Flatten())
        model.add(Dense(300))
    # only the non-2D heads get an extra batch norm before the classifier
    if args.arch != "CNN2D":
        model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss=categorical_crossentropy, optimizer=Adam(args.lr1), metrics=['accuracy'])
    return model
Example #5
Source File: cnn3d.py    From hyperspectral_deeplearning_review with GNU General Public License v3.0 6 votes vote down vote up
def get_model_compiled(shapeinput, num_class, w_decay=0, lr=1e-3):
    """Build and compile the 3D-CNN spectral-spatial classifier.

    Two Conv3D+BN+ReLU blocks, one max-pool, then an L2-regularized
    300-unit dense layer feeding a softmax over `num_class`.
    """
    net = Sequential()
    # block 1: 32 filters over a 5x5 spatial / 24-band spectral window
    net.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=shapeinput))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    # block 2: 64 filters with a narrower spectral window
    net.add(Conv3D(64, (5, 5, 16)))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(MaxPooling3D(pool_size=(2, 2, 1)))
    net.add(Flatten())
    net.add(Dense(300, kernel_regularizer=regularizers.l2(w_decay)))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(Dense(num_class, activation='softmax'))
    net.compile(loss=categorical_crossentropy, optimizer=Adam(lr=lr), metrics=['accuracy'])
    return net
Example #6
Source File: timeception.py    From timeception with GNU General Public License v3.0 6 votes vote down vote up
def __define_timeception_layers(self, n_channels_in, n_layers, n_groups, expansion_factor, is_dilated):
        """
        Instantiate the building blocks of every timeception layer and
        attach each one to `self` under its layer name.
        """
        for idx in range(n_layers):
            layer_num = idx + 1

            # channel budget for this layer's groups
            n_per_branch, n_out = self.__get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

            # grouped temporal convolutions for this layer
            self.__define_grouped_convolutions(n_channels_in, n_groups, n_per_branch, is_dilated, layer_num)

            # temporal max-pool: halves timesteps, keeps spatial dims
            pool_name = 'maxpool_tc%d' % (layer_num)
            setattr(self, pool_name, MaxPooling3D(pool_size=(2, 1, 1), name=pool_name))

            # this layer's output channels feed the next layer
            n_channels_in = n_out
Example #7
Source File: timeception.py    From timeception with GNU General Public License v3.0 6 votes vote down vote up
def timeception_layers(tensor, n_layers=4, n_groups=8, is_dilated=True):
    """Apply a stack of timeception layers to a 5D video tensor
    (batch, time, height, width, channels) and return the result."""
    shape = K.int_shape(tensor)
    assert len(shape) == 5

    expansion_factor = 1.25
    n_channels_in = shape[-1]

    for idx in range(n_layers):
        layer_num = idx + 1

        # channel budget for this layer's groups
        n_per_branch, n_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

        # grouped temporal convolutions
        tensor = __grouped_convolutions(tensor, n_groups, n_per_branch, is_dilated, layer_num)

        # halve the temporal resolution
        tensor = MaxPooling3D(pool_size=(2, 1, 1), name='maxpool_tc%d' % layer_num)(tensor)
        n_channels_in = n_out

    return tensor
Example #8
Source File: timeception.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __define_timeception_layers(self, n_channels_in, n_layers, n_groups, expansion_factor, is_dilated):
        """
        Create the per-layer components of the timeception stack and
        register each as a named attribute on `self`.
        """
        for layer_idx in range(n_layers):
            layer_num = layer_idx + 1

            # channels per branch and total output channels for this layer
            n_branch_channels, n_out_channels = self.__get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

            # grouped temporal convolutions
            self.__define_grouped_convolutions(n_channels_in, n_groups, n_branch_channels, is_dilated, layer_num)

            # temporal max-pooling halves the number of timesteps
            name = 'maxpool_tc%d' % (layer_num)
            setattr(self, name, MaxPooling3D(pool_size=(2, 1, 1), name=name))

            # output channels become the next layer's input channels
            n_channels_in = n_out_channels
Example #9
Source File: timeception.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def timeception_layers(tensor, n_layers=4, n_groups=8, is_dilated=True):
    """Stack `n_layers` timeception layers over a 5D tensor
    (batch, time, height, width, channels)."""
    tensor_shape = K.int_shape(tensor)
    assert len(tensor_shape) == 5

    expansion_factor = 1.25
    n_channels_in = tensor_shape[-1]

    for layer_idx in range(n_layers):
        layer_num = layer_idx + 1

        # grouping details for this layer
        n_branch_ch, n_out_ch = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

        # grouped temporal convolutions
        tensor = __grouped_convolutions(tensor, n_groups, n_branch_ch, is_dilated, layer_num)

        # downsample over time only
        tensor = MaxPooling3D(pool_size=(2, 1, 1), name='maxpool_tc%d' % layer_num)(tensor)
        n_channels_in = n_out_ch

    return tensor
Example #10
Source File: test_views.py    From Fabrik with GNU General Public License v3.0 5 votes vote down vote up
def test_keras_import(self):
        """Exercise each pooling flavour through the keras import path,
        checking the expected parameter count for each."""
        # (layer factory, expected param count) -- order matches the
        # original sequence: Global 1D, Global 2D, then 1D/2D/3D pooling
        cases = [
            (lambda: GlobalMaxPooling1D(input_shape=(16, 1)), 5),
            (lambda: GlobalMaxPooling2D(input_shape=(16, 16, 1)), 8),
            (lambda: MaxPooling1D(pool_size=2, strides=2, padding='same', input_shape=(16, 1)), 5),
            (lambda: MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', input_shape=(16, 16, 1)), 8),
            (lambda: MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='same',
                                  input_shape=(16, 16, 16, 1)), 11),
        ]
        for make_layer, n_params in cases:
            model = Sequential()
            model.add(make_layer())
            model.build()
            self.keras_param_test(model, 0, n_params)


# ********** Locally-connected Layers ********** 
Example #11
Source File: deepdrug3d.py    From DeepDrug3D with GNU General Public License v3.0 5 votes vote down vote up
def build():
        """Assemble the DeepDrug3D voxel classifier.

        Two channels-first 3D conv blocks with LeakyReLU, one max-pool,
        a 128-unit FC layer, and a 2-class softmax output.
        """
        model = Sequential()
        # Conv block 1: 14-channel 32^3 voxel grid in, 64 filters of size 5
        model.add(Convolution3D(
            input_shape=(14, 32, 32, 32),
            filters=64,
            kernel_size=5,
            padding='valid',     # Padding method
            data_format='channels_first',
        ))
        model.add(LeakyReLU(alpha=0.1))
        model.add(Dropout(0.2))
        # Conv block 2: 64 filters of size 3
        model.add(Convolution3D(
            filters=64,
            kernel_size=3,
            padding='valid',     # Padding method
            data_format='channels_first',
        ))
        model.add(LeakyReLU(alpha=0.1))
        # max-pool, then heavier dropout before the dense head
        model.add(MaxPooling3D(
            pool_size=(2, 2, 2),
            strides=None,
            padding='valid',    # Padding method
            data_format='channels_first'
        ))
        model.add(Dropout(0.4))
        # fully-connected head
        model.add(Flatten())
        model.add(Dense(128))  # TODO changed to 64 for the CAM
        model.add(LeakyReLU(alpha=0.1))
        model.add(Dropout(0.4))
        # two output classes
        model.add(Dense(2))
        model.add(Activation('softmax'))
        return model
Example #12
Source File: timeception.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __temporal_convolutional_block(tensor, n_channels_per_branch, kernel_sizes, dilation_rates, layer_num, group_num):
    """
    Build the five timeception branches over one channel group and
    concatenate their outputs along the channel axis.
    """
    suffix = '_g%d_tc%d' % (group_num, layer_num)

    # branch 1: 1x1x1 channel reduction only, no temporal operation
    t_1 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b1' + suffix)(tensor)
    t_1 = BatchNormalization(name='bn_b1' + suffix)(t_1)

    # branches 2-4: channel reduction followed by a depth-wise temporal
    # conv, with kernel sizes / dilation rates taken from the given lists
    dw_branches = []
    for i in range(3):
        b = i + 2
        t = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b%d%s' % (b, suffix))(tensor)
        t = DepthwiseConv1DLayer(kernel_sizes[i], dilation_rates[i], padding='same', name='convdw_b%d%s' % (b, suffix))(t)
        t = BatchNormalization(name='bn_b%d%s' % (b, suffix))(t)
        dw_branches.append(t)

    # branch 5: channel reduction followed by temporal max pooling
    t_5 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name='conv_b5' + suffix)(tensor)
    t_5 = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same', name='maxpool_b5' + suffix)(t_5)
    t_5 = BatchNormalization(name='bn_b5' + suffix)(t_5)

    # concatenate channels of all five branches, in the original order
    return Concatenate(axis=4, name='concat' + suffix)([t_1] + dw_branches + [t_5])
Example #13
Source File: timeception.py    From videograph with GNU General Public License v3.0 5 votes vote down vote up
def timeception_temporal_convolutions_parallelized(tensor, n_layers, n_groups, expansion_factor, is_dilated=True):
    """Parallelized variant of the timeception temporal convolutions.

    NOTE: intentionally disabled -- raises immediately. Everything below
    the raise is unreachable scaffolding kept for future work.
    """
    shape = K.int_shape(tensor)
    assert len(shape) == 5

    raise Exception('Sorry, not implemented now')

    _, n_timesteps, side_dim, side_dim, n_channels_in = shape

    # collapse the two spatial dims into a single "regions" dim
    tensor = ReshapeLayer((n_timesteps, side_dim * side_dim, 1, n_channels_in))(tensor)

    for _ in range(n_layers):
        # append global pooling as an extra region
        tensor = __global_spatial_pooling(tensor)

        # shuffled grouped temporal conv (inception-style)
        n_per_branch, n_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)
        if is_dilated:
            tensor = __timeception_shuffled_depthwise_dilated_parallelized(tensor, n_groups, n_per_branch)
        else:
            tensor = __timeception_shuffled_depthwise_parallelized(tensor, n_groups, n_per_branch)
        tensor = MaxPooling3D(pool_size=(2, 1, 1))(tensor)
        n_channels_in = n_out

    return tensor

# endregion

# region Timeception Block 
Example #14
Source File: test_views.py    From Fabrik with GNU General Public License v3.0 5 votes vote down vote up
def test_keras_export(self):
        # Round-trip export test: load the JSON fixture, wire a pooling node
        # to each of the three input layers in turn, and check that
        # pooling() produces MaxPooling1D/2D/3D respectively.
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        # l0/l1/l2 are input layers; l3 is the pooling layer under test
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Pooling']}
        # Pool 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        net['l3']['shape']['input'] = net['l1']['shape']['output']
        net['l3']['shape']['output'] = [12, 12]
        inp = data(net['l1'], '', 'l1')['l1']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling1D')
        # Pool 2D
        # NOTE(review): this appends 'l0' to l0's own outputs, unlike the
        # 1D/3D cases which append 'l3' -- looks like a typo; confirm
        # against the upstream Fabrik test before changing.
        net['l0']['connection']['output'].append('l0')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        net['l3']['shape']['input'] = net['l0']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226]
        inp = data(net['l0'], '', 'l0')['l0']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling2D')
        # Pool 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        net['l3']['shape']['input'] = net['l2']['shape']['output']
        net['l3']['shape']['output'] = [3, 226, 226, 18]
        inp = data(net['l2'], '', 'l2')['l2']
        temp = pooling(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[2].__class__.__name__, 'MaxPooling3D')


# ********** Locally-connected Layers ********** 
Example #15
Source File: timeception.py    From videograph with GNU General Public License v3.0 5 votes vote down vote up
def timeception_temporal_convolutions(tensor, n_layers, n_groups, expansion_factor, is_dilated=True):
    """Apply `n_layers` shuffled grouped temporal convolutions to a 5D
    tensor, halving the temporal resolution after each layer."""
    shape = K.int_shape(tensor)
    assert len(shape) == 5

    _, n_timesteps, side_dim, side_dim, n_channels_in = shape

    # collapse the two spatial dims into one "regions" dim
    tensor = ReshapeLayer((n_timesteps, side_dim * side_dim, 1, n_channels_in))(tensor)

    for _ in range(n_layers):
        # channel budget for this layer
        n_per_branch, n_out = __get_n_channels_per_branch(n_groups, expansion_factor, n_channels_in)

        # append global pooling as an extra local region
        tensor = __global_spatial_pooling(tensor)

        # shuffled grouped temporal conv (inception-style)
        if is_dilated:
            tensor = __timeception_shuffled_depthwise_dilated(tensor, n_groups, n_per_branch)
        else:
            tensor = __timeception_shuffled_depthwise(tensor, n_groups, n_per_branch)

        # downsample over time
        tensor = MaxPooling3D(pool_size=(2, 1, 1))(tensor)
        n_channels_in = n_out

    return tensor
Example #16
Source File: c3d.py    From 3D-ConvNets-for-Action-Recognition with MIT License 5 votes vote down vote up
def c3d_model():
    """C3D-style 3D CNN for action recognition (UCF-101, 101 classes).

    Five conv+pool stages followed by two dropout-regularized 2048-unit
    dense layers and a softmax head; all weights L2-regularized.
    """
    input_shape = (112, 112, 8, 3)
    weight_decay = 0.005
    nb_classes = 101

    # (filters, pool size == pool stride) per stage; the temporal dim is
    # pooled only in the middle stages
    stages = [
        (64, (2, 2, 1)),
        (128, (2, 2, 2)),
        (128, (2, 2, 2)),
        (256, (2, 2, 2)),
        (256, (2, 2, 1)),
    ]

    inputs = Input(input_shape)
    x = inputs
    for filters, pool in stages:
        x = Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), padding='same',
                   activation='relu', kernel_regularizer=l2(weight_decay))(x)
        x = MaxPooling3D(pool, strides=pool, padding='same')(x)

    x = Flatten()(x)
    for _ in range(2):
        x = Dense(2048, activation='relu', kernel_regularizer=l2(weight_decay))(x)
        x = Dropout(0.5)(x)
    x = Dense(nb_classes, kernel_regularizer=l2(weight_decay))(x)
    x = Activation('softmax')(x)

    return Model(inputs, x)
Example #17
Source File: seg_src.py    From aitom with GNU General Public License v3.0 5 votes vote down vote up
def model_simple_upsampling__reshape(img_shape, class_n=None):
    """Simple 3D encoder-decoder segmentation model.

    Two conv+pool stages down, a bottleneck conv, two upsample+conv
    stages up, then a 1x1x1 conv to `class_n` channels reshaped to
    (n_voxels, class_n) for a per-voxel softmax.
    """

    from keras.layers import Input, Dense, Convolution3D, MaxPooling3D, UpSampling3D, Reshape, Flatten
    from keras.models import Sequential, Model
    from keras.layers.core import Activation
    from aitom.classify.deep.unsupervised.autoencoder.seg_util import conv_block

    NUM_CHANNELS = 1
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)

    # relu hidden activations guarantee non-negative inputs to max pooling;
    # with a linear output layer (plus bias) the network can still represent
    # negative image intensities, shifted back via the bias term
    input_img = Input(shape=input_shape[1:])

    net = input_img
    # encoder: two conv + pool stages
    for _ in range(2):
        net = conv_block(net, 32, 3, 3, 3)
        net = MaxPooling3D((2, 2, 2), border_mode='same')(net)

    # bottleneck
    net = conv_block(net, 32, 3, 3, 3)

    # decoder: two upsample + conv stages
    for _ in range(2):
        net = UpSampling3D((2, 2, 2))(net)
        net = conv_block(net, 32, 3, 3, 3)

    # per-voxel class scores, flattened for the softmax
    net = Convolution3D(class_n, 1, 1, 1, border_mode='same')(net)
    net = Reshape((N.prod(img_shape), class_n))(net)
    net = Activation('softmax')(net)

    model = Model(input=input_img, output=net)

    print('model layers:')
    for l in model.layers:
        print(l.output_shape, l.name)

    return model
Example #18
Source File: timeception.py    From timeception with GNU General Public License v3.0 5 votes vote down vote up
def __temporal_convolutional_block(tensor, n_channels_per_branch, kernel_sizes, dilation_rates, layer_num, group_num):
    """
    Build the five timeception branches for one channel group and
    concatenate their outputs along the channel axis.
    """
    def _name(prefix, branch):
        # layer names encode branch, group, and timeception-layer indices
        return '%s_b%d_g%d_tc%d' % (prefix, branch, group_num, layer_num)

    # branch 1: 1x1x1 channel reduction, no temporal operation
    branch1 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=_name('conv', 1))(tensor)
    branch1 = BatchNormalization(name=_name('bn', 1))(branch1)

    # branches 2-4: channel reduction + depth-wise temporal conv with
    # increasing kernel sizes / dilation rates
    temporal_branches = []
    for idx in range(3):
        branch_id = idx + 2
        t = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=_name('conv', branch_id))(tensor)
        t = DepthwiseConv1DLayer(kernel_sizes[idx], dilation_rates[idx], padding='same', name=_name('convdw', branch_id))(t)
        t = BatchNormalization(name=_name('bn', branch_id))(t)
        temporal_branches.append(t)

    # branch 5: channel reduction + temporal max pooling
    branch5 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=_name('conv', 5))(tensor)
    branch5 = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same', name=_name('maxpool', 5))(branch5)
    branch5 = BatchNormalization(name=_name('bn', 5))(branch5)

    # concatenate channels of all branches, original order preserved
    return Concatenate(axis=4, name='concat_g%d_tc%d' % (group_num, layer_num))([branch1] + temporal_branches + [branch5])
Example #19
Source File: cnn_models_3d.py    From spinalcordtoolbox with MIT License 5 votes vote down vote up
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
                        depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
    """3D U-Net-style segmentation network.

    Contracting path of `depth` levels (filter counts doubling per level),
    an expanding path with skip connections, a 1x1x1 conv + sigmoid output,
    compiled with the Dice loss and Adam.
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # contracting path: two conv blocks per level, pooled except at the bottom;
    # each level's outputs are remembered for the skip connections
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2**layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # expanding path: upsample, concatenate the skip connection, conv twice
    for layer_depth in range(depth - 2, -1, -1):
        # BUG FIX: the UpSampling3D layer must be applied to the current
        # tensor; previously the bare layer object was passed straight to
        # concatenate(), which fails at graph-construction time.
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    # per-voxel probabilities via 1x1x1 conv + sigmoid
    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model
Example #20
Source File: standard.py    From kits19.MIScnn with GNU General Public License v3.0 5 votes vote down vote up
def contracting_layer(input, neurons):
    """One U-Net contracting step: two 3x3x3 convs then 2x2x2 max pooling.

    Returns (pooled output, pre-pool feature map); the latter feeds the
    matching expanding layer's skip connection.
    """
    features = Conv3D(neurons, (3,3,3), activation='relu', padding='same')(input)
    features = Conv3D(neurons, (3,3,3), activation='relu', padding='same')(features)
    downsampled = MaxPooling3D(pool_size=(2, 2, 2))(features)
    return downsampled, features

# Create the middle layer between the contracting and expanding layers 
Example #21
Source File: residual.py    From kits19.MIScnn with GNU General Public License v3.0 5 votes vote down vote up
def contracting_layer(input, neurons):
    """Residual U-Net contracting step: two 3x3x3 convs, concatenated with
    the block input (in-block skip), then 2x2x2 max pooling.

    Returns (pooled output, second conv output for the long skip connection).
    """
    features = Conv3D(neurons, (3,3,3), activation='relu', padding='same')(input)
    features = Conv3D(neurons, (3,3,3), activation='relu', padding='same')(features)
    merged = concatenate([input, features], axis=4)
    downsampled = MaxPooling3D(pool_size=(2, 2, 2))(merged)
    return downsampled, features

# Create the middle layer between the contracting and expanding layers 
Example #22
Source File: subdivide.py    From aitom with GNU General Public License v3.0 5 votes vote down vote up
def inception3D(image_size, num_labels):
    """3D inception-style classifier for cubic single-channel volumes.

    One stem conv+pool, a single inception module (1x1, 3x3-reduce,
    5x5-reduce, and pooled-projection branches), average pooling, and a
    softmax over `num_labels`.
    """
    n_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, n_channels))

    # stem
    x = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='valid', input_shape=())(inputs)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=None, border_mode='same')(x)

    # inception module 0
    b1x1 = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(x)
    b3x3_reduce = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(x)
    b3x3 = Convolution3D(64, 3, 3, 3, subsample=(1, 1, 1), activation='relu', border_mode='same')(b3x3_reduce)
    b5x5_reduce = Convolution3D(16, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(x)
    b5x5 = Convolution3D(32, 5, 5, 5, subsample=(1, 1, 1), activation='relu', border_mode='same')(b5x5_reduce)
    b_pool = MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='same')(x)
    b_pool_proj = Convolution3D(32, 1, 1, 1, subsample=(1, 1, 1), activation='relu', border_mode='same')(b_pool)
    from keras.layers import concatenate
    x = concatenate([b1x1, b3x3, b5x5, b_pool_proj], axis=-1)

    x = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), border_mode='valid')(x)
    x = Flatten()(x)
    x = Dropout(0.7)(x)

    # Dense and Activation kept separate so the pre-softmax features can be
    # projected into the structural feature space
    x = Dense(num_labels, activation='linear')(x)
    x = Activation('softmax')(x)

    return KM.Model(input=inputs, output=x)
Example #23
Source File: timeception.py    From videograph with GNU General Public License v3.0 4 votes vote down vote up
def __inception_style_temporal_layer_shuffled_depthwise_complicated(self, tensor_input, n_channels_per_branch):
        """
        Inception-style temporal layer: split channels into `self.n_groups`
        groups, run 5 branches per group (1x1x1 reduce; depth-wise temporal
        convs with kernels 3/5/7; temporal max-pool), concatenate all
        branches and groups, then shuffle channels across groups.
        """
        n_groups = self.n_groups
        _, n_timesteps, side_dim1, side_dim2, n_channels_in = K.int_shape(tensor_input)
        assert n_channels_in % n_groups == 0
        n_branches = 5

        # BUG FIX: use floor division. On Python 3, `/` yields a float,
        # which breaks the slice bounds in the Lambda below and the reshape
        # sizes at the end. `//` behaves identically for Python 2 ints.
        n_channels_per_group_in = n_channels_in // n_groups
        n_channels_out = n_groups * n_branches * n_channels_per_branch
        n_channels_per_group_out = n_channels_out // n_groups

        assert n_channels_out % n_groups == 0

        # slice maps into groups
        tensors = Lambda(lambda x: [x[:, :, :, :, i * n_channels_per_group_in:(i + 1) * n_channels_per_group_in] for i in range(n_groups)])(tensor_input)

        t_outputs = []
        for idx_group in range(n_groups):
            tensor_group = tensors[idx_group]

            # branch 1: dimension reduction only and no temporal conv
            t_0 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
            t_0 = BatchNormalization()(t_0)

            # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size 3)
            t_3 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
            t_3 = DepthwiseConv1DLayer(3, padding='same')(t_3)
            t_3 = BatchNormalization()(t_3)

            # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size 5)
            t_5 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
            t_5 = DepthwiseConv1DLayer(5, padding='same')(t_5)
            t_5 = BatchNormalization()(t_5)

            # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size 7)
            t_7 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
            t_7 = DepthwiseConv1DLayer(7, padding='same')(t_7)
            t_7 = BatchNormalization()(t_7)

            # branch 5: dimension reduction followed by temporal max pooling
            t_1 = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same')(tensor_group)
            t_1 = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same')(t_1)
            t_1 = BatchNormalization()(t_1)

            # concatenate channels of branches
            tensor = Concatenate(axis=4)([t_0, t_3, t_5, t_7, t_1])
            t_outputs.append(tensor)

        # concatenate channels of groups
        tensor = Concatenate(axis=4)(t_outputs)
        tensor = Activation('relu')(tensor)

        # shuffle channels: regroup, swap the group/channel axes, flatten back
        tensor = ReshapeLayer((n_timesteps, side_dim1, side_dim2, n_groups, n_channels_per_group_out))(tensor)
        tensor = TransposeLayer((0, 1, 2, 3, 5, 4))(tensor)
        tensor = ReshapeLayer((n_timesteps, side_dim1, side_dim2, n_channels_out))(tensor)

        return tensor
Example #24
Source File: timeception.py    From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def __define_temporal_convolutional_block(self, n_channels_per_branch, kernel_sizes, dilation_rates, layer_num, group_num):
        """
        Create (but do not wire) the layers of one five-branch temporal
        convolutional block and register each layer on `self` under a
        unique name of the form '<kind>_b<branch>_g<group>_tc<layer>'.

        Branch 1: 1x1x1 channel reduction only (no temporal conv).
        Branches 2-4: channel reduction followed by a depth-wise temporal
        conv using kernel_sizes[i] / dilation_rates[i] (i = 0, 1, 2).
        Branch 5: channel reduction followed by temporal max pooling.
        A channel-axis Concatenate layer joins the five branch outputs.
        """
        suffix = '_g%d_tc%d' % (group_num, layer_num)

        def _register(name, keras_layer):
            # Attach the layer to self so the call phase can look it
            # up later by its unique name.
            setattr(self, name, keras_layer)

        def _reduction(branch):
            # 1x1x1 Conv3D shared shape: every branch starts with this
            # channel-dimension reduction.
            name = 'conv_b%d%s' % (branch, suffix)
            _register(name, Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=name))

        def _batchnorm(branch):
            name = 'bn_b%d%s' % (branch, suffix)
            _register(name, BatchNormalization(name=name))

        # branch 1: dimension reduction only and no temporal conv
        _reduction(1)
        _batchnorm(1)

        # branches 2-4: dimension reduction followed by depth-wise
        # temporal conv with increasing kernel sizes
        for i, branch in enumerate((2, 3, 4)):
            _reduction(branch)
            name = 'convdw_b%d%s' % (branch, suffix)
            _register(name, DepthwiseConv1DLayer(kernel_sizes[i], dilation_rates[i], padding='same', name=name))
            _batchnorm(branch)

        # branch 5: dimension reduction followed by temporal max pooling
        _reduction(5)
        name = 'maxpool_b5' + suffix
        _register(name, MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same', name=name))
        _batchnorm(5)

        # concatenate channels of branches
        name = 'concat' + suffix
        _register(name, Concatenate(axis=4, name=name))
Example #25
Source File: layers_export.py    From Fabrik with GNU General Public License v3.0 4 votes vote down vote up
def pooling(layer, layer_in, layerId, tensor=True):
    """
    Export a pooling spec to the matching Keras pooling layer
    (max or average; 1D, 2D or 3D).

    Custom padding is emulated by inserting an explicit ZeroPadding
    layer in front of a 'valid'-padded pool. Returns a dict mapping
    layerId (and layerId + 'Pad' when custom padding was used) to the
    created layer, or to the resulting tensor when `tensor` is True.
    """
    poolMap = {
        ('1D', 'MAX'): MaxPooling1D,
        ('2D', 'MAX'): MaxPooling2D,
        ('3D', 'MAX'): MaxPooling3D,
        ('1D', 'AVE'): AveragePooling1D,
        ('2D', 'AVE'): AveragePooling2D,
        ('3D', 'AVE'): AveragePooling3D,
    }
    zeroPadMap = {'1D': ZeroPadding1D, '2D': ZeroPadding2D, '3D': ZeroPadding3D}

    params = layer['params']
    layer_type = params['layer_type']
    pool_type = params['pool']
    padding = get_padding(layer)
    out = {}

    # Collect per-dimensionality stride / kernel / custom-pad values.
    if layer_type == '1D':
        strides = params['stride_w']
        kernel = params['kernel_w']
        custom_pad = params['pad_w'] if padding == 'custom' else None
    elif layer_type == '2D':
        strides = (params['stride_h'], params['stride_w'])
        kernel = (params['kernel_h'], params['kernel_w'])
        custom_pad = ((params['pad_h'], params['pad_w'])
                      if padding == 'custom' else None)
    else:
        strides = (params['stride_h'], params['stride_w'],
                   params['stride_d'])
        kernel = (params['kernel_h'], params['kernel_w'],
                  params['kernel_d'])
        custom_pad = ((params['pad_h'], params['pad_w'], params['pad_d'])
                      if padding == 'custom' else None)

    if padding == 'custom':
        # Pad explicitly, then pool with 'valid' so the effective
        # padding matches the requested amounts exactly.
        out[layerId + 'Pad'] = zeroPadMap[layer_type](padding=custom_pad)(*layer_in)
        padding = 'valid'
        layer_in = [out[layerId + 'Pad']]

    # Note - figure out a permanent fix for padding calculation of layers
    # in case padding is given in layer attributes
    # if ('padding' in layer['params']):
    #    padding = layer['params']['padding']
    out[layerId] = poolMap[(layer_type, pool_type)](
        pool_size=kernel, strides=strides, padding=padding)
    if tensor:
        out[layerId] = out[layerId](*layer_in)
    return out


# ********** Locally-connected Layers ********** 
Example #26
Source File: network.py    From cocktail-party with MIT License 4 votes vote down vote up
def build(video_shape, audio_spectrogram_size):
		"""
		Build the video-to-spectrogram network: three 3-D conv stages,
		two per-timestep dense stages, then two dense stages over the
		flattened sequence, ending in a linear spectrogram output.
		Returns the model wrapped in a VideoToSpeechNet.
		"""
		model = Sequential()

		# Conv stages: pad -> conv -> BN -> LeakyReLU -> maxpool -> dropout.
		# (stage index, filters, padding, kernel, conv strides)
		conv_stages = [
			(1, 32, (1, 2, 2), (3, 5, 5), (1, 2, 2)),
			(2, 64, (1, 2, 2), (3, 5, 5), (1, 1, 1)),
			(3, 128, (1, 1, 1), (3, 3, 3), (1, 1, 1)),
		]
		for idx, filters, pad, kernel, conv_strides in conv_stages:
			if idx == 1:
				# First layer carries the input shape.
				model.add(ZeroPadding3D(padding=pad, name='zero%d' % idx, input_shape=video_shape))
			else:
				model.add(ZeroPadding3D(padding=pad, name='zero%d' % idx))
			model.add(Convolution3D(filters, kernel, strides=conv_strides, kernel_initializer='he_normal', name='conv%d' % idx))
			model.add(BatchNormalization())
			model.add(LeakyReLU())
			model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max%d' % idx))
			model.add(Dropout(0.25))

		# Flatten the spatial dims per timestep.
		model.add(TimeDistributed(Flatten(), name='time'))

		# Two dense stages applied per timestep.
		for idx in (1, 2):
			model.add(Dense(1024, kernel_initializer='he_normal', name='dense%d' % idx))
			model.add(BatchNormalization())
			model.add(LeakyReLU())
			model.add(Dropout(0.25))

		# Collapse the time dimension, then two wider dense stages.
		model.add(Flatten())

		for idx in (3, 4):
			model.add(Dense(2048, kernel_initializer='he_normal', name='dense%d' % idx))
			model.add(BatchNormalization())
			model.add(LeakyReLU())
			model.add(Dropout(0.25))

		# Linear output layer sized to the flattened spectrogram.
		model.add(Dense(audio_spectrogram_size, name='output'))

		model.summary()

		return VideoToSpeechNet(model)
Example #27
Source File: timeception.py    From timeception with GNU General Public License v3.0 4 votes vote down vote up
def __define_temporal_convolutional_block(self, n_channels_per_branch, kernel_sizes, dilation_rates, layer_num, group_num):
        """
        Define the layers of one five-branch temporal convolutional block
        and register each on `self` via setattr, named
        '<kind>_b<branch>_g<group_num>_tc<layer_num>'.

        Branch 1 is a 1x1x1 channel reduction only; branches 2-4 add a
        depth-wise temporal conv using kernel_sizes[0..2] and
        dilation_rates[0..2]; branch 5 adds temporal max pooling. A
        channel-axis Concatenate joins the branches. Layers are only
        created here, not wired to tensors.
        """

        # branch 1: dimension reduction only and no temporal conv
        layer_name = 'conv_b1_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b1_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNormalization(name=layer_name)
        setattr(self, layer_name, layer)

        # branch 2: dimension reduction followed by depth-wise temp conv (kernel-size kernel_sizes[0])
        layer_name = 'conv_b2_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'convdw_b2_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(kernel_sizes[0], dilation_rates[0], padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b2_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNormalization(name=layer_name)
        setattr(self, layer_name, layer)

        # branch 3: dimension reduction followed by depth-wise temp conv (kernel-size kernel_sizes[1])
        layer_name = 'conv_b3_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'convdw_b3_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(kernel_sizes[1], dilation_rates[1], padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b3_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNormalization(name=layer_name)
        setattr(self, layer_name, layer)

        # branch 4: dimension reduction followed by depth-wise temp conv (kernel-size kernel_sizes[2])
        layer_name = 'conv_b4_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'convdw_b4_g%d_tc%d' % (group_num, layer_num)
        layer = DepthwiseConv1DLayer(kernel_sizes[2], dilation_rates[2], padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b4_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNormalization(name=layer_name)
        setattr(self, layer_name, layer)

        # branch 5: dimension reduction followed by temporal max pooling
        # (pool only along the time axis; stride 1 + 'same' keeps length)
        layer_name = 'conv_b5_g%d_tc%d' % (group_num, layer_num)
        layer = Conv3D(n_channels_per_branch, kernel_size=(1, 1, 1), padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'maxpool_b5_g%d_tc%d' % (group_num, layer_num)
        layer = MaxPooling3D(pool_size=(2, 1, 1), strides=(1, 1, 1), padding='same', name=layer_name)
        setattr(self, layer_name, layer)
        layer_name = 'bn_b5_g%d_tc%d' % (group_num, layer_num)
        layer = BatchNormalization(name=layer_name)
        setattr(self, layer_name, layer)

        # concatenate channels of branches (axis 4 = channel axis)
        layer_name = 'concat_g%d_tc%d' % (group_num, layer_num)
        layer = Concatenate(axis=4, name=layer_name)
        setattr(self, layer_name, layer)
Example #28
Source File: step2_train_nodule_detector.py    From kaggle_ndsb2017 with MIT License 4 votes vote down vote up
def get_net(input_shape=(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1), load_weight_path=None, features=False, mal=False) -> Model:
    """
    Build the 3-D nodule-detector network with two heads: a sigmoid
    classification output ('out_class') and a linear malignancy output
    ('out_malignancy').

    NOTE(review): this uses legacy Keras 1 keyword arguments
    (border_mode, subsample, Dropout(p=...), Model(input=, output=)) —
    it requires a Keras 1.x install; do not "modernize" without
    re-validating saved weights.

    :param input_shape: input cube shape, channels last (default 1 channel).
    :param load_weight_path: optional path of weights to load (by_name=False,
        i.e. strict topological loading).
    :param features: if True, return a model whose output is the 64-channel
        'last_64' feature map instead of the two heads.
    :param mal: unused in this body — presumably a legacy flag; verify
        against callers before removing.
    :return: compiled Keras Model (compilation applies to the two-head
        model even when `features` swaps the returned model).
    """
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    # halve the depth axis up front to reduce compute
    x = AveragePooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1), border_mode="same")(x)
    x = Convolution3D(64, 3, 3, 3, activation='relu', border_mode='same', name='conv1', subsample=(1, 1, 1))(x)
    # pool only spatially in the 1st stage
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), border_mode='valid', name='pool1')(x)

    # 2nd layer group
    x = Convolution3D(128, 3, 3, 3, activation='relu', border_mode='same', name='conv2', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool2')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.3)(x)

    # 3rd layer group
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3a', subsample=(1, 1, 1))(x)
    x = Convolution3D(256, 3, 3, 3, activation='relu', border_mode='same', name='conv3b', subsample=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool3')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.4)(x)

    # 4th layer group
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4a', subsample=(1, 1, 1))(x)
    x = Convolution3D(512, 3, 3, 3, activation='relu', border_mode='same', name='conv4b', subsample=(1, 1, 1),)(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), border_mode='valid', name='pool4')(x)
    if USE_DROPOUT:
        x = Dropout(p=0.5)(x)

    # shared 64-channel feature map feeding both heads
    last64 = Convolution3D(64, 2, 2, 2, activation="relu", name="last_64")(x)
    out_class = Convolution3D(1, 1, 1, 1, activation="sigmoid", name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)

    # linear (no activation) malignancy regression head
    out_malignancy = Convolution3D(1, 1, 1, 1, activation=None, name="out_malignancy_last")(last64)
    out_malignancy = Flatten(name="out_malignancy")(out_malignancy)

    model = Model(input=inputs, output=[out_class, out_malignancy])
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True), loss={"out_class": "binary_crossentropy", "out_malignancy": mean_absolute_error}, metrics={"out_class": [binary_accuracy, binary_crossentropy], "out_malignancy": mean_absolute_error})

    if features:
        model = Model(input=inputs, output=[last64])
    model.summary(line_length=140)

    return model
Example #29
Source File: multiRes.py    From kits19.MIScnn with GNU General Public License v3.0 4 votes vote down vote up
def Unet(input_shape, n_labels, activation='sigmoid'):
    '''
    MultiResUNet3D

    Arguments:
        input_shape {tuple} -- shape of the 3-D input volume
            (including the channel axis)
        n_labels {int} -- number of output segmentation labels
        activation {str} -- activation of the final 1x1x1 conv
            (default: 'sigmoid')

    Returns:
        [keras model] -- MultiResUNet3D model
    '''

    inputs = Input(input_shape)

    # Encoder: four MultiRes blocks, each followed by 2x2x2 max pooling.
    # skips[i] holds the ResPath-refined skip features for decoder level i.
    base = 32
    skips = []
    x = inputs
    for depth, respath_length in enumerate((4, 3, 2, 1)):
        n_filters = base * (2 ** depth)
        block = MultiResBlock(n_filters, x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(block)
        skips.append(ResPath(n_filters, respath_length, block))

    # Bottleneck.
    x = MultiResBlock(base * 16, x)

    # Decoder: transpose-conv upsample, concat with the matching skip
    # (channel axis 4), then a MultiRes block at each level.
    for depth in (3, 2, 1, 0):
        n_filters = base * (2 ** depth)
        upsampled = Conv3DTranspose(n_filters, (2, 2, 2), strides=(2, 2, 2), padding='same')(x)
        x = MultiResBlock(n_filters, concatenate([upsampled, skips[depth]], axis=4))

    # Final 1x1x1 conv maps to the label channels.
    segmentation = conv3d_bn(x, n_labels, 1, 1, 1, activation=activation)

    return Model(inputs=[inputs], outputs=[segmentation])

#-----------------------------------------------------#
#                     Subroutines                     #
#-----------------------------------------------------# 
Example #30
Source File: videograph.py    From videograph with GNU General Public License v3.0 4 votes vote down vote up
def graph_embedding(tensor, n_layers, n_avg_size, n_kernel_size, t_kernel_size, n_max_size, t_max_size):
    """
    Graph embedding: alternately convolves/pools over the node and time
    dimensions by repeatedly reshaping to hide one dimension inside the
    batch axis while operating on the other.

    NOTE(review): 'n_odes' throughout looks like a typo for 'n_nodes' —
    keep the name for compatibility, but verify with the maintainers.

    :param tensor: 6-D input (batch, nodes, timesteps, side, side, channels);
        assumes square spatial dims (both unpacked into side_dim).
    :param n_layers: number of spatio-temporal conv layers to apply.
    :param n_avg_size: average-pool size over the node dimension.
    :param n_kernel_size: kernel size for the node convolution helper.
    :param t_kernel_size: kernel size of the depth-wise temporal conv.
    :param n_max_size: max-pool size over the node dimension per layer.
    :param t_max_size: max-pool size over the time dimension per layer.
    :return: tensor reshaped back to (nodes, timesteps, side, side, channels)
        per batch item, with dims reduced by the pooling steps.
    """

    input_shape = K.int_shape(tensor)
    _, n_odes, n_timesteps, side_dim, side_dim, n_channels_in = input_shape

    # hide temporal dimension (fold it into the batch axis so 3-D ops
    # see only the node + spatial dims)
    tensor = TransposeLayer((0, 2, 1, 3, 4, 5))(tensor)  # (None, 64, 100, 7, 7, 1024)
    tensor = ReshapeLayer((n_odes, side_dim, side_dim, n_channels_in))(tensor)

    # pool over node
    tensor = AveragePooling3D(pool_size=(n_avg_size, 1, 1), name='pool_n')(tensor)
    _, n_odes, side_dim, side_dim, n_channels_in = K.int_shape(tensor)

    # recover node dimension
    tensor = ReshapeLayer((n_timesteps, n_odes, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 100, 7, 7, 1024)
    tensor = TransposeLayer((0, 2, 1, 3, 4, 5))(tensor)  # (None, 100, 64, 7, 7, 1024)

    # hide the node dimension (now time + spatial dims are visible)
    tensor = ReshapeLayer((n_timesteps, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 7, 7, 1024)

    # 2 layers spatio-temporal conv
    for i in range(n_layers):
        layer_id = '%d' % (i + 1)

        # spatial conv (1x1x1, channel mixing only)
        tensor = Conv3D(n_channels_in, (1, 1, 1), padding='SAME', name='conv_s_%s' % (layer_id))(tensor)  # (None, 64, 7, 7, 1024)

        # temporal conv
        tensor = DepthwiseConv1DLayer(t_kernel_size, padding='SAME', name='conv_t_%s' % (layer_id))(tensor)  # (None, 64, 7, 7, 1024)

        # node conv (helper swaps back to the node view internally)
        tensor = __convolve_nodes(tensor, n_odes, layer_id, n_kernel_size)  # (None, 100, 7, 7, 1024)

        # activation
        tensor = BatchNormalization()(tensor)
        tensor = LeakyReLU(alpha=0.2)(tensor)

        # max_pool over nodes; re-read the (shrunk) shape afterwards
        tensor = MaxPooling3D(pool_size=(n_max_size, 1, 1), name='pool_n_%s' % (layer_id))(tensor)  # (None, 100, 7, 7, 1024)
        _, n_odes, side_dim, side_dim, n_channels_in = K.int_shape(tensor)

        # get back temporal dimension and hide node dimension
        tensor = ReshapeLayer((n_timesteps, n_odes, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 100, 7, 7, 1024)
        tensor = TransposeLayer((0, 2, 1, 3, 4, 5))(tensor)  # (None, 100, 64, 7, 7, 1024)
        tensor = ReshapeLayer((n_timesteps, side_dim, side_dim, n_channels_in))(tensor)  # (None, 64, 7, 7, 1024)

        # max_pool over time; re-read the (shrunk) shape afterwards
        tensor = MaxPooling3D(pool_size=(t_max_size, 1, 1), name='pool_t_%s' % (layer_id))(tensor)  # (None, 64, 7, 7, 1024)
        _, n_timesteps, side_dim, side_dim, n_channels_in = K.int_shape(tensor)  # (None, 64, 7, 7, 1024)

    # recover nodes dimension
    tensor = ReshapeLayer((n_odes, n_timesteps, side_dim, side_dim, n_channels_in))(tensor)

    return tensor