Python keras.layers.Conv3D() Examples
The following are 30 code examples of keras.layers.Conv3D().
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
keras.layers, or try the search function.
Example #1
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 6 votes |
def transition_SE_layer_3D(input_tensor, numFilters, compressionFactor=1.0, se_ratio=16):
    """DenseNet-style 3D transition layer with a squeeze-and-excitation block.

    Applies BN -> ReLU -> 1x1x1 Conv3D (with channel compression), an SE block,
    and 2x average pooling. Returns the output tensor and its filter count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)
    # Channel axis depends on the backend image data format.
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    out = BatchNormalization(axis=bn_axis)(input_tensor)
    out = Activation('relu')(out)
    out = Conv3D(numOutPutFilters, (1, 1, 1), strides=(1, 1, 1), padding='same',
                 kernel_initializer='he_normal')(out)

    # SE Block
    out = squeeze_excitation_block_3D(out, ratio=se_ratio)
    #out = BatchNormalization(axis=bn_axis)(out)

    # downsampling
    out = AveragePooling3D((2, 2, 2), strides=(2, 2, 2), padding='valid',
                           data_format='channels_last', name='')(out)

    #out = squeeze_excitation_block(out, ratio=se_ratio)
    return out, numOutPutFilters
Example #2
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 6 votes |
def transition_layer_3D(input_tensor, numFilters, compressionFactor=1.0):
    """DenseNet-style 3D transition layer: BN -> ReLU -> 1x1x1 conv -> avg-pool.

    The 1x1x1 convolution compresses the channel count by `compressionFactor`.
    Returns the downsampled tensor and the new number of filters.
    """
    numOutPutFilters = int(numFilters * compressionFactor)
    # Channel axis depends on the backend image data format.
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    out = BatchNormalization(axis=bn_axis)(input_tensor)
    out = Activation('relu')(out)
    out = Conv3D(numOutPutFilters, (1, 1, 1), strides=(1, 1, 1), padding='same',
                 kernel_initializer='he_normal')(out)

    # downsampling
    out = AveragePooling3D((2, 2, 2), strides=(2, 2, 2), padding='valid',
                           data_format='channels_last', name='')(out)
    return out, numOutPutFilters
Example #3
Source File: transfer_learning.py From hyperspectral_deeplearning_review with GNU General Public License v3.0 | 6 votes |
def get_model_compiled(args, inputshape, num_class):
    """Build and compile a CNN1D/CNN2D/CNN3D classifier selected by `args.arch`.

    The final BN+ReLU pair before the softmax head is skipped only for the
    exact arch string "CNN2D". Compiled with categorical cross-entropy / Adam.
    """
    net = Sequential()
    if args.arch == "CNN1D":
        net.add(Conv1D(20, (24), activation='relu', input_shape=inputshape))
        net.add(MaxPooling1D(pool_size=5))
        net.add(Flatten())
        net.add(Dense(100))
    elif "CNN2D" in args.arch:
        net.add(Conv2D(50, kernel_size=(5, 5), input_shape=inputshape))
        net.add(Activation('relu'))
        net.add(Conv2D(100, (5, 5)))
        net.add(Activation('relu'))
        net.add(MaxPooling2D(pool_size=(2, 2)))
        net.add(Flatten())
        net.add(Dense(100))
    elif args.arch == "CNN3D":
        net.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=inputshape))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
        net.add(Conv3D(64, (5, 5, 16)))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
        net.add(MaxPooling3D(pool_size=(2, 2, 1)))
        net.add(Flatten())
        net.add(Dense(300))
    if args.arch != "CNN2D":
        net.add(BatchNormalization())
        net.add(Activation('relu'))
    net.add(Dense(num_class, activation='softmax'))
    net.compile(loss=categorical_crossentropy,
                optimizer=Adam(args.lr1),
                metrics=['accuracy'])
    return net
Example #4
Source File: cnn3d.py From hyperspectral_deeplearning_review with GNU General Public License v3.0 | 6 votes |
def get_model_compiled(shapeinput, num_class, w_decay=0, lr=1e-3):
    """Build and compile the 3D-CNN hyperspectral classifier.

    Two Conv3D+BN+ReLU stages, max-pooling, an L2-regularized dense layer,
    and a softmax head. Compiled with categorical cross-entropy / Adam(lr).
    """
    net = Sequential()
    net.add(Conv3D(32, kernel_size=(5, 5, 24), input_shape=shapeinput))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(Conv3D(64, (5, 5, 16)))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(MaxPooling3D(pool_size=(2, 2, 1)))
    net.add(Flatten())
    net.add(Dense(300, kernel_regularizer=regularizers.l2(w_decay)))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(Dense(num_class, activation='softmax'))
    net.compile(loss=categorical_crossentropy,
                optimizer=Adam(lr=lr),
                metrics=['accuracy'])
    return net
Example #5
Source File: Build_Model.py From DOVE with GNU General Public License v3.0 | 6 votes |
def makecnn(learningrate, regular, decay, channel_number):
    """Build and compile the DOVE 3D-CNN binary classifier.

    Three Conv3D/BN/LeakyReLU stages with max-pooling on a
    (20, 20, 20, channel_number) voxel grid, followed by two dense layers and
    a sigmoid output. Every trainable layer carries an L2 activity
    regularizer. Compiled with binary cross-entropy and Nadam.
    """
    def dense_opts():
        # Options shared by every Dense layer (fresh regularizer per layer).
        return dict(use_bias=True,
                    kernel_initializer='glorot_normal',
                    bias_initializer='zeros',
                    kernel_regularizer=None,
                    bias_regularizer=None,
                    activity_regularizer=l2(regular),
                    kernel_constraint=None,
                    bias_constraint=None)

    def conv_opts():
        # Options shared by every Conv3D layer.
        opts = dense_opts()
        opts.update(strides=(1, 1, 1),
                    padding='valid',
                    data_format='channels_last',
                    dilation_rate=(1, 1, 1))
        return opts

    cnn = Sequential()

    # Stage 1
    cnn.add(Conv3D(100, kernel_size=(3, 3, 3),
                   input_shape=(20, 20, 20, channel_number), **conv_opts()))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(0.2))
    #cnn.add(Dropout(0.3))

    # Stage 2
    cnn.add(Conv3D(200, kernel_size=(3, 3, 3), **conv_opts()))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(0.2))
    #cnn.add(Dropout(0.3))
    cnn.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                         data_format='channels_last'))
    # NOTE(review): axis=1 here despite channels_last data — kept as original.
    cnn.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001,
                               center=True, scale=True,
                               beta_initializer='zeros',
                               gamma_initializer='ones',
                               moving_mean_initializer='zeros',
                               moving_variance_initializer='ones',
                               beta_regularizer=None, gamma_regularizer=None,
                               beta_constraint=None, gamma_constraint=None))

    # Stage 3
    cnn.add(Conv3D(400, kernel_size=(3, 3, 3), **conv_opts()))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(0.2))
    #cnn.add(Dropout(0.3))
    cnn.add(MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid',
                         data_format='channels_last'))

    # Classifier head
    cnn.add(Flatten())
    cnn.add(Dropout(0.3))
    cnn.add(Dense(1000, input_shape=(32000,), **dense_opts()))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(0.2))
    cnn.add(Dropout(0.3))
    cnn.add(Dense(100, **dense_opts()))
    cnn.add(BatchNormalization())
    cnn.add(LeakyReLU(0.2))
    cnn.add(Dropout(0.3))
    cnn.add(Dense(1, activation='sigmoid', **dense_opts()))

    nadam = Nadam(lr=learningrate, beta_1=0.9, beta_2=0.999,
                  epsilon=1e-08, schedule_decay=decay)
    cnn.compile(loss='binary_crossentropy', optimizer=nadam,
                metrics=['accuracy', f1score, precision, recall])
    return cnn
Example #6
Source File: livenessmodel.py From Intelegent_Lock with MIT License | 6 votes |
def get_liveness_model():
    """Build the 3D-CNN liveness-detection network.

    Input is a stack of 24 grayscale 100x100 frames; output is a two-class
    softmax (live vs. spoof). The model is returned uncompiled.
    """
    net = Sequential()
    net.add(Conv3D(32, kernel_size=(3, 3, 3), activation='relu',
                   input_shape=(24, 100, 100, 1)))
    net.add(Conv3D(64, (3, 3, 3), activation='relu'))
    net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    net.add(Conv3D(64, (3, 3, 3), activation='relu'))
    net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    net.add(Conv3D(64, (3, 3, 3), activation='relu'))
    net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    net.add(Dropout(0.25))
    net.add(Flatten())
    net.add(Dense(128, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(2, activation='softmax'))
    return net
Example #7
Source File: blocks.py From CSBDeep with BSD 3-Clause "New" or "Revised" License | 6 votes |
def conv_block3(n_filter, n1, n2, n3,
                activation="relu",
                border_mode="same",
                dropout=0.0,
                batch_norm=False,
                init="glorot_uniform",
                **kwargs):
    """Return a closure applying Conv3D (+ optional BN) + activation (+ dropout).

    With batch_norm the activation runs after BatchNormalization; otherwise it
    is fused into the Conv3D layer itself.
    """
    def _func(lay):
        if batch_norm:
            # Activation is applied after BN, so the conv stays linear here.
            out = Conv3D(n_filter, (n1, n2, n3), padding=border_mode,
                         kernel_initializer=init, **kwargs)(lay)
            out = BatchNormalization()(out)
            out = Activation(activation)(out)
        else:
            out = Conv3D(n_filter, (n1, n2, n3), padding=border_mode,
                         kernel_initializer=init, activation=activation,
                         **kwargs)(lay)
        if dropout is not None and dropout > 0:
            out = Dropout(dropout)(out)
        return out
    return _func
Example #8
Source File: T3D_keras.py From T3D-keras with MIT License | 6 votes |
def _TTL(prev_layer):
    """Temporal Transition Layer: three parallel BN-ReLU-Conv3D branches.

    The branches differ only in temporal kernel depth (1, 3, 4) and are
    concatenated along axis 1.
    """
    def branch(depth):
        # BN -> ReLU -> Conv3D with a (depth, 3, 3) kernel, 128 filters.
        out = BatchNormalization()(prev_layer)
        out = Activation('relu')(out)
        return Conv3D(128, kernel_size=(depth, 3, 3), strides=1,
                      use_bias=False, padding='same')(out)

    # Original also had a 1x1x1 variant of the first branch (commented out).
    merged = keras.layers.concatenate(
        [branch(1), branch(3), branch(4)], axis=1)
    return merged
Example #9
Source File: drn.py From 3D-ConvNets-for-Action-Recognition with MIT License | 6 votes |
def denseblock(x, growth_rate, strides=(1, 1, 1), internal_layers=4,
               dropout_rate=0., weight_decay=0.005):
    """Dense block: an entry 3x3x3 conv, stacked conv_factory layers whose
    outputs are concatenated, then a 1x1x1 fusion convolution."""
    # Entry convolution (optionally strided).
    x = Conv3D(growth_rate, (3, 3, 3),
               kernel_initializer='he_normal', padding="same",
               strides=strides, use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # Collect every intermediate feature map for dense connectivity.
    feature_maps = [x]
    for _ in range(internal_layers - 1):
        x = conv_factory(x, growth_rate, dropout_rate, weight_decay)
        feature_maps.append(x)
    x = concatenate(feature_maps, axis=-1)

    # 1x1x1 convolution fuses the concatenated features.
    x = Conv3D(internal_layers * growth_rate, (1, 1, 1),
               kernel_initializer='he_normal', padding="same",
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    return x
Example #10
Source File: model.py From Brain-Segmentation with MIT License | 6 votes |
def model_thresholding():
    """Build a small 3D U-Net-style segmentation model for brain volumes.

    Input: a single-channel (1, 240, 240, 48) volume, channels-first.
    Three dilated Conv3D encoder stages with max-pooling, two transposed-conv
    decoder stages with skip concatenations, and a sigmoid 1-filter output.

    Fix: the original ended with an unreachable duplicated tail
    (concat_2 / conv_5 / return repeated after the first `return`); the dead
    code has been removed. Behavior is unchanged.
    """
    IMAGE_ORDERING = "channels_first"
    img_input = Input(shape=(1, 240, 240, 48))

    # Encoder: dilated convolutions with pooling between stages.
    conv_1 = Conv3D(filters=16, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_1",
                    dilation_rate=(2, 2, 2), data_format=IMAGE_ORDERING)(img_input)
    maxpool_1 = MaxPool3D(name="MAXPOOL3D_1", data_format=IMAGE_ORDERING)(conv_1)
    conv_2 = Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_2",
                    dilation_rate=(2, 2, 2), data_format=IMAGE_ORDERING)(maxpool_1)
    maxpool_2 = MaxPool3D(name="MAXPOOL3D_2", data_format=IMAGE_ORDERING)(conv_2)
    conv_3 = Conv3D(filters=32, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_3",
                    dilation_rate=(2, 2, 2), data_format=IMAGE_ORDERING)(maxpool_2)

    # Decoder: transposed convs with channel-axis skip concatenations.
    convt_1 = Conv3DTranspose(16, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                              name="CONV3DT_1", activation='relu',
                              data_format=IMAGE_ORDERING)(conv_3)
    concat_1 = Concatenate(axis=1)([convt_1, conv_2])
    conv_4 = Conv3D(filters=16, kernel_size=(3, 3, 3), padding='same',
                    activation='relu', name="CONV3D_4",
                    data_format=IMAGE_ORDERING)(concat_1)
    convt_2 = Conv3DTranspose(4, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                              name="CONV3DT_2", activation='relu',
                              data_format=IMAGE_ORDERING)(conv_4)
    concat_2 = Concatenate(axis=1)([convt_2, conv_1])

    # Sigmoid output: per-voxel foreground probability.
    conv_5 = Conv3D(filters=1, kernel_size=(3, 3, 3), padding='same',
                    activation='sigmoid', name="CONV3D_5",
                    data_format=IMAGE_ORDERING)(concat_2)
    return Model(img_input, conv_5)
Example #11
Source File: unet.py From Keras-Brats-Improved-Unet3d with MIT License | 5 votes |
def create_convolution_block(input_layer, n_filters, batch_normalization=False,
                             kernel=(3, 3, 3), activation=None,
                             padding='same', strides=(1, 1, 1),
                             instance_normalization=False):
    """Conv3D followed by optional batch or instance normalization and an
    activation.

    :param input_layer: tensor to convolve.
    :param n_filters: number of Conv3D filters.
    :param batch_normalization: apply BatchNormalization(axis=1) after conv.
    :param kernel: Conv3D kernel size.
    :param activation: Keras activation layer class to use (default 'relu').
    :param padding: Conv3D padding mode.
    :param strides: Conv3D strides.
    :param instance_normalization: apply InstanceNormalization (requires
        keras_contrib) instead of batch normalization.
    :return: activated output tensor.
    """
    layer = Conv3D(n_filters, kernel, padding=padding,
                   strides=strides)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=1)(layer)
    elif instance_normalization:
        # InstanceNormalization lives in keras_contrib, an optional extra.
        try:
            from keras_contrib.layers.normalization import InstanceNormalization
        except ImportError:
            raise ImportError("Install keras_contrib in order to use instance normalization."
                              "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")
        layer = InstanceNormalization(axis=1)(layer)
    # Default to ReLU when no activation class is supplied.
    if activation is None:
        return Activation('relu')(layer)
    return activation()(layer)
Example #12
Source File: i3d_keras_epic_kitchens.py From videograph with GNU General Public License v3.0 | 5 votes |
def conv3d_bn(x, filters, num_frames, num_row, num_col,
              padding='same', strides=(1, 1, 1), use_bias=False,
              use_activation_fn=True, use_bn=True, name=None):
    """Utility function to apply conv3d + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv3D`.
        num_frames: frames (time depth) of the convolution kernel.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv3D`.
        strides: strides in `Conv3D`.
        use_bias: use bias or not.
        use_activation_fn: use an activation function or not.
        use_bn: use batch normalization or not.
        name: name of the ops; will become `name + '_conv'` for the
            convolution and `name + '_bn'` for the batch norm layer.

    # Returns
        Output tensor after applying `Conv3D` and `BatchNormalization`.
    """
    conv_name = name + '_conv' if name is not None else None
    bn_name = name + '_bn' if name is not None else None

    x = Conv3D(filters, (num_frames, num_row, num_col),
               strides=strides, padding=padding,
               use_bias=use_bias, name=conv_name)(x)
    if use_bn:
        # Channel axis: 1 for channels_first, 4 for 5-D channels_last tensors.
        bn_axis = 1 if K.image_data_format() == 'channels_first' else 4
        x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    if use_activation_fn:
        x = Activation('relu', name=name)(x)
    return x
Example #13
Source File: models.py From C3D-keras with MIT License | 5 votes |
def c3d_model():
    """Build the C3D action-recognition network for 101 classes.

    Five Conv3D+MaxPool3D stages over a (112, 112, 16, 3) clip, then two
    2048-unit dense layers with dropout and a softmax output. All weight
    layers use L2 regularization. Returned uncompiled.
    """
    input_shape = (112, 112, 16, 3)
    weight_decay = 0.005
    nb_classes = 101
    reg = l2(weight_decay)

    inputs = Input(input_shape)
    x = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(inputs)
    # First pool keeps the temporal dimension (depth stride 1).
    x = MaxPool3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = Flatten()(x)
    x = Dense(2048, activation='relu', kernel_regularizer=reg)(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation='relu', kernel_regularizer=reg)(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes, kernel_regularizer=reg)(x)
    x = Activation('softmax')(x)
    return Model(inputs, x)
Example #14
Source File: non_local.py From keras-non-local-nets with MIT License | 5 votes |
def _convND(ip, rank, channels):
    """Apply a 1x...x1 pointwise convolution matching the tensor rank.

    Rank 3 -> Conv1D, rank 4 -> Conv2D, rank 5 -> Conv3D; all without bias
    and with he_normal initialization.
    """
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"

    if rank == 3:
        conv = Conv1D(channels, 1, padding='same', use_bias=False,
                      kernel_initializer='he_normal')
    elif rank == 4:
        conv = Conv2D(channels, (1, 1), padding='same', use_bias=False,
                      kernel_initializer='he_normal')
    else:
        conv = Conv3D(channels, (1, 1, 1), padding='same', use_bias=False,
                      kernel_initializer='he_normal')
    return conv(ip)
Example #15
Source File: test_bench.py From Keras-inference-time-optimizer with MIT License | 5 votes |
def get_simple_3d_model():
    """Build a tiny two-stage Conv3D+BN+ReLU test model.

    Input shape is (28, 28, 28, 4); both convolutions use 32 filters with
    random-uniform initialization. Used as a fixture for inference-time
    optimization tests.
    """
    from keras.layers import Input, Conv3D, BatchNormalization, Activation
    from keras.models import Model

    inp = Input((28, 28, 28, 4))
    hidden = Conv3D(32, (3, 3, 3), padding='same',
                    kernel_initializer='random_uniform')(inp)
    hidden = BatchNormalization()(hidden)
    hidden = Activation('relu')(hidden)
    hidden = Conv3D(32, (3, 3, 3), padding='same',
                    kernel_initializer='random_uniform')(hidden)
    hidden = BatchNormalization()(hidden)
    out = Activation('relu')(hidden)
    return Model(inputs=inp, outputs=out)
Example #16
Source File: CNN.py From CNNs-on-CHB-MIT with GNU General Public License v3.0 | 5 votes |
def createModel():
    """Build and compile the CHB-MIT EEG seizure-detection 3D-CNN.

    Input is a channels-first (1, 22, 59, 114) tensor; three Conv3D blocks
    with pooling and batch normalization feed a sigmoid dense layer and a
    two-class softmax. Compiled with categorical cross-entropy / Adam.
    """
    input_shape = (1, 22, 59, 114)
    net = Sequential()

    # C1: collapses the 22-electrode axis in one step.
    net.add(Conv3D(16, (22, 5, 5), strides=(1, 2, 2), padding='valid',
                   activation='relu', data_format="channels_first",
                   input_shape=input_shape))
    net.add(keras.layers.MaxPooling3D(pool_size=(1, 2, 2),
                                      data_format="channels_first",
                                      padding='same'))
    net.add(BatchNormalization())

    # C2
    net.add(Conv3D(32, (1, 3, 3), strides=(1, 1, 1), padding='valid',
                   data_format="channels_first", activation='relu'))  # incertezza se togliere padding
    net.add(keras.layers.MaxPooling3D(pool_size=(1, 2, 2),
                                      data_format="channels_first"))
    net.add(BatchNormalization())

    # C3
    net.add(Conv3D(64, (1, 3, 3), strides=(1, 1, 1), padding='valid',
                   data_format="channels_first", activation='relu'))  # incertezza se togliere padding
    net.add(keras.layers.MaxPooling3D(pool_size=(1, 2, 2),
                                      data_format="channels_first"))
    net.add(BatchNormalization())

    # Classifier head
    net.add(Flatten())
    net.add(Dropout(0.5))
    net.add(Dense(256, activation='sigmoid'))
    net.add(Dropout(0.5))
    net.add(Dense(2, activation='softmax'))

    opt_adam = keras.optimizers.Adam(lr=0.00001, beta_1=0.9, beta_2=0.999,
                                     epsilon=1e-08, decay=0.0)
    net.compile(loss='categorical_crossentropy', optimizer=opt_adam,
                metrics=['accuracy'])
    return net
Example #17
Source File: unet.py From 3DUnetCNN with MIT License | 5 votes |
def create_convolution_block(input_layer, n_filters, batch_normalization=False,
                             kernel=(3, 3, 3), activation=None,
                             padding='same', strides=(1, 1, 1),
                             instance_normalization=False):
    """Apply Conv3D, then optional batch/instance normalization, then an
    activation layer.

    :param input_layer: tensor to convolve.
    :param n_filters: number of Conv3D filters.
    :param batch_normalization: if True, BatchNormalization(axis=1) follows.
    :param kernel: Conv3D kernel size.
    :param activation: Keras activation layer class (default is 'relu').
    :param padding: Conv3D padding.
    :param strides: Conv3D strides.
    :param instance_normalization: use keras_contrib InstanceNormalization.
    :return: the activated tensor.
    """
    out = Conv3D(n_filters, kernel, padding=padding,
                 strides=strides)(input_layer)
    if batch_normalization:
        out = BatchNormalization(axis=1)(out)
    elif instance_normalization:
        # Optional dependency: only imported when actually requested.
        try:
            from keras_contrib.layers.normalization import InstanceNormalization
        except ImportError:
            raise ImportError("Install keras_contrib in order to use instance normalization."
                              "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git")
        out = InstanceNormalization(axis=1)(out)
    if activation is None:
        return Activation('relu')(out)
    else:
        return activation()(out)
Example #18
Source File: resnet_3d.py From 3D-ConvNets-for-Action-Recognition with MIT License | 5 votes |
def residual_block(x, filters, drop_rate=0., weight_decay=0.005):
    """Bottleneck residual branch: 1x1x1 expand, 3x3x3, 1x1x1 project.

    Dropout is interleaved after the first two convolutions when
    `drop_rate` is non-zero. Returns the branch output (no skip add here).
    """
    # Expand to 4x filters with a pointwise convolution.
    x = conv_factory(x, 4 * filters, kernel=(1, 1, 1))
    if drop_rate:
        x = Dropout(drop_rate)(x)
    # Spatial 3x3x3 convolution at the base filter count.
    x = conv_factory(x, filters, kernel=(3, 3, 3))
    if drop_rate:
        x = Dropout(drop_rate)(x)
    # Project back to 4x filters.
    x = Conv3D(4 * filters, (1, 1, 1),
               kernel_initializer='he_normal', padding="same",
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    return x
Example #19
Source File: i3d_keras.py From videograph with GNU General Public License v3.0 | 5 votes |
def conv3d_bn(x, filters, num_frames, num_row, num_col,
              padding='same', strides=(1, 1, 1), use_bias=False,
              use_activation_fn=True, use_bn=True, name=None):
    """Utility function to apply conv3d + BN.

    # Arguments
        x: input tensor.
        filters: filters in `Conv3D`.
        num_frames: frames (time depth) of the convolution kernel.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv3D`.
        strides: strides in `Conv3D`.
        use_bias: use bias or not.
        use_activation_fn: use an activation function or not.
        use_bn: use batch normalization or not.
        name: name of the ops; will become `name + '_conv'` for the
            convolution and `name + '_bn'` for the batch norm layer.

    # Returns
        Output tensor after applying `Conv3D` and `BatchNormalization`.
    """
    if name is None:
        conv_name, bn_name = None, None
    else:
        conv_name, bn_name = name + '_conv', name + '_bn'

    x = Conv3D(filters, (num_frames, num_row, num_col),
               strides=strides, padding=padding,
               use_bias=use_bias, name=conv_name)(x)
    if use_bn:
        # BN over the channel axis (1 for channels_first, 4 otherwise).
        axis = 1 if K.image_data_format() == 'channels_first' else 4
        x = BatchNormalization(axis=axis, scale=False, name=bn_name)(x)
    if use_activation_fn:
        x = Activation('relu', name=name)(x)
    return x
Example #20
Source File: deep_residual_learning_blocks.py From CNNArt with Apache License 2.0 | 5 votes |
def identity_block_3D(input_tensor, filters, kernel_size=(3, 3, 3), stage=0,
                      block=0, se_enabled=False, se_ratio=16):
    """3D ResNet identity block: two Conv3D+BN stages with a skip addition.

    `filters` is a pair (filters for conv 2a, filters for conv 2b). An
    optional squeeze-and-excitation block runs before the second BN.
    """
    numFilters1, numFilters2 = filters

    # Channel axis follows the backend data format.
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    out = Conv3D(filters=numFilters1, kernel_size=kernel_size,
                 strides=(1, 1, 1), padding='same',
                 kernel_initializer='he_normal',
                 name=conv_name_base + '2a')(input_tensor)
    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(out)
    out = LeakyReLU(alpha=0.01)(out)

    out = Conv3D(filters=numFilters2, kernel_size=kernel_size,
                 strides=(1, 1, 1), padding='same',
                 kernel_initializer='he_normal',
                 name=conv_name_base + '2b')(out)

    # squeeze and excitation block
    if se_enabled:
        out = squeeze_excitation_block_3D(out, ratio=se_ratio)

    out = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(out)

    # Residual connection, then activation.
    out = Add()([out, input_tensor])
    out = LeakyReLU(alpha=0.01)(out)
    return out
Example #21
Source File: layers.py From deephar with MIT License | 5 votes |
def bn_act_conv3d(x, filters, size, strides=(1, 1, 1), padding='same',
                  name=None):
    """Pre-activation 3D conv: BatchNorm -> ReLU -> Conv3D (no bias).

    When `name` is given, the BN and activation layers are named
    `name + '_bn'` and `name + '_act'`; the conv keeps `name` itself.
    """
    bn_name = None if name is None else name + '_bn'
    act_name = None if name is None else name + '_act'

    x = BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
    x = Activation('relu', name=act_name)(x)
    x = Conv3D(filters, size, strides=strides, padding=padding,
               use_bias=False, name=name)(x)
    return x
Example #22
Source File: c3d.py From 3D-ConvNets-for-Action-Recognition with MIT License | 5 votes |
def c3d_model():
    """Build a C3D network for 101 action classes on (112, 112, 8, 3) clips.

    Five Conv3D+MaxPooling3D stages (first and last pools keep the temporal
    dimension), two 2048-unit dense layers with dropout, and a softmax head.
    Returned uncompiled.
    """
    input_shape = (112, 112, 8, 3)
    weight_decay = 0.005
    nb_classes = 101
    reg = l2(weight_decay)

    inputs = Input(input_shape)
    x = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(inputs)
    x = MaxPooling3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    # Final pool also keeps the (already short) temporal dimension.
    x = MaxPooling3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)

    x = Flatten()(x)
    x = Dense(2048, activation='relu', kernel_regularizer=reg)(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation='relu', kernel_regularizer=reg)(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes, kernel_regularizer=reg)(x)
    x = Activation('softmax')(x)
    return Model(inputs, x)
Example #23
Source File: densenet_3d.py From 3D-ConvNets-for-Action-Recognition with MIT License | 5 votes |
def conv_factory(x, nb_filter, kernel=(3, 3, 3), dropout_rate=0.,
                 weight_decay=0.005):
    """Post-activation conv unit: Conv3D -> BatchNorm -> ReLU (-> Dropout).

    The convolution is bias-free, he_normal-initialized and L2-regularized;
    dropout is applied only when `dropout_rate` is non-zero.
    """
    x = Conv3D(nb_filter, kernel,
               kernel_initializer='he_normal', padding="same",
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
Example #24
Source File: drn.py From 3D-ConvNets-for-Action-Recognition with MIT License | 5 votes |
def conv_factory(x, nb_filter, dropout_rate=0., weight_decay=0.005):
    """Pre-activation conv unit: BatchNorm -> ReLU -> 3x3x3 Conv3D (-> Dropout).

    The convolution is bias-free, he_normal-initialized and L2-regularized;
    dropout is applied only when `dropout_rate` is non-zero.
    """
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = Conv3D(nb_filter, (3, 3, 3),
               kernel_initializer='he_normal', padding="same",
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
Example #25
Source File: drn.py From 3D-ConvNets-for-Action-Recognition with MIT License | 5 votes |
def c3d_model():
    """C3D backbone that exposes intermediate features instead of a softmax.

    Same five-stage Conv3D/MaxPool3D trunk as the classic C3D on a
    (112, 112, 16, 3) clip, but the output concatenates the class logits,
    both 2048-d dense activations, and the flattened conv features.
    """
    input_shape = (112, 112, 16, 3)
    weight_decay = 0.005
    nb_classes = 101
    reg = l2(weight_decay)

    inputs = Input(input_shape)
    x = Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(inputs)
    x = MaxPool3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(128, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    x = Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
               activation='relu', kernel_regularizer=reg)(x)
    x = MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    # Keep each head activation so they can all be concatenated below.
    x4 = Flatten()(x)
    x3 = Dense(2048, activation='relu', kernel_regularizer=reg)(x4)
    # x = Dropout(0.5)(x)
    x2 = Dense(2048, activation='relu', kernel_regularizer=reg)(x3)
    # x = Dropout(0.5)(x)
    x1 = Dense(nb_classes, kernel_regularizer=reg)(x2)
    # x = Activation('softmax')(x)

    out = concatenate([x1, x2, x3, x4], axis=-1)
    return Model(inputs, out)
Example #26
Source File: blocks.py From CSBDeep with BSD 3-Clause "New" or "Revised" License | 5 votes |
def resnet_block(n_filter, kernel_size=(3,3), pool=(1,1), n_conv_per_block=2,
                 batch_norm=False, kernel_initializer='he_normal',
                 activation='relu'):
    """Return a closure building a 2D/3D residual block.

    The block runs `n_conv_per_block` convolutions (the first optionally
    strided by `pool`) with optional batch normalization, and adds a
    1x1-projected shortcut whenever pooling or a filter-count change makes
    the shapes differ.
    """
    # Validate arguments up front (project `_raise` helper raises the error).
    if not (n_conv_per_block >= 2):
        _raise(ValueError('required: n_conv_per_block >= 2'))
    if len(pool) != len(kernel_size):
        _raise(ValueError('kernel and pool sizes must match.'))
    n_dim = len(kernel_size)
    if n_dim not in (2, 3):
        _raise(ValueError('resnet_block only 2d or 3d.'))

    conv_layer = Conv2D if n_dim == 2 else Conv3D
    conv_kwargs = dict(
        padding='same',
        use_bias=not batch_norm,
        kernel_initializer=kernel_initializer,
    )
    channel_axis = -1 if backend_channels_last() else 1

    def f(inp):
        # First conv carries the (optional) downsampling stride.
        x = conv_layer(n_filter, kernel_size, strides=pool, **conv_kwargs)(inp)
        if batch_norm:
            x = BatchNormalization(axis=channel_axis)(x)
        x = Activation(activation)(x)

        # Middle convs (none when n_conv_per_block == 2).
        for _ in range(n_conv_per_block - 2):
            x = conv_layer(n_filter, kernel_size, **conv_kwargs)(x)
            if batch_norm:
                x = BatchNormalization(axis=channel_axis)(x)
            x = Activation(activation)(x)

        # Last conv: activation happens only after the residual add.
        x = conv_layer(n_filter, kernel_size, **conv_kwargs)(x)
        if batch_norm:
            x = BatchNormalization(axis=channel_axis)(x)

        # Project the shortcut when shapes would not match.
        if any(p != 1 for p in pool) or n_filter != K.int_shape(inp)[-1]:
            inp = conv_layer(n_filter, (1,) * n_dim, strides=pool,
                             **conv_kwargs)(inp)

        x = Add()([inp, x])
        x = Activation(activation)(x)
        return x

    return f
Example #27
Source File: deep_residual_learning_blocks.py From CNNArt with Apache License 2.0 | 5 votes |
def identity_block_3D(input_tensor, filters, kernel_size=(3, 3, 3), stage=0,
                      block=0, se_enabled=False, se_ratio=16):
    """3D ResNet identity block (two Conv3D+BN stages, skip connection).

    `filters` unpacks to the filter counts of the 2a and 2b convolutions.
    When `se_enabled`, a squeeze-and-excitation block runs between the
    second convolution and its batch normalization.
    """
    numFilters1, numFilters2 = filters
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    # Branch 2a: conv -> BN -> leaky ReLU.
    branch = Conv3D(filters=numFilters1, kernel_size=kernel_size,
                    strides=(1, 1, 1), padding='same',
                    kernel_initializer='he_normal',
                    name=conv_name_base + '2a')(input_tensor)
    branch = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(branch)
    branch = LeakyReLU(alpha=0.01)(branch)

    # Branch 2b: conv (+ optional SE) -> BN.
    branch = Conv3D(filters=numFilters2, kernel_size=kernel_size,
                    strides=(1, 1, 1), padding='same',
                    kernel_initializer='he_normal',
                    name=conv_name_base + '2b')(branch)

    # squeeze and excitation block
    if se_enabled:
        branch = squeeze_excitation_block_3D(branch, ratio=se_ratio)

    branch = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(branch)

    branch = Add()([branch, input_tensor])
    branch = LeakyReLU(alpha=0.01)(branch)
    return branch
Example #28
Source File: i3d_keras.py From deep-smoke-machine with BSD 3-Clause "New" or "Revised" License | 5 votes |
def conv3d_bn(x, filters, num_frames, num_row, num_col, padding='same', strides=(1, 1, 1), use_bias=False, use_activation_fn=True, use_bn=True, name=None): """Utility function to apply conv3d + BN. # Arguments x: input tensor. filters: filters in `Conv3D`. num_frames: frames (time depth) of the convolution kernel. num_row: height of the convolution kernel. num_col: width of the convolution kernel. padding: padding mode in `Conv3D`. strides: strides in `Conv3D`. use_bias: use bias or not use_activation_fn: use an activation function or not. use_bn: use batch normalization or not. name: name of the ops; will become `name + '_conv'` for the convolution and `name + '_bn'` for the batch norm layer. # Returns Output tensor after applying `Conv3D` and `BatchNormalization`. """ if name is not None: bn_name = name + '_bn' conv_name = name + '_conv' else: bn_name = None conv_name = None x = Conv3D(filters, (num_frames, num_row, num_col), strides=strides, padding=padding, use_bias=use_bias, name=conv_name)(x) if use_bn: if K.image_data_format() == 'channels_first': bn_axis = 1 else: bn_axis = 4 x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x) if use_activation_fn: x = Activation('relu', name=name)(x) return x
Example #29
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 5 votes |
def dense_block_3D(input_tensor, numInputFilters, numLayers=1,
                   growthRate_k=12, bottleneck_enabled=False):
    """3D dense block: each layer's output is concatenated to all previous
    feature maps; the filter count grows by `growthRate_k` per layer.

    With `bottleneck_enabled`, a 1x1x1 conv producing 4k feature maps
    precedes each 3x3x3 convolution. Returns the concatenated features and
    the updated filter count.
    """
    # Concatenation/BN axis follows the backend data format.
    if K.image_data_format() == 'channels_last':
        concat_axis, bn_axis = -1, -1
    else:
        concat_axis, bn_axis = 1, 1

    concat_features = input_tensor
    for _ in range(numLayers):
        layer = BatchNormalization(axis=bn_axis, name='')(concat_features)
        layer = Activation('relu')(layer)
        if bottleneck_enabled == True:
            # "in our experiments, we let each 1x1 conv produce 4k feature maps
            layer = Conv3D(4 * growthRate_k, (1, 1, 1), strides=(1, 1, 1),
                           kernel_initializer='he_normal',
                           padding='same')(layer)
            layer = BatchNormalization(axis=bn_axis)(layer)
            layer = Activation('relu')(layer)
        layer = Conv3D(growthRate_k, (3, 3, 3), strides=(1, 1, 1),
                       kernel_initializer='he_normal', padding='same')(layer)
        concat_features = concatenate([layer, concat_features],
                                      axis=concat_axis)
        numInputFilters += growthRate_k
    return concat_features, numInputFilters
Example #30
Source File: densely_connected_cnn_blocks.py From CNNArt with Apache License 2.0 | 5 votes |
def dense_SE_block_3D(input_tensor, numInputFilters, numLayers=1,
                      growthRate_k=12, bottleneck_enabled=False, se_ratio=16):
    """3D dense block followed by a squeeze-and-excitation recalibration.

    Identical to `dense_block_3D` (optional 1x1x1 bottleneck, growth rate k
    per layer), but the final concatenated feature map passes through a
    squeeze-and-excitation block. Returns the features and the updated
    filter count.
    """
    # Concatenation/BN axis follows the backend data format.
    if K.image_data_format() == 'channels_last':
        concat_axis, bn_axis = -1, -1
    else:
        concat_axis, bn_axis = 1, 1

    concat_features = input_tensor
    for _ in range(numLayers):
        layer = BatchNormalization(axis=bn_axis, name='')(concat_features)
        layer = Activation('relu')(layer)
        if bottleneck_enabled == True:
            # "in our experiments, we let each 1x1 conv produce 4k feature maps
            layer = Conv3D(4 * growthRate_k, (1, 1, 1), strides=(1, 1, 1),
                           kernel_initializer='he_normal',
                           padding='same')(layer)
            layer = BatchNormalization(axis=bn_axis)(layer)
            layer = Activation('relu')(layer)
        layer = Conv3D(growthRate_k, (3, 3, 3), strides=(1, 1, 1),
                       kernel_initializer='he_normal', padding='same')(layer)
        concat_features = concatenate([layer, concat_features],
                                      axis=concat_axis)
        numInputFilters += growthRate_k

    # SE-Block
    concat_features = squeeze_excitation_block_3D(concat_features,
                                                  ratio=se_ratio)
    return concat_features, numInputFilters