Python keras.layers.Conv2D() Examples
The following are code examples of keras.layers.Conv2D(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all the available functions and classes of the keras.layers module, or try the search function.
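Before the project examples, here is a minimal sketch of the layer's basic usage; the filter count, kernel size and input shape are arbitrary placeholders, not taken from any of the projects below.

from keras.models import Sequential
from keras.layers import Conv2D

# One 2-D convolution layer: 32 filters, a 3x3 kernel and ReLU activation,
# applied to 64x64 RGB inputs; 'same' padding preserves the spatial size.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(64, 64, 3)))
model.summary()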
Example #1
Source File: model.py From ocsvm-anomaly-detection with MIT License | 8 votes |
def build_cae_model(height=32, width=32, channel=3):
    """
    build convolutional autoencoder model
    """
    input_img = Input(shape=(height, width, channel))

    # encoder
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = MaxPooling2D((2, 2), padding='same')(net)
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(net)
    encoded = MaxPooling2D((2, 2), padding='same', name='enc')(net)

    # decoder
    net = Conv2D(4, (3, 3), activation='relu', padding='same')(encoded)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(8, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    net = Conv2D(16, (3, 3), activation='relu', padding='same')(net)
    net = UpSampling2D((2, 2))(net)
    decoded = Conv2D(channel, (3, 3), activation='sigmoid', padding='same')(net)

    return Model(input_img, decoded)
Example #2
Source File: ae_model.py From Pix2Pose with MIT License | 6 votes |
def DCGAN_discriminator():
    nb_filters = 64
    nb_conv = int(np.floor(np.log(128) / np.log(2)))
    list_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

    input_img = Input(shape=(128, 128, 3))
    x = Conv2D(list_filters[0], (3, 3), strides=(2, 2), name="disc_conv2d_1",
               padding="same")(input_img)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(0.2)(x)

    # Next convs
    for i, f in enumerate(list_filters[1:]):
        name = "disc_conv2d_%s" % (i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
        x = BatchNormalization(axis=-1)(x)
        x = LeakyReLU(0.2)(x)

    x_flat = Flatten()(x)
    x_out = Dense(1, activation="sigmoid", name="disc_dense")(x_flat)

    discriminator_model = Model(inputs=input_img, outputs=[x_out])
    return discriminator_model
Example #3
Source File: Xception.py From Semantic-Segmentation with MIT License | 6 votes |
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
    # Compute the amount of padding and whether the feature map height/width shrink
    if stride == 1:
        return Conv2D(filters,
                      (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='same',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        return Conv2D(filters,
                      (kernel_size, kernel_size),
                      strides=(stride, stride),
                      padding='valid',
                      use_bias=False,
                      dilation_rate=(rate, rate),
                      name=prefix)(x)
Example #4
Source File: parallel_model.py From dataiku-contrib with Apache License 2.0 | 6 votes |
def build_model(x_train, num_classes):
    # Reset default graph. Keras leaves old ops in the graph,
    # which are ignored for execution but clutter graph
    # visualization in TensorBoard.
    tf.reset_default_graph()

    inputs = KL.Input(shape=x_train.shape[1:], name="input_image")
    x = KL.Conv2D(32, (3, 3), activation='relu', padding="same", name="conv1")(inputs)
    x = KL.Conv2D(64, (3, 3), activation='relu', padding="same", name="conv2")(x)
    x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x)
    x = KL.Flatten(name="flat1")(x)
    x = KL.Dense(128, activation='relu', name="dense1")(x)
    x = KL.Dense(num_classes, activation='softmax', name="dense2")(x)
    return KM.Model(inputs, x, "digit_classifier_model")

# Load MNIST Data
Example #5
Source File: classifiers.py From MesoNet with Apache License 2.0 | 6 votes |
def InceptionLayer(self, a, b, c, d):
    def func(x):
        x1 = Conv2D(a, (1, 1), padding='same', activation='relu')(x)

        x2 = Conv2D(b, (1, 1), padding='same', activation='relu')(x)
        x2 = Conv2D(b, (3, 3), padding='same', activation='relu')(x2)

        x3 = Conv2D(c, (1, 1), padding='same', activation='relu')(x)
        x3 = Conv2D(c, (3, 3), dilation_rate=2, strides=1, padding='same', activation='relu')(x3)

        x4 = Conv2D(d, (1, 1), padding='same', activation='relu')(x)
        x4 = Conv2D(d, (3, 3), dilation_rate=3, strides=1, padding='same', activation='relu')(x4)

        y = Concatenate(axis=-1)([x1, x2, x3, x4])

        return y
    return func
Example #6
Source File: bigan.py From Keras-BiGAN with MIT License | 6 votes |
def d_block(inp, fil, p=True):
    skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(inp)

    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(inp)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    if p:
        out = AveragePooling2D()(out)

    return out
Example #7
Source File: yolov3_weights_to_keras.py From ai-platform with MIT License | 6 votes |
def _conv_block(inp, convs, skip=True):
    x = inp
    count = 0
    len_convs = len(convs)
    for conv in convs:
        if count == (len_convs - 2) and skip:
            skip_connection = x
        count += 1
        if conv['stride'] > 1:
            # peculiar padding as darknet prefers left and top
            x = ZeroPadding2D(((1, 0), (1, 0)))(x)
        x = Conv2D(conv['filter'],
                   conv['kernel'],
                   strides=conv['stride'],
                   # peculiar padding as darknet prefers left and top
                   padding='valid' if conv['stride'] > 1 else 'same',
                   name='conv_' + str(conv['layer_idx']),
                   use_bias=False if conv['bnorm'] else True)(x)
        if conv['bnorm']:
            x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
        if conv['leaky']:
            x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
    return add([skip_connection, x]) if skip else x

# The SPP block uses three pooling layers of sizes [5, 9, 13] with stride one; all of
# their outputs, together with the input, are concatenated and fed to the FC block
# (a sketch follows below).
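The SPP block described in the closing comment is not defined in this example; the helper below is only a sketch of that description. The name _spp_block and the use of keras.layers.MaxPooling2D and concatenate are assumptions for illustration, not code from yolov3_weights_to_keras.py.

from keras.layers import MaxPooling2D, concatenate

def _spp_block(x):
    # Stride-1 pooling with 'same' padding keeps the spatial size, so the three
    # pooled tensors can be concatenated with the input along the channel axis.
    pool_5 = MaxPooling2D(pool_size=(5, 5), strides=(1, 1), padding='same')(x)
    pool_9 = MaxPooling2D(pool_size=(9, 9), strides=(1, 1), padding='same')(x)
    pool_13 = MaxPooling2D(pool_size=(13, 13), strides=(1, 1), padding='same')(x)
    return concatenate([pool_5, pool_9, pool_13, x])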
Example #8
Source File: bigan.py From Keras-BiGAN with MIT License | 6 votes |
def g_block(inp, fil, u=True):
    if u:
        out = UpSampling2D(interpolation='bilinear')(inp)
    else:
        out = Activation('linear')(inp)

    skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same', kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    out = add([out, skip])
    out = LeakyReLU(0.2)(out)

    return out
Example #9
Source File: dual_path_network.py From Keras-DualPathNetworks with Apache License 2.0 | 6 votes |
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    ''' Adds a Batchnorm-Relu-Conv block for DPN
    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return x
Example #10
Source File: dual_path_network.py From Keras-DualPathNetworks with Apache License 2.0 | 6 votes |
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x
Example #11
Source File: keras_ops.py From deep_architect with MIT License | 6 votes |
def conv2d(h_num_filters, h_filter_width, h_stride, h_use_bias):

    def compile_fn(di, dh):
        layer = layers.Conv2D(dh['num_filters'], (dh['filter_width'],) * 2,
                              strides=(dh['stride'],) * 2,
                              use_bias=dh['use_bias'],
                              padding='SAME')

        def fn(di):
            return {'out': layer(di['in'])}

        return fn

    return siso_keras_module('Conv2D', compile_fn, {
        'num_filters': h_num_filters,
        'filter_width': h_filter_width,
        'stride': h_stride,
        'use_bias': h_use_bias,
    })
Example #12
Source File: resnet.py From DEXTR-KerasTensorflow with GNU General Public License v3.0 | 5 votes |
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(1, 1), dilation=1):
    """conv_block is the block that has a conv layer at shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.

    Note that from stage 3, the first conv layer at main path is with strides=(2,2),
    and the shortcut should have strides=(2,2) as well.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), strides=strides,
               name=conv_name_base + '2a', use_bias=False)(input_tensor)
    x = BN(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b', use_bias=False, dilation_rate=dilation)(x)
    x = BN(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)
    x = BN(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(filters3, (1, 1), strides=strides,
                      name=conv_name_base + '1', use_bias=False)(input_tensor)
    shortcut = BN(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
Example #13
Source File: deeplab.py From Semantic-Segmentation with MIT License | 5 votes |
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    # Compute the amount of padding and whether the height/width need to shrink
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    # Apply the activation up front if it is not placed after each conv
    if not depth_activation:
        x = Activation('relu')(x)

    # Separable convolution: a 3x3 depthwise conv first, then a 1x1 pointwise conv
    # The 3x3 depthwise conv uses dilation
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    # 1x1 pointwise conv to compress the channels
    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
Example #14
Source File: dual_path_network.py From Keras-DualPathNetworks with Apache License 2.0 | 5 votes |
def _grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = Activation('relu')(x)
        return x

    for c in range(cardinality):
        # slice out the channels belonging to this group
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last' else
                   z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=strides,
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    group_merge = BatchNormalization(axis=channel_axis)(group_merge)
    group_merge = Activation('relu')(group_merge)

    return group_merge
Example #15
Source File: model.py From PanopticSegmentation with MIT License | 5 votes |
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #16
Source File: model.py From PanopticSegmentation with MIT License | 5 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #17
Source File: gtsrb_injection_example.py From backdoor with MIT License | 5 votes |
def load_traffic_sign_model(base=32, dense=512, num_classes=43):
    input_shape = (32, 32, 3)
    model = Sequential()

    model.add(Conv2D(base, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
    model.add(Conv2D(base, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Conv2D(base * 2, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(base * 2, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Conv2D(base * 4, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(base * 4, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Flatten())
    model.add(Dense(dense, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    opt = keras.optimizers.adam(lr=0.001, decay=1 * 10e-5)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
Example #18
Source File: resnet_152.py From Car-Recognition with MIT License | 5 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block):
    '''The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    eps = 1.1e-5
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', use_bias=False)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Conv2D(nb_filter2, (kernel_size, kernel_size), name=conv_name_base + '2b', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)

    x = add([x, input_tensor], name='res' + str(stage) + block)
    x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
    return x
Example #19
Source File: resnet_50.py From Car-Recognition with MIT License | 5 votes |
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """conv_block is the block that has a conv layer at shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), strides=strides, name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(nb_filter3, (1, 1), strides=strides, name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = add([x, shortcut])
    x = Activation('relu')(x)
    return x
Example #20
Source File: resnet_50.py From Car-Recognition with MIT License | 5 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity_block is the block that has no conv layer at shortcut

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = add([x, input_tensor])
    x = Activation('relu')(x)
    return x
Example #21
Source File: resnet50_mod.py From Pix2Pose with MIT License | 5 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a',
               kernel_regularizer=l2(0.0001))(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b',
               kernel_regularizer=l2(0.0001))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c',
               kernel_regularizer=l2(0.0001))(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu', name='act' + str(stage) + block + '_branch')(x)
    return x
Example #22
Source File: resnet.py From DEXTR-KerasTensorflow with GNU General Public License v3.0 | 5 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block, dilation=1):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        dilation: dilation of the intermediate convolution

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a', use_bias=False)(input_tensor)
    x = BN(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b', use_bias=False, dilation_rate=dilation)(x)
    x = BN(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)
    x = BN(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
Example #23
Source File: classifiers.py From DEXTR-KerasTensorflow with GNU General Public License v3.0 | 5 votes |
def build_pyramid_pooling_module(res, input_shape, nb_classes, sigmoid=False, output_size=None):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0)) for input_dim in input_shape)
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    print("PSP module will interpolate to a final feature map size of %s" %
          (feature_map_size, ))

    interp_block1 = psp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = psp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = psp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = psp_block(res, 6, feature_map_size, input_shape)

    # concatenate all the interpolated blocks with the input feature map
    res = Concatenate()([interp_block1,
                         interp_block2,
                         interp_block3,
                         interp_block6,
                         res])
    x = Conv2D(512, (1, 1), strides=(1, 1), padding="same", name="class_psp_reduce_conv", use_bias=False)(res)
    x = resnet.BN(bn_axis, name="class_psp_reduce_bn")(x)
    x = Activation('relu')(x)

    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="class_psp_final_conv")(x)

    if output_size:
        x = Upsampling(output_size)(x)

    if sigmoid:
        x = Activation('sigmoid')(x)
    return x
Example #24
Source File: classifiers.py From DEXTR-KerasTensorflow with GNU General Public License v3.0 | 5 votes |
def psp_block(prev_layer, level, feature_map_shape, input_shape):
    if input_shape == (512, 512):
        kernel_strides_map = {1: [64, 64],
                              2: [32, 32],
                              3: [22, 21],
                              6: [11, 9]}
        # TODO: Level 6: Kernel correct, but stride not exactly the same as Pytorch
    else:
        raise ValueError("Pooling parameters for input shape " + str(input_shape) + " are not defined.")

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    names = [
        "class_psp_" + str(level) + "_conv",
        "class_psp_" + str(level) + "_bn"
    ]
    kernel = (kernel_strides_map[level][0], kernel_strides_map[level][0])
    strides = (kernel_strides_map[level][1], kernel_strides_map[level][1])
    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0], use_bias=False)(prev_layer)
    prev_layer = resnet.BN(bn_axis, name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    prev_layer = Upsampling(feature_map_shape)(prev_layer)
    return prev_layer
Example #26
Source File: mobilenetV2.py From Semantic-Segmentation with MIT License | 5 votes |
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = inputs.shape[-1].value  # inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'expanded_conv_{}_'.format(block_id)
    if block_id:
        # Expand
        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'

    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)
    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #     return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
Example #28
Source File: Xception.py From Semantic-Segmentation with MIT License | 5 votes |
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
    # Compute the amount of padding and whether the height/width need to shrink
    if stride == 1:
        depth_padding = 'same'
    else:
        kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        x = ZeroPadding2D((pad_beg, pad_end))(x)
        depth_padding = 'valid'

    # Apply the activation up front if it is not placed after each conv
    if not depth_activation:
        x = Activation('relu')(x)

    # Separable convolution: a 3x3 depthwise conv first, then a 1x1 pointwise conv
    # The 3x3 depthwise conv uses dilation
    x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
                        padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
    x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    # 1x1 pointwise conv to compress the channels
    x = Conv2D(filters, (1, 1), padding='same',
               use_bias=False, name=prefix + '_pointwise')(x)
    x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
    if depth_activation:
        x = Activation('relu')(x)

    return x
Example #30
Source File: models.py From keras-ctpn with Apache License 2.0 | 5 votes |
def ctpn(base_features, num_anchors, rnn_units=128, fc_units=512):
    """
    CTPN network
    :param base_features: (B,H,W,C)
    :param num_anchors: number of anchors
    :param rnn_units:
    :param fc_units:
    :return:
    """
    x = layers.Conv2D(512, kernel_size=(3, 3), padding='same', name='pre_fc')(base_features)  # [B,H,W,512]
    # Run a bidirectional RNN along the width dimension
    rnn_forward = layers.TimeDistributed(layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal'),
                                         name='gru_forward')(x)
    rnn_backward = layers.TimeDistributed(
        layers.GRU(rnn_units, return_sequences=True, kernel_initializer='he_normal', go_backwards=True),
        name='gru_backward')(x)
    rnn_output = layers.Concatenate(name='gru_concat')([rnn_forward, rnn_backward])  # (B,H,W,256)

    # Fully-connected layer implemented as a 1x1 conv
    fc_output = layers.Conv2D(fc_units, kernel_size=(1, 1), activation='relu', name='fc_output')(
        rnn_output)  # (B,H,W,512)

    # Classification
    class_logits = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='cls')(fc_output)
    class_logits = layers.Reshape(target_shape=(-1, 2), name='cls_reshape')(class_logits)
    # Regression of the vertical center coordinate and height
    predict_deltas = layers.Conv2D(2 * num_anchors, kernel_size=(1, 1), name='deltas')(fc_output)
    predict_deltas = layers.Reshape(target_shape=(-1, 2), name='deltas_reshape')(predict_deltas)
    # Side refinement (only the x offset needs to be predicted)
    predict_side_deltas = layers.Conv2D(num_anchors, kernel_size=(1, 1), name='side_deltas')(fc_output)
    predict_side_deltas = layers.Reshape(target_shape=(-1, 1), name='side_deltas_reshape')(
        predict_side_deltas)
    return class_logits, predict_deltas, predict_side_deltas