Python keras.layers.AveragePooling2D() Examples
The following are 30 code examples of keras.layers.AveragePooling2D().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module keras.layers, or try the search function.

Example #1
Source Project: Carla-RL Author: Sentdex File: models.py License: MIT License | 7 votes |
def model_base_test_CNN(input_shape):
    """Small test CNN: four conv/ReLU/average-pool stages, flattened.

    Returns the (input, output) tensor pair of the built Sequential model.
    """
    model = Sequential()

    # Stage 1: 32 filters, aggressive 5x5 pool with stride 3.
    model.add(Conv2D(32, (3, 3), input_shape=input_shape, padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    # Stage 2: 64 filters.
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    # Stage 3: 64 filters.
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    # Stage 4: 128 filters, gentler 3x3 pool with stride 2.
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))

    model.add(Flatten())
    return model.input, model.output

# 64x3 model
Example #2
Source Project: Keras-BiGAN Author: manicman1999 File: bigan.py License: MIT License | 6 votes |
def d_block(inp, fil, p=True):
    """Discriminator residual block with optional 2x2 average-pool downsampling.

    inp: input tensor; fil: filter count; p: apply AveragePooling2D when True.
    """
    # Shortcut branch: 1x1 projection of the input to `fil` channels.
    skip = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(inp)

    # Main branch: 3x3 -> 3x3 -> 1x1, LeakyReLU after the 3x3 convs.
    out = Conv2D(filters=fil, kernel_size=3, padding='same',
                 kernel_initializer='he_normal')(inp)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(filters=fil, kernel_size=3, padding='same',
                 kernel_initializer='he_normal')(out)
    out = LeakyReLU(0.2)(out)
    out = Conv2D(fil, 1, padding='same', kernel_initializer='he_normal')(out)

    # Merge with the shortcut, activate, optionally downsample.
    out = add([out, skip])
    out = LeakyReLU(0.2)(out)
    if p:
        out = AveragePooling2D()(out)
    return out
Example #3
Source Project: deep_architect Author: negrinho File: keras_ops.py License: MIT License | 6 votes |
def avg_pool2d(h_kernel_size, h_stride):
    """Searchable average-pooling module for deep_architect.

    h_kernel_size / h_stride are hyperparameters resolved when the module
    is compiled.
    """
    def compile_fn(di, dh):
        # Instantiate the layer once from the chosen hyperparameter values.
        layer = layers.AveragePooling2D(pool_size=dh['kernel_size'],
                                        strides=(dh['stride'], dh['stride']),
                                        padding='same')

        def fn(di):
            return {'out': layer(di['in'])}

        return fn

    return siso_keras_module('AvgPool', compile_fn, {
        'kernel_size': h_kernel_size,
        'stride': h_stride,
    })
Example #4
Source Project: deep_architect Author: negrinho File: keras_ops.py License: MIT License | 6 votes |
def avg_pool2d(h_kernel_size, h_stride):
    """Searchable average-pooling module for deep_architect.

    Identical to the 'AvgPool' variant except for the registered module name.
    """
    def compile_fn(di, dh):
        # The layer is created once, then reused by the forward fn.
        layer = layers.AveragePooling2D(pool_size=dh['kernel_size'],
                                        strides=(dh['stride'], dh['stride']),
                                        padding='same')

        def fn(di):
            return {'out': layer(di['in'])}

        return fn

    return siso_keras_module('AvgPool2D', compile_fn, {
        'kernel_size': h_kernel_size,
        'stride': h_stride,
    })
Example #5
Source Project: Transfer-Learning Author: DhavalThkkar File: transfer.py License: MIT License | 6 votes |
def add_new_last_layer(base_model, nb_classes):
    """Add last layer to the convnet.

    Args:
        base_model: keras model excluding top
        nb_classes: # of classes

    Returns:
        new keras model with last layer
    """
    x = base_model.output
    # NOTE: `border_mode` and Model(input=..., output=...) are Keras 1 API;
    # kept as-is for compatibility with the project's Keras version.
    x = AveragePooling2D((8, 8), border_mode='valid', name='avg_pool')(x)
    x = Dropout(0.4)(x)
    x = Flatten()(x)
    # Bug fix: the classifier head must have `nb_classes` units; it was
    # hard-coded to 2, silently ignoring the documented parameter.
    predictions = Dense(nb_classes, activation='softmax')(x)
    model = Model(input=base_model.input, output=predictions)
    return model
Example #6
Source Project: kaggle-rsna18 Author: i-pan File: densenet_gray.py License: MIT License | 6 votes |
def transition_block(x, reduction, name):
    """A DenseNet transition block.

    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    # Returns
        output tensor for the block.
    """
    channel_axis = 3 if K.image_data_format() == 'channels_last' else 1

    x = BatchNormalization(axis=channel_axis, epsilon=1.001e-5,
                           name=name + '_bn')(x)
    x = Activation('relu', name=name + '_relu')(x)
    # Compress the channel count by `reduction` before spatial downsampling.
    x = Conv2D(int(K.int_shape(x)[channel_axis] * reduction), 1,
               use_bias=False, name=name + '_conv')(x)
    x = AveragePooling2D(2, strides=2, name=name + '_pool')(x)
    return x
Example #7
Source Project: CNNArt Author: thomaskuestner File: densely_connected_cnn_blocks.py License: Apache License 2.0 | 6 votes |
def transition_layer(input_tensor, numFilters, compressionFactor=1.0):
    """DenseNet transition: BN -> ReLU -> 1x1 conv (compressed) -> 2x2 avg pool.

    Returns the downsampled tensor and the compressed filter count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)
    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # Spatial downsampling by 2.
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last', name='')(x)

    return x, numOutPutFilters
Example #8
Source Project: CNNArt Author: thomaskuestner File: densely_connected_cnn_blocks.py License: Apache License 2.0 | 6 votes |
def transition_SE_layer(input_tensor, numFilters, compressionFactor=1.0, se_ratio=16):
    """DenseNet transition with a squeeze-and-excitation block before pooling.

    Returns the downsampled tensor and the compressed filter count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)
    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # Channel re-weighting via squeeze-and-excitation.
    x = squeeze_excitation_block(x, ratio=se_ratio)

    # Spatial downsampling by 2.
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last', name='')(x)

    return x, numOutPutFilters
Example #9
Source Project: CNNArt Author: thomaskuestner File: densely_connected_cnn_blocks.py License: Apache License 2.0 | 6 votes |
def transition_layer(input_tensor, numFilters, compressionFactor=1.0):
    """DenseNet transition layer (duplicate of the block variant above).

    BN -> ReLU -> 1x1 compression conv -> 2x2 average pooling.
    Returns the downsampled tensor and the compressed filter count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)
    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)
    # downsampling
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last', name='')(x)

    return x, numOutPutFilters
Example #10
Source Project: CNNArt Author: thomaskuestner File: densely_connected_cnn_blocks.py License: Apache License 2.0 | 6 votes |
def transition_SE_layer(input_tensor, numFilters, compressionFactor=1.0, se_ratio=16):
    """DenseNet transition with squeeze-and-excitation (duplicate variant).

    BN -> ReLU -> 1x1 compression conv -> SE block -> 2x2 average pooling.
    Returns the downsampled tensor and the compressed filter count.
    """
    numOutPutFilters = int(numFilters * compressionFactor)

    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    x = BatchNormalization(axis=bn_axis)(input_tensor)
    x = Activation('relu')(x)
    x = Conv2D(numOutPutFilters, (1, 1), strides=(1, 1), padding='same',
               kernel_initializer='he_normal')(x)

    # SE Block: channel re-weighting before downsampling.
    x = squeeze_excitation_block(x, ratio=se_ratio)

    # downsampling
    x = AveragePooling2D((2, 2), strides=(2, 2), padding='valid',
                         data_format='channels_last', name='')(x)

    return x, numOutPutFilters
Example #11
Source Project: Carla-RL Author: Sentdex File: models.py License: MIT License | 6 votes |
def model_base_64x3_CNN(input_shape):
    """CNN base with three 64-filter conv/ReLU/average-pool stages.

    Returns the (input, output) tensor pair of the built Sequential model.
    """
    model = Sequential()

    # Three identical stages: 64 filters, 5x5 pool with stride 3.
    model.add(Conv2D(64, (3, 3), input_shape=input_shape, padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    model.add(Flatten())
    return model.input, model.output

# 4 CNN layer model
Example #12
Source Project: Carla-RL Author: Sentdex File: models.py License: MIT License | 6 votes |
def model_base_4_CNN(input_shape):
    """CNN base with four conv stages of widening filter counts (64->256).

    Returns the (input, output) tensor pair of the built Sequential model.
    """
    model = Sequential()

    # Stages 1-2: 64 filters, 5x5 kernel, aggressive 5x5/3 pooling.
    model.add(Conv2D(64, (5, 5), input_shape=input_shape, padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(5, 5), strides=(3, 3), padding='same'))

    # Stages 3-4: wider filters, gentler 3x3/2 pooling.
    model.add(Conv2D(128, (5, 5), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))

    model.add(Conv2D(256, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))

    model.add(Flatten())
    return model.input, model.output

# 5 CNN layer with residual connections model
Example #13
Source Project: deepQuest Author: sheffieldnlp File: cnn_model-predictor.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def One_vs_One_Inception(self, nOutput=2, input=[224, 224, 3]):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    (useful for ECOC models).
    """
    # A 3-element spec is (H, W, C); reorder to channels-first (C, H, W).
    if len(input) == 3:
        input_shape = tuple([input[2]] + input[0:2])
    else:
        input_shape = tuple(input)

    self.model = Graph()

    # Input
    self.model.add_input(name='input', input_shape=input_shape)

    # Two stacked inception modules.
    out_Ea = self.__addInception('inceptionEa', 'input', 4, 2, 8, 2, 2, 2)
    out_Eb = self.__addInception('inceptionEb', out_Ea, 2, 2, 4, 2, 1, 1)

    # Global average pooling: the window spans the whole feature map.
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC', input=out_Eb)

    # Softmax classifier head: flatten -> dropout -> dense.
    self.model.add_node(Flatten(), name='loss_OnevsOne/classifier_flatten',
                        input='ave_pool/ECOC')
    self.model.add_node(Dropout(0.5), name='loss_OnevsOne/drop',
                        input='loss_OnevsOne/classifier_flatten')
    self.model.add_node(Dense(nOutput, activation='softmax'),
                        name='loss_OnevsOne', input='loss_OnevsOne/drop')

    # Output
    self.model.add_output(name='loss_OnevsOne/output', input='loss_OnevsOne')
Example #14
Source Project: deepQuest Author: sheffieldnlp File: cnn_model-predictor.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception(self, input, input_shape, id_branch, nOutput=2, activation='softmax'):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    on the top of the current model (useful for ECOC_loss models).
    """
    branch = str(id_branch)

    # Two stacked inception modules.
    out_Ea = self.__addInception('inceptionEa_' + branch, input, 4, 2, 8, 2, 2, 2)
    out_Eb = self.__addInception('inceptionEb_' + branch, out_Ea, 2, 2, 4, 2, 1, 1)

    # Global average pooling: the window spans the whole feature map.
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC_' + branch, input=out_Eb)

    # Classifier head: flatten -> dropout -> dense.
    self.model.add_node(Flatten(),
                        name='fc_OnevsOne_' + branch + '/flatten',
                        input='ave_pool/ECOC_' + branch)
    self.model.add_node(Dropout(0.5),
                        name='fc_OnevsOne_' + branch + '/drop',
                        input='fc_OnevsOne_' + branch + '/flatten')
    output_name = 'fc_OnevsOne_' + branch
    self.model.add_node(Dense(nOutput, activation=activation),
                        name=output_name,
                        input='fc_OnevsOne_' + branch + '/drop')

    return output_name
Example #15
Source Project: deepQuest Author: sheffieldnlp File: cnn_model-predictor.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception_Functional(self, input, input_shape, id_branch, nOutput=2, activation='softmax'):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    on the top of the current model (useful for ECOC_loss models).
    """
    in_node = self.model.get_layer(input).output
    branch = str(id_branch)

    # Two stacked inception modules (functional API).
    [out_Ea, out_Ea_name] = self.__addInception_Functional(
        'inceptionEa_' + branch, in_node, 4, 2, 8, 2, 2, 2)
    [out_Eb, out_Eb_name] = self.__addInception_Functional(
        'inceptionEb_' + branch, out_Ea, 2, 2, 4, 2, 1, 1)

    # Global average pooling over the full feature map.
    x = AveragePooling2D(pool_size=input_shape, strides=(1, 1),
                         name='ave_pool/ECOC_' + branch)(out_Eb)

    # Classifier head: flatten -> dropout -> dense.
    output_name = 'fc_OnevsOne_' + branch
    x = Flatten(name='fc_OnevsOne_' + branch + '/flatten')(x)
    x = Dropout(0.5, name='fc_OnevsOne_' + branch + '/drop')(x)
    out_node = Dense(nOutput, activation=activation, name=output_name)(x)

    return out_node
Example #16
Source Project: deepQuest Author: sheffieldnlp File: cnn_model-predictor.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def One_vs_One_Inception_v2(self, nOutput=2, input=[224, 224, 3]):
    """
    Builds a simple One_vs_One_Inception_v2 network with 2 inception layers
    (useful for ECOC models). Same topology as One_vs_One_Inception but
    with wider inception modules.
    """
    # A 3-element spec is (H, W, C); reorder to channels-first (C, H, W).
    if len(input) == 3:
        input_shape = tuple([input[2]] + input[0:2])
    else:
        input_shape = tuple(input)

    self.model = Graph()

    # Input
    self.model.add_input(name='input', input_shape=input_shape)

    # Two stacked inception modules (wider than v1).
    out_Ea = self.__addInception('inceptionEa', 'input', 16, 8, 32, 8, 8, 8)
    out_Eb = self.__addInception('inceptionEb', out_Ea, 8, 8, 16, 8, 4, 4)

    # Global average pooling: the window spans the whole feature map.
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC', input=out_Eb)

    # Softmax classifier head: flatten -> dropout -> dense.
    self.model.add_node(Flatten(), name='loss_OnevsOne/classifier_flatten',
                        input='ave_pool/ECOC')
    self.model.add_node(Dropout(0.5), name='loss_OnevsOne/drop',
                        input='loss_OnevsOne/classifier_flatten')
    self.model.add_node(Dense(nOutput, activation='softmax'),
                        name='loss_OnevsOne', input='loss_OnevsOne/drop')

    # Output
    self.model.add_output(name='loss_OnevsOne/output', input='loss_OnevsOne')
Example #17
Source Project: deepQuest Author: sheffieldnlp File: cnn_model.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def One_vs_One_Inception(self, nOutput=2, input=[224, 224, 3]):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    (useful for ECOC models).
    """
    # A 3-element spec is (H, W, C); reorder to channels-first (C, H, W).
    input_shape = tuple([input[2]] + input[0:2]) if len(input) == 3 else tuple(input)

    self.model = Graph()

    # Input
    self.model.add_input(name='input', input_shape=input_shape)

    # Inception Ea followed by Inception Eb.
    out_Ea = self.__addInception('inceptionEa', 'input', 4, 2, 8, 2, 2, 2)
    out_Eb = self.__addInception('inceptionEb', out_Ea, 2, 2, 4, 2, 1, 1)

    # Global average pooling (window covers the full spatial extent).
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC', input=out_Eb)

    # Softmax head.
    self.model.add_node(Flatten(), name='loss_OnevsOne/classifier_flatten',
                        input='ave_pool/ECOC')
    self.model.add_node(Dropout(0.5), name='loss_OnevsOne/drop',
                        input='loss_OnevsOne/classifier_flatten')
    self.model.add_node(Dense(nOutput, activation='softmax'),
                        name='loss_OnevsOne', input='loss_OnevsOne/drop')

    # Output
    self.model.add_output(name='loss_OnevsOne/output', input='loss_OnevsOne')
Example #18
Source Project: deepQuest Author: sheffieldnlp File: cnn_model.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception(self, input, input_shape, id_branch, nOutput=2, activation='softmax'):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    on the top of the current model (useful for ECOC_loss models).
    """
    suffix = str(id_branch)

    # Inception Ea followed by Inception Eb.
    out_Ea = self.__addInception('inceptionEa_' + suffix, input, 4, 2, 8, 2, 2, 2)
    out_Eb = self.__addInception('inceptionEb_' + suffix, out_Ea, 2, 2, 4, 2, 1, 1)

    # Global average pooling (window covers the full spatial extent).
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC_' + suffix, input=out_Eb)

    # Classifier head: flatten -> dropout -> dense.
    self.model.add_node(Flatten(),
                        name='fc_OnevsOne_' + suffix + '/flatten',
                        input='ave_pool/ECOC_' + suffix)
    self.model.add_node(Dropout(0.5),
                        name='fc_OnevsOne_' + suffix + '/drop',
                        input='fc_OnevsOne_' + suffix + '/flatten')
    output_name = 'fc_OnevsOne_' + suffix
    self.model.add_node(Dense(nOutput, activation=activation),
                        name=output_name,
                        input='fc_OnevsOne_' + suffix + '/drop')

    return output_name
Example #19
Source Project: deepQuest Author: sheffieldnlp File: cnn_model.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception_Functional(self, input, input_shape, id_branch, nOutput=2, activation='softmax'):
    """
    Builds a simple One_vs_One_Inception network with 2 inception layers
    on the top of the current model (useful for ECOC_loss models).
    """
    in_node = self.model.get_layer(input).output
    suffix = str(id_branch)

    # Inception Ea followed by Inception Eb (functional API helpers).
    [out_Ea, out_Ea_name] = self.__addInception_Functional(
        'inceptionEa_' + suffix, in_node, 4, 2, 8, 2, 2, 2)
    [out_Eb, out_Eb_name] = self.__addInception_Functional(
        'inceptionEb_' + suffix, out_Ea, 2, 2, 4, 2, 1, 1)

    # Global average pooling over the full feature map.
    x = AveragePooling2D(pool_size=input_shape, strides=(1, 1),
                         name='ave_pool/ECOC_' + suffix)(out_Eb)

    # Classifier head.
    output_name = 'fc_OnevsOne_' + suffix
    x = Flatten(name='fc_OnevsOne_' + suffix + '/flatten')(x)
    x = Dropout(0.5, name='fc_OnevsOne_' + suffix + '/drop')(x)
    out_node = Dense(nOutput, activation=activation, name=output_name)(x)

    return out_node
Example #20
Source Project: deepQuest Author: sheffieldnlp File: cnn_model.py License: BSD 3-Clause "New" or "Revised" License | 6 votes |
def add_One_vs_One_Inception_v2(self, input, input_shape, id_branch, nOutput=2, activation='softmax'):
    """
    Builds a simple One_vs_One_Inception_v2 network with 2 inception layers
    on the top of the current model (useful for ECOC_loss models).
    """
    suffix = str(id_branch)

    # Two stacked inception modules (wider than the v1 variant).
    out_Ea = self.__addInception('inceptionEa_' + suffix, input, 16, 8, 32, 8, 8, 8)
    out_Eb = self.__addInception('inceptionEb_' + suffix, out_Ea, 8, 8, 16, 8, 4, 4)

    # Global average pooling (window covers the full spatial extent).
    self.model.add_node(AveragePooling2D(pool_size=input_shape[1:], strides=(1, 1)),
                        name='ave_pool/ECOC_' + suffix, input=out_Eb)

    # Classifier head: flatten -> dropout -> dense.
    self.model.add_node(Flatten(),
                        name='fc_OnevsOne_' + suffix + '/flatten',
                        input='ave_pool/ECOC_' + suffix)
    self.model.add_node(Dropout(0.5),
                        name='fc_OnevsOne_' + suffix + '/drop',
                        input='fc_OnevsOne_' + suffix + '/flatten')
    output_name = 'fc_OnevsOne_' + suffix
    self.model.add_node(Dense(nOutput, activation=activation),
                        name=output_name,
                        input='fc_OnevsOne_' + suffix + '/drop')

    return output_name
Example #21
Source Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation Author: JACKYLUO1991 File: lednet.py License: Apache License 2.0 | 5 votes |
def apn_module(self, x):
    """Attention pyramid module: multi-scale conv pyramid gating a 1x1
    projection of the input, plus a pooled global-context branch."""

    def global_branch(t):
        # Global context: average-pool, 1x1 conv to `classes`, upsample back.
        t = layers.AveragePooling2D()(t)
        t = layers.Conv2D(self.classes, kernel_size=1, padding='same')(t)
        t = layers.BatchNormalization()(t)
        t = layers.Activation('relu')(t)
        t = layers.UpSampling2D(interpolation='bilinear')(t)
        return t

    def conv_bn_relu(t, filters, kernel_size, stride):
        t = layers.Conv2D(filters, kernel_size=kernel_size,
                          strides=(stride, stride), padding='same')(t)
        t = layers.BatchNormalization()(t)
        t = layers.Activation('relu')(t)
        return t

    # Downsampling pyramid: 7x7, 5x5, 3x3 convs, each halving resolution.
    x_7 = conv_bn_relu(x, int(x.shape[-1]), 7, stride=2)
    x_5 = conv_bn_relu(x_7, int(x.shape[-1]), 5, stride=2)
    x_3 = conv_bn_relu(x_5, int(x.shape[-1]), 3, stride=2)

    # Merge the pyramid bottom-up with bilinear upsampling at each step.
    x_3_1 = conv_bn_relu(x_3, self.classes, 3, stride=1)
    x_3_1_up = layers.UpSampling2D(interpolation='bilinear')(x_3_1)
    x_5_1 = conv_bn_relu(x_5, self.classes, 5, stride=1)
    x_3_5 = layers.add([x_5_1, x_3_1_up])
    x_3_5_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5)
    x_7_1 = conv_bn_relu(x_7, self.classes, 3, stride=1)
    x_3_5_7 = layers.add([x_7_1, x_3_5_up])
    x_3_5_7_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5_7)

    # Pyramid output gates a 1x1 projection of the input (attention),
    # then the global-context branch is added on top.
    x_middle = conv_bn_relu(x, self.classes, 1, stride=1)
    x_middle = layers.multiply([x_3_5_7_up, x_middle])
    x_right = global_branch(x)
    x_middle = layers.add([x_middle, x_right])
    return x_middle
Example #22
Source Project: DEXTR-KerasTensorflow Author: scaelles File: classifiers.py License: GNU General Public License v3.0 | 5 votes |
def psp_block(prev_layer, level, feature_map_shape, input_shape):
    """One pyramid-pooling branch of a PSP head.

    Pools `prev_layer` with the kernel/stride configured for `level`,
    projects to 512 channels via a 1x1 conv + BN + ReLU, then upsamples
    back to `feature_map_shape`.

    Raises:
        ValueError: if no pooling parameters are defined for `input_shape`.
    """
    if input_shape == (512, 512):
        # level -> [kernel, stride]
        kernel_strides_map = {1: [64, 64],
                              2: [32, 32],
                              3: [22, 21],
                              6: [11, 9]}  # TODO: Level 6: kernel correct, but stride not exactly the same as Pytorch
    else:
        # Bug fix: `input_shape` is a tuple, so string concatenation raised
        # TypeError instead of the intended ValueError; format it explicitly.
        raise ValueError("Pooling parameters for input shape "
                         + str(input_shape) + " are not defined.")

    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    names = [
        "class_psp_" + str(level) + "_conv",
        "class_psp_" + str(level) + "_bn"
    ]
    kernel = (kernel_strides_map[level][0], kernel_strides_map[level][0])
    strides = (kernel_strides_map[level][1], kernel_strides_map[level][1])

    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
                        use_bias=False)(prev_layer)
    prev_layer = resnet.BN(bn_axis, name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    # Resize back to the backbone feature-map resolution.
    prev_layer = Upsampling(feature_map_shape)(prev_layer)
    return prev_layer
Example #23
Source Project: keras-image-segmentation Author: dhkim0225 File: pspnet.py License: MIT License | 5 votes |
def interp_block(x, num_filters=512, level=1, input_shape=(512, 512, 3), output_stride=16):
    """Pyramid-pooling branch: level-dependent average pooling, 1x1 conv,
    BN, then resize back to the backbone feature-map size via Interp."""
    feature_map_shape = (input_shape[0] / output_stride,
                         input_shape[1] / output_stride)

    # compute dataformat
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1

    # Pool window scale depends on the backbone's output stride.
    # NOTE(review): `scale` is undefined for any other stride — verify callers.
    if output_stride == 16:
        scale = 5
    elif output_stride == 8:
        scale = 10

    window = level * scale
    kernel = (window, window)
    strides = (window, window)

    global_feat = AveragePooling2D(kernel, strides=strides,
                                   name='pool_level_%s_%s' % (level, output_stride))(x)
    global_feat = _conv(filters=num_filters,
                        kernel_size=(1, 1),
                        padding='same',
                        name='conv_level_%s_%s' % (level, output_stride))(global_feat)
    global_feat = BatchNormalization(axis=bn_axis,
                                     name='bn_level_%s_%s' % (level, output_stride))(global_feat)
    global_feat = Lambda(Interp, arguments={'shape': feature_map_shape})(global_feat)
    return global_feat

# squeeze and excitation function
Example #24
Source Project: PSPNet-Keras-tensorflow Author: Vladkryvoruchko File: layers_builder.py License: MIT License | 5 votes |
def interp_block(prev_layer, level, feature_map_shape, input_shape):
    """PSPNet pyramid-pooling branch for 473x473 or 713x713 inputs."""
    # Kernel == stride: non-overlapping pooling windows per pyramid level.
    if input_shape == (473, 473):
        kernel_strides_map = {1: 60, 2: 30, 3: 20, 6: 10}
    elif input_shape == (713, 713):
        kernel_strides_map = {1: 90, 2: 45, 3: 30, 6: 15}
    else:
        print("Pooling parameters for input shape ", input_shape, " are not defined.")
        exit(1)

    names = [
        "conv5_3_pool" + str(level) + "_conv",
        "conv5_3_pool" + str(level) + "_conv_bn"
    ]
    size = kernel_strides_map[level]
    kernel = (size, size)
    strides = (size, size)

    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
                        use_bias=False)(prev_layer)
    prev_layer = BN(name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    # Resize back to the backbone feature-map resolution.
    prev_layer = Interp(feature_map_shape)(prev_layer)
    return prev_layer
Example #25
Source Project: image-segmentation-keras Author: divamgupta File: _pspnet_2.py License: MIT License | 5 votes |
def interp_block(prev_layer, level, feature_map_shape, input_shape):
    """Pyramid-pooling branch (PSPNet): pool, 1x1 conv, BN, ReLU, resize."""
    # Supported input sizes map each pyramid level to a pooling window;
    # kernel equals stride, so windows do not overlap.
    if input_shape == (473, 473):
        kernel_strides_map = {1: 60, 2: 30, 3: 20, 6: 10}
    elif input_shape == (713, 713):
        kernel_strides_map = {1: 90, 2: 45, 3: 30, 6: 15}
    else:
        print("Pooling parameters for input shape ", input_shape, " are not defined.")
        exit(1)

    conv_name = "conv5_3_pool" + str(level) + "_conv"
    bn_name = "conv5_3_pool" + str(level) + "_conv_bn"
    names = [conv_name, bn_name]

    window = kernel_strides_map[level]
    kernel = (window, window)
    strides = (window, window)

    prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
    prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
                        use_bias=False)(prev_layer)
    prev_layer = BN(name=names[1])(prev_layer)
    prev_layer = Activation('relu')(prev_layer)
    # Resize back to the backbone feature-map resolution.
    prev_layer = Interp(feature_map_shape)(prev_layer)
    return prev_layer
Example #26
Source Project: posewarp-cvpr2018 Author: balakg File: truncated_vgg.py License: MIT License | 5 votes |
def vgg_norm():
    """VGG19 variant with average pooling, returning selected activations.

    Builds a fresh graph that mirrors VGG19 but replaces max pooling with
    AveragePooling2D, then copies ImageNet weights layer-by-layer from
    keras' pretrained VGG19 (pooling layers carry no weights).
    """
    img_input = Input(shape=(256, 256, 3))

    # Block 1
    x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x1)
    x3 = AveragePooling2D((2, 2), strides=(2, 2), name='block1_pool')(x2)

    # Block 2
    x4 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x3)
    x5 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x4)
    x6 = AveragePooling2D((2, 2), strides=(2, 2), name='block2_pool')(x5)

    # Block 3
    x7 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x6)
    x8 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x7)
    x9 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x8)
    x10 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x9)
    x11 = AveragePooling2D((2, 2), strides=(2, 2), name='block3_pool')(x10)

    # Block 4
    x12 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x11)
    x13 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x12)
    x14 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x13)
    x15 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x14)
    x16 = AveragePooling2D((2, 2), strides=(2, 2), name='block4_pool')(x15)

    # Block 5
    x17 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x16)
    x18 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x17)
    x19 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x18)
    x20 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x19)
    x21 = AveragePooling2D((2, 2), strides=(2, 2), name='block5_pool')(x20)

    # Expose activations from blocks 1-4 (up to block4_conv4).
    model = Model(inputs=[img_input],
                  outputs=[x1, x2, x4, x5, x7, x8, x9, x10, x12, x13, x14, x15])

    # Transfer pretrained ImageNet weights from the stock VGG19.
    model_orig = VGG19(weights='imagenet', input_shape=(256, 256, 3), include_top=False)
    for i in range(len(model.layers)):
        weights = model_orig.layers[i].get_weights()
        model.layers[i].set_weights(weights)

    return model
Example #27
Source Project: FECNet Author: GerardLiu96 File: FECWithPretrained.py License: MIT License | 5 votes |
def create_model():
    """Frozen InceptionV3 backbone + dense block + 16-d L2-normalized embedding."""
    # Data format: tensorflow -> channels_last; theano -> channels_first.
    if DATA_FORMAT == 'channels_first':
        INP_SHAPE = (3, 299, 299)
        img_input = Input(shape=INP_SHAPE)
        CONCAT_AXIS = 1
    elif DATA_FORMAT == 'channels_last':
        INP_SHAPE = (299, 299, 3)
        img_input = Input(shape=INP_SHAPE)
        CONCAT_AXIS = 3
    else:
        raise Exception('Invalid Dim Ordering')

    base_model = InceptionV3(weights='imagenet', include_top=False)
    base_model.summary()

    # Freeze the pretrained backbone.
    for layer in base_model.layers:
        layer.trainable = False

    # Branch off the intermediate 'mixed7' activation.
    x = base_model.get_layer('mixed7').output
    x = Convolution2D(512, (1, 1), kernel_initializer="glorot_uniform",
                      padding="same", name="DenseNet_initial_conv2D", use_bias=False,
                      kernel_regularizer=l2(WEIGHT_DECAY))(x)
    x = BatchNormalization()(x)
    x, nb_filter = dense_block(x, 5, 512, growth_rate=64, dropout_rate=0.5)

    # Global 7x7 average pooling, then project to a 16-d embedding.
    x = AveragePooling2D(pool_size=(7, 7), strides=1, padding='valid',
                         data_format=DATA_FORMAT)(x)
    x = Dense(512, activation='relu')(x)
    x = Dense(16)(x)
    # L2-normalize the embedding.
    x = Lambda(lambda x: tf.nn.l2_normalize(x))(x)

    model = Model(inputs=base_model.input, outputs=x)
    return model
Example #28
Source Project: hacktoberfest2018 Author: ambujraj File: DenseNet_CIFAR10.py License: GNU General Public License v3.0 | 5 votes |
def add_transition(input, num_filter = 12, dropout_rate = 0.2):
    """DenseNet transition: BN-ReLU, compressed 1x1 conv, dropout, 2x2 avg pool.

    Returns the pooled tensor and the compressed filter count. Relies on
    module-level `weight_decay` and `compression`.
    """
    global weight_decay
    norm = BatchNormalization()(input)
    act = Activation('relu')(norm)
    # 1x1 bottleneck compresses channels by the global `compression` factor.
    bottleneck = Conv2D(int(num_filter * compression), (1, 1), use_bias=False,
                        padding='same', kernel_initializer='he_normal',
                        kernel_regularizer=l2(weight_decay))(act)
    if dropout_rate > 0:
        bottleneck = Dropout(dropout_rate)(bottleneck)
    pooled = AveragePooling2D(pool_size=(2, 2))(bottleneck)
    return pooled, int(num_filter * compression)
Example #29
Source Project: hacktoberfest2018 Author: ambujraj File: DenseNet_CIFAR10.py License: GNU General Public License v3.0 | 5 votes |
def output_layer(input):
    """Classifier head: BN-ReLU, 2x2 average pool, flatten, softmax dense.

    `num_classes` is a module-level constant.
    """
    global compression
    norm = BatchNormalization()(input)
    act = Activation('relu')(norm)
    pooled = AveragePooling2D(pool_size=(2, 2))(act)
    flat = Flatten()(pooled)
    output = Dense(num_classes, activation='softmax')(flat)
    return output
Example #30
Source Project: CycleGAN-Keras Author: simontomaskarlsson File: model.py License: GNU General Public License v3.0 | 5 votes |
def modelMultiScaleDiscriminator(self, name=None):
    """Two-scale discriminator: full resolution plus a 2x average-pooled copy."""
    full_res = Input(shape=self.img_shape)
    half_res = AveragePooling2D(pool_size=(2, 2))(full_res)

    # One discriminator per scale.
    out_full = self.modelDiscriminator('D1')(full_res)
    out_half = self.modelDiscriminator('D2')(half_res)

    return Model(inputs=full_res, outputs=[out_full, out_half], name=name)