Python tensorflow.python.keras.regularizers.l2() Examples
The following are 30 code examples of tensorflow.python.keras.regularizers.l2().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module tensorflow.python.keras.regularizers, or try the search function.
Example #1
Source File: core.py From icme2019 with MIT License | 6 votes |
def build(self, input_shape):
    """Create one kernel/bias pair per hidden layer of the MLP.

    Layer i maps hidden_units[i] -> hidden_units[i + 1]. Kernels use a
    seeded glorot_normal initializer with l2(self.l2_reg) regularization;
    biases start at zero.
    """
    input_size = input_shape[-1]
    hidden_units = [int(input_size)] + list(self.hidden_size)
    self.kernels = [
        self.add_weight(name='kernel' + str(i),
                        shape=(hidden_units[i], hidden_units[i + 1]),
                        initializer=glorot_normal(seed=self.seed),
                        regularizer=l2(self.l2_reg),
                        trainable=True)
        for i in range(len(self.hidden_size))
    ]
    self.bias = [
        self.add_weight(name='bias' + str(i),
                        shape=(self.hidden_size[i],),
                        initializer=Zeros(),
                        trainable=True)
        for i in range(len(self.hidden_size))
    ]
    # Be sure to call this somewhere!
    super(MLP, self).build(input_shape)
Example #2
Source File: interaction.py From DeepCTR with Apache License 2.0 | 6 votes |
def build(self, input_shape):
    """Validate input rank and field count, then create the pairwise
    field-strength matrix for FwFM.

    Raises:
        ValueError: if the input is not rank 3 or its field axis does not
            match self.num_fields.
    """
    if len(input_shape) != 3:
        raise ValueError(
            "Unexpected inputs dimensions % d, expect to be 3 dimensions"
            % (len(input_shape)))
    if input_shape[1] != self.num_fields:
        raise ValueError(
            "Mismatch in number of fields {} and concatenated embeddings dims {}".format(
                self.num_fields, input_shape[1]))
    # One scalar strength per (field_i, field_j) pair.
    self.field_strengths = self.add_weight(
        name='field_pair_strengths',
        shape=(self.num_fields, self.num_fields),
        initializer=TruncatedNormal(),
        regularizer=l2(self.regularizer),
        trainable=True)
    # Be sure to call this somewhere!
    super(FwFMLayer, self).build(input_shape)
Example #3
Source File: interaction.py From DeepCTR with Apache License 2.0 | 6 votes |
def build(self, input_shape):
    """Create per-layer (dim, 1) kernels and biases for the CrossNet.

    Raises:
        ValueError: if the input is not rank 2.
    """
    if len(input_shape) != 2:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 2 dimensions"
            % (len(input_shape),))
    dim = int(input_shape[-1])
    self.kernels = [
        self.add_weight(name='kernel' + str(i),
                        shape=(dim, 1),
                        initializer=glorot_normal(seed=self.seed),
                        regularizer=l2(self.l2_reg),
                        trainable=True)
        for i in range(self.layer_num)
    ]
    self.bias = [
        self.add_weight(name='bias' + str(i),
                        shape=(dim, 1),
                        initializer=Zeros(),
                        trainable=True)
        for i in range(self.layer_num)
    ]
    # Be sure to call this somewhere!
    super(CrossNet, self).build(input_shape)
Example #4
Source File: core.py From DeepCTR with Apache License 2.0 | 6 votes |
def call(self, inputs, training=None, **kwargs):
    """Run the DNN forward pass.

    Each layer applies: linear (tensordot + bias) -> optional batch norm
    -> activation -> dropout, feeding the result into the next layer.
    """
    deep_input = inputs
    for i in range(len(self.hidden_units)):
        fc = tf.nn.bias_add(
            tf.tensordot(deep_input, self.kernels[i], axes=(-1, 0)),
            self.bias[i])
        if self.use_bn:
            fc = self.bn_layers[i](fc, training=training)
        fc = self.activation_layers[i](fc)
        fc = self.dropout_layers[i](fc, training=training)
        deep_input = fc
    return deep_input
Example #5
Source File: core.py From DeepCTR with Apache License 2.0 | 6 votes |
def build(self, input_shape):
    """Create the DNN's weights and per-layer sublayers.

    Builds kernels/biases for each hidden layer, plus batch-norm layers
    (only when self.use_bn), dropout layers (seed offset by layer index),
    and activation layers.
    """
    input_size = input_shape[-1]
    hidden_units = [int(input_size)] + list(self.hidden_units)
    self.kernels = [
        self.add_weight(name='kernel' + str(i),
                        shape=(hidden_units[i], hidden_units[i + 1]),
                        initializer=glorot_normal(seed=self.seed),
                        regularizer=l2(self.l2_reg),
                        trainable=True)
        for i in range(len(self.hidden_units))
    ]
    self.bias = [
        self.add_weight(name='bias' + str(i),
                        shape=(self.hidden_units[i],),
                        initializer=Zeros(),
                        trainable=True)
        for i in range(len(self.hidden_units))
    ]
    # NOTE: bn_layers exists only when use_bn is set; call() must guard on
    # self.use_bn before indexing it (it does).
    if self.use_bn:
        self.bn_layers = [tf.keras.layers.BatchNormalization()
                          for _ in range(len(self.hidden_units))]
    self.dropout_layers = [
        tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)
        for i in range(len(self.hidden_units))
    ]
    self.activation_layers = [activation_layer(self.activation)
                              for _ in range(len(self.hidden_units))]
    # Be sure to call this somewhere!
    super(DNN, self).build(input_shape)
Example #6
Source File: inputs.py From DeepCTR with Apache License 2.0 | 6 votes |
def create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns,
                          seed, l2_reg, prefix='sparse_', seq_mask_zero=True):
    """Build a dict mapping embedding_name -> Embedding layer.

    Plain sparse features get a standard Embedding; variable-length sparse
    features get one with mask_zero=seq_mask_zero. Each layer carries the
    feature's initializer, an l2(l2_reg) regularizer, and the feature's
    trainable flag.
    """
    sparse_embedding = {}
    for feat in sparse_feature_columns:
        emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                        embeddings_initializer=feat.embeddings_initializer,
                        embeddings_regularizer=l2(l2_reg),
                        name=prefix + '_emb_' + feat.embedding_name)
        emb.trainable = feat.trainable
        sparse_embedding[feat.embedding_name] = emb
    if varlen_sparse_feature_columns and len(varlen_sparse_feature_columns) > 0:
        for feat in varlen_sparse_feature_columns:
            emb = Embedding(feat.vocabulary_size, feat.embedding_dim,
                            embeddings_initializer=feat.embeddings_initializer,
                            embeddings_regularizer=l2(l2_reg),
                            name=prefix + '_seq_emb_' + feat.name,
                            mask_zero=seq_mask_zero)
            emb.trainable = feat.trainable
            # May overwrite an entry created above for the same embedding_name.
            sparse_embedding[feat.embedding_name] = emb
    return sparse_embedding
Example #7
Source File: graphsage.py From GraphNeuralNetwork with MIT License | 6 votes |
def build(self, input_shapes):
    """Create the aggregator's weights.

    A single relu Dense layer transforms neighbour features; the
    (input_dim * 2, output_dim) matrix projects the concatenated
    self/neighbour representation. Bias is optional.
    """
    self.dense_layers = [
        Dense(self.input_dim, activation='relu', use_bias=True,
              kernel_regularizer=l2(self.l2_reg))
    ]
    self.neigh_weights = self.add_weight(
        shape=(self.input_dim * 2, self.output_dim),
        initializer=glorot_uniform(seed=self.seed),
        regularizer=l2(self.l2_reg),
        name="neigh_weights")
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.output_dim,),
                                    initializer=Zeros(),
                                    name='bias_weight')
    self.built = True
Example #8
Source File: gcn.py From GraphNeuralNetwork with MIT License | 6 votes |
def build(self, input_shapes):
    """Create the GCN layer's kernel, optional bias and dropout.

    In feature-less mode the input dim comes from the first input's last
    axis; otherwise exactly two inputs (features, adjacency) are expected
    and the dim is read from the feature shape.
    """
    if self.feature_less:
        input_dim = int(input_shapes[0][-1])
    else:
        assert len(input_shapes) == 2
        features_shape = input_shapes[0]
        input_dim = int(features_shape[-1])
    self.kernel = self.add_weight(
        shape=(input_dim, self.units),
        initializer=glorot_uniform(seed=self.seed),
        regularizer=l2(self.l2_reg),
        name='kernel', )
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=Zeros(),
                                    name='bias', )
    self.dropout = Dropout(self.dropout_rate, seed=self.seed)
    self.built = True
Example #9
Source File: resnet.py From camera-trap-classifier with MIT License | 6 votes |
def bottleneck(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Bottleneck architecture for > 34 layer resnet.

    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf

    Returns:
        A final conv layer of filters * 4
    """
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv_1_1 = Conv2D(filters=filters, kernel_size=(1, 1),
                              strides=init_strides,
                              padding="same",
                              kernel_initializer="he_normal",
                              kernel_regularizer=l2(1e-4))(input)
        else:
            conv_1_1 = _bn_relu_conv(filters=filters, kernel_size=(1, 1),
                                     strides=init_strides)(input)
        conv_3_3 = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv_1_1)
        residual = _bn_relu_conv(filters=filters * 4, kernel_size=(1, 1))(conv_3_3)
        return _shortcut(input, residual)

    return f
Example #10
Source File: resnet.py From camera-trap-classifier with MIT License | 6 votes |
def basic_block(filters, init_strides=(1, 1), is_first_block_of_first_layer=False):
    """Basic 3 X 3 convolution blocks for use on resnets with layers <= 34.

    Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
    """
    def f(input):
        if is_first_block_of_first_layer:
            # don't repeat bn->relu since we just did bn->relu->maxpool
            conv1 = Conv2D(filters=filters, kernel_size=(3, 3),
                           strides=init_strides,
                           padding="same",
                           kernel_initializer="he_normal",
                           kernel_regularizer=l2(1e-4))(input)
        else:
            conv1 = _bn_relu_conv(filters=filters, kernel_size=(3, 3),
                                  strides=init_strides)(input)
        residual = _bn_relu_conv(filters=filters, kernel_size=(3, 3))(conv1)
        return _shortcut(input, residual)

    return f
Example #11
Source File: resnet.py From camera-trap-classifier with MIT License | 6 votes |
def _shortcut(input, residual):
    """Adds a shortcut between input and residual block and merges them with "sum".

    If the residual's spatial size or channel count differs from the input's,
    a 1x1 conv projects the input (strided to match width/height); otherwise
    the identity is used.
    """
    # Stride ratios should be ints if the architecture is correctly configured.
    input_shape = K.int_shape(input)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] / residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001))(input)

    return add([shortcut, residual])
Example #12
Source File: resnet.py From camera-trap-classifier with MIT License | 6 votes |
def _bn_relu_conv(**conv_params):
    """Helper to build a BN -> relu -> conv block.

    This is an improved scheme proposed in
    http://arxiv.org/pdf/1603.05027v2.pdf
    """
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(input):
        activation = _bn_relu(input)
        return Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(activation)

    return f
Example #13
Source File: resnet.py From camera-trap-classifier with MIT License | 6 votes |
def _conv_bn_relu(**conv_params):
    """Helper to build a conv -> BN -> relu block."""
    filters = conv_params["filters"]
    kernel_size = conv_params["kernel_size"]
    strides = conv_params.setdefault("strides", (1, 1))
    kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
    padding = conv_params.setdefault("padding", "same")
    kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))

    def f(input):
        conv = Conv2D(filters=filters, kernel_size=kernel_size,
                      strides=strides, padding=padding,
                      kernel_initializer=kernel_initializer,
                      kernel_regularizer=kernel_regularizer)(input)
        return _bn_relu(conv)

    return f
Example #14
Source File: core.py From icme2019 with MIT License | 6 votes |
def call(self, inputs, training=None, **kwargs):
    """Run the MLP forward pass.

    Each layer applies: linear (tensordot + bias) -> optional batch norm
    -> activation -> dropout.

    NOTE(review): BatchNormalization and Dropout layers are constructed
    inside call, i.e. fresh on every invocation — confirm this is intended
    (their variables would not persist across calls).
    """
    deep_input = inputs
    for i in range(len(self.hidden_size)):
        fc = tf.nn.bias_add(
            tf.tensordot(deep_input, self.kernels[i], axes=(-1, 0)),
            self.bias[i])
        if self.use_bn:
            fc = tf.keras.layers.BatchNormalization()(fc)
        fc = activation_fun(self.activation, fc)
        fc = tf.keras.layers.Dropout(1 - self.keep_prob)(fc,)
        deep_input = fc
    return deep_input
Example #15
Source File: input_embedding.py From icme2019 with MIT License | 6 votes |
def merge_dense_input(dense_input_, embed_list, embedding_size, l2_reg):
    """Append embeddings of the dense inputs to embed_list and return it.

    With embedding_size == "auto", all dense inputs are concatenated and
    reshaped to (1, n_dense). Otherwise each dense input is projected to
    embedding_size by a bias-free, l2-regularized Dense layer and reshaped
    to (1, embedding_size).
    """
    dense_input = list(dense_input_.values())
    if len(dense_input) > 0:
        if embedding_size == "auto":
            if len(dense_input) == 1:
                continuous_embedding_list = dense_input[0]
            else:
                continuous_embedding_list = Concatenate()(dense_input)
            continuous_embedding_list = Reshape(
                [1, len(dense_input)])(continuous_embedding_list)
            embed_list.append(continuous_embedding_list)
        else:
            continuous_embedding_list = list(
                map(Dense(embedding_size, use_bias=False,
                          kernel_regularizer=l2(l2_reg), ),
                    dense_input))
            continuous_embedding_list = list(
                map(Reshape((1, embedding_size)), continuous_embedding_list))
            embed_list += continuous_embedding_list
    return embed_list
Example #16
Source File: densenet.py From ImageAI with MIT License | 6 votes |
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''
    if type == 'upsampling':
        x = UpSampling2D()(ip)
    elif type == 'subpixel':
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(ip)
        x = SubPixelUpscaling(scale_factor=2)(x)
        x = Conv2D(nb_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), use_bias=False,
                   kernel_initializer='he_normal')(x)
    else:
        x = Conv2DTranspose(nb_filters, (3, 3), activation='relu',
                            padding='same', strides=(2, 2),
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(ip)
    return x
Example #17
Source File: densenet.py From ImageAI with MIT License | 6 votes |
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu 1x1, Conv2D, optional compression, dropout and
    Maxpooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of
            feature maps in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, dropout,
        maxpool
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1),
               kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x
Example #18
Source File: input_embedding.py From icme2019 with MIT License | 6 votes |
def get_linear_logit(linear_emb_list, dense_input_dict, l2_reg):
    """Combine sparse linear embeddings and a dense linear projection into
    one linear-part logit.

    Sparse embeddings are summed (or passed through when there is exactly
    one); dense inputs are concatenated and projected to a single unit by a
    bias-free, l2-regularized Dense, then added to the sparse term.
    """
    if len(linear_emb_list) > 1:
        linear_term = add(linear_emb_list)
    elif len(linear_emb_list) == 1:
        linear_term = linear_emb_list[0]
    else:
        linear_term = None

    dense_input = list(dense_input_dict.values())
    if len(dense_input) > 0:
        dense_input__ = dense_input[0] if len(
            dense_input) == 1 else Concatenate()(dense_input)
        linear_dense_logit = Dense(
            1, activation=None, use_bias=False,
            kernel_regularizer=l2(l2_reg))(dense_input__)
        if linear_term is not None:
            linear_term = add([linear_dense_logit, linear_term])
        else:
            linear_term = linear_dense_logit

    return linear_term
Example #19
Source File: densenet.py From ImageAI with MIT License | 5 votes |
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added
        (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua
        inter_channel = nb_filter * 4
        x = Conv2D(inter_channel, (1, 1),
                   kernel_initializer='he_normal', padding='same',
                   use_bias=False, kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal',
               padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
Example #20
Source File: interaction.py From DeepCTR with Apache License 2.0 | 5 votes |
def call(self, inputs, training=None, **kwargs):
    """Attentional FM forward pass.

    Builds all pairwise element-wise products of the input embeddings,
    scores each pair with a one-hidden-layer attention net, softmaxes the
    scores over pairs, and projects the attention-weighted sum to the
    output via self.projection_p.
    """
    if K.ndim(inputs[0]) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs)))

    embeds_vec_list = inputs
    row = []
    col = []
    for r, c in itertools.combinations(embeds_vec_list, 2):
        row.append(r)
        col.append(c)

    p = tf.concat(row, axis=1)
    q = tf.concat(col, axis=1)
    bi_interaction = p * q  # element-wise pairwise interactions

    attention_temp = tf.nn.relu(tf.nn.bias_add(
        tf.tensordot(bi_interaction, self.attention_W, axes=(-1, 0)),
        self.attention_b))
    self.normalized_att_score = softmax(
        tf.tensordot(attention_temp, self.projection_h, axes=(-1, 0)), dim=1)
    attention_output = reduce_sum(
        self.normalized_att_score * bi_interaction, axis=1)
    attention_output = self.dropout(attention_output, training=training)  # training
    afm_out = self.tensordot([attention_output, self.projection_p])
    return afm_out
Example #21
Source File: interaction.py From icme2019 with MIT License | 5 votes |
def build(self, input_shape):
    """Validate the input list and create the attention weights.

    Expects a list of >= 2 tensors, all shaped (None, 1, embedding_size).
    Creates the attention net (W, b), the score projection h, and the
    output projection p.
    """
    if not isinstance(input_shape, list) or len(input_shape) < 2:
        raise ValueError('A `AttentionalFM` layer should be called '
                         'on a list of at least 2 inputs')

    # All inputs must share one shape.
    shape_set = {tuple(shape.as_list()) for shape in input_shape}
    if len(shape_set) > 1:
        raise ValueError('A `AttentionalFM` layer requires '
                         'inputs with same shapes '
                         'Got different shapes: %s' % (shape_set))

    if len(input_shape[0]) != 3 or input_shape[0][1] != 1:
        raise ValueError('A `AttentionalFM` layer requires '
                         'inputs of a list with same shape tensor like '
                         '(None, 1, embedding_size)'
                         'Got different shapes: %s' % (input_shape[0]))

    embedding_size = input_shape[0][-1].value  # TF1-style Dimension.value

    self.attention_W = self.add_weight(
        shape=(embedding_size, self.attention_factor),
        initializer=glorot_normal(seed=self.seed),
        regularizer=l2(self.l2_reg_w),
        name="attention_W")
    self.attention_b = self.add_weight(
        shape=(self.attention_factor,),
        initializer=Zeros(),
        name="attention_b")
    self.projection_h = self.add_weight(
        shape=(self.attention_factor, 1),
        initializer=glorot_normal(seed=self.seed),
        name="projection_h")
    self.projection_p = self.add_weight(
        shape=(embedding_size, 1),
        initializer=glorot_normal(seed=self.seed),
        name="projection_p")

    # Be sure to call this somewhere!
    super(AFMLayer, self).build(input_shape)
Example #22
Source File: interaction.py From icme2019 with MIT License | 5 votes |
def call(self, inputs, **kwargs):
    """Attentional FM forward pass (TF1-style ops).

    Pairwise element-wise products are attention-weighted (softmax over
    pairs), summed, passed through tf.nn.dropout with keep_prob, and
    projected by self.projection_p.
    """
    if K.ndim(inputs[0]) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions" % (K.ndim(inputs)))

    embeds_vec_list = inputs
    row = []
    col = []
    for r, c in itertools.combinations(embeds_vec_list, 2):
        row.append(r)
        col.append(c)

    p = tf.concat(row, axis=1)
    q = tf.concat(col, axis=1)
    bi_interaction = p * q  # element-wise pairwise interactions

    attention_temp = tf.nn.relu(tf.nn.bias_add(
        tf.tensordot(bi_interaction, self.attention_W, axes=(-1, 0)),
        self.attention_b))
    self.normalized_att_score = tf.nn.softmax(
        tf.tensordot(attention_temp, self.projection_h, axes=(-1, 0)), dim=1)
    attention_output = tf.reduce_sum(
        self.normalized_att_score * bi_interaction, axis=1)
    # NOTE(review): dropout is applied unconditionally (no training switch)
    # with TF1 keep_prob semantics — confirm against the target TF version.
    attention_output = tf.nn.dropout(
        attention_output, self.keep_prob, seed=1024)
    afm_out = tf.tensordot(
        attention_output, self.projection_p, axes=(-1, 0))
    return afm_out
Example #23
Source File: gat.py From GraphNeuralNetwork with MIT License | 5 votes |
def build(self, input_shape):
    """Create multi-head GAT weights from the (features, adjacency) shapes.

    Creates the shared feature projection, per-head self/neighbour
    attention vectors, an optional bias, and three dropout layers.
    """
    X, A = input_shape
    embedding_size = int(X[-1])
    self.weight = self.add_weight(
        name='weight',
        shape=[embedding_size, self.att_embedding_size * self.head_num],
        dtype=tf.float32,
        regularizer=l2(self.l2_reg),
        initializer=tf.keras.initializers.glorot_uniform())
    self.att_self_weight = self.add_weight(
        name='att_self_weight',
        shape=[1, self.head_num, self.att_embedding_size],
        dtype=tf.float32,
        regularizer=l2(self.l2_reg),
        initializer=tf.keras.initializers.glorot_uniform())
    self.att_neighs_weight = self.add_weight(
        name='att_neighs_weight',
        shape=[1, self.head_num, self.att_embedding_size],
        dtype=tf.float32,
        regularizer=l2(self.l2_reg),
        initializer=tf.keras.initializers.glorot_uniform())
    if self.use_bias:
        self.bias_weight = self.add_weight(
            name='bias',
            shape=[1, self.head_num, self.att_embedding_size],
            dtype=tf.float32,
            initializer=Zeros())
    self.in_dropout = Dropout(self.dropout_rate)
    self.feat_dropout = Dropout(self.dropout_rate, )
    self.att_dropout = Dropout(self.dropout_rate, )
    # Be sure to call this somewhere!
    super(GATLayer, self).build(input_shape)
Example #24
Source File: graphsage.py From GraphNeuralNetwork with MIT License | 5 votes |
def build(self, input_shapes):
    """Create the aggregator's projection matrix, optional bias and dropout.

    The (input_dim, units) matrix projects aggregated neighbour features;
    bias is created only when self.use_bias is set.
    """
    self.neigh_weights = self.add_weight(
        shape=(self.input_dim, self.units),
        initializer=glorot_uniform(seed=self.seed),
        regularizer=l2(self.l2_reg),
        name="neigh_weights")
    if self.use_bias:
        # FIX: original passed shape=(self.units) — a bare int, not a
        # 1-tuple. Every sibling block spells this (units,); using the
        # tuple avoids relying on int->shape coercion in add_weight.
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=Zeros(),
                                    name='bias_weight')
    self.dropout = Dropout(self.dropout_rate)
    self.built = True
Example #25
Source File: mlr.py From icme2019 with MIT License | 5 votes |
def get_embedding(region_num, region_feature_dim_dict, base_feature_dim_dict,
                  bias_feature_dim_dict, init_std, seed, l2_reg_linear):
    """Create the MLR model's embedding layers.

    Returns:
        region_embeddings: per-region list of 1-dim Embeddings for the
            region sparse features (seed offset per region).
        base_embeddings: per-region list of 1-dim Embeddings for the base
            sparse features.
        bias_embedding: list of 1-dim Embeddings for the bias sparse
            features (single shared seed).
    """
    region_embeddings = [
        [Embedding(feat.dimension, 1,
                   embeddings_initializer=TruncatedNormal(
                       stddev=init_std, seed=seed + j),
                   embeddings_regularizer=l2(l2_reg_linear),
                   name='region_emb_' + str(j) + '_' + str(i))
         for i, feat in enumerate(region_feature_dim_dict['sparse'])]
        for j in range(region_num)
    ]
    base_embeddings = [
        [Embedding(feat.dimension, 1,
                   embeddings_initializer=TruncatedNormal(
                       stddev=init_std, seed=seed + j),
                   embeddings_regularizer=l2(l2_reg_linear),
                   name='base_emb_' + str(j) + '_' + str(i))
         for i, feat in enumerate(base_feature_dim_dict['sparse'])]
        for j in range(region_num)
    ]
    bias_embedding = [
        Embedding(feat.dimension, 1,
                  embeddings_initializer=TruncatedNormal(
                      stddev=init_std, seed=seed),
                  embeddings_regularizer=l2(l2_reg_linear),
                  name='embed_bias' + '_' + str(i))
        for i, feat in enumerate(bias_feature_dim_dict['sparse'])
    ]
    return region_embeddings, base_embeddings, bias_embedding
Example #26
Source File: resnet_model.py From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0 | 5 votes |
def _gen_l2_regularizer(use_l2_regularizer=True):
    """Return an l2 regularizer using the module-level L2_WEIGHT_DECAY,
    or None when regularization is disabled."""
    if not use_l2_regularizer:
        return None
    return regularizers.l2(L2_WEIGHT_DECAY)
Example #27
Source File: resnet_model.py From models with Apache License 2.0 | 5 votes |
def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
    """Return an l2 regularizer with the given weight decay, or None when
    regularization is disabled."""
    if not use_l2_regularizer:
        return None
    return regularizers.l2(l2_weight_decay)
Example #28
Source File: resnet_model.py From class-balanced-loss with MIT License | 4 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at
            main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Branch 2a: 1x1 reduce.
    x = layers.Conv2D(filters1, (1, 1),
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    # Branch 2b: kernel_size conv.
    x = layers.Conv2D(filters2, kernel_size,
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    # Branch 2c: 1x1 expand.
    x = layers.Conv2D(filters3, (1, 1),
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2c')(x)

    # Identity shortcut.
    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #29
Source File: resnet_model.py From g-tensorflow-models with Apache License 2.0 | 4 votes |
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at
            main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Branch 2a: 1x1 reduce (bias-free; BN follows).
    x = layers.Conv2D(filters1, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    # Branch 2b: kernel_size conv.
    x = layers.Conv2D(filters2, kernel_size,
                      padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    # Branch 2c: 1x1 expand.
    x = layers.Conv2D(filters3, (1, 1), use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2c')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name=bn_name_base + '2c')(x)

    # Identity shortcut.
    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x
Example #30
Source File: resnet_cifar_model.py From models with Apache License 2.0 | 4 votes |
def identity_building_block(input_tensor, kernel_size, filters, stage, block,
                            training=None):
    """The identity block is the block that has no conv layer at shortcut.

    Arguments:
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at
            main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: current block label, used for generating layer names
        training: Only used if training keras model with Estimator. In other
            scenarios it is handled automatically.

    Returns:
        Output tensor for the block.
    """
    filters1, filters2 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Branch 2a.
    x = layers.Conv2D(filters1, kernel_size,
                      padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2a')(input_tensor)
    x = layers.BatchNormalization(
        axis=bn_axis,
        momentum=BATCH_NORM_DECAY,
        epsilon=BATCH_NORM_EPSILON,
        name=bn_name_base + '2a')(x, training=training)
    x = layers.Activation('relu')(x)

    # Branch 2b.
    x = layers.Conv2D(filters2, kernel_size,
                      padding='same', use_bias=False,
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name=conv_name_base + '2b')(x)
    x = layers.BatchNormalization(
        axis=bn_axis,
        momentum=BATCH_NORM_DECAY,
        epsilon=BATCH_NORM_EPSILON,
        name=bn_name_base + '2b')(x, training=training)

    # Identity shortcut.
    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x