Python tensorflow.keras.regularizers.l2() Examples

The following are 30 code examples of tensorflow.keras.regularizers.l2(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.regularizers, or try the search function.
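Before diving into the examples, here is a minimal sketch of the basic pattern they all share: l2(factor) returns a regularizer object that, once attached to a layer through kernel_regularizer, adds factor * sum(w ** 2) for that layer's kernel to the model's loss. The layer sizes and factor below are illustrative, not taken from any of the projects.

import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2

# Attach an L2 penalty of 1e-4 * sum(w ** 2) to the first Dense kernel.
model = tf.keras.Sequential([
    layers.Dense(64, activation="relu", input_shape=(32,),
                 kernel_regularizer=l2(1e-4)),
    layers.Dense(10, activation="softmax"),
])

# The penalty appears as an extra loss term, one tensor per regularized weight.
print(model.losses)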
Example #1
Source Project: DexiNed   Author: xavysp   File: model.py    License: MIT License
def __init__(self, out_features,**kwargs):
        super(_DenseLayer, self).__init__(**kwargs)
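        # w_decay and weight_init are presumably module-level globals defined elsewhere in model.py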
        k_reg = None if w_decay is None else l2(w_decay)
        self.layers = []
        self.layers.append(tf.keras.Sequential(
            [
                layers.ReLU(),
                layers.Conv2D(
                    filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
                    use_bias=True, kernel_initializer=weight_init,
                    kernel_regularizer=k_reg),
                layers.BatchNormalization(),
                layers.ReLU(),
                layers.Conv2D(
                    filters=out_features, kernel_size=(3,3), strides=(1,1), padding='same',
                    use_bias=True, kernel_initializer=weight_init,
                    kernel_regularizer=k_reg),
                layers.BatchNormalization(),
            ]))  # the first ReLU may be unnecessary
Example #2
Source Project: DexiNed   Author: xavysp   File: model.py    License: MIT License
def __init__(self, mid_features, out_features=None, stride=(1,1),
                 use_bn=True,use_act=True,**kwargs):
        super(DoubleConvBlock, self).__init__(**kwargs)
        self.use_bn = use_bn
        self.use_act = use_act
        out_features = mid_features if out_features is None else out_features
        k_reg = None if w_decay is None else l2(w_decay)

        self.conv1 = layers.Conv2D(
            filters=mid_features, kernel_size=(3, 3), strides=stride, padding='same',
            use_bias=True, kernel_initializer=weight_init,
            kernel_regularizer=k_reg)
        self.bn1 = layers.BatchNormalization()

        self.conv2 = layers.Conv2D(
            filters=out_features, kernel_size=(3, 3), strides=(1, 1), padding='same',
            use_bias=True, kernel_initializer=weight_init,
            kernel_regularizer=k_reg)
        self.bn2 = layers.BatchNormalization()
        self.relu = layers.ReLU() 
Example #3
Source Project: keras-squeeze-excite-network   Author: titu1994   File: se_resnext.py    License: MIT License
def __initial_conv_block_inception(input_tensor, weight_decay=5e-4):
    """ Adds an initial conv block, with batch norm and relu for the inception resnext
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x 
Example #4
Source Project: object-localization   Author: lars76   File: train.py    License: MIT License
def create_model(trainable=False):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block = model.get_layer("block_16_project_BN").output

    x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block)
    x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(5, padding="same", kernel_size=1, activation="sigmoid")(x)

    model = Model(inputs=model.input, outputs=x)

    # divide by 2 since d/dw (learning_rate * w^2) = 2 * learning_rate * w
    # see https://arxiv.org/pdf/1711.05101.pdf
    regularizer = l2(WEIGHT_DECAY / 2)
    for weight in model.trainable_weights:
        with tf.keras.backend.name_scope("weight_regularizer"):
            model.add_loss(regularizer(weight)) # in tf2.0: lambda: regularizer(weight)

    return model 
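A quick check of the divide-by-two comment in this example: l2(c) contributes c * sum(w ** 2) to the loss, whose gradient is 2 * c * w, so passing WEIGHT_DECAY / 2 makes the gradient contribution exactly WEIGHT_DECAY * w. A minimal sketch of that arithmetic (the weight values and decay factor are illustrative):

import tensorflow as tf
from tensorflow.keras.regularizers import l2

WEIGHT_DECAY = 1e-4  # illustrative value, not the one used in the project
w = tf.Variable([1.0, -2.0, 3.0])
reg = l2(WEIGHT_DECAY / 2)

with tf.GradientTape() as tape:
    penalty = reg(w)  # (WEIGHT_DECAY / 2) * sum(w ** 2)
grad = tape.gradient(penalty, w)
print(grad.numpy())  # WEIGHT_DECAY * w, i.e. [1e-4, -2e-4, 3e-4]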
Example #5
Source Project: ivis   Author: beringresearch   File: network.py    License: GNU General Public License v2.0
def triplet_network(base_network, embedding_dims=2, embedding_l2=0.0):
    def output_shape(shapes):
        shape1, shape2, shape3 = shapes
        return (3, shape1[0],)

    input_a = Input(shape=base_network.input_shape[1:])
    input_p = Input(shape=base_network.input_shape[1:])
    input_n = Input(shape=base_network.input_shape[1:])

    embeddings = Dense(embedding_dims,
                       kernel_regularizer=l2(embedding_l2))(base_network.output)
    network = Model(base_network.input, embeddings)

    processed_a = network(input_a)
    processed_p = network(input_p)
    processed_n = network(input_n)

    triplet = Lambda(K.stack,
                     output_shape=output_shape,
                     name='stacked_triplets')([processed_a,
                                               processed_p,
                                               processed_n],)
    model = Model([input_a, input_p, input_n], triplet)

    return model, processed_a, processed_p, processed_n 
Example #6
Source Project: U-Time   Author: perslev   File: utime.py    License: MIT License
def create_seq_modeling(in_,
                            input_dims,
                            data_per_period,
                            n_periods,
                            n_classes,
                            transition_window,
                            name_prefix=""):
        cls = AveragePooling2D((data_per_period, 1),
                               name="{}average_pool".format(name_prefix))(in_)
        out = Conv2D(filters=n_classes,
                     kernel_size=(transition_window, 1),
                     activation="softmax",
                     kernel_regularizer=regularizers.l2(1e-5),
                     padding="same",
                     name="{}sequence_conv_out".format(name_prefix))(cls)
        s = [-1, n_periods, input_dims//data_per_period, n_classes]
        if s[2] == 1:
            s.pop(2)  # Squeeze the dim
        out = Lambda(lambda x: tf.reshape(x, s),
                     name="{}sequence_classification_reshaped".format(name_prefix))(out)
        return out 
Example #7
Source Project: U-Time   Author: perslev   File: utime.py    License: MIT License
def log(self):
        self.logger("{} Model Summary\n"
                    "-------------------".format(__class__.__name__))
        self.logger("N periods:         {}".format(self.n_periods))
        self.logger("Input dims:        {}".format(self.input_dims))
        self.logger("N channels:        {}".format(self.n_channels))
        self.logger("N classes:         {}".format(self.n_classes))
        self.logger("Kernel size:       {}".format(self.kernel_size))
        self.logger("Dilation rate:     {}".format(self.dilation))
        self.logger("CF factor:         %.3f" % self.cf)
        self.logger("Init filters:      {}".format(self.init_filters))
        self.logger("Depth:             %i" % self.depth)
        self.logger("Poolings:          {}".format(self.pools))
        self.logger("Transition window  {}".format(self.transition_window))
        self.logger("Dense activation   {}".format(self.dense_classifier_activation))
        self.logger("l2 reg:            %s" % self.l2_reg)
        self.logger("Padding:           %s" % self.padding)
        self.logger("Conv activation:   %s" % self.activation)
        self.logger("Receptive field:   %s" % self.receptive_field[0])
        self.logger("Seq length.:       {}".format(self.n_periods*self.input_dims))
        self.logger("N params:          %i" % self.count_params())
        self.logger("Input:             %s" % self.input)
        self.logger("Output:            %s" % self.output) 
Example #8
Source Project: CNNArt   Author: thomaskuestner   File: multiclass_3D_CNN.py    License: Apache License 2.0
def buildModel(patchShape, numClasses):
    input = Input(shape=patchShape)
    n_base_filter = 32
    _handle_data_format()
    conv = Conv3D(filters=n_base_filter, kernel_size=(7, 7, 7),
                  strides=(2, 2, 2), kernel_initializer="he_normal",
                  )(input)
    norm = BatchNormalization(axis=CHANNEL_AXIS)(conv)
    conv1 = Activation("relu")(norm)
    pool1 = MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2),
                         padding="same")(conv1)
    flatten1 = Flatten()(pool1)
    dense = Dense(units=numClasses,
                  kernel_initializer="he_normal",
                  activation="softmax",
                  kernel_regularizer=l2(1e-4))(flatten1)
    model = Model(inputs=input, outputs=dense)
    return model 
Example #9
Source Project: MultiPlanarUNet   Author: perslev   File: multitask_unet2d.py    License: MIT License
def log(self):
        self.logger("Multi-Task UNet Model Summary\n"
                    "-----------------------------")
        self.logger("N classes:         %s" % list(self.n_classes))
        self.logger("CF factor:         %.3f" % self.cf**2)
        self.logger("Depth:             %i" % self.depth)
        self.logger("l2 reg:            %s" % self.l2_reg)
        self.logger("Padding:           %s" % self.padding)
        self.logger("Conv activation:   %s" % self.activation)
        self.logger("Out activation:    %s" % list(self.out_activation))
        self.logger("Receptive field:   %s" % self.receptive_field)
        self.logger("N params:          %i" % self.count_params())
        self.logger("N tasks:           %i" % self.n_tasks)
        if self.n_tasks > 1:
            inputs = self.input
            outputs = self.output
        else:
            inputs = [self.input]
            outputs = [self.output]
        for i, (id_, in_, out) in enumerate(zip(self.task_IDs, inputs, outputs)):
            self.logger("\n--- Task %s ---" % id_)
            self.logger("In shape:  %s" % in_.shape)
            self.logger("Out shape: %s\n" % out.shape) 
Example #10
Source Project: MultiPlanarUNet   Author: perslev   File: unet.py    License: MIT License
def log(self):
        self.logger("UNet Model Summary\n------------------")
        self.logger("Image rows:        %i" % self.img_shape[0])
        self.logger("Image cols:        %i" % self.img_shape[1])
        self.logger("Image channels:    %i" % self.img_shape[2])
        self.logger("N classes:         %i" % self.n_classes)
        self.logger("CF factor:         %.3f" % self.cf**2)
        self.logger("Depth:             %i" % self.depth)
        self.logger("l2 reg:            %s" % self.l2_reg)
        self.logger("Padding:           %s" % self.padding)
        self.logger("Conv activation:   %s" % self.activation)
        self.logger("Out activation:    %s" % self.out_activation)
        self.logger("Receptive field:   %s" % self.receptive_field)
        self.logger("N params:          %i" % self.count_params())
        self.logger("Output:            %s" % self.output)
        self.logger("Crop:              %s" % (self.label_crop if np.sum(self.label_crop) != 0 else "None")) 
Example #11
Source Project: DeepPavlov   Author: deepmipt   File: morpho_tagger.py    License: Apache License 2.0
def _initialize(self):
        if isinstance(self.char_window_size, int):
            self.char_window_size = [self.char_window_size]
        if self.char_filters is None or isinstance(self.char_filters, int):
            self.char_filters = [self.char_filters] * len(self.char_window_size)
        if len(self.char_window_size) != len(self.char_filters):
            raise ValueError("There should be the same number of window sizes and filter sizes")
        if isinstance(self.word_lstm_units, int):
            self.word_lstm_units = [self.word_lstm_units] * self.word_lstm_layers
        if len(self.word_lstm_units) != self.word_lstm_layers:
            raise ValueError("There should be the same number of lstm layer units and lstm layers")
        if self.word_vectorizers is None:
            self.word_vectorizers = []
        if self.regularizer is not None:
            self.regularizer = l2(self.regularizer)
        if self.verbose > 0:
            log.info("{} symbols, {} tags in CharacterTagger".format(len(self.symbols), len(self.tags))) 
Example #12
Source Project: RSN   Author: thunlp   File: cnnmodule.py    License: MIT License
def _cnn_(cnn_input_shape,name=None):
    with tf.variable_scope(name or 'convnet', reuse=tf.AUTO_REUSE):
        convnet = Sequential()
        convnet.add(Conv1D(230, 3,
            input_shape = cnn_input_shape,
            kernel_initializer = W_init,
            bias_initializer = b_init_conv,
            kernel_regularizer=l2(2e-4)
            ))
        convnet.add(MaxPooling1D(pool_size=cnn_input_shape[0]-4))
        convnet.add(Activation('relu'))

        convnet.add(Flatten())
        convnet.add(Dense(cnn_input_shape[-1]*230, activation = 'sigmoid',
            kernel_initializer = W_init,
            bias_initializer = b_init_dense,
            kernel_regularizer=l2(1e-3)
            ))
    return convnet 
Example #13
Source Project: TF.Keras-Commonly-used-models   Author: 1044197988   File: DenseNet.py    License: Apache License 2.0
def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
    """
    Creates a transition layer between dense blocks, performing convolution and pooling.
    Works as downsampling.
    """
    
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_channels*compression), (1, 1), padding='same',
                      use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    
    # Adding dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x 
Example #14
Source Project: TF.Keras-Commonly-used-models   Author: 1044197988   File: MNasNet.py    License: Apache License 2.0
def depthwiseConv_bn(x, depth_multiplier, kernel_size, strides=1):
    """ Depthwise convolution
    DepthwiseConv2D is just the first step of a depthwise separable convolution
    (without the pointwise step): a spatial convolution that acts on each input
    channel separately.

    This function defines a 2D depthwise convolution operation with BN and ReLU6.
    # Arguments
        x: Tensor, input tensor of conv layer.
        depth_multiplier: Integer, number of depthwise convolution output
            channels generated for each input channel.
        kernel_size: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and height.
            Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
        Output tensor.
    """
    x = layers.DepthwiseConv2D(kernel_size=kernel_size, strides=strides, depth_multiplier=depth_multiplier,
                               padding='same', use_bias=False, kernel_regularizer=regularizers.l2(l=0.0003))(x)
    x = layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
    x = layers.ReLU(max_value=6)(x)
    return x
Example #15
Source Project: TF.Keras-Commonly-used-models   Author: 1044197988   File: SE_HRNet.py    License: Apache License 2.0
def build(self, input_shape, num_output, repetitions=3):
        input_x = Input(shape=input_shape)

        feature_maps = self.extract_multi_resolution_feature(repetitions=repetitions)(input_x)
        x = self.make_classification_head(feature_maps, self.filter_list)

        x = Conv2D(filters=x.get_shape().as_list()[-1] * 2, kernel_size=(1, 1), strides=(1, 1), padding='same', kernel_regularizer=l2(1e-4))(x)
        x = BatchNormalization(axis=-1)(x, training=self.training)
        x = Activation("relu")(x)
        x = GlobalAveragePooling2D()(x)
        x = Flatten()(x)

        x = Dense(units=num_output,
                  name='final_fully_connected',
                  kernel_initializer="he_normal",
                  kernel_regularizer=l2(1e-4),
                  activation='softmax')(x)

        return Model(inputs=input_x, outputs=x) 
Example #16
Source Project: TF.Keras-Commonly-used-models   Author: 1044197988   File: Darknet53.py    License: Apache License 2.0
def conv2d_unit(x, filters, kernels, strides=1):
    """Convolution Unit
    This function defines a 2D convolution operation with BN and LeakyReLU.
    # Arguments
        x: Tensor, input tensor of conv layer.
        filters: Integer, the dimensionality of the output space.
        kernels: An integer or tuple/list of 2 integers, specifying the
            width and height of the 2D convolution window.
        strides: An integer or tuple/list of 2 integers,
            specifying the strides of the convolution along the width and
            height. Can be a single integer to specify the same value for
            all spatial dimensions.
    # Returns
            Output tensor.
    """
    x = Conv2D(filters, kernels,
               padding='same',
               strides=strides,
               activation='linear',
               kernel_regularizer=l2(5e-4))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)

    return x 
Example #17
Source Project: TF.Keras-Commonly-used-models   Author: 1044197988   File: dual_path_network.py    License: Apache License 2.0
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x 
Example #18
Source Project: TF.Keras-Commonly-used-models   Author: 1044197988   File: dual_path_network.py    License: Apache License 2.0
def _bn_relu_conv_block(input, filters, kernel=(3, 3), stride=(1, 1), weight_decay=5e-4):
    ''' Adds a Batchnorm-Relu-Conv block for DPN
    Args:
        input: input tensor
        filters: number of output filters
        kernel: convolution kernel size
        stride: stride of convolution
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(filters, kernel, padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=stride)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x 
Example #19
Source Project: TF.Keras-Commonly-used-models   Author: 1044197988   File: ResNextFPN.py    License: Apache License 2.0
def bottleneck_block(input, filters=64, cardinality=8, strides=1, weight_decay=5e-4):
    init = input
    grouped_channels = int(filters / cardinality)

    if init.shape[-1] != 2 * filters:
        init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                      use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        init = BatchNormalization(axis=3)(init)

    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)

    x = grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)
    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=3)(x)

    x = add([init, x])
    x = Activation('relu')(x)
    return x 
Example #20
Source Project: tf2-yolo3   Author: akkaze   File: models.py    License: Apache License 2.0
def DarknetConv(x, filters, size, strides=1, padding='same', batch_norm=True):
    x = Conv2D(filters=filters,
               kernel_size=size,
               strides=strides,
               padding=padding,
               use_bias=not batch_norm,
               kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x 
Example #21
Source Project: DexiNed   Author: xavysp   File: model.py    License: MIT License
def __init__(self, up_scale,**kwargs):
        super(UpConvBlock, self).__init__(**kwargs)
        constant_features = 16
        k_reg = None if w_decay is None else l2(w_decay)
        features = []
        total_up_scale = 2 ** up_scale
        for i in range(up_scale):
            out_features = 1 if i == up_scale-1 else constant_features
            if i==up_scale-1:
                features.append(layers.Conv2D(
                    filters=out_features, kernel_size=(1,1), strides=(1,1), padding='same',
                    activation='relu', kernel_initializer=tf.initializers.TruncatedNormal(stddev=0.1),
                    kernel_regularizer=k_reg, use_bias=True))  # tf.initializers.TruncatedNormal(mean=0.)
                features.append(layers.Conv2DTranspose(
                    out_features, kernel_size=(total_up_scale,total_up_scale),
                    strides=(2,2), padding='same',
                    kernel_initializer=tf.initializers.TruncatedNormal(stddev=0.1),
                    kernel_regularizer=k_reg, use_bias=True))  # stddev=0.1
            else:
                features.append(layers.Conv2D(
                    filters=out_features, kernel_size=(1,1), strides=(1,1), padding='same',
                    activation='relu', kernel_initializer=weight_init,
                    kernel_regularizer=k_reg, use_bias=True))
                features.append(layers.Conv2DTranspose(
                    out_features, kernel_size=(total_up_scale,total_up_scale),
                    strides=(2,2), padding='same', use_bias=True,
                    kernel_initializer=weight_init, kernel_regularizer=k_reg))

        self.features = keras.Sequential(features) 
Example #22
Source Project: DexiNed   Author: xavysp   File: model.py    License: MIT License
def __init__(self, out_features, k_size=(1,1),stride=(1,1),
                 use_bs=False, use_act=False,w_init=None,**kwargs): # bias_init=tf.constant_initializer(0.0)
        super(SingleConvBlock, self).__init__(**kwargs)
        self.use_bn = use_bs
        self.use_act = use_act
        k_reg = None if w_decay is None else l2(w_decay)
        self.conv = layers.Conv2D(
            filters=out_features, kernel_size=k_size, strides=stride,
            padding='same', kernel_initializer=w_init,
            kernel_regularizer=k_reg)  # use_bias=True, bias_initializer=bias_init
        if self.use_bn:
            self.bn = layers.BatchNormalization()
        if self.use_act:
            self.relu = layers.ReLU() 
Example #23
Source Project: keras-squeeze-excite-network   Author: titu1994   File: se_densenet.py    License: MIT License
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    """ Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x 
Example #24
Source Project: keras-squeeze-excite-network   Author: titu1994   File: se_densenet.py    License: MIT License
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4):
    """ Apply BatchNorm, Relu 1x1, Conv2D, optional compression, dropout and Maxpooling2D
    Args:
        ip: keras tensor
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps
                    in the transition block.
        weight_decay: weight decay factor
    Returns: keras tensor, after applying batch_norm, relu-conv, avgpool and squeeze-excite
    """
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_filter * compression), (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    return x 
Example #25
Source Project: keras-squeeze-excite-network   Author: titu1994   File: se_resnext.py    License: MIT License
def __initial_conv_block(input_tensor, weight_decay=5e-4):
    """ Adds an initial convolution block, with batch normalization and relu activation
    Args:
        input_tensor: input Keras tensor
        weight_decay: weight decay factor
    Returns: a Keras tensor
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (3, 3), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(input_tensor)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    return x 
Example #26
Source Project: keras-squeeze-excite-network   Author: titu1994   File: se_resnext.py    License: MIT License
def __grouped_convolution_block(input_tensor, grouped_channels, cardinality, strides, weight_decay=5e-4):
    """ Adds a grouped convolution block. It is an equivalent block from the paper
    Args:
        input_tensor: input Keras tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a Keras tensor
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # bind c at definition time and slice directly in both data formats
        x = Lambda(lambda z, c=c: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last'
                   else z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input_tensor)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)

    return x 
Example #27
Source Project: alibi-detect   Author: SeldonIO   File: resnet.py    License: Apache License 2.0
def l2_regulariser(l2_regularisation: bool = True):
    """
    Apply L2 regularisation to kernel.

    Parameters
    ----------
    l2_regularisation
        Whether to apply L2 regularisation.

    Returns
    -------
    Kernel regularisation.
    """
    return l2(L2_WEIGHT_DECAY) if l2_regularisation else None 
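A hedged usage sketch for the helper above; L2_WEIGHT_DECAY is a module-level constant in the original resnet.py, so its value here is assumed:

import tensorflow as tf

L2_WEIGHT_DECAY = 1e-4  # assumed value; defined at module level in the original source

inputs = tf.keras.Input(shape=(32, 32, 3))
x = tf.keras.layers.Conv2D(
    16, (3, 3), kernel_regularizer=l2_regulariser(l2_regularisation=True))(inputs)
y = tf.keras.layers.Conv2D(
    16, (3, 3), kernel_regularizer=l2_regulariser(l2_regularisation=False))(x)  # no penalty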
Example #28
Source Project: bootcamp   Author: milvus-io   File: conv_models.py    License: Apache License 2.0
def identity_block(self, input_tensor, kernel_size, filters, stage, block):
        conv_name_base = f'res{stage}_{block}_branch'

        x = Conv2D(filters,
                   kernel_size=kernel_size,
                   strides=1,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001),
                   name=conv_name_base + '_2a')(input_tensor)
        x = BatchNormalization(name=conv_name_base + '_2a_bn')(x)
        x = self.clipped_relu(x)

        x = Conv2D(filters,
                   kernel_size=kernel_size,
                   strides=1,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001),
                   name=conv_name_base + '_2b')(x)
        x = BatchNormalization(name=conv_name_base + '_2b_bn')(x)

        x = self.clipped_relu(x)

        x = layers.add([x, input_tensor])
        x = self.clipped_relu(x)
        return x 
Example #29
Source Project: bootcamp   Author: milvus-io   File: conv_models.py    License: Apache License 2.0
def conv_and_res_block(self, inp, filters, stage):
        conv_name = 'conv{}-s'.format(filters)
        # TODO: why kernel_regularizer?
        o = Conv2D(filters,
                   kernel_size=5,
                   strides=2,
                   activation=None,
                   padding='same',
                   kernel_initializer='glorot_uniform',
                   kernel_regularizer=regularizers.l2(l=0.0001), name=conv_name)(inp)
        o = BatchNormalization(name=conv_name + '_bn')(o)
        o = self.clipped_relu(o)
        for i in range(3):
            o = self.identity_block(o, kernel_size=3, filters=filters, stage=stage, block=i)
        return o 
Example #30
Source Project: image_recognition   Author: tue-robotics   File: wide_resnet.py    License: MIT License
def __call__(self):
        logging.debug("Creating model...")

        assert ((self._depth - 4) % 6 == 0)
        n = (self._depth - 4) // 6  # integer division so `count` is an int under Python 3

        inputs = Input(shape=self._input_shape)

        n_stages = [16, 16 * self._k, 32 * self._k, 64 * self._k]

        conv1 = Convolution2D(filters=n_stages[0], kernel_size=(3, 3),
                              strides=(1, 1),
                              padding="same",
                              kernel_initializer=self._weight_init,
                              kernel_regularizer=l2(self._weight_decay),
                              use_bias=self._use_bias)(inputs)  # "One conv at the beginning (spatial size: 32x32)"

        # Add wide residual blocks
        block_fn = self._wide_basic
        conv2 = self._layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1, 1))(conv1)
        conv3 = self._layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2, 2))(conv2)
        conv4 = self._layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2, 2))(conv3)
        batch_norm = BatchNormalization(axis=self._channel_axis)(conv4)
        relu = Activation("relu")(batch_norm)

        # Classifier block
        pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
        flatten = Flatten()(pool)
        predictions_g = Dense(units=2, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax",
                              name="pred_gender")(flatten)
        predictions_a = Dense(units=101, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax",
                              name="pred_age")(flatten)
        model = Model(inputs=inputs, outputs=[predictions_g, predictions_a])

        return model