Python keras.layers.SeparableConv2D() Examples

The following are 22 code examples of keras.layers.SeparableConv2D(). You can go to the original project or source file by following the source information above each example. You may also want to check out all available functions and classes of the module keras.layers, or try the search function.
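Before the project examples, here is a minimal, self-contained sketch of typical SeparableConv2D usage. The values below (input shape, filter count, optimizer) are illustrative assumptions for demonstration and are not taken from any of the projects listed.

from keras.models import Sequential
from keras.layers import SeparableConv2D, GlobalAveragePooling2D, Dense

# Illustrative values only (assumed 64x64 RGB input, 10 classes).
model = Sequential()
# A depthwise-separable convolution: a per-channel spatial filter followed by
# a 1x1 pointwise convolution that mixes channels into 32 output filters.
model.add(SeparableConv2D(filters=32, kernel_size=(3, 3), padding='same',
                          activation='relu', input_shape=(64, 64, 3)))
model.add(GlobalAveragePooling2D())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy')
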
Example #1
Source Project: Keras-FasterRCNN   Author: you359   File: xception.py    License: MIT License
def classifier_layers(x, input_shape, trainable=False):

    # compile times on theano tend to be very high, so we use smaller ROI pooling regions to work around this
    # (hence a smaller stride in the region that follows the ROI pool)
    x = TimeDistributed(SeparableConv2D(1536, (3, 3),
                                        padding='same',
                                        use_bias=False),
                        name='block14_sepconv1')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv1_bn')(x)
    x = Activation('relu', name='block14_sepconv1_act')(x)

    x = TimeDistributed(SeparableConv2D(2048, (3, 3),
                                        padding='same',
                                        use_bias=False),
                        name='block14_sepconv2')(x)
    x = TimeDistributed(BatchNormalization(), name='block14_sepconv2_bn')(x)
    x = Activation('relu', name='block14_sepconv2_act')(x)

    TimeDistributed(GlobalAveragePooling2D(), name='avg_pool')(x)

    return x 
Example #2
Source Project: pose-regression   Author: dluvizon   File: layers.py    License: MIT License
def lin_interpolation_2d(inp, dim):

    num_rows, num_cols, num_filters = K.int_shape(inp)[1:]
    conv = SeparableConv2D(num_filters, (num_rows, num_cols), use_bias=False)
    x = conv(inp)

    w = conv.get_weights()
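    # The weights are overwritten below so this frozen separable convolution
    # implements a fixed linear interpolation: each depthwise kernel holds the
    # 2D linspace grid and the pointwise kernel is the identity, so output
    # channel i is the linspace-weighted sum of input channel i.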
    w[0].fill(0)
    w[1].fill(0)
    linspace = linspace_2d(num_rows, num_cols, dim=dim)

    for i in range(num_filters):
        w[0][:,:, i, 0] = linspace[:,:]
        w[1][0, 0, i, i] = 1.

    conv.set_weights(w)
    conv.trainable = False

    x = Lambda(lambda x: K.squeeze(x, axis=1))(x)
    x = Lambda(lambda x: K.squeeze(x, axis=1))(x)
    x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)

    return x 
Example #3
Source Project: tf-coreml   Author: tf-coreml   File: test_tf_keras_layers.py    License: Apache License 2.0
def test_tiny_separable_conv_valid_depth_multiplier(self):
      np.random.seed(1988)
      input_dim = 16
      input_shape = (input_dim, input_dim, 3)
      depth_multiplier = 5
      kernel_height = 3
      kernel_width = 3
      num_kernels = 40
      # Define a model
      model = Sequential()
      model.add(SeparableConv2D(filters=num_kernels, kernel_size=(kernel_height, kernel_width),
                                padding='valid', strides=(1, 1), depth_multiplier=depth_multiplier,
                                input_shape=input_shape))
      # Set some random weights
      model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
      # Test the keras model
      self._test_keras_model(model) 
Example #4
Source Project: tf-coreml   Author: tf-coreml   File: test_tf_keras_layers.py    License: Apache License 2.0
def test_tiny_separable_conv_same_fancy_depth_multiplier(self):
      np.random.seed(1988)
      input_dim = 16
      input_shape = (input_dim, input_dim, 3)
      depth_multiplier = 2
      kernel_height = 3
      kernel_width = 3
      num_kernels = 40
      # Define a model
      model = Sequential()
      model.add(SeparableConv2D(filters=num_kernels, kernel_size=(kernel_height, kernel_width),
                                padding='same', strides=(2, 2), activation='relu', depth_multiplier=depth_multiplier,
                                input_shape=input_shape))
      # Set some random weights
      model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
      # Test the keras model
      self._test_keras_model(model) 
Example #5
Source Project: FSA-Net   Author: shamangary   File: FSANET_model.py    License: Apache License 2.0
def _convBlock(self, x, num_filters, activation, kernel_size=(3,3)):
        x = SeparableConv2D(num_filters, kernel_size, padding='same')(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation(activation)(x)
        return x 
Example #6
Source Project: Keras-NASNet   Author: titu1994   File: nasnet.py    License: MIT License
def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), weight_decay=5e-5, id=None, weights=None):
    '''Adds 2 blocks of [relu-separable conv-batchnorm]

    # Arguments:
        ip: input tensor
        filters: number of output filters per layer
        kernel_size: kernel size of separable convolutions
        strides: strided convolution for downsampling
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        a Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('separable_conv_block_%s' % id):
        x = Activation('relu')(ip)
        x = SeparableConv2D(filters, kernel_size, strides=strides, name='separable_conv_1_%s' % id,
                            padding='same', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay),
                            weights=[weights['d1'], weights['p1']])(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name="separable_conv_1_bn_%s" % (id),
                               weights=weights['bn1'])(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, kernel_size, name='separable_conv_2_%s' % id,
                            padding='same', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay),
                            weights=[weights['d2'], weights['p2']])(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name="separable_conv_2_bn_%s" % (id),
                               weights=weights['bn2'])(x)
    return x 
Example #7
Source Project: Keras-NASNet   Author: titu1994   File: nasnet.py    License: MIT License
def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), weight_decay=5e-5, id=None):
    '''Adds 2 blocks of [relu-separable conv-batchnorm]

    # Arguments:
        ip: input tensor
        filters: number of output filters per layer
        kernel_size: kernel size of separable convolutions
        strides: strided convolution for downsampling
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        a Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('separable_conv_block_%s' % id):
        x = Activation('relu')(ip)
        x = SeparableConv2D(filters, kernel_size, strides=strides, name='separable_conv_1_%s' % id,
                            padding='same', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name="separable_conv_1_bn_%s" % (id))(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, kernel_size, name='separable_conv_2_%s' % id,
                            padding='same', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name="separable_conv_2_bn_%s" % (id))(x)
    return x 
Example #8
Source Project: keras-contrib   Author: keras-team   File: nasnet.py    License: MIT License
def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                          weight_decay=5e-5, id=None):
    '''Adds 2 blocks of [relu-separable conv-batchnorm]

    # Arguments:
        ip: input tensor
        filters: number of output filters per layer
        kernel_size: kernel size of separable convolutions
        strides: strided convolution for downsampling
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        a Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('separable_conv_block_%s' % id):
        x = Activation('relu')(ip)
        x = SeparableConv2D(filters, kernel_size, strides=strides,
                            name='separable_conv_1_%s' % id, padding='same',
                            use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                               epsilon=_BN_EPSILON,
                               name="separable_conv_1_bn_%s" % id)(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, kernel_size, name='separable_conv_2_%s' % id,
                            padding='same', use_bias=False,
                            kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY,
                               epsilon=_BN_EPSILON,
                               name="separable_conv_2_bn_%s" % id)(x)
    return x 
Example #9
Source Project: neural-image-assessment   Author: titu1994   File: nasnet.py    License: MIT License
def _separable_conv_block(ip, filters, kernel_size=(3, 3), strides=(1, 1), weight_decay=5e-5, id=None):
    '''Adds 2 blocks of [relu-separable conv-batchnorm]

    # Arguments:
        ip: input tensor
        filters: number of output filters per layer
        kernel_size: kernel size of separable convolutions
        strides: strided convolution for downsampling
        weight_decay: l2 regularization weight
        id: string id

    # Returns:
        a Keras tensor
    '''
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

    with K.name_scope('separable_conv_block_%s' % id):
        x = Activation('relu')(ip)
        x = SeparableConv2D(filters, kernel_size, strides=strides, name='separable_conv_1_%s' % id,
                            padding='same', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name="separable_conv_1_bn_%s" % (id))(x)
        x = Activation('relu')(x)
        x = SeparableConv2D(filters, kernel_size, name='separable_conv_2_%s' % id,
                            padding='same', use_bias=False, kernel_initializer='he_normal',
                            kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=channel_dim, momentum=_BN_DECAY, epsilon=_BN_EPSILON,
                               name="separable_conv_2_bn_%s" % (id))(x)
    return x 
Example #10
Source Project: faceswap   Author: deepfakes   File: nn_blocks.py    License: GNU General Public License v3.0
def conv_sep(self, input_tensor, filters, kernel_size=5, strides=2, **kwargs):
        """ Seperable Convolution Layer.

        Parameters
        ----------
        input_tensor: tensor
            The input tensor to the layer
        filters: int
            The dimensionality of the output space (i.e. the number of output filters in the
            convolution)
        kernel_size: int, optional
            An integer or tuple/list of 2 integers, specifying the height and width of the 2D
            convolution window. Can be a single integer to specify the same value for all spatial
            dimensions. Default: 5
        strides: tuple or int, optional
            An integer or tuple/list of 2 integers, specifying the strides of the convolution along
            the height and width. Can be a single integer to specify the same value for all spatial
            dimensions. Default: `2`
        kwargs: dict
            Any additional Keras standard layer keyword arguments

        Returns
        -------
        tensor
            The output tensor from the Separable Convolution layer
        """
        logger.debug("input_tensor: %s, filters: %s, kernel_size: %s, strides: %s, kwargs: %s)",
                     input_tensor, filters, kernel_size, strides, kwargs)
        name = self._get_name("separableconv2d_{}".format(input_tensor.shape[1]))
        kwargs = self._set_default_initializer(kwargs)
        var_x = SeparableConv2D(filters,
                                kernel_size=kernel_size,
                                strides=strides,
                                padding="same",
                                name="{}_seperableconv2d".format(name),
                                **kwargs)(input_tensor)
        var_x = Activation("relu", name="{}_relu".format(name))(var_x)
        return var_x 
Example #11
Source Project: pose-regression   Author: dluvizon   File: layers.py    License: MIT License
def separable_act_conv_bn(x, filters, size, strides=(1, 1), padding='same',
        name=None):
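    """Pre-activation block: ReLU -> SeparableConv2D -> BatchNormalization."""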
    if name is not None:
        conv_name = name + '_conv'
        act_name = name + '_act'
    else:
        conv_name = None
        act_name = None

    x = Activation('relu', name=act_name)(x)
    x = SeparableConv2D(filters, size, strides=strides, padding=padding,
            use_bias=False, name=conv_name)(x)
    x = BatchNormalization(axis=-1, scale=False, name=name)(x)
    return x 
Example #12
Source Project: deephar   Author: dluvizon   File: layers.py    License: MIT License
def sepconv2d(x, filters, kernel_size, strides=(1, 1), padding='same',
        name=None):
    """SeparableConv2D possibly wrapped by a TimeDistributed layer."""
    f = SeparableConv2D(filters, kernel_size, strides=strides, padding=padding,
            use_bias=False, name=name)

    return TimeDistributed(f, name=name)(x) if K.ndim(x) == 5 else f(x) 
Example #13
Source Project: deephar   Author: dluvizon   File: layers.py    License: MIT License
def lin_interpolation_2d(x, axis, vmin=0., vmax=1., name=None):
    """Implements a 2D linear interpolation using a depth size separable
    convolution (non trainable).
    """
    assert K.ndim(x) in [4, 5], \
            'Input tensor must have ndim 4 or 5 ({})'.format(K.ndim(x))

    if 'global_sam_cnt' not in globals():
        global global_sam_cnt
        global_sam_cnt = 0

    if name is None:
        name = 'custom_sam_%d' % global_sam_cnt
        global_sam_cnt += 1

    if K.ndim(x) == 4:
        num_rows, num_cols, num_filters = K.int_shape(x)[1:]
    else:
        num_rows, num_cols, num_filters = K.int_shape(x)[2:]

    f = SeparableConv2D(num_filters, (num_rows, num_cols), use_bias=False,
            name=name)
    x = TimeDistributed(f, name=name)(x) if K.ndim(x) == 5 else f(x)

    w = f.get_weights()
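    # The weights are overwritten below so this frozen separable convolution
    # implements a fixed linear interpolation: each depthwise kernel holds the
    # 2D linspace grid and the pointwise kernel is the identity.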
    w[0].fill(0)
    w[1].fill(0)
    linspace = linspace_2d(num_rows, num_cols, dim=axis)

    for i in range(num_filters):
        w[0][:,:, i, 0] = linspace[:,:]
        w[1][0, 0, i, i] = 1.

    f.set_weights(w)
    f.trainable = False

    x = Lambda(lambda x: K.squeeze(x, axis=-2))(x)
    x = Lambda(lambda x: K.squeeze(x, axis=-2))(x)
    x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)

    return x 
Example #14
Source Project: deephar   Author: dluvizon   File: layers.py    License: MIT License
def separable_conv_bn_act(x, filters, size, strides=(1, 1), padding='same',
        name=None):
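    """Block: SeparableConv2D -> BatchNormalization -> ReLU."""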
    if name is not None:
        conv_name = name + '_conv'
        bn_name = name + '_bn'
    else:
        conv_name = None
        bn_name = None

    x = SeparableConv2D(filters, size, strides=strides, padding=padding,
            use_bias=False, name=conv_name)(x)
    x = BatchNormalization(axis=-1, scale=False, name=bn_name)(x)
    x = Activation('relu', name=name)(x)
    return x 
Example #15
Source Project: deephar   Author: dluvizon   File: layers.py    License: MIT License
def separable_act_conv_bn(x, filters, size, strides=(1, 1), padding='same',
        name=None):
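    """Pre-activation block: ReLU -> SeparableConv2D -> BatchNormalization."""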
    if name is not None:
        conv_name = name + '_conv'
        act_name = name + '_act'
    else:
        conv_name = None
        act_name = None

    x = Activation('relu', name=act_name)(x)
    x = SeparableConv2D(filters, size, strides=strides, padding=padding,
            use_bias=False, name=conv_name)(x)
    x = BatchNormalization(axis=-1, scale=False, name=name)(x)
    return x 
Example #16
Source Project: deephar   Author: dluvizon   File: layers.py    License: MIT License
def separable_conv_bn(x, filters, size, strides=(1, 1), padding='same',
        name=None):
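    """Block: SeparableConv2D -> BatchNormalization."""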
    if name is not None:
        conv_name = name + '_conv'
    else:
        conv_name = None

    x = SeparableConv2D(filters, size, strides=strides, padding=padding,
            use_bias=False, name=conv_name)(x)
    x = BatchNormalization(axis=-1, scale=False, name=name)(x)
    return x 
Example #17
Source Project: pirateAI   Author: HugoCMU   File: model_chunks.py    License: MIT License
def a3c_sepconv(x, params):
    """
    Feed-forward model used in the A3C paper, but with separable convolutions
    :param x: input tensor
    :param params: {dict} hyperparams (sub-selection)
    :return: output tensor
    :raises ValueError: could not find parameter
    """
    x = layers.SeparableConv2D(filters=16, kernel_size=8, strides=4, activation='relu')(x)
    x = layers.SeparableConv2D(filters=32, kernel_size=4, strides=2, activation='relu')(x)
    return x 
Example #18
Source Project: Fabrik   Author: Cloud-CV   File: layers_export.py    License: GNU General Public License v3.0
def depthwiseConv(layer, layer_in, layerId, tensor=True):
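    # Builds a keras SeparableConv2D from the Fabrik layer-parameter dict;
    # 'custom' padding is emulated with a preceding ZeroPadding2D layer.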
    out = {}
    padding = get_padding(layer)
    filters = layer['params']['num_output']
    k_h, k_w = layer['params']['kernel_h'], layer['params']['kernel_w']
    s_h, s_w = layer['params']['stride_h'], layer['params']['stride_w']
    depth_multiplier = layer['params']['depth_multiplier']
    use_bias = layer['params']['use_bias']
    depthwise_initializer = layer['params']['depthwise_initializer']
    pointwise_initializer = layer['params']['pointwise_initializer']
    bias_initializer = layer['params']['bias_initializer']
    if (padding == 'custom'):
        p_h, p_w = layer['params']['pad_h'], layer['params']['pad_w']
        out[layerId + 'Pad'] = ZeroPadding2D(padding=(p_h, p_w))(*layer_in)
        padding = 'valid'
        layer_in = [out[layerId + 'Pad']]
    depthwise_regularizer = regularizerMap[layer['params']['depthwise_regularizer']]
    pointwise_regularizer = regularizerMap[layer['params']['pointwise_regularizer']]
    bias_regularizer = regularizerMap[layer['params']['bias_regularizer']]
    activity_regularizer = regularizerMap[layer['params']['activity_regularizer']]
    depthwise_constraint = constraintMap[layer['params']['depthwise_constraint']]
    pointwise_constraint = constraintMap[layer['params']['pointwise_constraint']]
    bias_constraint = constraintMap[layer['params']['bias_constraint']]
    out[layerId] = SeparableConv2D(filters, [k_h, k_w], strides=(s_h, s_w), padding=padding,
                                   depth_multiplier=depth_multiplier, use_bias=use_bias,
                                   depthwise_initializer=depthwise_initializer,
                                   pointwise_initializer=pointwise_initializer,
                                   bias_initializer=bias_initializer,
                                   depthwise_regularizer=depthwise_regularizer,
                                   pointwise_regularizer=pointwise_regularizer,
                                   bias_regularizer=bias_regularizer,
                                   activity_regularizer=activity_regularizer,
                                   depthwise_constraint=depthwise_constraint,
                                   pointwise_constraint=pointwise_constraint,
                                   bias_constraint=bias_constraint,)(*layer_in)
    return out 
Example #19
Source Project: face_classification   Author: oarriaga   File: cnn.py    License: MIT License
def big_XCEPTION(input_shape, num_classes):
    img_input = Input(input_shape)
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False)(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    x = Conv2D(num_classes, (3, 3),
               # kernel_regularizer=regularization,
               padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model 
Example #20
Source Project: Emotion-recognition   Author: omar178   File: cnn.py    License: MIT License
def big_XCEPTION(input_shape, num_classes):
    img_input = Input(input_shape)
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False)(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    x = Conv2D(num_classes, (3, 3),
               # kernel_regularizer=regularization,
               padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model 
Example #21
Source Project: emotion_recognition   Author: taoyafan   File: cnn.py    License: MIT License
def big_XCEPTION(input_shape, num_classes):
    img_input = Input(input_shape)
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False)(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    x = Conv2D(num_classes, (3, 3),
               # kernel_regularizer=regularization,
               padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model 
Example #22
Source Project: EmotionClassifier   Author: XiuweiHe   File: cnn.py    License: GNU General Public License v3.0
def big_XCEPTION(input_shape, num_classes):
    img_input = Input(input_shape)
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False)(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1), strides=(2, 2),
                      padding='same', use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu', name='block3_sepconv1_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])
    x = Conv2D(num_classes, (3, 3),
               # kernel_regularizer=regularization,
               padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
    return model