Python tensorflow.keras.layers.MaxPooling2D() Examples

The following are 30 code examples of tensorflow.keras.layers.MaxPooling2D(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.layers, or try the search function.
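
Before diving into the examples, a quick refresher: MaxPooling2D downsamples feature maps by taking the maximum over each pooling window. The short sketch below is a standalone illustration (not taken from any of the projects listed here) of how pool_size, strides, and padding affect the output shape:

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal((1, 28, 28, 8))   # NHWC batch of feature maps

# Defaults: pool_size=(2, 2), strides default to pool_size, padding='valid'
print(layers.MaxPooling2D()(x).shape)                                          # (1, 14, 14, 8)

# Stride-1 pooling with 'same' padding keeps the spatial size unchanged
print(layers.MaxPooling2D(pool_size=3, strides=1, padding='same')(x).shape)    # (1, 28, 28, 8)

# 'valid' padding drops the incomplete window on odd-sized inputs
x_odd = tf.random.normal((1, 27, 27, 8))
print(layers.MaxPooling2D(pool_size=2, strides=2)(x_odd).shape)                # (1, 13, 13, 8)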
Example #1
Source File: factory.py    From mtcnn with MIT License
def build_pnet(self, input_shape=None):
        if input_shape is None:
            input_shape = (None, None, 3)

        p_inp = Input(input_shape)

        p_layer = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_inp)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)
        p_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(p_layer)

        p_layer = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)

        p_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)

        p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer)
        p_layer_out1 = Softmax(axis=3)(p_layer_out1)

        p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer)

        p_net = Model(p_inp, [p_layer_out2, p_layer_out1])

        return p_net 
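
For reference, the PNet above mixes 'valid' convolutions with a 'same'-padded stride-2 pooling layer. A standalone shape check (my own sketch, not part of the mtcnn project) for the canonical 12x12 PNet training patch:

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal((1, 12, 12, 3))                        # 12x12 PNet training patch
x = layers.Conv2D(10, (3, 3), padding="valid")(x)           # -> (1, 10, 10, 10)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), padding="same")(x)
print(x.shape)                                              # (1, 5, 5, 10): 'same' rounds up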
Example #2
Source File: dual_path_network.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def _initial_conv_block_inception(input, initial_conv_filters, weight_decay=5e-4):
    ''' Adds an initial conv block, with batch norm and relu for the DPN
    Args:
        input: input tensor
        initial_conv_filters: number of filters for initial conv block
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(initial_conv_filters, (7, 7), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x 
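
The stem above downsamples twice (a stride-2 convolution followed by stride-2 pooling), so spatial resolution drops by a factor of four. A quick standalone check, assuming a 224x224 RGB input and 64 initial filters (placeholder values, not from the project):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal((1, 224, 224, 3))
x = layers.Conv2D(64, (7, 7), strides=(2, 2), padding='same', use_bias=False)(x)  # -> 112x112
x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)                # -> 56x56
print(x.shape)   # (1, 56, 56, 64)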
Example #3
Source File: model.py    From Advanced-Deep-Learning-with-Keras with MIT License
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):

    x = conv2d(inputs,
               filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               name='conv'+postfix)
    x = BatchNormalization(name="bn"+postfix)(x)
    x = ELU(name='elu'+postfix)(x)
    if use_maxpool:
        x = MaxPooling2D(name='pool'+postfix)(x)
    return x 
Example #4
Source File: model.py    From Advanced-Deep-Learning-with-Keras with MIT License
def conv_layer(inputs,
               filters=32,
               kernel_size=3,
               strides=1,
               use_maxpool=True,
               postfix=None,
               activation=None):
    """Helper function to build Conv2D-BN-ReLU layer
        with optional MaxPooling2D.
    """

    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               kernel_initializer='he_normal',
               name="conv_"+postfix,
               padding='same')(inputs)
    x = BatchNormalization(name="bn_"+postfix)(x)
    x = Activation('relu', name='relu_'+postfix)(x)
    if use_maxpool:
        x = MaxPooling2D(name='pool'+postfix)(x)
    return x 
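
A minimal usage sketch for the helper above, assuming the conv_layer function is in scope together with the file's usual tf.keras imports (Input, Conv2D, BatchNormalization, Activation, MaxPooling2D); the input size is a hypothetical 64x64 RGB image:

from tensorflow.keras.layers import Input

inputs = Input(shape=(64, 64, 3))                                              # hypothetical input
x = conv_layer(inputs, filters=32, kernel_size=3, postfix="1")                 # Conv-BN-ReLU + 2x2 pool -> 32x32
x = conv_layer(x, filters=64, kernel_size=3, postfix="2", use_maxpool=False)   # no pooling, stays 32x32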
Example #5
Source File: run.py    From polyaxon with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3), activation=args.conv_activation, input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))

    model.summary()

    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])

    return model 
Example #6
Source File: run.py    From polyaxon-examples with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3), activation=args.conv_activation, input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))

    model.summary()

    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])

    return model 
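
The two get_model variants above expect an args object carrying the hyperparameters plus an OPTIMIZERS lookup defined elsewhere in the script. A hypothetical invocation (the names and values below are assumptions for illustration, not part of either project):

import argparse
import tensorflow as tf
from tensorflow.keras import layers, models

OPTIMIZERS = {"adam": tf.keras.optimizers.Adam}   # assumed mapping of optimizer names

args = argparse.Namespace(
    conv1_size=32, conv2_size=64, conv_activation="relu",
    dropout=0.25, hidden1_size=128, dense_activation="relu",
    optimizer="adam", learning_rate=0.001, loss="sparse_categorical_crossentropy",
)
model = get_model(args)   # builds and compiles the MNIST-shaped CNN above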
Example #7
Source File: xception.py    From keras-tuner with Apache License 2.0
def residual(x, num_filters,
             kernel_size=(3, 3),
             activation='relu',
             pool_strides=(2, 2),
             max_pooling=True):
    "Residual block."
    if max_pooling:
        res = layers.Conv2D(num_filters, kernel_size=(
            1, 1), strides=pool_strides, padding='same')(x)
    elif num_filters != keras.backend.int_shape(x)[-1]:
        res = layers.Conv2D(num_filters, kernel_size=(1, 1), padding='same')(x)
    else:
        res = x

    x = sep_conv(x, num_filters, kernel_size, activation)
    x = sep_conv(x, num_filters, kernel_size, activation)
    if max_pooling:
        x = layers.MaxPooling2D(
            kernel_size, strides=pool_strides, padding='same')(x)

    x = layers.add([x, res])
    return x 
Example #8
Source File: leap.py    From DeepPoseKit with Apache License 2.0
def __init__(
        self,
        n_layers,
        filters,
        kernel_size,
        activation,
        pooling="max",
        initializer="glorot_uniform",
        batchnorm=False,
        use_bias=True,
        name=None,
    ):
        self.n_layers = n_layers
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.initializer = initializer
        self.use_bias = use_bias
        self.pooling = pooling
        if activation.lower() != "selu" and batchnorm:
            self.batchnorm = True
        else:
            self.batchnorm = False
        if activation.lower() == "selu":
            self.initializer = "lecun_normal"
        if pooling == "average":
            self.Pooling2D = layers.AveragePooling2D
        else:
            self.Pooling2D = layers.MaxPooling2D
        self.name = name 
Example #9
Source File: factory.py    From mtcnn with MIT License
def build_onet(self, input_shape=None):
        if input_shape is None:
            input_shape = (48, 48, 3)

        o_inp = Input(input_shape)
        o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer)

        o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer)

        o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer)

        o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)

        o_layer = Flatten()(o_layer)
        o_layer = Dense(256)(o_layer)
        o_layer = PReLU()(o_layer)

        o_layer_out1 = Dense(2)(o_layer)
        o_layer_out1 = Softmax(axis=1)(o_layer_out1)
        o_layer_out2 = Dense(4)(o_layer)
        o_layer_out3 = Dense(10)(o_layer)

        o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1])
        return o_net 
Example #10
Source File: conftest.py    From alibi with Apache License 2.0
def conv_net(request):
    """
    Creates a simple CNN classifier on the data in the request. This is a
    module scoped fixture, so if you need to modify the state of the objects
    returned, copy the objects first.
    """
    import tensorflow as tf
    if tf.executing_eagerly():
        tf.compat.v1.disable_eager_execution()
    data = request.param
    x_train, y_train = data['X_train'], data['y_train']

    def model():
        x_in = Input(shape=(28, 28, 1))
        x = Conv2D(filters=8, kernel_size=2, padding='same', activation='relu')(x_in)
        x = MaxPooling2D(pool_size=2)(x)
        x = Dropout(0.3)(x)
        x = Flatten()(x)
        x_out = Dense(10, activation='softmax')(x)
        cnn = Model(inputs=x_in, outputs=x_out)
        cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        return cnn

    cnn = model()
    cnn.fit(x_train, y_train, batch_size=256, epochs=1)

    return cnn


# High level fixtures that help us check if the code logs any warnings/correct 
Example #11
Source File: cifar10.py    From mia with MIT License
def target_model_fn():
    """The architecture of the target (victim) model.

    The attack is white-box, hence the attacker is assumed to know this architecture too."""

    model = tf.keras.models.Sequential()

    model.add(
        layers.Conv2D(
            32,
            (3, 3),
            activation="relu",
            padding="same",
            input_shape=(WIDTH, HEIGHT, CHANNELS),
        )
    )
    model.add(layers.Conv2D(32, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    model.add(layers.Conv2D(64, (3, 3), activation="relu", padding="same"))
    model.add(layers.Conv2D(64, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    model.add(layers.Flatten())

    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
    model.compile("adam", loss="categorical_crossentropy", metrics=["accuracy"])

    return model 
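
WIDTH, HEIGHT, CHANNELS and NUM_CLASSES are module-level constants in the mia script; for CIFAR-10 they would presumably be 32, 32, 3 and 10. The first conv/pool block then shrinks the feature maps as in this standalone sketch (my own shape walk-through, not project code):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal((1, 32, 32, 3))                                   # assumed CIFAR-10 input
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)    # 32x32x32
x = layers.Conv2D(32, (3, 3), activation="relu")(x)                    # 30x30x32 ('valid')
x = layers.MaxPooling2D(pool_size=(2, 2))(x)                           # 15x15x32
print(x.shape)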
Example #12
Source File: model.py    From polyaxon with Apache License 2.0
def create_model(
    conv1_size,
    conv2_size,
    dropout,
    hidden1_size,
    conv_activation,
    dense_activation,
    optimizer,
    learning_rate,
    loss,
    num_classes,
):
    model = Sequential()
    model.add(Conv2D(conv1_size, (5, 5), activation=conv_activation,
                     input_shape=(img_width, img_height, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(conv2_size, (5, 5), activation=conv_activation))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(Dense(hidden1_size, activation=dense_activation))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(
        optimizer=OPTIMIZERS[optimizer](learning_rate=learning_rate),
        loss=loss,
        metrics=['accuracy'],
    )

    return model 
Example #13
Source File: inception_v2.py    From keras_imagenet with MIT License
def inception_s2(x, filters):
    """Utility function to implement the 'stride-2' inception module.

    # Arguments
        x: input tensor.
        filters: a list of filter sizes.

    # Returns
        Output tensor after applying the 'stride-2' inception.
    """
    if len(filters) != 2:
        raise ValueError('filters should have 2 components')
    if len(filters[0]) != 2 or len(filters[1]) != 2:
        raise ValueError('incorrect spec of filters')

    branch3x3 = conv2d_bn(x, filters[0][0], (1, 1))
    branch3x3 = conv2d_bn(branch3x3, filters[0][1], (3, 3), strides=(2, 2))

    branch5x5 = conv2d_bn(x, filters[1][0], (1, 1))
    branch5x5 = conv2d_bn(branch5x5, filters[1][1], (3, 3))
    branch5x5 = conv2d_bn(branch5x5, filters[1][1], (3, 3), strides=(2, 2))

    # use MaxPooling2D here
    branchpool = layers.MaxPooling2D(
        pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.concatenate(
        [branch3x3, branch5x5, branchpool], axis=concat_axis)
    return x 
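
conv2d_bn is a helper defined elsewhere in the keras_imagenet code; a minimal stand-in (an assumption for illustration, not the project's actual implementation) could look like this:

from tensorflow.keras import backend, layers

def conv2d_bn(x, filters, kernel_size, strides=(1, 1), padding='same'):
    """Hypothetical Conv2D -> BatchNorm -> ReLU helper."""
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.Conv2D(filters, kernel_size, strides=strides,
                      padding=padding, use_bias=False)(x)
    x = layers.BatchNormalization(axis=channel_axis)(x)
    return layers.Activation('relu')(x)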
Example #14
Source File: inception_mobilenet.py    From keras_imagenet with MIT License
def _mixed_s2(x, filters, name=None):
    """Utility function to implement the 'stride-2' mixed block.

    # Arguments
        x: input tensor.
        filters: a list of filter sizes.
        name: name of the ops

    # Returns
        Output tensor after applying the 'stride-2' mixed block.
    """
    if len(filters) != 2:
        raise ValueError('filters should have 2 components')

    name1 = name + '_3x3' if name else None
    branch3x3 = _depthwise_conv2d_bn(x, filters[0],
                                     kernel_size=(3, 3),
                                     strides=(2, 2),
                                     name=name1)

    name1 = name + '_5x5' if name else None
    branch5x5 = _depthwise_conv2d_bn(x, filters[1],
                                     kernel_size=(5, 5),
                                     strides=(2, 2),
                                     name=name1)

    name1 = name + '_pool' if name else None
    branchpool = layers.MaxPooling2D(pool_size=(3, 3), padding='same',
                                     strides=(2, 2), name=name1)(x)

    concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.concatenate([branch3x3, branch5x5, branchpool],
                           axis=concat_axis,
                           name=name)
    return x 
Example #15
Source File: bayesian_unet.py    From bcnn with MIT License
def down_stage(inputs, filters, kernel_size=3,
               activation="relu", padding="SAME"):
    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(inputs)
    conv = GroupNormalization()(conv)
    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)
    pool = MaxPooling2D()(conv)
    return conv, pool 
Example #16
Source File: layers.py    From keras-YOLOv3-model-set with MIT License
def Spp_Conv2D_BN_Leaky(x, num_filters):
    y1 = MaxPooling2D(pool_size=(5,5), strides=(1,1), padding='same')(x)
    y2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(x)
    y3 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(x)

    y = compose(
            Concatenate(),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))([y1, y2, y3, x])
    return y 
Example #17
Source File: layers.py    From keras-YOLOv3-model-set with MIT License
def Spp_Conv2D_BN_Leaky(x, num_filters):
    y1 = MaxPooling2D(pool_size=(5,5), strides=(1,1), padding='same')(x)
    y2 = MaxPooling2D(pool_size=(9,9), strides=(1,1), padding='same')(x)
    y3 = MaxPooling2D(pool_size=(13,13), strides=(1,1), padding='same')(x)

    y = compose(
            Concatenate(),
            DarknetConv2D_BN_Leaky(num_filters, (1,1)))([y1, y2, y3, x])
    return y 
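
Because every pooling branch in the SPP blocks above uses stride 1 with 'same' padding, the block changes only the channel count, never the spatial resolution. A quick standalone check of that property (the feature-map size is an arbitrary example):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal((1, 13, 13, 512))          # e.g. a 13x13 backbone feature map
for k in (5, 9, 13):
    y = layers.MaxPooling2D(pool_size=(k, k), strides=(1, 1), padding='same')(x)
    print(k, y.shape)                           # spatial dims stay 13x13 for every kernel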
Example #18
Source File: GoogleNet.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def Inception(x,nb_filter):  
    branch1x1 = Conv2d_BN(x,nb_filter,(1,1), padding='same',strides=(1,1),name=None)  
  
    branch3x3 = Conv2d_BN(x,nb_filter,(1,1), padding='same',strides=(1,1),name=None)  
    branch3x3 = Conv2d_BN(branch3x3,nb_filter,(3,3), padding='same',strides=(1,1),name=None)  
  
    branch5x5 = Conv2d_BN(x,nb_filter,(1,1), padding='same',strides=(1,1),name=None)  
    branch5x5 = Conv2d_BN(branch5x5,nb_filter,(5,5), padding='same',strides=(1,1),name=None)
  
    branchpool = MaxPooling2D(pool_size=(3,3),strides=(1,1),padding='same')(x)  
    branchpool = Conv2d_BN(branchpool,nb_filter,(1,1),padding='same',strides=(1,1),name=None)  
  
    x = concatenate([branch1x1,branch3x3,branch5x5,branchpool],axis=3)  
  
    return x 
Example #19
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self,classes=16):
        super(U_Net, self).__init__()

        n1 = 32
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = MaxPooling2D(strides=2)
        self.Maxpool2 = MaxPooling2D(strides=2)
        self.Maxpool3 = MaxPooling2D(strides=2)
        self.Maxpool4 = MaxPooling2D(strides=2)

        self.Conv1 = conv_block(filters[0])
        self.Conv2 = conv_block(filters[1])
        self.Conv3 = conv_block(filters[2])
        self.Conv4 = conv_block(filters[3])
        self.Conv5 = conv_block(filters[4])

        self.Up5 = up_conv(filters[3])
        self.Up_conv5 = conv_block(filters[3])

        self.Up4 = up_conv(filters[2])
        self.Up_conv4 = conv_block(filters[2])

        self.Up3 = up_conv(filters[1])
        self.Up_conv3 = conv_block(filters[1])

        self.Up2 = up_conv(filters[0])
        self.Up_conv2 = conv_block(filters[0])

        self.Conv = Conv2D(classes,kernel_size=1, strides=1, padding='same',activation='softmax',name='final_layer') 
Example #20
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, classes=16, t=2):
        super(R2U_Net, self).__init__()

        n1 = 32
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool = MaxPooling2D(strides=2)
        self.Maxpool1 = MaxPooling2D(strides=2)
        self.Maxpool2 = MaxPooling2D(strides=2)
        self.Maxpool3 = MaxPooling2D(strides=2)

        self.RRCNN1 = RRCNN_block(filters[0], t=t)

        self.RRCNN2 = RRCNN_block(filters[1], t=t)

        self.RRCNN3 = RRCNN_block(filters[2], t=t)

        self.RRCNN4 = RRCNN_block(filters[3], t=t)

        self.RRCNN5 = RRCNN_block(filters[4], t=t)

        self.Up5 = up_conv(filters[3])
        self.Up_RRCNN5 = RRCNN_block(filters[3], t=t)

        self.Up4 = up_conv(filters[2])
        self.Up_RRCNN4 = RRCNN_block(filters[2], t=t)

        self.Up3 = up_conv(filters[1])
        self.Up_RRCNN3 = RRCNN_block(filters[1], t=t)

        self.Up2 = up_conv(filters[0])
        self.Up_RRCNN2 = RRCNN_block(filters[0], t=t)

        self.Conv = Conv2D(classes, kernel_size=1, strides=1, padding='same',name='final_layer') 
Example #21
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, classes=16, t=2):
        super(R2AttU_Net, self).__init__()

        n1 = 32
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.Maxpool1 = MaxPooling2D(strides=2)
        self.Maxpool2 = MaxPooling2D(strides=2)
        self.Maxpool3 = MaxPooling2D(strides=2)
        self.Maxpool4 = MaxPooling2D(strides=2)

        self.RRCNN1 = RRCNN_block(filters[0], t=t)
        self.RRCNN2 = RRCNN_block(filters[1], t=t)
        self.RRCNN3 = RRCNN_block(filters[2], t=t)
        self.RRCNN4 = RRCNN_block(filters[3], t=t)
        self.RRCNN5 = RRCNN_block(filters[4], t=t)

        self.Up5 = up_conv(filters[3])
        self.Att5 = Attention_block(filters[3])
        self.Up_RRCNN5 = RRCNN_block(filters[3], t=t)

        self.Up4 = up_conv(filters[2])
        self.Att4 = Attention_block(filters[2])
        self.Up_RRCNN4 = RRCNN_block(filters[2], t=t)

        self.Up3 = up_conv(filters[1])
        self.Att3 = Attention_block(filters[1])
        self.Up_RRCNN3 = RRCNN_block(filters[1], t=t)

        self.Up2 = up_conv(filters[0])
        self.Att2 = Attention_block(filters[0])
        self.Up_RRCNN2 = RRCNN_block(filters[0], t=t)

        self.Conv = Conv2D(classes, kernel_size=1, strides=1, padding='same',activation='softmax',name='final_layer') 
Example #22
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, classes=16):
        super(NestedUNet, self).__init__()

        n1 = 32
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.pool = MaxPooling2D(strides=2)
        self.Up = UpSampling2D()

        self.conv0_0 = conv_block_nested(filters[0], filters[0])
        self.conv1_0 = conv_block_nested(filters[1], filters[1])
        self.conv2_0 = conv_block_nested(filters[2], filters[2])
        self.conv3_0 = conv_block_nested(filters[3], filters[3])
        self.conv4_0 = conv_block_nested(filters[4], filters[4])

        self.conv0_1 = conv_block_nested(filters[0], filters[0])
        self.conv1_1 = conv_block_nested(filters[1], filters[1])
        self.conv2_1 = conv_block_nested(filters[2], filters[2])
        self.conv3_1 = conv_block_nested(filters[3], filters[3])

        self.conv0_2 = conv_block_nested(filters[0], filters[0])
        self.conv1_2 = conv_block_nested(filters[1], filters[1])
        self.conv2_2 = conv_block_nested(filters[2], filters[2])

        self.conv0_3 = conv_block_nested(filters[0], filters[0])
        self.conv1_3 = conv_block_nested(filters[1], filters[1])

        self.conv0_4 = conv_block_nested(filters[0], filters[0])

        self.final = Conv2D(classes, kernel_size=1,activation='softmax',name='final_layer') 
Example #23
Source File: common.py    From tf2-mobile-pose-estimation with Apache License 2.0
def __init__(self,
                 pool_size,
                 strides,
                 padding=0,
                 ceil_mode=False,
                 data_format="channels_last",
                 **kwargs):
        super(MaxPool2d, self).__init__(**kwargs)
        if isinstance(pool_size, int):
            pool_size = (pool_size, pool_size)
        if isinstance(strides, int):
            strides = (strides, strides)
        if isinstance(padding, int):
            padding = (padding, padding)

        self.use_stride = (strides[0] > 1) or (strides[1] > 1)
        self.ceil_mode = ceil_mode and self.use_stride
        self.use_pad = (padding[0] > 0) or (padding[1] > 0)

        if self.ceil_mode:
            self.padding = padding
            self.pool_size = pool_size
            self.strides = strides
            self.data_format = data_format
        elif self.use_pad:
            if is_channels_first(data_format):
                self.paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
            else:
                self.paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]

        self.pool = nn.MaxPooling2D(
            pool_size=pool_size,
            strides=strides,
            padding="valid",
            data_format=data_format) 
Example #24
Source File: common.py    From imgclsmob with MIT License
def __init__(self,
                 pool_size,
                 strides,
                 padding=0,
                 ceil_mode=False,
                 data_format="channels_last",
                 **kwargs):
        super(MaxPool2d, self).__init__(**kwargs)
        if isinstance(pool_size, int):
            pool_size = (pool_size, pool_size)
        if isinstance(strides, int):
            strides = (strides, strides)
        if isinstance(padding, int):
            padding = (padding, padding)

        self.use_stride = (strides[0] > 1) or (strides[1] > 1)
        self.ceil_mode = ceil_mode and self.use_stride
        self.use_pad = (padding[0] > 0) or (padding[1] > 0)

        if self.ceil_mode:
            self.padding = padding
            self.pool_size = pool_size
            self.strides = strides
            self.data_format = data_format
        elif self.use_pad:
            if is_channels_first(data_format):
                self.paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
            else:
                self.paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]

        self.pool = nn.MaxPooling2D(
            pool_size=pool_size,
            strides=strides,
            padding="valid",
            data_format=data_format) 
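
The two MaxPool2d wrappers above emulate PyTorch-style explicit padding on top of Keras pooling: when padding is requested, the tensor is padded first and a 'valid' pooling layer is applied afterwards. A standalone sketch of the same idea (my own illustration, not the wrappers' actual call method):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.random.normal((1, 7, 7, 16))

# Pad one pixel on each spatial side (channels_last), then pool with 'valid'.
padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
y = layers.MaxPooling2D(pool_size=3, strides=2, padding="valid")(padded)
print(y.shape)   # (1, 4, 4, 16) -- matches PyTorch MaxPool2d(3, stride=2, padding=1)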
Example #25
Source File: mnist.py    From armory with MIT License
def make_mnist_model(**kwargs) -> tf.keras.Model:
    model = Sequential()
    model.add(
        Conv2D(
            filters=4,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
            input_shape=(28, 28, 1),
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(
            filters=10,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
            input_shape=(23, 23, 4),
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),
        metrics=["accuracy"],
    )
    return model 
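
A minimal way to exercise make_mnist_model, assuming the function above is importable; it uses the built-in MNIST loader, and the training settings are illustrative rather than taken from the armory project:

import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., None].astype("float32") / 255.0   # (60000, 28, 28, 1)
x_test = x_test[..., None].astype("float32") / 255.0

model = make_mnist_model()
model.fit(x_train, y_train, batch_size=128, epochs=1, validation_split=0.1)
model.evaluate(x_test, y_test)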
Example #26
Source File: vgg.py    From Advanced-Deep-Learning-with-Keras with MIT License
def make_layers(cfg,
                    inputs, 
                    batch_norm=True, 
                    in_channels=1):
        """Helper function to ease the creation of VGG
            network model

        Arguments:
            cfg (dict): Summarizes the network layer 
                configuration
            inputs (tensor): Input from previous layer
            batch_norm (Bool): Whether to use batch norm
                between Conv2D and ReLU
            in_channels (int): Number of input channels
        """
        x = inputs
        for layer in cfg:
            if layer == 'M':
                x = MaxPooling2D()(x)
            elif layer == 'A':
                x = AveragePooling2D(pool_size=3)(x)
            else:
                x = Conv2D(layer,
                           kernel_size=3,
                           padding='same',
                           kernel_initializer='he_normal'
                           )(x)
                if batch_norm:
                    x = BatchNormalization()(x)
                x = Activation('relu')(x)
    
        return x 
Example #27
Source File: mnist_cifar_models.py    From CROWN-IBP with BSD 2-Clause "Simplified" License
def get_model_meta(filename):
    print("Loading model " + filename)
    global use_tf_keras
    global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    try:
        from keras.models import load_model as load_model_keras
        ret = get_model_meta_real(filename, load_model_keras)
        # model is successfully loaded. Import layers from keras
        from keras.models import Sequential
        from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import LeakyReLU
        from keras import regularizers
        from keras import backend as K
        print("Model imported using keras")
    except (KeyboardInterrupt, SystemExit, SyntaxError, NameError, IndentationError):
        raise
    except:
        print("Failed to load model with keras. Trying tf.keras...")
        use_tf_keras = True
        from tensorflow.keras.models import load_model as load_model_tf
        ret = get_model_meta_real(filename, load_model_tf)
        # model is successfully loaded. Import layers from tensorflow.keras
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from tensorflow.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.keras.layers import LeakyReLU
        from tensorflow.keras import regularizers
        from tensorflow.keras import backend as K
        print("Model imported using tensorflow.keras")
    # put imported functions in global
    Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K = \
        Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    return ret 
Example #28
Source File: cifar.py    From armory with MIT License
def make_cifar_model(**kwargs) -> tf.keras.Model:
    model = Sequential()
    model.add(
        Conv2D(
            filters=4,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
            input_shape=(32, 32, 3),
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(
            filters=10,
            kernel_size=(5, 5),
            strides=1,
            activation="relu",
            input_shape=(23, 23, 4),
        )
    )
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(100, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    model.compile(
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),
        metrics=["accuracy"],
    )
    return model 
Example #29
Source File: micronnet_gtsrb.py    From armory with MIT License
def make_model(**kwargs) -> tf.keras.Model:
    # Model is based on MicronNet: https://arxiv.org/abs/1804.00497v3

    img_size = 48
    NUM_CLASSES = 43
    eps = 1e-6

    inputs = Input(shape=(img_size, img_size, 3))
    x = Conv2D(1, (1, 1), padding="same")(inputs)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = Conv2D(29, (5, 5), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(59, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(74, (3, 3), padding="same")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Flatten()(x)
    x = Dense(300)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(epsilon=eps)(x)
    x = Dense(300, activation="relu")(x)
    predictions = Dense(NUM_CLASSES, activation="softmax")(x)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(
        optimizer=tf.keras.optimizers.SGD(
            learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True
        ),
        loss=tf.keras.losses.sparse_categorical_crossentropy,
        metrics=["accuracy"],
    )

    return model 
Example #30
Source File: cifar_tf_example.py    From ray with Apache License 2.0
def create_model(config):
    import tensorflow as tf
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))

    # initialize the RMSprop optimizer
    opt = tf.keras.optimizers.RMSprop(learning_rate=0.001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(
        loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model
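
create_model relies on input_shape and num_classes being defined at module level; for CIFAR-10 those would typically be (32, 32, 3) and 10. A hedged usage sketch, assuming the globals live in the same module as create_model (the values below are assumptions, not from the ray example):

# Assumed module-level globals used by create_model above (CIFAR-10 values):
input_shape = (32, 32, 3)
num_classes = 10

model = create_model(config={})   # config is not used by this snippet
model.summary()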