Python tensorflow.keras.layers.AveragePooling2D() Examples

The following are 30 code examples of tensorflow.keras.layers.AveragePooling2D(), extracted from open source projects. You can go to the original project or source file by following the link above each example, or check out all available functions/classes of the module tensorflow.keras.layers.
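Before the examples, a minimal standalone sketch of the layer itself (shapes chosen purely for illustration): a 2x2 window with stride 2 halves each spatial dimension.

import tensorflow as tf
from tensorflow.keras.layers import AveragePooling2D

x = tf.random.normal((1, 32, 32, 3))   # (batch, height, width, channels)
y = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))(x)
print(y.shape)                          # (1, 16, 16, 3)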
Example #1
Source File: cifar10_tutorial.py    From cleverhans with MIT License
def __init__(self, nb_filters=64):
    super(CNN, self).__init__()
    img_size = 32
    log_resolution = int(round(math.log(img_size) / math.log(2)))
    conv_args = dict(
            activation=tf.nn.leaky_relu,
            kernel_size=3,
            padding='same')
    self.layers_obj = []
    for scale in range(log_resolution - 2):
      conv1 = Conv2D(nb_filters << scale, **conv_args)
      conv2 = Conv2D(nb_filters << (scale + 1), **conv_args)
      pool = AveragePooling2D(pool_size=(2, 2), strides=(2, 2))
      self.layers_obj.append(conv1)
      self.layers_obj.append(conv2)
      self.layers_obj.append(pool)
    conv = Conv2D(10, **conv_args)
    self.layers_obj.append(conv) 
Example #2
Source File: CNN.py    From nn_builder with MIT License
def create_and_append_layer(self, layer, list_to_append_layer_to, activation=None, output_layer=False):
        """Creates and appends a layer to the list provided"""
        layer_name = layer[0].lower()
        assert layer_name in self.valid_cnn_hidden_layer_types, "Layer name {} not valid, use one of {}".format(
            layer_name, self.valid_cnn_hidden_layer_types)
        if layer_name == "conv":
            list_to_append_layer_to.extend([Conv2D(filters=layer[1], kernel_size=layer[2],
                                                   strides=layer[3], padding=layer[4], activation=activation,
                                                   kernel_initializer=self.initialiser_function)])
        elif layer_name == "maxpool":
            list_to_append_layer_to.extend([MaxPool2D(pool_size=(layer[1], layer[1]),
                                                      strides=(layer[2], layer[2]), padding=layer[3])])
        elif layer_name == "avgpool":
            list_to_append_layer_to.extend([AveragePooling2D(pool_size=(layer[1], layer[1]),
                                                             strides=(layer[2], layer[2]), padding=layer[3])])
        elif layer_name == "linear":
            list_to_append_layer_to.extend([Dense(layer[1], activation=activation,
                                                  kernel_initializer=self.initialiser_function)])
        else:
            raise ValueError("Unknown layer name: {}".format(layer_name)) 
Example #3
Source File: qpooling.py    From qkeras with Apache License 2.0
def QAveragePooling2D(  # pylint: disable=invalid-name
    pool_size=(2, 2), strides=None, padding="valid", quantizer=None, **kwargs):
  """Computes the quantized version of AveragePooling2D."""

  # This is just a convenience wrapper, nothing fancy: it reminds us that
  # average pooling must be quantized before the next layer.

  def _call(x):
    """Performs inline call to AveragePooling followed by QActivation."""

    x = AveragePooling2D(pool_size, strides, padding, **kwargs)(x)

    if kwargs.get("name", None):
      name = kwargs["name"] + "_act"
    else:
      name = None

    if quantizer:
      x = QActivation(quantizer, name=name)(x)

    return x

  return _call 
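A hedged usage sketch of the helper above; the quantized_bits quantizer and the input shape are assumptions, not part of the original file.

from tensorflow.keras.layers import Input
from qkeras import quantized_bits

inputs = Input((32, 32, 16))
# Pool, then quantize the averages with an assumed 8-bit quantizer; the
# QActivation layer will be named "qpool_act".
x = QAveragePooling2D(pool_size=(2, 2),
                      quantizer=quantized_bits(bits=8, integer=0),
                      name="qpool")(inputs)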
Example #4
Source File: DenseNet.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def transition_layer(x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
    """
    Creates a transition layer between dense blocks as transition, which do convolution and pooling.
    Works as downsampling.
    """
    
    x = BatchNormalization(gamma_regularizer=l2(weight_decay), beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = Conv2D(int(nb_channels*compression), (1, 1), padding='same',
                      use_bias=False, kernel_regularizer=l2(weight_decay))(x)
    
    # Adding dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    return x 
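A hypothetical call of transition_layer, assuming the example's Keras imports (BatchNormalization, Activation, Conv2D, Dropout, AveragePooling2D, l2) are in scope; the input shape and compression factor are illustrative.

from tensorflow.keras.layers import Input

inputs = Input((32, 32, 64))
# compression=0.5 halves the channel count, and the 2x2 average pooling
# halves each spatial dimension: (32, 32, 64) -> (16, 16, 32).
x = transition_layer(inputs, nb_channels=64, compression=0.5)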
Example #5
Source File: cnn_cifar_affinity.py    From affinity-loss with MIT License
def create_models():
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, 1, 90.0)(x)

    # The output has one extra dimension, which is used to compute the
    # regularization term. Ignore it at prediction time.

    return Model(input, x) 
Example #6
Source File: cnn_cifar_optuna_affinity.py    From affinity-loss with MIT License
def create_models(sigma, m):
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.BatchNormalization()(x)
    x = ClusteringAffinity(10, m, sigma)(x)

    return Model(input, x) 
Example #7
Source File: pspnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 pool_out_size,
                 upscale_out_size,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidPoolingBranch, self).__init__(**kwargs)
        self.upscale_out_size = upscale_out_size
        self.data_format = data_format

        self.pool = nn.AveragePooling2D(
            pool_size=pool_out_size,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv") 
Example #8
Source File: deeplabv3.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 upscale_out_size,
                 data_format="channels_last",
                 **kwargs):
        super(ASPPAvgBranch, self).__init__(**kwargs)
        self.upscale_out_size = upscale_out_size
        self.data_format = data_format

        self.pool = nn.AveragePooling2D(
            pool_size=1,
            data_format=data_format,
            name="pool")
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            data_format=data_format,
            name="conv") 
Example #9
Source File: inception_v2.py    From keras_imagenet with MIT License
def inception(x, filters):
    """Utility function to implement the inception module.

    # Arguments
        x: input tensor.
        filters: a list of filter sizes.

    # Returns
        Output tensor after applying the inception.
    """
    if len(filters) != 4:
        raise ValueError('filters should have 4 components')
    if len(filters[1]) != 2 or len(filters[2]) != 2:
        raise ValueError('incorrect spec of filters')

    branch1x1 = conv2d_bn(x, filters[0], (1, 1))

    branch3x3 = conv2d_bn(x, filters[1][0], (1, 1))
    branch3x3 = conv2d_bn(branch3x3, filters[1][1], (3, 3))

    # branch5x5 is implemented with two 3x3 conv2d's
    branch5x5 = conv2d_bn(x, filters[2][0], (1, 1))
    branch5x5 = conv2d_bn(branch5x5, filters[2][1], (3, 3))
    branch5x5 = conv2d_bn(branch5x5, filters[2][1], (3, 3))

    # use AveragePooling2D here
    branchpool = layers.AveragePooling2D(
        pool_size=(3, 3), strides=(1, 1), padding='same')(x)
    branchpool = conv2d_bn(branchpool, filters[3], (1, 1))

    concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.concatenate(
        [branch1x1, branch3x3, branch5x5, branchpool], axis=concat_axis)
    return x 
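A hypothetical call of the inception module above, assuming the example's conv2d_bn helper is in scope and pads 'same'; the filter sizes are illustrative.

from tensorflow.keras import layers

inputs = layers.Input((28, 28, 192))
# filters: [1x1, (3x3 reduce, 3x3), (5x5 reduce, 5x5), pool projection]
x = inception(inputs, filters=[64, (96, 128), (16, 32), 32])
# Every branch keeps the 28x28 spatial size, so the concatenated output
# has 64 + 128 + 32 + 32 = 256 channels.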
Example #10
Source File: CNN.py    From nn_builder with MIT License
def __init__(self, layers_info, output_activation=None, hidden_activations="relu", dropout= 0.0, initialiser="default",
                 batch_norm=False, y_range=(), random_seed=0, input_dim=None):
        Model.__init__(self)
        self.valid_cnn_hidden_layer_types = {'conv', 'maxpool', 'avgpool', 'linear'}
        self.valid_layer_types_with_no_parameters = (MaxPool2D, AveragePooling2D)
        Base_Network.__init__(self, layers_info, output_activation, hidden_activations, dropout, initialiser,
                              batch_norm, y_range, random_seed, input_dim) 
Example #11
Source File: resnet.py    From Advanced-Deep-Learning-with-Keras with MIT License
def features_pyramid(x, n_layers):
    """Generate features pyramid from the output of the 
    last layer of a backbone network (e.g. ResNetv1 or v2)

    Arguments:
        x (tensor): Output feature maps of a backbone network
        n_layers (int): Number of additional pyramid layers

    Return:
        outputs (list): Features pyramid 
    """
    outputs = [x]
    conv = AveragePooling2D(pool_size=2, name='pool1')(x)
    outputs.append(conv)
    prev_conv = conv
    n_filters = 512

    # additional feature map layers
    for i in range(n_layers - 1):
        postfix = "_layer" + str(i+2)
        conv = conv_layer(prev_conv,
                          n_filters,
                          kernel_size=3,
                          strides=2,
                          use_maxpool=False,
                          postfix=postfix)
        outputs.append(conv)
        prev_conv = conv

    return outputs 
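A hypothetical call of features_pyramid, assuming the example's conv_layer helper is in scope and pads 'same'; the backbone output shape is illustrative.

from tensorflow.keras.layers import Input

backbone_out = Input((40, 40, 512))
# n_layers=3 yields four maps: 40x40 (the input), 20x20 (average-pooled),
# then 10x10 and 5x5 from the stride-2 conv layers.
pyramid = features_pyramid(backbone_out, n_layers=3)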
Example #12
Source File: vgg.py    From Advanced-Deep-Learning-with-Keras with MIT License
def make_layers(cfg,
                    inputs, 
                    batch_norm=True, 
                    in_channels=1):
        """Helper function to ease the creation of VGG
            network model

        Arguments:
            cfg (dict): Summarizes the network layer 
                configuration
            inputs (tensor): Input from previous layer
            batch_norm (bool): Whether to use batch norm
                between Conv2D and ReLU
            in_channels (int): Number of input channels
        """
        x = inputs
        for layer in cfg:
            if layer == 'M':
                x = MaxPooling2D()(x)
            elif layer == 'A':
                x = AveragePooling2D(pool_size=3)(x)
            else:
                x = Conv2D(layer,
                           kernel_size=3,
                           padding='same',
                           kernel_initializer='he_normal'
                           )(x)
                if batch_norm:
                    x = BatchNormalization()(x)
                x = Activation('relu')(x)
    
        return x 
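A hypothetical cfg for make_layers above (the entries are illustrative): integers add a Conv2D block, 'M' a MaxPooling2D, 'A' an AveragePooling2D with pool_size=3. This assumes make_layers and the example's Keras layer imports are accessible in scope.

from tensorflow.keras.layers import Input

cfg = [64, 64, 'M', 128, 128, 'A']
inputs = Input((32, 32, 1))
x = make_layers(cfg, inputs)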
Example #13
Source File: wide_resnet.py    From image_recognition with MIT License
def __call__(self):
        logging.debug("Creating model...")

        assert ((self._depth - 4) % 6 == 0)
        n = (self._depth - 4) // 6  # integer division: `/` would make the block count a float in Python 3

        inputs = Input(shape=self._input_shape)

        n_stages = [16, 16 * self._k, 32 * self._k, 64 * self._k]

        conv1 = Convolution2D(filters=n_stages[0], kernel_size=(3, 3),
                              strides=(1, 1),
                              padding="same",
                              kernel_initializer=self._weight_init,
                              kernel_regularizer=l2(self._weight_decay),
                              use_bias=self._use_bias)(inputs)  # "One conv at the beginning (spatial size: 32x32)"

        # Add wide residual blocks
        block_fn = self._wide_basic
        conv2 = self._layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1, 1))(conv1)
        conv3 = self._layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2, 2))(conv2)
        conv4 = self._layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2, 2))(conv3)
        batch_norm = BatchNormalization(axis=self._channel_axis)(conv4)
        relu = Activation("relu")(batch_norm)

        # Classifier block
        pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="same")(relu)
        flatten = Flatten()(pool)
        predictions_g = Dense(units=2, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax",
                              name="pred_gender")(flatten)
        predictions_a = Dense(units=101, kernel_initializer=self._weight_init, use_bias=self._use_bias,
                              kernel_regularizer=l2(self._weight_decay), activation="softmax",
                              name="pred_age")(flatten)
        model = Model(inputs=inputs, outputs=[predictions_g, predictions_a])

        return model 
Example #14
Source File: googlenet.py    From keras_imagenet with MIT License
def inception(x, filters):
    """Utility function to implement the inception module.

    # Arguments
        x: input tensor.
        filters: a list of filter sizes.

    # Returns
        Output tensor after applying the inception.
    """
    if len(filters) != 4:
        raise ValueError('filters should have 4 components')
    if len(filters[1]) != 2 or len(filters[2]) != 2:
        raise ValueError('incorrect spec of filters')

    branch1x1 = conv2d_bn(x, filters[0], (1, 1))

    branch3x3 = conv2d_bn(x, filters[1][0], (1, 1))
    branch3x3 = conv2d_bn(branch3x3, filters[1][1], (3, 3))

    branch5x5 = conv2d_bn(x, filters[2][0], (1, 1))
    branch5x5 = conv2d_bn(branch5x5, filters[2][1], (5, 5))

    branchpool = layers.AveragePooling2D(
        pool_size=(3, 3), strides=(1, 1), padding='same')(x)
    branchpool = conv2d_bn(branchpool, filters[3], (1, 1))

    if backend.image_data_format() == 'channels_first':
        concat_axis = 1
    else:
        concat_axis = 3
    x = layers.concatenate(
        [branch1x1, branch3x3, branch5x5, branchpool], axis=concat_axis)
    return x 
Example #15
Source File: cnn_cifar_softmax.py    From affinity-loss with MIT License
def create_models():
    input = layers.Input((32,32,3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(10, activation="softmax")(x)

    return Model(input, x) 
Example #16
Source File: leap.py    From DeepPoseKit with Apache License 2.0
def __init__(
        self,
        n_layers,
        filters,
        kernel_size,
        activation,
        pooling="max",
        initializer="glorot_uniform",
        batchnorm=False,
        use_bias=True,
        name=None,
    ):
        self.n_layers = n_layers
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.initializer = initializer
        self.use_bias = use_bias
        self.pooling = pooling
        # Compare strings with ==/!=; `is` checks object identity and is
        # unreliable for string literals.
        if activation.lower() != "selu" and batchnorm:
            self.batchnorm = True
        else:
            self.batchnorm = False
        if activation.lower() == "selu":
            self.initializer = "lecun_normal"
        if pooling == "average":
            self.Pooling2D = layers.AveragePooling2D
        else:
            self.Pooling2D = layers.MaxPooling2D
        self.name = name 
Example #17
Source File: SEResNeXt.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def residual_layer(self, x, out_dim):
        '''
        Residual block.
        '''
        for i in range(self.num_block):
            input_dim = int(np.shape(x)[-1])
            
            if input_dim*2 == out_dim:
                flag = True
                stride = 2
                channel = input_dim // 2
            else:
                flag = False
                stride = 1
            
            subway_x = self.split_layer(x, stride)
            subway_x = self.conv_bn(subway_x, out_dim, 1, 1)
            subway_x = self.squeeze_excitation_layer(subway_x, out_dim)
            
            if flag:
                pad_x = AveragePooling2D(pool_size=(2,2), strides=(2,2), padding='same')(x)
                pad_x = Lambda(self.channel_zeropad, output_shape=self.channel_zeropad_output)(pad_x)
            else:
                pad_x = x
            
            x = self.activation(add([pad_x, subway_x]))
                
        return x 
Example #18
Source File: DeeplabV3+.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def aspp(x,input_shape,out_stride):
	b0=Conv2D(256,(1,1),padding="same",use_bias=False)(x)
	b0=BatchNormalization()(b0)
	b0=Activation("relu")(b0)
	
	b1=DepthwiseConv2D((3,3),dilation_rate=(6,6),padding="same",use_bias=False)(x)
	b1=BatchNormalization()(b1)
	b1=Activation("relu")(b1)
	b1=Conv2D(256,(1,1),padding="same",use_bias=False)(b1)
	b1=BatchNormalization()(b1)
	b1=Activation("relu")(b1)
	
	b2=DepthwiseConv2D((3,3),dilation_rate=(12,12),padding="same",use_bias=False)(x)
	b2=BatchNormalization()(b2)
	b2=Activation("relu")(b2)
	b2=Conv2D(256,(1,1),padding="same",use_bias=False)(b2)
	b2=BatchNormalization()(b2)
	b2=Activation("relu")(b2)	

	b3=DepthwiseConv2D((3,3),dilation_rate=(18,18),padding="same",use_bias=False)(x)  # third atrous branch: rate 18 (ASPP rates are 6, 12, 18)
	b3=BatchNormalization()(b3)
	b3=Activation("relu")(b3)
	b3=Conv2D(256,(1,1),padding="same",use_bias=False)(b3)
	b3=BatchNormalization()(b3)
	b3=Activation("relu")(b3)
	
	out_shape=int(input_shape[0]/out_stride)
	b4=AveragePooling2D(pool_size=(out_shape,out_shape))(x)
	b4=Conv2D(256,(1,1),padding="same",use_bias=False)(b4)
	b4=BatchNormalization()(b4)
	b4=Activation("relu")(b4)
	b4=BilinearUpsampling((out_shape,out_shape))(b4)
	
	x=Concatenate()([b4,b0,b1,b2,b3])
	return x 
Example #19
Source File: peleenet.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck_sizes,
                 dropout_rate=0.5,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PeleeNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        self.features.add(StemBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            bottleneck_size = bottleneck_sizes[i]
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            if i != 0:
                stage.add(TransitionBlock(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    data_format=data_format,
                    name="trans{}".format(i + 1)))
            for j, out_channels in enumerate(channels_per_stage):
                stage.add(DenseBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bottleneck_size=bottleneck_size,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(conv1x1_block(
            in_channels=in_channels,
            out_channels=in_channels,
            data_format=data_format,
            name="final_block"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = tf.keras.Sequential(name="output1")
        if dropout_rate > 0.0:
            self.output1.add(nn.Dropout(
                rate=dropout_rate,
                name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc")) 
Example #20
Source File: pyramidnet.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PyramidNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        self.features.add(PyrInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(PyrUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1") 
Example #21
Source File: menet.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 side_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MENet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        self.features.add(MEInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                downsample = (j == 0)
                ignore_group = (i == 0) and (j == 0)
                stage.add(MEUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    side_channels=side_channels,
                    groups=groups,
                    downsample=downsample,
                    ignore_group=ignore_group,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1") 
Example #22
Source File: darknet.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 odd_pointwise,
                 avg_pool_size,
                 cls_activ,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                stage.add(dark_convYxY(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    alpha=alpha,
                    pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise),
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            if i != len(channels) - 1:
                stage.add(MaxPool2d(
                    pool_size=2,
                    strides=2,
                    data_format=data_format,
                    name="pool{}".format(i + 1)))
            self.features.add(stage)

        self.output1 = tf.keras.Sequential(name="output1")
        self.output1.add(Conv2d(
            in_channels=in_channels,
            out_channels=classes,
            kernel_size=1,
            data_format=data_format,
            name="final_conv"))
        if cls_activ:
            self.output1.add(nn.LeakyReLU(alpha=alpha))
        self.output1.add(nn.AveragePooling2D(
            pool_size=avg_pool_size,
            strides=1,
            data_format=data_format,
            name="final_pool")) 
Example #23
Source File: cbamresnet.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(CbamResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        self.features.add(ResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(CbamResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1") 
Example #24
Source File: inceptionv3.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 b_mid_channels,
                 dropout_rate=0.5,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(InceptionV3, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]

        self.features = tf.keras.Sequential(name="features")
        self.features.add(InceptInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels

        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                if (j == 0) and (i != 0):
                    unit = reduction_units[i - 1]
                else:
                    unit = normal_units[i]
                if unit == InceptionBUnit:
                    stage.add(unit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        mid_channels=b_mid_channels[j - 1],
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    stage.add(unit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = tf.keras.Sequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=dropout_rate,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc")) 
Example #25
Source File: hardnet.py    From imgclsmob with MIT License
def __init__(self,
                 init_block_channels,
                 unit_in_channels,
                 unit_out_channels,
                 unit_links,
                 use_deptwise,
                 use_last_dropout,
                 output_dropout_rate,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(HarDNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        activation = "relu6"

        self.features = tf.keras.Sequential(name="features")
        self.features.add(HarDInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            use_deptwise=use_deptwise,
            activation=activation,
            data_format=data_format,
            name="init_block"))
        for i, (in_channels_list_i, out_channels_list_i) in enumerate(zip(unit_in_channels, unit_out_channels)):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, (in_channels_list_ij, out_channels_list_ij) in enumerate(zip(in_channels_list_i,
                                                                                out_channels_list_i)):
                use_dropout = ((j == len(in_channels_list_i) - 1) and (i == len(unit_in_channels) - 1) and
                               use_last_dropout)
                downsampling = ((j == len(in_channels_list_i) - 1) and (i != len(unit_in_channels) - 1))
                stage.add(HarDUnit(
                    in_channels_list=in_channels_list_ij,
                    out_channels_list=out_channels_list_ij,
                    links_list=unit_links[i][j],
                    use_deptwise=use_deptwise,
                    use_dropout=use_dropout,
                    downsampling=downsampling,
                    activation=activation,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
            self.features.add(stage)
        in_channels = unit_out_channels[-1][-1][-1]
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = tf.keras.Sequential(name="output1")
        self.output1.add(nn.Dropout(
            rate=output_dropout_rate,
            name="dropout"))
        self.output1.add(nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="fc")) 
Example #26
Source File: darknet53.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet53, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                if j == 0:
                    stage.add(conv3x3_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=2,
                        activation=nn.LeakyReLU(alpha=alpha),
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                else:
                    stage.add(DarkUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        alpha=alpha,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1") 
Example #27
Source File: pyramidnet_cifar.py    From imgclsmob with MIT License
def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARPyramidNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        self.features.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            activation=None,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(PyrUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1") 
Example #28
Source File: BiFPN.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, in_channels):
        '''Bi-directional feature pyramid network (BiFPN)
        Args:
          in_channels: (list) (width, channel) pairs describing each feature
                        layer taken from the backbone, in ascending width order.
        e.g.
        if blocks 1, 2, 4, 7, 14 of MobileNetV2 are used,
        in_channels: [(10,160),(19,64),(38,32),(75,24),(150,32)]
        'in_channels' can be built with
        self.bb_size = [(output.shape.as_list()[1], output.shape.as_list()[3])
                            for output in self.backbone.outputs]
        '''

        super(BiFPN, self).__init__()
        self.epsilon = 0.0001
        self.input_layer_cnt = len(in_channels)
        in_wd, in_ch = zip(*in_channels)

        self.td_weights = []
        self.out_weights = []
        self.td_convs = []
        self.out_convs = []

        self.out_weights.append(tf.random.normal([3]))
        self.out_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[0], 3, padding='same'),
                                                   layers.BatchNormalization()]))
        for i in range(self.input_layer_cnt-2):
            self.td_weights.append(tf.random.normal([2]))
            self.td_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[i+1], 3, padding='same'),
                                                      layers.BatchNormalization()]))
            self.out_weights.append(tf.random.normal([3]))
            self.out_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[i+1], 3, padding='same'),
                                                       layers.BatchNormalization()]))
        self.td_weights.append(tf.random.normal([2]))
        self.td_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[-1], 3, padding='same'),
                                                   layers.BatchNormalization()]))

        self.upconvs  = [tf.keras.Sequential([layers.UpSampling2D(u),
                                              layers.Conv2D(c,k,padding=pad)])
                                              for u,c,k,pad in zip([2,2,2,2],
                                                                   in_ch[1:],
                                                                   [2,3,2,3],
                                                                   ['valid','same','valid','same'])]
        self.downconvs= [tf.keras.Sequential([layers.ZeroPadding2D(pad),
                                              layers.AveragePooling2D(p),
                                              layers.Conv2D(c,3,padding='same')])
                                              for c,p,pad in zip(in_ch[:-1],
                                                                 [2,2,2,2],
                                                                 [1,0,1,0])] 
Example #29
Source File: inception_mobilenet.py    From keras_imagenet with MIT License
def _mixed(x, filters, name=None):
    """Utility function to implement the mixed (inception mobilenet) block.

    # Arguments
        x: input tensor.
        filters: a list of filter sizes.
        name: name of the ops

    # Returns
        Output tensor after applying the mixed block.
    """
    if len(filters) != 4:
        raise ValueError('filters should have 4 components')

    name1 = name + '_1x1' if name else None
    branch1x1 = _conv2d_bn(x, filters[0],
                           kernel_size=(1, 1),
                           name=name1)

    name1 = name + '_3x3' if name else None
    branch3x3 = _depthwise_conv2d_bn(x, filters[1],
                                     kernel_size=(3, 3),
                                     name=name1)

    name1 = name + '_5x5' if name else None
    branch5x5 = _depthwise_conv2d_bn(x, filters[2],
                                     kernel_size=(5, 5),
                                     name=name1)

    name1 = name + '_pool_1' if name else None
    name2 = name + '_pool_2' if name else None
    branchpool = layers.AveragePooling2D(pool_size=(3, 3), strides=(1, 1),
                                         padding='same',
                                         name=name1)(x)
    branchpool = _conv2d_bn(branchpool, filters[3], (1, 1),
                            name=name2)

    concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.concatenate([branch1x1, branch3x3, branch5x5, branchpool],
                           axis=concat_axis,
                           name=name)
    return x 
Example #30
Source File: helloworld.py    From keras-tuner with Apache License 2.0
def build_model(hp):
    """Function that build a TF model based on hyperparameters values.

    Args:
        hp (HyperParameter): hyperparameters values

    Returns:
        Model: Compiled model
    """

    num_layers = hp.Int('num_layers', 2, 8, default=6)
    lr = hp.Choice('learning_rate', [1e-3, 5e-4])

    inputs = layers.Input(shape=(28, 28, 1))
    x = inputs

    for idx in range(num_layers):
        idx = str(idx)

        filters = hp.Int('filters_' + idx, 32, 256, step=32, default=64)
        x = layers.Conv2D(filters=filters, kernel_size=3, padding='same',
                          activation='relu')(x)

        # add a pooling layer if needed
        if x.shape[1] >= 8:
            pool_type = hp.Choice('pool_' + idx, values=['max', 'avg'])
            if pool_type == 'max':
                x = layers.MaxPooling2D(2)(x)
            elif pool_type == 'avg':
                x = layers.AveragePooling2D(2)(x)

    x = layers.Flatten()(x)
    outputs = layers.Dense(10, activation='softmax')(x)

    # Build model
    model = keras.Model(inputs, outputs)
    model.compile(optimizer=Adam(lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model


# Initialize the tuner by passing the `build_model` function
# and specifying key search constraints: the objective to maximize
# (val_acc) and the number of trials to run. More efficient tuners
# such as UltraBand() can also be used.
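A hedged sketch of the tuner setup the comment describes, using keras-tuner's RandomSearch; the trial budget, output directory, and dataset variables are placeholders.

from kerastuner.tuners import RandomSearch

tuner = RandomSearch(
    build_model,
    objective='val_accuracy',
    max_trials=10,               # placeholder trial budget
    directory='results',         # placeholder output directory
    project_name='helloworld')

# x_train, y_train, x_val, y_val stand in for a real dataset.
tuner.search(x_train, y_train,
             epochs=5,
             validation_data=(x_val, y_val))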