Python tensorflow.keras.layers.UpSampling2D() Examples

The following are 20 code examples of tensorflow.keras.layers.UpSampling2D(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.layers, or try the search function.
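Before working through the examples, here is a minimal, self-contained sketch of the layer itself (channels_last, the default data format):

import numpy as np
import tensorflow as tf

x = np.arange(4, dtype="float32").reshape(1, 2, 2, 1)  # (batch, height, width, channels)

# Nearest-neighbour upsampling (the default): each pixel is simply repeated.
up_nn = tf.keras.layers.UpSampling2D(size=(2, 2))(x)  # -> shape (1, 4, 4, 1)

# Bilinear upsampling interpolates between neighbouring pixels instead.
up_bi = tf.keras.layers.UpSampling2D(size=(2, 2), interpolation="bilinear")(x)

print(up_nn.shape, up_bi.shape)  # (1, 4, 4, 1) (1, 4, 4, 1)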
Example #1
Source File: bayesian_unet.py    From bcnn with MIT License
def up_stage(inputs, skip, filters, prior_fn, kernel_size=3,
             activation="relu", padding="SAME"):
    up = UpSampling2D()(inputs)
    up = tfp.layers.Convolution2DFlipout(filters, 2,
                                         activation=activation,
                                         padding=padding,
                                         kernel_prior_fn=prior_fn)(up)
    up = GroupNormalization()(up)

    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)

    conv = tfp.layers.Convolution2DFlipout(filters, kernel_size,
                                           activation=activation,
                                           padding=padding,
                                           kernel_prior_fn=prior_fn)(merge)
    conv = GroupNormalization()(conv)
    conv = tfp.layers.Convolution2DFlipout(filters, kernel_size,
                                           activation=activation,
                                           padding=padding,
                                           kernel_prior_fn=prior_fn)(conv)
    conv = GroupNormalization()(conv)

    return conv 
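This snippet, like Example #3 below from the same project, relies on names imported elsewhere in the file. A plausible import header, given as a sketch rather than the project's verbatim code, would be:

import tensorflow_probability as tfp
from tensorflow.keras.layers import (Conv2D, SpatialDropout2D,
                                     UpSampling2D, concatenate)
# GroupNormalization is assumed to come from an add-on package, e.g.:
# from tensorflow_addons.layers import GroupNormalization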
Example #2
Source File: hrnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor,
                 data_format="channels_last",
                 **kwargs):
        super(UpSamplingBlock, self).__init__(**kwargs)
        self.scale_factor = scale_factor

        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=1,
            activation=None,
            data_format=data_format,
            name="conv")
        self.upsample = nn.UpSampling2D(
            size=scale_factor,
            data_format=data_format,
            interpolation="nearest",
            name="upsample") 
Example #3
Source File: dropout_unet.py    From bcnn with MIT License
def up_stage(inputs, skip, filters, kernel_size=3,
             activation="relu", padding="SAME"):
    up = UpSampling2D()(inputs)
    up = Conv2D(filters, 2, activation=activation, padding=padding)(up)
    up = GroupNormalization()(up)

    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)

    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(merge)
    conv = GroupNormalization()(conv)
    conv = Conv2D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)
    conv = SpatialDropout2D(0.5)(conv, training=True)

    return conv 
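Unlike Example #1, this variant captures model uncertainty with dropout rather than Flipout layers: SpatialDropout2D is invoked with training=True, so dropout remains active at inference time, which is what makes Monte Carlo sampling of the network's predictions possible.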
Example #4
Source File: densenet.py    From DeepPoseKit with Apache License 2.0
def __init__(self, compression_factor=0.5, **kwargs):
        # super(TransitionDown, self).__init__(self, **kwargs)
        self.concat = Concatenate()
        self.compression_factor = compression_factor

        self.upsample = (
            SubPixelUpscaling()
        )  # layers.UpSampling2D(interpolation='bilinear') 
Example #5
Source File: Refinenet.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def MultiResolutionFusion(high_inputs=None, low_inputs=None, n_filters=256, name=''):
    """
    Fuse together all path inputs. This block first applies convolutions
    for input adaptation, which generate feature maps of the same feature dimension 
    (the smallest one among the inputs), and then up-samples all (smaller) feature maps to
    the largest resolution of the inputs. Finally, all feature maps are fused by summation.
    Arguments:
      high_inputs: The input tensors that have the higher resolution
      low_inputs: The input tensors that have the lower resolution
      n_filters: Number of output feature maps for each conv
    Returns:
      Fused feature maps at higher resolution
    
    """
    
    if low_inputs is None: # RefineNet block 4
        return high_inputs

    else:
        conv_low = Conv2D(n_filters, 3, padding='same', name=name+'conv_lo', kernel_initializer=kern_init, kernel_regularizer=kern_reg)(low_inputs)
        conv_low = BatchNormalization()(conv_low)
        conv_high = Conv2D(n_filters, 3, padding='same', name=name+'conv_hi', kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high_inputs)
        conv_high = BatchNormalization()(conv_high)
        
        conv_low_up = UpSampling2D(size=2, interpolation='bilinear', name=name+'up')(conv_low)
        
        return Add(name=name+'sum')([conv_low_up, conv_high]) 
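Note that the fixed UpSampling2D(size=2, interpolation='bilinear') step assumes low_inputs sit at exactly half the resolution of high_inputs, so that after upsampling the two adapted feature maps can be summed elementwise.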
Example #6
Source File: HRNet.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def fuse_layers(x, channels, multi_scale_output=True):
    out = []

    for i in range(len(channels) if multi_scale_output else 1):
        residual = x[i]
        for j in range(len(channels)):
            if j > i:
                y = conv(x[j], channels[i], 1, padding_='valid')
                y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
                y = UpSampling2D(size=2 ** (j - i))(y)
                residual = Add()([residual, y])
            elif j < i:
                y = x[j]
                for k in range(i - j):
                    if k == i - j - 1:
                        y = conv(y, channels[i], 3, strides_=2)
                        y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
                    else:
                        y = conv(y, channels[j], 3, strides_=2)
                        y = BatchNormalization(epsilon=1e-5, momentum=0.1)(y)
                        y = Activation('relu')(y)
                residual = Add()([residual, y])

        residual = Activation('relu')(residual)
        out.append(residual)

    return out 
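The 2 ** (j - i) upsampling factor reflects HRNet's layout: branch j runs at 1/2**j of the base resolution, so scaling it by 2 ** (j - i) matches branch i's resolution before the Add; the j < i path goes the other way, downsampling with repeated stride-2 convolutions.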
Example #7
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, classes=16):
        super(NestedUNet, self).__init__()

        n1 = 32
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]

        self.pool = MaxPooling2D(strides=2)
        self.Up = UpSampling2D()

        self.conv0_0 = conv_block_nested(filters[0], filters[0])
        self.conv1_0 = conv_block_nested(filters[1], filters[1])
        self.conv2_0 = conv_block_nested(filters[2], filters[2])
        self.conv3_0 = conv_block_nested(filters[3], filters[3])
        self.conv4_0 = conv_block_nested(filters[4], filters[4])

        self.conv0_1 = conv_block_nested(filters[0], filters[0])
        self.conv1_1 = conv_block_nested(filters[1], filters[1])
        self.conv2_1 = conv_block_nested(filters[2], filters[2])
        self.conv3_1 = conv_block_nested(filters[3], filters[3])

        self.conv0_2 = conv_block_nested(filters[0], filters[0])
        self.conv1_2 = conv_block_nested(filters[1], filters[1])
        self.conv2_2 = conv_block_nested(filters[2], filters[2])

        self.conv0_3 = conv_block_nested(filters[0], filters[0])
        self.conv1_3 = conv_block_nested(filters[1], filters[1])

        self.conv0_4 = conv_block_nested(filters[0], filters[0])

        self.final = Conv2D(classes, kernel_size=1, activation='softmax', name='final_layer')
Example #8
Source File: Unet_family.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, filters):
        super(up_conv, self).__init__()
        self.up = Sequential([
            UpSampling2D(),
            Conv2D(filters, kernel_size=(3,3), strides=1, padding='same'),
            BatchNormalization(),
            Activation('relu')
        ]) 
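As a usage sketch (channels_last assumed), the block doubles the spatial dimensions and projects to the requested channel count:

# x: (batch, 16, 16, 128) -> (batch, 32, 32, 64)
x = up_conv(64)(x)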
Example #9
Source File: hourglass.py    From Centernet-Tensorflow2.0 with Apache License 2.0
def connect_left_right(left, right, num_channels, num_channels_next, name):
  # left: 2 residual modules
  left = residual(left, num_channels_next, name=name + 'skip.0')
  left = residual(left, num_channels_next, name=name + 'skip.1')

  # up: 2 times residual & nearest neighbour
  out = residual(right, num_channels, name=name + 'out.0')
  out = residual(out, num_channels_next, name=name + 'out.1')
  out = UpSampling2D(name=name + 'out.upsampleNN')(out)
  out = Add(name=name + 'out.add')([left, out])
  return out 
Example #10
Source File: mv2_hourglass.py    From tf2-mobile-pose-estimation with Apache License 2.0
def _hourglass_module(input, stage_index, number_of_keypoints):
    if stage_index == 0:
        return _inverted_bottleneck(input, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3), []
    else:
        # down sample
        x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='SAME')(input)

        # block front
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)

        stage_index -= 1

        # block middle
        x, middle_layers = _hourglass_module(x, stage_index=stage_index, number_of_keypoints=number_of_keypoints)

        # block back
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=number_of_keypoints, is_subsample=False, kernel_size=3)

        # up sample
        upsampling_size = (2, 2)  # (x.shape[1] * 2, x.shape[2] * 2)
        x = layers.UpSampling2D(size=upsampling_size, interpolation='bilinear')(x)
        upsampling_layer = x

        # jump layer
        x = _inverted_bottleneck(input, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
        x = _inverted_bottleneck(x, up_channel_rate=6, channels=number_of_keypoints, is_subsample=False, kernel_size=3)
        jump_branch_layer = x

        # add
        x = upsampling_layer + jump_branch_layer

        middle_layers.append(x)

        return x, middle_layers 
Example #11
Source File: train.py    From object-localization with MIT License
def create_model(trainable=True):
    model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights="imagenet")

    for layer in model.layers:
        layer.trainable = trainable

    block1 = model.get_layer("block_5_add").output
    block2 = model.get_layer("block_12_add").output
    block3 = model.get_layer("block_15_add").output

    blocks = [block2, block1]

    x = block3
    for block in blocks:
        x = UpSampling2D()(x)

        x = Conv2D(256, kernel_size=3, padding="same", strides=1)(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = Concatenate()([x, block])

        x = Conv2D(256, kernel_size=3, padding="same", strides=1)(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

    x = Conv2D(1, kernel_size=1, activation="sigmoid")(x)

    return Model(inputs=model.input, outputs=x) 
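A hypothetical way to instantiate it; the constant values below are purely illustrative, since IMAGE_SIZE and ALPHA are defined elsewhere in train.py:

IMAGE_SIZE = 224  # illustrative value only
ALPHA = 1.0       # illustrative value only

model = create_model(trainable=False)  # keep the MobileNetV2 backbone frozen
model.summary()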
Example #12
Source File: airnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 groups=1,
                 ratio=2,
                 data_format="channels_last",
                 **kwargs):
        super(AirBlock, self).__init__(**kwargs)
        assert (out_channels % ratio == 0)
        mid_channels = out_channels // ratio

        self.conv1 = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.pool = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="pool")
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            groups=groups,
            data_format=data_format,
            name="conv2")
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            activation=None,
            data_format=data_format,
            name="conv3")
        self.sigmoid = tf.nn.sigmoid
        self.upsample = nn.UpSampling2D(
            size=(2, 2),
            data_format=data_format,
            interpolation="bilinear",
            name="upsample") 
Example #13
Source File: networks.py    From brainstorm with MIT License
def unet2D(x_in,
           img_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           layer_prefix='unet',
           n_convs_per_stage=1,
        ):
    ks = 3
    x = x_in

    encodings = []
    encoding_vol_sizes = []
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv2D(
                nf_enc[i],
                kernel_size=ks,
                strides=(1, 1), padding='same',
                name='{}_enc_conv2D_{}_{}'.format(layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if i < len(nf_enc) - 1:
            x = MaxPooling2D(pool_size=(2, 2), padding='same', name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # only do upsample if we are not yet at max resolution
        if np.any(np.asarray(curr_shape) < np.asarray(img_shape[:len(curr_shape)])):
            x = UpSampling2D(size=(2, 2), name='{}_dec_upsamp_{}'.format(layer_prefix, i))(x)

        # just concatenate the final layer here
        if i <= len(encodings) - 2:
            x = _pad_or_crop_to_shape_2D(x, np.asarray(x.get_shape().as_list()[1:-1]), encoding_vol_sizes[-i-2])
            x = Concatenate(axis=-1)([x, encodings[-i-2]])

        for j in range(n_convs_per_stage):
            x = Conv2D(nf_dec[i],
                       kernel_size=ks, padding='same',
                       name='{}_dec_conv2D_{}_{}'.format(layer_prefix, i, j))(x)
            x = LeakyReLU(0.2)(x)


    y = Conv2D(out_im_chans, kernel_size=1, padding='same',
               name='{}_dec_conv2D_final'.format(layer_prefix))(x)  # add your own activation after this model

    # add your own activation after this model
    return y 
Example #14
Source File: mv2_cpm.py    From tf2-mobile-pose-estimation with Apache License 2.0
def _mobilenetV2(input):
    x = _inverted_bottleneck(input, up_channel_rate=1, channels=12, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=1, channels=12, is_subsample=False, kernel_size=3)
    mv2_branch_0 = x

    x = _inverted_bottleneck(x, up_channel_rate=6, channels=18, is_subsample=True, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=18, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=18, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=18, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=18, is_subsample=False, kernel_size=3)
    mv2_branch_1 = x

    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=True, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
    mv2_branch_2 = x

    x = _inverted_bottleneck(x, up_channel_rate=6, channels=48, is_subsample=True, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=48, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=48, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=48, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=48, is_subsample=False, kernel_size=3)
    mv2_branch_3 = x

    x = _inverted_bottleneck(x, up_channel_rate=6, channels=72, is_subsample=True, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=72, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=72, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=72, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=72, is_subsample=False, kernel_size=3)
    mv2_branch_4 = x

    x = layers.Concatenate(axis=3)([
        layers.MaxPool2D(pool_size=(4, 4), strides=(4, 4), padding='SAME')(mv2_branch_0),
        layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='SAME')(mv2_branch_1),
        mv2_branch_2,
        layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(mv2_branch_3),
        layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(mv2_branch_4),
    ])

    return x 
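The pooling and upsampling factors are chosen so that all five branches arrive at mv2_branch_2's resolution before the Concatenate: branches 0 and 1 are max-pooled down by 4x and 2x, while branches 3 and 4 are bilinearly upsampled by 2x and 4x.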
Example #15
Source File: module.py    From Centernet-Tensorflow2.0 with Apache License 2.0
def upsample_module(inputs, out1, out2):
    left, right = inputs

    xl = res_layer0(left,out2)
    xl = res_layer0(xl, out2)

    xr = convblock(right, out1, 3)
    xr = convblock(xr, out2, 3)
    xr = layers.UpSampling2D()(xr)
    out = layers.Add()([xl, xr])
    return out 
Example #16
Source File: mv2_hourglass.py    From tf2-mobile-pose-estimation with Apache License 2.0
def build_mv2_hourglass_model(number_of_keypoints):
    hourglass_stage_num = 4
    input_shape = (192, 192, 3)  # h, w, c
    input = layers.Input(shape=input_shape)

    ## HEADER
    # cnn with regularizer
    x = layers.Conv2D(filters=16, kernel_size=(3, 3), strides=(2, 2), padding='SAME', kernel_regularizer=l2_regularizer_00004)(input)
    # batch norm
    x = layers.BatchNormalization(momentum=0.999)(x)
    # activation
    x = layers.ReLU(max_value=6)(x)

    # 128, 112
    x = _inverted_bottleneck(x, up_channel_rate=1, channels=16, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=1, channels=16, is_subsample=False, kernel_size=3)

    # 64, 56
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=True, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)
    x = _inverted_bottleneck(x, up_channel_rate=6, channels=24, is_subsample=False, kernel_size=3)


    captured_h, captured_w = int(x.shape[1]), int(x.shape[2])
    print(f"captured_h, captured_w: {captured_h}, {captured_w}")

    # HOURGLASS recursively
    # stage = 4
    #

    x, middle_output_layers = _hourglass_module(x, stage_index=hourglass_stage_num, number_of_keypoints=number_of_keypoints)

    print("before")
    for l in middle_output_layers:
        print(f"  l.shape: {l.shape}")

    for layer_index, middle_layer in enumerate(middle_output_layers):
        layer_stage = layer_index + 1
        h, w = middle_layer.shape[1], middle_layer.shape[2]
        if h == captured_h and w == captured_w:
            continue
        else:
            upsampling_size = (captured_h // h, captured_w // w)
            middle_output_layers[layer_index] = layers.UpSampling2D(size=upsampling_size, interpolation='bilinear')(middle_layer)

    print("after")
    for l in middle_output_layers:
        print(f"  l.shape: {l.shape}")

    model = models.Model(input, outputs=middle_output_layers)
    return model 
Example #17
Source File: BiFPN.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def __init__(self, in_channels):
        '''Bi-directional feature pyramid network (BiFPN)
        Args:
          in_channels: (Variable) list of (width, channels) pairs, one per
                        backbone feature layer, in ascending order of width.
        e.g.
        if blocks 1, 2, 4, 7, 14 of MobileNetV2 are used,
        in_channels: [(10,160),(19,64),(38,32),(75,24),(150,32)]
        'in_channels' can be built with
        self.bb_size = [(output.shape.as_list()[1], output.shape.as_list()[3])
                            for output in self.backbone.outputs]
        '''

        super(BiFPN, self).__init__()
        self.epsilon = 0.0001
        self.input_layer_cnt = len(in_channels)
        in_wd, in_ch = zip(*in_channels)

        self.td_weights = []
        self.out_weights = []
        self.td_convs = []
        self.out_convs = []

        self.out_weights.append(tf.random.normal([3]))
        self.out_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[0], 3, padding='same'),
                                                   layers.BatchNormalization()]))
        for i in range(self.input_layer_cnt-2):
            self.td_weights.append(tf.random.normal([2]))
            self.td_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[i+1], 3, padding='same'),
                                                      layers.BatchNormalization()]))
            self.out_weights.append(tf.random.normal([3]))
            self.out_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[i+1], 3, padding='same'),
                                                       layers.BatchNormalization()]))
        self.td_weights.append(tf.random.normal([2]))
        self.td_convs.append(tf.keras.Sequential([layers.Conv2D(in_ch[-1], 3, padding='same'),
                                                   layers.BatchNormalization()]))

        self.upconvs = [tf.keras.Sequential([layers.UpSampling2D(u),
                                             layers.Conv2D(c, k, padding=pad)])
                        for u, c, k, pad in zip([2, 2, 2, 2],
                                                in_ch[1:],
                                                [2, 3, 2, 3],
                                                ['valid', 'same', 'valid', 'same'])]
        self.downconvs = [tf.keras.Sequential([layers.ZeroPadding2D(pad),
                                               layers.AveragePooling2D(p),
                                               layers.Conv2D(c, 3, padding='same')])
                          for c, p, pad in zip(in_ch[:-1],
                                               [2, 2, 2, 2],
                                               [1, 0, 1, 0])]
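The alternating kernel sizes and paddings in self.upconvs exist to reconcile odd feature-map widths. With the example widths from the docstring: 10 upsamples to 20 and a kernel-2 'valid' conv trims it to 19; 19 upsamples to 38 and a kernel-3 'same' conv keeps 38; likewise 38 -> 76 -> 75 and 75 -> 150 -> 150.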
Example #18
Source File: PSPNet-ResNet50.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def build_pspnet(num_classes):

    # Extract features with ResNet50
    inputs = Input(shape=INPUT_SHAPE)

    res_features = ResNet50(inputs)

    # Pyramid pooling
    x_c1 = AveragePooling2D(pool_size=60, strides=60, name='ave_c1')(res_features)
    x_c1 = Conv2D(filters=512, kernel_size=1, strides=1, padding='same', name='conv_c1')(x_c1)
    x_c1 = BatchNormalization(momentum=0.95, axis=-1)(x_c1)
    x_c1 = Activation(activation='relu')(x_c1)
    #x_c1 = Dropout(0.2)(x_c1)
    x_c1 = UpSampling2D(size=(60, 60), name='up_c1')(x_c1)

    x_c2 = AveragePooling2D(pool_size=30, strides=30, name='ave_c2')(res_features)
    x_c2 = Conv2D(filters=512, kernel_size=1, strides=1, padding='same', name='conv_c2')(x_c2)
    x_c2 = BatchNormalization(momentum=0.95, axis=-1)(x_c2)
    x_c2 = Activation(activation='relu')(x_c2)
    #x_c2 = Dropout(0.2)(x_c2)
    x_c2 = UpSampling2D(size=(30, 30), name='up_c2')(x_c2)

    x_c3 = AveragePooling2D(pool_size=20, strides=20, name='ave_c3')(res_features)
    x_c3 = Conv2D(filters=512, kernel_size=1, strides=1, padding='same', name='conv_c3')(x_c3)
    x_c3 = BatchNormalization(momentum=0.95, axis=-1)(x_c3)
    x_c3 = Activation(activation='relu')(x_c3)
    #x_c3 = Dropout(0.2)(x_c3)
    x_c3 = UpSampling2D(size=(20, 20), name='up_c3')(x_c3)

    x_c4 = AveragePooling2D(pool_size=10, strides=10, name='ave_c4')(res_features)
    x_c4 = Conv2D(filters=512, kernel_size=1, strides=1, padding='same', name='conv_c4')(x_c4)
    x_c4 = BatchNormalization(momentum=0.95, axis=-1)(x_c4)
    x_c4 = Activation(activation='relu')(x_c4)
    #x_c4 = Dropout(0.2)(x_c4)
    x_c4 = UpSampling2D(size=(10, 10), name='up_c4')(x_c4)

    x_c5 = Conv2D(filters=512, kernel_size=1, strides=1, name='conv_c5', padding='same')(res_features)
    x_c5 = BatchNormalization(momentum=0.95, axis=-1)(x_c5)
    x_c5 = Activation(activation='relu')(x_c5)
    #x_c5 = Dropout(0.2)(x_c5)

    x = Concatenate(axis=-1, name='concat')([x_c1, x_c2, x_c3, x_c4, x_c5])
    x = Conv2D(filters=512, kernel_size=3, strides=1, padding='same', name='sum_conv_1_11')(x)
    x = BatchNormalization(momentum=0.95, axis=-1)(x)
    x = Activation(activation='relu')(x)

    x = UpSampling2D(size=(4, 4))(x)
    # x = Conv2D(filters=256, kernel_size=3, strides=1, padding='same', name='sum_conv_1_21')(x)
    # x = BatchNormalization(momentum=0.95, axis=-1)(x)
    # x = Activation(activation='relu')(x)

    outputs = Conv2D(filters=num_classes, kernel_size=1, strides=1, padding='same', name='sum_conv_2', activation='softmax')(x)


    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()

    return model 
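The pyramid dimensions assume res_features is a 60x60 map: average pooling with sizes 60, 30, 20, and 10 produces the 1x1, 2x2, 3x3, and 6x6 bins of the original PSPNet, and each branch is upsampled straight back to 60x60 before the channel-wise concatenation.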
Example #19
Source File: model.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_fcn(input_shape,
              backbone,
              n_classes=4):
    """Helper function to build an FCN model.
        
    Arguments:
        backbone (Model): A backbone network
            such as ResNetv2 or v1
        n_classes (int): Number of object classes
            including background.
    """

    inputs = Input(shape=input_shape)
    features = backbone(inputs)

    main_feature = features[0]
    features = features[1:]
    out_features = [main_feature]
    feature_size = 8
    size = 2
    # other half of the features pyramid
    # including upsampling to restore the
    # feature maps to the dimensions
    # equal to 1/4 the image size
    for feature in features:
        postfix = "fcn_" + str(feature_size)
        feature = conv_layer(feature,
                             filters=256,
                             use_maxpool=False,
                             postfix=postfix)
        postfix = postfix + "_up2d"
        feature = UpSampling2D(size=size,
                               interpolation='bilinear',
                               name=postfix)(feature)
        size = size * 2
        feature_size = feature_size * 2
        out_features.append(feature)

    # concatenate all upsampled features
    x = Concatenate()(out_features)
    # perform 2 additional feature extraction 
    # and upsampling
    x = tconv_layer(x, 256, postfix="up_x2")
    x = tconv_layer(x, 256, postfix="up_x4")
    # generate the pixel-wise classifier
    x = Conv2DTranspose(filters=n_classes,
                        kernel_size=1,
                        strides=1,
                        padding='same',
                        kernel_initializer='he_normal',
                        name="pre_activation")(x)
    x = Softmax(name="segmentation")(x)

    model = Model(inputs, x, name="fcn")

    return model 
Example #20
Source File: Refinenet.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def build_refinenet(input_shape, num_class, resnet_weights = None,
                    frontend_trainable = True):
    """
    Builds the RefineNet model. 
    Arguments:
      input_shape: Size of input image, including number of channels
      num_classes: Number of classes
      resnet_weights: Path to pre-trained weights for ResNet-101
      frontend_trainable: Whether or not to freeze ResNet layers during training
    Returns:
      RefineNet model
    """
    
    # Build ResNet-101
    model_base = resnet101_model(input_shape, resnet_weights)

    # Get ResNet block output layers
    high = model_base.output
    low = [None, None, None]

    # Get the feature maps to the proper size with bottleneck
    high[0] = Conv2D(512, 1, padding='same', name='resnet_map1', kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[0])
    high[1] = Conv2D(256, 1, padding='same', name='resnet_map2', kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[1])
    high[2] = Conv2D(256, 1, padding='same', name='resnet_map3', kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[2])
    high[3] = Conv2D(256, 1, padding='same', name='resnet_map4', kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[3])
    # Assign back into the list; rebinding the loop variable alone is a no-op.
    for i, h in enumerate(high):
        high[i] = BatchNormalization()(h)

    # RefineNet
    low[0] = RefineBlock(high_inputs = high[0], low_inputs = None, block=4) # Only input ResNet 1/32
    low[1] = RefineBlock(high_inputs = high[1], low_inputs = low[0], block=3) # High input = ResNet 1/16, Low input = Previous 1/16
    low[2] = RefineBlock(high_inputs = high[2], low_inputs = low[1], block=2) # High input = ResNet 1/8, Low input = Previous 1/8
    net = RefineBlock(high_inputs = high[3], low_inputs = low[2], block=1) # High input = ResNet 1/4, Low input = Previous 1/4.

    net = ResidualConvUnit(net, name='rf_rcu_o1_')
    net = ResidualConvUnit(net, name='rf_rcu_o2_')
    
    net = UpSampling2D(size=4, interpolation='bilinear', name='rf_up_o')(net)
    net = Conv2D(num_class, 1, activation = 'softmax', name='rf_pred')(net)
    
    model = Model(model_base.input,net)
    
    for layer in model.layers:
        if 'rb' in layer.name or 'rf_' in layer.name:
            layer.trainable = True
        else:
            layer.trainable = frontend_trainable
    return model