Python tensorflow.keras.initializers.RandomNormal() Examples

The following are 10 code examples of tensorflow.keras.initializers.RandomNormal(), drawn from open-source projects; the project and source file for each example are noted above it. You may also want to check out all other available functions and classes of the module tensorflow.keras.initializers.
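
Before the project examples, here is a minimal standalone sketch (not taken from any project below) of what RandomNormal does: it fills a layer's weights with values drawn from a normal distribution with the given mean and standard deviation.

from tensorflow.keras import layers, initializers

init = initializers.RandomNormal(mean=0.0, stddev=0.02, seed=42)
dense = layers.Dense(10, kernel_initializer=init)
dense.build(input_shape=(None, 4))   # creates the kernel and bias weights
print(dense.kernel.shape)            # (4, 10); entries drawn from N(0.0, 0.02**2)
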
Example #1
Source File: main.py    From stacks-usecase with Apache License 2.0
def conv_lb(prev_layer, num_filters, layer_name, pad="same", batch_norm=True):
    """
    conv_lb
    Condenses several operations into one function for readability:
    performs a convolution, then batch normalization, then LeakyReLU.
    Input: single layer (prev_layer) along with constant parameters
    Output: single layer
    """
    weight_init = RandomNormal(stddev=0.02)
    new_layer = Conv2D(
        num_filters, FILTER, strides=STRIDE, padding=pad, kernel_initializer=weight_init
    )(prev_layer)
    if batch_norm:
        new_layer = BatchNormalization()(new_layer, training=True)
    new_layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA, name=layer_name)(new_layer)
    return new_layer 
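
A hedged usage sketch of conv_lb (assuming the Keras imports at the top of main.py): FILTER, STRIDE and LEAKY_RELU_ALPHA are module-level constants that this excerpt does not show, so the values below are illustrative assumptions only.

# Illustrative values only; the real constants are defined elsewhere in main.py.
FILTER = 4
STRIDE = 2
LEAKY_RELU_ALPHA = 0.2

inp = Input(shape=(256, 256, 3))
x = conv_lb(inp, 64, layer_name="layer_1", batch_norm=False)
# With strides=2 and padding="same", x has shape (None, 128, 128, 64).
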
Example #2
Source File: main.py    From stacks-usecase with Apache License 2.0
def deconv_b(prev_layer, num_filters, batch_norm=True):
    """
    deconv_b
    Condenses several operations into one function for readability:
    performs a transposed convolution (with ReLU activation), then batch normalization.
    Input: single layer (prev_layer) along with constant parameters
    Output: single layer
    """
    weight_init = RandomNormal(stddev=0.02)
    new_layer = Conv2DTranspose(
        num_filters,
        FILTER,
        strides=STRIDE,
        padding="same",
        activation="relu",
        kernel_initializer=weight_init,
    )(prev_layer)
    if batch_norm:
        new_layer = BatchNormalization()(new_layer, training=True)
    return new_layer 
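
A hedged sketch of the spatial effect of deconv_b, under the same illustrative FILTER = 4 and STRIDE = 2 assumptions as above:

low_res = Input(shape=(16, 16, 512))
up = deconv_b(low_res, 256)
# Conv2DTranspose with strides=2 and padding="same" doubles the spatial size:
# up has shape (None, 32, 32, 256).
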
Example #3
Source File: attn_augconv.py    From keras-attention-augmented-convs with MIT License
def build(self, input_shape):
        self._shape = input_shape

        # normalize the format of depth_v and depth_k
        self.depth_k, self.depth_v = _normalize_depth_vars(self.depth_k, self.depth_v,
                                                           input_shape)

        if self.axis == 1:
            _, channels, height, width = input_shape
        else:
            _, height, width, channels = input_shape

        if self.relative:
            dk_per_head = self.depth_k // self.num_heads

            if dk_per_head == 0:
                # depth_k is smaller than num_heads; the scaled stddev below would divide by zero
                print('dk per head', dk_per_head)

            # Relative position embeddings for width and height, initialized with
            # stddev = 1 / sqrt(d_k per head).
            self.key_relative_w = self.add_weight('key_rel_w',
                                                  shape=[2 * width - 1, dk_per_head],
                                                  initializer=initializers.RandomNormal(
                                                      stddev=dk_per_head ** -0.5))

            self.key_relative_h = self.add_weight('key_rel_h',
                                                  shape=[2 * height - 1, dk_per_head],
                                                  initializer=initializers.RandomNormal(
                                                      stddev=dk_per_head ** -0.5))

        else:
            self.key_relative_w = None
            self.key_relative_h = None 
Example #4
Source File: main.py    From stacks-usecase with Apache License 2.0
def concat_deconv(prev_layer, skip_layer, num_filters, batch_norm=True, dropout=True):
    """
    concat_deconv
    Condenses several operations into one function for readability:
    performs a transposed convolution, concatenates the result with a skip
    connection, then applies batch normalization (if batch_norm=True) and
    dropout (if dropout=True).
    Input: two layers (prev_layer, skip_layer) along with constant parameters
    Output: single layer
    """
    weight_init = RandomNormal(stddev=0.02)
    new_layer = Conv2DTranspose(
        num_filters,
        FILTER,
        strides=STRIDE,
        padding="same",
        activation="relu",
        kernel_initializer=weight_init,
    )(prev_layer)
    new_layer = Concatenate()([skip_layer, new_layer])
    if batch_norm:
        new_layer = BatchNormalization()(new_layer, training=True)
    if dropout:
        new_layer = Dropout(rate=DROPOUT_RATE)(new_layer, training=True)
    return new_layer
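
A hedged sketch of concat_deconv under the same illustrative constants as above (DROPOUT_RATE is another module-level constant not shown in this excerpt); the key point is that the upsampled decoder features are concatenated with the encoder skip features along the channel axis.

prev = Input(shape=(8, 8, 512))     # decoder feature map
skip = Input(shape=(16, 16, 512))   # matching encoder feature map
merged = concat_deconv(prev, skip, 512)
# prev is upsampled to (16, 16, 512) and concatenated with skip along the
# channel axis: merged has shape (None, 16, 16, 1024).
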


###########################################################
# Generator, U-net
########################################################### 
Example #5
Source File: main.py    From stacks-usecase with Apache License 2.0
def discriminator(summary=False):
    """
    Decides whether an image is real or generated. Used in
    training the generator.
    """
    input_img = Input(shape=IMAGE_SIZE)  # image put into generator
    unknown_img = Input(shape=IMAGE_SIZE)  # either real image or generated image
    weight_init = RandomNormal(stddev=0.02)

    input_tensor = Concatenate()([input_img, unknown_img])
    d = conv_lb(input_tensor, 64, layer_name="layer_1", batch_norm=False)
    d = conv_lb(d, 128, layer_name="layer_2")
    d = conv_lb(d, 256, layer_name="layer_3")
    d = conv_lb(d, 512, layer_name="layer_4")
    d = Conv2D(
        1,
        FILTER,
        padding="same",
        kernel_initializer=weight_init,
        activation="sigmoid",
        name="layer_6",
    )(d)

    # Define discriminator model
    dis_model = Model(inputs=[input_img, unknown_img], outputs=d, name="Discriminator")
    if summary:
        dis_model.summary()
    return dis_model
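
A hedged sketch of building and compiling this discriminator: IMAGE_SIZE is a module-level constant in main.py, assumed here to be (256, 256, 3), and the binary cross-entropy loss is a typical choice rather than one mandated by this excerpt.

dis_model = discriminator(summary=True)
dis_model.compile(optimizer="adam", loss="binary_crossentropy")
# With a (256, 256, 3) IMAGE_SIZE and the illustrative STRIDE = 2, the four
# conv_lb blocks reduce 256 -> 16, so the output is a 16x16x1 patch map of
# sigmoid scores (a PatchGAN-style discriminator).
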


###########################################################
# General Utility Functions
########################################################### 
Example #6
Source File: model.py    From EfficientDet with Apache License 2.0
def __init__(self, width, depth, num_anchors=9, separable_conv=True, freeze_bn=False, detect_quadrangle=False, **kwargs):
        super(BoxNet, self).__init__(**kwargs)
        self.width = width
        self.depth = depth
        self.num_anchors = num_anchors
        self.separable_conv = separable_conv
        self.detect_quadrangle = detect_quadrangle
        num_values = 9 if detect_quadrangle else 4
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'bias_initializer': 'zeros',
        }
        if separable_conv:
            kernel_initializer = {
                'depthwise_initializer': initializers.VarianceScaling(),
                'pointwise_initializer': initializers.VarianceScaling(),
            }
            options.update(kernel_initializer)
            self.convs = [layers.SeparableConv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in
                          range(depth)]
            self.head = layers.SeparableConv2D(filters=num_anchors * num_values,
                                               name=f'{self.name}/box-predict', **options)
        else:
            kernel_initializer = {
                'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
            }
            options.update(kernel_initializer)
            self.convs = [layers.Conv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in range(depth)]
            self.head = layers.Conv2D(filters=num_anchors * num_values, name=f'{self.name}/box-predict', **options)
        self.bns = [
            [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/box-{i}-bn-{j}') for j in
             range(3, 8)]
            for i in range(depth)]
        # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
        #             for i in range(depth)]
        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, num_values))
        self.level = 0 
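
A hedged instantiation sketch: BoxNet is assumed to subclass a Keras Model or Layer elsewhere in model.py, and MOMENTUM and EPSILON are module-level constants not shown here; the hyperparameter values below are illustrative only.

box_net = BoxNet(width=64, depth=3, num_anchors=9,
                 separable_conv=False, detect_quadrangle=False, name='box_net')
# With separable_conv=False, every Conv2D kernel is drawn from
# RandomNormal(mean=0.0, stddev=0.01), and the head predicts
# num_anchors * 4 regression values per spatial location.
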
Example #7
Source File: networks.py    From brainstorm with MIT License
def cvpr2018_net(vol_size, enc_nf, dec_nf, indexing='ij', name="voxelmorph"):
    """
    From https://github.com/voxelmorph/voxelmorph.

    U-net architecture for the voxelmorph models presented in the CVPR 2018 paper.
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6 (like voxelmorph-1) or 1x7 (voxelmorph-2)
    :return: the keras model
    """
    import tensorflow.keras.layers as KL

    ndims = len(vol_size)
    assert ndims==3, "ndims should be 3. found: %d" % ndims

    src = Input(vol_size + (1,), name='input_src')
    tgt = Input(vol_size + (1,), name='input_tgt')

    input_stack = Concatenate(name='concat_inputs')([src, tgt])

    # get the core model
    x = unet3D(input_stack, img_shape=vol_size, out_im_chans=ndims, nf_enc=enc_nf, nf_dec=dec_nf)

    # transform the results into a flow field.
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow = Conv(ndims, kernel_size=3, padding='same', name='flow',
                  kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)

    # warp the source with the flow
    y = SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow])
    # prepare model
    model = Model(inputs=[src, tgt], outputs=[y, flow], name=name)
    return model
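
A hedged usage sketch: unet3D and SpatialTransformer are defined elsewhere in networks.py, and the filter lists below follow the commonly used voxelmorph-2 settings rather than anything required by this function.

vol_size = (160, 192, 224)
enc_nf = [16, 32, 32, 32]
dec_nf = [32, 32, 32, 32, 32, 16, 16]
model = cvpr2018_net(vol_size, enc_nf, dec_nf)
# model maps [moving, fixed] volumes to [warped_moving, flow], where flow is a
# 3-channel displacement field whose conv kernel is initialized near zero
# (RandomNormal with stddev=1e-5).
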


##############################################################################
# Appearance transform model
############################################################################## 
Example #8
Source File: HRNet.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def conv(x, outsize, kernel_size, strides_=1, padding_='same', activation=None):
    return Conv2D(outsize, kernel_size, strides=strides_, padding=padding_, kernel_initializer=RandomNormal(
        stddev=0.001), use_bias=False, activation=activation)(x) 
Example #9
Source File: main.py    From stacks-usecase with Apache License 2.0
def generator(summary=False):
    """
    Generates an image based on the input. Uses a U-net.
    Training is focused on making the generator
    as good as possible, because the generator
    is used in inference.

    variable legend:
        e = encoder
        s = center layer
        d = decoder
        # (i.e. 1, 2, 3, etc.) = layer number
        a = activation
        b = batch normalization
        c = a concatenated layer
    So d3ab is the layer 3 decoder that has gone
    through activation and batch normalization.
    """
    # -----------------------------------------------------------
    # Encoder
    input_tensor = Input(shape=IMAGE_SIZE)
    e1a = conv_lb(input_tensor, 64, layer_name="layer_1", batch_norm=False)
    e2ba = conv_lb(e1a, 128, layer_name="layer_2")
    e3ba = conv_lb(e2ba, 256, layer_name="layer_3")
    e4ba = conv_lb(e3ba, 512, layer_name="layer_4")
    e5ba = conv_lb(e4ba, 512, layer_name="layer_5")
    e6ba = conv_lb(e5ba, 512, layer_name="layer_6")
    e7ba = conv_lb(e6ba, 512, layer_name="layer_7")
    # -----------------------------------------------------------
    # Center layer
    s8ba = conv_lb(e7ba, 512, layer_name="middle_layer", batch_norm=False)
    # -----------------------------------------------------------
    # Decoder
    d9cba = concat_deconv(s8ba, e7ba, 512)
    d10cba = concat_deconv(d9cba, e6ba, 512)
    d11cba = concat_deconv(d10cba, e5ba, 512)
    d12cba = concat_deconv(d11cba, e4ba, 512, dropout=False)
    d13cba = concat_deconv(d12cba, e3ba, 256, dropout=False)
    d14cba = concat_deconv(d13cba, e2ba, 128, dropout=False)
    d15cba = concat_deconv(d14cba, e1a, 64, dropout=False)
    d16ba = Conv2DTranspose(
        3,
        FILTER,
        strides=STRIDE,
        padding="same",
        activation="tanh",
        kernel_initializer=RandomNormal(stddev=0.02),
    )(d15cba)
    # Define generator model
    gen_model = Model(input_tensor, d16ba, name="Generator")
    if summary:
        gen_model.summary()
    return gen_model
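
A hedged sketch of using the generator, under the same IMAGE_SIZE = (256, 256, 3) and STRIDE = 2 assumptions as above:

gen_model = generator(summary=True)
# With those assumptions, the encoder downsamples 256 -> 1 at the bottleneck
# and the decoder upsamples back to 256; the final Conv2DTranspose uses tanh,
# so generated pixel values lie in [-1, 1].
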


###########################################################
# Discriminator
########################################################### 
Example #10
Source File: model.py    From EfficientDet with Apache License 2.0
def __init__(self, width, depth, num_classes=20, num_anchors=9, separable_conv=True, freeze_bn=False, **kwargs):
        super(ClassNet, self).__init__(**kwargs)
        self.width = width
        self.depth = depth
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        self.separable_conv = separable_conv
        options = {
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
        }
        if self.separable_conv:
            kernel_initializer = {
                'depthwise_initializer': initializers.VarianceScaling(),
                'pointwise_initializer': initializers.VarianceScaling(),
            }
            options.update(kernel_initializer)
            self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
                                                 **options)
                          for i in range(depth)]
            self.head = layers.SeparableConv2D(filters=num_classes * num_anchors,
                                               bias_initializer=PriorProbability(probability=0.01),
                                               name=f'{self.name}/class-predict', **options)
        else:
            kernel_initializer = {
                'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
            }
            options.update(kernel_initializer)
            self.convs = [layers.Conv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
                                        **options)
                          for i in range(depth)]
            self.head = layers.Conv2D(filters=num_classes * num_anchors,
                                      bias_initializer=PriorProbability(probability=0.01),
                                      name='class-predict', **options)
        self.bns = [
            [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}') for j
             in range(3, 8)]
            for i in range(depth)]
        # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/class-{i}-bn-{j}') for j in range(3, 8)]
        #             for i in range(depth)]
        self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
        self.reshape = layers.Reshape((-1, num_classes))
        self.activation = layers.Activation('sigmoid')
        self.level = 0