Python tensorflow.keras.layers.Conv3D() Examples

The following are 23 code examples of tensorflow.keras.layers.Conv3D(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.layers, or try the search function.
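As a quick orientation before the project examples: Conv3D expects a 5D input of shape (batch, depth, height, width, channels) and slides a volumetric kernel over the three spatial dimensions. A minimal, self-contained sketch (the shapes and filter counts here are illustrative, not taken from any example below):

import tensorflow as tf
from tensorflow.keras import layers

# 5D input: (batch, depth, height, width, channels)
volume = tf.random.normal((1, 16, 64, 64, 1))

# 8 filters with a 3x3x3 kernel; 'same' padding preserves the spatial dims
conv = layers.Conv3D(8, kernel_size=3, padding="same", activation="relu")
print(conv(volume).shape)  # (1, 16, 64, 64, 8)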
Example #1
Source File: dropout_vnet.py    From bcnn with MIT License
def up_stage(inputs, skip, filters, kernel_size=3,
             activation="relu", padding="SAME"):
    up = UpSampling3D()(inputs)
    up = Conv3D(filters, 2, activation=activation, padding=padding)(up)
    up = GroupNormalization()(up)

    merge = concatenate([skip, up])
    merge = GroupNormalization()(merge)

    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(merge)
    conv = GroupNormalization()(conv)
    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)
    conv = SpatialDropout3D(0.5)(conv, training=True)

    return conv 
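Note the training=True argument to SpatialDropout3D above: it keeps dropout active even at inference time, which is how the bcnn project draws Monte Carlo samples for uncertainty estimation. A minimal sampling sketch under that assumption (model and volume are hypothetical placeholders for a compiled network built from these stages and an input batch):

import numpy as np

# Because dropout was applied with training=True, repeated forward passes
# differ, and their spread approximates predictive uncertainty.
samples = np.stack([model.predict(volume) for _ in range(20)])
mean_prediction = samples.mean(axis=0)  # segmentation estimate
uncertainty = samples.std(axis=0)       # per-voxel uncertainty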
Example #2
Source File: RAUNet-3D.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def residual_block(input, input_channels=None, output_channels=None, kernel_size=(3, 3, 3), stride=1, name='out'):
    """
    full pre-activation residual block
    https://arxiv.org/pdf/1603.05027.pdf
    """
    if output_channels is None:
        output_channels = input.get_shape()[-1]
    if input_channels is None:
        input_channels = output_channels // 4

    strides = (stride, stride, stride)

    x = BatchNormalization()(input)
    x = Activation('relu')(x)
    x = Conv3D(input_channels, (1, 1, 1))(x)

    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv3D(input_channels, kernel_size, padding='same', strides=strides)(x)

    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv3D(output_channels, (1, 1, 1), padding='same')(x)

    if input_channels != output_channels or stride != 1:
        input = Conv3D(output_channels, (1, 1, 1), padding='same', strides=strides)(input)
    if name == 'out':
        x = add([x, input])
    else:
        x = add([x, input], name=name)
    return x 
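A quick functional-API usage sketch for the block above (the input shape is illustrative; with matching channel counts and stride 1, the shortcut needs no projection):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(32, 32, 32, 64))
# Bottleneck to 16 channels internally, project back to 64 on the way out
outputs = residual_block(inputs, input_channels=16, output_channels=64)
model = Model(inputs, outputs)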
Example #3
Source File: dropout_vnet.py    From bcnn with MIT License
def down_stage(inputs, filters, kernel_size=3,
               activation="relu", padding="SAME"):
    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(inputs)
    conv = GroupNormalization()(conv)
    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)
    pool = MaxPooling3D()(conv)
    return conv, pool 
Example #4
Source File: standard.py    From MIScnn with GNU General Public License v3.0
def expanding_layer_3D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    up = concatenate([Conv3DTranspose(neurons, (2, 2, 2), strides=(2, 2, 2),
                     padding='same')(input), concatenate_link], axis=4)
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(up)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    return conv2 
Example #5
Source File: standard.py    From MIScnn with GNU General Public License v3.0
def contracting_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    return pool, conv2

Example #6
Source File: standard.py    From MIScnn with GNU General Public License v3.0
def create_model_3D(self, input_shape, n_labels=2):
        # Input layer
        inputs = Input(input_shape)
        # Start the CNN model chain by adding the inputs as the first tensor
        cnn_chain = inputs
        # Cache contracting normalized conv layers
        # for later copy & concatenate links
        contracting_convs = []

        # Contracting Layers
        for i in range(0, self.depth):
            neurons = self.n_filters * 2**i
            cnn_chain, last_conv = contracting_layer_3D(cnn_chain, neurons,
                                                        self.ba_norm,
                                                        self.ba_norm_momentum)
            contracting_convs.append(last_conv)

        # Middle Layer
        neurons = self.n_filters * 2**self.depth
        cnn_chain = middle_layer_3D(cnn_chain, neurons, self.ba_norm,
                                    self.ba_norm_momentum)

        # Expanding Layers
        for i in reversed(range(0, self.depth)):
            neurons = self.n_filters * 2**i
            cnn_chain = expanding_layer_3D(cnn_chain, neurons,
                                           contracting_convs[i], self.ba_norm,
                                           self.ba_norm_momentum)

        # Output Layer
        conv_out = Conv3D(n_labels, (1, 1, 1),
                          activation=self.activation)(cnn_chain)
        # Create Model with associated input and output layers
        model = Model(inputs=[inputs], outputs=[conv_out])
        # Return model
        return model

Example #7
Source File: plain.py    From MIScnn with GNU General Public License v3.0
def conv_layer_3D(input, neurons, ba_norm, strides=1):
    conv = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same',
                  strides=strides)(input)
    if ba_norm: conv = BatchNormalization(momentum=0.99)(conv)
    return conv 
Example #8
Source File: multiRes.py    From MIScnn with GNU General Public License v3.0
def conv3d_bn(x, filters, num_row, num_col, num_z, padding='same', strides=(1, 1, 1), activation='relu', name=None):
    '''
    3D Convolutional layers

    Arguments:
        x {keras layer} -- input layer
        filters {int} -- number of filters
        num_row {int} -- number of rows in filters
        num_col {int} -- number of columns in filters
        num_z {int} -- length along z axis in filters
    Keyword Arguments:
        padding {str} -- mode of padding (default: {'same'})
        strides {tuple} -- stride of convolution operation (default: {(1, 1, 1)})
        activation {str} -- activation function (default: {'relu'})
        name {str} -- name of the layer (default: {None})

    Returns:
        [keras layer] -- [output layer]
    '''

    x = Conv3D(filters, (num_row, num_col, num_z), strides=strides, padding=padding, use_bias=False)(x)
    x = BatchNormalization(axis=4, scale=False)(x)

    if activation is None:
        return x

    x = Activation(activation, name=name)(x)
    return x 
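A usage sketch for the helper above (shapes are illustrative; the separate num_row/num_col/num_z arguments simply form the 3D kernel tuple):

from tensorflow.keras.layers import Input

x = Input(shape=(32, 32, 32, 1))
# 16 filters with a 3x3x3 kernel, batch-normalized and ReLU-activated
x = conv3d_bn(x, filters=16, num_row=3, num_col=3, num_z=3)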
Example #9
Source File: residual.py    From MIScnn with GNU General Public License v3.0
def middle_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv_m1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv_m1 = BatchNormalization(momentum=ba_norm_momentum)(conv_m1)
    conv_m2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv_m1)
    if ba_norm: conv_m2 = BatchNormalization(momentum=ba_norm_momentum)(conv_m2)
    shortcut = Conv3D(neurons, (1, 1, 1), activation='relu', padding='same')(input)
    add_layer = add([shortcut, conv_m2])
    return add_layer

Example #10
Source File: residual.py    From MIScnn with GNU General Public License v3.0
def contracting_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    shortcut = Conv3D(neurons, (1, 1, 1), activation='relu', padding='same')(input)
    add_layer = add([shortcut, conv2])
    pool = MaxPooling3D(pool_size=(2, 2, 2))(add_layer)
    return pool, add_layer

Example #11
Source File: residual.py    From MIScnn with GNU General Public License v3.0
def create_model_3D(self, input_shape, n_labels=2):
        # Input layer
        inputs = Input(input_shape)
        # Start the CNN model chain by adding the inputs as the first tensor
        cnn_chain = inputs
        # Cache contracting normalized conv layers
        # for later copy & concatenate links
        contracting_convs = []

        # Contracting Layers
        for i in range(0, self.depth):
            neurons = self.n_filters * 2**i
            cnn_chain, last_conv = contracting_layer_3D(cnn_chain, neurons,
                                                        self.ba_norm,
                                                        self.ba_norm_momentum)
            contracting_convs.append(last_conv)

        # Middle Layer
        neurons = self.n_filters * 2**self.depth
        cnn_chain = middle_layer_3D(cnn_chain, neurons, self.ba_norm,
                                    self.ba_norm_momentum)

        # Expanding Layers
        for i in reversed(range(0, self.depth)):
            neurons = self.n_filters * 2**i
            cnn_chain = expanding_layer_3D(cnn_chain, neurons,
                                           contracting_convs[i], self.ba_norm,
                                           self.ba_norm_momentum)

        # Output Layer
        conv_out = Conv3D(n_labels, (1, 1, 1),
                          activation=self.activation)(cnn_chain)
        # Create Model with associated input and output layers
        model = Model(inputs=[inputs], outputs=[conv_out])
        # Return model
        return model

Example #12
Source File: compact.py    From MIScnn with GNU General Public License v3.0
def expanding_layer_3D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    up = concatenate([Conv3DTranspose(neurons, (2, 2, 2), strides=(2, 2, 2),
                     padding='same')(input), concatenate_link], axis=4)
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(up)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    conc = concatenate([up, conv2], axis=-1)
    return conc 
Example #13
Source File: compact.py    From MIScnn with GNU General Public License v3.0
def middle_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv_m1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv_m1 = BatchNormalization(momentum=ba_norm_momentum)(conv_m1)
    conv_m2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conv_m1)
    if ba_norm: conv_m2 = BatchNormalization(momentum=ba_norm_momentum)(conv_m2)
    conc = concatenate([input, conv_m2], axis=-1)
    return conc

Example #14
Source File: compact.py    From MIScnn with GNU General Public License v3.0
def create_model_3D(self, input_shape, n_labels=2):
        # Input layer
        inputs = Input(input_shape)
        # Start the CNN model chain by adding the inputs as the first tensor
        cnn_chain = inputs
        # Cache contracting normalized conv layers
        # for later copy & concatenate links
        contracting_convs = []

        # Contracting Layers
        for i in range(0, self.depth):
            neurons = self.n_filters * 2**i
            cnn_chain, last_conv = contracting_layer_3D(cnn_chain, neurons,
                                                        self.ba_norm,
                                                        self.ba_norm_momentum)
            contracting_convs.append(last_conv)

        # Middle Layer
        neurons = self.n_filters * 2**self.depth
        cnn_chain = middle_layer_3D(cnn_chain, neurons, self.ba_norm,
                                    self.ba_norm_momentum)

        # Expanding Layers
        for i in reversed(range(0, self.depth)):
            neurons = self.n_filters * 2**i
            cnn_chain = expanding_layer_3D(cnn_chain, neurons,
                                           contracting_convs[i], self.ba_norm,
                                           self.ba_norm_momentum)

        # Output Layer
        conv_out = Conv3D(n_labels, (1, 1, 1),
                          activation=self.activation)(cnn_chain)
        # Create Model with associated input and output layers
        model = Model(inputs=[inputs], outputs=[conv_out])
        # Return model
        return model

Example #15
Source File: dense.py    From MIScnn with GNU General Public License v3.0
def expanding_layer_3D(input, neurons, concatenate_link, ba_norm,
                       ba_norm_momentum):
    up = concatenate([Conv3DTranspose(neurons, (2, 2, 2), strides=(2, 2, 2),
                     padding='same')(input), concatenate_link], axis=4)
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(up)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conc1 = concatenate([up, conv1], axis=-1)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conc1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    conc2 = concatenate([up, conv2], axis=-1)
    return conc2 
Example #16
Source File: dense.py    From MIScnn with GNU General Public License v3.0
def middle_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv_m1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv_m1 = BatchNormalization(momentum=ba_norm_momentum)(conv_m1)
    conc1 = concatenate([input, conv_m1], axis=-1)
    conv_m2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conc1)
    if ba_norm: conv_m2 = BatchNormalization(momentum=ba_norm_momentum)(conv_m2)
    conc2 = concatenate([input, conv_m2], axis=-1)
    return conc2

Example #17
Source File: dense.py    From MIScnn with GNU General Public License v3.0
def contracting_layer_3D(input, neurons, ba_norm, ba_norm_momentum):
    conv1 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(input)
    if ba_norm: conv1 = BatchNormalization(momentum=ba_norm_momentum)(conv1)
    conc1 = concatenate([input, conv1], axis=-1)
    conv2 = Conv3D(neurons, (3, 3, 3), activation='relu', padding='same')(conc1)
    if ba_norm: conv2 = BatchNormalization(momentum=ba_norm_momentum)(conv2)
    conc2 = concatenate([input, conv2], axis=-1)
    pool = MaxPooling3D(pool_size=(2, 2, 2))(conc2)
    return pool, conc2

Example #18
Source File: dense.py    From MIScnn with GNU General Public License v3.0
def create_model_3D(self, input_shape, n_labels=2):
        # Input layer
        inputs = Input(input_shape)
        # Start the CNN model chain by adding the inputs as the first tensor
        cnn_chain = inputs
        # Cache contracting normalized conv layers
        # for later copy & concatenate links
        contracting_convs = []

        # Contracting Layers
        for i in range(0, self.depth):
            neurons = self.n_filters * 2**i
            cnn_chain, last_conv = contracting_layer_3D(cnn_chain, neurons,
                                                        self.ba_norm,
                                                        self.ba_norm_momentum)
            contracting_convs.append(last_conv)

        # Middle Layer
        neurons = self.n_filters * 2**self.depth
        cnn_chain = middle_layer_3D(cnn_chain, neurons, self.ba_norm,
                                    self.ba_norm_momentum)

        # Expanding Layers
        for i in reversed(range(0, self.depth)):
            neurons = self.n_filters * 2**i
            cnn_chain = expanding_layer_3D(cnn_chain, neurons,
                                           contracting_convs[i], self.ba_norm,
                                           self.ba_norm_momentum)

        # Output Layer
        conv_out = Conv3D(n_labels, (1, 1, 1),
                          activation=self.activation)(cnn_chain)
        # Create Model with associated input and output layers
        model = Model(inputs=[inputs], outputs=[conv_out])
        # Return model
        return model

Example #19
Source File: bayesian_vnet.py    From bcnn with MIT License
def down_stage(inputs, filters, kernel_size=3,
               activation="relu", padding="SAME"):
    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(inputs)
    conv = GroupNormalization()(conv)
    conv = Conv3D(filters, kernel_size,
                  activation=activation, padding=padding)(conv)
    conv = GroupNormalization()(conv)
    pool = MaxPooling3D()(conv)
    return conv, pool 
Example #20
Source File: dropout_vnet.py    From bcnn with MIT License
def end_stage(inputs, kernel_size=3, activation="relu", padding="SAME"):
    conv = Conv3D(1, kernel_size, activation=activation, padding=padding)(inputs)
    conv = Conv3D(1, 1, activation="sigmoid")(conv)

    return conv 
Example #21
Source File: plain.py    From MIScnn with GNU General Public License v3.0
def create_model_3D(self, input_shape, n_labels=2):
        # Input layer
        inputs = Input(input_shape)
        # Start the CNN model chain by adding the inputs as the first tensor
        cnn_chain = inputs
        # Cache contracting normalized conv layers
        # for later copy & concatenate links
        contracting_convs = []

        # First contracting layer
        neurons = self.feature_map[0]
        cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)
        cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)
        contracting_convs.append(cnn_chain)
        cnn_chain = MaxPooling3D(pool_size=(1, 2, 2))(cnn_chain)

        # Remaining contracting layers
        for i in range(1, len(self.feature_map)):
            neurons = self.feature_map[i]
            cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)
            cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)
            contracting_convs.append(cnn_chain)
            cnn_chain = MaxPooling3D(pool_size=(2, 2, 2))(cnn_chain)

        # Middle Layer
        neurons = self.feature_map[-1]
        cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)
        cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)

        # Expanding Layers except last layer
        for i in reversed(range(1, len(self.feature_map))):
            neurons = self.feature_map[i]
            cnn_chain = Conv3DTranspose(neurons, (2, 2, 2), strides=(2, 2, 2),
                                        padding='same')(cnn_chain)
            cnn_chain = concatenate([cnn_chain, contracting_convs[i]], axis=-1)
            cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)
            cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)

        # Last expanding layer
        neurons = self.feature_map[0]
        cnn_chain = Conv3DTranspose(neurons, (1, 2, 2), strides=(1, 2, 2),
                                    padding='same')(cnn_chain)
        cnn_chain = concatenate([cnn_chain, contracting_convs[0]], axis=-1)
        cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)
        cnn_chain = conv_layer_3D(cnn_chain, neurons, self.ba_norm, strides=1)

        # Output Layer
        conv_out = Conv3D(n_labels, (1, 1, 1), activation=self.activation)(cnn_chain)
        # Create Model with associated input and output layers
        model = Model(inputs=[inputs], outputs=[conv_out])
        # Return model
        return model

Example #22
Source File: highresnet.py    From nobrainer with Apache License 2.0
def highresnet(
    n_classes, input_shape, activation="relu", dropout_rate=0, name="highresnet"
):
    """Instantiate HighResNet model."""

    conv_kwds = {"kernel_size": (3, 3, 3), "padding": "same"}

    n_base_filters = 16

    inputs = layers.Input(shape=input_shape)
    x = layers.Conv3D(n_base_filters, **conv_kwds)(inputs)

    for ii in range(3):
        skip = x
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation)(x)
        x = layers.Conv3D(n_base_filters, **conv_kwds)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation)(x)
        x = layers.Conv3D(n_base_filters, **conv_kwds)(x)
        x = layers.Add()([x, skip])

    x = ZeroPadding3DChannels(8)(x)
    for ii in range(3):
        skip = x
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation)(x)
        x = layers.Conv3D(n_base_filters * 2, dilation_rate=2, **conv_kwds)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation)(x)
        x = layers.Conv3D(n_base_filters * 2, dilation_rate=2, **conv_kwds)(x)
        x = layers.Add()([x, skip])

    x = ZeroPadding3DChannels(16)(x)
    for ii in range(3):
        skip = x
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation)(x)
        x = layers.Conv3D(n_base_filters * 4, dilation_rate=4, **conv_kwds)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation)(x)
        x = layers.Conv3D(n_base_filters * 4, dilation_rate=4, **conv_kwds)(x)
        x = layers.Add()([x, skip])

    x = layers.Conv3D(filters=n_classes, kernel_size=(1, 1, 1), padding="same")(x)

    final_activation = "sigmoid" if n_classes == 1 else "softmax"
    x = layers.Activation(final_activation)(x)

    # QUESTION: where should dropout go?

    return tf.keras.Model(inputs=inputs, outputs=x, name=name) 
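A usage sketch (note that ZeroPadding3DChannels is a custom helper defined elsewhere in nobrainer, so this only runs with that module's helpers in scope; the shape is illustrative):

model = highresnet(n_classes=1, input_shape=(64, 64, 64, 1))
model.compile(optimizer="adam", loss="binary_crossentropy")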
Example #23
Source File: networks.py    From brainstorm with MIT License
def unet3D(x_in,
           img_shape, out_im_chans,
           nf_enc=[64, 64, 128, 128, 256, 256, 512],
           nf_dec=None,
           layer_prefix='unet',
           n_convs_per_stage=1,
        ):
    ks = 3
    x = x_in

    encodings = []
    encoding_vol_sizes = []
    for i in range(len(nf_enc)):
        for j in range(n_convs_per_stage):
            x = Conv3D(
                nf_enc[i],
                kernel_size=ks,
                strides=(1, 1, 1), padding='same',
                name='{}_enc_conv3D_{}_{}'.format(layer_prefix, i, j + 1))(x)
            x = LeakyReLU(0.2)(x)

        encodings.append(x)
        encoding_vol_sizes.append(np.asarray(x.get_shape().as_list()[1:-1]))

        if i < len(nf_enc) - 1:
            x = MaxPooling3D(pool_size=(2, 2, 2), padding='same', name='{}_enc_maxpool_{}'.format(layer_prefix, i))(x)

    if nf_dec is None:
        nf_dec = list(reversed(nf_enc[1:]))

    for i in range(len(nf_dec)):
        curr_shape = x.get_shape().as_list()[1:-1]

        # only do upsample if we are not yet at max resolution
        if np.any(np.asarray(curr_shape) < np.asarray(img_shape[:len(curr_shape)])):
            us = (2, 2, 2)
            x = UpSampling3D(size=us, name='{}_dec_upsamp_{}'.format(layer_prefix, i))(x)

        # just concatenate the final layer here
        if i <= len(encodings) - 2:
            x = _pad_or_crop_to_shape_3D(x, np.asarray(x.get_shape().as_list()[1:-1]), encoding_vol_sizes[-i-2])
            x = Concatenate(axis=-1)([x, encodings[-i-2]])

        for j in range(n_convs_per_stage):
            x = Conv3D(nf_dec[i],
                       kernel_size=ks, strides=(1, 1, 1), padding='same',
                       name='{}_dec_conv3D_{}_{}'.format(layer_prefix, i, j))(x)
            x = LeakyReLU(0.2)(x)

    y = Conv3D(out_im_chans, kernel_size=1, padding='same',
               name='{}_dec_conv3D_final'.format(layer_prefix))(x)

    # add your own activation after this model
    return y
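A usage sketch wrapping the function in a Model (shapes and filter counts are illustrative; it assumes the module's _pad_or_crop_to_shape_3D helper is available alongside unet3D):

from tensorflow.keras.layers import Input, Activation
from tensorflow.keras.models import Model

img_shape = (64, 64, 64)
x_in = Input(shape=img_shape + (1,))
y = unet3D(x_in, img_shape=img_shape, out_im_chans=1,
           nf_enc=[16, 32, 64], n_convs_per_stage=2)
y = Activation('sigmoid')(y)  # per the comment above, add your own activation
model = Model(x_in, y)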