Python keras.layers.ReLU() Examples
The following are 25 code examples of keras.layers.ReLU().
You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.layers, or try the search function.
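Before diving into the project examples, here is a minimal standalone sketch of keras.layers.ReLU itself; the layer sizes and model are illustrative only, and only the documented max_value argument is assumed.

from keras.layers import Input, Dense, ReLU
from keras.models import Model

# Minimal illustration (not taken from any project below): ReLU as a
# standalone layer, with and without the max_value clipping argument.
inputs = Input(shape=(16,))
x = Dense(32)(inputs)
x = ReLU()(x)                  # plain rectifier: max(x, 0)
x = Dense(32)(x)
x = ReLU(max_value=6.0)(x)     # "ReLU6": additionally clips activations at 6
outputs = Dense(1, activation='sigmoid')(x)

model = Model(inputs=inputs, outputs=outputs)
model.summary()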

Example #1
Source Project: MMdnn Author: microsoft File: keras2_emitter.py License: MIT License
def emit_Relu6(self, IR_node, in_scope=False):
    try:
        # Keras == 2.1.6
        from keras.applications.mobilenet import relu6
        str_relu6 = 'keras.applications.mobilenet.relu6'
        code = "{:<15} = layers.Activation({}, name = '{}')({})".format(
            IR_node.variable_name,
            str_relu6,
            IR_node.name,
            self.IR_graph.get_node(IR_node.in_edges[0]).real_variable_name)
        return code
    except:
        # Keras == 2.2.2
        from keras.layers import ReLU
        code = "{:<15} = layers.ReLU(6, name = '{}')({})".format(
            IR_node.variable_name,
            IR_node.name,
            self.IR_graph.get_node(IR_node.in_edges[0]).real_variable_name)
        return code
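The two branches above emit code against different Keras APIs: relu6 lives in keras.applications.mobilenet up to Keras 2.1.6 and is replaced by the ReLU(6.) layer from Keras 2.2 onward. A small compatibility sketch of the same idea (the helper name is hypothetical, not part of MMdnn):

def relu6_layer(name=None):
    """Return a ReLU6 layer in a way that works on both Keras API generations."""
    try:
        from keras.layers import ReLU                       # Keras >= 2.2
        return ReLU(6.0, name=name)
    except ImportError:
        from keras.layers import Activation
        from keras.applications.mobilenet import relu6      # Keras <= 2.1.6
        return Activation(relu6, name=name)

# Hypothetical usage: y = relu6_layer(name='relu6')(x)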
Example #2
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def shortcut_pool(inputs, output, filters=256, pool_type='max', shortcut=True):
    """ResNet shortcut (skip/residual) connection; a shortcut connection is used here.
    Identity mapping, block + f(block), followed by downsampling.
    Reference: https://github.com/zonetrooper32/VDCNN/blob/keras_version/vdcnn.py
    :param inputs: tensor
    :param output: tensor
    :param filters: int
    :param pool_type: str, 'max', 'k-max', 'conv' or other
    :param shortcut: boolean
    :return: tensor
    """
    if shortcut:
        conv_2 = Conv1D(filters=filters, kernel_size=1, strides=2, padding='SAME')(inputs)
        conv_2 = BatchNormalization()(conv_2)
        output = downsampling(output, pool_type=pool_type)
        out = Add()([output, conv_2])
    else:
        # ReLU is a layer class, so it must be instantiated before being applied
        out = ReLU()(inputs)
        out = downsampling(out, pool_type=pool_type)
    if pool_type is not None:  # double the filters
        out = Conv1D(filters=filters * 2, kernel_size=1, strides=1, padding='SAME')(out)
        out = BatchNormalization()(out)
    return out
Example #3
Source Project: keras-octconv Author: titu1994 File: octave_conv_block.py License: MIT License
def initial_oct_conv_bn_relu(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                             alpha=0.5, padding='same', dilation=None, bias=False,
                             activation=True):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_high, x_low = initial_octconv(ip, filters, kernel_size, strides, alpha,
                                    padding, dilation, bias)

    relu = ReLU()

    x_high = BatchNormalization(axis=channel_axis)(x_high)
    if activation:
        x_high = relu(x_high)

    x_low = BatchNormalization(axis=channel_axis)(x_low)
    if activation:
        x_low = relu(x_low)

    return x_high, x_low
Example #4
Source Project: keras-octconv Author: titu1994 File: octave_conv_block.py License: MIT License
def oct_conv_bn_relu(ip_high, ip_low, filters, kernel_size=(3, 3), strides=(1, 1),
                     alpha=0.5, padding='same', dilation=None, bias=False,
                     activation=True):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x_high, x_low = octconv_block(ip_high, ip_low, filters, kernel_size, strides,
                                  alpha, padding, dilation, bias)

    relu = ReLU()

    x_high = BatchNormalization(axis=channel_axis)(x_high)
    if activation:
        x_high = relu(x_high)

    x_low = BatchNormalization(axis=channel_axis)(x_low)
    if activation:
        x_low = relu(x_low)

    return x_high, x_low
Example #5
Source Project: keras-octconv Author: titu1994 File: octave_resnet.py License: MIT License
def _bottleneck_original(ip, filters, strides=(1, 1), downsample_shortcut=False,
                         expansion=4):
    final_filters = int(filters * expansion)
    shortcut = ip

    x = _conv_bn_relu(ip, filters, kernel_size=(1, 1))
    x = _conv_bn_relu(x, filters, kernel_size=(3, 3), strides=strides)
    x = _conv_bn_relu(x, final_filters, kernel_size=(1, 1), activation=False)

    if downsample_shortcut:
        shortcut = _conv_block(shortcut, final_filters, kernel_size=(1, 1),
                               strides=strides)

    x = add([x, shortcut])
    x = ReLU()(x)

    return x
Example #6
Source Project: Generative-Adversarial-Networks-Projects Author: PacktPublishing File: run.py License: MIT License
def build_generator():
    gen_model = Sequential()

    # Keras 2 style: units first, input_dim as a keyword
    # (the legacy output_dim keyword is deprecated in Keras 2)
    gen_model.add(Dense(2048, input_dim=100))
    gen_model.add(ReLU())

    gen_model.add(Dense(256 * 8 * 8))
    gen_model.add(BatchNormalization())
    gen_model.add(ReLU())
    gen_model.add(Reshape((8, 8, 256), input_shape=(256 * 8 * 8,)))
    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(128, (5, 5), padding='same'))
    gen_model.add(ReLU())
    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(64, (5, 5), padding='same'))
    gen_model.add(ReLU())
    gen_model.add(UpSampling2D(size=(2, 2)))

    gen_model.add(Conv2D(3, (5, 5), padding='same'))
    gen_model.add(Activation('tanh'))

    return gen_model
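A quick, hypothetical smoke test of the generator above; only the 100-dimensional noise input and the three 2x upsamplings from 8x8 already present in the code are assumed.

import numpy as np

# Build the generator and run a small random batch through it.
gen_model = build_generator()
noise = np.random.normal(0.0, 1.0, size=(4, 100))
fake_images = gen_model.predict(noise)
print(fake_images.shape)   # expected (4, 64, 64, 3): 8 -> 16 -> 32 -> 64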
Example #7
Source Project: imgclsmob Author: osmr File: common.py License: MIT License
def call(self, x):
    return nn.ReLU(max_value=6.0)(x)
Example #8
Source Project: imgclsmob Author: osmr File: common.py License: MIT License
def call(self, x):
    return nn.ReLU(max_value=6.0)(x + 3.0) / 6.0
Example #9
Source Project: imgclsmob Author: osmr File: common.py License: MIT License
def call(self, x):
    return x * nn.ReLU(max_value=6.0)(x + 3.0) / 6.0
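Examples #7 to #9 are the call methods of the ReLU6, hard-sigmoid, and hard-swish layers in imgclsmob's common.py, all built on nn.ReLU(max_value=6.0). A self-contained sketch of the same three activations using plain keras.layers and keras.backend (the helper names are illustrative, not imgclsmob's):

from keras import backend as K
from keras.layers import Lambda, ReLU

def relu6(x):
    # relu6(x) = min(max(x, 0), 6)
    return ReLU(max_value=6.0)(x)

def hard_sigmoid(x):
    # hsigmoid(x) = relu6(x + 3) / 6
    return Lambda(lambda t: K.relu(t + 3.0, max_value=6.0) / 6.0)(x)

def hard_swish(x):
    # hswish(x) = x * relu6(x + 3) / 6
    return Lambda(lambda t: t * K.relu(t + 3.0, max_value=6.0) / 6.0)(x)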
Example #10
Source Project: imgclsmob Author: osmr File: common.py License: MIT License
def get_activation_layer(x, activation, name="activ"):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    activation : function or str
        Activation function or name of activation function.
    name : str, default 'activ'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    assert (activation is not None)
    if isfunction(activation):
        x = activation()(x)
    elif isinstance(activation, str):
        if activation == "relu":
            x = nn.Activation("relu", name=name)(x)
        elif activation == "relu6":
            x = nn.ReLU(max_value=6.0, name=name)(x)
        elif activation == "swish":
            x = swish(x=x, name=name)
        elif activation == "hswish":
            x = HSwish(name=name)(x)
        else:
            raise NotImplementedError()
    else:
        x = activation(x)
    return x
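A few hedged usage notes for get_activation_layer (x is any Keras tensor; nn is keras.layers in this module; the LeakyReLU slope is illustrative):

# x = get_activation_layer(x, "relu")                       # nn.Activation("relu")
# x = get_activation_layer(x, "relu6")                      # nn.ReLU(max_value=6.0)
# x = get_activation_layer(x, lambda: nn.LeakyReLU(0.2))    # function branch: activation()(x)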
Example #11
Source Project: StarGAN-Keras Author: hoangthang1607 File: StarGAN.py License: MIT License
def ResidualBlock(self, inp, dim_out):
    """Residual Block with instance normalization."""
    x = ZeroPadding2D(padding=1)(inp)
    x = Conv2D(filters=dim_out, kernel_size=3, strides=1, padding='valid', use_bias=False)(x)
    x = InstanceNormalization(axis=-1)(x)
    x = ReLU()(x)
    x = ZeroPadding2D(padding=1)(x)
    x = Conv2D(filters=dim_out, kernel_size=3, strides=1, padding='valid', use_bias=False)(x)
    x = InstanceNormalization(axis=-1)(x)
    return Add()([inp, x])
Example #12
Source Project: MMdnn Author: microsoft File: keras2_parser.py License: MIT License
def __init__(self, model):
    super(Keras2Parser, self).__init__()

    # load model files into Keras graph
    if isinstance(model, _string_types):
        try:
            # Keras 2.1.6
            from keras.applications.mobilenet import relu6
            from keras.applications.mobilenet import DepthwiseConv2D
            model = _keras.models.load_model(
                model,
                custom_objects={
                    'relu6': _keras.applications.mobilenet.relu6,
                    'DepthwiseConv2D': _keras.applications.mobilenet.DepthwiseConv2D
                }
            )
        except:
            # Keras 2.2.2
            import keras.layers as layers
            model = _keras.models.load_model(
                model,
                custom_objects={
                    'relu6': layers.ReLU(6, name='relu6'),
                    'DepthwiseConv2D': layers.DepthwiseConv2D
                }
            )
        self.weight_loaded = True

    elif isinstance(model, tuple):
        model = self._load_model(model[0], model[1])

    else:
        assert False

    # _keras.utils.plot_model(model, "model.png", show_shapes = True)

    # Build network graph
    self.data_format = _keras.backend.image_data_format()
    self.keras_graph = Keras2Graph(model)
    self.keras_graph.build()
    self.lambda_layer_count = 0
Example #13
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def create_model(self, hyper_parameters):
    """Build the neural network.
    :param hyper_parameters: json, hyper parameters of network
    :return: tensor, model
    """
    super().create_model(hyper_parameters)
    embedding_output = self.word_embedding.output
    embedding_output_spatial = SpatialDropout1D(self.dropout_spatial)(embedding_output)

    # First the region embedding layer
    conv_1 = Conv1D(self.filters[0][0],
                    kernel_size=1,
                    strides=1,
                    padding='SAME',
                    kernel_regularizer=l2(self.l2),
                    bias_regularizer=l2(self.l2),
                    activation=self.activation_conv,
                    )(embedding_output_spatial)
    block = ReLU()(conv_1)

    for filters_block in self.filters:
        for j in range(filters_block[1] - 1):
            # conv + short-cut
            block_mid = self.convolutional_block(block, units=filters_block[0])
            block = shortcut_conv(block, block_mid, shortcut=True)
        # conv + max-pooling here
        block_mid = self.convolutional_block(block, units=filters_block[0])
        block = shortcut_pool(block, block_mid, filters=filters_block[0],
                              pool_type=self.pool_type, shortcut=True)

    block = k_max_pooling(top_k=self.top_k)(block)
    block = Flatten()(block)
    block = Dropout(self.dropout)(block)

    # Fully connected layer
    # block_fully = Dense(2048, activation='tanh')(block)
    # output = Dense(2048, activation='tanh')(block_fully)
    output = Dense(self.label, activation=self.activate_classify)(block)
    self.model = Model(inputs=self.word_embedding.input, outputs=output)
    self.model.summary(120)
Example #14
Source Project: Keras-TextClassification Author: yongzhuo File: graph.py License: MIT License
def convolutional_block(self, inputs, units=256):
    """Each convolutional block (see Figure 2) is a sequence of two convolutional layers,
    each one followed by a temporal BatchNorm (Ioffe and Szegedy, 2015) layer and a ReLU
    activation. The kernel size of all the temporal convolutions is 3, with padding such
    that the temporal resolution is preserved (or halved in the case of convolutional
    pooling with stride 2, see below).
    :param inputs: tensor, input
    :param units: int, units
    :return: tensor, result of convolutional block
    """
    x = Conv1D(units,
               kernel_size=3,
               padding='SAME',
               strides=1,
               kernel_regularizer=l2(self.l2),
               bias_regularizer=l2(self.l2),
               activation=self.activation_conv,
               )(inputs)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv1D(units,
               kernel_size=3,
               strides=1,
               padding='SAME',
               kernel_regularizer=l2(self.l2),
               bias_regularizer=l2(self.l2),
               activation=self.activation_conv,
               )(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    return x
Example #15
Source Project: keras-octconv Author: titu1994 File: octave_conv_block.py License: MIT License
def final_oct_conv_bn_relu(ip_high, ip_low, filters, kernel_size=(3, 3), strides=(1, 1),
                           padding='same', dilation=None, bias=False, activation=True):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = final_octconv(ip_high, ip_low, filters, kernel_size, strides,
                      padding, dilation, bias)

    x = BatchNormalization(axis=channel_axis)(x)
    if activation:
        x = ReLU()(x)

    return x
Example #16
Source Project: keras-octconv Author: titu1994 File: octave_resnet.py License: MIT License
def _conv_bn_relu(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                  padding='same', bias=False, activation=True):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = _conv_block(ip, filters, kernel_size, strides, padding, bias)

    x = BatchNormalization(axis=channel_axis)(x)
    if activation:
        x = ReLU()(x)

    return x
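_conv_block is defined elsewhere in octave_resnet.py and is not shown in this listing. A minimal self-contained sketch of the same conv + BN + ReLU pattern, assuming the convolution is a plain Conv2D (the real keras-octconv helper may differ):

from keras import backend as K
from keras.layers import Conv2D, BatchNormalization, ReLU

def conv_bn_relu(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                 padding='same', bias=False, activation=True):
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    # Convolution, then batch normalization, then an optional ReLU
    x = Conv2D(filters, kernel_size, strides=strides, padding=padding,
               use_bias=bias)(ip)
    x = BatchNormalization(axis=channel_axis)(x)
    if activation:
        x = ReLU()(x)
    return x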
Example #17
Source Project: keras-octconv Author: titu1994 File: octave_resnet.py License: MIT License
def _octresnet_bottleneck_block(ip, filters, alpha=0.5, strides=(1, 1),
                                downsample_shortcut=False, first_block=False,
                                expansion=4):
    if first_block:
        x_high_res, x_low_res = initial_oct_conv_bn_relu(ip, filters, kernel_size=(1, 1),
                                                         alpha=alpha)

        x_high, x_low = oct_conv_bn_relu(x_high_res, x_low_res, filters, kernel_size=(3, 3),
                                         strides=strides, alpha=alpha)
    else:
        x_high_res, x_low_res = ip

        x_high, x_low = oct_conv_bn_relu(x_high_res, x_low_res, filters, kernel_size=(1, 1),
                                         alpha=alpha)

        x_high, x_low = oct_conv_bn_relu(x_high, x_low, filters, kernel_size=(3, 3),
                                         strides=strides, alpha=alpha)

    final_out_filters = int(filters * expansion)
    x_high, x_low = oct_conv_bn_relu(x_high, x_low, filters=final_out_filters,
                                     kernel_size=(1, 1), alpha=alpha, activation=False)

    if downsample_shortcut:
        x_high_res, x_low_res = oct_conv_bn_relu(x_high_res, x_low_res, final_out_filters,
                                                 kernel_size=(1, 1), strides=strides,
                                                 alpha=alpha, activation=False)

    x_high = add([x_high, x_high_res])
    x_low = add([x_low, x_low_res])

    x_high = ReLU()(x_high)
    x_low = ReLU()(x_low)

    return x_high, x_low
Example #18
Source Project: imgclsmob Author: osmr File: common.py License: MIT License
def pre_conv_block(x, in_channels, out_channels, kernel_size, strides, padding,
                   return_preact=False, name="pre_conv_block"):
    """
    Convolution block with Batch normalization and ReLU pre-activation.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    name : str, default 'pre_conv_block'
        Block name.

    Returns
    -------
    tuple of two keras.backend tensor/variable/symbol
        Resulted tensor and preactivated input tensor.
    """
    x = batchnorm(
        x=x,
        name=name + "/bn")
    x = nn.Activation("relu", name=name + "/activ")(x)
    if return_preact:
        x_pre_activ = x
    x = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        name=name + "/conv")
    if return_preact:
        return x, x_pre_activ
    else:
        return x
Example #19
Source Project: StarGAN-Keras Author: hoangthang1607 File: StarGAN.py License: MIT License
def build_generator(self):
    """Generator network."""
    # Input tensors
    inp_c = Input(shape=(self.c_dim,))
    inp_img = Input(shape=(self.image_size, self.image_size, 3))

    # Replicate spatially and concatenate domain information
    c = Lambda(lambda x: K.repeat(x, self.image_size**2))(inp_c)
    c = Reshape((self.image_size, self.image_size, self.c_dim))(c)
    x = Concatenate()([inp_img, c])

    # First Conv2D
    x = Conv2D(filters=self.g_conv_dim, kernel_size=7, strides=1, padding='same', use_bias=False)(x)
    x = InstanceNormalization(axis=-1)(x)
    x = ReLU()(x)

    # Down-sampling layers
    curr_dim = self.g_conv_dim
    for i in range(2):
        x = ZeroPadding2D(padding=1)(x)
        x = Conv2D(filters=curr_dim * 2, kernel_size=4, strides=2, padding='valid', use_bias=False)(x)
        x = InstanceNormalization(axis=-1)(x)
        x = ReLU()(x)
        curr_dim = curr_dim * 2

    # Bottleneck layers
    for i in range(self.g_repeat_num):
        x = self.ResidualBlock(x, curr_dim)

    # Up-sampling layers
    for i in range(2):
        x = UpSampling2D(size=2)(x)
        x = Conv2D(filters=curr_dim // 2, kernel_size=4, strides=1, padding='same', use_bias=False)(x)
        x = InstanceNormalization(axis=-1)(x)
        x = ReLU()(x)
        curr_dim = curr_dim // 2

    # Last Conv2D
    x = ZeroPadding2D(padding=3)(x)
    out = Conv2D(filters=3, kernel_size=7, strides=1, padding='valid', activation='tanh', use_bias=False)(x)

    return Model(inputs=[inp_img, inp_c], outputs=out)
Example #20
Source Project: MMdnn Author: microsoft File: keras2_parser.py License: MIT License
def _load_model(self, model_network_path, model_weight_path):
    """Load a keras model from disk

    Parameters
    ----------
    model_network_path: str
        Path where the model network path is (json file)

    model_weight_path: str
        Path where the model network weights are (hd5 file)

    Returns
    -------
    model: A keras model
    """
    from keras.models import model_from_json

    # Load the model network
    json_file = open(model_network_path, 'r')
    loaded_model_json = json_file.read()
    json_file.close()

    # Load the model weights
    try:
        from keras.applications.mobilenet import relu6
        from keras.applications.mobilenet import DepthwiseConv2D
        loaded_model = model_from_json(loaded_model_json, custom_objects={
            'relu6': _keras.applications.mobilenet.relu6,
            'DepthwiseConv2D': _keras.applications.mobilenet.DepthwiseConv2D})
    except:
        import keras.layers as layers
        loaded_model = model_from_json(loaded_model_json, custom_objects={
            'relu6': layers.ReLU(6, name='relu6'),
            'DepthwiseConv2D': layers.DepthwiseConv2D})

    if model_weight_path:
        if os.path.isfile(model_weight_path):
            loaded_model.load_weights(model_weight_path)
            self.weight_loaded = True
            print("Network file [{}] and [{}] is loaded successfully.".format(
                model_network_path, model_weight_path))
        else:
            print("Warning: Weights File [%s] is not found." % (model_weight_path))

    return loaded_model
Example #21
Source Project: coremltools Author: apple File: test_keras2_numeric.py License: BSD 3-Clause "New" or "Revised" License
def test_tiny_mobilenet_arch(self, model_precision=_MLMODEL_FULL_PRECISION):
    def ReLU6(x, name):
        if keras.__version__ >= _StrictVersion("2.2.1"):
            return ReLU(6.0, name=name)(x)
        else:
            return Activation(relu6, name=name)(x)

    img_input = Input(shape=(32, 32, 3))
    x = Conv2D(
        4, (3, 3), padding="same", use_bias=False, strides=(2, 2), name="conv1"
    )(img_input)
    x = BatchNormalization(axis=-1, name="conv1_bn")(x)
    x = ReLU6(x, name="conv1_relu")

    x = DepthwiseConv2D(
        (3, 3),
        padding="same",
        depth_multiplier=1,
        strides=(1, 1),
        use_bias=False,
        name="conv_dw_1",
    )(x)
    x = BatchNormalization(axis=-1, name="conv_dw_1_bn")(x)
    x = ReLU6(x, name="conv_dw_1_relu")

    x = Conv2D(
        8, (1, 1), padding="same", use_bias=False, strides=(1, 1), name="conv_pw_1"
    )(x)
    x = BatchNormalization(axis=-1, name="conv_pw_1_bn")(x)
    x = ReLU6(x, name="conv_pw_1_relu")

    x = DepthwiseConv2D(
        (3, 3),
        padding="same",
        depth_multiplier=1,
        strides=(2, 2),
        use_bias=False,
        name="conv_dw_2",
    )(x)
    x = BatchNormalization(axis=-1, name="conv_dw_2_bn")(x)
    x = ReLU6(x, name="conv_dw_2_relu")

    x = Conv2D(
        8, (1, 1), padding="same", use_bias=False, strides=(2, 2), name="conv_pw_2"
    )(x)
    x = BatchNormalization(axis=-1, name="conv_pw_2_bn")(x)
    x = ReLU6(x, name="conv_pw_2_relu")

    model = Model(inputs=[img_input], outputs=[x])

    self._test_model(model, delta=1e-2, model_precision=model_precision)
Example #22
Source Project: medical_image_segmentation Author: CVxTz File: baseline_aug.py License: MIT License
def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-3), loss=losses.binary_crossentropy, metrics=['accuracy'])

    return model
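Because the activation is passed in as a layer class and instantiated inside the function, swapping ReLU for another advanced activation needs no change to the architecture. A hedged usage sketch (the dropout value and the LeakyReLU choice are illustrative):

from keras.layers import ReLU, LeakyReLU

# Default: ReLU after every convolution.
model_relu = get_unet(do=0.1, activation=ReLU)

# Drop-in alternative: LeakyReLU, instantiated the same way inside get_unet.
model_leaky = get_unet(do=0.1, activation=LeakyReLU)
model_relu.summary()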
Example #23
Source Project: medical_image_segmentation Author: CVxTz File: baseline.py License: MIT License
def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-3), loss=losses.binary_crossentropy, metrics=['accuracy'])

    return model
Example #24
Source Project: medical_image_segmentation Author: CVxTz File: focal_aug.py License: MIT License
def get_unet(do=0, activation=ReLU):
    inputs = Input((None, None, 3))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(inputs)))
    conv1 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv1)))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(pool1)))
    conv2 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv2)))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(pool2)))
    conv3 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv3)))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(pool3)))
    conv4 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv4)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(pool4)))
    conv5 = Dropout(do)(activation()(Conv2D(512, (3, 3), padding='same')(conv5)))

    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(up6)))
    conv6 = Dropout(do)(activation()(Conv2D(256, (3, 3), padding='same')(conv6)))
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(up7)))
    conv7 = Dropout(do)(activation()(Conv2D(128, (3, 3), padding='same')(conv7)))
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(up8)))
    conv8 = Dropout(do)(activation()(Conv2D(64, (3, 3), padding='same')(conv8)))
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(up9)))
    conv9 = Dropout(do)(activation()(Conv2D(32, (3, 3), padding='same')(conv9)))
    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5), loss=focal_loss(gamma=2., alpha=.25), metrics=['accuracy'])

    return model
Example #25
Source Project: IterNet Author: conscienceli File: train.py License: MIT License
def train(iteration=3, DATASET='DRIVE', crop_size=128, need_au=True, ACTIVATION='ReLU',
          dropout=0.1, batch_size=32, repeat=4, minimum_kernel=32, epochs=200):
    model_name = f"Final_Emer_Iteration_{iteration}_cropsize_{crop_size}_epochs_{epochs}"
    print("Model : %s" % model_name)
    prepare_dataset.prepareDataset(DATASET)
    activation = globals()[ACTIVATION]
    model = define_model.get_unet(minimum_kernel=minimum_kernel, do=dropout,
                                  activation=activation, iteration=iteration)

    try:
        os.makedirs(f"trained_model/{DATASET}/", exist_ok=True)
        os.makedirs(f"logs/{DATASET}/", exist_ok=True)
    except:
        pass

    load_path = f"trained_model/{DATASET}/{model_name}_weights.best.hdf5"
    try:
        model.load_weights(load_path, by_name=True)
    except:
        pass

    now = datetime.now()  # current date and time
    date_time = now.strftime("%Y-%m-%d---%H-%M-%S")

    tensorboard = TensorBoard(
        log_dir=f"logs/{DATASET}/Final_Emer-Iteration_{iteration}-Cropsize_{crop_size}-Epochs_{epochs}---{date_time}",
        histogram_freq=0, batch_size=32, write_graph=True, write_grads=True,
        write_images=True, embeddings_freq=0, embeddings_layer_names=None,
        embeddings_metadata=None, embeddings_data=None, update_freq='epoch')

    save_path = f"trained_model/{DATASET}/{model_name}.hdf5"
    checkpoint = ModelCheckpoint(save_path, monitor='final_out_loss', verbose=1,
                                 save_best_only=True, mode='min')

    data_generator = define_model.Generator(batch_size, repeat, DATASET)

    history = model.fit_generator(data_generator.gen(au=need_au, crop_size=crop_size,
                                                     iteration=iteration),
                                  epochs=epochs, verbose=1,
                                  steps_per_epoch=100 * data_generator.n // batch_size,
                                  use_multiprocessing=True, workers=8,
                                  callbacks=[tensorboard, checkpoint])