Python keras.layers.Add() Examples
The following are 30 code examples of keras.layers.Add(), collected from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions and classes of the keras.layers module.
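Add() is a merge layer: it takes a list of tensors with identical shapes and returns their element-wise sum, which is the building block of the residual and shortcut connections in the examples below. A minimal, self-contained sketch (the layer names and sizes are invented for illustration):

from keras.layers import Input, Dense, Add
from keras.models import Model

# Two branches with matching output shapes, as Add() requires
inp = Input(shape=(16,))
branch_a = Dense(8, activation='relu')(inp)
branch_b = Dense(8, activation='relu')(inp)

summed = Add()([branch_a, branch_b])   # element-wise sum, shape (None, 8)
model = Model(inputs=inp, outputs=summed)
model.summary()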
Example #1
Source File: wrn_renorm.py From BatchRenormalization with MIT License
def expand_conv(init, base, k, strides=(1, 1)):
    x = Convolution2D(base * k, (3, 3), padding='same', strides=strides,
                      kernel_initializer='he_normal', use_bias=False)(init)

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchRenormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5,
                             gamma_init='uniform')(x)
    x = Activation('relu')(x)

    x = Convolution2D(base * k, (3, 3), padding='same',
                      kernel_initializer='he_normal', use_bias=False)(x)

    skip = Convolution2D(base * k, (1, 1), padding='same', strides=strides,
                         kernel_initializer='he_normal', use_bias=False)(init)

    m = Add()([x, skip])
    return m
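Both branches reach Add() with identical shapes because the 1x1 skip convolution applies the same strides as the main path's first 3x3 convolution. A hypothetical call, assuming channels_last data and the imports of the source file:

inp = Input(shape=(32, 32, 16))
out = expand_conv(inp, base=16, k=2, strides=(2, 2))   # both branches: (16, 16, 32)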
Example #2
Source File: FFNN.py From dts with MIT License
def _residual_block(self, units, inputs):
    out = Dense(units=units,
                kernel_regularizer=self.kernel_regularizer,
                activation=self.activation,
                kernel_initializer=self.kernel_initializer,
                kernel_constraint=self.kernel_constraint,
                use_bias=self.use_bias,
                bias_regularizer=self.bias_regularizer,
                bias_initializer=self.bias_initializer,
                bias_constraint=self.bias_constraint)(inputs)
    out = Dropout(self.dropout)(out)
    out = Dense(units=units,
                kernel_regularizer=self.kernel_regularizer,
                activation=self.activation,
                kernel_initializer=self.kernel_initializer,
                kernel_constraint=self.kernel_constraint,
                use_bias=self.use_bias,
                bias_regularizer=self.bias_regularizer,
                bias_initializer=self.bias_initializer,
                bias_constraint=self.bias_constraint)(out)
    out = BatchNormalization(trainable=True)(out)
    if K.int_shape(inputs)[-1] != K.int_shape(out)[-1]:
        inputs = Dense(units=units,
                       kernel_regularizer=self.kernel_regularizer,
                       activation=self.activation,
                       kernel_initializer=self.kernel_initializer,
                       kernel_constraint=self.kernel_constraint,
                       use_bias=self.use_bias,
                       bias_regularizer=self.bias_regularizer,
                       bias_initializer=self.bias_initializer,
                       bias_constraint=self.bias_constraint)(inputs)
    out = Add()([inputs, out])
    return out
Example #3
Source File: graph.py From Keras-TextClassification with MIT License
def shortcut_pool(inputs, output, filters=256, pool_type='max', shortcut=True):
    """ResNet-style connection (shortcut / skip / residual); a shortcut connection is used here.
    Identity mapping, block + f(block), combined with downsampling.
    Reference: https://github.com/zonetrooper32/VDCNN/blob/keras_version/vdcnn.py
    :param inputs: tensor
    :param output: tensor
    :param filters: int
    :param pool_type: str, 'max', 'k-max', 'conv' or other
    :param shortcut: boolean
    :return: tensor
    """
    if shortcut:
        conv_2 = Conv1D(filters=filters, kernel_size=1, strides=2, padding='SAME')(inputs)
        conv_2 = BatchNormalization()(conv_2)
        output = downsampling(output, pool_type=pool_type)
        out = Add()([output, conv_2])
    else:
        # ReLU is a layer and must be instantiated before it is applied
        out = ReLU()(inputs)
        out = downsampling(out, pool_type=pool_type)
    if pool_type is not None:  # double the number of filters
        out = Conv1D(filters=filters * 2, kernel_size=1, strides=1, padding='SAME')(out)
        out = BatchNormalization()(out)
    return out
Example #4
Source File: model.py From attention-is-all-you-need-keras with MIT License
def __call__(self, x, encoder_output, return_attention=False):
    x_embedded = self._embedding(x)
    pos_encoding = self._position_encoding(x)
    pos_encoding_embedded = self._position_embedding(pos_encoding)
    x = Add()([x_embedded, pos_encoding_embedded])

    self_atts = []
    enc_atts = []
    for layer in self._layers:
        x, self_att, enc_att = layer(x, encoder_output)
        if return_attention:
            self_atts.append(self_att)
            enc_atts.append(enc_att)

    if return_attention:
        return [x, self_atts, enc_atts]
    else:
        return x
Example #5
Source File: transformer.py From keras-transformer with MIT License
def __init__(self, name: str, num_heads: int,
             residual_dropout: float = 0, attention_dropout: float = 0,
             activation: Optional[Union[str, Callable]] = 'gelu',
             compression_window_size: int = None,
             use_masking: bool = True,
             vanilla_wiring=False):
    self.attention_layer = MultiHeadSelfAttention(
        num_heads, use_masking=use_masking, dropout=attention_dropout,
        compression_window_size=compression_window_size,
        name=f'{name}_self_attention')
    self.norm1_layer = LayerNormalization(name=f'{name}_normalization1')
    self.dropout_layer = (
        Dropout(residual_dropout, name=f'{name}_dropout')
        if residual_dropout > 0
        else lambda x: x)
    self.norm2_layer = LayerNormalization(name=f'{name}_normalization2')
    self.transition_layer = TransformerTransition(
        name=f'{name}_transition', activation=activation)
    self.addition_layer = Add(name=f'{name}_add')
    self.vanilla_wiring = vanilla_wiring
Example #6
Source File: wide_residual_network.py From semantic-embeddings with MIT License
def conv_block(input, base, k=1, dropout=0.0):
    init = input

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5,
                           gamma_initializer='uniform')(input)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same',
                      kernel_initializer='he_normal', use_bias=False)(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5,
                           gamma_initializer='uniform')(x)
    x = Activation('relu')(x)
    x = Convolution2D(base * k, (3, 3), padding='same',
                      kernel_initializer='he_normal', use_bias=False)(x)

    m = Add()([init, x])
    return m
Example #7
Source File: hourglass.py From keras-centernet with MIT License
def residual(_x, out_dim, name, stride=1):
    shortcut = _x
    num_channels = K.int_shape(shortcut)[-1]

    _x = ZeroPadding2D(padding=1, name=name + '.pad1')(_x)
    _x = Conv2D(out_dim, 3, strides=stride, use_bias=False, name=name + '.conv1')(_x)
    _x = BatchNormalization(epsilon=1e-5, name=name + '.bn1')(_x)
    _x = Activation('relu', name=name + '.relu1')(_x)

    _x = Conv2D(out_dim, 3, padding='same', use_bias=False, name=name + '.conv2')(_x)
    _x = BatchNormalization(epsilon=1e-5, name=name + '.bn2')(_x)

    if num_channels != out_dim or stride != 1:
        shortcut = Conv2D(out_dim, 1, strides=stride, use_bias=False,
                          name=name + '.shortcut.0')(shortcut)
        shortcut = BatchNormalization(epsilon=1e-5, name=name + '.shortcut.1')(shortcut)

    _x = Add(name=name + '.add')([_x, shortcut])
    _x = Activation('relu', name=name + '.relu')(_x)
    return _x
Example #8
Source File: wide_residual_network.py From semantic-embeddings with MIT License
def expand_conv(init, base, k, strides=(1, 1)):
    x = Convolution2D(base * k, (3, 3), padding='same', strides=strides,
                      kernel_initializer='he_normal', use_bias=False)(init)

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchNormalization(axis=channel_axis, momentum=0.1, epsilon=1e-5,
                           gamma_initializer='uniform')(x)
    x = Activation('relu')(x)

    x = Convolution2D(base * k, (3, 3), padding='same',
                      kernel_initializer='he_normal', use_bias=False)(x)

    skip = Convolution2D(base * k, (1, 1), padding='same', strides=strides,
                         kernel_initializer='he_normal', use_bias=False)(init)

    m = Add()([x, skip])
    return m
Example #9
Source File: model.py From BERT-keras with GNU General Public License v3.0
def __init__(self, n_state: int, n_head: int, d_hid: int,
             residual_dropout: float, attention_dropout: float,
             use_attn_mask: bool, layer_id: int, neg_inf: float,
             ln_epsilon: float, accurate_gelu: bool) -> None:
    self.attention = MultiHeadSelfAttention(n_state, n_head, attention_dropout,
                                            use_attn_mask, layer_id, neg_inf)
    self.drop1 = Dropout(residual_dropout, name='layer_{}/ln_1_drop'.format(layer_id))
    self.add1 = Add(name='layer_{}/ln_1_add'.format(layer_id))
    self.ln1 = LayerNormalization(ln_epsilon, name='layer_{}/ln_1'.format(layer_id))
    self.ffn = PositionWiseFF(n_state, d_hid, layer_id, accurate_gelu)
    self.drop2 = Dropout(residual_dropout, name='layer_{}/ln_2_drop'.format(layer_id))
    self.add2 = Add(name='layer_{}/ln_2_add'.format(layer_id))
    self.ln2 = LayerNormalization(ln_epsilon, name='layer_{}/ln_2'.format(layer_id))
Example #10
Source File: graph_yoon_kim.py From Keras-TextClassification with MIT License
def call(self, x):
    dim = K.int_shape(x)[-1]
    transform_gate = self.dense_1(x)
    transform_gate = Activation("sigmoid")(transform_gate)
    carry_gate = Lambda(lambda x: 1.0 - x, output_shape=(dim,))(transform_gate)
    transformed_data = self.dense_2(x)
    transformed_data = Activation(self.activation)(transformed_data)
    transformed_gated = Multiply()([transform_gate, transformed_data])
    identity_gated = Multiply()([carry_gate, x])
    value = Add()([transformed_gated, identity_gated])
    return value
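This call() implements the standard highway-network gating y = t * h(x) + (1 - t) * x: when the sigmoid transform gate t is 0 the input passes through untouched, and when t is 1 the full transformation is applied. A plain-NumPy illustration of the gating arithmetic (not the layer itself):

import numpy as np

t = np.array([0.0, 0.5, 1.0])   # transform gate
h = np.array([10., 10., 10.])   # transformed data
x = np.array([1., 1., 1.])      # identity path
print(t * h + (1 - t) * x)      # [ 1.   5.5 10. ]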
Example #11
Source File: model.py From DeepTL-Lane-Change-Classification with MIT License
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object
    optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
                                     clipnorm=5.0)
    # Add Losses
    # First, clear previously set losses to avoid duplication
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = ["rpn_class_loss", "rpn_bbox_loss",
                  "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output in self.keras_model.losses:
            continue
        self.keras_model.add_loss(
            tf.reduce_mean(layer.output, keep_dims=True))

    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
                  for w in self.keras_model.trainable_weights
                  if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))

    # Compile
    self.keras_model.compile(optimizer=optimizer,
                             loss=[None] * len(self.keras_model.outputs))

    # Add metrics for losses
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        self.keras_model.metrics_tensors.append(tf.reduce_mean(
            layer.output, keep_dims=True))
Example #12
Source File: test_keras2_numeric.py From coremltools with BSD 3-Clause "New" or "Revised" License
def test_model_with_duplicated_edges(self):
    # Create a simple model in which one tensor feeds two consumers
    inputs = Input(shape=(20, 20))
    activation = Activation("relu")(inputs)
    cropping = Cropping1D(cropping=(1, 1))(activation)
    conv1d = Conv1D(20, 3, padding="valid")(activation)
    outputs = Add()([conv1d, cropping])

    model = Model(inputs, outputs)
    self._test_model(model)
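The two branches line up because Cropping1D(cropping=(1, 1)) trims one step from each end of the 20-step axis and a length-3 'valid' convolution also removes two steps, so both tensors reaching Add() have shape (None, 18, 20). A quick sanity check after building the model:

print(model.output_shape)   # expected: (None, 18, 20)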
Example #13
Source File: SSUN.py From SSUN with MIT License
def MSCNN_RS(num_PC, img_rows, img_cols):
    CNNInput = Input(shape=[img_rows, img_cols, num_PC], name='CNNInput')

    CONV1 = Conv2D(32, (3, 3), activation='relu', padding='same', name='CONV1')(CNNInput)
    POOL1 = MaxPooling2D((2, 2), name='POOL1')(CONV1)

    CONV2 = Conv2D(32, (3, 3), activation='relu', padding='same', name='CONV2')(POOL1)
    POOL2 = MaxPooling2D((2, 2), name='POOL2')(CONV2)

    CONV3 = Conv2D(32, (3, 3), activation='relu', padding='same', name='CONV3')(POOL2)
    POOL3 = MaxPooling2D((2, 2), name='POOL3')(CONV3)

    FLATTEN1 = Flatten(name='FLATTEN1')(POOL1)
    FLATTEN2 = Flatten(name='FLATTEN2')(POOL2)
    FLATTEN3 = Flatten(name='FLATTEN3')(POOL3)

    DENSE1 = Dense(128, activation='relu', name='DENSE1')(FLATTEN1)
    DENSE2 = Dense(128, activation='relu', name='DENSE2')(FLATTEN2)
    DENSE3 = Dense(128, activation='relu', name='DENSE3')(FLATTEN3)

    CNNDense = Add()([DENSE1, DENSE2, DENSE3])
    # nb_classes is defined at module level in the source project
    CNNSOFTMAX = Dense(nb_classes, activation='softmax', name='CNNSOFTMAX')(CNNDense)

    model = Model(inputs=[CNNInput], outputs=[CNNSOFTMAX])

    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, amsgrad=False)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model

#%% Spectral
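Here Add() fuses the three 128-dimensional features extracted at different pooling depths into a single multi-scale representation. A hypothetical invocation, assuming nb_classes is defined at module scope as in the source project:

model = MSCNN_RS(num_PC=4, img_rows=28, img_cols=28)
model.summary()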
Example #14
Source File: model.py From raster-deep-learning with Apache License 2.0
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #15
Source File: model.py From raster-deep-learning with Apache License 2.0
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with
    subsample=(2,2) and the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #16
Source File: blocks.py From deep-learning-explorer with Apache License 2.0
def identity_block(kernel_size, filters, stage, block, weight_decay=0., batch_momentum=0.99):
    '''The identity_block is the block that has no conv layer at shortcut
    # Arguments
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if KB.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                      kernel_regularizer=KR.l2(weight_decay))(input_tensor)
        x = KL.BatchNormalization(
            axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = KL.Activation('relu')(x)

        x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                      name=conv_name_base + '2b',
                      kernel_regularizer=KR.l2(weight_decay))(x)
        x = KL.BatchNormalization(
            axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = KL.Activation('relu')(x)

        x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                      kernel_regularizer=KR.l2(weight_decay))(x)
        x = KL.BatchNormalization(
            axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        x = KL.Add()([x, input_tensor])
        x = KL.Activation('relu')(x)
        return x
    return f
Example #17
Source File: blocks.py From deep-learning-explorer with Apache License 2.0
def atrous_identity_block(kernel_size, filters, stage, block, weight_decay=0.,
                          atrous_rate=(2, 2), batch_momentum=0.99):
    '''The identity_block is the block that has no conv layer at shortcut
    # Arguments
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if KB.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                      kernel_regularizer=KR.l2(weight_decay))(input_tensor)
        x = KL.BatchNormalization(
            axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = KL.Activation('relu')(x)

        x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size),
                      dilation_rate=atrous_rate, padding='same',
                      name=conv_name_base + '2b',
                      kernel_regularizer=KR.l2(weight_decay))(x)
        x = KL.BatchNormalization(
            axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = KL.Activation('relu')(x)

        x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                      kernel_regularizer=KR.l2(weight_decay))(x)
        x = KL.BatchNormalization(
            axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        x = KL.Add()([x, input_tensor])
        x = KL.Activation('relu')(x)
        return x
    return f
Example #18
Source File: model.py From deep-learning-explorer with Apache License 2.0
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #19
Source File: model.py From DeepTL-Lane-Change-Classification with MIT License
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer at main path is with
    subsample=(2,2) and the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)

    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #20
Source File: wrn.py From sesemi with MIT License
def expand_conv(init, base, k, strides=(1, 1)):
    x = Conv2D(base * k, (3, 3), padding='same', strides=strides,
               **conv_params)(init)
    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(base * k, (3, 3), padding='same', **conv_params)(x)

    skip = Conv2D(base * k, (1, 1), padding='same', strides=strides,
                  **conv_params)(init)

    m = Add()([x, skip])
    return m
Example #21
Source File: wrn.py From sesemi with MIT License
def conv2_block(input, k=1, dropout=0.0):
    init = input

    x = BatchNormalization(**bn_params)(input)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(32 * k, (3, 3), padding='same', **conv_params)(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(32 * k, (3, 3), padding='same', **conv_params)(x)

    m = Add()([init, x])
    return m
Example #22
Source File: network.py From ecg with GNU General Public License v3.0
def resnet_block(
        layer,
        num_filters,
        subsample_length,
        block_index,
        **params):
    from keras.layers import Add
    from keras.layers import MaxPooling1D
    from keras.layers.core import Lambda

    def zeropad(x):
        y = K.zeros_like(x)
        return K.concatenate([x, y], axis=2)

    def zeropad_output_shape(input_shape):
        shape = list(input_shape)
        assert len(shape) == 3
        shape[2] *= 2
        return tuple(shape)

    shortcut = MaxPooling1D(pool_size=subsample_length)(layer)
    zero_pad = (block_index % params["conv_increase_channels_at"]) == 0 \
        and block_index > 0
    if zero_pad is True:
        shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(shortcut)

    for i in range(params["conv_num_skip"]):
        if not (block_index == 0 and i == 0):
            layer = _bn_relu(
                layer,
                dropout=params["conv_dropout"] if i > 0 else 0,
                **params)
        layer = add_conv_weight(
            layer,
            params["conv_filter_length"],
            num_filters,
            subsample_length if i == 0 else 1,
            **params)
    layer = Add()([shortcut, layer])
    return layer
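The zeropad helper doubles the channel dimension of the shortcut by concatenating zeros, so the shortcut keeps matching the main path whenever the block doubles its filter count. A quick numeric check of that helper (a sketch, with K as keras.backend):

import numpy as np
from keras import backend as K

x = K.constant(np.ones((1, 4, 8)))              # (batch, time, channels)
y = K.concatenate([x, K.zeros_like(x)], axis=2)
print(K.int_shape(y))                           # (1, 4, 16)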
Example #23
Source File: model.py From attention-is-all-you-need-keras with MIT License
def __call__(self, x):
    x_embedded = self._embedding(x)
    pos_encoding = self._position_encoding(x)
    pos_encoding_embedded = self._position_embedding(pos_encoding)
    x = Add()([x_embedded, pos_encoding_embedded])
    for layer in self._layers:
        x = layer(x)
    return x
Example #24
Source File: model.py From attention-is-all-you-need-keras with MIT License
def __init__(self, h=8, d_k=64, d_v=64, d_model=512, d_inner_hid=2048,
             return_attention=True):
    self._mha_a = MultiHeadAttention(h=h, d_k=d_k, d_v=d_v, d_model=d_model,
                                     return_attention=return_attention)
    self._mha_b = MultiHeadAttention(h=h, d_k=d_k, d_v=d_v, d_model=d_model,
                                     return_attention=return_attention)
    self._psfw = PositionWiseFeedForward(d_model=d_model, d_ff=d_inner_hid)
    self._ln_a = LayerNormalization()
    self._ln_b = LayerNormalization()
    self._ln_c = LayerNormalization()
    self._add_a = Add()
    self._add_b = Add()
    self._add_c = Add()
    self._return_attention = return_attention
Example #25
Source File: model.py From attention-is-all-you-need-keras with MIT License
def __init__(self, h=8, d_k=64, d_v=64, d_model=512, d_inner_hid=2048):
    self._mha = MultiHeadAttention(h=h, d_k=d_k, d_v=d_v, d_model=d_model)
    self._ln_a = LayerNormalization()
    self._psfw = PositionWiseFeedForward(d_model=d_model, d_ff=d_inner_hid)
    self._ln_b = LayerNormalization()
    self._add_a = Add()
    self._add_b = Add()
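The two Add layers carry the encoder block's residual connections. In the usual post-norm Transformer wiring, the block's forward pass would look roughly like this (a sketch only; the attention call signature is assumed, not taken from the project):

def __call__(self, x):
    att = self._mha(x)                       # hypothetical self-attention call
    x = self._ln_a(self._add_a([x, att]))    # residual + layer norm
    ff = self._psfw(x)
    x = self._ln_b(self._add_b([x, ff]))     # residual + layer norm
    return x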
Example #26
Source File: model.py From Mask-RCNN-Pedestrian-Detection with MIT License
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object
    optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
                                     clipnorm=self.config.GRADIENT_CLIP_NORM)
    # Add Losses
    # First, clear previously set losses to avoid duplication
    self.keras_model._losses = []
    self.keras_model._per_input_losses = {}
    loss_names = ["rpn_class_loss", "rpn_bbox_loss",
                  "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output in self.keras_model.losses:
            continue
        self.keras_model.add_loss(
            tf.reduce_mean(layer.output, keep_dims=True))

    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
                  for w in self.keras_model.trainable_weights
                  if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))

    # Compile
    self.keras_model.compile(optimizer=optimizer,
                             loss=[None] * len(self.keras_model.outputs))

    # Add metrics for losses
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        self.keras_model.metrics_tensors.append(tf.reduce_mean(
            layer.output, keep_dims=True))
Example #27
Source File: model.py From Mask-RCNN-Pedestrian-Detection with MIT License
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with
    subsample=(2,2) and the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #28
Source File: model.py From Mask-RCNN-Pedestrian-Detection with MIT License
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x
Example #29
Source File: wrn.py From sesemi with MIT License
def conv1_block(input, k=1, dropout=0.0):
    init = input

    x = BatchNormalization(**bn_params)(input)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(16 * k, (3, 3), padding='same', **conv_params)(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(16 * k, (3, 3), padding='same', **conv_params)(x)

    m = Add()([init, x])
    return m
Example #30
Source File: wrn.py From sesemi with MIT License
def conv3_block(input, k=1, dropout=0.0):
    init = input

    x = BatchNormalization(**bn_params)(input)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(64 * k, (3, 3), padding='same', **conv_params)(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(**bn_params)(x)
    x = LeakyReLU(leakiness)(x)
    x = Conv2D(64 * k, (3, 3), padding='same', **conv_params)(x)

    m = Add()([init, x])
    return m