Python keras.layers.multiply() Examples
The following are code examples of keras.layers.multiply(), collected from open-source projects. Each example is credited above the code to its original project, author, and source file, so you can follow it back to its source. You may also want to check out all the other available functions and classes of the keras.layers module.
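As a quick orientation before the project examples: keras.layers.multiply() takes a list of tensors of identical shape and returns their element-wise product. Here is a minimal, self-contained sketch (illustrative only, not drawn from any of the projects below):

import numpy as np
from keras.layers import Input, multiply
from keras.models import Model

# Two inputs of the same shape; multiply() returns their element-wise product.
a = Input(shape=(4,))
b = Input(shape=(4,))
out = multiply([a, b])

model = Model([a, b], out)
x = np.ones((1, 4))
y = np.full((1, 4), 2.0)
print(model.predict([x, y]))  # -> [[2. 2. 2. 2.]]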

Example #1
Source Project: keras-squeeze-excite-network | Author: titu1994 | File: se.py | License: MIT License

def spatial_squeeze_excite_block(input):
    ''' Create a spatial squeeze-excite block

    Args:
        input: input tensor

    Returns: a keras tensor

    References
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    '''
    se = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False,
                kernel_initializer='he_normal')(input)

    x = multiply([input, se])
    return x

Example #2
Source Project: keras-squeeze-excite-network | Author: titu1994 | File: se.py | License: MIT License

def spatial_squeeze_excite_block(input_tensor):
    """ Create a spatial squeeze-excite block

    Args:
        input_tensor: input Keras tensor

    Returns: a Keras tensor

    References
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    se = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False,
                kernel_initializer='he_normal')(input_tensor)

    x = multiply([input_tensor, se])
    return x

Example #3
Source Project: imgclsmob | Author: osmr | File: common.py | License: MIT License

def swish(x, name="swish"):
    """
    Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    name : str, default 'swish'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    w = nn.Activation("sigmoid", name=name + "/sigmoid")(x)
    x = nn.multiply([x, w], name=name + "/mul")
    return x

Example #4
Source Project: voxelmorph | Author: voxelmorph | File: models.py | License: GNU General Public License v3.0

def _softmax(x, axis=-1, alpha=1):
    """
    building on keras implementation, allow alpha parameter

    Softmax activation function.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a value to multiply all x
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')

Example #5
Source Project: coremltools | Author: apple | File: test_keras2_numeric.py | License: BSD 3-Clause "New" or "Revised" License

def test_tiny_mul_random(self):
    np.random.seed(1988)
    input_dim = 10
    num_channels = 6

    # Define a model
    input_tensor = Input(shape=(input_dim,))
    x1 = Dense(num_channels)(input_tensor)
    x2 = Dense(num_channels)(x1)
    x3 = Dense(num_channels)(x1)
    x4 = multiply([x2, x3])
    x5 = Dense(num_channels)(x4)

    model = Model(inputs=[input_tensor], outputs=[x5])

    # Set some random weights
    model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

    # Get the coreml model
    self._test_model(model)

Example #6
Source Project: coremltools | Author: apple | File: test_keras2_numeric.py | License: BSD 3-Clause "New" or "Revised" License

def test_dense_elementwise_params(self):
    options = dict(modes=[add, multiply, concatenate, average, maximum])

    def build_model(mode):
        x1 = Input(shape=(3,))
        x2 = Input(shape=(3,))
        y1 = Dense(4)(x1)
        y2 = Dense(4)(x2)
        z = mode([y1, y2])
        model = Model([x1, x2], z)
        return mode, model

    product = itertools.product(*options.values())
    args = [build_model(p[0]) for p in product]
    print("Testing a total of %s cases. This could take a while" % len(args))
    for param, model in args:
        self._run_test(model, param)

Example #7
Source Project: recurrentshop | Author: farizrahman4u | File: recurrent_highway_networks.py | License: MIT License

def RHN(input_dim, hidden_dim, depth):
    # Wrapped model
    inp = Input(batch_shape=(batch_size, input_dim))
    state = Input(batch_shape=(batch_size, hidden_dim))
    drop_mask = Input(batch_shape=(batch_size, hidden_dim))
    # To avoid all zero mask causing gradient to vanish
    inverted_drop_mask = Lambda(lambda x: 1.0 - x, output_shape=lambda s: s)(drop_mask)
    drop_mask_2 = Lambda(lambda x: x + 0., output_shape=lambda s: s)(inverted_drop_mask)
    dropped_state = multiply([state, inverted_drop_mask])
    y, new_state = RHNCell(units=hidden_dim, recurrence_depth=depth,
                           kernel_initializer=weight_init,
                           kernel_regularizer=l2(weight_decay),
                           kernel_constraint=max_norm(gradient_clip),
                           bias_initializer=Constant(transform_bias),
                           recurrent_initializer=weight_init,
                           recurrent_regularizer=l2(weight_decay),
                           recurrent_constraint=max_norm(gradient_clip))([inp, dropped_state])
    return RecurrentModel(input=inp, output=y,
                          initial_states=[state, drop_mask],
                          final_states=[new_state, drop_mask_2])

# lr decay Scheduler

Example #8
Source Project: recurrentshop | Author: farizrahman4u | File: query_reduction_network.py | License: MIT License

def QRNcell():
    xq = Input(batch_shape=(batch_size, embedding_dim * 2))
    # Split into context and query
    xt = Lambda(lambda x, dim: x[:, :dim], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] / 2))(xq)
    qt = Lambda(lambda x, dim: x[:, dim:], arguments={'dim': embedding_dim},
                output_shape=lambda s: (s[0], s[1] / 2))(xq)

    h_tm1 = Input(batch_shape=(batch_size, embedding_dim))

    zt = Dense(1, activation='sigmoid', bias_initializer=Constant(2.5))(multiply([xt, qt]))
    zt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1),
                arguments={'dim': embedding_dim})(zt)
    ch = Dense(embedding_dim, activation='tanh')(concatenate([xt, qt], axis=-1))
    rt = Dense(1, activation='sigmoid')(multiply([xt, qt]))
    rt = Lambda(lambda x, dim: K.repeat_elements(x, dim, axis=1),
                arguments={'dim': embedding_dim})(rt)
    ht = add([multiply([zt, ch, rt]),
              multiply([Lambda(lambda x: 1 - x, output_shape=lambda s: s)(zt), h_tm1])])

    return RecurrentModel(input=xq, output=ht,
                          initial_states=[h_tm1], final_states=[ht],
                          return_sequences=True)

#
# Load data
#

Example #9
Source Project: DeepLearning_Wavelet-LSTM | Author: hello-sea | File: merge_test.py | License: MIT License

def test_merge_multiply():
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    i3 = layers.Input(shape=(4, 5))
    o = layers.multiply([i1, i2, i3])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2, i3], o)

    mul_layer = layers.Multiply()
    o2 = mul_layer([i1, i2, i3])
    assert mul_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    x3 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2, x3])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, x1 * x2 * x3, atol=1e-4)

Example #10
Source Project: neurowriter | Author: albarji | File: models.py | License: MIT License

def gatedblock(dilation, dropout, kernels, kernel_size):
    """Keras compatible Dilated convolution layer.

    Includes Gated activation, skip connections, batch normalization and dropout.
    """
    def f(input_):
        norm = BatchNormalization()(input_)
        # Dropout of inputs
        drop = Dropout(dropout)(norm)
        # Normal activation
        normal_out = Conv1D(kernels, kernel_size, dilation_rate=dilation,
                            activation='tanh', padding='same')(drop)
        # Gate
        gate_out = Conv1D(kernels, kernel_size, dilation_rate=dilation,
                          activation='sigmoid', padding='same')(drop)
        # Point-wise nonlinear · gate
        merged = multiply([normal_out, gate_out])
        # Activation after gate
        skip_out = Conv1D(kernels, 1, activation='tanh')(merged)
        # Residual connections: allow the network input to skip the
        # whole block if necessary
        out = add([skip_out, input_])
        return out, skip_out
    return f

Example #11
Source Project: contextual-multimodal-fusion | Author: soujanyaporia | File: trimodal_attention_models.py | License: MIT License

def self_attention(x):
    '''
    . stands for dot product
    * stands for elemwise multiplication

    m = x . transpose(x)
    n = softmax(m)
    o = n . x
    a = o * x

    return a
    '''
    m = dot([x, x], axes=[2, 2])
    n = Activation('softmax')(m)
    o = dot([n, x], axes=[2, 1])
    a = multiply([o, x])
    return a

Example #12
Source Project: DeepLearning-SeGAN-Segmentation | Author: iNLyze | File: SeGAN.py | License: MIT License

def segmentor(self, start_filters=64, filter_inc_rate=2, out_ch=1, depth=2):
    """
    Creates recursively a segmentor model a.k.a. generator in GAN literature
    """
    inp = Input(shape=self.shape)
    first_block = convl1_lrelu(inp, start_filters, 4, 2)
    middle_blocks = level_block(first_block, int(start_filters * 2), depth=depth,
                                filter_inc_rate=filter_inc_rate, p=0.1)
    if self.softmax:
        # out_ch+1, because softmax needs crossentropy
        last_block = upsampl_softmax(middle_blocks, out_ch + 1, 3, 1, 2, self.max_project)
    else:
        last_block = upsampl_conv(middle_blocks, out_ch, 3, 1, 2)
    if self.crop:
        out = multiply([inp, last_block])  # crop input with predicted mask
        return Model([inp], [out], name='segmentor_net')
    return Model([inp], [last_block], name='segmentor_net')
    #return Model([inp], [last_block], name='segmentor_net')

Example #13
Source Project: DeepLearning-SeGAN-Segmentation | Author: iNLyze | File: SeGAN.py | License: MIT License

def critic(self):
    """
    Creates a critic a.k.a. discriminator model
    """
    # Note: Future improvement is to provide definable depth of critic
    inp_cropped = Input(self.shape, name='inp_cropped_image')  # Data cropped with generated OR g.t. mask

    shared_1 = shared_convl1_lrelu(self.shape, 64, 4, 2, name='shared_1_conv_lrelu')
    shared_2 = shared_convl1_bn_lrelu((16, 16, 64), 128, 4, 2, name='shared_2_conv_bn_lrelu')
    shared_3 = shared_convl1_bn_lrelu((8, 8, 128), 256, 4, 2, name='shared_3_conv_bn_lrelu')
    shared_4 = shared_convl1_bn_lrelu((4, 4, 256), 512, 4, 2, name='shared_4_conv_bn_lrelu')

    x1_S = shared_1(inp_cropped)
    #x1_S = shared_1(multiply([inp, mask]))
    x2_S = shared_2(x1_S)
    x3_S = shared_3(x2_S)
    x4_S = shared_4(x3_S)
    features = Concatenate(name='features_S')(
        [Flatten()(inp_cropped), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
        #[Flatten()(inp), Flatten()(x1_S), Flatten()(x2_S), Flatten()(x3_S), Flatten()(x4_S)]
    )
    return Model(inp_cropped, features, name='critic_net')
    #return Model([inp, mask], features, name='critic_net')

Example #14
Source Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation | Author: JACKYLUO1991 | File: lednet.py | License: Apache License 2.0

def apn_module(self, x):

    def right(x):
        x = layers.AveragePooling2D()(x)
        x = layers.Conv2D(self.classes, kernel_size=1, padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.UpSampling2D(interpolation='bilinear')(x)
        return x

    def conv(x, filters, kernel_size, stride):
        x = layers.Conv2D(filters, kernel_size=kernel_size,
                          strides=(stride, stride), padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        return x

    x_7 = conv(x, int(x.shape[-1]), 7, stride=2)
    x_5 = conv(x_7, int(x.shape[-1]), 5, stride=2)
    x_3 = conv(x_5, int(x.shape[-1]), 3, stride=2)

    x_3_1 = conv(x_3, self.classes, 3, stride=1)
    x_3_1_up = layers.UpSampling2D(interpolation='bilinear')(x_3_1)

    x_5_1 = conv(x_5, self.classes, 5, stride=1)
    x_3_5 = layers.add([x_5_1, x_3_1_up])
    x_3_5_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5)

    x_7_1 = conv(x_7, self.classes, 3, stride=1)
    x_3_5_7 = layers.add([x_7_1, x_3_5_up])
    x_3_5_7_up = layers.UpSampling2D(interpolation='bilinear')(x_3_5_7)

    x_middle = conv(x, self.classes, 1, stride=1)
    x_middle = layers.multiply([x_3_5_7_up, x_middle])

    x_right = right(x)
    x_middle = layers.add([x_middle, x_right])
    return x_middle

Example #15
Source Project: Keras-GAN | Author: eriklindernoren | File: cgan.py | License: MIT License

def build_generator(self):

    model = Sequential()

    model.add(Dense(256, input_dim=self.latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(self.img_shape), activation='tanh'))
    model.add(Reshape(self.img_shape))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)

Example #16
Source Project: Keras-GAN | Author: eriklindernoren | File: cgan.py | License: MIT License

def build_discriminator(self):

    model = Sequential()

    model.add(Dense(512, input_dim=np.prod(self.img_shape)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.img_shape)
    label = Input(shape=(1,), dtype='int32')

    label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
    flat_img = Flatten()(img)

    model_input = multiply([flat_img, label_embedding])

    validity = model(model_input)

    return Model([img, label], validity)

Example #17
Source Project: Keras-GAN | Author: eriklindernoren | File: acgan.py | License: MIT License

def build_generator(self):

    model = Sequential()

    model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))

    model_input = multiply([noise, label_embedding])
    img = model(model_input)

    return Model([noise, label], img)

Example #18
Source Project: keras-squeeze-excite-network | Author: titu1994 | File: se.py | License: MIT License

def squeeze_excite_block(input, ratio=16):
    ''' Create a channel-wise squeeze-excite block

    Args:
        input: input tensor
        filters: number of output filters

    Returns: a keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = init._keras_shape[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x

Example #19
Source Project: keras-squeeze-excite-network | Author: titu1994 | File: se.py | License: MIT License

def squeeze_excite_block(input_tensor, ratio=16):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        ratio: number of output filters

    Returns: a Keras tensor

    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    init = input_tensor
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    filters = _tensor_shape(init)[channel_axis]
    se_shape = (1, 1, filters)

    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)

    if K.image_data_format() == 'channels_first':
        se = Permute((3, 1, 2))(se)

    x = multiply([init, se])
    return x

Example #20
Source Project: MalConv-keras | Author: j40903272 | File: malconv.py | License: MIT License

def Malconv(max_len=200000, win_size=500, vocab_size=256):
    inp = Input((max_len,))
    emb = Embedding(vocab_size, 8)(inp)

    conv1 = Conv1D(kernel_size=(win_size), filters=128, strides=(win_size), padding='same')(emb)
    conv2 = Conv1D(kernel_size=(win_size), filters=128, strides=(win_size), padding='same')(emb)
    a = Activation('sigmoid', name='sigmoid')(conv2)

    mul = multiply([conv1, a])
    a = Activation('relu', name='relu')(mul)
    p = GlobalMaxPool1D()(a)
    d = Dense(64)(p)
    out = Dense(1, activation='sigmoid')(d)

    return Model(inp, out)

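Since Malconv() returns a complete Model, training it is plain Keras. A sketch under the assumption that X_train holds fixed-length byte sequences and y_train holds 0/1 malware labels (both names are hypothetical):

model = Malconv()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# X_train: int array of shape (n_samples, 200000) with byte values in [0, 255]
# y_train: binary labels (1 = malicious) -- both arrays assumed to exist
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_split=0.1)
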
Example #21
Source Project: CBAM-keras | Author: kobiso | File: attention_module.py | License: MIT License

def se_block(input_feature, ratio=8):
    """Contains the implementation of Squeeze-and-Excitation(SE) block.
    As described in https://arxiv.org/abs/1709.01507.
    """
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    se_feature = GlobalAveragePooling2D()(input_feature)
    se_feature = Reshape((1, 1, channel))(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)
    se_feature = Dense(channel // ratio,
                       activation='relu',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel // ratio)
    se_feature = Dense(channel,
                       activation='sigmoid',
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros')(se_feature)
    assert se_feature._keras_shape[1:] == (1, 1, channel)
    if K.image_data_format() == 'channels_first':
        se_feature = Permute((3, 1, 2))(se_feature)

    se_feature = multiply([input_feature, se_feature])
    return se_feature

Example #22
Source Project: CBAM-keras | Author: kobiso | File: attention_module.py | License: MIT License

def channel_attention(input_feature, ratio=8):

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    channel = input_feature._keras_shape[channel_axis]

    shared_layer_one = Dense(channel // ratio,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')

    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1, 1, channel))(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)
    avg_pool = shared_layer_one(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel // ratio)
    avg_pool = shared_layer_two(avg_pool)
    assert avg_pool._keras_shape[1:] == (1, 1, channel)

    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1, 1, channel))(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)
    max_pool = shared_layer_one(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel // ratio)
    max_pool = shared_layer_two(max_pool)
    assert max_pool._keras_shape[1:] == (1, 1, channel)

    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])

Example #23
Source Project: CBAM-keras | Author: kobiso | File: attention_module.py | License: MIT License

def spatial_attention(input_feature):
    kernel_size = 7

    if K.image_data_format() == "channels_first":
        channel = input_feature._keras_shape[1]
        cbam_feature = Permute((2, 3, 1))(input_feature)
    else:
        channel = input_feature._keras_shape[-1]
        cbam_feature = input_feature

    avg_pool = Lambda(lambda x: K.mean(x, axis=3, keepdims=True))(cbam_feature)
    assert avg_pool._keras_shape[-1] == 1
    max_pool = Lambda(lambda x: K.max(x, axis=3, keepdims=True))(cbam_feature)
    assert max_pool._keras_shape[-1] == 1
    concat = Concatenate(axis=3)([avg_pool, max_pool])
    assert concat._keras_shape[-1] == 2
    cbam_feature = Conv2D(filters=1,
                          kernel_size=kernel_size,
                          strides=1,
                          padding='same',
                          activation='sigmoid',
                          kernel_initializer='he_normal',
                          use_bias=False)(concat)
    assert cbam_feature._keras_shape[-1] == 1

    if K.image_data_format() == "channels_first":
        cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])

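In CBAM (https://arxiv.org/abs/1807.06521) these two modules are applied in sequence, channel attention first and spatial attention second. A minimal sketch of that composition, written against the two functions above (the wrapper name follows the repository's cbam_block, but treat the exact signature as an assumption):

def cbam_block(input_feature, ratio=8):
    # Refine features with channel attention, then spatial attention,
    # in the order used by the CBAM paper.
    refined = channel_attention(input_feature, ratio)
    refined = spatial_attention(refined)
    return refined
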