Python keras.backend.relu() Examples
The following are 30 code examples of keras.backend.relu(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
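For reference, keras.backend.relu(x, alpha=0.0, max_value=None) computes element-wise max(x, 0); alpha sets a slope for negative inputs and max_value a saturation ceiling (newer Keras versions also accept a threshold argument). Most of the variations below are built from these two parameters. A minimal sketch of the three behaviours, assuming a TensorFlow backend:

from keras import backend as K

x = K.constant([-2.0, -0.5, 0.0, 3.0, 8.0])

print(K.eval(K.relu(x)))                 # [0.   0.   0. 3. 8.] -- max(x, 0)
print(K.eval(K.relu(x, alpha=0.1)))      # [-0.2 -0.05 0. 3. 8.] -- leaky slope for x < 0
print(K.eval(K.relu(x, max_value=6.0)))  # [0.   0.   0. 3. 6.] -- capped at 6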
Example #1
Source File: mobilenet_base.py From MobileNetV3 with MIT License | 8 votes

def _squeeze(self, inputs):
    """Squeeze and Excitation.

    This function defines a squeeze structure.

    # Arguments
        inputs: Tensor, input tensor of conv layer.
    """
    input_channels = int(inputs.shape[-1])

    x = GlobalAveragePooling2D()(inputs)
    x = Dense(input_channels, activation='relu')(x)
    x = Dense(input_channels, activation='hard_sigmoid')(x)
    x = Reshape((1, 1, input_channels))(x)
    x = Multiply()([inputs, x])

    return x
Example #2
Source File: keras_models.py From keras-language-modeling with MIT License | 6 votes

def compile(self, optimizer, **kwargs):
    qa_model = self.get_qa_model()

    good_similarity = qa_model([self.question, self.answer_good])
    bad_similarity = qa_model([self.question, self.answer_bad])

    # loss = merge([good_similarity, bad_similarity],
    #              mode=lambda x: K.relu(self.config['margin'] - x[0] + x[1]),
    #              output_shape=lambda x: x[0])

    loss = Lambda(lambda x: K.relu(self.config['margin'] - x[0] + x[1]),
                  output_shape=lambda x: x[0])([good_similarity, bad_similarity])

    self.prediction_model = Model(inputs=[self.question, self.answer_good],
                                  outputs=good_similarity, name='prediction_model')
    self.prediction_model.compile(loss=lambda y_true, y_pred: y_pred,
                                  optimizer=optimizer, **kwargs)

    self.training_model = Model(inputs=[self.question, self.answer_good, self.answer_bad],
                                outputs=loss, name='training_model')
    self.training_model.compile(loss=lambda y_true, y_pred: y_pred,
                                optimizer=optimizer, **kwargs)
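The Lambda here computes a standard margin ranking (hinge) loss, K.relu(margin - good + bad): it is zero once the good answer outscores the bad one by at least the margin, and grows linearly otherwise. A quick standalone check of the expression with hypothetical similarity values and margin 0.2:

from keras import backend as K

margin = 0.2
good = K.constant([0.9, 0.6])
bad = K.constant([0.3, 0.55])
print(K.eval(K.relu(margin - good + bad)))  # [0.0, 0.15] -- only the close pair is penalized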
Example #3
Source File: example_gan_convolutional.py From keras-adversarial with MIT License | 6 votes

def model_discriminator(input_shape=(1, 28, 28), dropout_rate=0.5):
    d_input = dim_ordering_input(input_shape, name="input_x")
    nch = 512
    # nch = 128
    H = Convolution2D(int(nch / 2), 5, 5, subsample=(2, 2),
                      border_mode='same', activation='relu')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Convolution2D(nch, 5, 5, subsample=(2, 2),
                      border_mode='same', activation='relu')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(int(nch / 2))(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(1, activation='sigmoid')(H)
    return Model(d_input, d_V)
Example #4
Source File: example_gan_convolutional.py From keras-adversarial with MIT License | 6 votes

def model_generator():
    nch = 256
    g_input = Input(shape=[100])
    H = Dense(nch * 14 * 14)(g_input)
    H = BatchNormalization(mode=2)(H)
    H = Activation('relu')(H)
    H = dim_ordering_reshape(nch, 14)(H)
    H = UpSampling2D(size=(2, 2))(H)
    H = Convolution2D(int(nch / 2), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(int(nch / 4), 3, 3, border_mode='same')(H)
    H = BatchNormalization(mode=2, axis=1)(H)
    H = Activation('relu')(H)
    H = Convolution2D(1, 1, 1, border_mode='same')(H)
    g_V = Activation('sigmoid')(H)
    return Model(g_input, g_V)
Example #5
Source File: reactionrnn.py From reactionrnn with MIT License | 6 votes

def reactionrnn_model(weights_path, num_classes, maxlen=140):
    '''
    Builds the model architecture for textgenrnn and
    loads the pretrained weights for the model.
    '''
    input = Input(shape=(maxlen,), name='input')
    embedded = Embedding(num_classes, 100, input_length=maxlen,
                         name='embedding')(input)
    rnn = GRU(256, return_sequences=False, name='rnn')(embedded)
    output = Dense(5, name='output',
                   activation=lambda x: K.relu(x) / K.sum(K.relu(x), axis=-1))(rnn)

    model = Model(inputs=[input], outputs=[output])
    model.load_weights(weights_path, by_name=True)
    model.compile(loss='mse', optimizer='nadam')
    return model
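The lambda activation on the output layer is a "rectified softmax": logits are passed through ReLU and then normalized, so the five outputs are non-negative and sum to one. The same expression in isolation, with hypothetical logits (note that for batches larger than one you would want keepdims=True in K.sum):

from keras import backend as K

logits = K.constant([[-1.0, 2.0, 0.0, 1.0, 3.0]])
scores = K.relu(logits) / K.sum(K.relu(logits), axis=-1)
print(K.eval(scores))  # [[0. 0.333 0. 0.167 0.5]] -- non-negative, sums to 1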
Example #6
Source File: layers.py From nn_playground with MIT License | 6 votes

def call(self, inputs):
    if self.data_format == 'channels_first':
        sq = K.mean(inputs, [2, 3])
    else:
        sq = K.mean(inputs, [1, 2])

    ex = K.dot(sq, self.kernel1)
    if self.use_bias:
        ex = K.bias_add(ex, self.bias1)
    ex = K.relu(ex)

    ex = K.dot(ex, self.kernel2)
    if self.use_bias:
        ex = K.bias_add(ex, self.bias2)
    ex = K.sigmoid(ex)

    if self.data_format == 'channels_first':
        ex = K.expand_dims(ex, -1)
        ex = K.expand_dims(ex, -1)
    else:
        ex = K.expand_dims(ex, 1)
        ex = K.expand_dims(ex, 1)

    return inputs * ex
Example #7
Source File: train.py From landmark-recognition-challenge with GNU General Public License v3.0 | 6 votes

def triplet_loss(X):
    # https://arxiv.org/pdf/1804.07275v1.pdf
    # Eq (1)
    features = K.int_shape(X)[-1] // 3
    p1, p2, n1 = X[..., :features], X[..., features:2 * features], X[..., 2 * features:]
    d_p1_p2 = K.sum(K.square(p1 - p2), axis=-1, keepdims=True)
    d_p1_n1 = K.sum(K.square(p1 - n1), axis=-1, keepdims=True)
    d_p2_n1 = K.sum(K.square(p2 - n1), axis=-1, keepdims=True)
    m = 2.
    loss = K.relu(m + d_p1_p2 - d_p1_n1) + K.relu(m + d_p1_p2 - d_p2_n1)
    # Eq (3,4) note: lambda trade-off param confirmed to be 1e-3
    # by the paper authors (by email)
    loss += 1e-3 * (
        K.sum(K.square(p1), axis=-1, keepdims=True) +
        K.sum(K.square(p2), axis=-1, keepdims=True) +
        K.sum(K.square(n1), axis=-1, keepdims=True))
    return loss
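K.relu serves as the hinge [.]+ of Eq. (1): each term contributes only while the positive pair p1, p2 is not at least m = 2 closer (in squared distance) than the negative. A hypothetical sanity check on one concatenated row with two features per descriptor:

from keras import backend as K

# p1, p2, n1, each of width 2, concatenated along the last axis
row = K.constant([[0.0, 0.0, 0.1, 0.0, 3.0, 0.0]])
print(K.eval(triplet_loss(row)))
# d(p1,p2)=0.01, d(p1,n1)=9.0, d(p2,n1)=8.41 -> both hinges are zero,
# leaving only the 1e-3 norm regularizer: ~0.009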
Example #8
Source File: learn_labelembedding.py From semantic-embeddings with MIT License | 6 votes

def labelembed_model(base_model, num_classes, **kwargs):
    input_ = base_model.input
    embedding = base_model.output
    out = keras.layers.Activation('relu')(embedding)
    out = keras.layers.BatchNormalization(name='embedding_bn')(out)
    out1 = keras.layers.Dense(num_classes, name='prob')(out)
    out2 = keras.layers.Dense(num_classes, name='out2')(
        keras.layers.Lambda(lambda x: K.stop_gradient(x))(out))

    cls_input_ = keras.layers.Input((1,), name='labels')
    cls_embedding_layer = keras.layers.Embedding(num_classes, num_classes,
                                                 embeddings_initializer='identity',
                                                 name='labelembeddings')
    cls_embedding = keras.layers.Flatten()(cls_embedding_layer(cls_input_))

    loss = keras.layers.Lambda(
        lambda x: labelembed_loss(x[0], x[1], x[2], K.flatten(x[3]),
                                  num_classes=num_classes, **kwargs)[:, None],
        name='labelembed_loss')([out1, out2, cls_embedding, cls_input_])

    return keras.models.Model([input_, cls_input_], [embedding, out1, loss])
Example #9
Source File: example.py From bayesian_dense with MIT License | 6 votes

def model(hidden_dim=512, input_dim=28 * 28, sigma_regularization=1e-3,
          mu_regularization=1e-5, k=10,
          activation=lambda x: K.relu(x, 1.0 / 5.5)):
    """Create two layer MLP with softmax output"""
    _x = Input(shape=(input_dim,))
    layer = lambda output_dim, activation: BayesianDense(
        output_dim,
        activation=activation,
        W_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
        b_sigma_regularizer=VariationalRegularizer(weight=sigma_regularization),
        W_regularizer=WeightRegularizer(l1=mu_regularization))
    h1 = layer(hidden_dim, activation)
    h2 = layer(hidden_dim, activation)
    y = layer(k, 'softmax')
    _y = y(h2(h1(_x)))
    m = Model(_x, _y)
    m.compile(Adam(1e-3), loss='categorical_crossentropy')
    return m
Example #10
Source File: srelu.py From keras-contrib with MIT License | 6 votes

def call(self, x, mask=None):
    # ensure the right part is always to the right of the left
    t_right_actual = self.t_left + K.abs(self.t_right)

    if K.backend() == 'theano':
        t_left = K.pattern_broadcast(self.t_left, self.param_broadcast)
        a_left = K.pattern_broadcast(self.a_left, self.param_broadcast)
        a_right = K.pattern_broadcast(self.a_right, self.param_broadcast)
        t_right_actual = K.pattern_broadcast(t_right_actual,
                                             self.param_broadcast)
    else:
        t_left = self.t_left
        a_left = self.a_left
        a_right = self.a_right

    y_left_and_center = t_left + K.relu(x - t_left,
                                        a_left,
                                        t_right_actual - t_left)
    y_right = K.relu(x - t_right_actual) * a_right

    return y_left_and_center + y_right
Example #11
Source File: models.py From pOSAL with MIT License | 6 votes

def identity_block(input_tensor, kernel_size, filters, stage, block):
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
Example #12
Source File: learn_labelembedding.py From semantic-embeddings with MIT License | 6 votes

def labelembed_loss(out1, out2, tar, targets, tau=2., alpha=0.9, beta=0.5,
                    num_classes=100):
    out2_prob = K.softmax(out2)
    tau2_prob = K.stop_gradient(K.softmax(out2 / tau))
    soft_tar = K.stop_gradient(K.softmax(tar))

    L_o1_y = K.sparse_categorical_crossentropy(output=K.softmax(out1),
                                               target=targets)

    pred = K.argmax(out2, axis=-1)
    mask = K.stop_gradient(K.cast(K.equal(pred, K.cast(targets, 'int64')),
                                  K.floatx()))
    L_o1_emb = -cross_entropy(out1, soft_tar)  # pylint: disable=invalid-unary-operand-type

    L_o2_y = K.sparse_categorical_crossentropy(output=out2_prob,
                                               target=targets)
    L_emb_o2 = -cross_entropy(tar, tau2_prob) * mask \
        * (K.cast(K.shape(mask)[0], K.floatx()) / (K.sum(mask) + 1e-8))  # pylint: disable=invalid-unary-operand-type

    L_re = K.relu(K.sum(out2_prob * K.one_hot(K.cast(targets, 'int64'),
                                              num_classes),
                        axis=-1) - alpha)

    return beta * L_o1_y + (1 - beta) * L_o1_emb + L_o2_y + L_emb_o2 + L_re
Example #13
Source File: core_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def test_activation():
    # with string argument
    layer_test(layers.Activation,
               kwargs={'activation': 'relu'},
               input_shape=(3, 2))

    # with function argument
    layer_test(layers.Activation,
               kwargs={'activation': K.relu},
               input_shape=(3, 2))
Example #14
Source File: antirectifier.py From pCVR with Apache License 2.0 | 5 votes

def call(self, inputs):
    inputs -= K.mean(inputs, axis=1, keepdims=True)
    inputs = K.l2_normalize(inputs, axis=1)
    pos = K.relu(inputs)
    neg = K.relu(-inputs)
    return K.concatenate([pos, neg], axis=1)

# global parameters
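The antirectifier concatenates the positive and negative parts of the centered, L2-normalized input, so the layer returns twice as many features as it receives. In a custom Keras layer this is normally paired with a matching shape method; a minimal sketch (the method name follows the standard Layer API, the rest of the class is assumed):

def compute_output_shape(self, input_shape):
    shape = list(input_shape)
    assert len(shape) == 2  # expects (batch, features)
    shape[-1] *= 2          # pos and neg halves are concatenated
    return tuple(shape)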
Example #15
Source File: L2_Net.py From pyslam with GNU General Public License v3.0 | 5 votes

def build_cnn(weights):
    model = Sequential()

    model.add(ZeroPadding2D(1, input_shape=(32, 32, 1)))
    model.add(Conv2D(32, kernel_size=(3, 3)))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(Lambda(K.relu))

    model.add(ZeroPadding2D(1))
    model.add(Conv2D(32, kernel_size=(3, 3)))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(Lambda(K.relu))

    model.add(ZeroPadding2D(1))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=2))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(Lambda(K.relu))

    model.add(ZeroPadding2D(1))
    model.add(Conv2D(64, kernel_size=(3, 3)))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(Lambda(K.relu))

    model.add(ZeroPadding2D(1))
    model.add(Conv2D(128, kernel_size=(3, 3), strides=2))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(Lambda(K.relu))

    model.add(ZeroPadding2D(1))
    model.add(Conv2D(128, kernel_size=(3, 3)))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(Lambda(K.relu))

    model.add(Conv2D(128, kernel_size=(8, 8)))
    model.add(BatchNormalization(epsilon=0.0001, scale=False, center=False))
    model.add(LRN(alpha=256, k=0, beta=0.5, n=256))

    model.set_weights(weights)
    return model
Example #16
Source File: activations.py From GRU-D with MIT License | 5 votes

def exp_relu(x):
    return K.exp(-K.relu(x))
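exp_relu maps x to exp(-max(x, 0)), so outputs lie in (0, 1] and equal exactly 1 for non-positive inputs; the GRU-D project uses this shape as a monotonically decaying activation. A quick check with hypothetical values:

from keras import backend as K

x = K.constant([-3.0, 0.0, 1.0, 4.0])
print(K.eval(K.exp(-K.relu(x))))  # [1.0, 1.0, 0.368, 0.018]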
Example #17
Source File: mobilenet_v2.py From MobileNetV2 with MIT License | 5 votes

def relu6(x):
    """Relu 6
    """
    return K.relu(x, max_value=6.0)
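relu6 is min(max(x, 0), 6), the activation the MobileNet papers credit with robustness under low-precision computation. Because it is a plain Python function rather than a built-in activation string, a model saved with it usually has to be reloaded via custom_objects; a sketch with a hypothetical file name:

from keras.models import load_model

# relu6 as defined above must be supplied when deserializing
model = load_model('mobilenet_v2.h5', custom_objects={'relu6': relu6})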
Example #18
Source File: core_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def test_activation():
    # with string argument
    layer_test(layers.Activation,
               kwargs={'activation': 'relu'},
               input_shape=(3, 2))

    # with function argument
    layer_test(layers.Activation,
               kwargs={'activation': K.relu},
               input_shape=(3, 2))
Example #19
Source File: antirectifier.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def call(self, inputs):
    inputs -= K.mean(inputs, axis=1, keepdims=True)
    inputs = K.l2_normalize(inputs, axis=1)
    pos = K.relu(inputs)
    neg = K.relu(-inputs)
    return K.concatenate([pos, neg], axis=1)

# global parameters
Example #20
Source File: antirectifier.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def call(self, inputs):
    inputs -= K.mean(inputs, axis=1, keepdims=True)
    inputs = K.l2_normalize(inputs, axis=1)
    pos = K.relu(inputs)
    neg = K.relu(-inputs)
    return K.concatenate([pos, neg], axis=1)

# global parameters
Example #21
Source File: core_test.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def test_activation():
    # with string argument
    layer_test(layers.Activation,
               kwargs={'activation': 'relu'},
               input_shape=(3, 2))

    # with function argument
    layer_test(layers.Activation,
               kwargs={'activation': K.relu},
               input_shape=(3, 2))
Example #22
Source File: antirectifier.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def call(self, inputs):
    inputs -= K.mean(inputs, axis=1, keepdims=True)
    inputs = K.l2_normalize(inputs, axis=1)
    pos = K.relu(inputs)
    neg = K.relu(-inputs)
    return K.concatenate([pos, neg], axis=1)

# global parameters
Example #23
Source File: baseline_aug.py From medical_image_segmentation with MIT License | 5 votes

def custom_activation(x):
    return K.relu(x, alpha=0.0, max_value=1)
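This variant saturates at 1, so outputs are clipped to [0, 1] -- a natural range for per-pixel mask predictions in these segmentation models. Checking the expression with hypothetical values:

from keras import backend as K

x = K.constant([-0.5, 0.3, 0.9, 2.0])
print(K.eval(custom_activation(x)))  # [0.0, 0.3, 0.9, 1.0]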
Example #24
Source File: antirectifier.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def call(self, inputs):
    inputs -= K.mean(inputs, axis=1, keepdims=True)
    inputs = K.l2_normalize(inputs, axis=1)
    pos = K.relu(inputs)
    neg = K.relu(-inputs)
    return K.concatenate([pos, neg], axis=1)

# global parameters
Example #25
Source File: antirectifier.py From DeepLearning_Wavelet-LSTM with MIT License | 5 votes

def call(self, inputs):
    inputs -= K.mean(inputs, axis=1, keepdims=True)
    inputs = K.l2_normalize(inputs, axis=1)
    pos = K.relu(inputs)
    neg = K.relu(-inputs)
    return K.concatenate([pos, neg], axis=1)

# global parameters
Example #26
Source File: focal_aug.py From medical_image_segmentation with MIT License | 5 votes

def custom_activation(x):
    return K.relu(x, alpha=0.0, max_value=1)
Example #27
Source File: mobilenet_base.py From MobileNetV3 with MIT License | 5 votes

def _hard_swish(self, x):
    """Hard swish
    """
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0
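Hard swish, x * relu6(x + 3) / 6, is MobileNetV3's piecewise approximation of swish (x * sigmoid(x)) that avoids the exponential. Evaluating the same expression on a few hypothetical points:

from keras import backend as K

x = K.constant([-4.0, -1.0, 0.0, 1.0, 4.0])
print(K.eval(x * K.relu(x + 3.0, max_value=6.0) / 6.0))
# [-0.0, -0.333, 0.0, 0.667, 4.0] -- zero for x <= -3, equals x for x >= 3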
Example #28
Source File: baseline_aug_vgg.py From medical_image_segmentation with MIT License | 5 votes

def custom_activation(x):
    return K.relu(x, alpha=0.0, max_value=1)
Example #29
Source File: se_mobilenets.py From TF.Keras-Commonly-used-models with Apache License 2.0 | 5 votes

def relu6(x):
    return K.relu(x, max_value=6)
Example #30
Source File: baseline.py From medical_image_segmentation with MIT License | 5 votes

def custom_activation(x):
    return K.relu(x, alpha=0.0, max_value=1)