Python tensorflow.keras.backend.relu() Examples

The following are 10 code examples of tensorflow.keras.backend.relu(), collected from open-source projects. The original project and source file for each example are noted above it. You may also want to check out all available functions/classes of the module tensorflow.keras.backend, or try the search function.
Example #1
Source File: stylegan_two.py    From StyleGAN2-Tensorflow-2.0 with MIT License
from tensorflow.keras import backend as K

def hinge_d(y_true, y_pred):
    return K.mean(K.relu(1.0 + (y_true * y_pred)))
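The sign convention here is easy to misread: passing +1 targets for real scores and -1 targets for fake scores reproduces the discriminator divergence used in Example #10 below. A quick sketch, assuming hinge_d as defined above (the ±1 targets and the scores are illustrative assumptions, not taken from the source file):

import tensorflow as tf

real_scores = tf.constant([-1.5, -0.2, 0.4])  # hypothetical raw discriminator outputs
fake_scores = tf.constant([0.8, 1.3, -0.1])
# relu(1 + real) + relu(1 - fake): penalizes real scores above -1 and
# fake scores below +1, matching the divergence term in Example #10.
d_loss = hinge_d(1.0, real_scores) + hinge_d(-1.0, fake_scores)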
Example #2
Source File: se_mobilenets.py    From keras-squeeze-excite-network with MIT License
from tensorflow.keras import backend as K

def relu6(x):
    return K.relu(x, max_value=6)
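The max_value argument clamps the positive side, so K.relu(x, max_value=6) computes min(max(x, 0), 6). A quick check (a sketch, not part of the repo):

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([-2.0, 3.0, 6.0, 9.0])
print(K.relu(x, max_value=6).numpy())  # [0. 3. 6. 6.]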
Example #3
Source File: utils.py    From neuron with GNU General Public License v3.0
from tensorflow.keras import backend as K

def odd_shifted_relu(x, shift=-0.5, scale=2.0):
    """
    Odd shifted ReLU.

    For x > 0 this is a shifted ReLU; for x < 0 it is the negative
    mirror image, so the function is odd: f(-x) = -f(x).
    """
    shift = float(shift)
    scale = float(scale)
    return scale * K.relu(x - shift) - scale * K.relu(-x - shift)
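A short sketch confirming the odd symmetry numerically, assuming the function above is in scope (with the defaults, f(0.25) = 2*relu(0.75) - 2*relu(0.25) = 1.0):

import tensorflow as tf

x = tf.constant([0.0, 0.25, 2.0])
print(odd_shifted_relu(x).numpy())   # [ 0.  1.  5.]
print(odd_shifted_relu(-x).numpy())  # [ 0. -1. -5.], i.e. f(-x) == -f(x)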
Example #4
Source File: quantizers.py    From qkeras with Apache License 2.0
def __call__(self, x):
    # Reserve one bit for the sign when a leaky (negative) slope is used.
    non_sign_bits = self.bits - (self.negative_slope != 0)
    m = K.cast_to_floatx(pow(2, non_sign_bits))
    m_i = K.cast_to_floatx(pow(2, self.integer))
    # Unquantized activation, saturated at the largest representable value m_i.
    x_uq = tf.where(
        x <= m_i, K.relu(x, alpha=self.negative_slope), tf.ones_like(x) * m_i)

    if self.use_sigmoid:
      p = _sigmoid(x / m_i) * m
      xq = m_i * tf.keras.backend.clip(
          2.0 * (_round_through(p, self.use_stochastic_rounding) / m) - 1.0,
          0.0, 1.0 - 1.0 / m)
      if self.negative_slope > 0:
        neg_factor = 1 / (self.negative_slope * m)
        xq = xq + m_i * self.negative_slope * tf.keras.backend.clip(
            2.0 * (_round_through(p * self.negative_slope,
            self.use_stochastic_rounding) * neg_factor) - 1.0,
            -1.0, 0.0)
    else:
      p = x * m / m_i
      xq = m_i * tf.keras.backend.clip(
          _round_through(p, self.use_stochastic_rounding) / m, 0.0,
          1.0 - 1.0 / m)
      if self.negative_slope > 0:
        neg_factor = 1 / (self.negative_slope * m)
        xq = xq + m_i * self.negative_slope * (tf.keras.backend.clip(
            _round_through(p * self.negative_slope,
                           self.use_stochastic_rounding) * neg_factor, -1.0, 0.0))
    # Straight-through estimator: the forward pass returns the quantized xq,
    # while the gradient flows through the unquantized x_uq.
    return x_uq + tf.stop_gradient(-x_uq + xq)
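The return statement above is the straight-through estimator (STE) pattern: x + stop_gradient(q - x) evaluates to the quantized q in the forward pass, but its gradient is that of the identity. A minimal, generic sketch of the same trick (illustrative only, not the qkeras API):

import tensorflow as tf

x = tf.Variable([0.3, 1.7, 2.2])
with tf.GradientTape() as tape:
    q = tf.round(x)                      # non-differentiable forward op
    y = x + tf.stop_gradient(q - x)      # forward value: q; gradient: identity in x
    loss = tf.reduce_sum(y * y)
print(tape.gradient(loss, x).numpy())    # [0. 4. 4.] == 2 * round(x)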
Example #5
Source File: quantizers.py    From qkeras with Apache License 2.0
def __call__(self, x):
    # Plain ReLU, optionally saturated at max_value.
    if self.max_value is None:
      x = K.relu(x)
    else:
      x = tf.where(
          x <= self.max_value, K.relu(x), tf.ones_like(x) * self.max_value)

    # Round the activation onto a power-of-two grid with a clipped exponent.
    x_clipped = _clip_power_of_two(x, self._min_exp, self._max_exp,
                                   self.max_value,
                                   self.quadratic_approximation,
                                   self.use_stochastic_rounding)
    # Straight-through estimator: forward pass returns 2**x_clipped,
    # the gradient flows through the unrounded x.
    return x + tf.stop_gradient(-x + pow(2.0, x_clipped))
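_clip_power_of_two is a qkeras internal that produces a clipped base-2 exponent, which the final line exponentiates. A simplified stand-in showing the underlying idea of snapping positive values onto a power-of-two grid (an assumption-laden sketch, not the library implementation):

import numpy as np

def round_to_power_of_two(x, min_exp=-4, max_exp=4):
    # Round the base-2 exponent, then clip it to the representable range.
    exp = np.clip(np.round(np.log2(np.maximum(x, 2.0 ** min_exp))), min_exp, max_exp)
    return 2.0 ** exp

print(round_to_power_of_two(np.array([0.1, 0.3, 1.2, 7.0])))  # [0.125 0.25 1. 8.]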
Example #6
Source File: activations.py    From megnet with BSD 3-Clause "New" or "Revised" License
from tensorflow.keras import backend as kb

def softplus2(x):
    """
    out = log(exp(x) + 1) - log(2)

    A softplus shifted so that it is 0 at x = 0. The implementation
    avoids overflow for large positive x by never exponentiating a
    large positive number.

    Args:
        x: (Tensor) input tensor

    Returns:
        (Tensor) output tensor
    """
    return kb.relu(x) + kb.log(0.5 * kb.exp(-kb.abs(x)) + 0.5)
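The returned expression is an algebraic rewrite of the docstring formula: log(exp(x) + 1) - log(2) = relu(x) + log(0.5*exp(-|x|) + 0.5), where the right-hand side only exponentiates non-positive numbers. A numeric spot check (a sketch, not from megnet):

import numpy as np

x = np.array([-30.0, -1.0, 0.0, 1.0, 30.0])
naive = np.log(np.exp(x) + 1.0) - np.log(2.0)                    # overflows for x >~ 700
stable = np.maximum(x, 0.0) + np.log(0.5 * np.exp(-np.abs(x)) + 0.5)
print(np.allclose(naive, stable))  # True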
Example #7
Source File: mobilenet_base.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def _relu6(self, x):
        """ReLU capped at 6.
        """
        return K.relu(x, max_value=6.0)
Example #8
Source File: mobilenet_base.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def _hard_swish(self, x):
        """Hard swish: x * ReLU6(x + 3) / 6.
        """
        return x * K.relu(x + 3.0, max_value=6.0) / 6.0
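Hard swish is a piecewise-linear approximation of swish, x * sigmoid(x), built entirely from the capped ReLU of Example #7. A sketch comparing the two at a few points (illustrative, not part of the repo; printed values are approximate):

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([-4.0, -1.0, 0.0, 1.0, 4.0])
hard = x * K.relu(x + 3.0, max_value=6.0) / 6.0
soft = x * tf.sigmoid(x)
print(hard.numpy())  # ~ [ 0.    -0.333  0.     0.667  4.   ]
print(soft.numpy())  # ~ [-0.072 -0.269  0.     0.731  3.928]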
Example #9
Source File: mobilenet_base.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def _squeeze(self, inputs):
        """Squeeze and Excitation.
        This function defines the squeeze stage: it returns per-channel
        attention weights of shape (1, 1, channels) for the caller to
        multiply with the block input.
        # Arguments
            inputs: Tensor, input tensor of conv layer.
        """
        input_channels = int(inputs.shape[-1])

        x = GlobalAveragePooling2D()(inputs)
        x = Dense(input_channels, activation='relu')(x)
        x = Dense(input_channels, activation='hard_sigmoid')(x)
        x = Reshape((1, 1, input_channels))(x)

        return x
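The returned (1, 1, channels) tensor is presumably multiplied channel-wise with the block input elsewhere in the class. A self-contained sketch of the full squeeze-and-excite pattern (the shapes are illustrative assumptions):

import tensorflow as tf
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Multiply, Reshape

inputs = tf.random.normal((2, 8, 8, 16))      # dummy NHWC feature map
x = GlobalAveragePooling2D()(inputs)          # squeeze: (2, 16)
x = Dense(16, activation='relu')(x)
x = Dense(16, activation='hard_sigmoid')(x)   # excite: per-channel gates in [0, 1]
scale = Reshape((1, 1, 16))(x)
outputs = Multiply()([inputs, scale])         # broadcast rescale over H and W
print(outputs.shape)                          # (2, 8, 8, 16)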
Example #10
Source File: stylegan_two.py    From StyleGAN2-Tensorflow-2.0 with MIT License
def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):

        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Get style information
            w_space = []
            pl_lengths = self.pl_mean
            for i in range(len(style)):
                w_space.append(self.GAN.S(style[i]))

            # Generate images
            generated_images = self.GAN.G(w_space + [noise])

            # Discriminate
            real_output = self.GAN.D(images, training=True)
            fake_output = self.GAN.D(generated_images, training=True)

            # Hinge loss function
            gen_loss = K.mean(fake_output)
            divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
            disc_loss = divergence

            if perform_gp:
                # R1 gradient penalty
                disc_loss += gradient_penalty(images, real_output, 10)

            if perform_pl:
                # Slightly adjust W space
                w_space_2 = []
                for i in range(len(style)):
                    std = 0.1 / (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                    w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

                # Generate from slightly adjusted W space
                pl_images = self.GAN.G(w_space_2 + [noise])

                # Get distance after adjustment (path length)
                delta_g = K.mean(K.square(pl_images - generated_images), axis=[1, 2, 3])
                pl_lengths = delta_g

                if self.pl_mean > 0:
                    gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

        # Get gradients for respective areas
        gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

        # Apply gradients
        self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
        self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

        return disc_loss, gen_loss, divergence, pl_lengths
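Stripped of the StyleGAN2-specific pieces (self.GAN, the gradient penalty, and path-length regularization), the core of the method is a two-tape hinge-GAN update. A standalone toy sketch of that pattern (the models, shapes, and optimizers here are placeholders, not the repo's):

import tensorflow as tf
from tensorflow.keras import backend as K

G = tf.keras.Sequential([tf.keras.layers.Dense(4)])   # toy generator
D = tf.keras.Sequential([tf.keras.layers.Dense(1)])   # toy discriminator
g_opt, d_opt = tf.keras.optimizers.Adam(), tf.keras.optimizers.Adam()

z = tf.random.normal((8, 4))
real = tf.random.normal((8, 4))

with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
    fake = G(z)
    real_out, fake_out = D(real), D(fake)
    g_loss = K.mean(fake_out)                                       # generator term
    d_loss = K.mean(K.relu(1 + real_out) + K.relu(1 - fake_out))    # hinge divergence

g_opt.apply_gradients(zip(g_tape.gradient(g_loss, G.trainable_variables),
                          G.trainable_variables))
d_opt.apply_gradients(zip(d_tape.gradient(d_loss, D.trainable_variables),
                          D.trainable_variables))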