Python keras.backend.square() Examples

The following are 26 code examples of keras.backend.square(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
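As a quick orientation: K.square() computes the element-wise square of a tensor. A minimal sketch (names and values are illustrative):

import numpy as np
from keras import backend as K

x = K.variable(np.array([1.0, -2.0, 3.0]))
sq = K.square(x)   # element-wise square
print(K.eval(sq))  # [1. 4. 9.]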
Example #1
Source File: 7_visualize_filters.py    From deep-learning-note with MIT License
def generate_pattern(layer_name, filter_index, size=150):
    # filter visualization: build a loss that maximizes the mean
    # activation of the chosen filter in the chosen layer
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    # gradient of that loss with respect to the input image,
    # L2-normalized to stabilize the ascent
    grads = K.gradients(loss, model.input)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    # start from a gray image with some noise
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.

    # run 40 steps of gradient ascent
    step = 1
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step

    img = input_img_data[0]
    return deprocess_image(img) 
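generate_pattern assumes a deprocess_image helper defined elsewhere in the script. A typical implementation, following the convention in Chollet's Deep Learning with Python, from which this example derives (the exact constants here are an assumption, not taken from the repo):

def deprocess_image(x):
    # zero-center and scale to std 0.1, then shift into [0, 1]
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    # convert to an RGB uint8 array
    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x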
Example #2
Source File: feature_vis.py    From facies_net with GNU Lesser General Public License v3.0
def smoothing(im, mode=None):
    # utility function to smooth/regularize an image
    if mode is None:
        return im
    elif mode == 'L2':
        # L2-norm scaling
        return im / (np.sqrt(np.mean(np.square(im))) + K.epsilon())
    elif mode == 'GaussianBlur':
        # Gaussian blurring with sigma = 1/8
        return filters.gaussian_filter(im, 1/8)
    elif mode == 'Decay':
        # decay regularization
        decay = 0.98
        return decay * im
    elif mode == 'Clip_weak':
        # clip-weak-pixel regularization: zero out the weakest 1% of pixels
        percentile = 1
        threshold = np.percentile(np.abs(im), percentile)
        im[np.where(np.abs(im) < threshold)] = 0
        return im
    else:
        # unknown mode: warn and return the image unchanged
        print('Unknown smoothing parameter. No smoothing applied.')
        return im 
Example #3
Source File: breakout_dqn.py    From reinforcement-learning with MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate the Q function with a convolutional neural network:
    # the state is the input and the Q-value of each action is the output 
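The clip-and-split in optimizer() implements the Huber loss with delta = 1: quadratic near zero, linear in the tails, which keeps the DQN update robust to large TD errors. A small NumPy equivalent for intuition (illustrative, not from the repo):

import numpy as np

def huber(error, delta=1.0):
    # same decomposition as the K.clip version above
    quadratic = np.clip(np.abs(error), 0.0, delta)
    linear = np.abs(error) - quadratic
    return np.mean(0.5 * np.square(quadratic) + delta * linear)

print(huber(np.array([0.5, 2.0])))  # (0.125 + 1.5) / 2 = 0.8125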
Example #4
Source File: breakout_dueling_ddqn.py    From reinforcement-learning with MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate the Q function with a convolutional neural network:
    # the state is the input and the Q-value of each action is the output
    # the dueling network's Q-value is the sum of the advantages and the state value 
Example #5
Source File: model_loss.py    From speech_separation with MIT License
def audio_discriminate_loss2(gamma=0.1, beta=2*0.1, num_speaker=2):
    def loss_func(S_true, S_pred, gamma=gamma, beta=beta, num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:,:,:,:,0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma*(K.square(S_true[:,:,:,:,i]-S_pred[:,:,:,:,j]))

        for i in range(num_speaker):
            for j in range(i+1,num_speaker):
                #sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                #sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass
        #sum = K.sum(K.maximum(K.flatten(sum_mtr),0))

        loss = K.mean(K.flatten(sum_mtr))

        return loss
    return loss_func 
Example #6
Source File: custom.py    From WannaPark with GNU General Public License v3.0
def call(self, x, mask=None):
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        # local response normalization across channels: average the squared
        # activations over an n x n window, then scale and raise to beta
        squared = K.square(x)
        pooled = K.pool2d(squared, (self.n, self.n), strides=(1, 1),
                          padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = self.alpha * K.repeat_elements(summed, f, axis=1)
        else:
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = self.alpha * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom 
Example #7
Source File: weightnorm.py    From weightnorm with MIT License
def get_weightnorm_params_and_grads(p, g):
    ps = K.get_variable_shape(p)

    # construct weight scaler: V_scaler = g/||V||
    V_scaler_shape = (ps[-1],)  # assumes we're using tensorflow!
    V_scaler = K.ones(V_scaler_shape)  # init to ones, so effective parameters don't change

    # get V parameters = ||V||/g * W
    norm_axes = [i for i in range(len(ps) - 1)]
    V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])

    # split V_scaler into ||V|| and g parameters
    V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
    g_param = V_scaler * V_norm

    # get grad in V,g parameters
    grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
    grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
             (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)

    return V, V_norm, V_scaler, g_param, grad_g, grad_V 
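Weight normalization reparameterizes each output unit's weights as w = g * v / ||v||. A NumPy spot check of the bookkeeping above (illustrative shapes, TensorFlow ordering with the output axis last):

import numpy as np

W = np.random.randn(3, 4)      # parameter p; last axis = output units
V_scaler = np.ones(4)          # g / ||V||, initialized to ones
V = W / V_scaler               # effective parameters start unchanged
V_norm = np.sqrt((V ** 2).sum(axis=0))
g_param = V_scaler * V_norm    # effective g; equals ||W|| at init
np.testing.assert_allclose(V_scaler[None, :] * V, W)  # W is recovered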
Example #8
Source File: customlayers.py    From deep-mil-for-whole-mammogram-classification with MIT License
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross-channel normalization (LRN) in the
    original AlexNet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 2, 3, 1)),
                                              (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs) 
Example #9
Source File: wgan_gp.py    From Keras-GAN with MIT License
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the Euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr,
                                  axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 for each individual sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
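Because Keras loss functions only accept (y_true, y_pred), Keras-GAN binds averaged_samples with functools.partial before compiling the critic. A sketch of that wiring (interpolated_img stands for the output of the random-weighted-average layer in the surrounding script):

from functools import partial

# interpolated_img = RandomWeightedAverage()([real_img, fake_img])
partial_gp_loss = partial(self.gradient_penalty_loss,
                          averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras losses need a __name__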
Example #10
Source File: customlayers.py    From convnets-keras with MIT License
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross-channel normalization (LRN) in the
    original AlexNet
    """

    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 2, 3, 1))
                                              , (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs) 
Example #11
Source File: customlayers.py    From deep-mil-for-whole-mammogram-classification with MIT License
def __call__(self, loss):
        #if self.layer is None:
        #    raise Exception('Need to call `set_layer` on '
        #                    'ActivityRegularizer instance '
        #                    'before calling the instance.')
        regularized_loss = loss
        for i in range(len(self.layer.inbound_nodes)):
            output = self.layer.get_output_at(i)
            if self.l1:
                regularized_loss += K.sum(self.l1 * K.abs(output[:,:,:,1]))
            if self.l2:
                regularized_loss += K.sum(self.l2 * K.square(output[:,:,:,1]))
        return K.in_train_phase(regularized_loss, loss) 
Example #12
Source File: capsulelayers.py    From Multi-level-DCNet with GNU General Public License v3.0
def call(self, inputs, **kwargs):
        return K.sqrt(K.sum(K.square(inputs), -1)) 
Example #13
Source File: tf_normal_sampler.py    From social_lstm_keras_tf with GNU General Public License v3.0
def _to_normal2d(output_batch) -> ds.MultivariateNormalTriL:
    """
    :param output_batch: (n_samples, 5)
    :return: a 2D multivariate normal distribution built from the batch
    """

    # mean of x and y
    x_mean = Lambda(lambda o: o[:, 0])(output_batch)
    y_mean = Lambda(lambda o: o[:, 1])(output_batch)

    # std of x and y
    # std must be positive, so exponentiate the raw outputs
    x_std = Lambda(lambda o: K.exp(o[:, 2]))(output_batch)
    y_std = Lambda(lambda o: K.exp(o[:, 3]))(output_batch)

    # correlation coefficient
    # correlation coefficient range is [-1, 1]
    cor = Lambda(lambda o: K.tanh(o[:, 4]))(output_batch)

    loc = Concatenate()([
        Lambda(lambda x_mean: K.expand_dims(x_mean, 1))(x_mean),
        Lambda(lambda y_mean: K.expand_dims(y_mean, 1))(y_mean)
    ])

    x_var = Lambda(lambda x_std: K.square(x_std))(x_std)
    y_var = Lambda(lambda y_std: K.square(y_std))(y_std)
    xy_cor = Multiply()([x_std, y_std, cor])

    cov = Lambda(lambda inputs: K.stack(inputs, axis=0))(
        [x_var, xy_cor, xy_cor, y_var])
    cov = Lambda(lambda cov: K.permute_dimensions(cov, (1, 0)))(cov)
    cov = Reshape((2, 2))(cov)

    scale_tril = Lambda(lambda cov: tf.cholesky(cov))(cov)
    mvn = ds.MultivariateNormalTriL(loc, scale_tril)

    return mvn 
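A NumPy spot check of the 2x2 covariance assembled above and the Cholesky factor passed as scale_tril (illustrative values):

import numpy as np

x_std, y_std, cor = 1.5, 0.5, 0.3
cov = np.array([[x_std ** 2,          x_std * y_std * cor],
                [x_std * y_std * cor, y_std ** 2         ]])
L = np.linalg.cholesky(cov)             # this is what scale_tril holds
np.testing.assert_allclose(L @ L.T, cov)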
Example #14
Source File: StarGAN.py    From StarGAN-Keras with MIT License
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the Euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 for each individual sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
Example #15
Source File: adamaccum.py    From RecurrentGaze with MIT License
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations + 1)]

        t = self.iterations + 1
        lr_t = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        ms = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        vs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        gs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        self.weights = ms + vs

        for p, g, m, v, gg in zip(params, grads, ms, vs, gs):

            flag = K.equal(self.iterations % self.accum_iters, 0)
            flag = K.cast(flag, dtype='float32')

            gg_t = (1 - flag) * (gg + g)
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * (gg + flag * g) / self.accum_iters
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square((gg + flag * g) / self.accum_iters)
            p_t = p - flag * lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append((m, flag * m_t + (1 - flag) * m))
            self.updates.append((v, flag * v_t + (1 - flag) * v))
            self.updates.append((gg, gg_t))

            new_p = p_t
            # apply constraints
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
            self.updates.append((p, new_p))
        return self.updates 
Example #16
Source File: training.py    From neural-style-keras with MIT License
def tv_loss(x):
    '''
    Total variation loss is used to keep the image locally coherent
    '''
    assert K.ndim(x) == 4
    a = K.square(x[:, :-1, :-1, :] - x[:, 1:, :-1, :])
    b = K.square(x[:, :-1, :-1, :] - x[:, :-1, 1:, :])
    return K.sum(a + b, axis=(1, 2, 3)) 
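A quick smoke test of tv_loss on a random batch (shapes are illustrative):

import numpy as np
from keras import backend as K

img = K.variable(np.random.rand(2, 64, 64, 3))
print(K.eval(tv_loss(img)).shape)  # (2,): one TV value per batch sample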
Example #17
Source File: training.py    From neural-style-keras with MIT License
def style_loss(x, target, norm_by_channels=False):
    '''
    Style loss is the MSE between Gram matrices computed using activation maps.
    '''
    x_gram = gram_matrix(x, norm_by_channels=norm_by_channels)
    return K.mean(K.square(target - x_gram), axis=(1, 2)) 
Example #18
Source File: wgan_ops.py    From se_relativisticgan with MIT License
def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):
    """
    This term is used for stabilizing the WGAN training.
    """
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    axes_for_sum = tuple(np.arange(1, len(gradients_sqr.shape)))
    gradients_sqr_sum = K.sum(gradients_sqr, axis=axes_for_sum)
    gradient_norm = K.sqrt(gradients_sqr_sum)
    gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_norm)
    return K.mean(gradient_penalty) 
Example #19
Source File: mnist_3d_latent_space_and_generate.py    From VAE-for-Image-Generation with MIT License
def vae_loss(self, x, x_decoded_mean):
        xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
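For reference, kl_loss above is the closed-form KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and a standard normal prior, summed over latent dimensions. A NumPy spot check with illustrative values:

import numpy as np

z_mean = np.array([0.5, -0.3])
z_log_var = np.array([0.1, -0.2])
kl = -0.5 * np.sum(1 + z_log_var - np.square(z_mean) - np.exp(z_log_var))
print(kl)  # 0 only when z_mean = 0 and z_log_var = 0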
Example #20
Source File: caltech101_128_train.py    From VAE-for-Image-Generation with MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example #21
Source File: mnist_2d_latent_space_and_generate.py    From VAE-for-Image-Generation with MIT License
def vae_loss(self, x, x_decoded_mean):
        xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example #22
Source File: caltech101_92_generate.py    From VAE-for-Image-Generation with MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example #23
Source File: mnist_train.py    From VAE-for-Image-Generation with MIT License
def vae_loss(self, x, x_decoded_mean):
        xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
        kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example #24
Source File: cifar10_generate.py    From VAE-for-Image-Generation with MIT License
def vae_loss(self, x, x_decoded_mean_squash):
        x = K.flatten(x)
        x_decoded_mean_squash = K.flatten(x_decoded_mean_squash)
        xent_loss = img_rows * img_cols * metrics.binary_crossentropy(x, x_decoded_mean_squash)
        kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss) 
Example #25
Source File: test_gradAsc_MaxSoftmax.py    From Automatic-Modulation-Classification with GNU General Public License v3.0
def build_backprop(model, loss):
    # Gradient of the loss with respect to the input image
    gradients = K.gradients(loss, model.input)[0]
    # Normalize the gradients
    gradients /= (K.sqrt(K.mean(K.square(gradients))) + 1e-5)
    # Keras function to calculate the gradients and loss
    return K.function([model.input], [loss, gradients])


# Loss function that optimizes one class 
Example #26
Source File: test_gradAsc_MaxFilters.py    From Automatic-Modulation-Classification with GNU General Public License v3.0
def build_backprop(model, loss):
    # Gradient of the loss with respect to the input image
    gradients = K.gradients(loss, model.input)[0]
    # Normalize the gradients
    gradients /= (K.sqrt(K.mean(K.square(gradients))) + 1e-5)
    # Keras function to calculate the gradients and loss
    return K.function([model.input], [loss, gradients]) 