Python keras.backend.square() Examples

The following are 28 code examples showing how to use keras.backend.square(). They are extracted from open source projects; the project, author, file, and license are listed above each example.

You may also want to check out all other available functions and classes of the module keras.backend.
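
Before the project code, a minimal, self-contained sketch of the function itself: K.square() squares a tensor element-wise. (This snippet is illustrative and not taken from any of the projects below.)

import numpy as np
from keras import backend as K

x = K.constant(np.array([[1.0, -2.0, 3.0]]))
y = K.square(x)    # element-wise square
print(K.eval(y))   # [[1. 4. 9.]]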

Example 1
Project: deep-learning-note   Author: wdxtub   File: 7_visualize_filters.py    License: MIT License
def generate_pattern(layer_name, filter_index, size=150):
    # Filter visualization: build a loss that maximizes the mean activation
    # of the chosen filter, then run gradient ascent on a random input image
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    grads = K.gradients(loss, model.input)[0]
    # normalize the gradient to stabilize the ascent steps
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    # start from a gray image with noise
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    
    step = 1
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    
    img = input_img_data[0]
    return deprocess_image(img) 
Example 2
Project: Keras-GAN   Author: eriklindernoren   File: wgan_gp.py    License: MIT License
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
        """
        Computes gradient penalty based on prediction and weighted real / fake samples
        """
        gradients = K.gradients(y_pred, averaged_samples)[0]
        # compute the euclidean norm by squaring ...
        gradients_sqr = K.square(gradients)
        #   ... summing over the rows ...
        gradients_sqr_sum = K.sum(gradients_sqr,
                                  axis=np.arange(1, len(gradients_sqr.shape)))
        #   ... and sqrt
        gradient_l2_norm = K.sqrt(gradients_sqr_sum)
        # compute lambda * (1 - ||grad||)^2 for each single sample
        gradient_penalty = K.square(1 - gradient_l2_norm)
        # return the mean as loss over all the batch samples
        return K.mean(gradient_penalty) 
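
In the surrounding WGAN-GP script, a loss with an extra averaged_samples argument cannot be handed to compile() directly; the usual pattern is to bind that argument first with functools.partial. A hedged sketch (interpolated_img is a placeholder name for the tensor of random real/fake interpolations):

from functools import partial

# bind the extra argument so Keras sees the standard (y_true, y_pred) signature
partial_gp_loss = partial(gradient_penalty_loss, averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras losses need a __name__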
Example 3
Project: speech_separation   Author: bill9800   File: model_loss.py    License: MIT License
def audio_discriminate_loss2(gamma=0.1, beta=2*0.1, num_speaker=2):
    def loss_func(S_true, S_pred, gamma=gamma, beta=beta, num_speaker=num_speaker):
        sum_mtr = K.zeros_like(S_true[:, :, :, :, 0])
        for i in range(num_speaker):
            sum_mtr += K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, i])
            for j in range(num_speaker):
                if i != j:
                    sum_mtr -= gamma * K.square(S_true[:, :, :, :, i] - S_pred[:, :, :, :, j])

        for i in range(num_speaker):
            for j in range(i+1, num_speaker):
                # sum_mtr -= beta*K.square(S_pred[:,:,:,i]-S_pred[:,:,:,j])
                # sum_mtr += beta*K.square(S_true[:,:,:,:,i]-S_true[:,:,:,:,j])
                pass
        # sum = K.sum(K.maximum(K.flatten(sum_mtr), 0))

        loss = K.mean(K.flatten(sum_mtr))

        return loss
    return loss_func
Example 4
Project: reinforcement-learning-kr   Author: rlcode   File: breakout_dqn.py    License: MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        prediction = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(prediction * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # build a neural network that takes the state as input and outputs Q-values
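
The clip-and-split construction above implements the Huber loss: quadratic for errors of magnitude up to 1, linear beyond, which keeps Q-learning gradients bounded. A quick NumPy check of the formula (illustrative, not project code):

import numpy as np

def huber(error, delta=1.0):
    # the quadratic part is capped at delta; the remainder is penalized linearly
    quadratic = np.clip(np.abs(error), 0.0, delta)
    linear = np.abs(error) - quadratic
    return 0.5 * quadratic ** 2 + linear

print(huber(np.array([0.5, 2.0])))  # [0.125 1.5]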
Example 5
Project: reinforcement-learning   Author: rlcode   File: breakout_ddqn.py    License: MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Example 6
Project: reinforcement-learning   Author: rlcode   File: breakout_dqn.py    License: MIT License
def optimizer(self):
        a = K.placeholder(shape=(None,), dtype='int32')
        y = K.placeholder(shape=(None,), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network 
Example 7
Project: reinforcement-learning   Author: rlcode   File: breakout_dueling_ddqn.py    License: MIT License
def optimizer(self):
        a = K.placeholder(shape=(None, ), dtype='int32')
        y = K.placeholder(shape=(None, ), dtype='float32')

        py_x = self.model.output

        a_one_hot = K.one_hot(a, self.action_size)
        q_value = K.sum(py_x * a_one_hot, axis=1)
        error = K.abs(y - q_value)

        quadratic_part = K.clip(error, 0.0, 1.0)
        linear_part = error - quadratic_part
        loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)

        optimizer = RMSprop(lr=0.00025, epsilon=0.01)
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, a, y], [loss], updates=updates)

        return train

    # approximate Q function using Convolution Neural Network
    # state is input and Q Value of each action is output of network
    # dueling network's Q Value is sum of advantages and state value 
Example 8
Project: convnets-keras   Author: heuritech   File: customlayers.py    License: MIT License
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """

    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 2, 3, 1))
                                              , (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs) 
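
Because the factory returns a Lambda layer, the normalization drops into a model like any other layer. A minimal sketch, assuming channels-first tensors as in the original AlexNet port (conv_output is a placeholder for a preceding layer's output):

# hypothetical usage inside a functional-API model
lrn = crosschannelnormalization(name='lrn_1')
x = lrn(conv_output)  # conv_output shaped (batch, channels, rows, cols)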
Example 9
Project: deep-mil-for-whole-mammogram-classification   Author: wentaozhu   File: customlayers.py    License: MIT License
def crosschannelnormalization(alpha = 1e-4, k=2, beta=0.75, n=5,**kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0,2,3,1))
                                              , (0,half))
        extra_channels = K.permute_dimensions(extra_channels, (0,3,1,2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:,i:i+ch,:,:]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape:input_shape,**kwargs) 
Example 10
Project: facies_net   Author: crild   File: feature_vis.py    License: GNU Lesser General Public License v3.0
def smoothing(im, mode = None):
    # utility function to smooth an image
    if mode is None:
        return im
    elif mode == 'L2':
        # L2 norm
        return im / (np.sqrt(np.mean(np.square(im))) + K.epsilon())
    elif mode == 'GaussianBlur':
        # Gaussian Blurring with width of 3
        return filters.gaussian_filter(im,1/8)
    elif mode == 'Decay':
        # Decay regularization
        decay = 0.98
        return decay * im
    elif mode == 'Clip_weak':
        # Clip weak pixel regularization
        percentile = 1
        threshold = np.percentile(np.abs(im),percentile)
        im[np.where(np.abs(im) < threshold)] = 0
        return im
    else:
        # print error message
        print('Unknown smoothing parameter. No smoothing implemented.')
        return im 
Example 11
Project: WannaPark   Author: dalmia   File: custom.py    License: GNU General Public License v3.0
def call(self, x, mask=None):
        # K.image_dim_ordering is a function; the original compared the function
        # object itself to "th", which is always False, so it must be called
        if K.image_dim_ordering() == "th":
            _, f, r, c = self.shape
        else:
            _, r, c, f = self.shape
        squared = K.square(x)
        pooled = K.pool2d(squared, (self.n, self.n), strides=(1, 1),
            padding="same", pool_mode="avg")
        if K.image_dim_ordering() == "th":
            summed = K.sum(pooled, axis=1, keepdims=True)
            averaged = self.alpha * K.repeat_elements(summed, f, axis=1)
        else:
            summed = K.sum(pooled, axis=3, keepdims=True)
            averaged = self.alpha * K.repeat_elements(summed, f, axis=3)
        denom = K.pow(self.k + averaged, self.beta)
        return x / denom
Example 12
Project: weightnorm   Author: openai   File: weightnorm.py    License: MIT License
def get_weightnorm_params_and_grads(p, g):
    ps = K.get_variable_shape(p)

    # construct weight scaler: V_scaler = g/||V||
    V_scaler_shape = (ps[-1],)  # assumes we're using tensorflow!
    V_scaler = K.ones(V_scaler_shape)  # init to ones, so effective parameters don't change

    # get V parameters = ||V||/g * W
    norm_axes = [i for i in range(len(ps) - 1)]
    V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])

    # split V_scaler into ||V|| and g parameters
    V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
    g_param = V_scaler * V_norm

    # get grad in V,g parameters
    grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
    grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
             (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)

    return V, V_norm, V_scaler, g_param, grad_g, grad_V 
Example 13
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    License: MIT License
def content_loss(base, combination):
    return K.sum(K.square(combination - base)) 
Example 14
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    License: MIT License
def style_loss(style, combination):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_height * img_width
    return K.sum(K.square(S - C)) / ( 4. * (channels ** 2) * (size ** 2)) 
Example 15
Project: deep-learning-note   Author: wdxtub   File: 3_nerual_style_transfer.py    License: MIT License
def total_variation_loss(x):
    a = K.square(
        x[:, :img_height-1, :img_width-1, :] -
        x[:, 1:, :img_width-1, :])
    b = K.square(
        x[:, :img_height-1, :img_width-1, :] -
        x[:, :img_height-1, 1:, :])
    return K.sum(K.pow(a+b, 1.25)) 
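
In the source script these three functions feed a single scalar objective. A hedged sketch of how such a combination typically looks (the weights and feature tensors are placeholder names, not necessarily the script's):

loss = content_weight * content_loss(base_features, combination_features)
loss = loss + style_weight * style_loss(style_features, combination_features)
loss = loss + total_variation_weight * total_variation_loss(combination_image)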
Example 16
Project: CapsNet   Author: l11x0m7   File: capsule.py    License: MIT License
def margin_loss(y, pred):
    """
    The margin (classification) part of the CapsNet loss.
    """
    return K.mean(K.sum(y * K.square(K.maximum(0.9 - pred, 0)) + \
        0.5 * K.square((1 - y) * K.maximum(pred - 0.1, 0)), axis=1))
Example 17
Project: CapsNet   Author: l11x0m7   File: capsule.py    License: MIT License
def squash(s, axis=-1):
    """
    Squash function. This could be viewed as one kind of activations.
    """
    squared_s = K.sum(K.square(s), axis=axis, keepdims=True)
    scale = squared_s / (1 + squared_s) / K.sqrt(squared_s + K.epsilon())
    return scale * s 
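
The scale factor shrinks every vector to a norm strictly below 1 while preserving its direction: the squashed norm equals ||s||^2 / (1 + ||s||^2). A quick check (illustrative):

import numpy as np
from keras import backend as K

s = K.constant(np.array([[3.0, 4.0]]))     # norm 5
v = squash(s)
print(K.eval(K.sqrt(K.sum(K.square(v)))))  # ~0.9615, i.e. 25/26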
Example 18
Project: CapsNet   Author: l11x0m7   File: capsule.py    License: MIT License
def call(self, inputs, **kwargs):
        return K.sqrt(K.sum(K.square(inputs), axis=-1)) 
Example 19
Project: CapsNet   Author: l11x0m7   File: capsule.py    License: MIT License
def call(self, inputs, **kwargs):
        # inputs -> (X, y): output the mask of y
        # inputs -> X: output the mask of the prediction
        # the original `type(inputs) is list or tuple` was always truthy
        if isinstance(inputs, (list, tuple)):
            inputs, mask = inputs
        else:
            pred = K.sqrt(K.sum(K.square(inputs), axis=-1) + K.epsilon())
            mask = K.one_hot(indices=K.argmax(pred, 1), num_classes=pred.get_shape().as_list()[1])
        return K.batch_flatten(inputs * K.expand_dims(mask, axis=-1))
Example 20
Project: 3DGCN   Author: blackmints   File: loss.py    License: MIT License
def std_rmse(std=1):
    def rmse(y_true, y_pred):
        return K.sqrt(K.mean(K.square((y_pred - y_true)))) * std

    return rmse 
Example 21
Project: 3DGCN   Author: blackmints   File: loss.py    License: MIT License
def std_r2(std=1):
    def r2(y_true, y_pred):
        ss_res = K.sum(K.square((y_true - y_pred) * std))
        # the original scaled only the mean; scale the whole deviation, as in ss_res
        ss_tot = K.sum(K.square((y_true - K.mean(y_true)) * std))
        return 1 - ss_res / (ss_tot + K.epsilon())

    return r2
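
Both factories return closures with the (y_true, y_pred) signature Keras expects, so they can be passed straight to compile() as metrics. A sketch (label_std is a placeholder for the standard deviation used to normalize the training labels):

model.compile(optimizer='adam', loss='mse',
              metrics=[std_rmse(std=label_std), std_r2(std=label_std)])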
Example 22
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    License: GNU General Public License v3.0
def logarithmic_mean_squared_error(y_true, y_pred):
    return -K.mean(K.log(1. - K.clip(K.square(y_pred - y_true), 0., 1. - K.epsilon())))

#_paper 
Example 23
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    License: GNU General Public License v3.0
def rbf(x):
    #This is not really a radial basis function, but it is similar
    return K.exp(-K.square(x)) 
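
Keras accepts any tensor-to-tensor callable as an activation, so this pseudo-RBF can be plugged into a layer directly (illustrative):

from keras.layers import Dense

hidden = Dense(64, activation=rbf)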
Example 24
Project: speech_separation   Author: bill9800   File: model_loss.py    License: MIT License
def audio_discriminate_loss(gamma=0.1, num_speaker=2):
    def loss_func(S_true, S_pred, gamma=gamma, num_speaker=num_speaker):
        total = 0  # renamed from `sum` to avoid shadowing the builtin
        for i in range(num_speaker):
            total += K.sum(K.flatten(K.square(S_true[:, :, :, i] - S_pred[:, :, :, i])))
            for j in range(num_speaker):
                if i != j:
                    total -= gamma * K.sum(K.flatten(K.square(S_true[:, :, :, i] - S_pred[:, :, :, j])))

        loss = total / (num_speaker * 298 * 257 * 2)
        return loss
    return loss_func
Example 25
Project: Keras-BiGAN   Author: manicman1999   File: bigan.py    License: MIT License
def gradient_penalty_loss(y_true, y_pred, averaged_samples, weight):
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradient_penalty = K.sum(gradients_sqr,
                             axis=np.arange(1, len(gradients_sqr.shape)))

    # (weight / 2) * ||grad||^2
    # Penalize the gradient norm
    return K.mean(gradient_penalty) * (weight / 2) 
Example 26
Project: fancy-cnn   Author: textclf   File: embeddings.py    License: MIT License
def __call__(self, p):
        if self.skip:
            return self.s * (p / K.clip(K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)), 0.5, 100))
        return self.s * (p / K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True))) 
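
The __call__ signature matches a Keras weight constraint: the object takes a weight tensor and returns its renormalized version. A hedged sketch of attaching an instance to an embedding layer (vocab_size, embedding_dim, and norm_constraint are placeholders; in Keras 2 the argument is embeddings_constraint):

from keras.layers import Embedding

emb = Embedding(vocab_size, embedding_dim,
                embeddings_constraint=norm_constraint)  # instance of the class above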
Example 27
Project: deepchem   Author: deepchem   File: model.py    License: MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
      z_mean_, z_log_var_ = args
      batch_size = K.shape(z_mean_)[0]
      epsilon = K.random_normal(
          shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
      return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
      x = K.flatten(x)
      x_decoded_mean = K.flatten(x_decoded_mean)
      xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
      kl_loss = -0.5 * K.mean(
          1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
      return xent_loss + kl_loss

    return (vae_loss, Lambda(
        sampling, output_shape=(latent_rep_size,),
        name='lambda')([z_mean, z_log_var])) 
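
The KL term above is the closed form for a diagonal Gaussian against a standard normal, -0.5 * Σ(1 + log σ² - μ² - σ²) (the code averages rather than sums over the latent axis). Because vae_loss closes over z_mean and z_log_var, it behaves like any standard Keras loss (hedged sketch; the model wiring is illustrative):

# hypothetical wiring: the returned closure is a standard Keras loss
vae_loss, z = self._buildEncoder(x, latent_rep_size, max_length)
autoencoder.compile(optimizer='adam', loss=vae_loss)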
Example 28
Project: Pix2Pose   Author: kirumang   File: ae_model.py    License: MIT License
def call(self, x):
        y_pred = x[0]
        y_recont_gt = x[1]
        y_prob_pred = tf.squeeze(x[2], axis=3)
        y_prob_gt = x[3]
        visible = tf.cast(y_prob_gt > 0.5, y_pred.dtype)
        visible = tf.squeeze(visible, axis=3)
        # generate transformed ground truths for each symmetry transform
        if len(self.sym) > 1:
            for sym_id, transform in enumerate(self.sym):  # each a 3x3 matrix
                tf_mat = tf.convert_to_tensor(transform, y_recont_gt.dtype)
                y_gt_transformed = tf.transpose(tf.matmul(tf_mat, tf.transpose(tf.reshape(y_recont_gt, [-1, 3]))))
                y_gt_transformed = tf.reshape(y_gt_transformed, [-1, 128, 128, 3])
                loss_xyz_temp = K.sum(K.abs(y_gt_transformed - y_pred), axis=3) / 3
                loss_sum = K.sum(loss_xyz_temp, axis=[1, 2])
                if sym_id > 0:
                    loss_sums = tf.concat([loss_sums, tf.expand_dims(loss_sum, axis=0)], axis=0)
                    loss_xyzs = tf.concat([loss_xyzs, tf.expand_dims(loss_xyz_temp, axis=0)], axis=0)
                else:
                    loss_sums = tf.expand_dims(loss_sum, axis=0)
                    loss_xyzs = tf.expand_dims(loss_xyz_temp, axis=0)

            # keep, per sample, the symmetry transform with the smallest loss
            min_values = tf.reduce_min(loss_sums, axis=0, keepdims=True)
            loss_switch = tf.cast(tf.equal(loss_sums, min_values), y_pred.dtype)
            loss_xyz = tf.expand_dims(tf.expand_dims(loss_switch, axis=2), axis=3) * loss_xyzs
            loss_xyz = K.sum(loss_xyz, axis=0)
        else:
            loss_xyz = K.sum(K.abs(y_recont_gt - y_pred), axis=3) / 3
        prob_loss = K.square(y_prob_pred - K.minimum(loss_xyz, 1))
        loss_invisible = (1 - visible) * loss_xyz
        loss_visible = visible * loss_xyz
        loss = loss_visible * 3 + loss_invisible + 0.5 * prob_loss
        loss = K.mean(loss, axis=[1, 2])
        return loss