Python keras.backend.sqrt() Examples
The following are 30 code examples showing how to use keras.backend.sqrt(). The examples are extracted from open source projects; the project, author, file, and license are listed above each example so you can trace a snippet back to its original source. You may also want to check out the other available functions and classes of the keras.backend module.
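Before the project snippets, here is a minimal sketch of the call itself: K.sqrt applies the square root element-wise to a tensor, and the most common pattern in the examples below is to combine it with K.sum or K.mean of K.square to build an L2 norm or a root-mean-square. (This sketch is mine, not from any of the projects; it assumes a TensorFlow-backed Keras.)

import numpy as np
from keras import backend as K

x = K.constant(np.array([[3.0, 4.0]]))
l2_norm = K.sqrt(K.sum(K.square(x), axis=-1))  # sqrt of the summed squares = L2 norm
print(K.eval(l2_norm))  # -> [5.]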
Example 1
Project: deep-learning-note Author: wdxtub File: 7_visualize_filters.py License: MIT License

def generate_pattern(layer_name, filter_index, size=150):
    # filter visualization function: run gradient ascent on a random
    # image to maximize the mean activation of the chosen filter
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    grads = K.gradients(loss, model.input)[0]
    # normalize the gradient by its RMS so the ascent step size is stable
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    step = 1
    for _ in range(40):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    img = input_img_data[0]
    return deprocess_image(img)
Example 2
Project: Keras-GAN Author: eriklindernoren File: wgan_gp.py License: MIT License

def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
    """
    Computes gradient penalty based on prediction and weighted real / fake samples
    """
    gradients = K.gradients(y_pred, averaged_samples)[0]
    # compute the euclidean norm by squaring ...
    gradients_sqr = K.square(gradients)
    # ... summing over the rows ...
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    # ... and sqrt
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # compute lambda * (1 - ||grad||)^2 still for each single sample
    gradient_penalty = K.square(1 - gradient_l2_norm)
    # return the mean as loss over all the batch samples
    return K.mean(gradient_penalty)
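Since Keras losses are called with only (y_true, y_pred), the extra averaged_samples argument has to be bound before the model is compiled. Below is a hedged sketch of the usual wiring; interpolated_img, wasserstein_loss, critic_model, and optimizer are illustrative placeholders, not names taken from the project.

from functools import partial

# inside the model-building code, where interpolated_img is the
# weighted real/fake tensor that averaged_samples refers to
partial_gp_loss = partial(self.gradient_penalty_loss,
                          averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty'  # Keras needs a __name__ on loss functions

critic_model.compile(optimizer=optimizer,
                     loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss],
                     loss_weights=[1, 1, 10])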
Example 3
Project: facies_net Author: crild File: feature_vis.py License: GNU Lesser General Public License v3.0

def smoothing(im, mode=None):
    # utility function to smooth an image
    if mode is None:
        return im
    elif mode == 'L2':
        # L2 norm
        return im / (np.sqrt(np.mean(np.square(im))) + K.epsilon())
    elif mode == 'GaussianBlur':
        # Gaussian blurring with width of 3
        return filters.gaussian_filter(im, 1/8)
    elif mode == 'Decay':
        # Decay regularization
        decay = 0.98
        return decay * im
    elif mode == 'Clip_weak':
        # Clip weak pixel regularization
        percentile = 1
        threshold = np.percentile(np.abs(im), percentile)
        im[np.where(np.abs(im) < threshold)] = 0
        return im
    else:
        # print error message
        print('Unknown smoothing parameter. No smoothing implemented.')
        return im
Example 4
Project: weightnorm Author: openai File: weightnorm.py License: MIT License

def get_weightnorm_params_and_grads(p, g):
    ps = K.get_variable_shape(p)

    # construct weight scaler: V_scaler = g/||V||
    V_scaler_shape = (ps[-1],)  # assumes we're using tensorflow!
    V_scaler = K.ones(V_scaler_shape)  # init to ones, so effective parameters don't change

    # get V parameters = ||V||/g * W
    norm_axes = [i for i in range(len(ps) - 1)]
    V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])

    # split V_scaler into ||V|| and g parameters
    V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
    g_param = V_scaler * V_norm

    # get grad in V,g parameters
    grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
    grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
        (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)

    return V, V_norm, V_scaler, g_param, grad_g, grad_V
Example 5
Project: Multi-level-DCNet Author: ssrp File: capsulelayers.py License: GNU General Public License v3.0

def call(self, inputs, **kwargs):
    if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
        assert len(inputs) == 2
        inputs, mask = inputs
    else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
        # compute lengths of capsules
        x = K.sqrt(K.sum(K.square(inputs), -1))
        # generate the mask which is a one-hot code.
        # mask.shape=[None, n_classes]=[None, num_capsule]
        mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

    # inputs.shape=[None, num_capsule, dim_capsule]
    # mask.shape=[None, num_capsule]
    # masked.shape=[None, num_capsule * dim_capsule]
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 6
Project: keras-lookahead Author: CyberZHG File: optimizers.py License: MIT License

def get_updates(self, loss, params):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    t = K.cast(self.iterations, K.floatx()) + 1
    lr_t = self.learning_rate * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                                 (1. - K.pow(self.beta_1, t)))

    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = [self.iterations] + ms + vs

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        p_t = lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update_sub(p, p_t))
    return self.updates
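The lr_t line is Adam's bias correction: the moment estimates m and v start at zero, so they are biased toward zero on early steps; scaling the learning rate by sqrt(1 - beta_2^t) / (1 - beta_1^t) compensates for that bias, and the correction fades toward 1 as t grows.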
Example 7
Project: voxelmorph Author: voxelmorph File: metrics.py License: GNU General Public License v3.0

def loss(self, y_true, y_pred):
    # get the value for the true and fake images
    disc_true = self.disc(y_true)
    disc_pred = self.disc(y_pred)

    # sample a x_hat by sampling along the line between true and pred
    # z = tf.placeholder(tf.float32, shape=[None, 1])
    # shp = y_true.get_shape()[0]
    # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
    # self.batch_size does not work, since it's not None!!!
    alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
    diff = y_pred - y_true
    interp = y_true + alpha * diff

    # take gradient of D(x_hat)
    gradients = K.gradients(self.disc(interp), [interp])[0]
    grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1)) - 1))

    # compute loss
    return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen
Example 8
Project: CapsNet-Fashion-MNIST Author: XifengGuo File: capsulelayers.py License: MIT License

def call(self, inputs, **kwargs):
    if type(inputs) is list:  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
        assert len(inputs) == 2
        inputs, mask = inputs
    else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
        # compute lengths of capsules
        x = K.sqrt(K.sum(K.square(inputs), -1))
        # generate the mask which is a one-hot code.
        # mask.shape=[None, n_classes]=[None, num_capsule]
        mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

    # inputs.shape=[None, num_capsule, dim_capsule]
    # mask.shape=[None, num_capsule]
    # masked.shape=[None, num_capsule * dim_capsule]
    masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
    return masked
Example 9
Project: MalConv-keras Author: j40903272 File: gen_adversarial.py License: MIT License

def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001):
    adv = inp.copy()
    loss = K.mean(model.output[:, 0])
    grads = K.gradients(loss, model.layers[1].output)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)

    mask = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    mask[pad_idx:pad_idx + pad_len] = 1
    grads *= K.constant(mask)

    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    step = int(1 / step_size) * 10
    for _ in range(step):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        #print (e, loss_value, end='\r')
        if loss_value >= 0.9:
            break
    return adv, g, loss_value
Example 10
Project: MalConv-keras Author: j40903272 File: gen_adversarial2.py License: MIT License

def fgsm(model, inp, pad_idx, pad_len, e, step_size=0.001, target_class=1):
    adv = inp.copy()
    loss = K.mean(model.output[:, target_class])
    grads = K.gradients(loss, model.layers[1].output)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-8)

    mask = np.zeros(model.layers[1].output.shape[1:])  # embedding layer output shape
    mask[pad_idx:pad_idx + pad_len] = 1
    grads *= K.constant(mask)

    iterate = K.function([model.layers[1].output], [loss, grads])
    g = 0.
    step = int(1 / step_size) * 10
    for _ in range(step):
        loss_value, grads_value = iterate([adv])
        grads_value *= step_size
        g += grads_value
        adv += grads_value
        #print (e, loss_value, grads_value.mean(), end='\r')
        if loss_value >= 0.9:
            break
    return adv, g, loss_value
Example 11
Project: deep-models Author: LaurentMazare File: lstm_ln.py License: Apache License 2.0

def norm(self, xs, norm_id):
    mu = K.mean(xs, axis=-1, keepdims=True)
    sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
    xs = self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
    return xs
Example 12
Project: CapsNet Author: l11x0m7 File: capsule.py License: MIT License

def squash(s, axis=-1):
    """
    Squash function. This could be viewed as one kind of activations.
    """
    squared_s = K.sum(K.square(s), axis=axis, keepdims=True)
    scale = squared_s / (1 + squared_s) / K.sqrt(squared_s + K.epsilon())
    return scale * s
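The scale factor implements the CapsNet squashing nonlinearity squash(s) = (||s||^2 / (1 + ||s||^2)) * (s / ||s||), so a vector of length r is mapped to length r^2 / (1 + r^2): short vectors shrink toward zero and long vectors saturate just below unit length. A quick numeric check (my own sketch, reusing the squash defined above):

import numpy as np
from keras import backend as K

v = K.constant(np.array([[0.1, 0.0], [10.0, 0.0]]))  # lengths 0.1 and 10.0
lengths = K.sqrt(K.sum(K.square(squash(v)), axis=-1))
print(K.eval(lengths))  # ~[0.0099, 0.9901]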
Example 13
Project: CapsNet Author: l11x0m7 File: capsule.py License: MIT License

def call(self, inputs, **kwargs):
    return K.sqrt(K.sum(K.square(inputs), axis=-1))
Example 14
Project: CapsNet Author: l11x0m7 File: capsule.py License: MIT License

def call(self, inputs, **kwargs):
    # inputs -> (X, y), then output the mask of y
    # inputs -> X, then output the mask of prediction
    if isinstance(inputs, (list, tuple)):
        inputs, mask = inputs
    else:
        pred = K.sqrt(K.sum(K.square(inputs), axis=-1) + K.epsilon())
        mask = K.one_hot(indices=K.argmax(pred, 1),
                         num_classes=pred.get_shape().as_list()[1])
    return K.batch_flatten(inputs * K.expand_dims(mask, axis=-1))
Example 15
Project: 3DGCN Author: blackmints File: loss.py License: MIT License

def std_rmse(std=1):
    def rmse(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true))) * std
    return rmse
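Presumably the targets here were standardized (divided by their standard deviation) before training; multiplying the RMSE by std reports the error back in the original units of the target.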
Example 16
Project: fancy-cnn Author: textclf File: embeddings.py License: MIT License

def __call__(self, p):
    if self.skip:
        return self.s * (p / K.clip(K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)), 0.5, 100))
    return self.s * (p / K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)))
Example 17
Project: fancy-cnn Author: textclf File: embeddings.py License: MIT License

def __call__(self, p):
    if self.skip:
        return self.s * (p / K.clip(K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)), 0.5, 100))
    return self.s * (p / K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True)))
Example 18
Project: DeepLearn Author: GauravBh1010tt File: DeepLearn_cornet.py License: MIT License

def cor(self, y1, y2, lamda):
    y1_mean = K.mean(y1, axis=0)
    y1_centered = y1 - y1_mean
    y2_mean = K.mean(y2, axis=0)
    y2_centered = y2 - y2_mean
    corr_nr = K.sum(y1_centered * y2_centered, axis=0)
    # T here is theano.tensor; this file targets the Theano backend
    corr_dr1 = K.sqrt(T.sum(y1_centered * y1_centered, axis=0) + 1e-8)
    corr_dr2 = K.sqrt(T.sum(y2_centered * y2_centered, axis=0) + 1e-8)
    corr_dr = corr_dr1 * corr_dr2
    corr = corr_nr / corr_dr
    return K.sum(corr) * lamda
Example 19
Project: DeepLearn Author: GauravBh1010tt File: DeepLearn_cornet.py License: MIT License

def sum_corr(model):
    view1 = np.load("test_v1.npy")
    view2 = np.load("test_v2.npy")
    x = project(model, [view1, np.zeros_like(view1)])
    y = project(model, [np.zeros_like(view2), view2])
    print("test correlation")
    corr = 0
    for i in range(0, len(x[0])):
        x1 = x[:, i] - (np.ones(len(x)) * (sum(x[:, i]) / len(x)))
        x2 = y[:, i] - (np.ones(len(y)) * (sum(y[:, i]) / len(y)))
        nr = sum(x1 * x2) / (math.sqrt(sum(x1 * x1)) * math.sqrt(sum(x2 * x2)))
        corr += nr
    print(corr)
Example 20
Project: DeepLearn Author: GauravBh1010tt File: model_abcnn.py License: MIT License

def compute_euclidean_match_score(l_r):
    l, r = l_r
    denominator = 1. + K.sqrt(
        -2 * K.batch_dot(l, r, axes=[2, 2]) +
        K.expand_dims(K.sum(K.square(l), axis=2), 2) +
        K.expand_dims(K.sum(K.square(r), axis=2), 1)
    )
    denominator = K.maximum(denominator, K.epsilon())
    return 1. / denominator
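The three terms under the square root are the batched expansion of pairwise squared distances, ||l_i - r_j||^2 = ||l_i||^2 - 2 l_i · r_j + ||r_j||^2, computed for every pair of rows at once; the match score 1 / (1 + distance) is then close to 1 for nearby vectors and decays toward 0 as they move apart.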
Example 21
Project: DeepLearn Author: GauravBh1010tt File: XRMB_CNN_17.06.v2.py License: MIT License

def cor(self, y1, y2, lamda):
    y1_mean = K.mean(y1, axis=0)
    y1_centered = y1 - y1_mean
    y2_mean = K.mean(y2, axis=0)
    y2_centered = y2 - y2_mean
    corr_nr = K.sum(y1_centered * y2_centered, axis=0)
    corr_dr1 = K.sqrt(K.sum(y1_centered * y1_centered, axis=0) + 1e-8)
    corr_dr2 = K.sqrt(K.sum(y2_centered * y2_centered, axis=0) + 1e-8)
    corr_dr = corr_dr1 * corr_dr2
    corr = corr_nr / corr_dr
    return K.sum(corr) * lamda
Example 22
Project: DeepLearn Author: GauravBh1010tt File: XRMB_CNN_17.06.v2.py License: MIT License

def sum_corr(model):
    view1 = np.load("MFCC_Test.npy")
    view2 = np.load("XRMB_Test.npy")
    x = project(model, [view1, np.zeros_like(view2)])
    y = project(model, [np.zeros_like(view1), view2])
    print("test correlation")
    corr = 0
    for i in range(0, len(x[0])):
        x1 = x[:, i] - (np.ones(len(x)) * (sum(x[:, i]) / len(x)))
        x2 = y[:, i] - (np.ones(len(y)) * (sum(y[:, i]) / len(y)))
        nr = sum(x1 * x2) / (math.sqrt(sum(x1 * x1)) * math.sqrt(sum(x2 * x2)))
        corr += nr
    print(corr)
Example 23
Project: DeepLearn Author: GauravBh1010tt File: CorrMCNN_Arch2.py License: MIT License

def sum_corr(model):
    view1 = np.load("test_v1.npy")
    view2 = np.load("test_v2.npy")
    x = project(model, [view1, np.zeros_like(view1)])
    y = project(model, [np.zeros_like(view2), view2])
    print("test correlation")
    corr = 0
    for i in range(0, len(x[0])):
        x1 = x[:, i] - (np.ones(len(x)) * (sum(x[:, i]) / len(x)))
        x2 = y[:, i] - (np.ones(len(y)) * (sum(y[:, i]) / len(y)))
        nr = sum(x1 * x2) / (math.sqrt(sum(x1 * x1)) * math.sqrt(sum(x2 * x2)))
        corr += nr
    print(corr)
Example 24
Project: face_classification Author: oarriaga File: grad_cam.py License: MIT License

def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
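Strictly speaking, K.sqrt(K.mean(K.square(x))) is the root-mean-square of x rather than its L2 norm (the two differ by a factor of sqrt(n)), but for gradient-based visualization either choice serves the same purpose: it fixes the scale of the tensor so the update step size is predictable.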
Example 25
Project: Emotion Author: petercunha File: grad_cam.py License: MIT License

def normalize(x):
    # utility function to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
Example 26
Project: BERT Author: yyht File: layers.py License: Apache License 2.0

def call(self, x, **kwargs):
    u = K.mean(x, axis=-1, keepdims=True)
    s = K.mean(K.square(x - u), axis=-1, keepdims=True)
    z = (x - u) / K.sqrt(s + self.eps)
    return self.gamma * z + self.beta
Example 27
Project: BERT Author: yyht File: funcs.py License: Apache License 2.0

def scaled_dot_product_attention_tf(q, k, v, attn_mask, attention_dropout: float):
    w = K.batch_dot(q, k)  # w is B, H, L, L
    w = w / K.sqrt(K.cast(shape_list(v)[-1], K.floatx()))
    if attn_mask is not None:
        w = attn_mask * w + (1.0 - attn_mask) * -1e9
    w = K.softmax(w)
    w = Dropout(attention_dropout)(w)
    return K.batch_dot(w, v)  # it is B, H, L, C//H [like v]
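The division by K.sqrt of the last dimension of v is the 1/sqrt(d_k) factor from scaled dot-product attention: it keeps the variance of the logits roughly independent of the head dimension so the softmax does not saturate. The (1.0 - attn_mask) * -1e9 term drives masked positions to effectively zero probability after the softmax.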
Example 28
Project: BERT Author: yyht File: funcs.py License: Apache License 2.0

def scaled_dot_product_attention_th(q, k, v, attn_mask, attention_dropout: float):
    w = theano_matmul(q, k)
    w = w / K.sqrt(K.cast(shape_list(v)[-1], K.floatx()))
    if attn_mask is not None:
        attn_mask = K.repeat_elements(attn_mask, shape_list(v)[1], 1)
        w = attn_mask * w + (1.0 - attn_mask) * -1e9
    # manual softmax with max-subtraction for numerical stability
    # (K.T is theano.tensor under the Theano backend)
    w = K.T.exp(w - w.max()) / K.T.exp(w - w.max()).sum(axis=-1, keepdims=True)
    w = Dropout(attention_dropout)(w)
    return theano_matmul(w, v)
Example 29
Project: BERT Author: yyht File: funcs.py License: Apache License 2.0

def gelu(x):
    # https://stackoverflow.com/a/42194662/2796084
    return 0.5 * x * (1 + K.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * K.pow(x, 3))))
Example 30
Project: DogEmbeddings Author: ericzhao28 File: siamese.py License: MIT License

def euclidean_distance(vects):
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
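In a siamese network this distance is usually wrapped in a Lambda layer so it becomes part of the graph. A hedged sketch of that wiring (base_network and the input shape are illustrative placeholders, not taken from the project):

from keras.layers import Input, Lambda
from keras.models import Model

input_a = Input(shape=(128,))
input_b = Input(shape=(128,))
emb_a = base_network(input_a)  # base_network: hypothetical shared embedding model
emb_b = base_network(input_b)  # same weights applied to both inputs

distance = Lambda(euclidean_distance)([emb_a, emb_b])
model = Model(inputs=[input_a, input_b], outputs=distance)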