Python tensorflow.keras.backend.square() Examples
The following are 14 code examples of tensorflow.keras.backend.square(), collected from open-source projects. The originating project, source file, and license are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.backend.
Example #1
Source File: util.py From keras-rl2 with MIT License
import numpy as np
from tensorflow.keras import backend as K


def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity, since TensorFlow has problems
        # with the comparison `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    import tensorflow as tf
    if hasattr(tf, 'select'):
        return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
    else:
        return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
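As a quick sanity check, the loss can be evaluated eagerly (a minimal sketch, assuming TensorFlow 2.x eager execution and the function defined above):

from tensorflow.keras import backend as K

y_true = K.constant([0.0, 0.0, 0.0])
y_pred = K.constant([0.5, 2.0, -3.0])
# Errors smaller than clip_value are penalized quadratically, larger ones linearly.
print(huber_loss(y_true, y_pred, clip_value=1.0).numpy())  # ~[0.125, 1.5, 2.5]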
Example #2
Source File: metrics.py From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):
    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    ksq = K.square(y_pred - y_true)

    if self.vox_weights is not None:
        if self.vox_weights == 'y_true':
            ksq *= y_true
        elif self.vox_weights == 'expy_true':
            ksq *= tf.exp(y_true)
        else:
            ksq *= self.vox_weights

    if self.weights is not None:
        ksq *= self.weights

    return K.mean(ksq)
Example #3
Source File: metrics.py From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):
    # get the value for the true and fake images
    disc_true = self.disc(y_true)
    disc_pred = self.disc(y_pred)

    # sample an x_hat along the line between true and pred
    # z = tf.placeholder(tf.float32, shape=[None, 1])
    # shp = y_true.get_shape()[0]
    # WARNING: SHOULD REALLY BE shape=[batch_size, 1] !!!
    # self.batch_size does not work, since it's not None!!!
    alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
    diff = y_pred - y_true
    interp = y_true + alpha * diff

    # take gradient of D(x_hat)
    gradients = K.gradients(self.disc(interp), [interp])[0]
    grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1)) - 1))

    # compute loss
    return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen
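The interpolation-plus-penalty structure matches the WGAN-GP critic objective of Gulrajani et al. (2017), with x_hat sampled uniformly along lines between real and generated samples:

L = \mathbb{E}_{\tilde{x} \sim \mathbb{P}_g}[D(\tilde{x})] - \mathbb{E}_{x \sim \mathbb{P}_r}[D(x)] + \lambda \, \mathbb{E}_{\hat{x}}\big[ (\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1)^2 \big]

One caveat: K.sum(..., axis=1) appears to cover only one non-batch axis of a 4-D image tensor, whereas the canonical penalty sums squared gradients over all non-batch axes.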
Example #4
Source File: conv_mod.py From StyleGAN2-Tensorflow-2.0 with MIT License
def call(self, inputs):
    # To channels first (NHWC -> NCHW; the original comment said "channels last")
    x = tf.transpose(inputs[0], [0, 3, 1, 2])

    # Get weight and bias modulations
    # Make sure w's shape is compatible with self.kernel
    w = K.expand_dims(K.expand_dims(K.expand_dims(inputs[1], axis=1), axis=1), axis=-1)

    # Add minibatch layer to weights
    wo = K.expand_dims(self.kernel, axis=0)

    # Modulate
    weights = wo * (w + 1)

    # Demodulate
    if self.demod:
        d = K.sqrt(K.sum(K.square(weights), axis=[1, 2, 3], keepdims=True) + 1e-8)
        weights = weights / d

    # Reshape/scale input
    x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]])  # Fused => reshape minibatch to convolution groups.
    w = tf.reshape(tf.transpose(weights, [1, 2, 3, 0, 4]),
                   [weights.shape[1], weights.shape[2], weights.shape[3], -1])

    x = tf.nn.conv2d(x, w, strides=self.strides, padding="SAME", data_format="NCHW")

    # Reshape/scale output.
    x = tf.reshape(x, [-1, self.filters, x.shape[2], x.shape[3]])  # Fused => reshape convolution groups back to minibatch.
    x = tf.transpose(x, [0, 2, 3, 1])

    return x
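The modulate/demodulate pair follows the weight demodulation of StyleGAN2 (Karras et al., 2020): with per-input-channel style scales s = w + 1 and epsilon = 1e-8,

w'_{ijk} = s_i \cdot w_{ijk}, \qquad w''_{ijk} = \frac{w'_{ijk}}{\sqrt{\sum_{i,k} (w'_{ijk})^2 + \epsilon}}

where i indexes input feature maps, j output feature maps, and k the spatial footprint; the axis=[1, 2, 3] sum above performs exactly the per-output-map sum over i and k.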
Example #5
Source File: stylegan_two.py From StyleGAN2-Tensorflow-2.0 with MIT License
import numpy as np
from tensorflow.keras import backend as K


def gradient_penalty(samples, output, weight):
    gradients = K.gradients(output, samples)[0]
    gradients_sqr = K.square(gradients)
    gradient_penalty = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))

    # (weight / 2) * ||grad||^2
    # Penalize the gradient norm
    return K.mean(gradient_penalty) * weight
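Despite the generic name, train_step in Example #12 calls this with real images and their discriminator scores, which makes it the R1 regularizer of Mescheder et al. (2018); the weight argument absorbs the gamma/2 factor:

R_1(\psi) = \frac{\gamma}{2} \, \mathbb{E}_{x \sim \mathbb{P}_r}\big[ \lVert \nabla_x D_\psi(x) \rVert^2 \big]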
Example #6
Source File: util.py From keras-rl2 with MIT License
def update(self, x):
    if x.ndim == len(self.shape):
        x = x.reshape(-1, *self.shape)
    assert x.shape[1:] == self.shape

    self._count += x.shape[0]
    self._sum += np.sum(x, axis=0)
    self._sumsq += np.sum(np.square(x), axis=0)

    self.mean = self._sum / float(self._count)
    self.std = np.sqrt(np.maximum(np.square(self.eps),
                                  self._sumsq / float(self._count) - np.square(self.mean)))
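The update maintains streaming statistics via Var[x] = E[x^2] - E[x]^2. A minimal self-contained NumPy version of the same arithmetic (variable names here are illustrative, not from keras-rl2):

import numpy as np

count, total, total_sq = 0, 0.0, 0.0
for _ in range(100):
    batch = np.random.randn(64)
    count += batch.shape[0]
    total += batch.sum()
    total_sq += np.square(batch).sum()

mean = total / count
std = np.sqrt(max(0.0, total_sq / count - mean ** 2))  # Var[x] = E[x^2] - E[x]^2
print(mean, std)  # approach 0 and 1 respectively for standard normal samples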
Example #7
Source File: layers.py From neuron with GNU General Public License v3.0
def call(self, x):
    return K.mean(K.batch_flatten(K.square(x[0] - x[1])), -1)
Example #8
Source File: losses.py From image-quality-assessment with Apache License 2.0
from tensorflow.keras import backend as K


def earth_movers_distance(y_true, y_pred):
    cdf_true = K.cumsum(y_true, axis=-1)
    cdf_pred = K.cumsum(y_pred, axis=-1)
    emd = K.sqrt(K.mean(K.square(cdf_true - cdf_pred), axis=-1))
    return K.mean(emd)
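A quick eager-mode check with two 10-bin score distributions (a sketch of NIMA-style usage; it assumes each row already sums to 1 and TensorFlow 2.x eager execution):

from tensorflow.keras import backend as K

y_true = K.constant([[0.0, 0.1, 0.2, 0.4, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0]])
y_pred = K.constant([[0.0, 0.0, 0.1, 0.3, 0.3, 0.2, 0.1, 0.0, 0.0, 0.0]])
print(earth_movers_distance(y_true, y_pred).numpy())  # small positive scalar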
Example #9
Source File: utils.py From aitom with GNU General Public License v3.0
import tensorflow as tf
from tensorflow.keras import backend as K


def correlation_coefficient_loss(y_true, y_pred):
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x - mx, y - my

    r_num = K.sum(tf.multiply(xm, ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den

    r = K.maximum(K.minimum(r, 1.0), -1.0)
    return 1 - K.square(r)
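Note that the loss is 1 - r^2, so perfectly correlated and perfectly anti-correlated predictions both score ~0. A quick eager-mode check (a sketch, assuming TensorFlow 2.x):

import numpy as np
from tensorflow.keras import backend as K

a = K.constant(np.linspace(0.0, 1.0, 10))
print(correlation_coefficient_loss(a, 2.0 * a).numpy())  # ~0.0 (r = 1)
print(correlation_coefficient_loss(a, -a).numpy())       # ~0.0 as well (r = -1)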
Example #10
Source File: utils.py From aitom with GNU General Public License v3.0
import math

import numpy as np


def angle_zyz_difference(ang1=np.zeros(3), ang2=np.zeros(3)):
    loc1_r = np.zeros(ang1.shape)
    loc2_r = np.zeros(ang2.shape)

    rm1 = rotation_matrix_zyz(ang1)
    rm2 = rotation_matrix_zyz(ang2)
    loc1_r_t = np.array([loc1_r, loc1_r, loc1_r])
    loc2_r_t = np.array([loc2_r, loc2_r, loc2_r])

    dif_m = (rm1.dot(np.eye(3) - loc1_r_t)).transpose() \
            - (rm2.dot(np.eye(3) - loc2_r_t)).transpose()
    dif_d = math.sqrt(np.square(dif_m).sum())

    return dif_d
Example #11
Source File: bilstm_siamese_network.py From DeepPavlov with Apache License 2.0
def _euclidian_dist(self, x_pair: List[Tensor]) -> Tensor:
    x1_norm = K.l2_normalize(x_pair[0], axis=1)
    x2_norm = K.l2_normalize(x_pair[1], axis=1)
    diff = x1_norm - x2_norm
    square = K.square(diff)
    _sum = K.sum(square, axis=1)
    _sum = K.clip(_sum, min_value=1e-12, max_value=None)
    dist = K.sqrt(_sum) / 2.
    return dist
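Because both inputs are L2-normalized and the root is halved, the returned distance lies in [0, 1]. A standalone check of the same computation with self dropped (a sketch, assuming TensorFlow 2.x eager execution):

from tensorflow.keras import backend as K

x1 = K.l2_normalize(K.constant([[1.0, 0.0]]), axis=1)
x2 = K.l2_normalize(K.constant([[0.0, 1.0]]), axis=1)
d = K.sqrt(K.clip(K.sum(K.square(x1 - x2), axis=1), 1e-12, None)) / 2.
print(d.numpy())  # ~[0.7071] for orthogonal unit vectors; opposite vectors give 1.0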
Example #12
Source File: stylegan_two.py From StyleGAN2-Tensorflow-2.0 with MIT License
def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Get style information
        w_space = []
        pl_lengths = self.pl_mean
        for i in range(len(style)):
            w_space.append(self.GAN.S(style[i]))

        # Generate images
        generated_images = self.GAN.G(w_space + [noise])

        # Discriminate
        real_output = self.GAN.D(images, training=True)
        fake_output = self.GAN.D(generated_images, training=True)

        # Hinge loss function
        gen_loss = K.mean(fake_output)
        divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
        disc_loss = divergence

        if perform_gp:
            # R1 gradient penalty
            disc_loss += gradient_penalty(images, real_output, 10)

        if perform_pl:
            # Slightly adjust W space
            w_space_2 = []
            for i in range(len(style)):
                std = 0.1 / (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

            # Generate from slightly adjusted W space
            pl_images = self.GAN.G(w_space_2 + [noise])

            # Get distance after adjustment (path length)
            delta_g = K.mean(K.square(pl_images - generated_images), axis=[1, 2, 3])
            pl_lengths = delta_g

            if self.pl_mean > 0:
                gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

    # Get gradients for respective areas
    gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

    # Apply gradients
    self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
    self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

    return disc_loss, gen_loss, divergence, pl_lengths
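Up to an overall sign flip on the discriminator output (this codebase drives real outputs negative), divergence and gen_loss are the standard hinge GAN losses:

L_D = \mathbb{E}\big[\max(0,\, 1 + D(x_{\text{real}}))\big] + \mathbb{E}\big[\max(0,\, 1 - D(x_{\text{fake}}))\big], \qquad L_G = \mathbb{E}\big[D(x_{\text{fake}})\big]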
Example #13
Source File: metrics.py From neuron with GNU General Public License v3.0
def dice(self, y_true, y_pred):
    """ compute dice for given Tensors """
    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    if self.input_type == 'prob':
        # We assume that y_true is probabilistic, but just in case:
        if self.re_norm:
            y_true = tf.div_no_nan(y_true, K.sum(y_true, axis=-1, keepdims=True))
        y_true = K.clip(y_true, K.epsilon(), 1)

        # make sure pred is a probability
        if self.re_norm:
            y_pred = tf.div_no_nan(y_pred, K.sum(y_pred, axis=-1, keepdims=True))
        y_pred = K.clip(y_pred, K.epsilon(), 1)

    # Prepare the volumes to operate on.
    # If we're doing 'hard' Dice, then we will prepare one-hot-based matrices of size
    # [batch_size, nb_voxels, nb_labels], where for each voxel in each batch entry,
    # the entries are either 0 or 1
    if self.dice_type == 'hard':

        # if given predicted probability, transform to "hard max"
        if self.input_type == 'prob':
            if self.approx_hard_max:
                y_pred_op = _hard_max(y_pred, axis=-1)
                y_true_op = _hard_max(y_true, axis=-1)
            else:
                y_pred_op = _label_to_one_hot(K.argmax(y_pred, axis=-1), self.nb_labels)
                y_true_op = _label_to_one_hot(K.argmax(y_true, axis=-1), self.nb_labels)

        # if given predicted labels, transform to one-hot notation
        else:
            assert self.input_type == 'max_label'
            y_pred_op = _label_to_one_hot(y_pred, self.nb_labels)
            y_true_op = _label_to_one_hot(y_true, self.nb_labels)

    # If we're doing soft Dice, require prob output, and the data already is as we need it:
    # [batch_size, nb_voxels, nb_labels]
    else:
        assert self.input_type == 'prob', "cannot do soft dice with max_label input"
        y_pred_op = y_pred
        y_true_op = y_true

    # reshape to [batch_size, nb_voxels, nb_labels]
    batch_size = K.shape(y_true)[0]
    y_pred_op = K.reshape(y_pred_op, [batch_size, -1, K.shape(y_true)[-1]])
    y_true_op = K.reshape(y_true_op, [batch_size, -1, K.shape(y_true)[-1]])

    # compute dice for each entry in batch.
    # dice will now be [batch_size, nb_labels]
    top = 2 * K.sum(y_true_op * y_pred_op, 1)
    bottom = K.sum(K.square(y_true_op), 1) + K.sum(K.square(y_pred_op), 1)
    # make sure we have no 0s on the bottom
    bottom = K.maximum(bottom, self.area_reg)
    return top / bottom
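The heart of the soft Dice above is just the top/bottom ratio. A minimal standalone version for probabilistic maps shaped [batch, voxels, labels] (a sketch, not the neuron API; area_reg is illustrative):

from tensorflow.keras import backend as K

def soft_dice(y_true, y_pred, area_reg=0.1):
    # dice per (batch entry, label)
    top = 2 * K.sum(y_true * y_pred, 1)
    bottom = K.sum(K.square(y_true), 1) + K.sum(K.square(y_pred), 1)
    bottom = K.maximum(bottom, area_reg)  # guard against empty labels
    return top / bottom  # [batch, labels]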
Example #14
Source File: utils.py From neuron with GNU General Public License v3.0
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K


def gaussian_kernel(sigma, windowsize=None, indexing='ij'):
    """
    sigma will be a number or a list of numbers.

    # some guidance from my MATLAB file
    https://github.com/adalca/mivt/blob/master/src/gaussFilt.m

    Parameters:
        sigma: scalar or list of scalars
        windowsize (optional): scalar or list of scalars indicating the shape of the kernel

    Returns:
        N-D kernel with the same number of dimensions as the number of sigmas.

    Todo: could use MultivariateNormalDiag
    """

    if not isinstance(sigma, (list, tuple)):
        sigma = [sigma]
    sigma = [np.maximum(f, np.finfo(float).eps) for f in sigma]

    nb_dims = len(sigma)

    # compute windowsize
    if windowsize is None:
        windowsize = [np.round(f * 3) * 2 + 1 for f in sigma]

    if len(sigma) != len(windowsize):
        raise ValueError('sigma and windowsize should have the same length. '
                         'Got vectors: ' + str(sigma) + ' and ' + str(windowsize))

    # ok, let's get to work.
    mid = [(w - 1) / 2 for w in windowsize]

    # list of volume ndgrid
    # N-long list, each entry of shape volshape
    mesh = volshape_to_meshgrid(windowsize, indexing=indexing)
    mesh = [tf.cast(f, 'float32') for f in mesh]

    # compute independent gaussians
    diff = [mesh[f] - mid[f] for f in range(len(windowsize))]
    exp_term = [-K.square(diff[f]) / (2 * (sigma[f] ** 2)) for f in range(nb_dims)]
    norms = [exp_term[f] - np.log(sigma[f] * np.sqrt(2 * np.pi)) for f in range(nb_dims)]

    # add an all-ones entry and transform into a large matrix
    norms_matrix = tf.stack(norms, axis=-1)  # *volshape x N
    g = K.sum(norms_matrix, -1)  # volshape
    g = tf.exp(g)
    g /= tf.reduce_sum(g)

    return g
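For intuition, here is a 1-D NumPy equivalent of the log-space construction above (a sketch; the real function is N-D via volshape_to_meshgrid):

import numpy as np

sigma = 1.5
ws = int(np.round(sigma * 3) * 2 + 1)  # same default window rule as above
x = np.arange(ws) - (ws - 1) / 2
log_g = -x ** 2 / (2 * sigma ** 2) - np.log(sigma * np.sqrt(2 * np.pi))
g = np.exp(log_g)
g /= g.sum()  # normalized 1-D Gaussian kernel
print(g.sum())  # 1.0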