Python tensorflow.python.keras.backend.mean() Examples
The following are 11 code examples of tensorflow.python.keras.backend.mean(), drawn from open-source projects.
Each example notes its source project and license. You may also want to check out all available functions/classes of the module tensorflow.python.keras.backend.

Example #1
Source Project: icme2019 Author: ShenDezhou File: normalization.py License: MIT License
def call(self, x):
    mean = K.mean(x, axis=self.axis, keepdims=True)
    std = K.std(x, axis=self.axis, keepdims=True)
    # Standardize along self.axis, then apply the learned scale and shift.
    return self.gamma * (x - mean) / (std + self.eps) + self.beta
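For reference, K.mean() and K.std() with keepdims=True retain the reduced axis so the statistics broadcast back over x. A minimal standalone sketch of the same normalization (the axis, epsilon, and input values are illustrative):

from tensorflow.python.keras import backend as K

x = K.constant([[1., 2., 3.], [4., 5., 6.]])
mean = K.mean(x, axis=-1, keepdims=True)  # shape (2, 1): [[2.], [5.]]
std = K.std(x, axis=-1, keepdims=True)
print(K.eval((x - mean) / (std + 1e-8)))  # each row now has zero mean and unit std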
Example #2
Source Project: GraphEmbedding Author: shenweichen File: sdne.py License: MIT License
def l_2nd(beta):
    def loss_2nd(y_true, y_pred):
        # Weight the reconstruction error: non-zero entries of y_true
        # are penalized beta times harder than zero entries.
        b_ = np.ones_like(y_true)
        b_[y_true != 0] = beta
        x = K.square((y_true - y_pred) * b_)
        t = K.sum(x, axis=-1)
        return K.mean(t)
    return loss_2nd
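Note that loss_2nd indexes y_true with NumPy, so this hedged sketch feeds concrete arrays rather than symbolic tensors (beta and the values are illustrative):

import numpy as np
from tensorflow.python.keras import backend as K

loss_fn = l_2nd(beta=5.0)
y_true = np.array([[0., 1., 0.]], dtype='float32')
y_pred = np.array([[0.1, 0.8, 0.2]], dtype='float32')
print(K.eval(loss_fn(y_true, y_pred)))  # squared errors, weighted 5x on the non-zero entry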
Example #3
Source Project: GraphEmbedding Author: shenweichen File: line.py License: MIT License
def line_loss(y_true, y_pred):
    return -K.mean(K.log(K.sigmoid(y_true * y_pred)))
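Here line_loss treats y_true as the edge sign and y_pred as the dot product of the two node embeddings, so the loss is small when the signs agree. A quick numeric sketch (the values are illustrative):

from tensorflow.python.keras import backend as K

y_true = K.constant([1., 1., -1.])     # +1 for observed edges, -1 for negative samples
y_pred = K.constant([2.0, 0.5, -1.5])  # illustrative embedding dot products
print(K.eval(line_loss(y_true, y_pred)))  # low when y_true and y_pred share a sign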
Example #4
Source Project: FATE Author: FederatedAI File: backend.py License: Apache License 2.0
def call(self, x, mask=None):
    """1. mask is a bool-type tensor and needs casting before computation.
    2. mask shape is 2-dimensional: (batch_size, feature_dimension).
    """
    if mask is not None:
        mask = K.repeat(mask, x.shape[-1])
        mask = tf.transpose(mask, [0, 2, 1])
        mask = tf.cast(mask, tf.float32)
        x = x * mask
        # Average over the unmasked timesteps only.
        return K.sum(x, axis=1) / K.sum(mask, axis=1)
    else:
        return K.mean(x, axis=1)
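A small numeric sketch of the masked branch (shapes and values are illustrative): a mask value of 0 drops the padded timestep from the average.

import tensorflow as tf
from tensorflow.python.keras import backend as K

x = K.constant([[[1., 1., 1.], [5., 5., 5.]]])     # (batch=1, timesteps=2, features=3)
mask = K.constant([[1., 0.]])                      # second timestep is padding
mask = tf.transpose(K.repeat(mask, 3), [0, 2, 1])  # broadcast to (1, 2, 3)
pooled = K.sum(x * mask, axis=1) / K.sum(mask, axis=1)
print(K.eval(pooled))  # [[1., 1., 1.]] -- the padded step is ignored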
Example #5
Source Project: BVAE-tf Author: alecGraves File: sample_layer.py License: The Unlicense
def __init__(self, latent_regularizer='bvae', beta=100., **kwargs):
    '''
    args:
    ------
    latent_regularizer : str
        Either 'bvae', 'vae', or 'no'.
        Determines whether regularization is applied
        to the latent space representation.
    beta : float
        beta > 1, used for 'bvae' latent_regularizer
        (unused if 'bvae' not selected).
    ------
    ex.
        sample = SampleLayer('bvae', 16)([mean, logvar])
    '''
    if latent_regularizer.lower() in ['bvae', 'vae']:
        self.reg = latent_regularizer
    else:
        self.reg = None

    if self.reg == 'bvae':
        self.beta = beta
    elif self.reg == 'vae':
        self.beta = 1.

    super(SampleLayer, self).__init__(**kwargs)
Example #6
Source Project: BVAE-tf Author: alecGraves File: sample_layer.py License: The Unlicense
def call(self, x, training=None):
    if len(x) != 2:
        raise Exception('input layers must be a list: mean and logvar')
    if len(x[0].shape) != 2 or len(x[1].shape) != 2:
        raise Exception('input shape is not a vector [batchSize, latentSize]')

    mean = x[0]
    logvar = x[1]

    # trick to allow setting batch at train/eval time
    if mean.shape[0].value is None or logvar.shape[0].value is None:
        return mean + 0 * logvar  # Keras needs the *0 so the gradient is not None

    if self.reg is not None:
        # kl divergence:
        latent_loss = -0.5 * (1 + logvar - K.square(mean) - K.exp(logvar))
        latent_loss = K.sum(latent_loss, axis=-1)  # sum over latent dimension
        latent_loss = K.mean(latent_loss, axis=0)  # avg over batch
        # use beta to force less usage of vector space:
        latent_loss = self.beta * latent_loss
        self.add_loss(latent_loss, x)

    def reparameterization_trick():
        epsilon = K.random_normal(shape=logvar.shape, mean=0., stddev=1.)
        stddev = K.exp(logvar * 0.5)
        return mean + stddev * epsilon

    return K.in_train_phase(reparameterization_trick, mean + 0 * logvar,
                            training=training)  # TODO figure out why this is not working in the specified tf version???
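A hypothetical wiring of SampleLayer between an encoder's mean and log-variance heads, following the docstring's usage example (input and latent sizes are illustrative):

from tensorflow.python.keras.layers import Input, Dense

inputs = Input(shape=(784,))
h = Dense(256, activation='relu')(inputs)
mean = Dense(16)(h)    # latent mean head
logvar = Dense(16)(h)  # latent log-variance head
z = SampleLayer('bvae', 100.)([mean, logvar])  # samples z and adds the beta-weighted KL loss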
Example #7
Source Project: delta Author: didi File: cmvn.py License: Apache License 2.0
def compute_cmvn(sums, square, count):
    ''' compute global feature mean and variance
        var = E(x^2) - (E(x))^2
    '''
    mean = sums / count
    var = (square / count) - np.square(mean)
    return mean, var
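compute_cmvn expects accumulated statistics rather than raw features; a minimal sketch of accumulating them over two hypothetical batches:

import numpy as np

batches = [np.array([[1., 2.], [3., 4.]]), np.array([[5., 6.]])]
sums = sum(b.sum(axis=0) for b in batches)           # per-dimension sum of x
square = sum((b ** 2).sum(axis=0) for b in batches)  # per-dimension sum of x^2
count = sum(b.shape[0] for b in batches)
mean, var = compute_cmvn(sums, square, count)
print(mean, var)  # mean = [3. 4.], var = E(x^2) - (E(x))^2 ~= [2.667 2.667]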
Example #8
Source Project: delta Author: didi File: cmvn.py License: Apache License 2.0
def load_cmvn(path):
    ''' load mean and variance from cmvn.npy,
        then convert to TF Tensor '''
    # [1, nbins, nchannels]
    mean, variance = np.load(path)
    # [1, 1, nbins, nchannels]
    mean = np.expand_dims(mean, axis=0)
    variance = np.expand_dims(variance, axis=0)
    mean = tf.convert_to_tensor(mean, dtype=tf.float32, name='cmvn_mean')
    variance = tf.convert_to_tensor(
        variance, dtype=tf.float32, name='cmvn_variance')
    return mean, variance
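load_cmvn assumes cmvn.npy holds a single array whose first axis stacks mean and variance, each shaped [1, nbins, nchannels]. A hypothetical writer that produces a compatible file (shapes and values are illustrative):

import numpy as np

mean = np.zeros((1, 40, 1), dtype=np.float32)    # [1, nbins, nchannels]
variance = np.ones((1, 40, 1), dtype=np.float32)
np.save('cmvn.npy', np.stack([mean, variance]))  # unpacks as mean, variance on load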
Example #9
Source Project: delta Author: didi File: cmvn.py License: Apache License 2.0
def apply_cmvn(feats, mean, variance, epsilon=1e-9):
    ''' TF: apply CMVN on feature '''
    return (feats - mean) * tf.rsqrt(variance + epsilon)
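The loaded statistics broadcast over the batch and time axes; a usage sketch with hypothetical feature shapes (the cmvn.npy file is the one assumed above):

import tensorflow as tf

feats = tf.random.uniform([8, 100, 40, 1])  # hypothetical [batch, time, nbins, nchannels]
mean, variance = load_cmvn('cmvn.npy')      # each [1, 1, nbins, nchannels]
normalized = apply_cmvn(feats, mean, variance)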
Example #10
Source Project: delta Author: didi File: cmvn.py License: Apache License 2.0
def apply_local_cmvn(feats, epsilon=1e-9):
    ''' feats: (NHWC) '''
    mean = tf.expand_dims(keras_backend.mean(feats, axis=1), axis=1)
    var = tf.expand_dims(keras_backend.var(feats, axis=1), axis=1)
    feats = (feats - mean) * tf.rsqrt(var + epsilon)
    return feats
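Unlike apply_cmvn, this variant computes statistics per utterance along axis 1 (time), so it needs no precomputed file; it assumes the module-level import `from tensorflow.python.keras import backend as keras_backend`. Usage sketch (the shape is illustrative):

import tensorflow as tf
from tensorflow.python.keras import backend as keras_backend

feats = tf.random.uniform([8, 100, 40, 1])  # NHWC: batch, time, bins, channels
normalized = apply_local_cmvn(feats)        # zero mean / unit variance along time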
Example #11
Source Project: DeepCTR Author: shenweichen File: normalization.py License: Apache License 2.0
def call(self, inputs):
    mean = K.mean(inputs, axis=self.axis, keepdims=True)
    variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
    std = K.sqrt(variance + self.eps)
    outputs = (inputs - mean) / std
    if self.scale:
        outputs *= self.gamma
    if self.center:
        outputs += self.beta
    return outputs
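Assuming this call() belongs to DeepCTR's LayerNormalization layer class, a hypothetical placement inside a model (layer sizes are illustrative):

from tensorflow.python.keras.layers import Dense, Input

x = Input(shape=(64,))
h = Dense(32)(x)
h = LayerNormalization()(h)  # standardize each sample, then rescale with gamma/beta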