Python tensorflow.expm1() Examples
The following are 7 code examples of tensorflow.expm1(), drawn from open-source projects; the source file, project, and license are noted above each example. tf.expm1(x) computes e**x - 1 elementwise and stays accurate for x near zero, where computing tf.exp(x) - 1 directly loses precision to rounding.
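As a quick illustration of that numerical point, here is a minimal, self-contained sketch written against the same TF 1.x graph API the examples below use:

import tensorflow as tf

x = tf.constant([1e-10, 1e-5, 1.0], dtype=tf.float32)
stable = tf.expm1(x)     # accurate even for tiny x
naive = tf.exp(x) - 1.0  # cancellation: exp(1e-10) rounds to 1.0 in float32

with tf.Session() as sess:
    print(sess.run(stable))  # ~[1.0e-10, 1.0000050e-05, 1.7182819]
    print(sess.run(naive))   # the 1e-10 entry collapses to 0.0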
Example #1
Source File: autoencoders.py From BERT with Apache License 2.0
# Excerpted class method; assumes TF 1.x and the surrounding project's
# `common_layers` utilities (e.g. `common_layers.shape_list`).
def bottleneck(self, x):
  hparams = self.hparams
  z_size = hparams.bottleneck_bits
  x_shape = common_layers.shape_list(x)
  with tf.variable_scope("vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    if hparams.mode != tf.estimator.ModeKeys.TRAIN:
      return mu, 0.0  # No sampling or kl loss on eval.
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    epsilon = tf.random_normal(x_shape[:-1] + [z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss * hparams.kl_beta
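Why expm1 shows up here: with log_sigma as the log-variance, the closed-form KL divergence to a standard normal is 0.5 * (exp(log_sigma) + mu**2 - 1 - log_sigma) per dimension, and folding the -1 into the exponential gives exactly the expm1 form used above. A quick NumPy check of the identity (values are illustrative):

import numpy as np

log_sigma = np.array([-2.0, 0.0, 1.5])  # log-variance
mu = np.array([0.3, -1.0, 0.0])

kl_textbook = 0.5 * (np.exp(log_sigma) + mu ** 2 - 1.0 - log_sigma)
kl_as_coded = 0.5 * (np.expm1(log_sigma) + mu ** 2 - log_sigma)
assert np.allclose(kl_textbook, kl_as_coded)  # expm1(a) == exp(a) - 1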
Example #2
Source File: discretization.py From BERT with Apache License 2.0
# Assumes TF 1.x and the project's `common_layers` utilities.
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Sampled latent z, KL loss, mu, and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
    return z, kl_loss, mu, log_sigma
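Both VAE examples also clip the penalty with a floor (the "free bits" trick): KL below free_bits = z_size // 4 costs nothing, which discourages the latent from collapsing to the prior early in training. A toy illustration with made-up KL values:

free_bits = 2  # z_size // 4 for z_size = 8
for kl in [0.5, 2.0, 5.0]:
    print(kl, "->", max(kl - free_bits, 0.0))  # prints 0.0, 0.0, 3.0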
Example #3
Source File: Input.py From vimss with GNU General Public License v3.0
import tensorflow as tf

def denorm(logmagnitude):
    # Invert the log1p magnitude normalization: expm1(log(1 + m)) == m.
    return tf.expm1(logmagnitude)
Example #4
Source File: Input.py From AdversarialAudioSeparation with MIT License
import tensorflow as tf

def denorm(logmagnitude):
    '''
    Exp(logmagnitude) - 1
    :param logmagnitude: Log-normalized magnitude spectrogram
    :return: Unnormalized magnitude spectrogram
    '''
    return tf.expm1(logmagnitude)
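Both denorm variants invert a log1p magnitude normalization. A round-trip sketch, where the norm helper is a hypothetical forward transform implied by the docstring (it does not appear in the source):

import tensorflow as tf

def norm(magnitude):
    # Hypothetical forward transform: log(1 + m), which denorm undoes.
    return tf.log1p(magnitude)

mag = tf.constant([0.0, 0.5, 10.0])
roundtrip = denorm(norm(mag))  # recovers mag up to float rounding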
Example #5
Source File: Utilities.py From DeepDenoiser with Apache License 2.0
import tensorflow as tf

def signed_expm1(inputs):
    # Sign-preserving expm1: expands the magnitude while keeping the sign,
    # undoing a signed log1p-style compression.
    return tf.multiply(tf.sign(inputs), tf.expm1(tf.abs(inputs)))
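A round-trip sketch for the signed variant; signed_log1p is a hypothetical companion compressor, not part of the source:

import tensorflow as tf

def signed_log1p(inputs):
    # Hypothetical inverse of signed_expm1: compress magnitude, keep sign.
    return tf.multiply(tf.sign(inputs), tf.log1p(tf.abs(inputs)))

x = tf.constant([-3.0, -0.1, 0.0, 2.5])
roundtrip = signed_expm1(signed_log1p(x))  # equals x up to float rounding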
Example #6
Source File: magrecnp.py From representation_mixing with BSD 3-Clause "New" or "Revised" License
import tensorflow as tf

def sonify(spectrogram, samples, transform_op_fn, logscaled=True):
    graph = tf.Graph()
    with graph.as_default():
        noise = tf.Variable(tf.random_normal([samples], stddev=1e-6))

        x = transform_op_fn(noise)
        y = spectrogram

        if logscaled:
            x = tf.expm1(x)
            y = tf.expm1(y)

        x = tf.nn.l2_normalize(x)
        y = tf.nn.l2_normalize(y)

        tf.losses.mean_squared_error(x, y[-tf.shape(x)[0]:])

        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            loss=tf.losses.get_total_loss(),
            var_list=[noise],
            tol=1e-16,
            method='L-BFGS-B',
            options={
                'maxiter': 1000,
                'disp': True
            })

    with tf.Session(graph=graph) as session:
        session.run(tf.global_variables_initializer())
        optimizer.minimize(session)
        waveform = session.run(noise)

    return waveform
Example #7
Source File: sample_rnn_unaligned_speech_ljspeech.py From representation_mixing with BSD 3-Clause "New" or "Revised" License
import tensorflow as tf

def sonify(spectrogram, samples, transform_op_fn, logscaled=True):
    graph = tf.Graph()
    with graph.as_default():
        noise = tf.Variable(tf.random_normal([samples], stddev=1e-6))

        x = transform_op_fn(noise)
        y = spectrogram

        if logscaled:
            x = tf.expm1(x)
            y = tf.expm1(y)

        # tf.nn.l2_normalize arguments changed between TF versions,
        # so normalize manually instead.
        def normalize(a):
            return a / tf.sqrt(tf.maximum(tf.reduce_sum(a ** 2, axis=0), 1E-12))

        x = normalize(x)
        y = normalize(y)

        tf.losses.mean_squared_error(x, y[-tf.shape(x)[0]:])

        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            loss=tf.losses.get_total_loss(),
            var_list=[noise],
            tol=1e-16,
            method='L-BFGS-B',
            options={
                # `sonify_steps` is defined at module level in the original file.
                'maxiter': sonify_steps,
                'disp': True
            })

    # This really shouldn't run on GPU, but seems to? Pin it to CPU.
    config = tf.ConfigProto(
        device_count={'CPU': 1, 'GPU': 0},
        allow_soft_placement=True,
        log_device_placement=False)

    with tf.Session(config=config, graph=graph) as session:
        session.run(tf.global_variables_initializer())
        optimizer.minimize(session)
        waveform = session.run(noise)

    return waveform
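For context, a hypothetical call site: transform_op_fn must map a waveform tensor to the same (log-scaled) spectrogram representation as the target. The STFT parameters below are placeholders, not taken from the source (tf.signal.stft requires TF >= 1.14):

import tensorflow as tf

def transform_op_fn(signal):
    # Placeholder log1p-magnitude STFT; frame parameters must match
    # however `spectrogram` was computed.
    stft = tf.signal.stft(signal, frame_length=512, frame_step=128)
    return tf.log1p(tf.abs(stft))

# waveform = sonify(spectrogram, samples=22050,
#                   transform_op_fn=transform_op_fn, logscaled=True)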