Python tensorflow.is_inf() Examples
The following are 11 code examples of tensorflow.is_inf(), collected from open-source projects. The project and source file for each example are noted above its code.
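Before the project examples, here is a minimal sketch of the op itself (TF 1.x session style; the constant values are illustrative). tf.is_inf returns an element-wise boolean tensor that is True only for +inf and -inf; NaN does not count as infinite. Note that tf.is_inf is the TensorFlow 1.x name; in TensorFlow 2.x the same op is exposed as tf.math.is_inf.

import numpy as np
import tensorflow as tf

x = tf.constant([1.0, np.inf, -np.inf, np.nan])
mask = tf.is_inf(x)  # element-wise check; NaN is not infinite

with tf.Session() as sess:
    print(sess.run(mask))  # [False  True  True  False]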
Example #1
Source File: exponential.py From tensorprob with MIT License
def Exponential(lambda_, name=None):
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = tf.log(lambda_) - lambda_*X

    def integral(lower, upper):
        upper_integrand = tf.cond(
            tf.is_inf(tf.cast(upper, config.dtype)),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.exp(-lambda_*upper)
        )
        lower_integrand = tf.cond(
            tf.is_inf(tf.cast(lower, config.dtype)),
            lambda: tf.constant(0, dtype=config.dtype),
            lambda: tf.exp(-lambda_*lower)
        )
        return lower_integrand - upper_integrand

    Distribution.integral = integral

    return X
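All the tensorprob distributions on this page share this guard: tf.is_inf tests each integration limit, and tf.cond substitutes the analytic limit of the antiderivative whenever a bound is infinite, so a single integral routine covers both finite and unbounded normalization regions. One detail worth checking against the math: the body computes S(lower) - S(upper) for the survival function S(x) = exp(-lambda*x), whose limits are 1 as x -> -inf and 0 as x -> +inf, yet the infinite-bound fallbacks here are 1 for upper and 0 for lower; compare Example #5, where the same fallbacks are paired with the CDF and upper - lower, the convention they actually match.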
Example #2
Source File: uniform.py From tensorprob with MIT License
def Uniform(name=None):
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        return tf.cond(
            tf.logical_or(
                tf.is_inf(tf.cast(lower, config.dtype)),
                tf.is_inf(tf.cast(upper, config.dtype))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )

    Distribution.integral = integral

    return X
Example #3
Source File: uniform.py From tensorprob with MIT License
def UniformInt(name=None):
    X = tf.placeholder(config.int_dtype, name=name)

    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        val = tf.cond(
            tf.logical_or(
                tf.is_inf(tf.ceil(tf.cast(lower, config.dtype))),
                tf.is_inf(tf.floor(tf.cast(upper, config.dtype)))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )
        return val

    Distribution.integral = integral

    return X
Example #4
Source File: yellowfin.py From YellowFin with Apache License 2.0
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1 - x)^4 * C / h_min^2,
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + p*y = q,
    # where p = (D^2 h_min^2) / (2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1]).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # assert_array = \
    #   [tf.Assert(tf.logical_not(tf.is_nan(self._dist_to_opt_avg)), [self._dist_to_opt_avg,]),
    #    tf.Assert(tf.logical_not(tf.is_nan(self._h_min)), [self._h_min,]),
    #    tf.Assert(tf.logical_not(tf.is_nan(self._grad_var)), [self._grad_var,]),
    #    tf.Assert(tf.logical_not(tf.is_inf(self._dist_to_opt_avg)), [self._dist_to_opt_avg,]),
    #    tf.Assert(tf.logical_not(tf.is_inf(self._h_min)), [self._h_min,]),
    #    tf.Assert(tf.logical_not(tf.is_inf(self._grad_var)), [self._grad_var,])]
    # with tf.control_dependencies(assert_array):
    # EPS in the numerator to prevent momentum being exactly one in case of 0 gradient
    p = (self._dist_to_opt_avg + EPS)**2 * (self._h_min + EPS)**2 / 2 / (self._grad_var + EPS)
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
    y = w - p / 3.0 / (w + EPS)
    x = y + 1
    return x
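The comment block above carries the whole derivation, so a quick numeric sanity check is cheap. Below is a minimal NumPy sketch of the same algebra with made-up values for the three statistics (D for the distance to the optimum, C for the gradient variance, h_min for the smallest curvature); it confirms that the computed y solves the depressed cubic y^3 + p*y = q with q = -p.

import numpy as np

D, C, h_min = 1.3, 0.02, 0.5  # illustrative stand-ins, not YellowFin defaults

p = (D**2) * (h_min**2) / (2.0 * C)
q = -p

# Vieta's substitution, mirroring the TensorFlow ops above
w3 = (-np.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = np.sign(w3) * np.abs(w3)**(1.0 / 3.0)
y = w - p / (3.0 * w)

print(y**3 + p*y - q)  # ~0 up to float error, so y is the real root
print((y + 1.0)**2)    # mu = x^2 with x = y + 1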
Example #5
Source File: normal.py From tensorprob with MIT License
def Normal(mu, sigma, name=None):
    # TODO(chrisburr) Just use NormalN?
    X = tf.placeholder(config.dtype, name=name)

    Distribution.logp = _normal_logp(X, mu, sigma)

    def integral(lower, upper):
        upper_integrand = tf.cond(
            tf.is_inf(tf.cast(upper, config.dtype)),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: _normal_cdf(upper, mu, sigma)
        )
        lower_integrand = tf.cond(
            tf.is_inf(tf.cast(lower, config.dtype)),
            lambda: tf.constant(0, dtype=config.dtype),
            lambda: _normal_cdf(lower, mu, sigma)
        )
        return upper_integrand - lower_integrand

    Distribution.integral = integral

    return X


# @Distribution
# def NormalN(mus, sigmas, name=None):
#     X = tf.placeholder(config.dtype, name=name)
#     logps = [_normal_logp(X, mu, sigma) for mu, sigma in zip(mus, sigmas)]
#
#     def cdf(lim):
#         raise NotImplementedError
#
#     Distribution.logp = sum(logps)
#     Distribution.integral = lambda lower, upper: cdf(upper) - cdf(lower)
#
#     return X
Example #6
Source File: cwise_ops_test.py From deep_image_model with Apache License 2.0
def _compare(self, x, use_gpu):
    np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
    with self.test_session(use_gpu=use_gpu) as sess:
        inx = tf.convert_to_tensor(x)
        ofinite, oinf, onan = tf.is_finite(inx), tf.is_inf(inx), tf.is_nan(inx)
        tf_finite, tf_inf, tf_nan = sess.run([ofinite, oinf, onan])
    self.assertAllEqual(np_inf, tf_inf)
    self.assertAllEqual(np_nan, tf_nan)
    self.assertAllEqual(np_finite, tf_finite)
    self.assertShapeEqual(np_inf, oinf)
    self.assertShapeEqual(np_nan, onan)
    self.assertShapeEqual(np_finite, ofinite)
Example #7
Source File: core.py From auto_yolo with MIT License
def tf_safe_log(value, replacement_value=-100.0):
    log_value = tf.log(value + 1e-9)
    replace = tf.logical_or(tf.is_nan(log_value), tf.is_inf(log_value))
    log_value = tf.where(replace, replacement_value * tf.ones_like(log_value), log_value)
    return log_value
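To make the behaviour concrete, here is a small illustrative check (TF 1.x session style; the input values are made up). Zero maps to log(1e-9), about -20.7, thanks to the additive epsilon; a negative input yields a NaN that tf.where swaps for replacement_value; ordinary positive inputs pass through essentially unchanged.

v = tf.constant([0.0, 1.0, -1.0])
with tf.Session() as sess:
    print(sess.run(tf_safe_log(v)))  # approx [-20.72, 0.0, -100.0]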
Example #8
Source File: automatic_loss_scaler.py From OpenSeq2Seq with Apache License 2.0
def update_op(self, has_nan, amax):
    def overflow_case():
        new_scale_val = tf.clip_by_value(self.scale / self.step_factor,
                                         self.scale_min, self.scale_max)
        scale_assign = tf.assign(self.scale, new_scale_val)
        overflow_iter_assign = tf.assign(self.last_overflow_iteration, self.iteration)
        with tf.control_dependencies([scale_assign, overflow_iter_assign]):
            return tf.identity(self.scale)

    def scale_case():
        since_overflow = self.iteration - self.last_overflow_iteration
        should_update = tf.equal(since_overflow % self.step_window, 0)

        def scale_update_fn():
            new_scale_val = tf.clip_by_value(self.scale * self.step_factor,
                                             self.scale_min, self.scale_max)
            return tf.assign(self.scale, new_scale_val)

        return tf.cond(should_update, scale_update_fn, lambda: self.scale)

    iter_update = tf.assign_add(self.iteration, 1)
    overflow = tf.logical_or(has_nan, tf.is_inf(amax))
    update_op = tf.cond(overflow, overflow_case, scale_case)
    with tf.control_dependencies([update_op]):
        return tf.identity(iter_update)
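This is the classic backoff loss-scaling policy: an overflow (a NaN gradient or an infinite absolute maximum, caught by tf.is_inf(amax)) divides the scale by step_factor and records the iteration, while step_window clean iterations in a row multiply it back up, with both moves clamped to [scale_min, scale_max].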
Example #9
Source File: tools.py From stacked_capsule_autoencoders with Apache License 2.0
def gradient_summaries(gvs, suppress_inf_and_nans=False):
    """Creates summaries for norm, mean and var of gradients."""
    gs = [gv[0] for gv in gvs]
    grad_global_norm = tf.global_norm(gs, 'gradient_global_norm')

    if suppress_inf_and_nans:
        is_nan_or_inf = tf.logical_or(tf.is_nan(grad_global_norm),
                                      tf.is_inf(grad_global_norm))
        grad_global_norm = tf.where(is_nan_or_inf,
                                    tf.zeros_like(grad_global_norm) - 1.,
                                    grad_global_norm)

    grad_abs_max, grad_abs_mean, grad_mean, grad_var = [0.] * 4
    n_grads = 1e-8
    for g, _ in gvs:
        if isinstance(g, tf.IndexedSlices):
            g = g.values

        if g is not None:
            current_n_grads = np.prod(g.shape.as_list())
            abs_g = abs(g)
            mean, var = tf.nn.moments(g, list(range(len(g.shape))))
            grad_abs_max = tf.maximum(grad_abs_max, tf.reduce_max(abs_g))
            grad_abs_mean += tf.reduce_sum(abs_g)
            grad_mean += mean * current_n_grads
            grad_var += var
            n_grads += current_n_grads

    tf.summary.scalar('grad/abs_max', grad_abs_max)
    tf.summary.scalar('grad/abs_mean', grad_abs_mean / n_grads)
    tf.summary.scalar('grad/mean', grad_mean / n_grads)
    tf.summary.scalar('grad/var', grad_var / n_grads)

    return dict(grad_global_norm=grad_global_norm)
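A plausible call site, sketched for context; the loss tensor and the choice of optimizer are assumptions, not part of the original file. gvs is the (gradient, variable) list that TF 1.x optimizers produce:

opt = tf.train.AdamOptimizer(1e-3)
gvs = opt.compute_gradients(loss)  # list of (gradient, variable) pairs
stats = gradient_summaries(gvs, suppress_inf_and_nans=True)
train_op = opt.apply_gradients(gvs)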
Example #10
Source File: test_forward.py From incubator-tvm with Apache License 2.0
def test_forward_isinf():
    _verify_infiniteness_ops(tf.is_inf, "isinf")
Example #11
Source File: automatic_loss_scaler.py From OpenSeq2Seq with Apache License 2.0
def update_op(self, has_nan, amax):
    is_nonfinite = tf.logical_or(has_nan, tf.is_inf(amax))
    x = tf.cond(is_nonfinite,
                lambda: tf.pow(2., self.log_max),
                lambda: tf.log(amax) / tf.log(tf.constant(2.)))

    x_hat_assn = tf.assign(self.x_hat,
                           self.beta1 * self.x_hat + (1 - self.beta1) * x)
    b1_corr_assn = tf.assign(self.b1_correction,
                             self.b1_correction * self.beta1)
    with tf.control_dependencies([x_hat_assn, b1_corr_assn]):
        mu = self.x_hat.read_value() / (1 - self.b1_correction.read_value())

    slow_x_hat_assn = tf.assign(self.slow_x_hat,
                                self.beta2 * self.slow_x_hat + (1 - self.beta2) * x)
    xsquared_hat_assn = tf.assign(
        self.xsquared_hat,
        self.beta2 * self.xsquared_hat + (1 - self.beta2) * (x * x),
    )
    b2_corr_assn = tf.assign(self.b2_correction,
                             self.b2_correction * self.beta2)
    with tf.control_dependencies([slow_x_hat_assn, xsquared_hat_assn, b2_corr_assn]):
        e_xsquared = self.xsquared_hat.read_value() / \
            (1 - self.b2_correction.read_value())
        slow_mu = self.slow_x_hat.read_value() / \
            (1 - self.b2_correction.read_value())

        sigma2 = e_xsquared - (slow_mu * slow_mu)
        sigma = tf.sqrt(tf.maximum(sigma2, tf.constant(0.)))

    log_cutoff = sigma * self.overflow_std_dev + mu
    log_difference = 16 - log_cutoff
    proposed_scale = tf.pow(2., log_difference)
    scale_update = tf.assign(
        self.scale,
        tf.clip_by_value(proposed_scale, self.scale_min, self.scale_max),
    )
    iter_update = tf.assign_add(self.iteration, 1)
    with tf.control_dependencies([scale_update]):
        return tf.identity(iter_update)
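Unlike the backoff scaler in Example #8, this variant keeps bias-corrected running estimates of the mean (mu) and standard deviation (sigma) of log2(amax), in the style of Adam's moment estimates, and then proposes a scale of 2^(16 - (mu + overflow_std_dev * sigma)). The constant 16 reflects float16's overflow threshold of roughly 2^16, so a gradient sitting overflow_std_dev standard deviations above the typical log-magnitude should still fit; an actual overflow forces x to the pessimistic fallback tf.pow(2., self.log_max).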