Python tensorflow.python.layers.utils.smart_cond() Examples
The following are 30 code examples of tensorflow.python.layers.utils.smart_cond().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module tensorflow.python.layers.utils.
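Before the examples, a minimal sketch of the behavior these snippets rely on (assuming a TensorFlow 1.x environment, where this internal module lives): when the predicate passed to smart_cond is a Python bool or a constant tensor, the chosen branch function is called directly at graph-construction time and no conditional op is created; otherwise smart_cond falls back to tf.cond and both branches are traced into the graph.

import tensorflow as tf
from tensorflow.python.layers import utils

# Static predicate: resolved while the graph is built, so y_static is simply
# the tensor returned by the first branch.
y_static = utils.smart_cond(True,
                            lambda: tf.constant(1.0),
                            lambda: tf.constant(0.0))

# Dynamic predicate: a boolean placeholder forces a fall-back to tf.cond, and
# the branch is selected at session run time.
training = tf.placeholder(tf.bool, shape=[])
y_dynamic = utils.smart_cond(training,
                             lambda: tf.constant(1.0),
                             lambda: tf.constant(0.0))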
Example #1
Source File: basic.py From ReSAN with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
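For context: this dropout_selu function, which recurs in many of the examples below, implements the alpha-dropout scheme from the SELU paper (Klambauer et al., 2017). Dropped units are set to the SELU saturation value -lambda*alpha ≈ -1.7581 rather than to zero, and the closing affine transform ret = a * ret + b is solved so that the output keeps the requested fixed-point mean and variance. smart_cond then gates the whole transform on training: with a Python False it compiles away entirely and the input passes through unchanged.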
Example #2
Source File: preprocessing.py From benchmarks with Apache License 2.0 | 5 votes |
def get_image_resize_method(resize_method, batch_position=0):
    """Get tensorflow resize method.

    If resize_method is 'round_robin', return different methods based on batch
    position in a round-robin fashion. NOTE: If the batch size is not a multiple
    of the number of methods, then the distribution of methods will not be
    uniform.

    Args:
        resize_method: (string) nearest, bilinear, bicubic, area, or round_robin.
        batch_position: position of the image in a batch. NOTE: this argument can
            be an integer or a tensor

    Returns:
        one of the resize types defined in tf.image.ResizeMethod.
    """
    if resize_method != 'round_robin':
        return _RESIZE_METHOD_MAP[resize_method]

    # return a resize method based on batch position in a round-robin fashion.
    resize_methods = list(_RESIZE_METHOD_MAP.values())

    def lookup(index):
        return resize_methods[index]

    def resize_method_0():
        return utils.smart_cond(batch_position % len(resize_methods) == 0,
                                lambda: lookup(0), resize_method_1)

    def resize_method_1():
        return utils.smart_cond(batch_position % len(resize_methods) == 1,
                                lambda: lookup(1), resize_method_2)

    def resize_method_2():
        return utils.smart_cond(batch_position % len(resize_methods) == 2,
                                lambda: lookup(2), lambda: lookup(3))

    # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here
    # because TF would not be able to construct a finite graph.
    return resize_method_0()
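A hedged usage sketch for the function above (the names i, height, and width are illustrative, and _RESIZE_METHOD_MAP is assumed, as in the source file, to map the four method strings to tf.image.ResizeMethod values). Because batch_position is a plain Python integer while the input pipeline is being built, each smart_cond resolves statically and the function returns a bare enum value:

method = get_image_resize_method('round_robin', batch_position=i)
resized = tf.image.resize_images(image, [height, width], method)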
Example #3
Source File: snns_cnn_mnist.py From AmusingPythonCodes with MIT License | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the range (0, 1], got %g"
                             % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        # The original asserted keep_prob's shape a second time here; asserting
        # alpha matches the sibling implementations and the evident intent.
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = tf.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * tf.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))


# (3) Scale input to zero mean and unit variance
Example #4
Source File: utils.py From AmusingPythonCodes with MIT License | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        # The original asserted keep_prob's shape a second time here; asserting
        # alpha matches the sibling implementations and the evident intent.
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #5
Source File: get_selu_parameters.py From AmusingPythonCodes with MIT License | 5 votes |
def dropout_selu(x, rate, alpha=-my_alpha * my_lambda,
                 fixed_point_mean=my_fixed_point_mean,
                 fixed_point_var=my_fixed_point_var, noise_shape=None,
                 seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        # The original asserted keep_prob's shape a second time here; asserting
        # alpha matches the sibling implementations and the evident intent.
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = tf.sqrt(fixed_point_var / (keep_prob * (
            (1 - keep_prob) * tf.pow(alpha - fixed_point_mean, 2) + fixed_point_var)))
        b = fixed_point_mean - a * (keep_prob * fixed_point_mean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #6
Source File: basic.py From DiSAN with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #7
Source File: basic.py From DiSAN with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #8
Source File: preprocessing.py From dlcookbook-dlbs with Apache License 2.0 | 5 votes |
def get_image_resize_method(resize_method, batch_position=0):
    """Get tensorflow resize method.

    If resize_method is 'round_robin', return different methods based on batch
    position in a round-robin fashion. NOTE: If the batch size is not a multiple
    of the number of methods, then the distribution of methods will not be
    uniform.

    Args:
        resize_method: (string) nearest, bilinear, bicubic, area, or round_robin.
        batch_position: position of the image in a batch. NOTE: this argument can
            be an integer or a tensor

    Returns:
        one of the resize types defined in tf.image.ResizeMethod.
    """
    if resize_method != 'round_robin':
        return _RESIZE_METHOD_MAP[resize_method]

    # return a resize method based on batch position in a round-robin fashion.
    resize_methods = list(_RESIZE_METHOD_MAP.values())

    def lookup(index):
        return resize_methods[index]

    def resize_method_0():
        return utils.smart_cond(batch_position % len(resize_methods) == 0,
                                lambda: lookup(0), resize_method_1)

    def resize_method_1():
        return utils.smart_cond(batch_position % len(resize_methods) == 1,
                                lambda: lookup(1), resize_method_2)

    def resize_method_2():
        return utils.smart_cond(batch_position % len(resize_methods) == 2,
                                lambda: lookup(2), lambda: lookup(3))

    # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here
    # because TF would not be able to construct a finite graph.
    return resize_method_0()
Example #9
Source File: basic.py From ReSAN with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #10
Source File: basic.py From ReSAN with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #11
Source File: selu.py From AmusingPythonCodes with MIT License | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the range (0, 1], got %g"
                             % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #12
Source File: basic.py From BiBloSA with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #13
Source File: basic.py From BiBloSA with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #14
Source File: basic.py From BiBloSA with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #15
Source File: basic.py From BiBloSA with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #16
Source File: general.py From BiBloSA with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #17
Source File: basic.py From BiBloSA with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #18
Source File: core.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def call(self, inputs, training=False):
    def dropped_inputs():
        return nn.dropout(inputs, 1 - self.rate,
                          noise_shape=self._get_noise_shape(inputs),
                          seed=self.seed)

    return utils.smart_cond(training,
                            dropped_inputs,
                            lambda: array_ops.identity(inputs))
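A minimal usage sketch for this Dropout layer's call method (assuming TensorFlow 1.x; the tensor names are illustrative): passing a Python False for training lets smart_cond short-circuit to the identity branch while the graph is built, whereas a boolean placeholder defers the choice to tf.cond at run time.

x = tf.placeholder(tf.float32, [None, 128])
is_training = tf.placeholder(tf.bool, shape=[])
outputs = tf.layers.Dropout(rate=0.5).apply(x, training=is_training)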
Example #19
Source File: normalization.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def _smart_select(pred, fn_then, fn_else):
    """Selects fn_then() or fn_else() based on the value of pred.

    The purpose of this function is the same as `utils.smart_cond`. However, at
    the moment there is a bug (b/36297356) that seems to kick in only when
    `smart_cond` delegates to `tf.cond`, which sometimes results in the training
    hanging when using parameter servers. This function will output the result
    of `fn_then` or `fn_else` if `pred` is known at graph construction time.
    Otherwise, it will use `tf.where` which will result in some redundant work
    (both branches will be computed but only one selected). However, the tensors
    involved will usually be small (means and variances in batchnorm), so the
    cost will be small and will not be incurred at all if `pred` is a constant.

    Args:
        pred: A boolean scalar `Tensor`.
        fn_then: A callable to use when pred==True.
        fn_else: A callable to use when pred==False.

    Returns:
        A `Tensor` whose value is fn_then() or fn_else() based on the value of
        pred.
    """
    pred_value = utils.constant_value(pred)
    if pred_value:
        return fn_then()
    elif pred_value is False:
        return fn_else()
    t_then = array_ops.expand_dims(fn_then(), 0)
    t_else = array_ops.expand_dims(fn_else(), 0)
    pred = array_ops.reshape(pred, [1])
    result = array_ops.where(pred, t_then, t_else)
    return array_ops.squeeze(result, [0])
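An illustrative call site for _smart_select in the batch-norm setting its docstring describes (the names training, batch_mean, and moving_mean are hypothetical): when training is not a compile-time constant, the tf.where path evaluates both branches but avoids the tf.cond hang mentioned above.

# Hypothetical: choose batch statistics while training, moving averages otherwise.
mean = _smart_select(training,
                     lambda: batch_mean,
                     lambda: moving_mean)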
Example #20
Source File: core.py From keras-lambda with MIT License | 5 votes |
def call(self, inputs, training=False):
    def dropped_inputs():
        return nn.dropout(inputs, 1 - self.rate,
                          noise_shape=self.noise_shape,
                          seed=self.seed)

    return utils.smart_cond(training,
                            dropped_inputs,
                            lambda: array_ops.identity(inputs))
Example #21
Source File: selu.py From pointwise with MIT License | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #22
Source File: core.py From lambda-packs with MIT License | 5 votes |
def call(self, inputs, training=False):
    def dropped_inputs():
        return nn.dropout(inputs, 1 - self.rate,
                          noise_shape=self._get_noise_shape(inputs),
                          seed=self.seed)

    return utils.smart_cond(training,
                            dropped_inputs,
                            lambda: array_ops.identity(inputs))
Example #23
Source File: normalization.py From lambda-packs with MIT License | 5 votes |
def _smart_select(pred, fn_then, fn_else):
    """Selects fn_then() or fn_else() based on the value of pred.

    The purpose of this function is the same as `utils.smart_cond`. However, at
    the moment there is a bug (b/36297356) that seems to kick in only when
    `smart_cond` delegates to `tf.cond`, which sometimes results in the training
    hanging when using parameter servers. This function will output the result
    of `fn_then` or `fn_else` if `pred` is known at graph construction time.
    Otherwise, it will use `tf.where` which will result in some redundant work
    (both branches will be computed but only one selected). However, the tensors
    involved will usually be small (means and variances in batchnorm), so the
    cost will be small and will not be incurred at all if `pred` is a constant.

    Args:
        pred: A boolean scalar `Tensor`.
        fn_then: A callable to use when pred==True.
        fn_else: A callable to use when pred==False.

    Returns:
        A `Tensor` whose value is fn_then() or fn_else() based on the value of
        pred.
    """
    pred_value = utils.constant_value(pred)
    if pred_value:
        return fn_then()
    elif pred_value is False:
        return fn_else()
    t_then = array_ops.expand_dims(fn_then(), 0)
    t_else = array_ops.expand_dims(fn_else(), 0)
    pred = array_ops.reshape(pred, [1])
    result = array_ops.where(pred, t_then, t_else)
    return array_ops.squeeze(result, [0])
Example #24
Source File: core.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def call(self, inputs, training=False):
    def dropped_inputs():
        return nn.dropout(inputs, 1 - self.rate,
                          noise_shape=self.noise_shape,
                          seed=self.seed)

    return utils.smart_cond(training,
                            dropped_inputs,
                            lambda: array_ops.identity(inputs))
Example #25
Source File: general.py From BERT with Apache License 2.0 | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #26
Source File: basic.py From inferbeddings with MIT License | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = math_ops.sqrt(fixedPointVar / (keep_prob * (
            (1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Example #27
Source File: snns_mlp_mnist.py From AmusingPythonCodes with MIT License | 5 votes |
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixed_point_mean=0.0,
                 fixed_point_var=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        # The original asserted keep_prob's shape a second time here; asserting
        # alpha matches the sibling implementations and the evident intent.
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = tf.sqrt(fixed_point_var / (keep_prob * (
            (1 - keep_prob) * tf.pow(alpha - fixed_point_mean, 2) + fixed_point_var)))
        b = fixed_point_mean - a * (keep_prob * fixed_point_mean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))


# (3) Input data scaled to zero mean and unit variance
# (1) Scale input to zero mean and unit variance
Example #28
Source File: preprocessing.py From parallax with Apache License 2.0 | 4 votes |
def distort_color(image, batch_position=0, distort_color_in_yiq=False,
                  scope=None):
    """Distort the color of the image.

    Each color distortion is non-commutative and thus ordering of the color ops
    matters. Ideally we would randomly permute the ordering of the color ops.
    Rather than adding that level of complication, we select a distinct ordering
    of color ops based on the position of the image in a batch.

    Args:
        image: float32 Tensor containing single image. Tensor values should be in
            range [0, 1].
        batch_position: the position of the image in a batch. NOTE: this argument
            can be an integer or a tensor
        distort_color_in_yiq: distort color of input images in YIQ space.
        scope: Optional scope for op_scope.

    Returns:
        color-distorted image
    """
    with tf.name_scope(scope or 'distort_color'):

        def distort_fn_0(image=image):
            """Variant 0 of distort function."""
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            if distort_color_in_yiq:
                image = distort_image_ops.random_hsv_in_yiq(
                    image, lower_saturation=0.5, upper_saturation=1.5,
                    max_delta_hue=0.2 * math.pi)
            else:
                image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
                image = tf.image.random_hue(image, max_delta=0.2)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            return image

        def distort_fn_1(image=image):
            """Variant 1 of distort function."""
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            if distort_color_in_yiq:
                image = distort_image_ops.random_hsv_in_yiq(
                    image, lower_saturation=0.5, upper_saturation=1.5,
                    max_delta_hue=0.2 * math.pi)
            else:
                image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
                image = tf.image.random_hue(image, max_delta=0.2)
            return image

        image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,
                                 distort_fn_1)
        # The random_* ops do not necessarily clamp.
        image = tf.clip_by_value(image, 0.0, 1.0)
        return image
Example #29
Source File: preprocessing.py From parallax with Apache License 2.0 | 4 votes |
def get_image_resize_method(resize_method, batch_position=0):
    """Get tensorflow resize method.

    If resize_method is 'round_robin', return different methods based on batch
    position in a round-robin fashion. NOTE: If the batch size is not a multiple
    of the number of methods, then the distribution of methods will not be
    uniform.

    Args:
        resize_method: (string) nearest, bilinear, bicubic, area, or round_robin.
        batch_position: position of the image in a batch. NOTE: this argument can
            be an integer or a tensor

    Returns:
        one of the resize types defined in tf.image.ResizeMethod.
    """
    resize_methods_map = {
        'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
        'bilinear': tf.image.ResizeMethod.BILINEAR,
        'bicubic': tf.image.ResizeMethod.BICUBIC,
        'area': tf.image.ResizeMethod.AREA
    }

    if resize_method != 'round_robin':
        return resize_methods_map[resize_method]

    # return a resize method based on batch position in a round-robin fashion.
    # list() so that lookup() can index the values under Python 3 as well (the
    # original assigned the dict view directly).
    resize_methods = list(resize_methods_map.values())

    def lookup(index):
        return resize_methods[index]

    def resize_method_0():
        return utils.smart_cond(batch_position % len(resize_methods) == 0,
                                lambda: lookup(0), resize_method_1)

    def resize_method_1():
        return utils.smart_cond(batch_position % len(resize_methods) == 1,
                                lambda: lookup(1), resize_method_2)

    def resize_method_2():
        return utils.smart_cond(batch_position % len(resize_methods) == 2,
                                lambda: lookup(2), lambda: lookup(3))

    # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function
    # here because TF would not be able to construct a finite graph.
    return resize_method_0()
Example #30
Source File: preprocessing.py From dlcookbook-dlbs with Apache License 2.0 | 4 votes |
def distort_color(image, batch_position=0, distort_color_in_yiq=False,
                  scope=None):
    """Distort the color of the image.

    Each color distortion is non-commutative and thus ordering of the color ops
    matters. Ideally we would randomly permute the ordering of the color ops.
    Rather than adding that level of complication, we select a distinct ordering
    of color ops based on the position of the image in a batch.

    Args:
        image: float32 Tensor containing single image. Tensor values should be in
            range [0, 1].
        batch_position: the position of the image in a batch. NOTE: this argument
            can be an integer or a tensor
        distort_color_in_yiq: distort color of input images in YIQ space.
        scope: Optional scope for op_scope.

    Returns:
        color-distorted image
    """
    with tf.name_scope(scope or 'distort_color'):

        def distort_fn_0(image=image):
            """Variant 0 of distort function."""
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            if distort_color_in_yiq:
                image = distort_image_ops.random_hsv_in_yiq(
                    image, lower_saturation=0.5, upper_saturation=1.5,
                    max_delta_hue=0.2 * math.pi)
            else:
                image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
                image = tf.image.random_hue(image, max_delta=0.2)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            return image

        def distort_fn_1(image=image):
            """Variant 1 of distort function."""
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            if distort_color_in_yiq:
                image = distort_image_ops.random_hsv_in_yiq(
                    image, lower_saturation=0.5, upper_saturation=1.5,
                    max_delta_hue=0.2 * math.pi)
            else:
                image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
                image = tf.image.random_hue(image, max_delta=0.2)
            return image

        image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,
                                 distort_fn_1)
        # The random_* ops do not necessarily clamp.
        image = tf.clip_by_value(image, 0.0, 1.0)
        return image