Python tensorflow.compat.v1.float16() Examples

The following are 20 code examples of tensorflow.compat.v1.float16(). They are extracted from open source projects; the source file, project, and license for each example are listed above it. You may also want to check out all available functions and classes of the module tensorflow.compat.v1.
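As a quick orientation before the examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of how the tf.float16 dtype is typically used with the tf.compat.v1 API: feed half-precision data through a graph and cast the result back to float32.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# A tiny graph: a float16 placeholder whose square is cast back to float32.
x = tf.placeholder(tf.float16, shape=(2, 2), name='x')
y = tf.cast(tf.square(x), tf.float32)

with tf.Session() as sess:
  result = sess.run(y, feed_dict={x: np.ones((2, 2), dtype=np.float16)})
  print(result.dtype)  # float32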
Example #1
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def _verify_infiniteness_ops(tf_op, name):
    """test operator infinity ops"""

    # Only float types are allowed in Tensorflow for isfinite and isinf
    # float16 is failing on cuda
    tf_dtypes = ["float32", "float64"]
    for tf_dtype in tf_dtypes:
        shape = (8, 8)
        data = np.random.uniform(size=shape).astype(tf_dtype)
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.inf  # np.infty is deprecated
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan

        tf.reset_default_graph()
        in_data = tf.placeholder(tf_dtype, shape, name="in_data")
        tf_op(in_data, name=name)
        compare_tf_with_tvm([data], ['in_data:0'], '{}:0'.format(name)) 
Example #2
Source File: official_ncf_model.py    From benchmarks with Apache License 2.0
def _fp16_variable_creator(next_creator, **kwargs):
  """Variable creator to create variables in fp32 and cast them to fp16."""
  dtype = kwargs.get('dtype', None)
  initial_value = kwargs.get('initial_value', None)
  if dtype is None:
    if initial_value is not None and not callable(initial_value):
      dtype = initial_value.dtype
  if dtype == tf.float16:
    if callable(initial_value):
      new_initial_value = lambda: tf.cast(initial_value(), tf.float32)
    else:
      new_initial_value = tf.cast(initial_value, tf.float32)
    kwargs['dtype'] = tf.float32
    kwargs['initial_value'] = new_initial_value
    var = next_creator(**kwargs)
    return tf.cast(var, dtype=tf.float16)
  else:
    return next_creator(**kwargs) 
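A hedged usage sketch (hypothetical, not part of the original file): the creator above is installed with tf.variable_creator_scope, so any variable created inside the scope with dtype tf.float16 is stored as float32 while the value returned to the caller is a float16 cast.

# Hypothetical usage of _fp16_variable_creator defined above.
with tf.variable_creator_scope(_fp16_variable_creator):
  w = tf.Variable(tf.zeros([4, 4], dtype=tf.float16), name='w')
# The backing variable is stored as float32; `w` is the float16-cast
# tensor returned by the creator.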
Example #3
Source File: model.py    From benchmarks with Apache License 2.0
def __init__(self,
               model_name,
               batch_size,
               learning_rate,
               fp16_loss_scale,
               params=None):
    self.model_name = model_name
    self.batch_size = batch_size
    self.default_batch_size = batch_size
    self.learning_rate = learning_rate
    # TODO(reedwm) Set custom loss scales for each model instead of using the
    # default of 128.
    self.fp16_loss_scale = fp16_loss_scale

    # use_tf_layers specifies whether to build the model using tf.layers.
    # fp16_vars specifies whether to create the variables in float16.
    if params:
      self.use_tf_layers = params.use_tf_layers
      self.fp16_vars = params.fp16_vars
      self.data_type = tf.float16 if params.use_fp16 else tf.float32
    else:
      self.use_tf_layers = True
      self.fp16_vars = False
      self.data_type = tf.float32 
Example #4
Source File: utils.py    From Object_Detection_Tracking with Apache License 2.0
@contextlib.contextmanager  # required for the `yield` below; assumes `import contextlib`
def float16_scope():
  """Scope whose custom getter stores float16-requested variables as float32."""

  def _custom_getter(getter, *args, **kwargs):
    """Returns a custom getter that methods must be called under."""
    cast_to_float16 = False
    requested_dtype = kwargs['dtype']
    if requested_dtype == tf.float16:
      kwargs['dtype'] = tf.float32
      cast_to_float16 = True
    var = getter(*args, **kwargs)
    if cast_to_float16:
      var = tf.cast(var, tf.float16)
    return var

  with tf.variable_scope('', custom_getter=_custom_getter) as varscope:
    yield varscope 
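A hedged usage sketch (hypothetical), assuming float16_scope is used as a contextlib context manager as in the original utils module: variables requested as tf.float16 inside the scope are stored as float32 and cast back to float16 on retrieval.

# Hypothetical usage of float16_scope defined above.
with float16_scope():
  w = tf.get_variable('w', shape=[8], dtype=tf.float16)
  # `w` is backed by a float32 variable and returned as a float16 cast.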
Example #5
Source File: diet.py    From tensor2tensor with Apache License 2.0
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q 
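Example #6 below calls a companion _dequantize helper that is not shown on this page. Under the assumption that it simply inverts the bitcast-and-scale scheme used above, a minimal sketch could look like this:

def _dequantize(q, params):
  """Sketch of the inverse of _quantize: bitcast back to int16 and rescale."""
  if not params.quantize:
    return q
  return tf.cast(tf.bitcast(q, tf.int16), tf.float32) * params.quantization_scale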
Example #6
Source File: diet.py    From tensor2tensor with Apache License 2.0
def make_diet_var_getter(params):
  """Create a custom variable getter for diet variables according to params."""

  def diet_var_initializer(shape, dtype, partition_info=None):
    """Initializer for a diet variable."""
    del dtype
    del partition_info

    with common_layers.fn_device_dependency("diet_init") as out_deps:
      float_range = math.sqrt(3)
      ret = tf.random_uniform(shape, -float_range, float_range)
      if params.quantize:
        ret = _quantize(ret, params, randomize=False)
      out_deps.append(ret)
      return ret

  def diet_var_getter(getter, **kwargs):
    """Get diet variable and return it dequantized."""
    if params.quantize:
      kwargs["dtype"] = tf.float16
    kwargs["initializer"] = diet_var_initializer
    kwargs["trainable"] = False

    base_var = getter(**kwargs)

    dequantized = _dequantize(base_var, params)

    if not hasattr(params, "dequantized"):
      params.dequantized = defaultdict(list)
    params.dequantized[base_var.name].append(dequantized)

    return dequantized

  return diet_var_getter 
Example #7
Source File: film_resnet_model.py    From tensor2robot with Apache License 2.0
def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,  # pylint: disable=keyword-arg-before-vararg
                           *args, **kwargs):
    """Creates variables in fp32, then casts to fp16 if necessary.

    This function is a custom getter. A custom getter is a function with the
    same signature as tf.get_variable, except it has an additional getter
    parameter. Custom getters can be passed as the `custom_getter` parameter of
    tf.variable_scope. Then, tf.get_variable will call the custom getter,
    instead of directly getting a variable itself. This can be used to change
    the types of variables that are retrieved with tf.get_variable.
    The `getter` parameter is the underlying variable getter, that would have
    been called if no custom getter was used. Custom getters typically get a
    variable with `getter`, then modify it in some way.

    This custom getter will create an fp32 variable. If a low precision
    (e.g. float16) variable was requested it will then cast the variable to the
    requested dtype. The reason we do not directly create variables in low
    precision dtypes is that applying small gradients to such variables may
    cause the variable not to change.

    Args:
      getter: The underlying variable getter, that has the same signature as
        tf.get_variable and returns a variable.
      name: The name of the variable to get.
      shape: The shape of the variable to get.
      dtype: The dtype of the variable to get. Note that if this is a low
        precision dtype, the variable will be created as a tf.float32 variable,
        then cast to the appropriate dtype.
      *args: Additional arguments to pass unmodified to getter.
      **kwargs: Additional keyword arguments to pass unmodified to getter.

    Returns:
      A variable which is cast to fp16 if necessary.
    """

    if dtype in CASTABLE_TYPES:
      var = getter(name, shape, tf.float32, *args, **kwargs)
      return tf.cast(var, dtype=dtype, name=name + '_cast')
    else:
      return getter(name, shape, dtype, *args, **kwargs) 
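A hedged usage sketch (hypothetical, not part of the original file): this method is meant to be passed as the custom_getter of a variable scope, so every float16 variable requested inside the scope is backed by float32 storage.

# Hypothetical usage; `model` stands for the object that defines
# _custom_dtype_getter.
with tf.variable_scope('resnet_model',
                       custom_getter=model._custom_dtype_getter):
  w = tf.get_variable('w', shape=[3, 3, 16, 16], dtype=tf.float16)
  # Stored as float32, returned as a float16 cast.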
Example #8
Source File: resnet.py    From tensor2robot with Apache License 2.0
def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,  # pylint: disable=keyword-arg-before-vararg
                           *args, **kwargs):
    """Creates variables in fp32, then casts to fp16 if necessary.

    This function is a custom getter. A custom getter is a function with the
    same signature as tf.get_variable, except it has an additional getter
    parameter. Custom getters can be passed as the `custom_getter` parameter of
    tf.variable_scope. Then, tf.get_variable will call the custom getter,
    instead of directly getting a variable itself. This can be used to change
    the types of variables that are retrieved with tf.get_variable.
    The `getter` parameter is the underlying variable getter, that would have
    been called if no custom getter was used. Custom getters typically get a
    variable with `getter`, then modify it in some way.
    This custom getter will create an fp32 variable. If a low precision
    (e.g. float16) variable was requested it will then cast the variable to the
    requested dtype. The reason we do not directly create variables in low
    precision dtypes is that applying small gradients to such variables may
    cause the variable not to change.
    Args:
      getter: The underlying variable getter, that has the same signature as
        tf.get_variable and returns a variable.
      name: The name of the variable to get.
      shape: The shape of the variable to get.
      dtype: The dtype of the variable to get. Note that if this is a low
        precision dtype, the variable will be created as a tf.float32 variable,
        then cast to the appropriate dtype.
      *args: Additional arguments to pass unmodified to getter.
      **kwargs: Additional keyword arguments to pass unmodified to getter.
    Returns:
      A variable which is cast to fp16 if necessary.
    """

    if dtype in CASTABLE_TYPES:
      var = getter(name, shape, tf.float32, *args, **kwargs)
      return tf.cast(var, dtype=dtype, name=six.ensure_str(name) + '_cast')
    else:
      return getter(name, shape, dtype, *args, **kwargs) 
Example #9
Source File: utils.py    From Object_Detection_Tracking with Apache License 2.0
def build_model_with_precision(pp, mm, ii, *args, **kwargs):
  """Build model with its inputs/params for a specified precision context.

  This is highly specific to this codebase and is not intended to be a general API.
  Advanced users only. DO NOT use it if you don't know what it does.
  NOTE: short argument names are intended to avoid conflicts with kwargs.

  Args:
    pp: A string, precision policy name, such as "mixed_float16".
    mm: A function that builds the model.
    ii: A tensor, for model inputs.
    *args: A list of model arguments.
    **kwargs: A dict, extra model parameters.

  Returns:
    The output of the mm model.
  """
  if pp == 'mixed_bfloat16':
    set_precision_policy(pp)
    inputs = tf.cast(ii, tf.bfloat16)
    with tf.tpu.bfloat16_scope():
      outputs = mm(inputs, *args, **kwargs)
    set_precision_policy('float32')
  elif pp == 'mixed_float16':
    set_precision_policy(pp)
    inputs = tf.cast(ii, tf.float16)
    with float16_scope():
      outputs = mm(inputs, *args, **kwargs)
    set_precision_policy('float32')
  elif not pp or pp == 'float32':
    outputs = mm(ii, *args, **kwargs)
  else:
    raise ValueError('Unknown precision name {}'.format(pp))

  # Users are responsible to convert the dtype of all outputs.
  return outputs 
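A hedged usage sketch (hypothetical): model_fn and images stand in for a caller's model-builder function and input tensor, and set_precision_policy / float16_scope are helpers defined elsewhere in the same utils module.

# Hypothetical call; extra keyword arguments are forwarded to model_fn.
outputs = build_model_with_precision('mixed_float16', model_fn, images,
                                     is_training=True)
# As noted above, callers are responsible for converting output dtypes.
outputs = tf.cast(outputs, tf.float32)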
Example #10
Source File: mtf_image_transformer.py    From tensor2tensor with Apache License 2.0
def activation_type(self):
    hparams = self._hparams
    if hparams.activation_dtype == "float32":
      activation_dtype = tf.float32
    elif hparams.activation_dtype == "float16":
      activation_dtype = tf.float16
    elif hparams.activation_dtype == "bfloat16":
      activation_dtype = tf.bfloat16
    else:
      raise ValueError(
          "unknown hparams.activation_dtype %s" % hparams.activation_dtype)
    return activation_dtype 
Example #11
Source File: mtf_resnet.py    From tensor2tensor with Apache License 2.0
def set_activation_type(self):
    hparams = self._hparams
    if hparams.activation_dtype == "float32":
      activation_dtype = tf.float32
    elif hparams.activation_dtype == "float16":
      activation_dtype = tf.float16
    elif hparams.activation_dtype == "bfloat16":
      activation_dtype = tf.bfloat16
    else:
      raise ValueError(
          "unknown hparams.activation_dtype %s" % hparams.activation_dtype)
    return activation_dtype 
Example #12
Source File: transformer_vae_flow_prior_ops.py    From tensor2tensor with Apache License 2.0
def _mixed_precision_is_enabled(hparams):
  """Should be the same as in common_attention, avoiding import."""
  activation_dtype = hparams.activation_dtype
  weight_dtype = hparams.weight_dtype
  return activation_dtype == tf.float16 and weight_dtype == tf.float32 
Example #13
Source File: quantization.py    From tensor2tensor with Apache License 2.0
def float16_activations_var_getter(getter, *args, **kwargs):
  """A custom getter function for float32 parameters and float16 activations.

  This function ensures the following:
    1. All variables requested with type fp16 are stored as type fp32.
    2. All variables requested with type fp32 are returned as type fp16.
  See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/
  #training_tensorflow for more information on this strategy.

  Args:
    getter: custom getter
    *args: arguments
    **kwargs: keyword arguments

  Returns:
    variables with the correct dtype.

  Raises:
    KeyError: if "dtype" is not provided as a kwarg.
  """
  requested_dtype = kwargs["dtype"]

  if requested_dtype == tf.float16:
    kwargs["dtype"] = tf.float32

  if requested_dtype == tf.float32:
    requested_dtype = tf.float16
  var = getter(*args, **kwargs)
  # This if statement is needed to guard the cast, because batch norm
  # assigns directly to the return value of this custom getter. The cast
  # makes the return value not a variable so it cannot be assigned. Batch
  # norm variables are always in fp32 so this if statement is never
  # triggered for them.
  if var.dtype.base_dtype != requested_dtype:
    var = tf.cast(var, requested_dtype)
  return var 
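A hedged usage sketch (hypothetical, not from the original file): the getter above is passed to tf.variable_scope as custom_getter, which yields float32 parameter storage with float16 activations.

# Hypothetical usage of float16_activations_var_getter defined above.
with tf.variable_scope('body',
                       custom_getter=float16_activations_var_getter):
  w = tf.get_variable('w', shape=[1024, 1024], dtype=tf.float16)
  # Storage dtype is float32; the tensor returned here is cast to float16.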
Example #14
Source File: nasnet_utils.py    From benchmarks with Apache License 2.0
def _stacked_separable_conv(net, stride, operation, filter_size):
  """Takes in an operations and parses it to the correct sep operation."""
  num_layers, kernel_size = _operation_to_info(operation)
  net_type = net.dtype
  net = tf.cast(net, tf.float32) if net_type == tf.float16 else net

  for layer_num in range(num_layers - 1):
    net = tf.nn.relu(net)
    net = slim.separable_conv2d(
        net,
        filter_size,
        kernel_size,
        depth_multiplier=1,
        scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
        stride=stride)
    net = slim.batch_norm(
        net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
    stride = 1
  net = tf.nn.relu(net)
  net = slim.separable_conv2d(
      net,
      filter_size,
      kernel_size,
      depth_multiplier=1,
      scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
      stride=stride)
  net = slim.batch_norm(
      net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
  net = tf.cast(net, net_type)
  return net 
Example #15
Source File: optimize.py    From tensor2tensor with Apache License 2.0
def _mixed_precision_is_enabled(hparams):
  """Should be the same as in common_attention, avoiding import."""
  activation_dtype = hparams.activation_dtype
  weight_dtype = hparams.weight_dtype
  return activation_dtype == tf.float16 and weight_dtype == tf.float32 
Example #16
Source File: py_func_batch_env.py    From tensor2tensor with Apache License 2.0
def simulate(self, action):
    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
      action: Tensor holding the batch of actions to apply.

    Returns:
      Operation.
    """
    with tf.name_scope("environment/simulate"):
      if action.dtype in (tf.float16, tf.float32, tf.float64):
        action = tf.check_numerics(action, "action")
      def step(action):
        step_response = self._batch_env.step(action)
        # Current env doesn't return `info`, but EnvProblem does.
        # TODO(afrozm): The proper way to do this is to make T2TGymEnv return
        # an empty info return value.
        if len(step_response) == 3:
          (observ, reward, done) = step_response
        else:
          (observ, reward, done, _) = step_response
        return (observ, reward.astype(np.float32), done)
      observ, reward, done = tf.py_func(
          step, [action],
          [self.observ_dtype, tf.float32, tf.bool], name="step")
      reward = tf.check_numerics(reward, "reward")
      reward.set_shape((len(self),))
      done.set_shape((len(self),))
      with tf.control_dependencies([self._observ.assign(observ)]):
        return tf.identity(reward), tf.identity(done) 
Example #17
Source File: convnet_builder.py    From benchmarks with Apache License 2.0
def get_custom_getter(self):
    """Returns a custom getter that this class's methods must be called under.

    All methods of this class must be called under a variable scope that was
    passed this custom getter. Example:

    ```python
    network = ConvNetBuilder(...)
    with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
      network.conv(...)
      # Call more methods of network here
    ```

    Currently, this custom getter only does anything if self.use_tf_layers is
    True. In that case, it causes variables to be stored as dtype
    self.variable_dtype, then cast to the requested dtype, instead of directly
    storing the variable as the requested dtype.
    """
    def inner_custom_getter(getter, *args, **kwargs):
      """Custom getter that forces variables to have type self.variable_type."""
      if not self.use_tf_layers:
        return getter(*args, **kwargs)
      requested_dtype = kwargs['dtype']
      if not (requested_dtype == tf.float32 and
              self.variable_dtype == tf.float16):
        # Only change the variable dtype if doing so does not decrease variable
        # precision.
        kwargs['dtype'] = self.variable_dtype
      var = getter(*args, **kwargs)
      # This if statement is needed to guard the cast, because batch norm
      # assigns directly to the return value of this custom getter. The cast
      # makes the return value not a variable so it cannot be assigned. Batch
      # norm variables are always in fp32 so this if statement is never
      # triggered for them.
      if var.dtype.base_dtype != requested_dtype:
        var = tf.cast(var, requested_dtype)
      return var
    return inner_custom_getter 
Example #18
Source File: benchmark_cnn_test.py    From benchmarks with Apache License 2.0
def _assert_correct_var_type(self, var, params):
    if 'gpu_cached_inputs' not in var.name:
      if params.use_fp16 and params.fp16_vars and 'batchnorm' not in var.name:
        expected_type = tf.float16
      else:
        expected_type = tf.float32
      self.assertEqual(var.dtype.base_dtype, expected_type) 
Example #19
Source File: official_ncf_model.py    From benchmarks with Apache License 2.0
def build_network(self, inputs, phase_train=True, nclass=1001):
    try:
      from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      # `e.message` in the original is Python 2 only; str(e) works everywhere.
      if 'neumf_model' not in str(e):
        raise
      raise ImportError('To use the experimental NCF model, you must clone the '
                        'repo https://github.com/tensorflow/models and add '
                        'tensorflow/models to the PYTHONPATH.')
    del nclass

    users, items, _ = inputs
    params = {
        'num_users': _NUM_USERS_20M,
        'num_items': _NUM_ITEMS_20M,
        'model_layers': (256, 256, 128, 64),
        'mf_dim': 64,
        'mf_regularization': 0,
        'mlp_reg_layers': (0, 0, 0, 0),
        'use_tpu': False
    }
    user_input = tf.keras.layers.Input(tensor=users, name='user_input')
    item_input = tf.keras.layers.Input(tensor=items, name='item_input')
    if self.data_type == tf.float32:
      keras_model = neumf_model.construct_model(user_input, item_input, params)
      logits = keras_model.output
    else:
      assert self.data_type == tf.float16
      old_floatx = tf.keras.backend.floatx()
      try:
        tf.keras.backend.set_floatx('float16')
        # We cannot rely on the variable_scope's fp16 custom getter here,
        # because the NCF model uses keras layers, which ignore variable scopes.
        # So we use a variable_creator_scope instead.
        with tf.variable_creator_scope(_fp16_variable_creator):
          keras_model = neumf_model.construct_model(user_input, item_input,
                                                    params)
        logits = tf.cast(keras_model.output, tf.float32)
      finally:
        tf.keras.backend.set_floatx(old_floatx)
    return model.BuildNetworkResult(logits=logits, extra_info=None) 
Example #20
Source File: model.py    From benchmarks with Apache License 2.0
def build_network(self,
                    inputs,
                    phase_train=True,
                    nclass=1001):
    """Returns logits from input images.

    Args:
      inputs: The input images and labels
      phase_train: True during training. False during evaluation.
      nclass: Number of classes that the images can belong to.

    Returns:
      A BuildNetworkResult which contains the logits and model-specific extra
        information.
    """
    images = inputs[0]
    images = self.gpu_preprocess_nhwc(images, phase_train)
    if self.data_format == 'NCHW':
      images = tf.transpose(images, [0, 3, 1, 2])
    var_type = tf.float32
    if self.data_type == tf.float16 and self.fp16_vars:
      var_type = tf.float16
    network = convnet_builder.ConvNetBuilder(
        images, self.depth, phase_train, self.use_tf_layers, self.data_format,
        self.data_type, var_type)
    with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
      self.add_inference(network)
      # Add the final fully-connected class layer
      logits = (
          network.affine(nclass, activation='linear')
          if not self.skip_final_affine_layer() else network.top_layer)
      mlperf.logger.log(key=mlperf.tags.MODEL_HP_FINAL_SHAPE,
                        value=logits.shape.as_list()[1:])
      aux_logits = None
      if network.aux_top_layer is not None:
        with network.switch_to_aux_top_layer():
          aux_logits = network.affine(nclass, activation='linear', stddev=0.001)
    if self.data_type == tf.float16:
      # TODO(reedwm): Determine if we should do this cast here.
      logits = tf.cast(logits, tf.float32)
      if aux_logits is not None:
        aux_logits = tf.cast(aux_logits, tf.float32)
    return BuildNetworkResult(
        logits=logits, extra_info=None if aux_logits is None else aux_logits)