Python tensorflow.python.ops.resource_variable_ops.ResourceVariable() Examples

The following are 27 code examples of tensorflow.python.ops.resource_variable_ops.ResourceVariable(), collected from open source projects; the source file and originating project are noted above each example. You may also want to check out the other available functions and classes of the module tensorflow.python.ops.resource_variable_ops.
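
As a quick orientation, here is a minimal usage sketch (assuming a TensorFlow 1.x runtime contemporary with these snippets; the variable name "counter" is illustrative):

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

counter = resource_variable_ops.ResourceVariable(0, dtype=tf.int64, name="counter")
increment = counter.assign_add(1)
with tf.Session() as sess:
    sess.run(counter.initializer)   # resource variables still need initialization
    sess.run(increment)
    print(sess.run(counter.read_value()))  # 1
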
Example #1
Source File: variable_scope.py    From lambda-packs with MIT License
def variable(initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             dtype=None):
  if get_variable_scope().use_resource:
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype)
  else:
    return variables.Variable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype) 
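
This helper is, roughly, the branch tf.get_variable takes under the hood. A hedged sketch of flipping that switch from user code (TF 1.x assumed; the scope and variable names are illustrative):

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

with tf.variable_scope("resource_scope", use_resource=True):
    v = tf.get_variable("v", shape=[2], dtype=tf.float32)

assert isinstance(v, resource_variable_ops.ResourceVariable)
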
Example #2
Source File: training_util.py    From keras-lambda with MIT License
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if global_step_tensor.get_shape().ndims != 0:
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape()) 
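
A short sketch of what this check accepts and rejects (TF 1.x assumed; the same helper is exposed publicly as tf.train.assert_global_step):

import tensorflow as tf

good = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
tf.train.assert_global_step(good)  # passes: scalar integer Variable

bad = tf.Variable(0.0, name="not_a_step")
tf.train.assert_global_step(bad)   # raises TypeError: non-integer dtype
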
Example #3
Source File: training_util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if (global_step_tensor.get_shape().ndims != 0 and
      global_step_tensor.get_shape().is_fully_defined()):
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape()) 
Example #4
Source File: saver.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, var, slice_spec, name):
      self._var_device = var.device
      if isinstance(var, ops.Tensor):
        self.handle_op = var.op.inputs[0]
        tensor = var
      elif isinstance(var, resource_variable_ops.ResourceVariable):

        def _read_variable_closure(v):
          def f():
            with ops.device(v.device):
              x = v.read_value()
            with ops.device("/device:CPU:0"):
              return array_ops.identity(x)
          return f

        self.handle_op = var.handle
        tensor = _read_variable_closure(var)
      else:
        raise ValueError(
            "Saveable is neither a resource variable nor a read operation."
            " Got: %s" % repr(var))
      spec = BaseSaverBuilder.SaveSpec(tensor, slice_spec, name)
      super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
          var, [spec], name) 
Example #5
Source File: variable_scope.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def variable(initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             dtype=None):
  if get_variable_scope().use_resource:
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype)
  else:
    return variables.Variable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype) 
Example #6
Source File: embedding_ops.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _gather(params, ids, name=None):
  """Helper function for _embedding_lookup_and_transform.

  This function gathers embeddings from a single tensor. The gather deals with
  resource variables specially.

  Args:
    params: A `Tensor` of embeddings.
    ids: A `Tensor` indexing the embeddings to be retrieved from `params`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `params`.
  """
  if isinstance(params, resource_variable_ops.ResourceVariable):
    return params.sparse_read(ids, name=name)
  else:
    return array_ops.gather(params, ids, name=name) 
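
The special case matters because a ResourceVariable reads rows through its handle rather than as a dense tensor. A hedged sketch of the two paths _gather chooses between (TF 1.x assumed; the values are illustrative):

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

params = resource_variable_ops.ResourceVariable([[1.0, 2.0], [3.0, 4.0]])
ids = tf.constant([1, 0])

rows_resource = params.sparse_read(ids)           # resource-variable path
rows_dense = tf.gather(params.read_value(), ids)  # equivalent dense gather
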
Example #7
Source File: training_util.py    From auto-alt-text-lambda-api with MIT License
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if global_step_tensor.get_shape().ndims != 0:
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape()) 
Example #8
Source File: dataset_ops.py    From lambda-packs with MIT License
def _estimate_data_distribution(c, num_examples_per_class_seen):
  """Estimate data distribution as labels are seen.

  Args:
    c: The class labels.  Type `int32`, shape `[batch_size]`.
    num_examples_per_class_seen: A `ResourceVariable` containing counts.
      Type `int64`, shape `[num_classes]`.

  Returns:
    dist: The updated distribution.  Type `float32`, shape `[num_classes]`.
  """
  num_classes = num_examples_per_class_seen.get_shape()[0].value
  # Update the class-count based on what labels are seen in
  # batch.  But do this asynchronously to avoid performing a
  # cross-device round-trip.  Just use the cached value.
  num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
      math_ops.reduce_sum(
          array_ops.one_hot(c, num_classes, dtype=dtypes.int64),
          0))
  init_prob_estimate = math_ops.truediv(
      num_examples_per_class_seen,
      math_ops.reduce_sum(num_examples_per_class_seen))
  return math_ops.cast(init_prob_estimate, dtypes.float32) 
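
Numerically, the update above is just a running class histogram normalized into a distribution. A small sketch of the same arithmetic in plain Python (the counts and batch are illustrative):

counts = [4, 6]            # num_examples_per_class_seen so far, two classes
batch = [0, 0, 1]          # class labels c observed in one batch
for label in batch:
    counts[label] += 1     # the assign_add of the summed one-hot vectors
total = sum(counts)        # 13
dist = [n / total for n in counts]  # [6/13, 7/13] ~= [0.46, 0.54]
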
Example #9
Source File: training_util.py    From lambda-packs with MIT License
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if global_step_tensor.get_shape().ndims != 0:
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape()) 
Example #10
Source File: embedding_ops.py    From lambda-packs with MIT License
def _do_gather(params, ids, name=None):
  """Deals with doing gather differently for resource variables."""
  if isinstance(params, resource_variable_ops.ResourceVariable):
    return params.sparse_read(ids, name=name)
  return array_ops.gather(params, ids, name=name) 
Example #11
Source File: function.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def getvar(
      self,
      getter,
      name,
      shape=None,
      dtype=None,
      initializer=None,
      reuse=None,
      trainable=True,
      collections=None,  # pylint: disable=redefined-outer-name
      use_resource=None,
      **kwargs):
    """A custom variable getter."""
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outermost graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if isinstance(var, resource_variable_ops.ResourceVariable):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var 
Example #12
Source File: test_util.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _eval_helper(self, tensors):
    if isinstance(tensors, ops.EagerTensor):
      return tensors.numpy()
    if isinstance(tensors, resource_variable_ops.ResourceVariable):
      return tensors.read_value().numpy()

    if isinstance(tensors, tuple):
      return tuple([self._eval_helper(t) for t in tensors])
    elif isinstance(tensors, list):
      return [self._eval_helper(t) for t in tensors]
    elif isinstance(tensors, dict):
      assert not tensors, "Only support empty dict now."
      return dict()
    else:
      raise ValueError("Unsupported type.") 
Example #13
Source File: graph_callable.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def __init__(self, name, initializer, shape, dtype, trainable):
    self.name = name
    if initializer is None:
      initializer = _default_initializer(name, shape, dtype)
    initial_value = lambda: initializer(shape, dtype=dtype)

    with context.eager_mode():
      self.variable = resource_variable_ops.ResourceVariable(
          initial_value=initial_value, name=name, dtype=dtype,
          trainable=trainable)
    self.shape = shape
    self.dtype = dtype
    self.placeholder = None
    self.trainable = trainable 
Example #14
Source File: normalization.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _assign_moving_average(self, variable, value, one_minus_decay):
    with ops.name_scope(None, 'AssignMovingAvg',
                        [variable, value, one_minus_decay]) as scope:
      with ops.colocate_with(variable):
        update_delta = math_ops.multiply(
            math_ops.subtract(variable.read_value(), value),
            one_minus_decay)
        if isinstance(variable, resource_variable_ops.ResourceVariable):
          # state_ops.assign_sub does an extra read_variable_op after the
          # assign. We avoid that here.
          return gen_resource_variable_ops.assign_sub_variable_op(
              variable.handle, update_delta, name=scope)
        else:
          return state_ops.assign_sub(variable, update_delta, name=scope) 
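
Algebraically, the update computes variable -= (variable - value) * (1 - decay), which is the usual exponential moving average variable = decay * variable + (1 - decay) * value. A numeric sketch in plain Python (the values are illustrative):

variable, value, one_minus_decay = 10.0, 4.0, 0.01
update_delta = (variable - value) * one_minus_decay  # 0.06
variable -= update_delta                             # 9.94 == 0.99*10 + 0.01*4
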
Example #15
Source File: optimizer.py    From lambda-packs with MIT License
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if v.op.type == "VarHandleOp":
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but  could not locate source variable." % (str(v)))
  return v 
Example #16
Source File: saver.py    From lambda-packs with MIT License
def __init__(self, var, slice_spec, name):
      if isinstance(var, ops.Tensor):
        self.handle_op = var.op.inputs[0]
      elif isinstance(var, resource_variable_ops.ResourceVariable):
        self.handle_op = var.handle
      else:
        raise ValueError(
            "Saveable is neither a resource variable nor a read operation."
            " Got: %s" % repr(var))
      spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name)
      super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
          var, [spec], name) 
Example #17
Source File: optimizer.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if context.in_eager_mode():
    return v
  if v.op.type == "VarHandleOp":
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but could not locate source variable." % (str(v)))
  return v 
Example #18
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testMinimizeSparseResourceVariable(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
        pred = tf.matmul(tf.nn.embedding_lookup([var0], [0]), x)
        pred += var1
        loss = pred * pred
        sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - np_grad], var1.eval()) 
Example #19
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testMinimizeResourceVariable(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
        pred = tf.matmul(var0, x) + var1
        loss = pred * pred
        sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - np_grad], var1.eval()) 
Example #20
Source File: gradient_descent_test.py    From deep_image_model with Apache License 2.0
def testBasicResourceVariable(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval()) 
Example #21
Source File: variables_test.py    From tf-slim with Apache License 2.0
def testResourceVariable(self):
    a = variables_lib2.global_variable(0, use_resource=False)
    b = variables_lib2.global_variable(0, use_resource=True)
    self.assertIsInstance(a, variables_lib.Variable)
    self.assertNotIsInstance(a, resource_variable_ops.ResourceVariable)
    self.assertIsInstance(b, resource_variable_ops.ResourceVariable) 
Example #22
Source File: variables_test.py    From tf-slim with Apache License 2.0
def testResourceVariable(self):
    a = variables_lib2.local_variable(0, use_resource=False)
    b = variables_lib2.local_variable(0, use_resource=True)
    self.assertIsInstance(a, variables_lib.Variable)
    self.assertNotIsInstance(a, resource_variable_ops.ResourceVariable)
    self.assertIsInstance(b, resource_variable_ops.ResourceVariable) 
Example #23
Source File: function.py    From lambda-packs with MIT License
def getvar(
      self,
      getter,
      name,
      shape=None,
      dtype=None,
      initializer=None,
      reuse=None,
      trainable=True,
      collections=None,  # pylint: disable=redefined-outer-name
      use_resource=None,
      **kwargs):
    """A custom variable getter."""
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outermost graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if isinstance(var, resource_variable_ops.ResourceVariable):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var 
Example #24
Source File: feature_column.py    From lambda-packs with MIT License
def _is_variable(v):
  """Returns true if `v` is a variable."""
  return isinstance(v, (variables.Variable,
                        resource_variable_ops.ResourceVariable)) 
Example #25
Source File: hook.py    From PySyft-TensorFlow with Apache License 2.0
def _hook_variable(self, syft_type: type):
        """Adds PySyft Tensor functionality to tf.Variable.

        In practice, the user is generally working with subclasses of
        tf.Variable, e.g. ResourceVariable, so we hook methods for those and
        only override the tf.Variable constructor to provide syft registration.
        You may read about what kind of modifications are made in the methods
        that this method calls.

        Args:
            syft_type: The abstract type whose methods should all be added to
                the ResourceVariable class.
        """
        # Reinitialize the init method of tf.Variable with the Syft init
        self._add_registration_to___init__(tf.Variable)

        # Overload tf.Variable properties with Syft properties
        self._hook_properties(tf.Variable)

        # Transfer the Syft methods onto ResourceVariable, excluding the
        # special methods listed below
        exclude = [
            "__class__",
            "__delattr__",
            "__dict__",
            "__dir__",
            "__doc__",
            "__format__",
            "__getattribute__",
            "__hash__",
            "__init__",
            "__init_subclass__",
            "__weakref__",
            "__module__",
            "__ne__",
            "__new__",
            "__reduce__",
            "__reduce_ex__",
            "__setattr__",
            "__sizeof__",
            "__subclasshook__",
        ]
        self._transfer_methods_to_framework_class(ResourceVariable, syft_type, exclude)
        self._hook_properties(ResourceVariable)
        self._hook_native_methods(ResourceVariable) 
Example #26
Source File: saver.py    From lambda-packs with MIT License
def OpListToDict(op_list):
    """Create a dictionary of names to operation lists.

    Args:
      op_list: A list, tuple, or set of Variables or SaveableObjects.

    Returns:
      A dictionary of names to the operations that must be saved under
      that name.  Variables with save_slice_info are grouped together under the
      same key in no particular order.

    Raises:
      TypeError: If the type of op_list or its elements is not supported.
      ValueError: If at least two saveables share the same name.
    """
    if not isinstance(op_list, (list, tuple, set)):
      raise TypeError("Variables to save should be passed in a dict or a "
                      "list: %s" % op_list)
    op_list = set(op_list)
    names_to_saveables = {}
    # pylint: disable=protected-access
    for var in op_list:
      if isinstance(var, BaseSaverBuilder.SaveableObject):
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.PartitionedVariable):
        if var.name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           var.name)
        names_to_saveables[var.name] = var
      elif ((isinstance(var, variables.Variable) or
             isinstance(var, resource_variable_ops.ResourceVariable)) and
            var._save_slice_info):
        name = var._save_slice_info.full_name
        if name in names_to_saveables:
          if not isinstance(names_to_saveables[name], list):
            raise ValueError("Mixing slices and non-slices with the same name: "
                             "%s" % name)
          names_to_saveables[name].append(var)
        else:
          names_to_saveables[name] = [var]
      else:
        var = ops.internal_convert_to_tensor(var, as_ref=True)
        if not BaseSaverBuilder._IsVariable(var):
          raise TypeError("Variable to save is not a Variable: %s" % var)
        if var.op.type == "ReadVariableOp":
          name = var.op.inputs[0].op.name
        else:
          name = var.op.name
        if name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           name)
        names_to_saveables[name] = var
      # pylint: enable=protected-access
    return names_to_saveables 
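
A hedged usage sketch of the mapping this builds (TF 1.x graph mode assumed; saver_module stands for the tensorflow.python.training.saver module quoted above):

import tensorflow as tf
from tensorflow.python.training import saver as saver_module

v = tf.Variable(1.0, name="v")
w = tf.get_variable("w", shape=[], use_resource=True)
names = saver_module.BaseSaverBuilder.OpListToDict([v, w])
# names maps each saveable's name to the tensor(s) to save; for the
# ResourceVariable the key comes from its handle op ("w"), since the
# converted tensor is the output of a ReadVariableOp
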
Example #27
Source File: saver.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def OpListToDict(op_list):
    """Create a dictionary of names to operation lists.

    Args:
      op_list: A list, tuple, or set of Variables or SaveableObjects.

    Returns:
      A dictionary of names to the operations that must be saved under
      that name.  Variables with save_slice_info are grouped together under the
      same key in no particular order.

    Raises:
      TypeError: If the type of op_list or its elements is not supported.
      ValueError: If at least two saveables share the same name.
    """
    if not isinstance(op_list, (list, tuple, set)):
      raise TypeError("Variables to save should be passed in a dict or a "
                      "list: %s" % op_list)
    op_list = set(op_list)
    names_to_saveables = {}
    # pylint: disable=protected-access
    for var in op_list:
      if isinstance(var, BaseSaverBuilder.SaveableObject):
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.PartitionedVariable):
        if var.name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           var.name)
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.Variable) and var._save_slice_info:
        name = var._save_slice_info.full_name
        if name in names_to_saveables:
          if not isinstance(names_to_saveables[name], list):
            raise ValueError("Mixing slices and non-slices with the same name: "
                             "%s" % name)
          names_to_saveables[name].append(var)
        else:
          names_to_saveables[name] = [var]
      else:
        if context.in_graph_mode():
          var = ops.internal_convert_to_tensor(var, as_ref=True)
          if not BaseSaverBuilder._IsVariable(var):
            raise TypeError("Variable to save is not a Variable: %s" % var)
          if var.op.type == "ReadVariableOp":
            name = var.op.inputs[0].op.name
          else:
            name = var.op.name
          if name in names_to_saveables:
            raise ValueError("At least two variables have the same name: %s" %
                             name)
          names_to_saveables[name] = var
        else:
          if not isinstance(var, resource_variable_ops.ResourceVariable):
            raise ValueError("Can only save/restore ResourceVariable eager "
                             "mode is enabled, type: %s." % type(var))
          names_to_saveables[var._shared_name] = var

      # pylint: enable=protected-access
    return names_to_saveables