Python tensorflow.NodeDef() Examples

The following are 30 code examples of tensorflow.NodeDef(), drawn from open-source projects. The originating source file, project, and license are noted above each example.
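As a quick orientation before the examples, here is a minimal sketch (assuming TensorFlow 1.x; under TensorFlow 2.x the class is available as tf.compat.v1.NodeDef) of the two ways a NodeDef typically appears below: built directly as a protobuf, or read off an existing operation via op.node_def.

import tensorflow as tf

# Build a bare NodeDef protobuf by hand, as Examples #18, #19 and #23 do.
node = tf.NodeDef(name="my_var", op="VariableV2")

# Or pull the NodeDef off an existing operation in a graph.
with tf.Graph().as_default():
    c = tf.constant([1.0, 2.0], name="c")
    node_def = c.op.node_def                      # op type is "Const"
    tensor_proto = node_def.attr["value"].tensor  # embedded TensorProto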
Example #1
Source File: register.py    From tf-encrypted with Apache License 2.0
def _split(converter, node: Any, inputs: List[str]) -> Any:
    if node.op == "SplitV":
        # node.op is SplitV when num_or_size_splits is a list
        x_in = converter.outputs[inputs[0]]
        size_splits = converter.outputs[inputs[1]]
        axis = converter.outputs[inputs[2]]

        size_splits = size_splits.attr["value"].tensor
        num_or_size_splits = list(array.array("I", size_splits.tensor_content))

    else:
        # node.op is Split when num_or_size_splits is an integer
        axis = converter.outputs[inputs[0]]
        x_in = converter.outputs[inputs[1]]

        num_or_size_splits = node.attr["num_split"].i

    if isinstance(x_in, tf.NodeDef):
        input_out = _nodef_to_private_pond(converter, x_in)
    else:
        input_out = x_in

    axis_val = axis.attr["value"].tensor.int_val[0]

    return tfe.split(input_out, num_or_size_splits, axis_val) 
Example #2
Source File: abstract_net.py    From bonnet with GNU General Public License v3.0
def assign_to_device(self, op_dev, var_dev='/cpu:0'):
    """Returns a function to place variables on the var_dev, and the ops in the
    op_dev.

    Args:
      op_dev: Device for ops
      var_dev: Device for variables
    """
    VAR_OPS = ['Variable', 'VariableV2', 'AutoReloadVariable',
               'MutableHashTable', 'MutableHashTableOfTensors',
               'MutableDenseHashTable']

    def _assign(op):
      node_def = op if isinstance(op, tf.NodeDef) else op.node_def
      if node_def.op in VAR_OPS:
        return "/" + var_dev
      else:
        return op_dev
    return _assign 
Example #3
Source File: optimize_for_inference_lib.py    From deep_image_model with Apache License 2.0
def node_from_map(node_map, name):
  """Pulls a node def from a dictionary for a given name.

  Args:
    node_map: Dictionary containing an entry indexed by name for every node.
    name: Identifies the node we want to find.

  Returns:
    NodeDef of the node with the given name.

  Raises:
    ValueError: If the node isn't present in the dictionary.
  """
  stripped_name = node_name_from_input(name)
  if stripped_name not in node_map:
    raise ValueError("No node named '%s' found in map." % name)
  return node_map[stripped_name] 
Example #4
Source File: optimize_for_inference_lib.py    From deep_image_model with Apache License 2.0
def values_from_const(node_def):
  """Extracts the values from a const NodeDef as a numpy ndarray.

  Args:
    node_def: Const NodeDef that has the values we want to access.

  Returns:
    Numpy ndarray containing the values.

  Raises:
    ValueError: If the node isn't a Const.
  """
  if node_def.op != "Const":
    raise ValueError(
        "Node named '%s' should be a Const op for values_from_const." %
        node_def.name)
  input_tensor = node_def.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  return tensor_value 
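For instance, applied to the NodeDef of a freshly created constant (a sketch, assuming TensorFlow 1.x graph mode and the tensor_util import from the original file):

import tensorflow as tf

with tf.Graph().as_default():
    c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    arr = values_from_const(c.op.node_def)  # numpy array [[1. 2.] [3. 4.]]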
Example #5
Source File: quantize_graph.py    From deep_image_model with Apache License 2.0
def quantize_nodes_recursively(self, current_node):
    """The entry point for quantizing nodes to eight bit and back."""
    if self.already_visited[current_node.name]:
      return
    self.already_visited[current_node.name] = True
    for input_node_name in current_node.input:
      input_node_name = node_name_from_input(input_node_name)
      input_node = self.nodes_map[input_node_name]
      self.quantize_nodes_recursively(input_node)
    nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
    if any(current_node.op in s for s in nodes_to_quantize):
      for input_name in current_node.input:
        input_name = node_name_from_input(input_name)
        input_node = self.nodes_map[input_name]
        self.quantize_node(input_node)
      self.quantize_node(current_node)
    else:
      new_node = tf.NodeDef()
      new_node.CopyFrom(current_node)
      self.add_output_graph_node(new_node) 
Example #6
Source File: utils.py    From ngraph-python with Apache License 2.0
def tf_obj_shape(input):
    """
    Convert tf objects to shape tuple.

    Arguments:
        input: tf.TensorShape, tf.Tensor, tf.AttrValue or tf.NodeDef
               the corresponding tensorflow object

    Returns:
        tuple: shape of the tensorflow object
    """
    if isinstance(input, tf.TensorShape):
        return tuple([int(i.value) for i in input])
    elif isinstance(input, tf.Tensor):
        return tf_obj_shape(input.get_shape())
    elif isinstance(input, tf.AttrValue):
        return tuple([int(d.size) for d in input.shape.dim])
    elif isinstance(input, tf.NodeDef):
        return tf_obj_shape(input.attr['shape'])
    else:
        raise TypeError("Input to `tf_obj_shape` has the wrong type.") 
Example #7
Source File: register.py    From tf-encrypted with Apache License 2.0
def _nodef_to_numpy_array(x):
    """Map a NodeDef x to a np.array."""
    dtype = x.attr["dtype"].type
    x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]

    content = x.attr["value"].tensor.tensor_content

    if dtype == tf.float32:
        type_code = "f"
        if not content:
            content = x.attr["value"].tensor.float_val
    elif dtype == tf.float64:
        type_code = "d"
        if not content:
            content = x.attr["value"].tensor.double_val
    elif dtype == tf.int32:
        type_code = "i"
        if not content:
            content = x.attr["value"].tensor.int_val
    else:
        raise TypeError("Unsupported dtype")

    nums = array.array(type_code, content)

    return np.array(nums).reshape(x_shape) 
Example #8
Source File: gpu_utils.py    From transformer-xl-chinese with Apache License 2.0
def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
    def _assign(op):
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op == "Variable":
            return ps_dev
        else:
            return "/gpu:%d" % gpu
    return _assign 
Example #9
Source File: refine_graph.py    From nnabla with Apache License 2.0
def affine(self, op_list):
    if not self.affine_resolved:
        transpose_perm_name = self.fork_name("transpose_perm")
        transpose_perm = np.array([0, 3, 1, 2], dtype=np.int32)
        transpose_perm_tensor = self._create_const_node(
            transpose_perm_name, transpose_perm)
        self.op_dict[transpose_perm_name] = Operator(transpose_perm_tensor)

        input = op_list[0].node.input[0]
        transpose_name = self.fork_name("transpose")
        node = tf.NodeDef()
        node.name = transpose_name
        node.op = "Transpose"
        node.input.extend([input, transpose_perm_name])
        node.attr['T'].type = op_list[0].node.attr['T'].type
        self.op_dict[node.name] = Operator(node)
        self.op_dict[node.name].inputs = [
            self.op_dict[input], self.op_dict[transpose_perm_name]]
        self.op_dict[node.name].outputs = op_list[0]
        self.op_dict[input].outputs.remove(op_list[0])
        self.op_dict[input].outputs.append(self.op_dict[node.name])
        op_list[0].node.input[0] = transpose_name
        op_list[0].inputs.remove(self.op_dict[input])
        op_list[0].inputs.append(self.op_dict[node.name])
        self.affine_resolved = True
    return op_list[-1] 
Example #10
Source File: gpu_utils.py    From transformer-xl with Apache License 2.0
def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
    def _assign(op):
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op == "Variable":
            return ps_dev
        else:
            return "/gpu:%d" % gpu
    return _assign 
Example #11
Source File: model_deploy.py    From Cross-Modal-Projection-Learning with MIT License
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
        device += self._ps_device
    device += '/device:CPU:0'

    class _PSDeviceChooser(object):
        """Slim device chooser for variables when using PS."""

        def __init__(self, device, tasks):
            self._device = device
            self._tasks = tasks
            self._task = 0

        def choose(self, op):
            if op.device:
                return op.device
            node_def = op if isinstance(op, tf.NodeDef) else op.node_def
            if node_def.op == 'Variable':
                t = self._task
                self._task = (self._task + 1) % self._tasks
                d = '%s/task:%d' % (self._device, t)
                return d
            else:
                return op.device

    if not self._num_ps_tasks:
        return device
    else:
        chooser = _PSDeviceChooser(device, self._num_ps_tasks)
        return chooser.choose 
Example #12
Source File: model_deploy.py    From uai-sdk with Apache License 2.0
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    device += '/device:CPU:0'

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0

      def choose(self, op):
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op.startswith('Variable'):
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose 
Example #13
Source File: model_deploy.py    From tumblr-emotions with Apache License 2.0
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    device += '/device:CPU:0'

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0

      def choose(self, op):
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op.startswith('Variable'):
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose 
Example #14
Source File: model_deploy.py    From TwinGAN with Apache License 2.0
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    if not self._clone_on_cpu:
      # According to nvidia, GPU may work better for single GPU training if the network is small.
      device += '/device:GPU:%d' %(self._gpu_start_index)

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0

      def choose(self, op):
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op.startswith('Variable'):
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose 
Example #15
Source File: model_deploy.py    From YOLO2TensorFlow with Apache License 2.0
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    device += '/device:CPU:0'

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0

      def choose(self, op):
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op == 'Variable':
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose 
Example #16
Source File: model_deploy.py    From Action_Recognition_Zoo with MIT License
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    device += '/device:CPU:0'

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0

      def choose(self, op):
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op == 'Variable':
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose 
Example #17
Source File: model_deploy.py    From aster with MIT License
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    device += '/device:GPU:0'

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0

      def choose(self, op):
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op.startswith('Variable'):
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose 
Example #18
Source File: variables.py    From Action_Recognition_Zoo with MIT License
def variable_device(device, name):
  """Fix the variable device to colocate its ops."""
  if callable(device):
    var_name = tf.get_variable_scope().name + '/' + name
    var_def = tf.NodeDef(name=var_name, op='Variable')
    device = device(var_def)
  if device is None:
    device = ''
  return device 
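A small sketch of both branches (hypothetical chooser function; assuming TensorFlow 1.x for tf.get_variable_scope): a callable device receives the synthetic 'Variable' NodeDef, while a non-callable device is returned as-is (None becomes the empty string).

def chooser(node_def):
    # Hypothetical device function: pin anything that looks like a variable to the CPU.
    return '/cpu:0' if node_def.op == 'Variable' else ''

variable_device(chooser, 'weights')   # '/cpu:0'
variable_device('/gpu:0', 'weights')  # '/gpu:0'
variable_device(None, 'weights')      # ''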
Example #19
Source File: variables.py    From terngrad with Apache License 2.0
def variable_device(device, name):
  """Fix the variable device to colocate its ops."""
  if callable(device):
    var_name = tf.get_variable_scope().name + '/' + name
    var_def = tf.NodeDef(name=var_name, op='Variable')
    device = device(var_def)
  if device is None:
    device = ''
  return device 
Example #20
Source File: models.py    From Object_Detection_Tracking with Apache License 2.0
def assign_to_device(compute_device, controller_device): # the controller acts as the parameter server
  """Returns a function to place variables on the controller_device.

  Args:
    compute_device: Device for everything but variables.
    controller_device: Device to put the variables on. Example values are
      /GPU:0 and /CPU:0.

  If controller_device is not set then the variables will be placed on the
  default device.
  The best device for shared variables depends on the platform as well as the
  model. Start with CPU:0 and then test GPU:0 to see if there is an
  improvement.
  """
  def _assign(op):
    node_def = op if isinstance(op, tf.NodeDef) else op.node_def
    if node_def.op in PS_OPS:
      return controller_device
    else:
      return compute_device
  return _assign


#----------------------------------


# 05/2019, the code will still use other gpu even if we have set visible list;
# seems a v1.13 bug
# yes it is a v1.13 bug, something to do with XLA:
# https://github.com/horovod/horovod/issues/876 
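A minimal usage sketch of the device function returned above (the PS_OPS list here is a hypothetical stand-in for the module-level constant in models.py; assuming TensorFlow 1.x, where tf.device accepts a callable):

import tensorflow as tf

PS_OPS = ["Variable", "VariableV2", "VarHandleOp"]  # hypothetical stand-in

with tf.Graph().as_default():
    with tf.device(assign_to_device("/gpu:0", "/cpu:0")):
        w = tf.get_variable("w", shape=[3, 3])  # variable op -> placed on /cpu:0
        y = tf.matmul(w, w)                     # compute op  -> placed on /gpu:0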
Example #21
Source File: gpu_utils.py    From embedding-as-service with MIT License
def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
    def _assign(op):
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op == "Variable":
            return ps_dev
        else:
            return "/gpu:%d" % gpu
    return _assign 
Example #22
Source File: model_deploy.py    From BMW-TensorFlow-Training-GUI with Apache License 2.0
def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()`.
    """
    device = ''
    if self._num_ps_tasks > 0:
      device += self._ps_device
    device += '/device:CPU:0'

    class _PSDeviceChooser(object):
      """Slim device chooser for variables when using PS."""

      def __init__(self, device, tasks):
        self._device = device
        self._tasks = tasks
        self._task = 0

      def choose(self, op):
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op.startswith('Variable'):
          t = self._task
          self._task = (self._task + 1) % self._tasks
          d = '%s/task:%d' % (self._device, t)
          return d
        else:
          return op.device

    if not self._num_ps_tasks:
      return device
    else:
      chooser = _PSDeviceChooser(device, self._num_ps_tasks)
      return chooser.choose 
Example #23
Source File: variables.py    From deeplearning-benchmark with Apache License 2.0
def variable_device(device, name):
  """Fix the variable device to colocate its ops."""
  if callable(device):
    var_name = tf.get_variable_scope().name + '/' + name
    var_def = tf.NodeDef(name=var_name, op='Variable')
    device = device(var_def)
  if device is None:
    device = ''
  return device 
Example #24
Source File: register.py    From tf-encrypted with Apache License 2.0
def _nodef_to_private_pond(converter, x):
    """Map a NodeDef x to a PrivatePondTensor."""
    dtype = x.attr["dtype"].type
    warn_msg = "Unexpected dtype {} found at node {}"
    err_msg = "Unsupported dtype {} found at node {}"

    x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]

    if not x_shape:
        if dtype == tf.float32:
            nums = x.attr["value"].tensor.float_val
        elif dtype == tf.float64:
            nums = x.attr["value"].tensor.double_val
        elif dtype == tf.int32:
            logging.warning(warn_msg, dtype, x.name)
            nums = x.attr["value"].tensor.int_val
        else:
            raise TypeError(err_msg.format(dtype, x.name))

        def inputter_fn():
            return tf.constant(np.array(nums).reshape(1, 1))

    else:
        if dtype == tf.float32:
            nums = array.array("f", x.attr["value"].tensor.tensor_content)
        elif dtype == tf.float64:
            nums = array.array("d", x.attr["value"].tensor.tensor_content)
        elif dtype == tf.int32:
            logging.warning(warn_msg, dtype, x.name)
            nums = array.array("i", x.attr["value"].tensor.tensor_content)
        else:
            raise TypeError(err_msg.format(dtype, x.name))

        def inputter_fn():
            return tf.constant(np.array(nums).reshape(x_shape))

    x_private = tfe.define_private_input(converter.model_provider, inputter_fn)

    return x_private 
Example #25
Source File: register.py    From tf-encrypted with Apache License 2.0
def _nodef_to_public_pond(converter, x):
    """Map a NodeDef x to a PublicPondTensor."""
    dtype = x.attr["dtype"].type
    x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]

    if not x_shape:
        if dtype == tf.float32:
            nums = x.attr["value"].tensor.float_val
        elif dtype == tf.float64:
            nums = x.attr["value"].tensor.double_val
        elif dtype == tf.int32:
            nums = x.attr["value"].tensor.int_val
        else:
            raise TypeError("Unsupported dtype")

        def inputter_fn():
            return tf.constant(np.array(nums).reshape(1, 1))

    else:
        if dtype == tf.float32:
            nums = array.array("f", x.attr["value"].tensor.tensor_content)
        elif dtype == tf.float64:
            nums = array.array("d", x.attr["value"].tensor.tensor_content)
        elif dtype == tf.int32:
            nums = array.array("i", x.attr["value"].tensor.tensor_content)
        else:
            raise TypeError("Unsupported dtype")

        def inputter_fn():
            return tf.constant(np.array(nums).reshape(x_shape))

    x_public = tfe.define_public_input(converter.model_provider, inputter_fn)

    return x_public 
Example #26
Source File: register.py    From tf-encrypted with Apache License 2.0
def _slice(converter, node, inputs):
    x_in = converter.outputs[inputs[0]]
    begin = _nodef_to_numpy_array(converter.outputs[inputs[1]])
    size = _nodef_to_numpy_array(converter.outputs[inputs[2]])

    if isinstance(x_in, tf.NodeDef):
        input_out = _nodef_to_private_pond(converter, x_in)
    else:
        input_out = x_in

    # Slice is a special case of strided_slice. Slice takes size (the number of
    # elements we want to slice) as an input, whereas strided_slice takes end
    # (the index until which the slicing takes place) as an input.
    # We can infer the end parameter with: end[i] = begin[i] + size[i].
    # If size is negative, the slice extends over the remaining elements of
    # that dimension. In this case we can infer the end parameter with:
    # end[i] = input_shape[i] - size[i] + 1
    end = np.zeros(len(begin))
    input_shape = x_in.shape.as_list()

    # if size is negative take the input dimension
    for i in range(len(end)):  # pylint: disable=consider-using-enumerate
        if size[i] < 0:
            end[i] = input_shape[i] - size[i] + 1
        else:
            end[i] = begin[i] + size[i]

    return tfe.strided_slice(input_out, begin, end)


# pylint: enable=unused-argument
# pylint: enable=missing-docstring 
Example #27
Source File: register.py    From tf-encrypted with Apache License 2.0
def _sub(converter, node: Any, inputs: List[str]) -> Any:
    a = converter.outputs[inputs[0]]
    b = converter.outputs[inputs[1]]

    if isinstance(a, tf.NodeDef):
        a_out = _nodef_to_public_pond(converter, a)
    else:
        a_out = a

    if isinstance(b, tf.NodeDef):
        b_out = _nodef_to_public_pond(converter, b)
    else:
        b_out = b

    return tfe.sub(a_out, b_out) 
Example #28
Source File: register.py    From tf-encrypted with Apache License 2.0
def _add(converter, node: Any, inputs: List[str]) -> Any:
    a = converter.outputs[inputs[0]]
    b = converter.outputs[inputs[1]]

    if isinstance(a, tf.NodeDef):
        a_out = _nodef_to_public_pond(converter, a)
    else:
        a_out = a

    if isinstance(b, tf.NodeDef):
        b_out = _nodef_to_public_pond(converter, b)
    else:
        b_out = b

    return tfe.add(a_out, b_out) 
Example #29
Source File: register.py    From tf-encrypted with Apache License 2.0
def _rsqrt(converter, node: Any, inputs: List[str]) -> Any:
    x_in = converter.outputs[inputs[0]]

    if isinstance(x_in, tf.NodeDef):
        tensor = x_in.attr["value"].tensor
        shape = [i.size for i in tensor.tensor_shape.dim]

        dtype = x_in.attr["dtype"].type
        if dtype == tf.float32:
            nums = array.array("f", tensor.tensor_content)
        elif dtype == tf.float64:
            nums = array.array("d", tensor.tensor_content)

        else:
            raise TypeError("Unsupported dtype for rsqrt")

        def inputter_fn():
            return tf.constant(1 / np.sqrt(np.array(nums).reshape(shape)))

    else:
        # XXX this is a little weird but the input into rsqrt is public and
        # being used only for batchnorm at the moment
        prot = tfe.get_protocol()
        # pylint: disable=protected-access
        decoded = prot._decode(x_in.value_on_0, True)

        # pylint: enable=protected-access

        def inputter_fn():
            return tf.rsqrt(decoded)

    x = tfe.define_public_input(converter.model_provider, inputter_fn)

    return x 
Example #30
Source File: register.py    From tf-encrypted with Apache License 2.0
def _gather(converter, node: Any, inputs: List[str]) -> Any:
    x_in = converter.outputs[inputs[0]]
    indices = converter.outputs[inputs[1]]
    axis = converter.outputs[inputs[2]]

    if isinstance(x_in, tf.NodeDef):
        input_out = _nodef_to_private_pond(converter, x_in)
    else:
        input_out = x_in

    indices_out = list(_nodef_to_numpy_array(indices))

    axis_val = axis.attr["value"].tensor.int_val[0]

    return tfe.gather(input_out, indices_out, axis_val)