Python tensorflow.tensor() Examples

The following are 30 code examples of tensorflow.tensor(), drawn from open source projects. Each example lists the source file and project it comes from. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: layers.py    From MobileNet with Apache License 2.0
def __dense_p(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
              bias=0.0):
    """
    Fully connected layer
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, D).
    :param w: (tf.tensor) An optional pretrained weight matrix; created from the initializer if None.
    :param output_dim: (integer) It specifies H, the second dimension of the fully connected layer's output [i.e. (N, H)].
    :param initializer: (tf.contrib initializer) The initialization scheme; He et al. normal or Xavier normal are recommended.
    :param l2_strength: (float) L2 regularization (weight decay) parameter.
    :param bias: (float) Amount of bias. (if not a float, it is treated as a pretrained bias tensor)
    :return out: The output of the layer. (N, H)
    """
    n_in = x.get_shape()[-1].value
    with tf.variable_scope(name):
        if w is None:
            w = __variable_with_weight_decay([n_in, output_dim], initializer, l2_strength)
        __variable_summaries(w)
        if isinstance(bias, float):
            bias = tf.get_variable("layer_biases", [output_dim], tf.float32, tf.constant_initializer(bias))
        __variable_summaries(bias)
        output = tf.nn.bias_add(tf.matmul(x, w), bias)
        return output 
Example #2
Source File: layers.py    From deepchem with MIT License
def distance_matrix(self, D):
    """Calcuates the distance matrix from the distance tensor

    B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features

    Parameters
    ----------
    D: tf.Tensor of shape (B, N, M, d)
      Distance tensor.

    Returns
    -------
    R: tf.Tensor of shape (B, N, M)
       Distance matrix.
    """
    R = tf.reduce_sum(tf.multiply(D, D), 3)
    R = tf.sqrt(R)
    return R 
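The reduction squares each entry of D, sums over the trailing feature axis, and takes the square root, i.e. the Euclidean norm of each length-d feature vector. A minimal standalone sketch of the same computation, assuming TF 2.x eager execution and illustrative sizes:

import tensorflow as tf

D = tf.random.normal((2, 5, 4, 3))           # (B, N, M, d) distance tensor
R = tf.sqrt(tf.reduce_sum(D * D, axis=3))    # (B, N, M) Euclidean norms
print(R.shape)                               # (2, 5, 4)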
Example #3
Source File: tensor.py    From spleeter with MIT License
def check_tensor_shape(tensor_tf, target_shape):
    """ Return a Tensorflow boolean graph that indicates whether
    sample[features_key] has the specified target shape. Only check
    not None entries of target_shape.

    :param tensor_tf: Tensor to check shape for.
    :param target_shape: Target shape to compare tensor to.
    :returns: True if shape is valid, False otherwise (as TF boolean).
    """
    result = tf.constant(True)
    for i, target_length in enumerate(target_shape):
        if target_length:
            result = tf.logical_and(
                result,
                tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
    return result 
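Since entries of target_shape that are None are skipped, partial shape checks are possible. A usage sketch, assuming TF 2.x eager execution so the returned boolean node evaluates immediately:

import tensorflow as tf

waveform = tf.zeros((1024, 2))
# Only the channel dimension is checked; the first entry is None:
ok = check_tensor_shape(waveform, (None, 2))     # tf.Tensor(True)
bad = check_tensor_shape(waveform, (None, 1))    # tf.Tensor(False)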
Example #4
Source File: layers.py    From deepchem with MIT License
def call(self, inputs):
    """Invokes this layer.

    Parameters
    ----------
    inputs: list
      Should be of form `inputs=[coords, nbr_list]` where `coords` is a tensor of shape `(None, N, 3)` and `nbr_list` is a list.
    """
    if len(inputs) != 2:
      raise ValueError("InteratomicDistances requires coords,nbr_list")
    coords, nbr_list = (inputs[0], inputs[1])
    N_atoms, M_nbrs, ndim = self.N_atoms, self.M_nbrs, self.ndim
    # Shape (N_atoms, M_nbrs, ndim)
    nbr_coords = tf.gather(coords, nbr_list)
    # Shape (N_atoms, M_nbrs, ndim)
    tiled_coords = tf.tile(
        tf.reshape(coords, (N_atoms, 1, ndim)), (1, M_nbrs, 1))
    # Shape (N_atoms, M_nbrs)
    return tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=2) 
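The gather-and-tile pattern pairs every atom's coordinates with those of its M_nbrs neighbors before reducing to squared distances. A standalone sketch of the same pattern with hypothetical sizes, assuming TF 2.x eager execution:

import tensorflow as tf

N_atoms, M_nbrs, ndim = 6, 3, 3
coords = tf.random.normal((N_atoms, ndim))
nbr_list = tf.random.uniform((N_atoms, M_nbrs), maxval=N_atoms, dtype=tf.int32)
nbr_coords = tf.gather(coords, nbr_list)                      # (N_atoms, M_nbrs, ndim)
tiled = tf.tile(tf.reshape(coords, (N_atoms, 1, ndim)), (1, M_nbrs, 1))
sq_dists = tf.reduce_sum((tiled - nbr_coords) ** 2, axis=2)   # (N_atoms, M_nbrs)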
Example #5
Source File: tensor.py    From spleeter with MIT License
def from_float32_to_uint8(
        tensor,
        tensor_key='tensor',
        min_key='min',
        max_key='max'):
    """

    :param tensor:
    :param tensor_key:
    :param min_key:
    :param max_key:
    :returns:
    """
    tensor_min = tf.reduce_min(tensor)
    tensor_max = tf.reduce_max(tensor)
    return {
        tensor_key: tf.cast(
            (tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
            * 255.9999, dtype=tf.uint8),
        min_key: tensor_min,
        max_key: tensor_max
    } 
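Because the original minimum and maximum are returned alongside the quantized tensor, the mapping can be approximately inverted. A usage sketch, assuming TF 2.x eager execution:

import tensorflow as tf

spec = tf.random.uniform((128, 512), minval=-40.0, maxval=6.0)
packed = from_float32_to_uint8(spec, tensor_key='spectrogram')
# Approximate inverse of the quantization:
restored = (tf.cast(packed['spectrogram'], tf.float32) / 255.9999
            * (packed['max'] - packed['min']) + packed['min'])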
Example #6
Source File: layers.py    From deepchem with MIT License
def __init__(self, num_filters, **kwargs):
    """
    Parameters
    ----------
    num_filters: int
      Number of filters to have in the output

    in_layers: list of Layers or tensors
      [V, A, mask]
      V is the vertex feature tensor and must be of shape
        (batch, vertex, channel).

      A holds the adjacency matrices for each graph, with shape
        (batch, from_vertex, adj_matrix, to_vertex).

      mask is optional, to be used when not every graph has the
      same number of vertices.

    Returns: tf.tensor
      A tf.tensor with a graph convolution applied; the shape will be
      (batch, vertex, self.num_filters).
    """
    super(GraphCNN, self).__init__(**kwargs)
    self.num_filters = num_filters 
Example #7
Source File: layers.py    From astroNN with MIT License
def call(self, inputs, training=None):
        """
        :Note: Equivalent to __call__()
        :param inputs: Tensor the layer is applied to
        :type inputs: tf.Tensor
        :return: Tensor after applying the layer which is just the original tensor
        :rtype: tf.Tensor
        """
        if self.always_on:
            return tf.stop_gradient(inputs)
        else:
            if training is None:
                training = tfk.backend.learning_phase()
            output_tensor = tf.where(tf.equal(training, True), tf.stop_gradient(inputs), inputs)
            output_tensor._uses_learning_phase = True
            return output_tensor 
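The layer is a thin wrapper around tf.stop_gradient, applied either unconditionally or only while training. A minimal sketch of the core op's effect, assuming TF 2.x eager execution:

import tensorflow as tf

x = tf.Variable([1.0, 2.0])
with tf.GradientTape() as tape:
    y = tf.reduce_sum(tf.stop_gradient(x) * x)
# Gradients flow only through the second factor, so dy/dx equals x's value:
print(tape.gradient(y, x))                   # [1.0, 2.0]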
Example #8
Source File: aby3.py    From tf-encrypted with Apache License 2.0
def add(self, other):
        """
    Add `other` to this ABY3Tensor.  This can be another tensor with the same
    backing or a primitive.

    This function returns a new ABY3Tensor and does not modify this one.

    :param ABY3Tensor other: another ABY3Tensor, or a primitive (e.g. a float)
    :return: A new ABY3Tensor with `other` added.
    :rtype: ABY3Tensor
    """
        if self.share_type == ARITHMETIC:
            return self.prot.add(self, other)
        else:
            raise ValueError(
                "unsupported share type for add: {}".format(self.share_type)
            ) 
Example #9
Source File: layers.py    From PADME with MIT License
def shared(self, in_layers):
    """
    Create a copy of this layer that shares variables with it.

    This is similar to clone(), but where clone() creates two independent layers,
    this causes the layers to share variables with each other.

    Parameters
    ----------
    in_layers: list of tensors
      Input tensors for the shared layer

    Returns
    -------
    Layer
    """
    if self.variable_scope == '':
      return self.clone(in_layers)
    raise ValueError('%s does not implement shared()' % self.__class__.__name__) 
Example #10
Source File: layers.py    From PADME with MIT License
def add_summary_to_tg(self, tb_input=None):
    """
    Create the summary operation for this layer, if set_summary() has been called on it.
    Can only be called after self.create_layer to guarantee that name is not None.

    Parameters
    ----------
    tb_input: tensor
      the tensor to log to Tensorboard. If None, self.out_tensor is used.
    """
    if not self.tensorboard:
      return
    if tb_input is None:
      tb_input = self.out_tensor
    if self.summary_op == "tensor_summary":
      tf.summary.tensor_summary(self.name, tb_input, self.summary_description, 
                                self.collections)
    elif self.summary_op == 'scalar':
      tf.summary.scalar(self.name, tb_input, self.collections)
    elif self.summary_op == 'histogram':
      tf.summary.histogram(self.name, tb_input, self.collections) 
Example #11
Source File: aby3.py    From tf-encrypted with Apache License 2.0
def define_public_tensor(
        self,
        tensor: tf.Tensor,
        apply_scaling: bool = True,
        share_type=ARITHMETIC,
        name: Optional[str] = None,
        factory: Optional[AbstractFactory] = None,
    ):
        """
    Convert a tf.Tensor to an ABY3PublicTensor.
    """
        assert isinstance(tensor, tf.Tensor)
        assert (
            tensor.shape.is_fully_defined()
        ), "Shape of input '{}' is not fully defined".format(name if name else "")

        factory = factory or self.int_factory

        with tf.name_scope("public-tensor"):
            tensor = self._encode(tensor, apply_scaling)
            w = factory.tensor(tensor)
            return ABY3PublicTensor(self, [w, w, w], apply_scaling, share_type) 
Example #12
Source File: simple_gaussian_cnn_model.py    From garage with MIT License
def _build(self, obs_input, name=None):
        """Build model.

        Args:
            obs_input (tf.Tensor): Entire time-series observation input.
            name (str): Inner model name, also the variable scope of the
                inner model, if it exists. One example is
                garage.tf.models.Sequential.

        Returns:
            tf.tensor: Action.
            tf.tensor: Mean.
            tf.Tensor: Log of standard deviation.
            garage.distributions.DiagonalGaussian: Distribution.

        """
        del name
        return_var = tf.compat.v1.get_variable(
            'return_var', (), initializer=tf.constant_initializer(0.5))
        mean = tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)
        log_std = tf.fill((tf.shape(obs_input)[0], self.output_dim), 0.5)
        action = mean + log_std * 0.5
        dist = DiagonalGaussian(self.output_dim)
        # action will be 0.5 + 0.5 * 0.5 = 0.75
        return action, mean, log_std, dist 
Example #13
Source File: layers.py    From PADME with MIT License
def distance_matrix(self, D):
    """Calcuates the distance matrix from the distance tensor

    B = batch_size, N = max_num_atoms, M = max_num_neighbors, d = num_features

    Parameters
    ----------
    D: tf.Tensor of shape (B, N, M, d)
      Distance tensor.

    Returns
    -------
    R: tf.Tensor of shape (B, N, M)
       Distance matrix.

    """

    R = tf.reduce_sum(tf.multiply(D, D), 3)
    R = tf.sqrt(R)
    return R 
Example #14
Source File: simple_gaussian_mlp_model.py    From garage with MIT License
def _build(self, obs_input, name=None):
        """Build model.

        Args:
            obs_input (tf.Tensor): Entire time-series observation input.
            name (str): Inner model name, also the variable scope of the
                inner model, if it exists. One example is
                garage.tf.models.Sequential.

        Returns:
            tf.tensor: Mean.
            tf.Tensor: Log of standard deviation.
            garage.distributions.DiagonalGaussian: Distribution.

        """
        del name
        return_var = tf.compat.v1.get_variable(
            'return_var', (), initializer=tf.constant_initializer(0.5))
        mean = tf.fill((tf.shape(obs_input)[0], self.output_dim), return_var)
        log_std = tf.fill((tf.shape(obs_input)[0], self.output_dim),
                          np.log(0.5))
        dist = DiagonalGaussian(self.output_dim)
        # mean is filled with return_var (0.5) and log_std with log(0.5)
        return mean, log_std, dist 
Example #15
Source File: layers.py    From MobileNet with Apache License 2.0
def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling'):
    """
        Average pooling 2D Wrapper
        :param x: (tf.tensor) The input to the layer (N,H,W,C).
        :param size: (tuple) This specifies the size of the pooling filter.
        :param stride: (tuple) This specifies the stride of the pooling filter.
        :param name: (string) Scope name.
        :return: The output is the same input but halved in both width and height (N,H/2,W/2,C).
    """
    size_x, size_y = size
    stride_x, stride_y = stride
    return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding='VALID',
                          name=name)


############################################################################################################
# Utilities for layers 
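A usage sketch with a random NHWC input; the defaults give a 2x2 window with stride 2 (TF 2.x eager execution assumed, where tf.nn.avg_pool still accepts ksize/strides lists):

import tensorflow as tf

x = tf.random.normal((8, 32, 32, 16))        # (N, H, W, C)
y = avg_pool_2d(x)                           # 2x2 window, stride 2
print(y.shape)                               # (8, 16, 16, 16)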
Example #16
Source File: layers.py    From PADME with MIT License
def __init__(self, num_filters, **kwargs):
    """

    Parameters
    ----------
    num_filters: int
      Number of filters to have in the output

    in_layers: list of Layers or tensors
      [V, A, mask]
      V is the vertex feature tensor and must be of shape
        (batch, vertex, channel).

      A holds the adjacency matrices for each graph, with shape
        (batch, from_vertex, adj_matrix, to_vertex).

      mask is optional, to be used when not every graph has the
      same number of vertices.

    Returns: tf.tensor
      A tf.tensor with a graph convolution applied; the shape will be
      (batch, vertex, self.num_filters).
    """
    self.num_filters = num_filters
    super(GraphCNN, self).__init__(**kwargs) 
Example #17
Source File: layers.py    From PADME with MIT License
def set_summary(self, summary_op, summary_description=None, collections=None):
    """Annotates a tensor with a tf.summary operation

    This causes self.out_tensor to be logged to Tensorboard.

    Parameters
    ----------
    summary_op: str
      summary operation to annotate node
    summary_description: object, optional
      Optional summary_pb2.SummaryDescription()
    collections: list of graph collections keys, optional
      New summary op is added to these collections. Defaults to [GraphKeys.SUMMARIES]
    """
    supported_ops = {'tensor_summary', 'scalar', 'histogram'}
    if summary_op not in supported_ops:
      raise ValueError(
          "Invalid summary_op arg. Only 'tensor_summary', 'scalar', 'histogram' supported"
      )
    self.summary_op = summary_op
    self.summary_description = summary_description
    self.collections = collections
    self.tensorboard = True 
Example #18
Source File: tf_cnnvis.py    From tf_cnnvis with MIT License
def _deconvolution(graph, sess, op_tensor, X, feed_dict):
    out = []
    with graph.as_default() as g:
        # get shape of tensor
        tensor_shape = op_tensor.get_shape().as_list()

        with sess.as_default() as sess:
            # creating placeholders to pass featuremaps and
            # creating gradient ops
            featuremap = [tf.placeholder(tf.int32) for i in range(config["N"])]
            reconstruct = [tf.gradients(tf.transpose(tf.transpose(op_tensor)[featuremap[i]]), X)[0] for i in range(config["N"])]

            # Execute the gradient operations in batches of 'n'
            for i in range(0, tensor_shape[-1], config["N"]):
                c = 0
                for j in range(config["N"]):
                    if (i + j) < tensor_shape[-1]:
                        feed_dict[featuremap[j]] = i + j
                        c += 1
                if c > 0:
                    out.extend(sess.run(reconstruct[:c], feed_dict=feed_dict))
    return out 
Example #19
Source File: tf_ops.py    From safekit with MIT License
def fan_scale(initrange, activation, tensor_in):
    """
    Creates a scaling factor for weight initialization according to best practices.

    :param initrange: Scaling in addition to fan_in scale.
    :param activation: A tensorflow non-linear activation function
    :param tensor_in: Input tensor to layer of network to scale weights for.
    :return: (float) scaling factor for weight initialization.
    """
    if activation == tf.nn.relu:
        initrange *= np.sqrt(2.0/float(tensor_in.get_shape().as_list()[1]))
    else:
        initrange *= (1.0/np.sqrt(float(tensor_in.get_shape().as_list()[1])))
    return initrange 
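For a ReLU layer this reproduces the He et al. scaling sqrt(2 / fan_in); for other activations it falls back to 1 / sqrt(fan_in). A usage sketch, assuming TF 2.x (where get_shape().as_list() still works):

import tensorflow as tf

h = tf.zeros((32, 256))                      # layer input with fan-in 256
scale = fan_scale(1.0, tf.nn.relu, h)        # ~= sqrt(2.0 / 256) ~= 0.088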
Example #20
Source File: layers.py    From MobileNet with Apache License 2.0
def flatten(x):
    """
    Flatten a (N,H,W,C) input into (N,D) output. Used for fully connected layers after convolution layers
    :param x: (tf.tensor) representing input
    :return: flattened output
    """
    all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]])
    o = tf.reshape(x, [-1, all_dims_exc_first])
    return o


############################################################################################################
# Pooling Methods 
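Note that the .value attribute on shape dimensions exists only in TF 1.x; in TF 2.x the dimensions are already plain ints. A TF 1.x usage sketch:

import tensorflow as tf

x = tf.placeholder(tf.float32, (None, 7, 7, 64))
flat = flatten(x)                            # shape (None, 3136), since 7*7*64 = 3136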
Example #21
Source File: Linear.py    From ULTRA with Apache License 2.0
def build(self, input_list, noisy_params=None,
              noise_rate=0.05, is_training=False, **kwargs):
        """ Create the Linear model

        Args:
            input_list: (list<tf.tensor>) A list of tensors containing the features
                        for a list of documents.
            noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
            noise_rate: (float) A value specifying how much noise to add.
            is_training: (bool) A flag indicating whether the model is running in training mode.

        Returns:
            A list of tf.Tensor containing the ranking scores for each instance in input_list.
        """
        with tf.variable_scope(tf.get_variable_scope(), initializer=self.initializer,
                               reuse=tf.AUTO_REUSE):
            input_data = tf.concat(input_list, axis=0)
            output_data = input_data
            output_sizes = [1]

            if self.layer_norm is None and self.hparams.norm in BaseRankingModel.NORM_FUNC_DIC:
                self.layer_norm = []
                for j in range(len(output_sizes)):
                    self.layer_norm.append(BaseRankingModel.NORM_FUNC_DIC[self.hparams.norm](
                        name="layer_norm_%d" % j))

            current_size = output_data.get_shape()[-1].value
            for j in range(len(output_sizes)):
                if self.layer_norm is not None:
                    output_data = self.layer_norm[j](
                        output_data, training=is_training)
                expand_W = self.get_variable(
                    "linear_W_%d" % j, [current_size, output_sizes[j]], noisy_params=noisy_params, noise_rate=noise_rate)
                expand_b = self.get_variable(
                    "linear_b_%d" % j, [output_sizes[j]], noisy_params=noisy_params, noise_rate=noise_rate)
                output_data = tf.nn.bias_add(
                    tf.matmul(output_data, expand_W), expand_b)

            return tf.split(output_data, len(input_list), axis=0) 
Example #22
Source File: tf_ops.py    From safekit with MIT License
def weights(distribution, shape, dtype=tf.float32, initrange=1e-5,
            seed=None, l2=0.0, name='weights'):
    """
    Wrapper parameterizing common constructions of tf.Variables.

    :param distribution: A string identifying the distribution: 'norm' for random normal, 'tnorm' for truncated normal, 'constant' for constant, 'uniform' for uniform.
    :param shape: Shape of weight tensor.
    :param dtype: dtype for weights
    :param initrange: Scales standard normal and truncated normal, value of constant dist., and range of uniform dist. [-initrange, initrange].
    :param seed: For reproducible results.
    :param l2: Floating point number determining the degree of L2 regularization for these weights in the gradient descent update.
    :param name: For variable scope.
    :return: A tf.Variable.
    """
    with tf.variable_scope(name):
        if distribution == 'norm':
            wghts = tf.Variable(initrange * tf.random_normal(shape, 0, 1, dtype, seed))
        elif distribution == 'tnorm':
            wghts = tf.Variable(initrange * tf.truncated_normal(shape, 0, 1, dtype, seed))
        elif distribution == 'uniform':
            wghts = tf.Variable(tf.random_uniform(shape, -initrange, initrange, dtype, seed))
        elif distribution == 'constant':
            wghts = tf.Variable(tf.constant(initrange, dtype=dtype, shape=shape))
        else:
            raise ValueError("Argument 'distribution takes values 'norm', 'tnorm', 'uniform', 'constant', "
                             "Received %s" % distribution)
        if l2 != 0.0:
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(wghts), l2, name=name + 'weight_loss'))
        return wghts 
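A TF 1.x usage sketch creating an L2-regularized, truncated-normal weight matrix; the weight-decay term is added to the 'losses' collection for later summation into the total loss:

import tensorflow as tf

W = weights('tnorm', [784, 128], initrange=0.05, l2=1e-4, name='fc1')
penalties = tf.get_collection('losses')      # contains the L2 penalty op for W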
Example #23
Source File: tensor.py    From spleeter with MIT License
def sync_apply(tensor_dict, func, concat_axis=1):
    """ Return a function that applies synchronously the provided func on the
    provided dictionnary of tensor. This means that func is applied to the
    concatenation of the tensors in tensor_dict. This is useful for performing
    random operation that needs the same drawn value on multiple tensor, such
    as a random time-crop on both input data and label (the same crop should be
    applied to both input data and label, so random crop cannot be applied
    separately on each of them).

    IMPORTANT NOTE: all tensor are assumed to be the same shape.

    Params:
        - tensor_dict: dictionary (key: strings, values: tf.tensor)
        a dictionary of tensor.
        - func: function
        function to be applied to the concatenation of the tensors in
        tensor_dict
        - concat_axis: int
        The axis on which to perform the concatenation.

        Returns:
        processed tensors dictionary with the same name (keys) as input
        tensor_dict.
    """
    if concat_axis not in {0, 1}:
        raise NotImplementedError(
            'Function only implemented for concat_axis equal to 0 or 1')
    tensor_list = list(tensor_dict.values())
    concat_tensor = tf.concat(tensor_list, concat_axis)
    processed_concat_tensor = func(concat_tensor)
    tensor_shape = tf.shape(list(tensor_dict.values())[0])
    D = tensor_shape[concat_axis]
    if concat_axis == 0:
        return {
            name: processed_concat_tensor[index * D:(index + 1) * D, :, :]
            for index, name in enumerate(tensor_dict)
        }
    return {
        name: processed_concat_tensor[:, index * D:(index + 1) * D, :]
        for index, name in enumerate(tensor_dict)
    } 
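Note that func must preserve the size along concat_axis, since the result is split back using the per-tensor size D taken from the input. A usage sketch applying one shared random time-crop to two spectrogram-like tensors, assuming TF 2.x eager execution:

import tensorflow as tf

features = {'mix': tf.random.normal((1024, 512, 2)),
            'vocals': tf.random.normal((1024, 512, 2))}
# Concatenation on axis 1 gives shape (1024, 1024, 2); the crop acts on
# axis 0 only, so both tensors receive the identical 256-frame window:
cropped = sync_apply(features,
                     lambda t: tf.image.random_crop(t, (256, 1024, 2)),
                     concat_axis=1)
print(cropped['mix'].shape)                  # (256, 512, 2)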
Example #24
Source File: gaussian_mlp_encoder.py    From garage with MIT License
def build(self, embedding_input, name=None):
        """Build encoder.

        Args:
            embedding_input (tf.Tensor) : Embedding input.
            name (str): Name of the model, which is also the name scope.

        Returns:
            tfp.distributions.MultivariateNormalDiag: Distribution.
            tf.tensor: Mean.
            tf.Tensor: Log of standard deviation.

        """
        with tf.compat.v1.variable_scope(self._variable_scope):
            return self.model.build(embedding_input, name=name) 
Example #25
Source File: base_ranking_model.py    From ULTRA with Apache License 2.0
def selu(x):
    """ Create the scaled exponential linear unit (SELU) activation function. More information can be found in
            Klambauer, G., Unterthiner, T., Mayr, A. and Hochreiter, S., 2017. Self-normalizing neural networks. In Advances in neural information processing systems (pp. 971-980).

        Args:
            x: (tf.Tensor) A tensor containing a set of numbers

        Returns:
            The tf.Tensor produced by applying SELU on each element in x.
        """
    with tf.name_scope('selu') as scope:
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x)) 
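The constants are the fixed-point values alpha ~= 1.6733 and scale ~= 1.0507 from the SELU paper; for x < 0 the tf.nn.elu branch yields alpha * (exp(x) - 1). TensorFlow's built-in tf.nn.selu can serve as a quick cross-check (TF 2.x eager execution assumed):

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 1.0, 3.0])
print(selu(x))        # elementwise equal (to float precision) to tf.nn.selu(x)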
Example #26
Source File: base_ranking_model.py    From ULTRA with Apache License 2.0
def build(self, input_list, noisy_params=None,
              noise_rate=0.05, is_training=False, **kwargs):
        """ Create the model

        Args:
            input_list: (list<tf.tensor>) A list of tensors containing the features
                        for a list of documents.
            noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
            noise_rate: (float) A value specifying how much noise to add.
            is_training: (bool) A flag indicating whether the model is running in training mode.

        Returns:
            A list of tf.Tensor containing the ranking scores for each instance in input_list.
        """
        pass 
Example #27
Source File: SetRank.py    From ULTRA with Apache License 2.0
def build(self, input_list, noisy_params=None,
              noise_rate=0.05, is_training=False, **kwargs):
        """ Create the SetRank model (no supports for noisy parameters)

        Args:
            input_list: (list<tf.tensor>) A list of tensors containing the features
                        for a list of documents.
            noisy_params: (dict<parameter_name, tf.variable>) A dictionary of noisy parameters to add.
            noise_rate: (float) A value specifying how much noise to add.
            is_training: (bool) A flag indicating whether the model is running in training mode.

        Returns:
            A list of tf.Tensor containing the ranking scores for each instance in input_list.
        """
        with tf.variable_scope(tf.get_variable_scope() or "transformer", reuse=tf.AUTO_REUSE, initializer=self.initializer):
            sco_cur = tf.get_variable_scope()
            print(sco_cur.name, "sco_cur.name")
            mask = None
            batch_size = tf.shape(input_list[0])[0]
            feature_size = tf.shape(input_list[0])[1]
            list_size = len(input_list)
            ind = list(range(0, list_size))
            random.shuffle(ind)

#                 input_list=[input_list[i] for i in ind ]
            x = [tf.expand_dims(e, 1) for e in input_list]

            x = tf.concat(axis=1, values=x)  # [batch,len_seq,feature_size]
            x = self.Encoder_layer(x, is_training, mask)  # [batch,len_seq,1]
            output = []
            for i in range(list_size):
                output.append(x[:, i, :])
#                 reind_output=[None]*list_size
#                 for i in range(list_size):
#                     reind_output[ind[i]]=output[i]
#                 output=reind_output
        return output  # [len_seq,batch,1] 
Example #28
Source File: aby3.py    From tf-encrypted with Apache License 2.0
def define_constant(
        self,
        value: Union[np.ndarray, int, float],
        apply_scaling: bool = True,
        share_type=ARITHMETIC,
        name: Optional[str] = None,
        factory: Optional[AbstractFactory] = None,
    ):
        """
    Define a constant to use in computation.

    .. code-block:: python

        x = prot.define_constant(np.array([1,2,3,4]), apply_scaling=False)

    :See: tf.constant

    :param value: The constant value, as a NumPy array or a Python int/float.
    :param bool apply_scaling: Whether or not to scale the value.
    :param str name: What name to give to this node in the graph.
    :param AbstractFactory factory: Which tensor type to represent this value with.
    """
        assert isinstance(value, (np.ndarray, int, float))

        if isinstance(value, (int, float)):
            value = np.array([value])

        factory = factory or self.int_factory

        value = self._encode(value, apply_scaling)
        with tf.name_scope("constant{}".format("-" + name if name else "")):
            with tf.device(self.servers[0].device_name):
                x_on_0 = factory.constant(value)

            with tf.device(self.servers[1].device_name):
                x_on_1 = factory.constant(value)

            with tf.device(self.servers[2].device_name):
                x_on_2 = factory.constant(value)

        return ABY3Constant(self, [x_on_0, x_on_1, x_on_2], apply_scaling, share_type) 
Example #29
Source File: aby3.py    From tf-encrypted with Apache License 2.0
def _encode(
        self,
        rationals: Union[tf.Tensor, np.ndarray],
        apply_scaling: bool,
        factory=None,
    ) -> Union[tf.Tensor, np.ndarray]:
        """
    Encode tensor of rational numbers into tensor of ring elements. Output is
    of same type as input to allow function to be used for constants.
    """

        with tf.name_scope("encode"):

            # we first scale as needed
            if apply_scaling:
                scaled = rationals * self.fixedpoint_config.scaling_factor
            else:
                scaled = rationals

            # and then we round to integers

            if isinstance(scaled, np.ndarray):
                integers = scaled.astype(int).astype(object)

            elif isinstance(scaled, tf.Tensor):
                factory = factory or self.int_factory
                tf_native_type = factory.native_type
                assert tf_native_type in TF_NATIVE_TYPES
                integers = tf.cast(scaled, dtype=tf_native_type)

            else:
                raise TypeError("Don't know how to encode {}".format(type(rationals)))

            assert type(rationals) == type(integers)
            return integers 
Example #30
Source File: aby3.py    From tf-encrypted with Apache License 2.0
def _indexer_private(prot: ABY3, tensor: ABY3PrivateTensor, slc) -> "ABY3PrivateTensor":
    shares = tensor.unwrapped
    results = [[None] * 2 for _ in range(3)]
    with tf.name_scope("index"):
        for i in range(3):
            with tf.device(prot.servers[i].device_name):
                results[i][0] = shares[i][0][slc]
                results[i][1] = shares[i][1][slc]
    return ABY3PrivateTensor(prot, results, tensor.is_scaled, tensor.share_type)