Python tensorflow.dtype() Examples

The following are 30 code examples that use TensorFlow dtypes (tf.dtypes.DType values such as tf.float32). Each example is taken from an open-source project; the original project and source file are listed above each example.
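Most of the examples below rely on a small, common piece of the TensorFlow API: dtype objects such as tf.float32 are instances of tf.dtypes.DType, tf.as_dtype() converts strings and NumPy dtypes into a DType, and every Tensor exposes a .dtype attribute that can be compared against these objects. The following minimal sketch (not taken from any of the projects below) illustrates those pieces:

import numpy as np
import tensorflow as tf

# tf.float32 and friends are DType instances
assert isinstance(tf.float32, tf.dtypes.DType)

# tf.as_dtype converts strings and NumPy dtypes to a DType...
print(tf.as_dtype("int32"))        # <dtype: 'int32'>
print(tf.as_dtype(np.float64))     # <dtype: 'float64'>

# ...and DType.as_numpy_dtype goes the other way
print(tf.float32.as_numpy_dtype)   # <class 'numpy.float32'>

# Tensors carry a dtype attribute that can be compared against DType objects
x = tf.constant([1, 2, 3])
print(x.dtype == tf.int32)         # True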
Example #1
Source File: settings.py    From probflow with MIT License
def get_datatype():
    """Get the default datatype used for Tensors

    Returns
    -------
    dtype : tf.dtype or torch.dtype
        The current default datatype
    """
    if __SETTINGS__._DATATYPE is None:
        if get_backend() == 'pytorch':
            import torch
            return torch.float32
        else:
            import tensorflow as tf
            return tf.dtypes.float32
    else:
        return __SETTINGS__._DATATYPE 
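A minimal usage sketch, assuming the get_datatype function and the settings module above are in scope: with the TensorFlow backend the default is tf.float32, and the returned value can be passed anywhere TensorFlow expects a dtype.

import tensorflow as tf

dtype = get_datatype()             # tf.float32 unless set_datatype() was called
x = tf.zeros([2, 3], dtype=dtype)
print(x.dtype)                     # <dtype: 'float32'>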
Example #2
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def _check_valid_index(index, element_name):
  """Verifies if a value with `element_name` is a valid index."""
  if isinstance(index, int):
    return True
  elif isinstance(index, tf.Tensor):
    if index.dtype != tf.int32 and index.dtype != tf.int64:
      raise TypeError(
          "Invalid tensor `{}` parameter. Valid tensor indices must have "
          "types tf.int32 or tf.int64, got {}."
          .format(element_name, index.dtype))
    if index.shape.as_list():
      raise TypeError(
          "Invalid tensor `{}` parameter. Valid tensor indices must be scalars "
          "with shape [], got{}"
          .format(element_name, index.shape.as_list()))
    return True
  else:
    raise TypeError(
        "Invalid `{}` parameter. Valid tensor indices must be integers "
        "or tensors, got {}."
        .format(element_name, type(index))) 
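A short sketch of the checks this helper enforces, assuming the function above is in scope (element_name only appears in the error messages):

import tensorflow as tf

_check_valid_index(3, "edge_index")                                # Python int: True
_check_valid_index(tf.constant(7, dtype=tf.int64), "edge_index")   # scalar int64 tensor: True
try:
    _check_valid_index(tf.constant(1.0), "edge_index")             # float32 tensor
except TypeError as err:
    print(err)                                                     # invalid dtype message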
Example #3
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def _to_compatible_data_dicts(data_dicts):
  """Convert the content of `data_dicts` to tensors of the right type.

  All fields are converted to `Tensor`s. The index fields (`SENDERS` and
  `RECEIVERS`) and number fields (`N_NODE`, `N_EDGE`) are cast to `tf.int32`.

  Args:
    data_dicts: An iterable of dictionaries with keys `ALL_KEYS` and
      values either `None`s, or quantities that can be converted to `Tensor`s.

  Returns:
    A list of dictionaries containing `Tensor`s or `None`s.
  """
  results = []
  for data_dict in data_dicts:
    result = {}
    for k, v in data_dict.items():
      if v is None:
        result[k] = None
      else:
        dtype = tf.int32 if k in [SENDERS, RECEIVERS, N_NODE, N_EDGE] else None
        result[k] = tf.convert_to_tensor(v, dtype)
    results.append(result)
  return results 
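The dtype handling here reduces to one property of tf.convert_to_tensor: it only forces a dtype when one is supplied, and infers it otherwise. A standalone sketch:

import tensorflow as tf

print(tf.convert_to_tensor([0, 1, 2], tf.int32).dtype)   # <dtype: 'int32'> (forced, as for index fields)
print(tf.convert_to_tensor([0.5, 1.5]).dtype)            # <dtype: 'float32'> (inferred)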
Example #4
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def _populate_number_fields(data_dict):
  """Returns a dict with the number fields N_NODE, N_EDGE filled in.

  The N_NODE field is filled if the graph contains a non-`None` NODES field;
  otherwise, it is set to 0.
  The N_EDGE field is filled if the graph contains a non-`None` RECEIVERS field;
  otherwise, it is set to 0.

  Args:
    data_dict: An input `dict`.

  Returns:
    The data `dict` with number fields.
  """
  dct = data_dict.copy()
  for number_field, data_field in [[N_NODE, NODES], [N_EDGE, RECEIVERS]]:
    if dct.get(number_field) is None:
      if dct[data_field] is not None:
        dct[number_field] = tf.shape(dct[data_field])[0]
      else:
        dct[number_field] = tf.constant(0, dtype=tf.int32)
  return dct 
Example #5
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def _placeholders_from_graphs_tuple(graph, force_dynamic_num_graphs=True):
  """Creates a `graphs.GraphsTuple` of placeholders that matches a numpy graph.

  Args:
    graph: A `graphs.GraphsTuple` that contains numpy data.
    force_dynamic_num_graphs: A `bool` that forces the batch dimension to be
      dynamic. Defaults to `True`.

  Returns:
    A `graphs.GraphsTuple` containing placeholders.
  """
  graph_dtypes = graph.map(
      lambda v: tf.as_dtype(v.dtype) if v is not None else None, ALL_FIELDS)
  graph_shapes = graph.map(lambda v: list(v.shape) if v is not None else None,
                           ALL_FIELDS)
  return _build_placeholders_from_specs(
      graph_dtypes,
      graph_shapes,
      force_dynamic_num_graphs=force_dynamic_num_graphs) 
Example #6
Source File: settings.py    From probflow with MIT License
def set_datatype(datatype):
    """Set the datatype to use for Tensors

    Parameters
    ----------
    datatype : tf.dtype or torch.dtype
        The default datatype to use
    """
    if get_backend() == 'pytorch':
        import torch
        if datatype is None or isinstance(datatype, torch.dtype):
            __SETTINGS__._DATATYPE = datatype
        else:
            raise TypeError('datatype must be a torch.dtype')
    else:
        import tensorflow as tf
        if datatype is None or isinstance(datatype, tf.dtypes.DType):
            __SETTINGS__._DATATYPE = datatype
        else:
            raise TypeError('datatype must be a tf.dtypes.DType') 
Example #7
Source File: util.py    From wide_deep with MIT License
def column_to_dtype(feature, feature_conf):
    """Parse columns to tf.dtype
     Return: 
         similar to _csv_column_defaults()
     """
    _column_dtype_dic = OrderedDict()
    _column_dtype_dic['label'] = tf.int32
    for f in feature:
        if f in feature_conf:
            conf = feature_conf[f]
            if conf['type'] == 'category':
                if conf['transform'] == 'identity':  # identity category columns need an int type
                    _column_dtype_dic[f] = tf.int32
                else:
                    _column_dtype_dic[f] = tf.string
            else:
                _column_dtype_dic[f] = tf.float32  # 0.0 for float32
        else:
            _column_dtype_dic[f] = tf.string
    return _column_dtype_dic 
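A hedged usage sketch with a made-up feature list and configuration (the real feature_conf format comes from the wide_deep project's config files; the function above plus the OrderedDict and tensorflow imports are assumed to be in scope):

feature = ['age', 'gender', 'income']
feature_conf = {
    'age':    {'type': 'category',   'transform': 'identity'},     # identity category -> tf.int32
    'gender': {'type': 'category',   'transform': 'hash_bucket'},  # other category    -> tf.string
    'income': {'type': 'continuous', 'transform': 'log'},          # non-category      -> tf.float32
}
print(column_to_dtype(feature, feature_conf))
# OrderedDict([('label', tf.int32), ('age', tf.int32), ('gender', tf.string), ('income', tf.float32)])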
Example #8
Source File: network_ops.py    From hyperspectral-autoencoders with MIT License
def balance_classes(y_target,num_classes):
    """ Calculates the class weights needed to balance the classes, based on the number of samples of each class in the \
        batch of data.

    Args:
        y_target (tensor): One-hot classification labels. Shape [numSamples x numClasses].
        num_classes (int): Number of classes.

    Returns:
        (tensor): A weighting for each class that balances their contribution to the loss. Shape [numClasses].
    """
    y_target = tf.reshape( y_target, [-1, num_classes] )
    class_count = tf.add( tf.reduce_sum( y_target, axis=0 ), tf.constant( [1]*num_classes, dtype=tf.float32 ) )
    class_weights = tf.multiply( tf.divide( tf.ones( ( 1, num_classes) ), class_count ), tf.reduce_max( class_count ) )

    return class_weights 
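A tiny numeric sketch, assuming the function above is in scope: with three samples of class 0 and one of class 1, the under-represented class gets the larger weight (counts are incremented by one before dividing, which also avoids division by zero for empty classes):

import tensorflow as tf

y = tf.constant([[1., 0.], [1., 0.], [1., 0.], [0., 1.]])
print(balance_classes(y, num_classes=2))
# counts + 1 -> [4., 2.]; weights = max(count) / count -> [[1., 2.]]
# (printed directly under eager execution; in TF 1.x graph mode this is a tensor op)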
Example #9
Source File: network_ops.py    From hyperspectral-autoencoders with MIT License
def loss_function_crossentropy_1D( y_pred, y_target, class_weights=None, num_classes=None):
    """ Cross entropy loss function op, comparing 1D tensors for network prediction and target. Weights the classes \
        when calculating the loss to balance un-even training batches. If class weights are not provided, then no \
        weighting is done (weight of 1 assigned to each class).

    Args:
        y_pred (tensor): Output of the network (class scores). Shape [numSamples x numClasses].
        y_target (tensor): One-hot classification labels. Shape [numSamples x numClasses].
        class_weights (tensor): Weight for each class. Shape [numClasses].
        num_classes (int): Number of classes.

    Returns:
        (tensor): Cross-entropy loss.
    """

    if class_weights is None:
        class_weights = tf.constant(1,shape=[num_classes],dtype=tf.dtypes.float32)

    sample_weights = tf.reduce_sum( tf.multiply(y_target, class_weights ), axis=1) # weight of each sample
    loss = tf.reduce_mean( tf.losses.softmax_cross_entropy(
        onehot_labels=y_target,logits=y_pred,weights=sample_weights ) )

    return loss 
Example #10
Source File: tensor_ops.py    From hart with GNU General Public License v3.0
def _bbox_to_mask(yy, region_size, dtype):
    # trim bounding box exceeding region_size on top and left
    neg_part = tf.nn.relu(-yy[:2])
    core = tf.ones(tf.to_int32(tf.round(yy[2:] - neg_part)), dtype=dtype)

    y1 = tf.maximum(yy[0], 0.)
    x1 = tf.maximum(yy[1], 0.)

    y2 = tf.minimum(region_size[0], yy[0] + yy[2])
    x2 = tf.minimum(region_size[1], yy[1] + yy[3])

    padding = (y1, region_size[0] - y2, x1, region_size[1] - x2)
    padding = tf.reshape(tf.stack(padding), (-1, 2))
    padding = tf.to_int32(tf.round(padding))
    mask = tf.pad(core, padding)

    # trim bounding box exceeding region_size on bottom and right
    rs = tf.to_int32(tf.round(region_size))
    mask = mask[:rs[0], :rs[1]]
    mask.set_shape((None, None))
    return mask 
Example #11
Source File: tensor_ops.py    From hart with GNU General Public License v3.0
def bbox_to_mask(bbox, region_size, output_size, dtype=tf.float32):
    """Creates a binary mask of size `region_size` where rectangle given by
    `bbox` is filled with ones and the rest is zeros. Finally, the binary mask
    is resized to `output_size` with bilinear interpolation.

    :param bbox: tensor of shape (..., 4)
    :param region_size: tensor of shape (..., 2)
    :param output_size: 2-tuple of ints
    :param dtype: tf.dtype
    :return: a tensor of shape = (..., output_size)
    """
    shape = tf.concat(axis=0, values=(tf.shape(bbox)[:-1], output_size))
    bbox = tf.reshape(bbox, (-1, 4))
    region_size = tf.reshape(region_size, (-1, 2))

    def create_mask(args):
        yy, region_size = args
        return _bbox_to_mask_fixed_size(yy, region_size, output_size, dtype)

    mask = tf.map_fn(create_mask, (bbox, region_size), dtype=dtype)
    return tf.reshape(mask, shape) 
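A TF 1.x-style usage sketch (the hart code predates TensorFlow 2, so the helpers above use tf.to_int32, tf.image.resize_images and session execution; the helper _bbox_to_mask_fixed_size from Example #17 must also be in scope):

import tensorflow as tf

bbox = tf.constant([[2., 2., 4., 4.]])       # (y, x, height, width)
region_size = tf.constant([[10., 10.]])
mask = bbox_to_mask(bbox, region_size, output_size=(10, 10), dtype=tf.float32)
with tf.Session() as sess:
    print(sess.run(mask).shape)              # (1, 10, 10)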
Example #12
Source File: utils.py    From DRL_DeliveryDuel with MIT License
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype

        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
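Examples #12, #13, #18, #19 and #22–#25 are copies of the same placeholder wrapper from OpenAI Baselines and its forks. What the constructor boils down to, in isolation (TF 1.x graph-mode API; the enclosing wrapper class is not shown here):

import tensorflow as tf

shape, dtype = [84, 84, 4], tf.float32
ph = tf.placeholder(dtype, [None] + list(shape), name="obs")
print(ph)   # Tensor("obs:0", shape=(?, 84, 84, 4), dtype=float32)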
Example #13
Source File: utils.py    From rl_graph_generation with BSD 3-Clause "New" or "Revised" License
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype

        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
Example #14
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def set_zero_global_features(graph,
                             global_size,
                             dtype=tf.float32,
                             name="set_zero_global_features"):
  """Completes the global state of a graph.

  Args:
    graph: A `graphs.GraphsTuple` with a `None` global state.
    global_size: (int) the dimension for the created global features.
    dtype: (tensorflow type) the type for the created global features.
    name: (string, optional) A name for the operation.

  Returns:
    The same graph but for the global field, which is a `Tensor` of shape
    `[num_graphs, global_size]`, type `dtype` and filled with zeros.

  Raises:
    ValueError: If the `GLOBALS` field of `graph` is not `None`.
    ValueError: If `global_size` is `None`.
  """
  if graph.globals is not None:
    raise ValueError(
        "Cannot complete global state if graph already has global features.")
  if global_size is None:
    raise ValueError("Cannot complete globals with None global_size")
  with tf.name_scope(name):
    n_graphs = get_num_graphs(graph)
    return graph._replace(
        globals=tf.zeros(shape=[n_graphs, global_size], dtype=dtype)) 
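A hedged usage sketch, assuming graph_nets is installed (with TF 2.x eager execution the shapes print directly): a GraphsTuple whose globals field is None gets a zero-filled globals tensor of the requested dtype, one row per graph.

import tensorflow as tf
from graph_nets import utils_tf

graphs_tuple = utils_tf.data_dicts_to_graphs_tuple([{
    "globals": None,
    "nodes": tf.zeros([3, 2]),
    "edges": tf.zeros([2, 4]),
    "senders": tf.constant([0, 1]),
    "receivers": tf.constant([1, 2]),
}])
out = utils_tf.set_zero_global_features(graphs_tuple, global_size=5, dtype=tf.float64)
print(out.globals.shape, out.globals.dtype)   # (1, 5) <dtype: 'float64'>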
Example #15
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def set_zero_edge_features(graph,
                           edge_size,
                           dtype=tf.float32,
                           name="set_zero_edge_features"):
  """Completes the edge state of a graph.

  Args:
    graph: A `graphs.GraphsTuple` with a `None` edge state.
    edge_size: (int) the dimension for the created edge features.
    dtype: (tensorflow type) the type for the created edge features.
    name: (string, optional) A name for the operation.

  Returns:
    The same graph but for the edge field, which is a `Tensor` of shape
    `[number_of_edges, edge_size]`, where `number_of_edges = sum(graph.n_edge)`,
    with type `dtype` and filled with zeros.

  Raises:
    ValueError: If the `EDGES` field is not None in `graph`.
    ValueError: If the `RECEIVERS` or `SENDERS` field are None in `graph`.
    ValueError: If `edge_size` is None.
  """
  if graph.edges is not None:
    raise ValueError(
        "Cannot complete edge state if the graph already has edge features.")
  if graph.receivers is None or graph.senders is None:
    raise ValueError(
        "Cannot complete edge state if the receivers or senders are None.")
  if edge_size is None:
    raise ValueError("Cannot complete edges with None edge_size")
  with tf.name_scope(name):
    senders_leading_size = graph.senders.shape.as_list()[0]
    if senders_leading_size is not None:
      n_edges = senders_leading_size
    else:
      n_edges = tf.reduce_sum(graph.n_edge)
    return graph._replace(
        edges=tf.zeros(shape=[n_edges, edge_size], dtype=dtype)) 
Example #16
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def set_zero_node_features(graph,
                           node_size,
                           dtype=tf.float32,
                           name="set_zero_node_features"):
  """Completes the node state of a graph.

  Args:
    graph: A `graphs.GraphsTuple` with a `None` node state.
    node_size: (int) the dimension for the created node features.
    dtype: (tensorflow type) the type for the created node features.
    name: (string, optional) A name for the operation.

  Returns:
    The same graph but for the node field, which is a `Tensor` of shape
    `[number_of_nodes, node_size]`  where `number_of_nodes = sum(graph.n_node)`,
    with type `dtype`, filled with zeros.

  Raises:
    ValueError: If the `NODES` field is not None in `graph`.
    ValueError: If `node_size` is None.
  """
  if graph.nodes is not None:
    raise ValueError(
        "Cannot complete node state if the graph already has node features.")
  if node_size is None:
    raise ValueError("Cannot complete nodes with None node_size")
  with tf.name_scope(name):
    n_nodes = tf.reduce_sum(graph.n_node)
    return graph._replace(
        nodes=tf.zeros(shape=[n_nodes, node_size], dtype=dtype)) 
Example #17
Source File: tensor_ops.py    From hart with GNU General Public License v3.0
def _bbox_to_mask_fixed_size(yy, region_size, output_size, dtype):

    mask = _bbox_to_mask(yy, region_size, dtype)

    nonzero_region = tf.greater(tf.reduce_prod(tf.shape(mask)), 0)
    mask = tf.cond(nonzero_region, lambda: mask, lambda: tf.zeros(output_size, dtype))
    mask = tf.image.resize_images(mask[..., tf.newaxis], output_size)[..., 0]
    return mask 
Example #18
Source File: tf_utils.py    From mapr2 with Apache License 2.0
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype
        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
Example #19
Source File: baselines_legacy.py    From pysc2-examples with Apache License 2.0
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype
        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
Example #20
Source File: bert_models.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def gather_indexes(sequence_tensor, positions):
  """Gathers the vectors at the specific positions.

  Args:
      sequence_tensor: Sequence output of `BertModel` layer of shape
        (`batch_size`, `seq_length`, num_hidden) where num_hidden is number of
        hidden units of `BertModel` layer.
      positions: Position ids of tokens in the sequence to mask for pretraining,
        with dimension (batch_size, max_predictions_per_seq) where
        `max_predictions_per_seq` is the maximum number of tokens to mask out and
        predict per sequence.

  Returns:
      Masked out sequence tensor of shape (batch_size * max_predictions_per_seq,
      num_hidden).
  """
  sequence_shape = modeling.get_shape_list(
      sequence_tensor, name='sequence_output_tensor')
  batch_size = sequence_shape[0]
  seq_length = sequence_shape[1]
  width = sequence_shape[2]

  flat_offsets = tf.keras.backend.reshape(
      tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
  flat_positions = tf.keras.backend.reshape(positions + flat_offsets, [-1])
  flat_sequence_tensor = tf.keras.backend.reshape(
      sequence_tensor, [batch_size * seq_length, width])
  output_tensor = tf.gather(flat_sequence_tensor, flat_positions)

  return output_tensor 
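The dtype-relevant part is the flat-offset indexing trick, which can be shown without the BERT modeling module: offset each row's int32 positions by row * seq_length, then gather from the flattened (batch_size * seq_length, hidden) view. A standalone sketch:

import tensorflow as tf

batch_size, seq_length, hidden = 2, 4, 3
sequence = tf.reshape(
    tf.range(batch_size * seq_length * hidden, dtype=tf.float32),
    [batch_size, seq_length, hidden])
positions = tf.constant([[1, 3], [0, 2]], dtype=tf.int32)

flat_offsets = tf.reshape(tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence = tf.reshape(sequence, [batch_size * seq_length, hidden])
gathered = tf.gather(flat_sequence, flat_positions)
print(gathered.shape)   # (4, 3): rows (0,1), (0,3), (1,0) and (1,2) of the batch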
Example #21
Source File: parameters.py    From strawberryfields with Apache License 2.0
def is_object_array(p):
    """Returns True iff p is an object array.

    Args:
        p (Any): object to be checked

    Returns:
        bool: True iff p is a NumPy object array
    """
    return isinstance(p, np.ndarray) and p.dtype == object 
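A quick check of the predicate, assuming the function above and numpy are in scope:

import numpy as np

print(is_object_array(np.array([1, "a", None], dtype=object)))   # True
print(is_object_array(np.array([1.0, 2.0])))                     # False (float64 array)
print(is_object_array([1, 2, 3]))                                # False (not an ndarray)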
Example #22
Source File: utils.py    From lirpg with MIT License
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype

        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
Example #23
Source File: tf_util.py    From maddpg with MIT License
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype

        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
Example #24
Source File: utils.py    From sonic_contest with MIT License
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype

        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
Example #25
Source File: tf_util.py    From m3ddpg with MIT License
def __init__(self, shape, dtype=tf.float32, name=None):
        """Creates a placeholder for a batch of tensors of a given shape and dtype

        Parameters
        ----------
        shape: [int]
            shape of a single element of the batch
        dtype: tf.dtype
            number representation used for tensor contents
        name: str
            name of the underlying placeholder
        """
        super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name)) 
Example #26
Source File: bert_models.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def squad_model(bert_config, max_seq_length, float_type, initializer=None):
  """Returns BERT Squad model along with core BERT model to import weights.

  Args:
    bert_config: BertConfig, the config defines the core Bert model.
    max_seq_length: integer, the maximum input sequence length.
    float_type: tf.dtype, tf.float32 or tf.bfloat16.
    initializer: Initializer for weights in BertSquadLogitsLayer.

  Returns:
    The SQuAD Keras model, whose outputs include start and end logits of shape
    [batch x sequence length], and the core BERT model.
  """
  unique_ids = tf.keras.layers.Input(
      shape=(1,), dtype=tf.int32, name='unique_ids')
  input_word_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_ids')
  input_mask = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='segment_ids')

  core_model = modeling.get_bert_model(
      input_word_ids,
      input_mask,
      input_type_ids,
      config=bert_config,
      name='bert_model',
      float_type=float_type)

  # `BertSquadModel` only uses the sequence_output which
  # has dimensionality (batch_size, sequence_length, num_hidden).
  sequence_output = core_model.outputs[1]

  if initializer is None:
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)
  squad_logits_layer = BertSquadLogitsLayer(
      initializer=initializer, float_type=float_type, name='squad_logits')
  start_logits, end_logits = squad_logits_layer(sequence_output)

  squad = tf.keras.Model(
      inputs={
          'unique_ids': unique_ids,
          'input_ids': input_word_ids,
          'input_mask': input_mask,
          'segment_ids': input_type_ids,
      },
      outputs=[unique_ids, start_logits, end_logits],
      name='squad_model')
  return squad, core_model 
Example #27
Source File: bert_models.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def classifier_model(bert_config,
                     float_type,
                     num_labels,
                     max_seq_length,
                     final_layer_initializer=None):
  """BERT classifier model in functional API style.

  Construct a Keras model for predicting `num_labels` outputs from an input with
  maximum sequence length `max_seq_length`.

  Args:
    bert_config: BertConfig, the config defines the core BERT model.
    float_type: dtype, tf.float32 or tf.bfloat16.
    num_labels: integer, the number of classes.
    max_seq_length: integer, the maximum input sequence length.
    final_layer_initializer: Initializer for the final dense layer. Defaults to a
      TruncatedNormal initializer.

  Returns:
    Combined prediction model (words, mask, type) -> (one-hot labels)
    BERT sub-model (words, mask, type) -> (bert_outputs)
  """
  input_word_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
  input_mask = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')
  bert_model = modeling.get_bert_model(
      input_word_ids,
      input_mask,
      input_type_ids,
      config=bert_config,
      float_type=float_type)
  pooled_output = bert_model.outputs[0]
  if final_layer_initializer is not None:
    initializer = final_layer_initializer
  else:
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)

  output = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)(
      pooled_output)
  output = tf.keras.layers.Dense(
      num_labels,
      kernel_initializer=initializer,
      name='output',
      dtype=float_type)(
          output)
  return tf.keras.Model(
      inputs={
          'input_word_ids': input_word_ids,
          'input_mask': input_mask,
          'input_type_ids': input_type_ids
      },
      outputs=output), bert_model 
Example #28
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def _build_placeholders_from_specs(dtypes,
                                   shapes,
                                   force_dynamic_num_graphs=True):
  """Creates a `graphs.GraphsTuple` of placeholders with `dtypes` and `shapes`.

  The dtypes and shapes arguments are instances of `graphs.GraphsTuple` that
  contain dtypes and shapes, or `None` values for the fields for which no
  placeholder should be created. The leading dimensions of the nodes and edges are
  dynamic because the numbers of nodes and edges can vary.
  If `force_dynamic_num_graphs` is True, then the number of graphs is assumed to
  be dynamic and all fields' leading dimensions are set to `None`.
  If `force_dynamic_num_graphs` is False, then `N_NODE`, `N_EDGE` and `GLOBALS`
  leading dimensions are statically defined.

  Args:
    dtypes: A `graphs.GraphsTuple` that contains `tf.dtype`s or `None`s.
    shapes: A `graphs.GraphsTuple` that contains `list`s of integers,
      `tf.TensorShape`s, or `None`s.
    force_dynamic_num_graphs: A `bool` that forces the batch dimension to be
      dynamic. Defaults to `True`.

  Returns:
    A `graphs.GraphsTuple` containing placeholders.

  Raises:
    ValueError: The `None` fields in `dtypes` and `shapes` do not match.
  """
  dct = {}
  for field in ALL_FIELDS:
    dtype = getattr(dtypes, field)
    shape = getattr(shapes, field)
    if dtype is None or shape is None:
      if not (shape is None and dtype is None):
        raise ValueError(
            "only one of dtype and shape are None for field {}".format(field))
      dct[field] = None
    elif not shape:
      raise ValueError("Shapes must have at least rank 1")
    else:
      shape = list(shape)
      if field not in [N_NODE, N_EDGE, GLOBALS] or force_dynamic_num_graphs:
        shape[0] = None

      dct[field] = tf.placeholder(dtype, shape=shape, name=field)

  return graphs.GraphsTuple(**dct) 
Example #29
Source File: utils_tf.py    From graph_nets with Apache License 2.0
def placeholders_from_networkxs(graph_nxs,
                                node_shape_hint=None,
                                edge_shape_hint=None,
                                data_type_hint=tf.float32,
                                force_dynamic_num_graphs=True,
                                name="placeholders_from_networkxs"):
  """Constructs placeholders compatible with a list of networkx instances.

  Given a list of networkx instances, constructs placeholders compatible with
  the shape of those graphs.

  The networkx graph should be set up such that, for fixed shapes `node_shape`,
   `edge_shape` and `global_shape`:
    - `graph_nx.nodes(data=True)[i][-1]["features"]` is, for any node index i, a
      tensor of shape `node_shape`, or `None`;
    - `graph_nx.edges(data=True)[i][-1]["features"]` is, for any edge index i, a
      tensor of shape `edge_shape`, or `None`;
    - `graph_nx.edges(data=True)[i][-1]["index"]`, if present, defines the order
      in which the edges will be sorted in the resulting `data_dict`;
    - `graph_nx.graph["features"] is a tensor of shape `global_shape` or `None`.

  Args:
    graph_nxs: A container of `networkx.MultiDiGraph`s.
    node_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
      does not contain nodes, the trailing shape for the created `NODES` field.
      If `None` (the default), this field is left `None`. This is not used if
      `graph_nx` contains at least one node.
    edge_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
      does not contain edges, the trailing shape for the created `EDGES` field.
      If `None` (the default), this field is left `None`. This is not used if
      `graph_nx` contains at least one edge.
    data_type_hint: (tensorflow dtype, default=`tf.float32`) If the `NODES` or
      `EDGES` fields are autocompleted, their type.
    force_dynamic_num_graphs: A `bool` that forces the batch dimension to be
      dynamic. Defaults to `True`.
    name: (string, optional) A name for the operation.

  Returns:
    An instance of `graphs.GraphTuple` placeholders compatible with the
      dimensions of the graph_nxs.
  """
  with tf.name_scope(name):
    graph = utils_np.networkxs_to_graphs_tuple(graph_nxs, node_shape_hint,
                                               edge_shape_hint,
                                               data_type_hint.as_numpy_dtype)
    return _placeholders_from_graphs_tuple(
        graph, force_dynamic_num_graphs=force_dynamic_num_graphs) 
Example #30
Source File: parameters.py    From strawberryfields with Apache License 2.0
def par_evaluate(params, dtype=None):
    """Evaluate an Operation parameter sequence.

    Any parameters descending from :class:`sympy.Basic` are evaluated, others are returned as-is.
    Evaluation means that free and measured parameters are replaced by their numeric values.
    NumPy object arrays are evaluated elementwise.

    Alternatively, evaluates a single parameter and returns its value.

    Args:
        params (Sequence[Any]): parameters to evaluate
        dtype (None, np.dtype, tf.dtype): NumPy or TensorFlow datatype to optionally cast atomic symbols
            to *before* evaluating the parameter expression. Note that if the atom
            is a TensorFlow tensor, a NumPy datatype can still be passed; ``tf.as_dtype()``
            is used to determine the corresponding TensorFlow dtype internally.

    Returns:
        list[Any]: evaluated parameters
    """
    scalar = False
    if not isinstance(params, collections.abc.Sequence):
        scalar = True
        params = [params]

    def do_evaluate(p):
        """Evaluates a single parameter."""
        if is_object_array(p):
            return np.array([do_evaluate(k) for k in p])

        if not par_is_symbolic(p):
            return p

        # using lambdify we can also substitute np.ndarrays and tf.Tensors for the atoms
        atoms = list(p.atoms(MeasuredParameter, FreeParameter))
        # evaluate the atoms of the expression
        vals = [k._eval_evalf(None) for k in atoms]
        # use the tensorflow printer if any of the symbolic parameter values are TF objects
        # (we do it like this to avoid importing tensorflow if it's not needed)
        is_tf = (type(v).__module__.startswith("tensorflow") for v in vals)
        printer = "tensorflow" if any(is_tf) else "numpy"
        func = sympy.lambdify(atoms, p, printer)

        if dtype is not None:
            # cast the input values
            if printer == "tensorflow":
                import tensorflow as tf

                tfdtype = tf.as_dtype(dtype)
                vals = [tf.cast(v, dtype=tfdtype) for v in vals]
            else:
                vals = [dtype(v) for v in vals]

        return func(*vals)

    ret = list(map(do_evaluate, params))
    if scalar:
        return ret[0]
    return ret
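A minimal usage sketch of the pass-through behaviour (par_evaluate lives in strawberryfields.parameters; non-symbolic inputs are returned unchanged, a bare value comes back as a scalar and a sequence as a list):

from strawberryfields.parameters import par_evaluate

print(par_evaluate(0.3))          # 0.3
print(par_evaluate([0.3, 1.5]))   # [0.3, 1.5]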