Python tensorflow.make_tensor_proto() Examples

The following are 28 code examples of tensorflow.make_tensor_proto(), collected from open-source projects; the originating source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module tensorflow, or try the search function.
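Before the examples, a minimal self-contained sketch of what tf.make_tensor_proto does: it serializes a Python or NumPy value into a TensorProto message, and tf.make_ndarray reverses the conversion.

import numpy as np
import tensorflow as tf

# Serialize a NumPy array into a TensorProto (dtype and shape are
# inferred when not passed explicitly).
proto = tf.make_tensor_proto(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))

# Convert the proto back into a NumPy array.
arr = tf.make_ndarray(proto)
assert arr.shape == (2, 2) and arr.dtype == np.float32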
Example #1
Source File: serving_utils.py    From BERT with Apache License 2.0
def make_grpc_request_fn(servable_name, server, timeout_secs):
  """Wraps function to make grpc requests with runtime args."""
  stub = _create_stub(server)

  def _make_grpc_request(examples):
    """Builds and sends request to TensorFlow model server."""
    request = predict_pb2.PredictRequest()
    request.model_spec.name = servable_name
    request.inputs["input"].CopyFrom(
        tf.make_tensor_proto(
            [ex.SerializeToString() for ex in examples], shape=[len(examples)]))
    response = stub.Predict(request, timeout_secs)
    outputs = tf.make_ndarray(response.outputs["outputs"])
    scores = tf.make_ndarray(response.outputs["scores"])
    assert len(outputs) == len(scores)
    return [{  # pylint: disable=g-complex-comprehension
        "outputs": output,
        "scores": score
    } for output, score in zip(outputs, scores)]

  return _make_grpc_request 
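A hedged usage sketch of the factory above; the servable name, server address, and `examples` list are illustrative assumptions, not part of the original source:

# Hypothetical usage of make_grpc_request_fn; all values are assumptions.
request_fn = make_grpc_request_fn(
    servable_name="transformer", server="localhost:8500", timeout_secs=10)
predictions = request_fn(examples)  # `examples`: a list of tf.train.Example protos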
Example #2
Source File: ende_client.py    From OpenNMT-tf with MIT License
def send_request(stub, model_name, batch_tokens, timeout=5.0):
  """Sends a translation request.

  Args:
    stub: The prediction service stub.
    model_name: The model to request.
    batch_tokens: A list of token lists, one per sentence.
    timeout: Timeout after this many seconds.

  Returns:
    A future.
  """
  batch_tokens, lengths, max_length = pad_batch(batch_tokens)
  batch_size = len(lengths)
  request = predict_pb2.PredictRequest()
  request.model_spec.name = model_name
  request.inputs["tokens"].CopyFrom(tf.make_tensor_proto(
      batch_tokens, dtype=tf.string, shape=(batch_size, max_length)))
  request.inputs["length"].CopyFrom(tf.make_tensor_proto(
      lengths, dtype=tf.int32, shape=(batch_size,)))
  return stub.Predict.future(request, timeout) 
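pad_batch is not shown in this excerpt. A hypothetical version consistent with how it is called above (the actual OpenNMT-tf helper may differ): it right-pads every token list to the longest one and reports the original lengths.

def pad_batch(batch_tokens, padding_token=""):
  # Hypothetical helper matching the call signature above.
  lengths = [len(tokens) for tokens in batch_tokens]
  max_length = max(lengths)
  padded = [tokens + [padding_token] * (max_length - len(tokens))
            for tokens in batch_tokens]
  return padded, lengths, max_length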
Example #3
Source File: run_inference.py    From tfx-bsl with Apache License 2.0
def _post_process(
      self, elements: List[Union[tf.train.Example, tf.train.SequenceExample]],
      outputs: Sequence[Mapping[Text, Any]]
  ) -> Iterable[prediction_log_pb2.PredictLog]:
    result = []
    for output in outputs:
      predict_log = prediction_log_pb2.PredictLog()
      for output_alias, values in output.items():
        values = np.array(values)
        tensor_proto = tf.make_tensor_proto(
            values=values,
            dtype=tf.as_dtype(values.dtype).as_datatype_enum,
            shape=np.expand_dims(values, axis=0).shape)
        predict_log.response.outputs[output_alias].CopyFrom(tensor_proto)
      result.append(predict_log)
    return result


# TODO(b/131873699): Add typehints once
# [BEAM-8381](https://issues.apache.org/jira/browse/BEAM-8381)
# is fixed.
# TODO(b/143484017): Add batch_size back off in the case there are functional
# reasons large batch sizes cannot be handled. 
Example #4
Source File: remote_executor_test.py    From federated with Apache License 2.0
def test_compute_returns_result(self, mock_stub):
    tensor_proto = tf.make_tensor_proto(1)
    any_pb = any_pb2.Any()
    any_pb.Pack(tensor_proto)
    value = executor_pb2.Value(tensor=any_pb)
    response = executor_pb2.ComputeResponse(value=value)
    instance = mock_stub.return_value
    instance.Compute = mock.Mock(side_effect=[response])
    loop = asyncio.get_event_loop()
    executor = create_remote_executor()
    type_signature = computation_types.FunctionType(None, tf.int32)
    comp = remote_executor.RemoteValue(executor_pb2.ValueRef(), type_signature,
                                       executor)

    result = loop.run_until_complete(comp.compute())

    instance.Compute.assert_called_once()
    self.assertEqual(result, 1) 
Example #5
Source File: test_graph.py    From utensor_cgen with Apache License 2.0
def test_op_info():
    np_array = np.array([1, 2, 3], dtype=np.float32)
    t_proto = tf.make_tensor_proto(np_array, dtype=np.float32)
    ugraph = uTensorGraph(output_nodes=['dummy'])
    op_info = OperationInfo(name='testing_op',
                            input_tensors=[],
                            n_inputs=0,
                            output_tensors=[],
                            n_outputs=0,
                            op_type='no_op',
                            lib_name='tensorflow',
                            op_attr={
                                '_utensor_to_skip': [1, 2, 3],
                                '_utensor_skip_this_too': None,
                                'tensor_no_skip': t_proto
                            },
                            ugraph=ugraph)
    assert op_info.op_attr.get('_utensor_to_skip', None) == [1, 2, 3]
    assert op_info.op_attr.get('_utensor_skip_this_too') is None
    generic_tensor = op_info.op_attr.get('tensor_no_skip')
    assert isinstance(generic_tensor,
                      TensorProtoConverter.__utensor_generic_type__)
    assert (generic_tensor.np_array == np_array).all()
    assert op_info.name in ugraph.ops_info 
Example #6
Source File: estimator.py    From estimator with Apache License 2.0
def _write_checkpoint_path_to_summary(output_dir, checkpoint_path,
                                      current_global_step):
  """Writes `checkpoint_path` into summary file in the given output directory.

  Args:
    output_dir: `str`, directory to write the summary file in.
    checkpoint_path: `str`, checkpoint file path to be written to summary file.
    current_global_step: `int`, the current global step.
  """

  checkpoint_path_tag = 'checkpoint_path'

  tf.compat.v1.logging.info('Saving \'%s\' summary for global step %d: %s',
                            checkpoint_path_tag, current_global_step,
                            checkpoint_path)
  summary_proto = summary_pb2.Summary()
  summary_proto.value.add(
      tag=checkpoint_path_tag,
      tensor=tf.make_tensor_proto(checkpoint_path, dtype=tf.dtypes.string))
  summary_writer = tf.compat.v1.summary.FileWriterCache.get(output_dir)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush() 
Example #7
Source File: serving_grpc_client.py    From deep_learning with MIT License
def grpc_predict_raw(data):
    port = 8500
    channel = grpc.insecure_channel('{host}:{port}'.format(host=host, port=port))
    # channel = implementations.insecure_channel(host, int(port))

    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'textcnn_model'
    request.model_spec.signature_name = "serving_default"

    tensor_protos = {
        # send one example per request
        'sentence':tf.make_tensor_proto(data['sentence'], dtype=tf.int64, shape=[1, 55])
    }
    for k in tensor_protos:
        request.inputs[k].CopyFrom(tensor_protos[k])

    response = stub.Predict(request, 5.0)
    print(response) 
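To recover NumPy arrays from the response, tf.make_ndarray can decode the returned TensorProtos; a minimal sketch (the output alias 'scores' is an assumption about this model's SignatureDef):

# Hypothetical decoding of the PredictResponse above; the output alias
# depends on the exported SavedModel's SignatureDef.
scores = tf.make_ndarray(response.outputs['scores'])
print(scores.shape, scores.dtype)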
Example #8
Source File: pipeline_invoke_tfserving.py    From models with Apache License 2.0
def _transform_request(request):
    request_str = request.decode('utf-8')
    request_json = json.loads(request_str)
    request_np = (np.array(request_json['image'], dtype=np.float32) / 255.0).reshape(1, 28, 28)
    image_tensor = tf.make_tensor_proto(request_np, dtype=tf.float32)
    return {"image": image_tensor} 
Example #9
Source File: run_inference.py    From tfx-bsl with Apache License 2.0
def _post_process(
      self, elements: Union[Sequence[tf.train.Example],
                            Sequence[tf.train.SequenceExample]],
      outputs: Mapping[Text, np.ndarray]
  ) -> Iterable[prediction_log_pb2.PredictLog]:
    input_tensor_alias = self._io_tensor_spec.input_tensor_alias
    signature_name = self._signatures[0].name
    batch_size = len(elements)
    for output_alias, output in outputs.items():
      if len(output.shape) < 1 or output.shape[0] != batch_size:
        raise ValueError(
            'Expected output tensor %s to have at least one '
            'dimension, with the first having a size equal to the input batch '
            'size %s. Instead found %s' %
            (output_alias, batch_size, output.shape))
    predict_log_tmpl = prediction_log_pb2.PredictLog()
    predict_log_tmpl.request.model_spec.signature_name = signature_name
    predict_log_tmpl.response.model_spec.signature_name = signature_name
    input_tensor_proto = predict_log_tmpl.request.inputs[input_tensor_alias]
    input_tensor_proto.dtype = tf.string.as_datatype_enum
    input_tensor_proto.tensor_shape.dim.add().size = 1

    result = []
    for i in range(batch_size):
      predict_log = prediction_log_pb2.PredictLog()
      predict_log.CopyFrom(predict_log_tmpl)
      predict_log.request.inputs[input_tensor_alias].string_val.append(
          elements[i].SerializeToString())
      for output_alias, output in outputs.items():
        # Mimic tensor::Split
        tensor_proto = tf.make_tensor_proto(
            values=output[i],
            dtype=tf.as_dtype(output[i].dtype).as_datatype_enum,
            shape=np.expand_dims(output[i], axis=0).shape)
        predict_log.response.outputs[output_alias].CopyFrom(tensor_proto)
      result.append(predict_log)
    return result 
Example #10
Source File: run_inference_test.py    From tfx-bsl with Apache License 2.0
def test_model_predict(self):
    predictions = [{'output_1': [0.901], 'output_2': [0.997]}]
    builder = http.RequestMockBuilder({
        'ml.projects.predict':
            (None, self._make_response_body(predictions, successful=True))
    })
    resource = discovery.build(
        'ml',
        'v1',
        http=http.HttpMock(self._discovery_testdata_dir,
                           {'status': http_client.OK}),
        requestBuilder=builder)
    with mock.patch('googleapiclient.discovery.build') as response_mock:
      response_mock.side_effect = lambda service, version: resource
      inference_spec_type = model_spec_pb2.InferenceSpecType(
          ai_platform_prediction_model_spec=model_spec_pb2
          .AIPlatformPredictionModelSpec(
              project_id='test-project',
              model_name='test-model',
          ))

      prediction_log = prediction_log_pb2.PredictionLog()
      prediction_log.predict_log.response.outputs['output_1'].CopyFrom(
          tf.make_tensor_proto(values=[0.901], dtype=tf.double, shape=(1, 1)))
      prediction_log.predict_log.response.outputs['output_2'].CopyFrom(
          tf.make_tensor_proto(values=[0.997], dtype=tf.double, shape=(1, 1)))

      self._set_up_pipeline(inference_spec_type)
      assert_that(self.pcoll, equal_to([prediction_log]))
      self._run_inference_with_beam() 
Example #11
Source File: tensorflow_serving_client_workload.py    From PerfKitBenchmarker with Apache License 2.0
def classify_random_image(self):
    """Chooses a random image and sends a prediction request to the server.

    If a response is received before the request times out, its latency is
    saved, and the request is counted as successful. If the request times out
    or otherwise errors, its latency is discarded, and it is counted as a
    failed request.
    """
    image = self.get_random_image()
    with open(image, 'rb') as f:
      data = f.read()
      request = predict_pb2.PredictRequest()
      request.model_spec.name = MODEL_NAME
      request.model_spec.signature_name = 'serving_default'
      request.inputs['image_bytes'].CopyFrom(
          tf.make_tensor_proto(data, shape=[1]))

      try:
        start_time = time.time()
        self.stub.Predict(request, FLAGS.rpc_timeout)
        end_time = time.time()
        with self.thread_lock:
          self.num_completed_requests += 1
          self.latencies.append(end_time - start_time)

      except ExpirationError:
        with self.thread_lock:
          self.num_failed_requests += 1 
Example #12
Source File: data_compat.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _migrate_histogram_value(value):
  histogram_value = value.histo
  bucket_lefts = [histogram_value.min] + histogram_value.bucket_limit[:-1]
  bucket_rights = histogram_value.bucket_limit[:-1] + [histogram_value.max]
  bucket_counts = histogram_value.bucket
  buckets = np.array([bucket_lefts, bucket_rights, bucket_counts]).transpose()

  tensor_proto = tf.make_tensor_proto(buckets)
  summary_metadata = histogram_metadata.create_summary_metadata(
      display_name=value.metadata.display_name or value.tag,
      description=value.metadata.summary_description)
  return tf.Summary.Value(tag=value.tag,
                          metadata=summary_metadata,
                          tensor=tensor_proto) 
Example #13
Source File: data_compat.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _migrate_image_value(value):
  image_value = value.image
  data = [tf.compat.as_bytes(str(image_value.width)),
          tf.compat.as_bytes(str(image_value.height)),
          tf.compat.as_bytes(image_value.encoded_image_string)]

  tensor_proto = tf.make_tensor_proto(data)
  summary_metadata = image_metadata.create_summary_metadata(
      display_name=value.metadata.display_name or value.tag,
      description=value.metadata.summary_description)
  return tf.Summary.Value(tag=value.tag,
                          metadata=summary_metadata,
                          tensor=tensor_proto) 
Example #14
Source File: data_compat.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _migrate_audio_value(value):
  audio_value = value.audio
  data = [[audio_value.encoded_audio_string, b'']]  # empty label
  tensor_proto = tf.make_tensor_proto(data)
  summary_metadata = audio_metadata.create_summary_metadata(
      display_name=value.metadata.display_name or value.tag,
      description=value.metadata.summary_description,
      encoding=audio_metadata.Encoding.Value('WAV'))
  return tf.Summary.Value(tag=value.tag,
                          metadata=summary_metadata,
                          tensor=tensor_proto) 
Example #15
Source File: data_compat.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _migrate_scalar_value(value):
  scalar_value = value.simple_value
  tensor_proto = tf.make_tensor_proto(scalar_value)
  summary_metadata = scalar_metadata.create_summary_metadata(
      display_name=value.metadata.display_name or value.tag,
      description=value.metadata.summary_description)
  return tf.Summary.Value(tag=value.tag,
                          metadata=summary_metadata,
                          tensor=tensor_proto) 
Example #16
Source File: summary.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def pb(name, data, display_name=None, description=None):
  """Create a text summary protobuf.

  Arguments:
    name: A name for the generated node. Will also serve as a series name in
      TensorBoard.
    data: A Python bytestring (of type bytes), or Unicode string. Or a numpy
      data array of those types.
    display_name: Optional name for this summary in TensorBoard, as a
      `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      `str`. Markdown is supported. Defaults to empty.

  Raises:
    ValueError: If the type of the data is unsupported.

  Returns:
    A `tf.Summary` protobuf object.
  """
  try:
    tensor = tf.make_tensor_proto(data, dtype=tf.string)
  except TypeError as e:
    raise ValueError(e)

  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  summary = tf.Summary()
  summary.value.add(tag='%s/text_summary' % name,
                    metadata=summary_metadata,
                    tensor=tensor)
  return summary 
Example #17
Source File: summary.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def pb(name, data, display_name=None, description=None):
  """Create a scalar summary protobuf.

  Arguments:
    name: A unique name for the generated summary, including any desired
      name scopes.
    data: A rank-0 `np.array` or array-like form (so raw `int`s and
      `float`s are fine, too).
    display_name: Optional name for this summary in TensorBoard, as a
      `str`. Defaults to `name`.
    description: Optional long-form description for this summary, as a
      `str`. Markdown is supported. Defaults to empty.

  Returns:
    A `tf.Summary` protobuf object.
  """
  data = np.array(data)
  if data.shape != ():
    raise ValueError('Expected scalar shape for data, saw shape: %s.'
                     % data.shape)
  if data.dtype.kind not in ('b', 'i', 'u', 'f'):  # bool, int, uint, float
    raise ValueError('Cast %s to float is not supported' % data.dtype.name)
  tensor = tf.make_tensor_proto(data.astype(np.float32))

  if display_name is None:
    display_name = name
  summary_metadata = metadata.create_summary_metadata(
      display_name=display_name, description=description)
  summary = tf.Summary()
  summary.value.add(tag='%s/scalar_summary' % name,
                    metadata=summary_metadata,
                    tensor=tensor)
  return summary 
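A brief usage sketch of the pb() helper above, assuming the surrounding TensorBoard scalar-plugin module is imported; the tag is an arbitrary example:

# Hypothetical usage; 'loss' is an arbitrary tag.
summary = pb('loss', 0.25, description='Training loss')
print(summary.value[0].tag)  # loss/scalar_summary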
Example #18
Source File: pipeline_invoke_tfserving.py    From models with Apache License 2.0
def _transform_request(request):
    """
    Convert from bytes/json/etc to dict of tf.tensor/np.array/etc
    :param request:
    :return:
    """
    # TODO: Uncomment one of the examples below - or provide your own implementation
    #
    # Note:  The dict keys used below (ie. 'image') depend on the TF SignatureDef of your exported SavedModel
    #
    # Example 1: Convert json version of an image starting from raw bytes => dict{} to feed TF Serving
    #
    # request_str = request.decode('utf-8')
    # request_json = json.loads(request_str)
    # request_np = (np.array(request_json['image'], dtype=np.float32) / 255.0).reshape(1, 28, 28)
    # image_tensor = tf.make_tensor_proto(request_np, dtype=tf.float32)
    # transformed_request_dict = {"image": image_tensor}
    # return transformed_request_dict   # Becomes `PredictRequest.inputs['image'] = image_tensor`

    # Example 2: Convert raw bytes version of an image => dict{} to feed TF Serving
    #
    # image_tensor = tf.make_tensor_proto([request], shape=[1])
    # transformed_request_dict = {'image': image_tensor}
    # return transformed_request_dict   # Becomes `PredictRequest.inputs['image'] = image_tensor`


# input: dict{}
# return: anything you want! (json, bytes, etc) 
Example #19
Source File: jaxboard.py    From BERT with Apache License 2.0
def text(self, tag, textdata, step=None):
    """Saves a text summary.

    Args:
      tag: str: label for this data
      textdata: string, or 1D/2D list/numpy array of strings
      step: int: training step
    Note: markdown formatting is rendered by tensorboard.
    """
    if step is None:
      step = self._step
    else:
      self._step = step
    smd = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
    if isinstance(textdata, (str, bytes)):
      tensor = tf.make_tensor_proto(
          values=[textdata.encode(encoding='utf_8')], shape=(1,))
    else:
      textdata = onp.array(textdata)  # convert lists, jax arrays, etc.
      datashape = onp.shape(textdata)
      if len(datashape) == 1:
        tensor = tf.make_tensor_proto(
            values=[td.encode(encoding='utf_8') for td in textdata],
            shape=(datashape[0],))
      elif len(datashape) == 2:
        tensor = tf.make_tensor_proto(
            values=[
                td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
            ],
            shape=(datashape[0], datashape[1]))
      else:
        # Guard against rank > 2 input, which would otherwise leave
        # `tensor` undefined below.
        raise ValueError('textdata must be a string or a 1D/2D array of strings')
    summary = Summary(
        value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
    self.add_summary(summary, step)


# Copied from gin/tf/utils.py:GinConfigSaverHook 
Example #20
Source File: pipeline_invoke_tflite.py    From models with Apache License 2.0
def _transform_request(request):
    """
    Convert from bytes/json/etc to dict of tf.tensor/np.array/etc
    :param request:
    :return:
    """
    # TODO: Uncomment one of the examples below - or provide your own implementation
    #
    # Note:  The dict keys used below (ie. 'image') depend on the TF SignatureDef of your exported SavedModel
    #
    # Example 1: Convert json version of an image starting from raw bytes => dict{} to feed TF Serving
    #
    # request_str = request.decode('utf-8')
    # request_json = json.loads(request_str)
    # request_np = (np.array(request_json['image'], dtype=np.float32) / 255.0).reshape(1, 28, 28)
    # return {"image": request_np}

    # Example 2: Convert raw bytes version of an image => dict{} to feed TF Serving
    #
    # image_tensor = tf.make_tensor_proto([request], shape=[1])
    # transformed_request_dict = {'image': image_tensor}
    # return transformed_request_dict   # Becomes `PredictRequest.inputs['image'] = image_tensor`


# input: dict{}
# return: anything you want! (json, bytes, etc) 
Example #21
Source File: test_tf_predictor.py    From sagemaker-python-sdk with Apache License 2.0
def test_predict_request_json(sagemaker_session):
    data = [6.4, 3.2, 0.5, 1.5]
    tensor_proto = tf.make_tensor_proto(
        values=np.asarray(data), shape=[1, len(data)], dtype=tf.float32
    )
    predictor = RealTimePredictor(
        sagemaker_session=sagemaker_session,
        endpoint=ENDPOINT,
        deserializer=tf_json_deserializer,
        serializer=tf_json_serializer,
    )

    mock_response(
        json.dumps(CLASSIFICATION_RESPONSE).encode("utf-8"), sagemaker_session, JSON_CONTENT_TYPE
    )

    result = predictor.predict(tensor_proto)

    sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with(
        Accept=JSON_CONTENT_TYPE,
        Body=json_format.MessageToJson(tensor_proto),
        ContentType=JSON_CONTENT_TYPE,
        EndpointName="myendpoint",
    )

    assert result == CLASSIFICATION_RESPONSE 
Example #22
Source File: conftest.py    From utensor_cgen with Apache License 2.0
def tf_quint8_tensor():
    return make_tensor_proto(255*np.random.rand(3, 3),
                             types_pb2.DT_QUINT8) 
Example #23
Source File: conftest.py    From utensor_cgen with Apache License 2.0
def tf_qint8_tensor():
    return make_tensor_proto(127*np.random.rand(3, 3),
                             types_pb2.DT_QINT8) 
Example #24
Source File: converter.py    From utensor_cgen with Apache License 2.0
def get_tf_value(cls, value):
    return make_tensor_proto(value.np_array, dtype=value.dtype) 
Example #25
Source File: request_builder.py    From tfx with Apache License 2.0
def _BuildPredictRequests(self, signature_name: Text,
                            serialized_input_key: Text):
    for record in self._records:
      request = predict_pb2.PredictRequest()
      request.model_spec.name = self._model_name
      request.model_spec.signature_name = signature_name
      request.inputs[serialized_input_key].CopyFrom(
          tf.make_tensor_proto([record]))
      yield request 
Example #26
Source File: test_tf_predictor.py    From sagemaker-python-sdk with Apache License 2.0
def test_predict_tensor_request_csv(sagemaker_session):
    data = [6.4, 3.2, 0.5, 1.5]
    tensor_proto = tf.make_tensor_proto(
        values=np.asarray(data), shape=[1, len(data)], dtype=tf.float32
    )
    predictor = RealTimePredictor(
        serializer=tf_csv_serializer,
        deserializer=tf_json_deserializer,
        sagemaker_session=sagemaker_session,
        endpoint=ENDPOINT,
    )

    mock_response(
        json.dumps(CLASSIFICATION_RESPONSE).encode("utf-8"), sagemaker_session, JSON_CONTENT_TYPE
    )

    result = predictor.predict(tensor_proto)

    sagemaker_session.sagemaker_runtime_client.invoke_endpoint.assert_called_once_with(
        Accept=JSON_CONTENT_TYPE,
        Body="6.4,3.2,0.5,1.5",
        ContentType=CSV_CONTENT_TYPE,
        EndpointName="myendpoint",
    )

    assert result == CLASSIFICATION_RESPONSE 
Example #27
Source File: greeter_summary.py    From tensorboard-plugin-example with Apache License 2.0
def pb(tag, guest, display_name=None, description=None):
  """Create a greeting summary for the given guest.

  Arguments:
    tag: The string tag associated with the summary.
    guest: The string name of the guest to greet.
    display_name: If set, will be used as the display name in
      TensorBoard. Defaults to `tag`.
    description: A longform readable description of the summary data.
      Markdown is supported.
    """
  message = 'Hello, %s!' % guest
  tensor = tf.make_tensor_proto(message, dtype=tf.string)

  # We have no metadata to store, but we do need to add a plugin_data entry
  # so that we know this summary is associated with the greeter plugin.
  # We could use this entry to pass additional metadata other than the
  # PLUGIN_NAME by using the content parameter.
  summary_metadata = tf.SummaryMetadata(
      display_name=display_name,
      summary_description=description,
      plugin_data=tf.SummaryMetadata.PluginData(
          plugin_name=PLUGIN_NAME))

  summary = tf.Summary()
  summary.value.add(tag=tag,
                    metadata=summary_metadata,
                    tensor=tensor)
  return summary 
Example #28
Source File: jaxboard.py    From trax with Apache License 2.0
def text(self, tag, textdata, step=None):
    """Saves a text summary.

    Args:
      tag: str: label for this data
      textdata: string, or 1D/2D list/numpy array of strings
      step: int: training step
    Note: markdown formatting is rendered by tensorboard.
    """
    if step is None:
      step = self._step
    else:
      self._step = step
    smd = tf.compat.v1.SummaryMetadata(
        plugin_data=tf.compat.v1.SummaryMetadata.PluginData(plugin_name='text'))
    if isinstance(textdata, (str, bytes)):
      tensor = tf.make_tensor_proto(
          values=[textdata.encode(encoding='utf_8')], shape=(1,))
    else:
      textdata = np.array(textdata)  # convert lists, jax arrays, etc.
      datashape = np.shape(textdata)
      if len(datashape) == 1:
        tensor = tf.make_tensor_proto(
            values=[td.encode(encoding='utf_8') for td in textdata],
            shape=(datashape[0],))
      elif len(datashape) == 2:
        tensor = tf.make_tensor_proto(
            values=[
                td.encode(encoding='utf_8') for td in np.reshape(textdata, -1)
            ],
            shape=(datashape[0], datashape[1]))
      else:
        # Guard against rank > 2 input, which would otherwise leave
        # `tensor` undefined below.
        raise ValueError('textdata must be a string or a 1D/2D array of strings')
    summary = tf.compat.v1.Summary(
        value=[tf.compat.v1.Summary.Value(
            tag=tag, metadata=smd, tensor=tensor)])
    self.add_summary(summary, step)


# Copied from gin/tf/utils.py:GinConfigSaverHook