Python tensorflow.double() Examples

The following are 5 code examples of tensorflow.double(), collected from open-source projects. Each example notes its source file, the project it was taken from, and that project's license.
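For context, tf.double is an alias for tf.float64, TensorFlow's 64-bit floating-point dtype. The short check below (not taken from any of the listed projects) illustrates this.

import tensorflow as tf

print(tf.double is tf.float64)   # True: the two names refer to the same DType object
print(tf.double)                 # <dtype: 'float64'>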
Example #1
Source File: auc_util.py    From icme2019 with MIT License
def auroc(y_true, y_pred):
    # Wrap sklearn's roc_auc_score as a TensorFlow op; the score comes back as a tf.double scalar.
    return tf.py_func(roc_auc_score, (y_true, y_pred), tf.double)
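A minimal usage sketch for this helper, assuming TensorFlow 1.x (where tf.py_func is available) and a purely illustrative binary classifier; only the metrics=[auroc] wiring reflects how such a wrapper is typically plugged into Keras:

import tensorflow as tf
from sklearn.metrics import roc_auc_score

def auroc(y_true, y_pred):
    return tf.py_func(roc_auc_score, (y_true, y_pred), tf.double)

# Hypothetical model: any binary classifier with a sigmoid output works the same way.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[auroc])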
Example #2
Source File: categorical.py    From Kaggler with MIT License
def auc(y, p):
    # Eager-friendly variant: tf.py_function wraps roc_auc_score and returns the score as tf.double.
    return tf.py_function(roc_auc_score, (y, p), tf.double)
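A quick eager-mode sketch of calling this helper directly under TensorFlow 2.x; the toy labels and scores are the usual sklearn illustration, not data from the Kaggler project:

import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_auc_score

def auc(y, p):
    return tf.py_function(roc_auc_score, (y, p), tf.double)

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
print(auc(y_true, y_score))   # expected: tf.Tensor(0.75, shape=(), dtype=float64)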
Example #3
Source File: run_inference_test.py    From tfx-bsl with Apache License 2.0
def test_model_predict(self):
    predictions = [{'output_1': [0.901], 'output_2': [0.997]}]
    builder = http.RequestMockBuilder({
        'ml.projects.predict':
            (None, self._make_response_body(predictions, successful=True))
    })
    resource = discovery.build(
        'ml',
        'v1',
        http=http.HttpMock(self._discovery_testdata_dir,
                           {'status': http_client.OK}),
        requestBuilder=builder)
    with mock.patch('googleapiclient.discovery.build') as response_mock:
      response_mock.side_effect = lambda service, version: resource
      inference_spec_type = model_spec_pb2.InferenceSpecType(
          ai_platform_prediction_model_spec=model_spec_pb2
          .AIPlatformPredictionModelSpec(
              project_id='test-project',
              model_name='test-model',
          ))

      # Expected result: a PredictionLog whose response carries both model outputs
      # as 1x1 DT_DOUBLE (tf.double) tensors.
      prediction_log = prediction_log_pb2.PredictionLog()
      prediction_log.predict_log.response.outputs['output_1'].CopyFrom(
          tf.make_tensor_proto(values=[0.901], dtype=tf.double, shape=(1, 1)))
      prediction_log.predict_log.response.outputs['output_2'].CopyFrom(
          tf.make_tensor_proto(values=[0.997], dtype=tf.double, shape=(1, 1)))

      # assert_that registers the expectation on the PCollection before the
      # pipeline is actually executed by _run_inference_with_beam().
      self._set_up_pipeline(inference_spec_type)
      assert_that(self.pcoll, equal_to([prediction_log]))
      self._run_inference_with_beam()
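As a side note on the dtype used above, here is a minimal sketch, independent of the tfx-bsl test harness, of what tf.make_tensor_proto produces when dtype=tf.double is requested:

import tensorflow as tf

proto = tf.make_tensor_proto(values=[0.901], dtype=tf.double, shape=(1, 1))
print(tf.as_dtype(proto.dtype))   # <dtype: 'float64'> -- serialized as DT_DOUBLE
print(tf.make_ndarray(proto))     # [[0.901]]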
Example #4
Source File: bucketize_integration_test.py    From transform with Apache License 2.0
def _construct_test_bucketization_parameters():
  # Each tuple bundles the test inputs, the expected bucket boundaries, and the
  # shuffle/error-tolerance/apply_buckets flags; the dtype is appended as the
  # last element, once per entry in `dtypes` (see the return statement).
  args_without_dtype = (
      (range(1, 10), [4, 7], False, None, False, False),
      (range(1, 100), [25, 50, 75], False, None, False, False),

      # The following is similar to range(1, 100) test above, except that
      # only odd numbers are in the input; so boundaries differ (26 -> 27 and
      # 76 -> 77).
      (range(1, 100, 2), [24, 50, 75], False, None, False, False),

      # Test some inversely sorted inputs, and with different strides, and
      # boundaries/buckets.
      (range(9, 0, -1), [4, 7], False, None, False, False),
      (range(19, 0, -1), [10], False, None, False, False),
      (range(99, 0, -1), [50], False, None, False, False),
      (range(99, 0, -1), [34, 67], False, None, False, False),
      (range(99, 0, -2), [33, 67], False, None, False, False),
      (range(99, 0, -1), range(10, 100, 10), False, None, False, False),

      # These tests do a random shuffle of the inputs, which must not affect the
      # boundaries (or the computed buckets).
      (range(99, 0, -1), range(10, 100, 10), True, None, False, False),
      (range(1, 100), range(10, 100, 10), True, None, False, False),

      # The following test is with multiple batches (3 batches with default
      # batch of 1000).
      (range(1, 3000), [1499], False, None, False, False),
      (range(1, 3000), [1000, 2000], False, None, False, False),

      # Test with specific error for bucket boundaries. This is same as the test
      # above with 3 batches and a single boundary, but with a stricter error
      # tolerance (0.001) than the default error (0.01). The result is that the
      # computed boundary in the test below is closer to the middle (1501) than
      # that computed by the boundary of 1503 above.
      (range(1, 3000), [1500], False, 0.001, False, False),

      # Test with specific error for bucket boundaries, with more relaxed error
      # tolerance (0.1) than the default (0.01). Now the boundary diverges
      # further to 1504 (compared to boundary of 1501 with error 0.001, and
      # boundary of 1503 with error 0.01).
      (range(1, 3000), [1503], False, 0.1, False, False),

      # Tests for tft.apply_buckets.
      (range(1, 100), [25, 50, 75], False, 0.00001, True, False),
      # TODO(b/78569039): Enable this test.
      # (range(1, 100), [26, 51, 76], False, 0.00001, True, True),
  )
  dtypes = (tf.int32, tf.int64, tf.float32, tf.float64, tf.double)
  return (x + (dtype,) for x in args_without_dtype for dtype in dtypes) 
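A small sketch of how the final generator expands the parameter tuples; the abbreviated tuple here is illustrative rather than one of the real test cases:

import tensorflow as tf

args_without_dtype = ((range(1, 10), [4, 7]),)   # abbreviated base tuple for illustration
dtypes = (tf.int32, tf.int64, tf.float32, tf.float64, tf.double)

expanded = [x + (dtype,) for x in args_without_dtype for dtype in dtypes]
print(len(expanded))             # 5: one copy of each base tuple per dtype
print(tf.double is tf.float64)   # True, so the float64 case is effectively listed twice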
Example #5
Source File: operation_layers.py    From onnx2keras with MIT License
def convert_cast(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert Cast layer
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: internal converter name
    :param keras_name: resulting layer name
    :return: None
    """
    logger = logging.getLogger('onnx2keras:cast')

    if len(node.input) != 1:
        raise AttributeError('More than 1 input for cast layer.')

    if is_numpy(layers[node.input[0]]):
        logger.debug('Cast numpy array')

        cast_map = {
            1: np.float32,
            2: np.uint8,
            3: np.int8,
            5: np.int16,
            6: np.int32,
            7: np.int64,
            9: np.bool_,  # np.bool was removed from recent NumPy; np.bool_ is the supported type
            10: np.float16,
            11: np.double,
        }

        # Cast the stored numpy value for this input (node.input[0] is only its name).
        layers[node_name] = cast_map[params['to']](layers[node.input[0]])
    else:
        input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

        def target_layer(x, dtype=params['to']):
            import tensorflow as tf
            cast_map = {
                1: tf.float32,
                2: tf.uint8,
                3: tf.int8,
                5: tf.int16,
                6: tf.int32,
                7: tf.int64,
                9: tf.bool,
                10: tf.float16,
                11: tf.double,
            }
            return tf.cast(x, cast_map[dtype])

        lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
        layers[node_name] = lambda_layer(input_0)
        lambda_func[keras_name] = target_layer
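For reference, the integer keys in cast_map are ONNX TensorProto data-type codes. A minimal sketch, independent of the converter, of what code 11 (DOUBLE) amounts to on the tensor branch:

import tensorflow as tf

x = tf.constant([1.0, 2.5], dtype=tf.float32)
y = tf.cast(x, tf.double)   # ONNX dtype code 11 (DOUBLE) maps to tf.double / tf.float64
print(y.dtype)              # <dtype: 'float64'>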