Python tensorflow.image Examples

The following are 30 code examples of the tensorflow.image module (tf.image), drawn from the test suites of open-source projects. The source file, originating project, and license are noted above each example.
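
All of the snippets are test methods excerpted from preprocessor_test.py in the TensorFlow Object Detection API, so they exercise tf.image indirectly through the object_detection preprocessor ops (which delegate to tf.image primitives such as tf.image.flip_left_right and tf.image.random_brightness). The sketch below outlines the enclosing context the snippets assume; the import paths follow the Object Detection API layout and the class body is only a placeholder, so treat it as an assumption rather than a copy of the original file.

import numpy as np
import tensorflow as tf  # TF1-style API: tf.Session, tf.to_float, tf.random_uniform, ...
from unittest import mock  # the original file imports the standalone "mock" package

from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields


class PreprocessorTest(tf.test.TestCase):
  """Assumed enclosing test case for the examples below (sketch only).

  Helper methods such as createTestImages(), createTestBoxes(),
  createEmptyTestBoxes(), createTestLabels(), createTestKeypoints(),
  createColorfulTestImage(), expectedImagesAfterNormalization(),
  expectedImagesAfterLeftRightFlip(), expectedImagesAfterUpDownFlip() and
  expectedImagesAfterRot90() are defined elsewhere in preprocessor_test.py.
  They return small constant tensors (the test image batch has shape
  [1, 4, 4, 3]) whose exact values several of the assertions depend on.
  """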
Example #1
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomDistortColor(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_distort_color, {}))
    images_original = self.createTestImages()
    images_original_shape = tf.shape(images_original)
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_distorted_color = tensor_dict[fields.InputDataFields.image]
    images_distorted_color_shape = tf.shape(images_distorted_color)
    with self.test_session() as sess:
      (images_original_shape_, images_distorted_color_shape_) = sess.run(
          [images_original_shape, images_distorted_color_shape])
      self.assertAllEqual(images_original_shape_, images_distorted_color_shape_) 
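
As a rough standalone sketch (TF1 graph mode, assuming the Object Detection API is on the PYTHONPATH), the same normalize-then-distort pipeline can also be run outside the test harness; like the assertion above, it only confirms that color distortion preserves the input shape.

import tensorflow as tf
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields

# A stand-in [batch, height, width, channels] image batch in the 0..255 range.
images = tf.random_uniform([1, 4, 4, 3], minval=0, maxval=255)
preprocessing_options = [
    (preprocessor.normalize_image, {'original_minval': 0, 'original_maxval': 255,
                                    'target_minval': 0, 'target_maxval': 1}),
    (preprocessor.random_distort_color, {}),
]
tensor_dict = preprocessor.preprocess(
    {fields.InputDataFields.image: images}, preprocessing_options)

with tf.Session() as sess:
  distorted = sess.run(tensor_dict[fields.InputDataFields.image])
  print(distorted.shape)  # shape is unchanged by color distortion: (1, 4, 4, 3)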
Example #2
Source File: preprocessor_test.py    From vehicle_counting_tensorflow with MIT License
def testRandomVerticalFlipWithEmptyBoxes(self):
    preprocess_options = [(preprocessor.random_vertical_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterUpDownFlip()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]

    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)

    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_) 
Example #3
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomPixelValueScale(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_) 
Example #4
Source File: preprocessor_test.py    From vehicle_counting_tensorflow with MIT License
def testRandomHorizontalFlipWithEmptyBoxes(self):
    preprocess_options = [(preprocessor.random_horizontal_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterLeftRightFlip()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]

    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)

    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_) 
Example #5
Source File: preprocessor_test.py    From vehicle_counting_tensorflow with MIT License
def testRandomImageScale(self):
    preprocess_options = [(preprocessor.random_image_scale, {})]
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images_scaled = tensor_dict[fields.InputDataFields.image]
    images_original_shape = tf.shape(images_original)
    images_scaled_shape = tf.shape(images_scaled)
    with self.test_session() as sess:
      (images_original_shape_, images_scaled_shape_) = sess.run(
          [images_original_shape, images_scaled_shape])
      self.assertTrue(
          images_original_shape_[1] * 0.5 <= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[1] * 2.0 >= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[2] * 0.5 <= images_scaled_shape_[2])
      self.assertTrue(
          images_original_shape_[2] * 2.0 >= images_scaled_shape_[2]) 
Example #6
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomAdjustBrightness(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_brightness, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_bright = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_bright_shape = tf.shape(images_bright)
    with self.test_session() as sess:
      (image_original_shape_, image_bright_shape_) = sess.run(
          [image_original_shape, image_bright_shape])
      self.assertAllEqual(image_original_shape_, image_bright_shape_) 
Example #7
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomAdjustContrast(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_contrast, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_contrast = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_contrast_shape = tf.shape(images_contrast)
    with self.test_session() as sess:
      (image_original_shape_, image_contrast_shape_) = sess.run(
          [image_original_shape, image_contrast_shape])
      self.assertAllEqual(image_original_shape_, image_contrast_shape_) 
Example #8
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomAdjustHue(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_hue, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_hue = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_hue_shape = tf.shape(images_hue)
    with self.test_session() as sess:
      (image_original_shape_, image_hue_shape_) = sess.run(
          [image_original_shape, image_hue_shape])
      self.assertAllEqual(image_original_shape_, image_hue_shape_) 
Example #9
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomDistortColor(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_distort_color, {}))
    images_original = self.createTestImages()
    images_original_shape = tf.shape(images_original)
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_distorted_color = tensor_dict[fields.InputDataFields.image]
    images_distorted_color_shape = tf.shape(images_distorted_color)
    with self.test_session() as sess:
      (images_original_shape_, images_distorted_color_shape_) = sess.run(
          [images_original_shape, images_distorted_color_shape])
      self.assertAllEqual(images_original_shape_, images_distorted_color_shape_) 
Example #10
Source File: preprocessor_test.py    From vehicle_counting_tensorflow with MIT License
def testRandomPixelValueScale(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_) 
Example #11
Source File: preprocessor_test.py    From vehicle_counting_tensorflow with MIT License
def testRandomRotation90WithEmptyBoxes(self):
    preprocess_options = [(preprocessor.random_rotation90, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterRot90()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]

    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)

    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_) 
Example #12
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomImageScale(self):
    preprocess_options = [(preprocessor.random_image_scale, {})]
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images_scaled = tensor_dict[fields.InputDataFields.image]
    images_original_shape = tf.shape(images_original)
    images_scaled_shape = tf.shape(images_scaled)
    with self.test_session() as sess:
      (images_original_shape_, images_scaled_shape_) = sess.run(
          [images_original_shape, images_scaled_shape])
      self.assertTrue(
          images_original_shape_[1] * 0.5 <= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[1] * 2.0 >= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[2] * 0.5 <= images_scaled_shape_[2])
      self.assertTrue(
          images_original_shape_[2] * 2.0 >= images_scaled_shape_[2]) 
Example #13
Source File: preprocessor_test.py    From vehicle_counting_tensorflow with MIT License
def testNormalizeImage(self):
    preprocess_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 256,
        'target_minval': -1,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    images_expected = self.expectedImagesAfterNormalization()

    with self.test_session() as sess:
      (images_, images_expected_) = sess.run(
          [images, images_expected])
      images_shape_ = images_.shape
      images_expected_shape_ = images_expected_.shape
      expected_shape = [1, 4, 4, 3]
      self.assertAllEqual(images_expected_shape_, images_shape_)
      self.assertAllEqual(images_shape_, expected_shape)
      self.assertAllClose(images_, images_expected_) 
Example #14
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomBlackPatches(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_black_patches, {
        'size_to_image_ratio': 0.5
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    blacked_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
    images_shape = tf.shape(images)
    blacked_images_shape = tf.shape(blacked_images)

    with self.test_session() as sess:
      (images_shape_, blacked_images_shape_) = sess.run(
          [images_shape, blacked_images_shape])
      self.assertAllEqual(images_shape_, blacked_images_shape_) 
Example #15
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testRandomResizeMethod(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_resize_method, {
        'target_size': (75, 150)
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    resized_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    resized_images = resized_tensor_dict[fields.InputDataFields.image]
    resized_images_shape = tf.shape(resized_images)
    expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)

    with self.test_session() as sess:
      (expected_images_shape_, resized_images_shape_) = sess.run(
          [expected_images_shape, resized_images_shape])
      self.assertAllEqual(expected_images_shape_,
                          resized_images_shape_) 
Example #16
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testResizeToRange(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
    min_dim = 50
    max_dim = 100
    expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]

    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.random_uniform(in_shape)
      out_image = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)

      with self.test_session() as sess:
        out_image_shape = sess.run(out_image_shape)
        self.assertAllEqual(out_image_shape, expected_shape) 
Example #17
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testResizeToRangeSameMinMax(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[312, 312, 3], [299, 299, 3]]
    min_dim = 320
    max_dim = 320
    expected_shape_list = [[320, 320, 3], [320, 320, 3]]

    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.random_uniform(in_shape)
      out_image = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)

      with self.test_session() as sess:
        out_image_shape = sess.run(out_image_shape)
        self.assertAllEqual(out_image_shape, expected_shape) 
Example #18
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testResizeToRangeSameMinMax(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[312, 312, 3], [299, 299, 3]]
    min_dim = 320
    max_dim = 320
    expected_shape_list = [[320, 320, 3], [320, 320, 3]]

    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.random_uniform(in_shape)
      out_image = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)

      with self.test_session() as sess:
        out_image_shape = sess.run(out_image_shape)
        self.assertAllEqual(out_image_shape, expected_shape) 
Example #19
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testResizeToRange(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
    min_dim = 50
    max_dim = 100
    expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]

    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.random_uniform(in_shape)
      out_image = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)

      with self.test_session() as sess:
        out_image_shape = sess.run(out_image_shape)
        self.assertAllEqual(out_image_shape, expected_shape) 
Example #20
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomResizeMethod(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_resize_method, {
        'target_size': (75, 150)
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    resized_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    resized_images = resized_tensor_dict[fields.InputDataFields.image]
    resized_images_shape = tf.shape(resized_images)
    expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)

    with self.test_session() as sess:
      (expected_images_shape_, resized_images_shape_) = sess.run(
          [expected_images_shape, resized_images_shape])
      self.assertAllEqual(expected_images_shape_,
                          resized_images_shape_) 
Example #21
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testNormalizeImage(self):
    preprocess_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 256,
        'target_minval': -1,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    images_expected = self.expectedImagesAfterNormalization()

    with self.test_session() as sess:
      (images_, images_expected_) = sess.run(
          [images, images_expected])
      images_shape_ = images_.shape
      images_expected_shape_ = images_expected_.shape
      expected_shape = [1, 4, 4, 3]
      self.assertAllEqual(images_expected_shape_, images_shape_)
      self.assertAllEqual(images_shape_, expected_shape)
      self.assertAllClose(images_, images_expected_) 
Example #22
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomBlackPatches(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_black_patches, {
        'size_to_image_ratio': 0.5
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    blacked_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
    images_shape = tf.shape(images)
    blacked_images_shape = tf.shape(blacked_images)

    with self.test_session() as sess:
      (images_shape_, blacked_images_shape_) = sess.run(
          [images_shape, blacked_images_shape])
      self.assertAllEqual(images_shape_, blacked_images_shape_) 
Example #23
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomPixelValueScale(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_) 
Example #24
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomImageScale(self):
    preprocess_options = [(preprocessor.random_image_scale, {})]
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images_scaled = tensor_dict[fields.InputDataFields.image]
    images_original_shape = tf.shape(images_original)
    images_scaled_shape = tf.shape(images_scaled)
    with self.test_session() as sess:
      (images_original_shape_, images_scaled_shape_) = sess.run(
          [images_original_shape, images_scaled_shape])
      self.assertTrue(
          images_original_shape_[1] * 0.5 <= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[1] * 2.0 >= images_scaled_shape_[1])
      self.assertTrue(
          images_original_shape_[2] * 0.5 <= images_scaled_shape_[2])
      self.assertTrue(
          images_original_shape_[2] * 2.0 >= images_scaled_shape_[2]) 
Example #25
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomAdjustBrightness(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_brightness, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_bright = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_bright_shape = tf.shape(images_bright)
    with self.test_session() as sess:
      (image_original_shape_, image_bright_shape_) = sess.run(
          [image_original_shape, image_bright_shape])
      self.assertAllEqual(image_original_shape_, image_bright_shape_) 
Example #26
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomAdjustContrast(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_contrast, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_contrast = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_contrast_shape = tf.shape(images_contrast)
    with self.test_session() as sess:
      (image_original_shape_, image_contrast_shape_) = sess.run(
          [image_original_shape, image_contrast_shape])
      self.assertAllEqual(image_original_shape_, image_contrast_shape_) 
Example #27
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomAdjustHue(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_hue, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_hue = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_hue_shape = tf.shape(images_hue)
    with self.test_session() as sess:
      (image_original_shape_, image_hue_shape_) = sess.run(
          [image_original_shape, image_hue_shape])
      self.assertAllEqual(image_original_shape_, image_hue_shape_) 
Example #28
Source File: preprocessor_test.py    From DOTA_models with Apache License 2.0
def testNormalizeImage(self):
    preprocess_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 256,
        'target_minval': -1,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    images_expected = self.expectedImagesAfterNormalization()

    with self.test_session() as sess:
      (images_, images_expected_) = sess.run(
          [images, images_expected])
      images_shape_ = images_.shape
      images_expected_shape_ = images_expected_.shape
      expected_shape = [1, 4, 4, 3]
      self.assertAllEqual(images_expected_shape_, images_shape_)
      self.assertAllEqual(images_shape_, expected_shape)
      self.assertAllClose(images_, images_expected_) 
Example #29
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testStrictRandomCropImageWithKeypoints(self):
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypoints()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      (new_image, new_boxes, new_labels,
       new_keypoints) = preprocessor._strict_random_crop_image(
           image, boxes, labels, keypoints=keypoints)
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_keypoints = sess.run([
            new_image, new_boxes, new_labels, new_keypoints])

        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        expected_keypoints = np.array([
            [[np.nan, np.nan],
             [np.nan, np.nan],
             [np.nan, np.nan]],
            [[0.38947368, 0.07173],
             [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]]
        ], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            new_keypoints.flatten(), expected_keypoints.flatten()) 
Example #30
Source File: preprocessor_test.py    From object_detector_app with MIT License
def testRandomCropToAspectRatio(self):
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]

    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]

    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {
        'aspect_ratio': 2.0
    })]
    cropped_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)

    cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
    cropped_boxes = cropped_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_shape = tf.shape(boxes)
    cropped_boxes_shape = tf.shape(cropped_boxes)
    images_shape = tf.shape(images)
    cropped_images_shape = tf.shape(cropped_images)

    with self.test_session() as sess:
      (boxes_shape_, cropped_boxes_shape_, images_shape_,
       cropped_images_shape_) = sess.run([
           boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape
       ])
      self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
      self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
      self.assertEqual(images_shape_[2], cropped_images_shape_[2])