Python cv2.COLOR_BGR2HSV Examples

The following are 30 code examples of cv2.COLOR_BGR2HSV(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
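Before diving into the project examples, here is a minimal, self-contained sketch of the typical cv2.COLOR_BGR2HSV workflow. It is not taken from any of the projects below; the file name and the green hue band are placeholders. For 8-bit images OpenCV stores hue in [0, 179] and saturation/value in [0, 255].

import cv2
import numpy as np

# cv2.imread returns pixels in BGR order, not RGB.
img_bgr = cv2.imread("sample.jpg")  # placeholder path

# Convert to HSV; for uint8 images H is in [0, 179], S and V are in [0, 255].
img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)

# Keep only pixels whose hue falls in a rough "green" band (illustrative values).
mask = cv2.inRange(img_hsv, np.array([35, 40, 40]), np.array([85, 255, 255]))
green_only = cv2.bitwise_and(img_bgr, img_bgr, mask=mask)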
Example #1
Source File: tracking.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License 8 votes
def _update_mean_shift_bookkeeping(self, frame, box_grouped):
        """Preprocess all valid bounding boxes for mean-shift tracking

            This method preprocesses all relevant bounding boxes (those that
            have been detected by both mean-shift tracking and saliency) for
            the next mean-shift step.

            :param frame: current BGR input frame
            :param box_grouped: list of bounding boxes
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        self.object_roi = []
        self.object_box = []
        for box in box_grouped:
            (x, y, w, h) = box
            hsv_roi = hsv[y:y + h, x:x + w]
            mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                               np.array((180., 255., 255.)))
            roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
            cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

            self.object_roi.append(roi_hist)
            self.object_box.append(box) 
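The histograms stored in self.object_roi are meant to feed the next mean-shift step mentioned in the docstring. As a hedged illustration only (this is not code from the project), back projection and mean shift on one stored histogram might look like the sketch below, where roi_hist, box and frame stand for a stored histogram, its bounding box and the next frame:

# Hypothetical follow-up step: back-project the stored hue histogram and
# let mean shift move the box toward the densest region of the probability map.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
prob = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
ret, box = cv2.meanShift(prob, box, term_crit)  # box is (x, y, w, h)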
Example #2
Source File: main.py    From Traffic-Sign-Detection with MIT License 8 votes
def remove_other_color(img):
    frame = cv2.GaussianBlur(img, (3,3), 0) 
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV
    lower_blue = np.array([100,128,0])
    upper_blue = np.array([215,255,255])
    # Threshold the HSV image to get only blue colors
    mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)

    lower_white = np.array([0,0,128], dtype=np.uint8)
    upper_white = np.array([255,255,255], dtype=np.uint8)
    # Threshold the HSV image to get only white colors
    mask_white = cv2.inRange(hsv, lower_white, upper_white)

    lower_black = np.array([0,0,0], dtype=np.uint8)
    upper_black = np.array([170,150,50], dtype=np.uint8)

    mask_black = cv2.inRange(hsv, lower_black, upper_black)

    mask_1 = cv2.bitwise_or(mask_blue, mask_white)
    mask = cv2.bitwise_or(mask_1, mask_black)
    # Bitwise-AND mask and original image
    #res = cv2.bitwise_and(frame,frame, mask= mask)
    return mask 
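A small caveat on the ranges above: for 8-bit images OpenCV's hue channel only runs from 0 to 179, so the upper hue bound of 215 behaves exactly like 179; the saturation and value bounds apply as written.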
Example #3
Source File: datasets.py    From pruning_yolov3 with GNU General Public License v3.0 7 votes
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    x = (np.random.uniform(-1, 1, 3) * np.array([hgain, sgain, vgain]) + 1).astype(np.float32)  # random gains
    img_hsv = (cv2.cvtColor(img, cv2.COLOR_BGR2HSV) * x.reshape((1, 1, 3))).clip(None, 255).astype(np.uint8)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed


# def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):  # original version
#     # SV augmentation by 50%
#     img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # hue, sat, val
#
#     S = img_hsv[:, :, 1].astype(np.float32)  # saturation
#     V = img_hsv[:, :, 2].astype(np.float32)  # value
#
#     a = random.uniform(-1, 1) * sgain + 1
#     b = random.uniform(-1, 1) * vgain + 1
#     S *= a
#     V *= b
#
#     img_hsv[:, :, 1] = S if a < 1 else S.clip(None, 255)
#     img_hsv[:, :, 2] = V if b < 1 else V.clip(None, 255)
#     cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed 
Example #4
Source File: thresholding.py    From smashscan with MIT License 7 votes
def standard_test(self):
        for fnum in range(self.start_fnum, self.stop_fnum):
            frame = util.get_frame(self.capture, fnum)
            frame = frame[280:, :]
            frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            mask = cv2.inRange(frame_HSV, (self.low_H, self.low_S, self.low_V),
                (self.high_H, self.high_S, self.high_V))

            res = cv2.bitwise_and(frame, frame, mask=mask)
            res_inv = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(mask))

            cv2.imshow(self.window_name, mask)
            cv2.imshow('Video Capture AND', res)
            cv2.imshow('Video Capture INV', res_inv)

            if cv2.waitKey(30) & 0xFF == ord('q'):
                break


    # A number of methods corresponding to the various trackbars available. 
Example #5
Source File: inklings_tracker.py    From IkaLog with Apache License 2.0 7 votes
def _detect_team_color(self, pixels):
        criteria = \
            (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

        pixels = np.array(pixels.reshape((-1, 3)), dtype=np.float32)

        ret, label, center = cv2.kmeans(
            pixels, 2, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

        # one is black, the other is the team color.

        colors = np.array(center, dtype=np.uint8).reshape((1, 2, 3))
        colors_hsv = cv2.cvtColor(colors, cv2.COLOR_BGR2HSV)
        x = np.argmax(colors_hsv[:, :, 2])
        team_color_bgr = colors[0, x, :]
        team_color_hsv = colors_hsv[0, x, :]

        return {
            'rgb': cv2.cvtColor(colors, cv2.COLOR_BGR2RGB).tolist()[0][x],
            'hsv': cv2.cvtColor(colors, cv2.COLOR_BGR2HSV).tolist()[0][x],
        } 
Example #6
Source File: cv2_aug_transforms.py    From openseg.pytorch with MIT License 6 votes
def __call__(self, img, labelmap=None, maskmap=None):
        assert isinstance(img, np.ndarray)
        assert labelmap is None or isinstance(labelmap, np.ndarray)
        assert maskmap is None or isinstance(maskmap, np.ndarray)

        if random.random() > self.ratio:
            return img, labelmap, maskmap

        img = img.astype(np.float32)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img[:, :, 0] += random.uniform(-self.delta, self.delta)
        img[:, :, 0][img[:, :, 0] > 360] -= 360
        img[:, :, 0][img[:, :, 0] < 0] += 360
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        img = np.clip(img, 0, 255).astype(np.uint8)
        return img, labelmap, maskmap 
Example #7
Source File: data.py    From FRRN with MIT License 6 votes
def augment(self, image, target):
        """Augments the data.

        Args:
            image: The image.
            target: The target image.

        Returns:
            A tuple of augmented image and target image.
        """
        # Sample the color factor.
        factor = np.random.uniform(self._min_delta, self._max_delta)

        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        hsv_image[:, :, 1] *= factor
        hsv_image[:, :, 1] = np.clip(hsv_image[:, :, 1], 0.0, 1.0)

        image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)

        return image, target 
Example #8
Source File: data.py    From FRRN with MIT License 6 votes
def augment(self, image, target):
        """Augments the data.

        Args:
            image: The image.
            target: The target image.

        Returns:
            A tuple of augmented image and target image.
        """
        # Sample the color factor.
        factor = np.random.uniform(self._min_delta, self._max_delta)

        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        hsv_image[:, :, 0] += factor

        # Wrap the hue values back into the valid [0, 360] range.
        hsv_image[:, :, 0] += 360 * (hsv_image[:, :, 0] < 360)
        hsv_image[:, :, 0] -= 360 * (hsv_image[:, :, 0] > 360)

        image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)

        return image, target 
Example #9
Source File: ChickenVision.py    From ChickenVision with MIT License 6 votes
def threshold_video(lower_color, upper_color, blur):


    # Convert BGR to HSV
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

    # Threshold the HSV image to keep only colors within the given range
    mask = cv2.inRange(hsv, lower_color, upper_color)

    # Returns the masked image

    return mask



# Finds the tape targets from the masked image and displays them on original stream + network tables
Example #10
Source File: mobileface_makeup.py    From MobileFace with MIT License 6 votes
def face_whiten(self, im_bgr, whiten_rate=0.15):
        """Face whitening.
        Parameters
        ----------
        im_bgr: mat 
            The Mat data format of reading from the original image using opencv.
        whiten_rate: float, default is 0.15
            The face whitening rate.
        Returns
        -------
        type: mat
            The result of face whitening.
        """  
        im_hsv = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2HSV)
        im_hsv[:,:,-1] = np.minimum(im_hsv[:,:,-1] * (1 + whiten_rate), 255).astype('uint8')
        im_whiten = cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR)
        return im_whiten 
Example #11
Source File: object_detection_using_color.py    From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License 6 votes
def capture_histogram(path_of_sample):

    # read the image
    color = cv2.imread(path_of_sample)

    # convert to HSV
    color_hsv = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)

    # compute the histogram
    object_hist = cv2.calcHist([color_hsv],      # image
                               [0, 1],           # channels
                               None,             # no mask
                               [180, 256],       # size of histogram
                               [0, 180, 0, 256]  # channel values
                               )

    # min max normalization
    cv2.normalize(object_hist, object_hist, 0, 255, cv2.NORM_MINMAX)

    return object_hist 
Example #12
Source File: camshift_object_tracker.py    From automl-video-ondevice with Apache License 2.0 6 votes
def run(self, frame):
    """Processes a single frame.

    Args:
      frame: The np.array image frame.
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0, 1], self.roi_hist, [0, 180, 0, 255], 1)
    _, self.box = cv2.CamShift(dst, self.box, self.term_crit)

    (x, y, x2, y2) = self.glob_to_relative(
        (self.box[0], self.box[1], self.box[0] + self.box[2],
         self.box[1] + self.box[3]))

    self.annotation.bbox.left = x
    self.annotation.bbox.top = y
    self.annotation.bbox.right = x2
    self.annotation.bbox.bottom = y2

    self.age = self.age + 1
    self.degrade() 
Example #13
Source File: camshift_object_tracker.py    From automl-video-ondevice with Apache License 2.0 6 votes
def calculate_roi_hist(self, frame):
    """Calculates region of interest histogram.

    Args:
      frame: The np.array image frame to calculate ROI histogram for.
    """
    (x, y, w, h) = self.box
    roi = frame[y:y + h, x:x + w]

    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0, 1], mask, [180, 255],
                            [0, 180, 0, 255])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    self.roi_hist = roi_hist

  # Run this every frame 
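Taken together with Example #12, this shows the usual CamShift pattern: calculate_roi_hist seeds self.roi_hist once from the initial box, and run then back-projects that histogram on every subsequent frame to update the box.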
Example #14
Source File: result_detail.py    From IkaLog with Apache License 2.0 6 votes
def analyze_team_colors(self, context, img):
        # Infer the team colors from the screenshot
        assert 'won' in context['game']
        assert img is not None

        if context['game']['won']:
            my_team_color_bgr = img[115:116, 1228:1229]
            counter_team_color_bgr = img[452:453, 1228:1229]
        else:
            counter_team_color_bgr = img[115:116, 1228:1229]
            my_team_color_bgr = img[452:453, 1228:1229]

        my_team_color = {
            'rgb': cv2.cvtColor(my_team_color_bgr, cv2.COLOR_BGR2RGB).tolist()[0][0],
            'hsv': cv2.cvtColor(my_team_color_bgr, cv2.COLOR_BGR2HSV).tolist()[0][0],
        }

        counter_team_color = {
            'rgb': cv2.cvtColor(counter_team_color_bgr, cv2.COLOR_BGR2RGB).tolist()[0][0],
            'hsv': cv2.cvtColor(counter_team_color_bgr, cv2.COLOR_BGR2HSV).tolist()[0][0],
        }

        return (my_team_color, counter_team_color) 
Example #15
Source File: image.py    From DeepForest with MIT License 6 votes
def __call__(self, image):
        """ Apply a visual effect on the image.

        Args
            image: Image to adjust
        """

        if self.contrast_factor:
            image = adjust_contrast(image, self.contrast_factor)
        if self.brightness_delta:
            image = adjust_brightness(image, self.brightness_delta)

        if self.hue_delta or self.saturation_factor:

            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

            if self.hue_delta:
                image = adjust_hue(image, self.hue_delta)
            if self.saturation_factor:
                image = adjust_saturation(image, self.saturation_factor)

            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

        return image 
Example #16
Source File: filters.py    From IkaLog with Apache License 2.0 6 votes
def _run_filter(self, img_bgr=None, img_gray=None):
        if (img_bgr is None):
            return self._run_filter_gray_image(img_gray)

        # From the color image, build a black-and-white image that keeps only the white regions

        assert(len(img_bgr.shape) == 3)
        assert(img_bgr.shape[2] == 3)

        sat_min = min(self.sat_range)
        sat_max = max(self.sat_range)
        vis_min = min(self.visibility_range)
        vis_max = max(self.visibility_range)

        assert(sat_min >= 0 and sat_max <= 256)
        assert(vis_min >= 0 and vis_max <= 256)

        img_hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
        img_match_s = cv2.inRange(img_hsv[:, :, 1], sat_min, sat_max)
        img_match_v = cv2.inRange(img_hsv[:, :, 2], vis_min, vis_max)
        img_match = img_match_s & img_match_v
        return img_match 
Example #17
Source File: visual_augmentation.py    From face_landmark with Apache License 2.0 6 votes
def __call__(self, image):


        if self.contrast_range is not None:
            contrast_factor = _uniform(self.contrast_range)
            image = adjust_contrast(image,contrast_factor)
        if self.brightness_range is not None:
            brightness_delta = _uniform(self.brightness_range)
            image = adjust_brightness(image, brightness_delta)

        if self.hue_range is not None or self.saturation_range is not None:

            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

            if self.hue_range is not None:
                hue_delta = _uniform(self.hue_range)
                image = adjust_hue(image, hue_delta)

            if self.saturation_range is not None:
                saturation_factor = _uniform(self.saturation_range)
                image = adjust_saturation(image, saturation_factor)

            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

        return image 
Example #18
Source File: __init__.py    From uiautomator2 with MIT License 6 votes
def brightness(self, im):
        '''
        Return the brightness of an image
        Args:
            im(numpy): image

        Returns:
            float, average brightness of an image
        '''
        im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(im_hsv)
        height, width = v.shape[:2]
        total_bright = 0
        for i in v:
            total_bright = total_bright + sum(i)
        return float(total_bright) / (height * width) 
Example #19
Source File: special_gauge.py    From IkaLog with Apache License 2.0 5 votes
def match_no_cache(self, context):
        if self.is_another_scene_matched(context, 'GameTimerIcon') == False:
            return False

        frame = context['engine']['frame']

        img = frame[34:34+102, 1117:1117+102]
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        img_filtered = img_hsv[:, :, 1]
        img_filtered[img_hsv[:, :, 1] > 64] = 255
        img_filtered[img_hsv[:, :, 2] > 64] = 255
        img_filtered[img_filtered <= 64] = 0
        img_masked = img_filtered & self._mask_gauge[:, :, 0]
        # cv2.imshow('gauge', img_masked)

        pixels = np.sum(img_masked) / 255
        value = int(pixels / self._gauge_pixels * 100)
        last_value = context['game'].get('special_gauge', None)
        last_charged = context['game'].get('special_gauge_charged', False)

        charged = False
        if value > 95:
            img_white = matcher.MM_WHITE()(frame[34:34+102, 1117:1117+102, :])
            img_white_masked = img_white & self._mask_gauge[:, :, 0]
            white_score = np.sum(img_white_masked / 255)
            charged = (white_score > 0)

        if value != last_value:
            context['game']['special_gauge'] = value
            self._call_plugins('on_game_special_gauge_update')

        if (not last_charged) and (charged):
            self._call_plugins('on_game_special_gauge_charged')
        context['game']['special_gauge_charged'] = charged

        return False 
Example #20
Source File: data_generator.py    From GCA-Matting with MIT License 5 votes
def __call__(self, sample):
        fg, alpha = sample['fg'], sample['alpha']
        # if alpha is all 0 skip
        if np.all(alpha==0):
            return sample
        # convert to HSV space, convert to float32 image to keep precision during space conversion.
        fg = cv2.cvtColor(fg.astype(np.float32)/255.0, cv2.COLOR_BGR2HSV)
        # Hue noise
        hue_jitter = np.random.randint(-40, 40)
        fg[:, :, 0] = np.remainder(fg[:, :, 0].astype(np.float32) + hue_jitter, 360)
        # Saturation noise
        sat_bar = fg[:, :, 1][alpha > 0].mean()
        sat_jitter = np.random.rand()*(1.1 - sat_bar)/5 - (1.1 - sat_bar) / 10
        sat = fg[:, :, 1]
        sat = np.abs(sat + sat_jitter)
        sat[sat>1] = 2 - sat[sat>1]
        fg[:, :, 1] = sat
        # Value noise
        val_bar = fg[:, :, 2][alpha > 0].mean()
        val_jitter = np.random.rand()*(1.1 - val_bar)/5-(1.1 - val_bar) / 10
        val = fg[:, :, 2]
        val = np.abs(val + val_jitter)
        val[val>1] = 2 - val[val>1]
        fg[:, :, 2] = val
        # convert back to BGR space
        fg = cv2.cvtColor(fg, cv2.COLOR_HSV2BGR)
        sample['fg'] = fg*255

        return sample 
Example #21
Source File: helpers.py    From Color-Tracker with MIT License 5 votes
def find_object_contours(image: np.ndarray, hsv_lower_value: Union[Tuple[int], List[int]],
                         hsv_upper_value: Union[Tuple[int], List[int]], kernel: np.ndarray):
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, tuple(hsv_lower_value), tuple(hsv_upper_value))
    if kernel is not None:
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
    return cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2] 
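The trailing [-2] is a common trick for OpenCV version compatibility: cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but only (contours, hierarchy) in 2.x and 4.x, so the second-to-last element is always the contour list.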
Example #22
Source File: augmentations.py    From DRFNet with MIT License 5 votes
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels 
Example #23
Source File: object_detection_using_color.py    From Hands-On-Machine-Learning-with-OpenCV-4 with MIT License 5 votes
def locate_object(frame, object_hist):

    # convert to HSV
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # apply back projection to image using object_hist as
    # the model histogram
    object_segment = cv2.calcBackProject(
        [hsv_frame], [0, 1], object_hist, [0, 180, 0, 256], 1)

    # find the contours
    img, contours, _ = cv2.findContours(
        object_segment,
        cv2.RETR_TREE,
        cv2.CHAIN_APPROX_SIMPLE)

    flag = None
    max_area = 0

    # find the contour with the greatest area
    for (i, c) in enumerate(contours):
        area = cv2.contourArea(c)
        if area > max_area:
            max_area = area
            flag = i

    # get the rectangle
    if flag is not None and max_area > 1000:
        cnt = contours[flag]
        coords = cv2.boundingRect(cnt)
        return coords

    return None


# compute the color histogram 
Example #24
Source File: data_augment.py    From DRFNet with MIT License 5 votes
def _distort(image):
    def _convert(image, alpha=1, beta=0):
        tmp = image.astype(float) * alpha + beta
        tmp[tmp < 0] = 0
        tmp[tmp > 255] = 255
        image[:] = tmp

    image = image.copy()

    if random.randrange(2):
        _convert(image, beta=random.uniform(-32, 32))

    if random.randrange(2):
        _convert(image, alpha=random.uniform(0.5, 1.5))

    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    if random.randrange(2):
        tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
        tmp %= 180
        image[:, :, 0] = tmp

    if random.randrange(2):
        _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))

    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image 
Example #25
Source File: ocr.py    From smashscan with MIT License 5 votes
def ocr_test(img, hsv_flag, avg_flag=False, gau_flag=False,
    med_flag=False, bil_flag=False, inv_flag=True):

    # Create a grayscale and HSV copy of the input image.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # If the HSV flag is enabled, keep white OR red, i.e. the complement of (high S AND mid H)
    if hsv_flag:
        mask = cv2.inRange(img_hsv, (15, 50, 0), (160, 255, 255))
        result_img = cv2.bitwise_and(img_gray, img_gray,
            mask=cv2.bitwise_not(mask))
    else:
        result_img = img_gray

    # Apply a post blurring filter according to the input flag given.
    # https://docs.opencv.org/3.4.5/d4/d13/tutorial_py_filtering.html
    if avg_flag:
        result_img = cv2.blur(result_img, (5, 5))
    elif gau_flag:
        result_img = cv2.GaussianBlur(result_img, (5, 5), 0)
    elif med_flag:
        result_img = cv2.medianBlur(result_img, 5)
    elif bil_flag:
        result_img = cv2.bilateralFilter(result_img, 9, 75, 75)

    # Invert the image to give the image a black on white background.
    if inv_flag:
        result_img = cv2.bitwise_not(result_img)

    display_ocr_test_flags(hsv_flag, avg_flag, gau_flag,
        med_flag, bil_flag, inv_flag)
    show_ocr_result(result_img)


# Display the OCR test flags in a structured format. 
Example #26
Source File: augmentations.py    From Grid-Anchor-based-Image-Cropping-Pytorch with MIT License 5 votes
def __call__(self, image, boxes=None, labels=None):
        if self.current == 'BGR' and self.transform == 'HSV':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif self.current == 'HSV' and self.transform == 'BGR':
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, boxes, labels 
Example #27
Source File: set_hand_histogram.py    From Sign-Language-Interpreter-using-Deep-Learning with MIT License 5 votes
def get_hand_hist():
	cam = cv2.VideoCapture(1)
	if cam.read()[0]==False:
		cam = cv2.VideoCapture(0)
	x, y, w, h = 300, 100, 300, 300
	flagPressedC, flagPressedS = False, False
	imgCrop = None
	while True:
		img = cam.read()[1]
		img = cv2.flip(img, 1)
		img = cv2.resize(img, (640, 480))
		hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
		
		keypress = cv2.waitKey(1)
		if keypress == ord('c'):		
			hsvCrop = cv2.cvtColor(imgCrop, cv2.COLOR_BGR2HSV)
			flagPressedC = True
			hist = cv2.calcHist([hsvCrop], [0, 1], None, [180, 256], [0, 180, 0, 256])
			cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
		elif keypress == ord('s'):
			flagPressedS = True	
			break
		if flagPressedC:	
			dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
			dst1 = dst.copy()
			disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,10))
			cv2.filter2D(dst,-1,disc,dst)
			blur = cv2.GaussianBlur(dst, (11,11), 0)
			blur = cv2.medianBlur(blur, 15)
			ret,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
			thresh = cv2.merge((thresh,thresh,thresh))
			#cv2.imshow("res", res)
			cv2.imshow("Thresh", thresh)
		if not flagPressedS:
			imgCrop = build_squares(img)
		#cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)
		cv2.imshow("Set hand histogram", img)
	cam.release()
	cv2.destroyAllWindows()
	with open("hist", "wb") as f:
		pickle.dump(hist, f) 
Example #28
Source File: final.py    From Sign-Language-Interpreter-using-Deep-Learning with MIT License 5 votes
def get_img_contour_thresh(img):
	img = cv2.flip(img, 1)
	imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
	dst = cv2.calcBackProject([imgHSV], [0, 1], hist, [0, 180, 0, 256], 1)
	disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,10))
	cv2.filter2D(dst,-1,disc,dst)
	blur = cv2.GaussianBlur(dst, (11,11), 0)
	blur = cv2.medianBlur(blur, 15)
	thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
	thresh = cv2.merge((thresh,thresh,thresh))
	thresh = cv2.cvtColor(thresh, cv2.COLOR_BGR2GRAY)
	thresh = thresh[y:y+h, x:x+w]
	contours = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[0]
	return img, contours, thresh 
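Note that hist, x, y, w and h are module-level names in the original file; hist is presumably the hand histogram captured and pickled in Example #27 (set_hand_histogram.py).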
Example #29
Source File: features.py    From OpenMosaic with GNU General Public License v3.0 5 votes
def extractFeature(image):
    entry = {}
    entry["b"] = getAverageColor(image, 0, 256)
    entry["g"] = getAverageColor(image, 1, 256)
    entry["r"] = getAverageColor(image, 2, 256)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    entry["h"] = getAverageColor(image, 0, 180)
    entry["s"] = getAverageColor(image, 1, 256)
    entry["v"] = getAverageColor(image, 2, 256)
    return entry 
Example #30
Source File: image_functions.py    From niryo_one_ros with GNU General Public License v3.0 5 votes
def threshold_hsv(img, list_min_v, list_max_v, reverse_hue=False, use_s_prime=False):
    """
    Take a BGR image (OpenCV imread result) and return a thresholded image
    according to the given HSV (Hue, Saturation, Value) bounds.
    A pixel is kept (255 in the output) only if its value lies between min_v and max_v on every channel
    :param img: BGR image
    :param list_min_v: list corresponding to [min_value_H, min_value_S, min_value_V]
    :param list_max_v: list corresponding to [max_value_H, max_value_S, max_value_V]
    :param use_s_prime: Bool -> True to use the S channel as S' = S x V instead of the classic S
    :param reverse_hue: useful for red, whose hue lies at both ends of the range
    :return: thresholded image
    """
    frame_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    if use_s_prime:
        frame_hsv[:, :, 1] = (1. / 255) * frame_hsv[:, :, 1] * frame_hsv[:, :, 2].astype(np.uint8)

    if not reverse_hue:
        return cv2.inRange(frame_hsv, tuple(list_min_v), tuple(list_max_v))
    else:
        list_min_v_c = list(list_min_v)
        list_max_v_c = list(list_max_v)
        lower_bound_red, higher_bound_red = sorted([list_min_v_c[0], list_max_v_c[0]])
        list_min_v_c[0], list_max_v_c[0] = 0, lower_bound_red
        low_red_im = cv2.inRange(frame_hsv, tuple(list_min_v_c), tuple(list_max_v_c))
        list_min_v_c[0], list_max_v_c[0] = higher_bound_red, 179
        high_red_im = cv2.inRange(frame_hsv, tuple(list_min_v_c), tuple(list_max_v_c))
        return cv2.addWeighted(low_red_im, 1.0, high_red_im, 1.0, 0)
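As a usage illustration only (the threshold values below are hypothetical, not from the niryo_one_ros project), the reverse_hue branch can be exercised like this to isolate red, whose hue wraps around the 0/179 boundary:

import cv2

img = cv2.imread("scene.jpg")  # placeholder path
# Illustrative bounds: with reverse_hue=True, hues below 10 and above 170 are kept,
# together with saturation and value in [80, 255].
red_mask = threshold_hsv(img, [10, 80, 80], [170, 255, 255], reverse_hue=True)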