Python cv2.bitwise_and() Examples

The following are 30 code examples of cv2.bitwise_and(), drawn from open source projects. The source file, project, and license for each example are noted above it.
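Before the examples, here is a minimal sketch of the pattern most of them share: passing the same image as both inputs and letting the optional mask argument decide which pixels survive. The input path and window name below are placeholders.

import cv2
import numpy as np

img = cv2.imread('input.jpg')  # placeholder path
# Single-channel mask: a filled white circle on a black background.
mask = np.zeros(img.shape[:2], np.uint8)
cv2.circle(mask, (img.shape[1] // 2, img.shape[0] // 2), 100, 255, -1)
# ANDing an image with itself is a no-op, so only the mask matters:
# pixels where the mask is nonzero are kept, everything else becomes black.
res = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('masked', res)
cv2.waitKey(0)

When two different images are passed instead (as in Example #27 below), the function combines them bit-by-bit, pixel-by-pixel.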
Example #1
Source File: thresholding.py    From smashscan with MIT License
def contour_filter(self, frame):
        # OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x drops the first value.
        _, contours, _ = cv2.findContours(frame,
            cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        new_frame = np.zeros(frame.shape, np.uint8)
        for i, contour in enumerate(contours):
            c_area = cv2.contourArea(contour)
            if self.contour_min_area <= c_area <= self.contour_max_area:
                mask = np.zeros(frame.shape, np.uint8)
                cv2.drawContours(mask, contours, i, 255, cv2.FILLED)
                mask = cv2.bitwise_and(frame, mask)
                new_frame = cv2.bitwise_or(new_frame, mask)
        frame = new_frame

        if self.contour_disp_flag:
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
            cv2.drawContours(frame, contours, -1, (255, 0, 0), 1)

        return frame


Example #2
Source File: pySaliencyMap.py    From pliers with BSD 3-Clause "New" or "Revised" License
def SMGetSalientRegion(self, src):
        # get a binarized saliency map
        binarized_SM = self.SMGetBinarizedSM(src)
        # GrabCut
        img = src.copy()
        mask = np.where(
            (binarized_SM != 0), cv2.GC_PR_FGD, cv2.GC_PR_BGD).astype('uint8')
        bgdmodel = np.zeros((1, 65), np.float64)
        fgdmodel = np.zeros((1, 65), np.float64)
        rect = (0, 0, 1, 1)  # dummy
        iterCount = 1
        cv2.grabCut(img, mask=mask, rect=rect, bgdModel=bgdmodel,
                    fgdModel=fgdmodel, iterCount=iterCount, mode=cv2.GC_INIT_WITH_MASK)
        # post-processing
        mask_out = np.where(
            (mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255, 0).astype('uint8')
        output = cv2.bitwise_and(img, img, mask=mask_out)
        return output 
Example #3
Source File: main.py    From Traffic-Sign-Detection with MIT License
def remove_other_color(img):
    frame = cv2.GaussianBlur(img, (3,3), 0) 
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV (OpenCV hue only goes up to 179,
    # so the upper bound of 215 effectively means "hue >= 100")
    lower_blue = np.array([100,128,0])
    upper_blue = np.array([215,255,255])
    # Threshold the HSV image to get only blue colors
    mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)

    lower_white = np.array([0,0,128], dtype=np.uint8)
    upper_white = np.array([255,255,255], dtype=np.uint8)
    # Threshold the HSV image to get only white colors
    mask_white = cv2.inRange(hsv, lower_white, upper_white)

    lower_black = np.array([0,0,0], dtype=np.uint8)
    upper_black = np.array([170,150,50], dtype=np.uint8)

    mask_black = cv2.inRange(hsv, lower_black, upper_black)

    mask_1 = cv2.bitwise_or(mask_blue, mask_white)
    mask = cv2.bitwise_or(mask_1, mask_black)
    # Bitwise-AND mask and original image
    #res = cv2.bitwise_and(frame,frame, mask= mask)
    return mask 
Example #4
Source File: masterForgery.py    From signature_extractor with MIT License
def getSignature(img):
    imgSize = np.shape(img)

    gImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Adaptive Thresholding requires the blocksize to be odd and bigger than 1
    blockSize = imgSize[0] // 8 // 2 * 2 + 1  # integer division keeps blockSize an odd int
    if blockSize <= 1:
        blockSize = imgSize[0] // 2 * 2 + 1
    const = 10

    mask = cv2.adaptiveThreshold(gImg, maxValue = 255, adaptiveMethod = cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType = cv2.THRESH_BINARY, blockSize = blockSize, C = const)
    rmask = cv2.bitwise_not(mask)

    return (cv2.bitwise_and(img, img, mask=rmask), rmask)

Example #5
Source File: single_roi_tracker.py    From ethoscope with GNU General Public License v3.0
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)

        cv2.erode(self._buff_grey, self._erode_kern, dst=self._buff_grey)

        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Example #6
Source File: thresholding.py    From smashscan with MIT License
def standard_test(self):
        for fnum in range(self.start_fnum, self.stop_fnum):
            frame = util.get_frame(self.capture, fnum)
            frame = frame[280:, :]
            frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            mask = cv2.inRange(frame_HSV, (self.low_H, self.low_S, self.low_V),
                (self.high_H, self.high_S, self.high_V))

            res = cv2.bitwise_and(frame, frame, mask=mask)
            res_inv = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(mask))

            cv2.imshow(self.window_name, mask)
            cv2.imshow('Video Capture AND', res)
            cv2.imshow('Video Capture INV', res_inv)

            if cv2.waitKey(30) & 0xFF == ord('q'):
                break


Example #7
Source File: webcam_track_blobs.py    From pc-drone with MIT License
def add_blobs(crop_frame):
    frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70,50,50])
    upper_green = np.array([85,255,255])
    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)    
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    reversemask=255-mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print "found blobs"
        if len(keypoints) > 4:
            keypoints.sort(key=(lambda s: s.size))
            keypoints=keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print "no blobs"
        im_with_keypoints=crop_frame
        
    return im_with_keypoints #, max_blob_dist, blob_center, keypoint_in_orders 
Example #8
Source File: 05_cartoonizing.py    From OpenCV-3-x-with-Python-By-Example with MIT License
def cartoonize_image(img, ksize=5, sketch_mode=False):
    num_repetitions, sigma_color, sigma_space, ds_factor = 10, 5, 7, 4 
    # Convert image to grayscale 
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
 
    # Apply median filter to the grayscale image 
    img_gray = cv2.medianBlur(img_gray, 7) 
 
    # Detect edges in the image and threshold it 
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=ksize) 
    ret, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV) 
 
    # 'mask' is the sketch of the image 
    if sketch_mode: 
        return cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) 
 
    # Resize the image to a smaller size for faster computation 
    img_small = cv2.resize(img, None, fx=1.0/ds_factor, fy=1.0/ds_factor, interpolation=cv2.INTER_AREA)
 
    # Apply bilateral filter to the image multiple times
    for i in range(num_repetitions): 
        img_small = cv2.bilateralFilter(img_small, ksize, sigma_color, sigma_space) 
 
    img_output = cv2.resize(img_small, None, fx=ds_factor, fy=ds_factor, interpolation=cv2.INTER_LINEAR) 
 
    # Add the thick boundary lines to the image using the 'AND' operator
    dst = cv2.bitwise_and(img_output, img_output, mask=mask)
    return dst 
Example #9
Source File: part-6-lane-finder.py    From pygta5 with GNU General Public License v3.0
def roi(img, vertices):
    
    #blank mask:
    mask = np.zeros_like(img)   
    
    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, 255)
    
    #returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked 
Example #10
Source File: cvutils.py    From 1ZLAB_PyEspCar with GNU General Public License v3.0
def backprojection(target, roihist):
    '''Image preprocessing'''
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
    # Now convolute with circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
    cv2.filter2D(dst,-1,disc,dst)
    # threshold and binary AND
    ret,binary = cv2.threshold(dst,80,255,0)
    # Create the kernel
    kernel = np.ones((5,5), np.uint8)
    iter_time = 1
    # Morphological closing
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel,iterations=iter_time)

    thresh = cv2.merge((binary,binary,binary))
    target_filter = cv2.bitwise_and(target,thresh)
    
    return binary, target_filter 
Example #11
Source File: anomalyMapGen.py    From neural-road-inspector with MIT License
def _filter_by_street_map(img, street_map_path):
	orig_street_img = cv2.imread(street_map_path)

	gray_street_img = cv2.cvtColor(orig_street_img, cv2.COLOR_BGR2GRAY)
	ret, binary_street_mask = cv2.threshold(gray_street_img, 250, 255, cv2.THRESH_BINARY)

	img = img.astype(np.uint8)  # OpenCV bitwise ops do not support 64-bit integer images
	return cv2.bitwise_and(img, img, mask=binary_street_mask)

# TODO: Thresholding?
# Structural Similarity Measure: http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/
Example #12
Source File: color_filtering.py    From Python-Code with MIT License
def get_color(frame, color):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    color_dict = color_list.getColorList()
    mask = cv2.inRange(hsv, color_dict[color][0], color_dict[color][1])
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # Save the mask image
    cv2.imwrite(filename + color + '.jpg', mask)
    # Display the result
    cv2.imshow('Result', res)
    cv2.waitKey(0) 
Example #13
Source File: BackgroundRemove.py    From vidpipe with GNU General Public License v3.0
def processFrame( self, frame_in ):
        # version 1 - moving average
        if self._avg is None:
            self._avg = np.float32( frame_in )
        cv2.accumulateWeighted( frame_in, self._avg, self._speed )
        background = cv2.convertScaleAbs( self._avg )
        active_area = cv2.absdiff( frame_in, background )

        #version 2 - MOG - Gausian Mixture-based Background/Foreground Segmentation Algorithm
        fgmask = self._fgbg.apply( frame_in ,learningRate = 0.01 )
        #active_area = cv2.bitwise_and( frame_in, frame_in, mask = fgmask )

        return fgmask 
Example #14
Source File: Back_sub.py    From virtual-dressing-room with Apache License 2.0
def remove(self,frm):
        # Binarize the accumulated background mask with Otsu's method,
        # then keep only the foreground pixels of the input frame.
        _, self.final_mask = cv2.threshold(self.final_mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        res = cv2.bitwise_and(frm, frm, mask=self.final_mask)
        return res 
Example #15
Source File: multi_fly_tracker.py    From ethoscope with GNU General Public License v3.0
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

        if blur_rad % 2 == 0:
            blur_rad += 1

        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)
        # cv2.imshow("dbg",self._buff_grey)
        cv2.GaussianBlur(self._buff_grey,(blur_rad,blur_rad),1.2, self._buff_grey)
        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)

        #
        mean = cv2.mean(self._buff_grey, mask)

        scale = 128. / mean[0]

        cv2.multiply(self._buff_grey, scale, dst = self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Example #16
Source File: image_transformation.py    From Sign-Language-Recognition with MIT License
def make_background_black(frame):
    """
    Makes everything apart from the main object of interest to be
    black in color.
    """
    logger.debug("Making background black...")

    # Convert from BGR to HSV
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Prepare the first mask.
    # Tuned parameters to match the skin color of the input images...
    lower_boundary = np.array([0, 40, 30], dtype="uint8")
    upper_boundary = np.array([43, 255, 254], dtype="uint8")
    skin_mask = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Apply a series of erosions and dilations to the mask using an
    # elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    skin_mask = cv2.erode(skin_mask, kernel, iterations=2)
    skin_mask = cv2.dilate(skin_mask, kernel, iterations=2)

    # Prepare the second mask
    lower_boundary = np.array([170, 80, 30], dtype="uint8")
    upper_boundary = np.array([180, 255, 250], dtype="uint8")
    skin_mask2 = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Combine the effect of both the masks to create the final frame.
    skin_mask = cv2.addWeighted(skin_mask, 0.5, skin_mask2, 0.5, 0.0)
    # Blur the mask to help remove noise.
    # skin_mask = cv2.medianBlur(skin_mask, 5)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)
    frame = cv2.addWeighted(frame, 1.5, frame_skin, -0.5, 0)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)

    logger.debug("Done!")
    return frame_skin 
Example #17
Source File: coco_dataset.py    From maskrcnn with MIT License
def show_images(self, anchor, n_max=None, image_ids=None):
        iter = self.generate(anchor, n_max, image_ids)
        for data, _ in iter:
            img = data[0][0]
            # rgb -> bgr
            img = np.flip(img, axis=2).astype(np.uint8)
            boxes = data[3][0]
            masks = data[5][0]
            # Exclude zero-padded rows
            idx_pos = np.where(np.any(boxes, axis=1))[0]
            boxes = boxes[idx_pos]
            masks = masks[idx_pos]
            c = [i for i in range(255)[::(255 // boxes.shape[0] - 1)]]
            i = 0
            for bbox, mask in zip(boxes, masks):
                bbox = bbox.astype(int)  # uint8 would overflow coordinates above 255
                mask = mask.astype(np.uint8)
                color = (c[i], c[::-1][i], 0)
                # bbox
                cv2.rectangle(img, (bbox[1], bbox[0]),
                              (bbox[3], bbox[2]), color)
                # # mask
                # mask_img = np.zeros(img.shape, img.dtype)
                # mask_img[:, :] = color
                mask = np.dstack([mask, mask, mask])
                mask[:, :, 0][mask[:, :, 0] == 1] = color[0]
                mask[:, :, 1][mask[:, :, 1] == 1] = color[1]
                mask[:, :, 2][mask[:, :, 2] == 1] = color[2]
                # mask_img = cv2.bitwise_and(mask_img, mask_img, mask=mask)
                cv2.addWeighted(mask, 1, img, 1, 0, img)
                i += 1
            cv2.imshow('img', img)
            cv2.waitKey(0) 
Example #18
Source File: test_extractor.py    From HalloPy with MIT License
def test_contour_extreme_point_tracking(self):
        """Test for tracking extreme_points without optical flow (e.g until calibrated).  """
        # setup
        test_path = utils.get_full_path('docs/material_for_testing/back_ground_removed_frame.jpg')
        test_image = cv2.imread(test_path)

        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)

        cap = cv2.VideoCapture(0)
        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            image = extractor.get_drawn_extreme_contour_points()
            cv2.imshow('test_contour_extreme_point_tracking', image)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Example #19
Source File: test_extractor.py    From HalloPy with MIT License
def test_max_distance_between_top_ext_point_and_palm_center_point(self):
        """Test if max distance is found correctly. """
        # setup
        # todo: use mockito here to mock preprocessing elements
        flags_handler = FlagsHandler()
        detector = Detector(flags_handler)
        extractor = Extractor(flags_handler)

        # Background model preparations.
        bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)

        cap = cv2.VideoCapture(0)
        while flags_handler.quit_flag is False:
            ret, frame = cap.read()
            frame = cv2.flip(frame, 1)

            # Remove background from input frame.
            fgmask = bg_model.apply(frame, learningRate=0)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(frame, frame, mask=fgmask)

            # Clip frames ROI.
            back_ground_removed_clipped = ImageTestTool.clip_roi(res,
                                                                 {'cap_region_x_begin': 0.6, 'cap_region_y_end': 0.6})

            if flags_handler.background_capture_required is True:
                bg_model = cv2.createBackgroundSubtractorMOG2(0, 50)
                flags_handler.background_capture_required = False

            detector.input_frame_for_feature_extraction = back_ground_removed_clipped
            extractor.extract = detector

            # run
            image = extractor.get_drawn_extreme_contour_points()
            cv2.line(image, extractor.palm_center_point, (extractor.ext_top[0], extractor.palm_center_point[
                1] - extractor.max_distance_from_ext_top_point_to_palm_center), (255, 255, 255), thickness=2)
            cv2.imshow('test_max_distance_between_top_ext_point_and_palm_center_point', image)
            flags_handler.keyboard_input = cv2.waitKey(1) 
Example #20
Source File: controller.py    From HalloPy with MIT License
def detected_frame(self, preprocessed_faced_covered_input_frame):
        """Function for removing background from input frame. """
        if self.flag_handler.background_capture_required is True:
            self._bg_model = cv2.createBackgroundSubtractorMOG2(0, self._bg_Sub_Threshold)
            self.flag_handler.background_capture_required = False
        if self._bg_model is not None:
            fgmask = self._bg_model.apply(preprocessed_faced_covered_input_frame, learningRate=self._learning_Rate)
            kernel = np.ones((3, 3), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            res = cv2.bitwise_and(preprocessed_faced_covered_input_frame, preprocessed_faced_covered_input_frame,
                                  mask=fgmask)
            # Clip the ROI.
            y_end = int(self._cap_region_y_end * res.shape[0])
            x_begin = int(self._cap_region_x_begin * res.shape[1])
            self._input_frame_with_hand = res[0:y_end, x_begin:res.shape[1]]
Example #21
Source File: predict_mrcnn.py    From maskrcnn with MIT License
def add_mask(dest_img, mask, bbox, color, image_shape):
    threshold = 0.5
    y1, x1, y2, x2 = bbox
    h, w = y2 - y1, x2 - x1
    logger.debug("y1, x1, y2, x2: %s, h, w: %s", (y1, x1, y2, x2), (h, w))
    logger.debug("mask.shape: %s", mask.shape)
    mask = scipy.misc.imresize(mask, (h, w),
                               interp='bilinear').astype(np.float32)
    # scipy.misc.imresize scales its result to 0-255, so rescale back to 0-1.
    mask /= 255.0
    # Convert to 0 or 1.
    mask = np.where(mask >= threshold, 1, 0).astype(np.uint8)

    # Discard any part of the mask that falls outside the image bounds (0 to image_shape)
    _y1, _x1, _y2, _x2 = max(0, y1), max(0, x1), min(image_shape[0], y2), \
        min(image_shape[1], x2)
    d_y1, d_x1, d_y2, d_x2 = _y1 - y1, _x1 - x1, _y2 - y2, _x2 - x2
    mask = mask[d_y1:h + d_y2, d_x1:w + d_x2]

    # Place the mask into a full-size frame. image_shape is the input image's [h, w]
    fullsize_mask = np.zeros(image_shape, dtype=np.uint8)
    fullsize_mask[_y1:_y2, _x1:_x2] = mask

    logger.debug("mask.shape: %s, image_shape: %s, bbox: %s (%s) ",
                 mask.shape, image_shape, bbox, (y2 - y1, x2 - x1))
    logger.debug("d_y1, d_x1, d_y2, d_x2: %s, mask.shape: %s ",
                 (d_y1, d_x1, d_y2, d_x2), mask.shape)

    # # mask
    mask_image = np.zeros(image_shape + [3], dtype=np.uint8)
    mask_image[:, :] = color
    mask_image = cv2.bitwise_and(mask_image, mask_image, mask=fullsize_mask)
    # mask = np.dstack([mask, mask, mask])
    # mask[:, :, 0][mask[:, :, 0] == 1] = color[0]
    # mask[:, :, 1][mask[:, :, 1] == 1] = color[1]
    # mask[:, :, 2][mask[:, :, 2] == 1] = color[2]
    cv2.addWeighted(mask_image, 1.5, dest_img, 1, 0, dest_img) 
Example #22
Source File: adaptive_bg_tracker.py    From ethoscope with GNU General Public License v3.0
def _pre_process_input_minimal(self, img, mask, t, darker_fg=True):
        blur_rad = int(self._object_expected_size * np.max(img.shape) / 2.0)

        if blur_rad % 2 == 0:
            blur_rad += 1

        if self._buff_grey is None:
            self._buff_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            if mask is None:
                mask = np.ones_like(self._buff_grey) * 255

        cv2.cvtColor(img,cv2.COLOR_BGR2GRAY, self._buff_grey)
        # cv2.imshow("dbg",self._buff_grey)
        cv2.GaussianBlur(self._buff_grey,(blur_rad,blur_rad),1.2, self._buff_grey)
        if darker_fg:
            cv2.subtract(255, self._buff_grey, self._buff_grey)

        #
        mean = cv2.mean(self._buff_grey, mask)

        scale = 128. / mean[0]

        cv2.multiply(self._buff_grey, scale, dst = self._buff_grey)


        if mask is not None:
            cv2.bitwise_and(self._buff_grey, mask, self._buff_grey)
            return self._buff_grey 
Example #23
Source File: common.py    From baxter_demos with Apache License 2.0
def colorSegmentation(img, blur_radius, radius, open_radius, color):

    # Grab the R, G, B channels as separate matrices,
    # use cv2.threshold on each of them,
    # and AND the three images together.
    bw = numpy.ones(img.shape[0:2], numpy.uint8)
    maxvals = [179, 255, 255]
    for i in range(3):
        minval = color[i] - radius
        maxval = color[i] + radius
        if radius > color[i]:
            minval = 0
        elif radius + color[i] > maxvals[i]:
            maxval = maxvals[i]  # clamp the upper bound to the channel maximum

        channel = img[:, :, i]
        retval, minthresh = cv2.threshold(channel, minval, 255, cv2.THRESH_BINARY)
        retval, maxthresh = cv2.threshold(channel, maxval, 255, cv2.THRESH_BINARY_INV)
        bw = cv2.bitwise_and(bw, minthresh)
        bw = cv2.bitwise_and(bw, maxthresh)
    bw *= 255
    
    if open_radius != 0:
        open_kernel = numpy.ones((open_radius, open_radius), numpy.uint8)  # square structuring element

        bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, open_kernel, iterations = 4)

    return bw 
Example #24
Source File: detect_movement.py    From OpenCV-3-x-with-Python-By-Example with MIT License
def frame_diff(prev_frame, cur_frame, next_frame): 
    # Absolute difference between current frame and next frame 
    diff_frames1 = cv2.absdiff(next_frame, cur_frame) 
 
    # Absolute difference between current frame and 
     # previous frame 
    diff_frames2 = cv2.absdiff(cur_frame, prev_frame) 
 
    # Return the result of bitwise 'AND' between the 
     # above two resultant images 
    return cv2.bitwise_and(diff_frames1, diff_frames2) 
 
Example #25
Source File: SkinDetection.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    const = 1
    max_value = 255
    faces_image = cv2.imread("../data/faces.jpeg", 1)

    faces_image = cv2.cvtColor(faces_image, cv2.COLOR_BGR2HSV)
    faces_image_700 = cv2.resize(faces_image, (700, 700))

    hue = faces_image_700[:, :, 0]
    satr = faces_image_700[:, :, 1]
    value = faces_image_700[:, :, 2]

    hsv_images = np.concatenate((hue, satr, value), axis=1)

    _, hue_thresh = cv2.threshold(hue, 10, max_value, cv2.THRESH_BINARY_INV)
    _, satr_thresh = cv2.threshold(satr, 40, max_value, cv2.THRESH_BINARY)

    skin_image = cv2.bitwise_and(hue_thresh, satr_thresh)

    cv2.imshow("Hue Image", hue_thresh)
    cv2.imshow("Saturation Image", satr_thresh)

    cv2.imshow("SKin Detected Image", skin_image)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example #26
Source File: new.py    From Fingers-Detection-using-OpenCV-and-Python with MIT License
def removeBG(frame):
    fgmask = bgModel.apply(frame,learningRate=learningRate)
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

    kernel = np.ones((3, 3), np.uint8)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    res = cv2.bitwise_and(frame, frame, mask=fgmask)
    return res 
Example #27
Source File: LogicalOperation.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    basePath = "../data/"

    imageFileOne = basePath + "4.1.04.tiff"
    imageFileTwo = basePath + "4.1.05.tiff"

    imageOne = cv2.imread(imageFileOne, 1)
    imageTwo = cv2.imread(imageFileTwo, 1)

    imageOneRGB = cv2.cvtColor(imageOne, cv2.COLOR_BGR2RGB)
    imageTwoRGB = cv2.cvtColor(imageTwo, cv2.COLOR_BGR2RGB)

    negativeImage = cv2.bitwise_not(imageOneRGB)
    andImage = cv2.bitwise_and(imageOneRGB, imageTwoRGB)
    orImage = cv2.bitwise_or(imageOneRGB, imageTwoRGB)
    xorImage = cv2.bitwise_xor(imageOneRGB, imageTwoRGB)

    imageNames = [imageOneRGB, imageTwoRGB, negativeImage, andImage, orImage, xorImage]
    imageTitles = ["Image One", "Image Two", "Negative", "AND", "OR", "XOR"]

    for i in range(6):
        plt.subplot(2, 3, i + 1)
        plt.imshow(imageNames[i])
        plt.title(imageTitles[i])
        plt.xticks([])
        plt.yticks([])

    plt.show() 
Example #28
Source File: trappedball_fill.py    From LineFiller with MIT License
def flood_fill_multi(image, max_iter=20000):
    """Perform multi flood fill operations until all valid areas are filled.
    This operation will fill all rest areas, which may result large amount of fills.

    # Arguments
        image: an image. the image should contain white background, black lines and black fills.
               the white area is unfilled area, and the black area is filled area.
        max_iter: max iteration number.
    # Returns
        an array of fills' points.
    """
    print('floodfill')

    unfill_area = image
    filled_area = []

    for _ in range(max_iter):
        points = get_unfilled_point(unfill_area)

        if not len(points) > 0:
            break

        fill = flood_fill_single(unfill_area, (points[0][0], points[0][1]))
        unfill_area = cv2.bitwise_and(unfill_area, fill)

        filled_area.append(np.where(fill == 0))

    return filled_area 
Example #29
Source File: FingerDetection.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def hist_masking(frame, hist):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)

    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
    cv2.filter2D(dst, -1, disc, dst)

    ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)

    # thresh = cv2.dilate(thresh, None, iterations=5)

    thresh = cv2.merge((thresh, thresh, thresh))

    return cv2.bitwise_and(frame, thresh) 
Example #30
Source File: metrics.py    From detection-2016-nipsws with MIT License
def calculate_overlapping(img_mask, gt_mask):
    gt_mask = gt_mask * 1.0  # promote the ground-truth mask to float
    img_and = cv2.bitwise_and(img_mask, gt_mask)
    j = np.count_nonzero(img_and)
    i = np.count_nonzero(gt_mask)
    overlap = float(j) / float(i)
    return overlap