Python cv2.inRange() Examples

The following are 30 code examples of cv2.inRange(), collected from open-source projects. Each example lists its source file, the project it comes from, and that project's license. You may also want to check out the other available functions and classes of the cv2 module.
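Before the project examples, here is a minimal, self-contained sketch of the typical cv2.inRange() workflow: convert a BGR image to HSV, pass a lower and an upper bound, and get back a binary mask that is 255 inside the range and 0 outside. The synthetic test image and the blue bounds used here are illustrative values, not taken from any of the projects below.

import cv2
import numpy as np

# Synthetic BGR test image: left half blue, right half red.
img = np.zeros((100, 200, 3), dtype=np.uint8)
img[:, :100] = (255, 0, 0)   # blue in BGR
img[:, 100:] = (0, 0, 255)   # red in BGR

# Convert to HSV and keep only pixels whose channels fall inside the bounds.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([100, 150, 50])    # illustrative lower bound for blue
upper_blue = np.array([130, 255, 255])   # illustrative upper bound for blue
mask = cv2.inRange(hsv, lower_blue, upper_blue)

print(mask[50, 50], mask[50, 150])   # 255 (blue pixel kept), 0 (red pixel rejected)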
Example #1
Source File: HSV Color Picker.py    From HSV-Color-Picker with MIT License (9 votes)
def pick_color(event,x,y,flags,param):
    if event == cv2.EVENT_LBUTTONDOWN:
        pixel = image_hsv[y,x]

        #HUE, SATURATION, AND VALUE (BRIGHTNESS) RANGES. TOLERANCE COULD BE ADJUSTED.
        # Set range = 0 for hue and range = 1 for saturation and brightness
        # set upper_or_lower = 1 for upper and upper_or_lower = 0 for lower
        hue_upper = check_boundaries(pixel[0], 10, 0, 1)
        hue_lower = check_boundaries(pixel[0], 10, 0, 0)
        saturation_upper = check_boundaries(pixel[1], 10, 1, 1)
        saturation_lower = check_boundaries(pixel[1], 10, 1, 0)
        value_upper = check_boundaries(pixel[2], 40, 1, 1)
        value_lower = check_boundaries(pixel[2], 40, 1, 0)

        upper =  np.array([hue_upper, saturation_upper, value_upper])
        lower =  np.array([hue_lower, saturation_lower, value_lower])
        print(lower, upper)

        #A MONOCHROME MASK FOR A CLEARER VIEW OF THE SELECTED COLOR RANGE
        image_mask = cv2.inRange(image_hsv,lower,upper)
        cv2.imshow("Mask",image_mask) 
Example #2
Source File: main.py    From Traffic-Sign-Detection with MIT License (8 votes)
def remove_other_color(img):
    frame = cv2.GaussianBlur(img, (3,3), 0) 
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV
    lower_blue = np.array([100,128,0])
    upper_blue = np.array([215,255,255])
    # Threshold the HSV image to get only blue colors
    mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)

    lower_white = np.array([0,0,128], dtype=np.uint8)
    upper_white = np.array([255,255,255], dtype=np.uint8)
    # Threshold the HSV image to get only white colors
    mask_white = cv2.inRange(hsv, lower_white, upper_white)

    lower_black = np.array([0,0,0], dtype=np.uint8)
    upper_black = np.array([170,150,50], dtype=np.uint8)

    mask_black = cv2.inRange(hsv, lower_black, upper_black)

    mask_1 = cv2.bitwise_or(mask_blue, mask_white)
    mask = cv2.bitwise_or(mask_1, mask_black)
    # Bitwise-AND mask and original image
    #res = cv2.bitwise_and(frame,frame, mask= mask)
    return mask 
Example #3
Source File: tracking.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License (8 votes)
def _update_mean_shift_bookkeeping(self, frame, box_grouped):
        """Preprocess all valid bounding boxes for mean-shift tracking

            This method preprocesses all relevant bounding boxes (those that
            have been detected by both mean-shift tracking and saliency) for
            the next mean-shift step.

            :param frame: current BGR input frame
            :param box_grouped: list of bounding boxes
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        self.object_roi = []
        self.object_box = []
        for box in box_grouped:
            (x, y, w, h) = box
            hsv_roi = hsv[y:y + h, x:x + w]
            mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                               np.array((180., 255., 255.)))
            roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
            cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

            self.object_roi.append(roi_hist)
            self.object_box.append(box) 
Example #4
Source File: hsv_track.py    From DroneSimLab with MIT License (7 votes)
def find_red(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv,(130,130,180),(255,255,255))
    mask = cv2.erode(mask, np.ones((2,1)) , iterations=1)
    mask = cv2.dilate(mask, None, iterations=3)
    cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
    frame=img.copy()    
    ###based on example from  http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 3:
            cv2.circle(frame, (int(x), int(y)), 12,(0, 255, 255), 2)
    return frame 
Example #5
Source File: follower_p.py    From rosbook with Apache License 2.0 (7 votes)
def image_callback(self, msg):
    image = self.bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_yellow = numpy.array([ 10,  10,  10])
    upper_yellow = numpy.array([255, 255, 250])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    
    h, w, d = image.shape
    search_top = 3*h//4
    search_bot = 3*h//4 + 20
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    M = cv2.moments(mask)
    if M['m00'] > 0:
      cx = int(M['m10']/M['m00'])
      cy = int(M['m01']/M['m00'])
      cv2.circle(image, (cx, cy), 20, (0,0,255), -1)
      # BEGIN CONTROL
      err = cx - w/2
      self.twist.linear.x = 0.2
      self.twist.angular.z = -float(err) / 100
      self.cmd_vel_pub.publish(self.twist)
      # END CONTROL
    cv2.imshow("window", image)
    cv2.waitKey(3) 
Example #6
Source File: color_detection.py    From deepgaze with MIT License (7 votes)
def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):
        """Given an input frame return the black/white mask.
 
        This version of the function does not apply the bitwise
        operations to the original frame; the returned mask contains
        white pixels corresponding to the skin found during the search.
        @param frame the original frame (color)
        """
        #Convert to HSV and eliminate pixels outside the range
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_filtered = cv2.inRange(frame_hsv, self.min_range, self.max_range)
        if(morph_opening==True):
            kernel = np.ones((kernel_size,kernel_size), np.uint8)
            frame_filtered = cv2.morphologyEx(frame_filtered, cv2.MORPH_OPEN, kernel, iterations=iterations)
        #Applying Gaussian Blur
        if(blur==True): 
            frame_filtered = cv2.GaussianBlur(frame_filtered, (kernel_size,kernel_size), 0)
        return frame_filtered 
Example #7
Source File: main.py    From python-turtle-draw-svg with GNU General Public License v3.0 (7 votes)
def drawBitmap(w_image):
    print('Reducing the colors...')
    Z = w_image.reshape((-1, 3))

    # convert to np.float32
    Z = np.float32(Z)

    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS, 10, 1.0)
    global K
    ret, label, center = cv2.kmeans(
        Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res = res.reshape(w_image.shape)
    no = 1
    for i in center:
        sys.stdout.write('\rDrawing: %.2f%% [' % (
            no / K * 100) + '#' * no + ' ' * (K - no) + ']')
        no += 1
        res2 = cv2.inRange(res, i, i)
        res2 = cv2.bitwise_not(res2)
        cv2.imwrite('.tmp.bmp', res2)
        os.system('potrace.exe .tmp.bmp -s --flat')
        # print(i)
        drawSVG('.tmp.svg', '#%02x%02x%02x' % (i[2], i[1], i[0]))
    os.remove('.tmp.bmp')
    os.remove('.tmp.svg')
    print('\n\rFinished, close the window to exit.')
    te.done() 
Example #8
Source File: thresholding.py    From smashscan with MIT License (7 votes)
def standard_test(self):
        for fnum in range(self.start_fnum, self.stop_fnum):
            frame = util.get_frame(self.capture, fnum)
            frame = frame[280:, :]
            frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            mask = cv2.inRange(frame_HSV, (self.low_H, self.low_S, self.low_V),
                (self.high_H, self.high_S, self.high_V))

            res = cv2.bitwise_and(frame, frame, mask=mask)
            res_inv = cv2.bitwise_and(frame, frame, mask=cv2.bitwise_not(mask))

            cv2.imshow(self.window_name, mask)
            cv2.imshow('Video Capture AND', res)
            cv2.imshow('Video Capture INV', res_inv)

            if cv2.waitKey(30) & 0xFF == ord('q'):
                break


    # A number of methods corresponding to the various trackbars available. 
Example #9
Source File: raidnearby.py    From PGSS with GNU General Public License v3.0 (6 votes)
def getMonMask(self, img):
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # define range of blue color in HSV
        lower_blue = np.array([94, 130, 70])
        upper_blue = np.array([114, 160, 110])

        # Threshold the HSV image to get only shadow colors
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        kernel = np.ones((2, 2), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        final_mask = 255 - cv2.medianBlur(mask, 3) # invert mask

        return final_mask

    # Detect gym from raid sighting image 
Example #10
Source File: webcam_track_blobs.py    From pc-drone with MIT License (6 votes)
def add_blobs(crop_frame):
    frame=cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70,50,50])
    upper_green = np.array([85,255,255])
    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)    
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    reversemask=255-mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print "found blobs"
        if len(keypoints) > 4:
            keypoints.sort(key=(lambda s: s.size))
            keypoints=keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print "no blobs"
        im_with_keypoints=crop_frame
        
    return im_with_keypoints #, max_blob_dist, blob_center, keypoint_in_orders 
Example #11
Source File: Tshirt.py    From virtual-dressing-room with Apache License 2.0 (6 votes)
def detect_shirt(self):
        
        
        #self.dst=cv2.inRange(self.norm_rgb,np.array([self.lb,self.lg,self.lr],np.uint8),np.array([self.b,self.g,self.r],np.uint8))
        self.dst=cv2.inRange(self.norm_rgb,np.array([20,20,20],np.uint8),np.array([255,110,80],np.uint8))
        cv2.threshold(self.dst,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)
        fg=cv2.erode(self.dst,None,iterations=2)
        #cv2.imshow("fore",fg)  
        bg=cv2.dilate(self.dst,None,iterations=3)
        _,bg=cv2.threshold(bg, 1,128,1)
        #cv2.imshow("back",bg)
        
        mark=cv2.add(fg,bg)
        mark32=np.int32(mark)
        cv2.watershed(self.norm_rgb,mark32)
        self.m=cv2.convertScaleAbs(mark32)
        _,self.m=cv2.threshold(self.m,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        #cv2.imshow("final_tshirt",self.m)
        
        cntr,h=cv2.findContours(self.m,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
               
        return self.m,cntr 
Example #12
Source File: ChickenVision.py    From ChickenVision with MIT License (6 votes)
def threshold_video(lower_color, upper_color, blur):


    # Convert BGR to HSV
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

    # Threshold the HSV image to keep only pixels inside the given color range
    mask = cv2.inRange(hsv, lower_color, upper_color)

    # Return the masked image

    return mask



# Finds the tape targets from the masked image and displays them on the original stream + NetworkTables
Example #13
Source File: OCR.py    From display_ocr with GNU General Public License v2.0 (6 votes)
def getthresholdedimg(hsv):
    yellow = cv2.inRange(hsv, np.array((20, 100, 100)), np.array((30, 255, 255)))
    blue = cv2.inRange(hsv, np.array((100, 100, 100)), np.array((120, 255, 255)))
    both = cv2.add(yellow, blue)
    return both 
Example #14
Source File: ImageProcessing.py    From OctoPNP with GNU Affero General Public License v3.0 (6 votes)
def _maskBackground(self, img, mask_corners = True):
        h,w,c = np.shape(img)

        blur_img=cv2.blur(img, (5,5))
        hsv = cv2.cvtColor(blur_img, cv2.COLOR_BGR2HSV)

        lower_color = np.array([22,28,26])
        upper_color = np.array([103,255,255])

        # create binary mask by finding background color range
        mask = cv2.inRange(hsv, self.lower_mask_color, self.upper_mask_color)
        # remove the corners from mask since they are prone to illumination problems
        if(mask_corners):
            circle_mask = np.zeros((h, w), np.uint8)
            circle_mask[:, :] = 255
            cv2.circle(circle_mask,(w//2, h//2), min(w//2, h//2), 0, -1)
            mask = cv2.bitwise_or(mask,circle_mask)
        # invert mask to get white objects on black background
        #inverse_mask = 255 - mask

        if self._interactive: cv2.imshow("binary mask", mask)
        if self._interactive: cv2.waitKey(0)

        return mask 
Example #15
Source File: L2_color_target.py    From SCUTTLE with MIT License (6 votes)
def colorTarget(color_range=((0, 0, 0), (255, 255, 255))):

    image = cam.newImage()
    if filter == 'RGB':
        frame_to_thresh = image.copy()
    else:
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)                # convert image to hsv colorspace RENAME THIS TO IMAGE_HSV

    thresh = cv2.inRange(frame_to_thresh, color_range[0], color_range[1])
    mask = thresh

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]    # generates number of contiguous "1" pixels
    if len(cnts) == 0:                                                                      # return an empty result if no contours were found
        return np.array([None, None, 0])
    else:
        c = max(cnts, key=cv2.contourArea)                                                  # return the largest target area
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        if radius > 4:
            return np.array([round(x, 1), round(y, 1), round(radius, 1)]) 
Example #16
Source File: L2_track_target.py    From SCUTTLE with MIT License (6 votes)
def colorTarget(color_range=((0, 0, 0), (255, 255, 255))):

    image = cam.newImage()
    if filter == 'RGB':
        frame_to_thresh = image.copy()
    else:
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)                            # convert image to hsv colorspace RENAME THIS TO IMAGE_HSV

    thresh = cv2.inRange(frame_to_thresh, color_range[0], color_range[1])

    # apply morphological filtering to clean up the mask
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)                                 # Opening removes small noise
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)                                  # Closing fills small holes

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]    # generates number of contiguous "1" pixels
    if len(cnts) > 0:                                                                       # begin processing if there are "1" pixels discovered
        c = max(cnts, key=cv2.contourArea)                                                  # return the largest target area
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        return np.array([round(x, 1), round(y, 1), round(radius, 1)])
    else:
        return np.array([None, None, 0]) 
Example #17
Source File: camshift_object_tracker.py    From automl-video-ondevice with Apache License 2.0 (6 votes)
def calculate_roi_hist(self, frame):
    """Calculates region of interest histogram.

    Args:
      frame: The np.array image frame to calculate ROI histogram for.
    """
    (x, y, w, h) = self.box
    roi = frame[y:y + h, x:x + w]

    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                       np.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0, 1], mask, [180, 255],
                            [0, 180, 0, 255])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    self.roi_hist = roi_hist

  # Run this every frame 
Example #18
Source File: test_monkey.py    From ATX with Apache License 2.0 (5 votes)
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,  10, 1)


    while True:
        try:
            w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h//2, w//2))
            cv2.imshow('preview', img)

            hist = cv2.calcHist([img], [0], None, [256], [0,256])
            plt.plot(plt.hist(hist.ravel(), 256))
            plt.show()
            # if img.shape == oldimg.shape:
            #     # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     # x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            # # cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break

    cv2.destroyWindow('preview') 
Example #19
Source File: maskprocessor.py    From neural-road-inspector with MIT License (5 votes)
def _get_mask(img, target):
	"""get a binary mask filtered by target color."""
	tolerance = 10
	mask = cv2.inRange(img, target-tolerance, target+tolerance)
	return (mask != 0) 
Example #20
Source File: surf_image_processing.py    From Indian-Sign-Language-Recognition with MIT License (5 votes)
def func2(path):    
    frame = cv2.imread(path)
    frame = cv2.resize(frame,(128,128))
    converted2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Convert from BGR to HSV
    #cv2.imshow("original",converted2)

    lowerBoundary = np.array([0,40,30],dtype="uint8")
    upperBoundary = np.array([43,255,254],dtype="uint8")
    skinMask = cv2.inRange(converted, lowerBoundary, upperBoundary)
    skinMask = cv2.addWeighted(skinMask,0.5,skinMask,0.5,0.0)
    #cv2.imshow("masked",skinMask)
    
    skinMask = cv2.medianBlur(skinMask, 5)
    
    skin = cv2.bitwise_and(converted2, converted2, mask = skinMask)
    
    #cv2.imshow("masked2",skin)
    img2 = cv2.Canny(skin,60,60)
    #cv2.imshow("edge detection",img2)
    img2 = cv2.resize(img2,(256,256))
    orb = cv2.ORB_create()   # ORB lives in the main cv2 namespace, not cv2.xfeatures2d
    kp, des = orb.detectAndCompute(img2,None)

    #print(len(des2))
    img2 = cv2.drawKeypoints(img2,kp,None,color=(0,255,0), flags=0)
    #plt.imshow(img2),plt.show()
    
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return des
#func("001.jpg") 
Example #21
Source File: camera_opencv.py    From Adeept_RaspTank with MIT License (5 votes)
def findColor(self, frame_image):
        hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, colorLower, colorUpper)#1
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None
        if len(cnts) > 0:
            self.findColorDetection = 1
            c = max(cnts, key=cv2.contourArea)
            ((self.box_x, self.box_y), self.radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            X = int(self.box_x)
            Y = int(self.box_y)
            error_Y = 240 - Y
            error_X = 320 - X
            # CVThread.servoMove(CVThread.P_servo, CVThread.P_direction, error_X)
            CVThread.servoMove(CVThread.T_servo, CVThread.T_direction, error_Y)

            # if CVThread.X_lock == 1 and CVThread.Y_lock == 1:
            if CVThread.Y_lock == 1:
                led.setColor(255,78,0)
                # switch.switch(1,1)
                # switch.switch(2,1)
                # switch.switch(3,1)
            else:
                led.setColor(0,78,255)
                # switch.switch(1,0)
                # switch.switch(2,0)
                # switch.switch(3,0)
        else:
            self.findColorDetection = 0
            move.motorStop()
        self.pause() 
Example #22
Source File: follower_line_finder.py    From rosbook with Apache License 2.0 (5 votes)
def image_callback(self, msg):
    image = self.bridge.imgmsg_to_cv2(msg,desired_encoding='bgr8')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_yellow = numpy.array([ 10,  10,  10])
    upper_yellow = numpy.array([255, 255, 250])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    
    # BEGIN CROP
    h, w, d = image.shape
    search_top = 3*h//4
    search_bot = search_top + 20
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    # END CROP
    # BEGIN FINDER
    M = cv2.moments(mask)
    if M['m00'] > 0:
      cx = int(M['m10']/M['m00'])
      cy = int(M['m01']/M['m00'])
    # END FINDER
    # BEGIN CIRCLE
      cv2.circle(image, (cx, cy), 20, (0,0,255), -1)
    # END CIRCLE

    cv2.imshow("window", image)
    cv2.waitKey(3) 
Example #23
Source File: image_transformation.py    From Sign-Language-Recognition with MIT License (5 votes)
def make_background_black(frame):
    """
    Makes everything apart from the main object of interest to be
    black in color.
    """
    logger.debug("Making background black...")

    # Convert from BGR to HSV
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Prepare the first mask.
    # Tuned parameters to match the skin color of the input images...
    lower_boundary = np.array([0, 40, 30], dtype="uint8")
    upper_boundary = np.array([43, 255, 254], dtype="uint8")
    skin_mask = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Apply a series of erosions and dilations to the mask using an
    # elliptical kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    skin_mask = cv2.erode(skin_mask, kernel, iterations=2)
    skin_mask = cv2.dilate(skin_mask, kernel, iterations=2)

    # Prepare the second mask
    lower_boundary = np.array([170, 80, 30], dtype="uint8")
    upper_boundary = np.array([180, 255, 250], dtype="uint8")
    skin_mask2 = cv2.inRange(frame, lower_boundary, upper_boundary)

    # Combine the effect of both the masks to create the final frame.
    skin_mask = cv2.addWeighted(skin_mask, 0.5, skin_mask2, 0.5, 0.0)
    # Blur the mask to help remove noise.
    # skin_mask = cv2.medianBlur(skin_mask, 5)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)
    frame = cv2.addWeighted(frame, 1.5, frame_skin, -0.5, 0)
    frame_skin = cv2.bitwise_and(frame, frame, mask=skin_mask)

    logger.debug("Done!")
    return frame_skin 
Example #24
Source File: privacy.py    From deda with GNU General Public License v3.0 (5 votes)
def _selectColour(self,colour):
        edgeHue = int(rgb_to_hsv(*colour)[0]*180)
        TOLERANCE = 30
        edgeHue1 = (edgeHue-TOLERANCE)%181
        edgeHue2 = (edgeHue+TOLERANCE)%181
        hsv = cv2.cvtColor(self.im, cv2.COLOR_BGR2HSV)
        if edgeHue2 < edgeHue1: 
            raise Exception("Implementation change necessary: "
                +"Trying to select range that includes 180..0.")
        im = cv2.inRange(hsv, (edgeHue1,100,100),(edgeHue2,255,255))
        return im 
Example #25
Source File: judge_multi_color.py    From Python-Code with MIT License (5 votes)
def getColor(frame, count_color):

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    color = []

    color_dict = color_list.getColorList()

    # number of colors in the color dictionary
    num_color = len(color_dict)
    sum = [0] * num_color
    search_color_list = []
    # check each color; d is the color name string (e.g. 'red')
    for (d, i) in zip(color_dict, range(num_color)):
        
        search_color_list.append(d)
        # build a mask from the threshold range
        mask = cv2.inRange(hsv, color_dict[d][0], color_dict[d][1])
        # erosion
        mask = cv2.erode(mask, None, iterations=2)
        # dilation; erosion followed by dilation is an opening operation, which removes noise
        mask = cv2.dilate(mask, None, iterations=2)
        img, cnts, hiera = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
        # only continue if contours were found
        
        if len(cnts) > 0:
            for c in cnts:
                sum[i] += cv2.contourArea(c)

    find_color_list = heapq.nlargest(count_color, sum)
    for j in range(count_color):     
        color.append(search_color_list[sum.index(find_color_list[j])])

    return color 
Example #26
Source File: invisibility_cloak.py    From snapchat-filters-opencv with MIT License (5 votes)
def invisibility(image):

    # converting from BGR to HSV color space
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # cv2.imshow("hsv", hsv[..., 0])

    # Range for lower red
    lower_red = np.array([0, 120, 70])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)

    # Range for upper red
    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)

    # Generating the final mask to detect red color
    mask1 = mask1 + mask2

    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))

    # creating an inverted mask to segment out the cloth from the frame
    mask2 = cv2.bitwise_not(mask1)
    # Segmenting the cloth out of the frame using bitwise and with the inverted mask
    res1 = cv2.bitwise_and(image, image, mask=mask2)

    # creating image showing static background frame pixels only for the masked region
    res2 = cv2.bitwise_and(background, background, mask=mask1)
    # Generating the final output
    final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
    return final_output 
Example #27
Source File: color_filtering.py    From Python-Code with MIT License (5 votes)
def get_color(frame, color):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    color_dict = color_list.getColorList()
    mask = cv2.inRange(hsv, color_dict[color][0], color_dict[color][1])
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # save the mask as an image
    cv2.imwrite(filename + color + '.jpg', mask)
    # display the result
    cv2.imshow('Result', res)
    cv2.waitKey(0) 
Example #28
Source File: Calibrate.py    From Distance-between-camera-and-object with GNU General Public License v3.0 (5 votes)
def getmp(image):
    
    gra = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    gray = gra[:,:,2]
    gray = cv2.GaussianBlur(gray, (7,7), 0)
    gray = cv2.GaussianBlur(gray, (7,7), 0)
    edged = cv2.inRange(gray, 0, 70)
    edged = cv2.dilate(edged, None, iterations=10)
    edged = cv2.erode(edged, None, iterations=5)
    edged = cv2.dilate(edged, None, iterations=10)
    edged = cv2.erode(edged, None, iterations=30)
    edged = cv2.dilate(edged, None, iterations=40)
    ff=np.where(edged>50)
    a1=np.mean(ff[1])  
    return a1 
Example #29
Source File: Calculate.py    From Distance-between-camera-and-object with GNU General Public License v3.0 (5 votes)
def getmp(image):
    
    gra = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    gray = gra[:,:,2]
    gray = cv2.GaussianBlur(gray, (7,7), 0)
    gray = cv2.GaussianBlur(gray, (7,7), 0)
    edged = cv2.inRange(gray, 0, 70)
    edged = cv2.dilate(edged, None, iterations=10)
    edged = cv2.erode(edged, None, iterations=5)
    edged = cv2.dilate(edged, None, iterations=10)
    edged = cv2.erode(edged, None, iterations=30)
    edged = cv2.dilate(edged, None, iterations=40)
    ff=np.where(edged>50)
    a1=np.mean(ff[1])  
    return a1 
Example #30
Source File: imutils.py    From craves.ai with GNU General Public License v3.0 (5 votes)
def create_mask(self, img, color):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)

        if color == 'green':
            threshold = [(20, 0, 128), (235, 128, 255)]
        elif color == 'white':
            threshold = [(100, 110, 110), (200, 140, 140)]

        else:
            raise Exception('Color undefined')
        
        mask = cv2.inRange(img, threshold[0], threshold[1])
        # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
        # mask =  cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        # mask =  cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        mask = mask > 0

        # img = cv2.cvtColor(img, cv2.COLOR_YCR_CB2BGR)

        # thres_img = np.zeros_like(img, np.uint8)
        # thres_img[mask] = img[mask]

        binary_img = np.zeros((img.shape[0],img.shape[1]), np.uint8)
        binary_img[mask] = 255

        # cv2.imshow('img', binary_img)
        # cv2.waitKey(0)
        # exit(0)

        return mask