Python cv2.MORPH_OPEN Examples

The following are 30 code examples of cv2.MORPH_OPEN(). Each example lists the project and source file it was taken from. You may also want to check out all available functions/classes of the cv2 module.
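
Before diving into the project examples, here is a minimal, self-contained sketch of the call pattern they all share. It is not taken from any project below; the file name mask.png and the 5x5 rectangular kernel are placeholder choices.

import cv2
import numpy as np

# Hypothetical input: a binary mask (white foreground on black background).
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)

# A 5x5 rectangular structuring element; size and shape are tunable.
kernel = np.ones((5, 5), np.uint8)

# Opening = erosion followed by dilation: it removes small white noise specks
# while roughly preserving the shape of larger foreground regions.
opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)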
Example #1
Source File: color_detection.py    From deepgaze with MIT License
def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):
        """Given an input frame return the black/white mask.
 
        This version of the function does not use the blur and bitwise
        operations, so the resulting frame contains white pixels
        wherever skin was found during the search.
        @param frame the original frame (color)
        """
        #Convert to HSV and eliminate pixels outside the range
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        frame_filtered = cv2.inRange(frame_hsv, self.min_range, self.max_range)
        if(morph_opening==True):
            kernel = np.ones((kernel_size,kernel_size), np.uint8)
            frame_filtered = cv2.morphologyEx(frame_filtered, cv2.MORPH_OPEN, kernel, iterations=iterations)
        #Applying Gaussian Blur
        if(blur==True): 
            frame_filtered = cv2.GaussianBlur(frame_filtered, (kernel_size,kernel_size), 0)
        return frame_filtered 
Example #2
Source File: L2_track_target.py    From SCUTTLE with MIT License
def colorTarget(color_range=((0, 0, 0), (255, 255, 255))):

    image = cam.newImage()
    if filter == 'RGB':
        frame_to_thresh = image.copy()
    else:
        frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)                            # convert image to HSV colorspace

    thresh = cv2.inRange(frame_to_thresh, color_range[0], color_range[1])

    # clean up the threshold mask with morphological opening and closing
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)                                 # remove small noise specks
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)                                  # fill small holes

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]    # find contours of contiguous white regions
    if len(cnts) > 0:                                                                       # proceed only if at least one contour was found
        c = max(cnts, key=cv2.contourArea)                                                  # select the contour with the largest area
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        return np.array([round(x, 1), round(y, 1), round(radius, 1)])
    else:
        return np.array([None, None, 0]) 
Example #3
Source File: cv_functions.py    From R-CNN_LIGHT with MIT License
def opening(binary_img=None, k_size=2, iterations=1):

    kernel = np.ones((k_size, k_size), np.uint8)

    return cv2.morphologyEx(binary_img, cv2.MORPH_OPEN, kernel, iterations=iterations)  # iterations: how many times the opening is applied
Example #4
Source File: EyeCanSee.py    From cv-lane with Apache License 2.0
def filter_smooth_thres(self, RANGE, color):
        for (lower, upper) in RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_bottom = cv2.inRange(self.img_roi_bottom_hsv, lower, upper)
            mask_top = cv2.inRange(self.img_roi_top_hsv, lower, upper)

        blurred_bottom = cv2.medianBlur(mask_bottom, 5)
        blurred_top = cv2.medianBlur(mask_top, 5)

        # Morphological transformation
        kernel = np.ones((2, 2), np.uint8)
        smoothen_bottom = blurred_bottom #cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)
        smoothen_top = blurred_top  # cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)

        """
        if self.debug:
            cv2.imshow('mask bottom ' + color, mask_bottom)
            cv2.imshow('blurred bottom' + color, blurred_bottom)

            cv2.imshow('mask top ' + color, mask_top)
            cv2.imshow('blurred top' + color, blurred_top)
        """

        return smoothen_bottom, smoothen_top

    # Gets metadata from our contours 
Example #5
Source File: main.py    From sbb_textline_detection with Apache License 2.0
def get_text_region_contours_and_boxes(self, image):
        rgb_class_of_texts = (1, 1, 1)
        mask_texts = np.all(image == rgb_class_of_texts, axis=-1)

        image = np.repeat(mask_texts[:, :, np.newaxis], 3, axis=2) * 255
        image = image.astype(np.uint8)

        image = cv2.morphologyEx(image, cv2.MORPH_OPEN, self.kernel)
        image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, self.kernel)


        imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        _, thresh = cv2.threshold(imgray, 0, 255, 0)

        contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        
        main_contours = self.filter_contours_area_of_image(thresh, contours, hierarchy, max_area=1, min_area=0.00001)
        self.boxes = []
        
        for jj in range(len(main_contours)):
            x, y, w, h = cv2.boundingRect(main_contours[jj])
            self.boxes.append([x, y, w, h])
            

        return main_contours 
Example #6
Source File: main.py    From sbb_textline_detection with Apache License 2.0
def return_rotated_contours(self,slope,img_patch):
            dst = self.rotate_image(img_patch, slope)
            dst = dst.astype(np.uint8)
            dst = dst[:, :, 0]
            dst[dst != 0] = 1
            
            imgray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
            _, thresh = cv2.threshold(imgray, 0, 255, 0)
            thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
            thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
            contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            return contours 
Example #7
Source File: dilate_erode.py    From DDRNet with MIT License
def rm_dot(depth_im, mask=None):
    """Open operation: erode then dilate"""
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5, 5))
    depth_im_ed = cv2.morphologyEx(depth_im, cv2.MORPH_OPEN, kernel)  # erode_dilate
    if mask is None:  # default mask: pixels with valid (non-zero) depth
        mask = np.greater(depth_im, 0)
    depth_im_ed = np.where(mask, depth_im, depth_im_ed)
    return depth_im_ed 
Example #8
Source File: main.py    From speed-detector with MIT License
def filter_mask (mask):
	# I want some pretty drastic closing
	kernel_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
	kernel_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
	kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

	# Remove noise
	opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_open)
	# Close holes within contours
	closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel_close)
	# Merge adjacent blobs
	dilation = cv2.dilate(closing, kernel_dilate, iterations = 2)

	return dilation 
Example #9
Source File: mask_morphology.py    From NucleiDetectron with Apache License 2.0
def opencv_segmentation(mask, kernel=k_3x3, k=3):
    # noise removal
    opening = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel, iterations=k)

    # sure background area
    sure_bg = cv.dilate(opening, kernel, iterations=k)

    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening,cv.DIST_L2, 5)
    ret, sure_fg = cv.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Now, mark the region of unknown with zero
    markers[unknown > 0] = 0

    labels_ws = cv.watershed(cv.cvtColor(mask, cv.COLOR_GRAY2RGB), markers)

    if labels_ws.max() - 1 < 2:
        return [mask], labels_ws

    res_masks = []
    for idx in range(2,  labels_ws.max() + 1):
        m = labels_ws == idx
        if m.sum() > 5:
            m = cv.dilate(m.astype(np.uint8), kernel, iterations=1)
            res_masks.append(m)
    return res_masks, labels_ws 
Example #10
Source File: data.py    From BraTs with MIT License
def save_result(args, output):
    ''' Save prediction results overlaid on the original MRI images

    Args:
        args (argparse):    Arguments parsed from the command line
        output (np.array):  Prediction results from the segmentation model
    '''

    file_path = []
    file_path += glob.glob(os.path.join(args.image_root, args.image_folder1, '*.jpg'))
    file_path = sorted(file_path)

    output = np.argmax(output, axis=-1)*255
    kernel = np.ones((5,5),np.uint8)

    for i in range(output.shape[0]):
        save_path = os.path.join(args.output_root, str(i)+'.jpg')

        img = cv2.imread(file_path[i])
        pred = cv2.morphologyEx(output[i].astype(np.uint8), cv2.MORPH_OPEN, kernel)
        pred = cv2.morphologyEx(pred, cv2.MORPH_CLOSE, kernel)
        pred = np.expand_dims(pred, axis=2)
        zeros = np.zeros(pred.shape)
        pred = np.concatenate((zeros,zeros,pred), axis=2)
        img = img + pred
        if img.max() > 0:
            img = (img/img.max())*255
        else:
            img = (img/1)*255
        cv2.imwrite(save_path, img) 
Example #11
Source File: utils.py    From BraTs with MIT License
def erode_dilate(outputs, kernel_size=7):
    kernel = np.ones((kernel_size,kernel_size),np.uint8)
    outputs = outputs.astype(np.uint8)
    for i in range(outputs.shape[0]):
        img = outputs[i]
        img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        outputs[i] = img
    return outputs 
Example #12
Source File: morphological_operations.py    From Mastering-OpenCV-4-with-Python with MIT License
def opening(image, kernel_type, kernel_size):
    """Opens the image with the specified kernel type and size"""

    kernel = build_kernel(kernel_type, kernel_size)
    ope = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    return ope


# This function applies the morphological gradient to the image 
Example #13
Source File: geometries.py    From deeposlandia with MIT License
def extract_geometry_vertices(mask, structure_size=(10, 10), approx_eps=0.01):
    """Extract polygon vertices from a boolean mask with the help of OpenCV
    utilities, as a numpy array

    Parameters
    ----------
    mask : numpy.array
        Image mask where to find polygons
    structure_size : tuple
        Size of the cv2 structuring element, as a tuple of horizontal and
        vertical pixels
    approx_eps : double
        Approximation coefficient used to build simplified polygons (it lies
        between 0 and 1; the larger the value, the stronger the approximation)

    Returns
    -------
    numpy.array
        List of polygons contained in the mask, identified by their vertices
    """
    structure = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, structure_size)
    denoised = cv2.morphologyEx(mask, cv2.MORPH_OPEN, structure)
    grown = cv2.morphologyEx(denoised, cv2.MORPH_CLOSE, structure)
    _, contours, hierarchy = cv2.findContours(
        grown, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
    )
    polygons = [
        cv2.approxPolyDP(
            c, epsilon=approx_eps * cv2.arcLength(c, closed=True), closed=True
        )
        for c in contours
    ]
    return polygons, hierarchy 
Example #14
Source File: background.py    From Stereo-Pose-Machines with GNU General Public License v2.0
def segment(self, im):
        mask = np.square(im.astype('float32') - self.bgim
                ).sum(axis=2) / 20
        mask = np.clip(mask, 0, 255).astype('uint8')
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)
        mask = cv2.dilate(mask, self.dilate_k)
        mask = mask.astype('uint8')
        return (mask > 10).astype('float32') *255 
Example #15
Source File: image.py    From soccerontable with BSD 2-Clause "Simplified" License
def robust_edge_detection(img):
    # Find edges
    kernel_size = 5
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
    # io.imagesc(blur_gray)
    edges = cv2.Canny((blur_gray * 255).astype(np.uint8), 10, 200, apertureSize=5)
    # io.imagesc(edges)
    lsd = cv2.createLineSegmentDetector(0)
    lines = lsd.detect(edges)[0]  # Position 0 of the returned tuple are the detected lines

    long_lines = []
    for j in range(lines.shape[0]):
        x1, y1, x2, y2 = lines[j, 0, :]
        if np.linalg.norm(np.array([x1, y1]) - np.array([x2, y2])) > 50:
            long_lines.append(lines[j, :, :])

    lines = np.array(long_lines)
    edges = 1 * np.ones_like(img)
    drawn_img = lsd.drawSegments(edges, lines)
    edges = (drawn_img[:, :, 2] > 1).astype(np.float32)

    kernel = np.ones((7, 7), np.uint8)

    edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((3, 3), np.uint8)
    edges = cv2.morphologyEx(edges, cv2.MORPH_OPEN, kernel)

    return edges 
Example #16
Source File: vanishing_point.py    From vanishing-point-detection with MIT License
def hough_transform(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert image to grayscale
    kernel = np.ones((15, 15), np.uint8)

    opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)  # Open (erode, then dilate)
    edges = cv2.Canny(opening, 50, 150, apertureSize=3)  # Canny edge detection
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)  # Hough line detection

    hough_lines = []
    # Lines are represented by rho, theta; converted to endpoint notation
    if lines is not None:
        for line in lines:
            hough_lines.extend(list(starmap(endpoints, line)))

    return hough_lines 
Example #17
Source File: invisibility_cloak.py    From snapchat-filters-opencv with MIT License
def invisibility(image):

    # converting from BGR to HSV color space
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # cv2.imshow("hsv", hsv[..., 0])

    # Range for lower red
    lower_red = np.array([0, 120, 70])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)

    # Range for upper red
    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)

    # Generating the final mask to detect red color
    mask1 = mask1 + mask2

    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))

    # creating an inverted mask to segment out the cloth from the frame
    mask2 = cv2.bitwise_not(mask1)
    # Segmenting the cloth out of the frame using bitwise and with the inverted mask
    res1 = cv2.bitwise_and(image, image, mask=mask2)

    # creating image showing static background frame pixels only for the masked region
    res2 = cv2.bitwise_and(background, background, mask=mask1)
    # Generating the final output
    final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
    return final_output 
Example #18
Source File: background_substraction.py    From snapchat-filters-opencv with MIT License
def bg_substraction(image):
    image = fgbg.apply(image)
    image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    # Transform again to BGR
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    return image 
Example #19
Source File: binarization.py    From dhSegment with GNU General Public License v3.0
def cleaning_binary(mask: np.ndarray, kernel_size: int=5) -> np.ndarray:
    """
    Uses mathematical morphology to clean and remove small elements from binary images.

    :param mask: the binary image to clean
    :param kernel_size: size of the kernel
    :return: the cleaned mask
    """

    ksize_open = (kernel_size, kernel_size)
    ksize_close = (kernel_size, kernel_size)
    mask = cv2.morphologyEx((mask.astype(np.uint8, copy=False) * 255), cv2.MORPH_OPEN, kernel=np.ones(ksize_open))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel=np.ones(ksize_close))
    return np.uint8(mask / 255) 
Example #20
Source File: fake_util.py    From CRAFT_keras with Apache License 2.0
def watershed(src):
    """
    Performs a marker-based image segmentation using the watershed algorithm.
    :param src: 8-bit 1-channel image.
    :return: 32-bit single-channel image (map) of markers.
    """
    # cv2.imwrite('{}.png'.format(np.random.randint(1000)), src)
    gray = src.copy()
    img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    # h, w = gray.shape[:2]
    # block_size = (min(h, w) // 4 + 1) * 2 + 1
    # thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, 0)
    _ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    # dist_transform = opening & gray
    # cv2.imshow('dist_transform', dist_transform)
    # _ret, sure_bg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY_INV)
    _ret, sure_fg = cv2.threshold(dist_transform, 0.2 * dist_transform.max(), 255, cv2.THRESH_BINARY)

    # Finding unknown region
    # sure_bg = np.uint8(sure_bg)
    sure_fg = np.uint8(sure_fg)
    # cv2.imshow('sure_fg', sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker label
    lingret, marker_map = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    marker_map = marker_map + 1

    # Now, mark the region of unknown with zero
    marker_map[unknown == 255] = 0

    marker_map = cv2.watershed(img, marker_map)

    return marker_map 
Example #21
Source File: color_detection.py    From deepgaze with MIT License
def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):
        """Given an input frame in BGR return the black/white mask.
 
        @param frame the original frame (color)
        @param morph_opening if True, apply an erosion followed by a dilation to remove noise
        @param blur if True, apply a Gaussian blur to smooth the image
        @param kernel_size is the kernel dimension used for morph and blur
        """
        if(self.template_hsv is None): return None
        #Convert the input frame from BGR -> HSV
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        #Set the template histogram
        template_hist = cv2.calcHist([self.template_hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )
        #Normalize the template histogram and apply backprojection
        cv2.normalize(template_hist, template_hist, 0, 255, cv2.NORM_MINMAX)
        frame_hsv = cv2.calcBackProject([frame_hsv], [0,1], template_hist, [0,180,0,256], 1)
        #Get the kernel and apply a convolution
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size,kernel_size))
        frame_hsv = cv2.filter2D(frame_hsv, -1, kernel)
        #Applying the morph open operation (erosion followed by dilation)
        if(morph_opening==True):
            kernel = np.ones((kernel_size,kernel_size), np.uint8)
            frame_hsv = cv2.morphologyEx(frame_hsv, cv2.MORPH_OPEN, kernel, iterations=iterations)
        #Applying Gaussian Blur
        if(blur==True): 
            frame_hsv = cv2.GaussianBlur(frame_hsv, (kernel_size,kernel_size), 0)
        #Get the threshold
        ret, frame_threshold = cv2.threshold(frame_hsv, 50, 255, 0)
        #Merge the threshold matrices
        return cv2.merge((frame_threshold,frame_threshold,frame_threshold)) 
Example #22
Source File: color_detection.py    From deepgaze with MIT License
def returnMask(self, frame, morph_opening=True, blur=True, kernel_size=5, iterations=1):
        """Given an input frame in BGR return the black/white mask.
 
        @param frame the original frame (color)
        @param morph_opening if True, apply an erosion followed by a dilation to remove noise
        @param blur if True, apply a Gaussian blur to smooth the image
        @param kernel_size is the kernel dimension used for morph and blur
        """
        if(len(self.template_hsv_list) == 0): return None
        frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = np.zeros((frame.shape[0], frame.shape[1]))
        for template_hsv in self.template_hsv_list:
            #Set the template histogram
            template_hist = cv2.calcHist([template_hsv],[0, 1], None, [256, 256], [0, 256, 0, 256] )
            #Normalize the template histogram and apply backprojection
            cv2.normalize(template_hist, template_hist, 0, 255, cv2.NORM_MINMAX)
            frame_hsv_back = cv2.calcBackProject([frame_hsv], [0,1], template_hist, [0,256,0,256], 1)
            #Get the kernel and apply a convolution
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size,kernel_size))
            frame_hsv_clean = cv2.filter2D(frame_hsv_back, -1, kernel)
            #Applying the morph open operation (erosion followed by dilation)
            if(morph_opening==True):
                kernel = np.ones((kernel_size,kernel_size), np.uint8)
                frame_hsv_clean = cv2.morphologyEx(frame_hsv_clean, cv2.MORPH_OPEN, kernel, iterations=iterations)
            #Applying Gaussian Blur
            if(blur==True): 
                frame_hsv_clean = cv2.GaussianBlur(frame_hsv_clean, (kernel_size,kernel_size), 0)
            #Get the threshold
            ret, frame_hsv_threshold = cv2.threshold(frame_hsv_clean, 50, 255, 0)
            mask = np.add(mask, frame_hsv_threshold) #Add the threshold to the mask


        #The mask may contain values above 255 because the thresholds were
        #added up during the previous loop. Normalizing is not necessary here:
        #the astype(np.uint8) cast below brings any larger value back into the
        #uint8 range before the final threshold.
        #cv2.normalize(mask, mask, 0, 255, cv2.NORM_MINMAX) #Not necessary
        ret, mask = cv2.threshold(mask.astype(np.uint8), 50, 255, 0)
        return cv2.merge((mask,mask,mask)) 
Example #23
Source File: image_functions.py    From niryo_one_ros with GNU General Public License v3.0
def morphological_transformations(im_thresh, morpho_type="CLOSE", kernel_shape=(5, 5), kernel_type="ELLIPSE"):
    """
    Take black & white image and apply morphological transformation
    :param im_thresh: Black & White Image
    :param morpho_type: CLOSE/OPEN/ERODE/DILATE => see the OpenCV documentation for details
    :param kernel_shape: tuple corresponding to the size of the kernel
    :param kernel_type: RECT/ELLIPSE/CROSS => see the OpenCV documentation
    :return: image after processing
    """
    if kernel_type == "CROSS":
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, kernel_shape)
    elif kernel_type == "RECT":
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_shape)
    else:
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernel_shape)

    if morpho_type == "OPEN":
        morph_type = cv2.MORPH_OPEN
    elif morpho_type == "DILATE":
        morph_type = cv2.MORPH_DILATE
    elif morpho_type == "ERODE":
        morph_type = cv2.MORPH_ERODE
    else:
        morph_type = cv2.MORPH_CLOSE

    return cv2.morphologyEx(im_thresh, morph_type, kernel)


# Contours 
Example #24
Source File: motion_detection.py    From pynvr with BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (21, 21), 0)

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        frameDiff = cv.absdiff(gray, self.prevFrame)

        # kernel = np.ones((5, 5), np.uint8)

        opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa
        closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa

        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        height = np.size(th1, 0)
        width = np.size(th1, 1)

        nb = cv.countNonZero(th1)

        avg = (nb * 100) / (height * width)  # Percentage of changed (non-zero) pixels in the frame

        self.prevFrame = gray

        # cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
        # cv.imshow("frame", current_frame)

        ret = avg > self.threshold   # If over the threshold, trigger the alarm

        if ret:
            self.updateMotionDetectionDts()

        return ret 
Example #25
Source File: core.py    From robosat with MIT License
def denoise(mask, eps):
    """Removes noise from a mask.

    Args:
      mask: the mask to remove noise from.
      eps: the morphological operation's kernel size for noise removal, in pixel.

    Returns:
      The mask after applying denoising.
    """

    struct = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (eps, eps))
    return cv2.morphologyEx(mask, cv2.MORPH_OPEN, struct) 
Example #26
Source File: cut_part.py    From 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement with MIT License
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarize the grayscale image; the last two parameters are for debugging
    """
    Compute the gradient and binarize.
    :param img_blurred: the filtered (blurred) image
    :param image_name: image name, used for testing
    :param save_path: save path, used for testing
    :return: the binarized image
    """
    gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
    img_gradient = cv2.subtract(gradX, gradY)
    img_gradient = cv2.convertScaleAbs(img_gradient)  # Sobel operator to compute the gradient; a Canny operator could be used instead

    # switched to adaptive thresholding here; it does not seem to make a difference
    img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
    # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)  # binarization; threshold not tuned yet

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
    img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
    img_closed = cv2.erode(img_closed, None, iterations=9)
    img_closed = cv2.dilate(img_closed, None, iterations=9)  # erosion followed by dilation
    # reducing the kernel size and increasing the erode/dilate iterations greatly lowers the error rate

    return img_closed 
Example #27
Source File: size_detector.py    From gaps with MIT License
def _filter_image(self, image):
        _, thresh = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)
        opened = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, (5, 5), iterations=3)

        return cv2.bitwise_not(opened) 
Example #28
Source File: mask.py    From dreampower with GNU General Public License v3.0
def _execute(self, *args):
        """
        Create mask ref.

        :param args: <[RGB,RGB]> corrected image, mask image
        :return: <RGB> image
        """
        # Create a total green image
        green = np.zeros((512, 512, 3), np.uint8)
        green[:, :, :] = (0, 255, 0)  # (B, G, R)

        # Define the green color filter
        f1 = np.asarray([0, 250, 0])  # green color filter
        f2 = np.asarray([10, 255, 10])

        # From mask, extrapolate only the green mask
        green_mask = cv2.inRange(args[1], f1, f2)  # green is 0

        # (OPTIONAL) Apply dilate and open to mask
        kernel = np.ones((5, 5), np.uint8)  # Try change it?
        green_mask = cv2.dilate(green_mask, kernel, iterations=1)
        # green_mask = cv2.morphologyEx(green_mask, cv2.MORPH_OPEN, kernel)

        # Create an inverted mask
        green_mask_inv = cv2.bitwise_not(green_mask)

        # Cut correct and green image, using the green_mask & green_mask_inv
        res1 = cv2.bitwise_and(args[0], args[0], mask=green_mask_inv)
        res2 = cv2.bitwise_and(green, green, mask=green_mask)

        # Compose the final image:
        return cv2.add(res1, res2) 
Example #29
Source File: skin_detector.py    From SkinDetector with MIT License
def closing(mask):
    assert isinstance(mask, numpy.ndarray), 'mask must be a numpy array'
    assert mask.ndim == 2, 'mask must be a greyscale image'
    logger.debug("closing mask of shape {0}".format(mask.shape))

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)

    return mask 
Example #30
Source File: RegionOfInterest.py    From DoNotSnap with GNU General Public License v3.0
def roiMask(image, boundaries):
    scale = max([1.0, np.average(np.array(image.shape)[0:2] / 400.0)])
    shape = (int(round(image.shape[1] / scale)), int(round(image.shape[0] / scale)))

    small_color = cv2.resize(image, shape, interpolation=cv2.INTER_LINEAR)

    # reduce details and remove noise for better edge detection
    small_color = cv2.bilateralFilter(small_color, 8, 64, 64)
    small_color = cv2.pyrMeanShiftFiltering(small_color, 8, 64, maxLevel=1)
    small = cv2.cvtColor(small_color, cv2.COLOR_BGR2HSV)

    hue = small[::, ::, 0]
    intensity = cv2.cvtColor(small_color, cv2.COLOR_BGR2GRAY)

    edges = extractEdges(hue, intensity)
    roi = roiFromEdges(edges)
    weight_map = weightMap(hue, intensity, edges, roi)

    _, final_mask = cv2.threshold(roi, 5, 255, cv2.THRESH_BINARY)
    small = cv2.bitwise_and(small, small, mask=final_mask)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))

    for (lower, upper) in boundaries:
        lower = np.array([lower, 80, 50], dtype="uint8")
        upper = np.array([upper, 255, 255], dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(small, lower, upper)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=3)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
        final_mask = cv2.bitwise_and(final_mask, mask)

    # blur the mask for better contour extraction
    final_mask = cv2.GaussianBlur(final_mask, (5, 5), 0)
    return (final_mask, weight_map, scale)