Python cv2.moments() Examples

The following are 30 code examples of cv2.moments(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the cv2 module, or try the search function.
Example #1
Source File: helpers.py From Color-Tracker with MIT License | 9 votes |
def get_contour_centers(contours: np.ndarray) -> np.ndarray:
    """Calculate the centers of the contours.

    :param contours: Contours detected with find_contours
    :return: object centers as an (N, 2) numpy array of (x, y) pairs;
        an empty array when no contours are given
    """
    if len(contours) == 0:
        return np.array([])

    centers = np.zeros((len(contours), 2), dtype=np.int16)
    for i, c in enumerate(contours):
        M = cv2.moments(c)
        # Guard against degenerate (zero-area) contours: m00 == 0 would
        # otherwise raise ZeroDivisionError; such entries stay at (0, 0).
        if M["m00"] != 0:
            centers[i] = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    return centers
Example #2
Source File: omr.py From omr with MIT License | 7 votes |
def calculate_contour_features(contour):
    """Calculates interesting properties (features) of a contour.

    We use these features to match shapes (contours): we compute the
    features for many contours in the input image and compare them to the
    features of a known corner contour (see calculate_corner_features).
    The features must be invariant to both scale and rotation — a corner
    is a corner regardless of its size or orientation. Instead of
    hand-rolled features, we rely on OpenCV's robust scale- and
    rotation-invariant Hu moments.
    """
    return cv2.HuMoments(cv2.moments(contour))
Example #3
Source File: follower_p.py From rosbook with Apache License 2.0 | 7 votes |
def image_callback(self, msg):
    """Follow a yellow line: threshold the camera image in HSV, find the
    line centroid in a thin search band via image moments, and steer
    toward it with a proportional controller.
    """
    image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_yellow = numpy.array([10, 10, 10])
    upper_yellow = numpy.array([255, 255, 250])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    h, w, d = image.shape
    # Integer division (//): float slice indices raise TypeError on Python 3.
    search_top = 3 * h // 4
    search_bot = 3 * h // 4 + 20
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    M = cv2.moments(mask)
    if M['m00'] > 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(image, (cx, cy), 20, (0, 0, 255), -1)
        # BEGIN CONTROL
        err = cx - w / 2
        self.twist.linear.x = 0.2
        self.twist.angular.z = -float(err) / 100
        self.cmd_vel_pub.publish(self.twist)
        # END CONTROL
    cv2.imshow("window", image)
    cv2.waitKey(3)
Example #4
Source File: hsv_track.py From DroneSimLab with MIT License | 7 votes |
def find_red(img):
    """Mark the largest red blob in a BGR frame.

    Thresholds in HSV space, cleans the mask with erode/dilate, then
    circles the biggest contour on a copy of the input image.
    NOTE(review): the HSV bounds (130,130,180)-(255,255,255) look tuned
    for a specific camera/lighting setup — confirm before reuse.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv,(130,130,180),(255,255,255))
    mask = cv2.erode(mask, np.ones((2,1)) , iterations=1)
    mask = cv2.dilate(mask, None, iterations=3)
    # [-2] keeps compatibility across OpenCV versions that return 2 or 3 values
    cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
    frame=img.copy()
    ###based on example from http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # centroid from raw moments; assumes m00 != 0 for the largest blob — TODO confirm
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 3:
            cv2.circle(frame, (int(x), int(y)), 12,(0, 255, 255), 2)
    return frame
Example #5
Source File: 47.2-使用SVM进行-手写数据OCR.py From OpenCV-Python-Tutorial with MIT License | 6 votes |
def deskew(img):
    """Straighten a digit image by shearing away its moment-based skew.

    Returns an untouched copy when the vertical variance (mu02) is too
    small for a meaningful skew estimate.
    """
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11'] / m['mu02']
    shear = np.float32([[1, skew, -0.5 * SZ * skew],
                        [0, 1, 0]])
    img = cv2.warpAffine(img, shear, (SZ, SZ), flags=affine_flags)
    return img


# Compute the Sobel derivatives of the image in the X and Y directions
Example #6
Source File: mnist_helpers.py From mnist-helper with MIT License | 6 votes |
def deskew(image, image_shape, negated=False): """ This method deskwes an image using moments :param image: a numpy nd array input image :param image_shape: a tuple denoting the image`s shape :param negated: a boolean flag telling whether the input image is a negated one :returns: a numpy nd array deskewd image """ # negate the image if not negated: image = 255-image # calculate the moments of the image m = cv2.moments(image) if abs(m['mu02']) < 1e-2: return image.copy() # caclulating the skew skew = m['mu11']/m['mu02'] M = numpy.float32([[1, skew, -0.5*image_shape[0]*skew], [0,1,0]]) img = cv2.warpAffine(image, M, image_shape, flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR) return img
Example #7
Source File: __init__.py From rubiks-cube-tracker with MIT License | 6 votes |
def __init__(self, rubiks_parent, index, contour, heirarchy, debug):
    """Wrap an OpenCV contour with derived shape properties.

    Stores the approximated polygon, area, corner count and the contour's
    centroid (cX, cY); the centroid is None for zero-area contours.
    """
    self.rubiks_parent = rubiks_parent
    self.index = index
    self.contour = contour
    self.heirarchy = heirarchy
    peri = cv2.arcLength(contour, True)
    # 10% of the perimeter as approximation tolerance
    self.approx = cv2.approxPolyDP(contour, 0.1 * peri, True)
    self.area = cv2.contourArea(contour)
    self.corners = len(self.approx)
    self.width = None
    self.debug = debug
    # compute the center of the contour
    M = cv2.moments(contour)
    if M["m00"]:
        self.cX = int(M["m10"] / M["m00"])
        self.cY = int(M["m01"] / M["m00"])
        # if self.cX == 188 and self.cY == 93:
        #     log.warning("CustomContour M %s" % pformat(M))
    else:
        # zero-area contour: no meaningful centroid
        self.cX = None
        self.cY = None
Example #8
Source File: page_dewarp.py From page_dewarp with MIT License | 6 votes |
def blob_mean_and_tangent(contour):
    """Return (center, tangent) of a blob contour.

    The center is the centroid from raw moments; the tangent is the
    principal direction of the blob, taken as the first left singular
    vector of the normalized central-moment (covariance) matrix.
    NOTE(review): assumes a non-degenerate contour — m00 == 0 would
    divide by zero; confirm callers filter those out.
    """
    moments = cv2.moments(contour)
    area = moments['m00']
    mean_x = moments['m10'] / area
    mean_y = moments['m01'] / area
    # second-order central moments normalized by area = covariance matrix
    moments_matrix = np.array([
        [moments['mu20'], moments['mu11']],
        [moments['mu11'], moments['mu02']]
    ]) / area
    _, svd_u, _ = cv2.SVDecomp(moments_matrix)
    center = np.array([mean_x, mean_y])
    # dominant singular vector = blob's long axis direction
    tangent = svd_u[:, 0].flatten().copy()
    return center, tangent
Example #9
Source File: contours_hu_moments.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def centroid(moments):
    """Return the (x, y) centroid computed from raw image moments."""
    area = moments['m00']
    x_centroid = round(moments['m10'] / area)
    y_centroid = round(moments['m01'] / area)
    return x_centroid, y_centroid
Example #10
Source File: Contours.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License | 5 votes |
def main():
    """Demo: find blobs in an image, draw their contours and centroids,
    and print each contour's area and perimeter.
    """
    image = cv2.imread("../data/detect_blob.png", 1)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    binay_thresh = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
    # NOTE(review): the 3-value unpack is the OpenCV 3.x findContours API;
    # OpenCV 2.x/4.x return 2 values — confirm the pinned cv2 version.
    _, contours, _ = cv2.findContours(binay_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
    new_image = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
    for cnt in contours:
        cv2.drawContours(new_image, [cnt], -1, (255, 0, 255), -1)
        # get contour area using 'contourArea' method
        area_cnt = cv2.contourArea(cnt)
        # get the perimeter of any contour using 'arcLength'
        perimeter_cnt = cv2.arcLength(cnt, True)
        # get centroid of contour using moments
        # (assumes non-degenerate contours: m00 == 0 would divide by zero)
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(new_image, (cx, cy), 3, (0, 255, 0), -1)
        print("Area : {}, Perimeter : {}".format(area_cnt, perimeter_cnt))
    cv2.imshow("Contoured Image", new_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #11
Source File: contours_analysis.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def roundness(contour, moments):
    """Calculates the roundness of a contour.

    Roundness is perimeter^2 / (4 * pi * area); it equals 1 for a perfect
    circle and grows as the shape deviates from circularity.
    """
    perimeter = cv2.arcLength(contour, True)
    return (perimeter * perimeter) / (moments['m00'] * 4 * np.pi)
Example #12
Source File: contours_ellipses.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def eccentricity_from_moments(moments):
    """Calculates the eccentricity from the central moments of the contour.

    The semi-axis terms come from the eigenvalues of the second-order
    central-moment matrix; eccentricity is 0 for a circle and approaches
    1 for an elongated shape.
    """
    mu20 = moments['mu20']
    mu02 = moments['mu02']
    mu11 = moments['mu11']
    axis_sum = (mu20 + mu02) / 2
    axis_diff = np.sqrt(4 * mu11 ** 2 + (mu20 - mu02) ** 2) / 2
    return np.sqrt(1 - (axis_sum - axis_diff) / (axis_sum + axis_diff))
Example #13
Source File: dataset.py From PracticalPythonAndOpenCV_CaseStudies with GNU General Public License v3.0 | 5 votes |
def de_skew(image, width):
    """De-skew a digit image via an inverse affine shear, then resize it
    to a constant width for downstream feature extraction.
    """
    # Grab the height and width, and the image moments
    (h, w) = image.shape[:2]
    moments = cv2.moments(image)

    # Shear factor derived from the second-order central moments
    skew = moments["mu11"] / moments["mu02"]
    matrix = np.float32([[1, skew, -0.5 * w * skew],
                         [0, 1, 0]])
    image = cv2.warpAffine(image, matrix, (w, h),
                           flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)

    # Normalize the output width and return the de-skewed image
    return imutils.resize(image, width=width)
Example #14
Source File: svm_handwritten_digits_recognition_preprocessing_hog.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def deskew(img):
    """Pre-processing of the images"""
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        # not enough vertical spread to estimate skew reliably
        return img.copy()
    shear = m['mu11'] / m['mu02']
    transform = np.float32([[1, shear, -0.5 * SIZE_IMAGE * shear], [0, 1, 0]])
    return cv2.warpAffine(img, transform, (SIZE_IMAGE, SIZE_IMAGE),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
Example #15
Source File: squareClass.py From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International | 5 votes |
def __init__(self, position, c1, c2, c3, c4, index, image, state=''):
    """A chessboard square: four corners, a contour polygon, and a small
    circular ROI at the centroid used for color averaging.
    """
    # ID
    self.position = position
    self.index = index
    # Corners
    self.c1 = c1
    self.c2 = c2
    self.c3 = c3
    self.c4 = c4
    # State
    self.state = state
    # Actual polygon as a numpy array of corners
    self.contours = np.array([c1, c2, c3, c4], dtype=np.int32)
    # Properties of the contour
    self.area = cv2.contourArea(self.contours)
    self.perimeter = cv2.arcLength(self.contours, True)
    # Centroid from raw moments
    # NOTE(review): assumes a non-degenerate quadrilateral (m00 != 0) —
    # a zero-area square would raise ZeroDivisionError.
    M = cv2.moments(self.contours)
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    # ROI is the small circle within the square on which we will do the averaging
    self.roi = (cx, cy)
    self.radius = 5
    # Empty color. The colour the square has when it's not occupied, i.e. shade of black or white. By storing these
    # at the beginnig of the game, we can then make much more robust predictions on how the state of the board has
    # changed.
    self.emptyColor = self.roiColor(image)
Example #16
Source File: ColoredObjectDetector.py From robot-camera-platform with GNU General Public License v3.0 | 5 votes |
def process(self, image):
    """Detect the configured-color blob in a BGR frame.

    Sets self.detected and, when a blob is found, stores
    self.circle_coordonates as ((cx, cy), radius) of its min enclosing circle.
    """
    self.detected = False
    hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
    # erode/dilate to remove small speckle noise from the mask
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # [-2] keeps compatibility with OpenCV versions returning 2 or 3 values
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(contours) == 0:
        return
    largest_contour = max(contours, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
    M = cv2.moments(largest_contour)
    # centroid from raw moments; assumes m00 != 0 for the largest blob — TODO confirm
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    self.circle_coordonates = (center, int(radius))
    self.detected = True
Example #17
Source File: digits.py From PyCV-time with MIT License | 5 votes |
def deskew(img):
    """Remove moment-estimated shear from a digit image.

    When mu02 is near zero the skew is ill-defined, so an unmodified
    copy is returned instead.
    """
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11'] / m['mu02']
    warp = np.float32([[1, skew, -0.5 * SZ * skew],
                       [0, 1, 0]])
    return cv2.warpAffine(img, warp, (SZ, SZ),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
Example #18
Source File: digits.py From PyCV-time with MIT License | 5 votes |
def deskew(img):
    """Shear-correct a digit image using its image moments."""
    moments = cv2.moments(img)
    mu02 = moments['mu02']
    # skew is undefined when the vertical variance vanishes
    if abs(mu02) < 1e-2:
        return img.copy()
    skew_factor = moments['mu11'] / mu02
    affine = np.float32([[1, skew_factor, -0.5 * SZ * skew_factor], [0, 1, 0]])
    img = cv2.warpAffine(img, affine, (SZ, SZ),
                         flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    return img
Example #19
Source File: heading.py From PyCV-time with MIT License | 5 votes |
def find_contour_center(contour):
    """Return the (cx, cy) centroid of contour[0], or None when absent."""
    if contour[0] is None:
        return None
    moments = cv2.moments(contour[0])
    cx = int(moments['m10'] / moments['m00'])
    cy = int(moments['m01'] / moments['m00'])
    return cx, cy
Example #20
Source File: create_dataset.py From keras-autoencoder with GNU General Public License v3.0 | 5 votes |
def detect_ball(frame):
    """Find the green ball in a BGR frame.

    Returns (center, radius) of the largest green blob's min enclosing
    circle, or None when nothing (large enough) is found. Relies on
    module-level greenLower/greenUpper HSV bounds.
    """
    # blur to suppress high-frequency noise before thresholding
    # NOTE(review): `blurred` is computed but `hsv` is built from the raw
    # frame — presumably the blur was meant to feed cvtColor; confirm.
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # [-2] keeps compatibility across OpenCV versions returning 2 or 3 values
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    # only proceed if at least one contour was found
    if len(cnts) == 0:
        return
    # find the largest contour in the mask, then use
    # it to compute the minimum enclosing circle and
    # centroid
    c = max(cnts, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(c)
    M = cv2.moments(c)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    if radius < 10:
        print('Too small')
        return
    return center, radius
Example #21
Source File: svm_handwritten_digits_recognition_preprocessing_hog_c_gamma.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def deskew(img):
    """Pre-processing of the images"""
    moments = cv2.moments(img)
    # bail out when the vertical variance is too small for a stable estimate
    if abs(moments['mu02']) < 1e-2:
        return img.copy()
    skew = moments['mu11'] / moments['mu02']
    shear_matrix = np.float32([[1, skew, -0.5 * SIZE_IMAGE * skew],
                               [0, 1, 0]])
    img = cv2.warpAffine(img, shear_matrix, (SIZE_IMAGE, SIZE_IMAGE),
                         flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
    return img
Example #22
Source File: mask_analysis.py From deepgaze with MIT License | 5 votes |
def returnMaxAreaCenter(self, mask):
    """it returns the centre of the contour with largest area.

    This method could be useful to find the center of a face when a
    skin detector filter is used.

    @param mask the binary image to use in the function
    @return get the x and y center coords of the contour whit the
        largest area. In case of error it returns a tuple (None, None)
    """
    if(mask is None):
        return (None, None)
    # work on a copy: findContours may modify its input in older OpenCV
    mask = np.copy(mask)
    if(len(mask.shape) == 3):
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    # NOTE(review): the two-value unpack matches OpenCV 2.x/4.x;
    # OpenCV 3.x returns three values and would raise here — confirm
    # the supported cv2 version.
    contours, hierarchy = cv2.findContours(mask, 1, 2)
    area_array = np.zeros(len(contours))  #contains the area of the contours
    counter = 0
    for cnt in contours:
        #cv2.drawContours(image, [cnt], 0, (0,255,0), 3)
        #print("Area: " + str(cv2.contourArea(cnt)))
        area_array[counter] = cv2.contourArea(cnt)
        counter += 1
    if(area_array.size==0):
        return (None, None)  #the array is empty
    max_area_index = np.argmax(area_array)  #return the index of the max_area element
    #cv2.drawContours(image, [contours[max_area_index]], 0, (0,255,0), 3)
    #Get the centre of the max_area element
    cnt = contours[max_area_index]
    M = cv2.moments(cnt)  #calculate the moments
    # zero-area contour: no meaningful centroid
    if(M['m00'] == 0):
        return (None, None)
    cx = int(M['m10']/M['m00'])  #get the center from the moments
    cy = int(M['m01']/M['m00'])
    return (cx, cy)  #return the center coords
Example #23
Source File: follower_line_finder.py From rosbook with Apache License 2.0 | 5 votes |
def image_callback(self, msg):
    """Locate the yellow line in the camera image and mark its centroid.

    Thresholds the frame in HSV, masks everything outside a thin
    horizontal search band, and computes the band's centroid from
    image moments.
    """
    image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower_yellow = numpy.array([10, 10, 10])
    upper_yellow = numpy.array([255, 255, 250])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    # BEGIN CROP
    h, w, d = image.shape
    # Integer division (//): float slice indices raise TypeError on Python 3.
    search_top = 3 * h // 4
    search_bot = search_top + 20
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0
    # END CROP
    # BEGIN FINDER
    M = cv2.moments(mask)
    if M['m00'] > 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        # END FINDER
        # BEGIN CIRCLE
        # Draw only when a centroid exists: the original referenced cx/cy
        # unconditionally and raised NameError on an empty mask.
        cv2.circle(image, (cx, cy), 20, (0, 0, 255), -1)
        # END CIRCLE
    cv2.imshow("window", image)
    cv2.waitKey(3)
Example #24
Source File: contours_ellipses.py From Mastering-OpenCV-4-with-Python with MIT License | 5 votes |
def roundness(contour, moments):
    """Calculates the roundness of a contour (1.0 for a perfect circle)."""
    arc = cv2.arcLength(contour, True)
    area_term = moments['m00'] * 4 * np.pi
    k = arc * arc / area_term
    return k
Example #25
Source File: camera_opencv.py From Adeept_RaspTank with MIT License | 5 votes |
def findColor(self, frame_image):
    """Track the configured color blob and steer the camera servo at it.

    Relies on module-level colorLower/colorUpper HSV bounds and the
    CVThread/led/move helpers; pauses the thread when nothing is found.
    """
    hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, colorLower, colorUpper)#1
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # [-2] keeps compatibility across OpenCV versions returning 2 or 3 values
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    if len(cnts) > 0:
        self.findColorDetection = 1
        c = max(cnts, key=cv2.contourArea)
        ((self.box_x, self.box_y), self.radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # centroid from raw moments; assumes m00 != 0 for the largest blob — TODO confirm
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        X = int(self.box_x)
        Y = int(self.box_y)
        # error relative to frame center (presumably a 640x480 frame — verify)
        error_Y = 240 - Y
        error_X = 320 - X
        # CVThread.servoMove(CVThread.P_servo, CVThread.P_direction, error_X)
        CVThread.servoMove(CVThread.T_servo, CVThread.T_direction, error_Y)
        # if CVThread.X_lock == 1 and CVThread.Y_lock == 1:
        if CVThread.Y_lock == 1:
            led.setColor(255,78,0)
            # switch.switch(1,1)
            # switch.switch(2,1)
            # switch.switch(3,1)
        else:
            led.setColor(0,78,255)
            # switch.switch(1,0)
            # switch.switch(2,0)
            # switch.switch(3,0)
    else:
        self.findColorDetection = 0
        move.motorStop()
    self.pause()
Example #26
Source File: zebrafishAnalysis.py From tierpsy-tracker with MIT License | 5 votes |
def getTailStartPoint(head_mask, head_point, config):
    """Estimate where the tail starts from the head mask.

    Computes the angle from the head point to the head contour's center,
    then 'walks' config.tail_offset pixels along that line.
    """
    # Calculate the angle from the head point to the contour center
    # Then, 'walk' down the line from the head point to the contour center point a set length
    # [-2:] keeps compatibility across OpenCV versions returning 2 or 3 values
    contours, hierarchy = cv2.findContours(head_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
    contour = contours[-1]
    # Get contour center (assumes a non-degenerate contour: m00 != 0)
    contour_moments = cv2.moments(contour)
    contour_center_x = int(contour_moments['m10'] / contour_moments['m00'])
    contour_center_y = int(contour_moments['m01'] / contour_moments['m00'])
    head_x = head_point[0]
    head_y = head_point[1]
    head_contour_center_angle = math.atan2(contour_center_y - head_y, contour_center_x - head_x)
    # NOTE(review): head_x/head_y are re-assigned to the same values here — redundant
    head_x = head_point[0]
    head_y = head_point[1]
    # Calculate tail start point
    tail_start_x = head_x + config.tail_offset * math.cos(head_contour_center_angle)
    tail_start_y = head_y + config.tail_offset * math.sin(head_contour_center_angle)
    return (int(tail_start_x), int(tail_start_y))
Example #27
Source File: omr.py From omr with MIT License | 5 votes |
def get_centroid(contour):
    """Return the (x, y) mass center of a contour from its moments."""
    moments = cv2.moments(contour)
    area = moments["m00"]
    x = int(moments["m10"] / area)
    y = int(moments["m01"] / area)
    return (x, y)
Example #28
Source File: contours.py From imutils with MIT License | 5 votes |
def label_contour(image, c, i, color=(0, 255, 0), thickness=2):
    """Draw contour `c` on `image` and label it with its 1-based index.

    The label is placed slightly left of the contour's centroid, which is
    computed from the contour's image moments.
    """
    moments = cv2.moments(c)
    center_x = int(moments["m10"] / moments["m00"])
    center_y = int(moments["m01"] / moments["m00"])

    # outline the contour, then write the index next to its center
    cv2.drawContours(image, [c], -1, color, thickness)
    cv2.putText(image, "#{}".format(i + 1), (center_x - 20, center_y),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)

    # return the image with the contour number drawn on it
    return image
Example #29
Source File: main.py From sbb_textline_detection with Apache License 2.0 | 5 votes |
def find_contours_mean_y_diff(self, contours_main):
    """Mean vertical gap between contour centroids, sorted by y."""
    centroids_y = []
    for cnt in contours_main:
        m = cv2.moments(cnt)
        # +1e-32 keeps the division safe for zero-area contours
        centroids_y.append(m['m01'] / (m['m00'] + 1e-32))
    return np.mean(np.diff(np.sort(np.array(centroids_y))))
Example #30
Source File: FingerDetection.py From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License | 5 votes |
def centroid(max_contour):
    """Return the (cx, cy) centroid of a contour, or None when the
    contour has zero area (m00 == 0)."""
    moment = cv2.moments(max_contour)
    if moment['m00'] == 0:
        return None
    cx = int(moment['m10'] / moment['m00'])
    cy = int(moment['m01'] / moment['m00'])
    return cx, cy