Python cv2.RANSAC Examples

The following are 30 code examples showing how to use cv2.RANSAC(). The examples are extracted from open source projects; the project, author, source file, and license are listed above each example.

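Before the project examples, here is a minimal, self-contained sketch of the typical cv2.RANSAC pattern. The points below are synthetic, generated purely for illustration:

import numpy as np
import cv2

# Warp random points by a known homography, then corrupt a few of them
# so that RANSAC has genuine outliers to reject.
rng = np.random.default_rng(0)
pts_src = rng.uniform(0, 400, (50, 1, 2)).astype(np.float32)
H_true = np.array([[1.0, 0.05, 10.0],
                   [-0.03, 1.0, 5.0],
                   [1e-4, 0.0, 1.0]])
pts_dst = cv2.perspectiveTransform(pts_src, H_true)
pts_dst[:10] += rng.uniform(-50.0, 50.0, (10, 1, 2)).astype(np.float32)

# cv2.RANSAC keeps the model with the most inliers within the reprojection
# threshold; mask marks each correspondence as inlier (1) or outlier (0).
H, mask = cv2.findHomography(pts_src, pts_dst, cv2.RANSAC, ransacReprojThreshold=3.0)
print('inliers: %d / %d' % (int(mask.sum()), len(mask)))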

Example 1
Project: focusstack   Author: cmcguinness   File: FocusStack.py    License: Apache License 2.0
def findHomography(image_1_kp, image_2_kp, matches):
    image_1_points = np.zeros((len(matches), 1, 2), dtype=np.float32)
    image_2_points = np.zeros((len(matches), 1, 2), dtype=np.float32)

    for i in range(len(matches)):
        image_1_points[i] = image_1_kp[matches[i].queryIdx].pt
        image_2_points[i] = image_2_kp[matches[i].trainIdx].pt

    homography, mask = cv2.findHomography(image_1_points, image_2_points, cv2.RANSAC, ransacReprojThreshold=2.0)

    return homography


#
#   Align the images so they overlap properly...
#
# 
Example 2
Project: GIFT   Author: zju3dv   File: evaluation.py    License: Apache License 2.0
def estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2):
    f_avg = (K1[0, 0] + K2[0, 0]) / 2
    pts1, pts2 = np.ascontiguousarray(pts1, np.float32), np.ascontiguousarray(pts2, np.float32)

    pts_l_norm = cv2.undistortPoints(np.expand_dims(pts1, axis=1), cameraMatrix=K1, distCoeffs=None)
    pts_r_norm = cv2.undistortPoints(np.expand_dims(pts2, axis=1), cameraMatrix=K2, distCoeffs=None)

    E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                                   method=cv2.RANSAC, prob=0.999, threshold=3.0 / f_avg)
    points, R_est, t_est, mask_pose = cv2.recoverPose(E, pts_l_norm, pts_r_norm)
    return mask[:, 0].astype(bool), R_est, t_est
Example 3
Project: pyslam   Author: luigifreda   File: visual_odometry.py    License: GNU General Public License v3.0
def removeOutliersByMask(self, mask): 
        if mask is not None:    
            n = self.kpn_cur.shape[0]     
            mask_index = [ i for i,v in enumerate(mask) if v > 0]    
            self.kpn_cur = self.kpn_cur[mask_index]           
            self.kpn_ref = self.kpn_ref[mask_index]           
            if self.des_cur is not None: 
                self.des_cur = self.des_cur[mask_index]        
            if self.des_ref is not None: 
                self.des_ref = self.des_ref[mask_index]  
            if kVerbose:
                print('removed ', n-self.kpn_cur.shape[0],' outliers')                

    # fit essential matrix E with RANSAC such that:  p2.T * E * p1 = 0  where  E = [t21]x * R21
    # out: [Rrc, trc]   (with respect to 'ref' frame) 
    # N.B.1: trc is estimated up to scale (i.e. the algorithm always returns ||trc||=1, we need a scale in order to recover a translation which is coherent with previous estimated poses)
    # N.B.2: this function has problems in the following cases: [see Hartley/Zisserman Book]
    # - 'geometrically degenerate correspondences', e.g. all the observed features lie on a plane (the correct model for the correspondences is a homography) or lie on a ruled quadric 
    # - degenerate motions such as a pure rotation (sufficient parallax is required) or an infinitesimal viewpoint change (where the translation is almost zero)
    # N.B.3: the five-point algorithm (used for estimating the Essential Matrix) seems to work well in the degenerate planar cases [Five-Point Motion Estimation Made Easy, Hartley]
    # N.B.4: as reported above, in case of pure rotation, this algorithm will compute a useless fundamental matrix which cannot be decomposed to return the rotation 
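The function these notes document is cut off in this excerpt. As a minimal sketch, the fit they describe looks like the calls in Examples 26 and 28 below (kpn_ref and kpn_cur are assumed to be Nx2 arrays of normalized, undistorted keypoint coordinates):

E, mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.),
                                     method=cv2.RANSAC, prob=0.999, threshold=0.0003)
_, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))
# R, t describe 'cur' with respect to 'ref'; ||t|| = 1, since the scale is unobservable (see N.B.1 above).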
Example 4
Project: videoseg   Author: pathak22   File: dm_tracker.py    License: MIT License
def frame_homography(totalPts, homTh):
    """
    Filter foreground points i.e. the outlier points found by fitting
    homography using RANSAC
    Input:
        totalPts: (numAllPoints, 4): x0, y0, x1, y1
        fgPts: (numAllPoints, 4): x0, y0, x1, y1
    """
    if totalPts.ndim != 2 or totalPts.shape[0] < 8 or homTh < 0:
        return totalPts

    import cv2
    p1 = totalPts[:, :2].astype('float')
    p2 = totalPts[:, 2:4].astype('float')
    _, status = cv2.findHomography(
        p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
    fgPts = totalPts[status[:, 0] == 0, :]
    return fgPts 
Example 5
Project: ImageProcessingProjects   Author: shekkizh   File: StitchingFromVideo.py    License: MIT License
def getHomography(self, rightKps, rightDescriptor):
        rawMatches = self.matcher.knnMatch(self.leftDescriptor, rightDescriptor, 2)
        matches = []

        for m in rawMatches:
            if(len(m)==2 and m[0].distance < m[1].distance*self.ratio):
                matches.append((m[0].trainIdx, m[0].queryIdx))

        if(len(matches) >=4):
            # print(matches)
            ptsB = np.float32([self.leftKps[i] for (_, i) in matches])
            ptsA = np.float32([rightKps[i] for (i, _) in matches])

            # ptsB = H*ptsA
            H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, self.reprojThresh)
            return H

        return None 
Example 6
Project: ImageProcessingProjects   Author: shekkizh   File: ImageStitching.py    License: MIT License
def getHomography(self, rightKps, rightDescriptor):
        rawMatches = self.matcher.knnMatch(self.leftDescriptor, rightDescriptor, 2)
        matches = []

        for m in rawMatches:
            if(len(m)==2 and m[0].distance < m[1].distance*self.ratio):
                matches.append((m[0].trainIdx, m[0].queryIdx))

        if(len(matches) >=4):
            # print(matches)
            ptsB = np.float32([self.leftKps[i] for (_, i) in matches])
            ptsA = np.float32([rightKps[i] for (i, _) in matches])

            # ptsB = H*ptsA
            H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, self.reprojThresh)
            return H

        return None 
Example 7
Project: hfnet   Author: ethz-asl   File: local_descriptors.py    License: MIT License
def compute_homography_error(kpts1, kpts2, matches, shape2, H_gt):
    if matches.shape[0] == 0:
        return None
    kpts1 = kpts1[matches[:, 0]]
    kpts2 = kpts2[matches[:, 1]]
    H, _ = cv2.findHomography(kpts2, kpts1, cv2.RANSAC, 3.0)
    if H is None:
        return None

    w, h = shape2
    corners2 = to_homogeneous(
        np.array([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]))
    corners1_gt = np.dot(corners2, np.transpose(H_gt))
    corners1_gt = corners1_gt[:, :2] / corners1_gt[:, 2:]
    corners1 = np.dot(corners2, np.transpose(H))
    corners1 = corners1[:, :2] / corners1[:, 2:]
    mean_dist = np.mean(np.linalg.norm(corners1 - corners1_gt, axis=1))
    return mean_dist 
Example 8
Project: ImageAnalysis   Author: UASLab   File: Placer.py    License: MIT License
def findHomography(self, i1, i2, pairs):
        src = []
        dst = []
        for pair in pairs:
            c1 = i1.coord_list[pair[0]]
            c2 = i2.coord_list[pair[1]]
            src.append( c1 )
            dst.append( c2 )
        #H, status = cv2.findHomography(np.array([src]).astype(np.float32),
        #                               np.array([dst]).astype(np.float32),
        #                               cv2.RANSAC, 5.0)
        H, status = cv2.findHomography(np.array([src]).astype(np.float32),
                                       np.array([dst]).astype(np.float32))
        #print str(affine)
        return H

    # compare against best 'placed' image (averaging transform
    # matrices together directly doesn't do what we want) 
Example 9
def match(self, key_a, fea_a, key_b, fea_b):
        # Compute the raw matches and initialize the list of actual matches
        matcher = cv2.DescriptorMatcher_create(self.distance_method)
        raw_matches = matcher.knnMatch(fea_b, fea_a, 2)
        matches = []

        # Loop over the raw matches
        for match in raw_matches:
            # Ensure the distance is within a certain ratio of each other
            if len(match) == 2 and match[0].distance < match[1].distance * self.ratio:
                matches.append((match[0].trainIdx, match[0].queryIdx))

        # Check to see if there are enough matches to process
        if len(matches) > self.min_matches:
            # Construct the two sets of points
            poi_a = np.float32([key_a[i] for (i, _) in matches])
            poi_b = np.float32([key_b[j] for (_, j) in matches])

            # Compute the homography between the two sets of points and compute the ratio of matched points
            (_, status) = cv2.findHomography(poi_a, poi_b, cv2.RANSAC, 4.0)

            # Return the ratio of the number of matched keypoints to the total number of keypoints
            return float(status.sum()) / status.size

        # No matches were found
        return -1.0
Example 10
Project: OpenCV-Python-Tutorial   Author: makelove   File: plane_tracker.py    License: MIT License
def track(self, frame):
        '''Returns a list of detected TrackedTarget objects'''
        self.frame_points, frame_descrs = self.detect_features(frame)
        if len(self.frame_points) < MIN_MATCH_COUNT:
            return []
        matches = self.matcher.knnMatch(frame_descrs, k = 2)
        matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
        if len(matches) < MIN_MATCH_COUNT:
            return []
        matches_by_id = [[] for _ in range(len(self.targets))]
        for m in matches:
            matches_by_id[m.imgIdx].append(m)
        tracked = []
        for imgIdx, matches in enumerate(matches_by_id):
            if len(matches) < MIN_MATCH_COUNT:
                continue
            target = self.targets[imgIdx]
            p0 = [target.keypoints[m.trainIdx].pt for m in matches]
            p1 = [self.frame_points[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
            status = status.ravel() != 0
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            x0, y0, x1, y1 = target.rect
            quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
            quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

            track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
            tracked.append(track)
        tracked.sort(key = lambda t: len(t.p0), reverse=True)
        return tracked 
Example 11
Project: OpenCV-Python-Tutorial   Author: makelove   File: asift.py    License: MIT License
def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, None, H) 
Example 12
Project: OpenCV-Python-Tutorial   Author: makelove   File: find_obj.py    License: MIT License
def match_and_draw(win):
        print('matching...')
        raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, status, H) 
Example 13
Project: Airtest   Author: AirtestProject   File: keypoint_base.py    License: Apache License 2.0
def _find_homography(self, sch_pts, src_pts):
        """多组特征点对时,求取单向性矩阵."""
        try:
            M, mask = cv2.findHomography(sch_pts, src_pts, cv2.RANSAC, 5.0)
        except Exception:
            import traceback
            traceback.print_exc()
            raise HomographyError("OpenCV error in _find_homography()...")
        else:
            if mask is None:
                raise HomographyError("In _find_homography(), find no transfomation matrix...")
            else:
                return M, mask 
Example 14
Project: Airtest   Author: AirtestProject   File: sift.py    License: Apache License 2.0
def _find_homography(sch_pts, src_pts):
    """多组特征点对时,求取单向性矩阵."""
    try:
        M, mask = cv2.findHomography(sch_pts, src_pts, cv2.RANSAC, 5.0)
    except Exception:
        import traceback
        traceback.print_exc()
        raise HomographyError("OpenCV error in _find_homography()...")
    else:
        if mask is None:
            raise HomographyError("In _find_homography(), find no mask...")
        else:
            return M, mask 
Example 15
Project: douglas-quaid   Author: CIRCL   File: distance_ransac_orb.py    License: GNU General Public License v3.0
def ransac_orb_distance(self, pic_package_from: Dict, pic_package_to: Dict) -> Dict[str, sd.AlgoMatch]:
        """
        Distance between two provided pictures (dicts) with RANSAC-ORB methods
        :param pic_package_from: first picture dict
        :param pic_package_to: second picture dict
        :return: A dictionary of algo name to the match detail (distance, decision ..)
        """

        answer = {}
        self.logger.info("RANSAC-Orb distance computation ... ")

        # Verify if what is needed to compute it is present
        if pic_package_from.get("ORB_DESCRIPTORS", None) is None \
                or pic_package_to.get("ORB_DESCRIPTORS", None) is None:
            self.logger.warning(f"RANSAC-ORB descriptors are NOT presents in the results.")
            raise AlgoFeatureNotPresentError("None RANSAC-ORB descriptors in orb distance.")

        # Verify if what is needed to compute it is present
        if pic_package_from.get("ORB_KEYPOINTS", None) is None \
                or pic_package_to.get("ORB_KEYPOINTS", None) is None:
            self.logger.warning(f"RANSAC-ORB keypoints are NOT presents in the results.")
            raise AlgoFeatureNotPresentError("None RANSAC-ORB keypoints in orb distance.")

        # Add result for enabled algorithms
        try:
            if self.fe_conf.RANSAC_ORB.get("is_enabled", False):
                answer = self.add_results(self.fe_conf.RANSAC_ORB, pic_package_from, pic_package_to, answer)

        except Exception as e:
            self.logger.error(traceback.print_tb(e.__traceback__))
            self.logger.error("Error during RANSAC-orb distance calculation : " + str(e))

        return answer 
Example 16
Project: douglas-quaid   Author: CIRCL   File: distance_ransac_orb.py    License: GNU General Public License v3.0
def filter_matches(matches: List, matches_threshold_to_accelerate: float) -> List:
        # Output a list of matches filtered by distance.
        # Removing the farthest matches greatly accelerates RANSAC.
        # From: http://answers.opencv.org/question/984/performance-of-findhomography/

        diminished_matches = []
        for m in matches:
            if m.distance < matches_threshold_to_accelerate:
                diminished_matches.append(m)

        return diminished_matches 
Example 17
Project: douglas-quaid   Author: CIRCL   File: distance_ransac_orb.py    License: GNU General Public License v3.0
def find_homography(keypoints_pic1, keypoints_pic2, matches) -> (List, np.float32, np.float32):
        # Find a homography matrix between two pictures.
        # From two lists of keypoints and a list of matches, extract
        # a list of good matches found by RANSAC and two transformation matrices (a homography and a rigid/affine transformation)

        # Instantiate outputs
        good = []

        # Transforming keypoints to list of points
        src_pts = np.float32([keypoints_pic1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([keypoints_pic2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

        # Find the transformation between points
        transformation_matrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

        # Compute a rigid transformation (without depth, only scale + rotation + translation)
        transformation_rigid_matrix, rigid_mask = cv2.estimateAffinePartial2D(src_pts, dst_pts)

        # Get a mask for the matches: each entry says whether that match is an inlier or an outlier
        matchesMask = mask.ravel().tolist()

        # Filter the matches list thanks to the mask
        for i, element in enumerate(matchesMask):
            if element == 1:
                good.append(matches[i])

        return good, transformation_matrix, transformation_rigid_matrix 
Example 18
Project: dual-fisheye-video-stitching   Author: cynricfu   File: stitcher.py    License: MIT License
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # FLANN parameters
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)   # or pass empty dictionary

        # compute the raw matches
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        rawMatches = flann.knnMatch(featuresA, featuresB, k=2)

        # perform Lowe's ratio test to get actual matches
        matches = []
        for m, n in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if m.distance < ratio * n.distance:
                # here queryIdx corresponds to kpsA
                # trainIdx corresponds to kpsB
                matches.append((m.trainIdx, m.queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(
                ptsB, ptsA, cv2.RANSAC, reprojThresh)

            # return the matches along with the homography matrix
            # and the status of each matched point
            return (matches, H, status)
        else:
            # otherwise, no homography could be computed
            return None 
Example 19
Project: Panoramic-Image-Stitching-using-invariant-features   Author: AVINASH793   File: panorama.py    License: MIT License
def Compute_Homography(self, pointsA, pointsB, max_Threshold):
        # compute the homography using matched points from both images

        (H, status) = cv2.findHomography(pointsA, pointsB, cv2.RANSAC, max_Threshold)
        return (H,status) 
Example 20
Project: OpenCV-3-x-with-Python-By-Example   Author: PacktPublishing   File: pose_estimation.py    License: MIT License
def track_target(self, frame): 
        self.cur_keypoints, self.cur_descriptors = self.detect_features(frame) 

        if len(self.cur_keypoints) < self.min_matches: return []
        try: matches = self.feature_matcher.knnMatch(self.cur_descriptors, k=2)
        except Exception as e:
            print('Invalid target, please select another with features to extract')
            return []
        matches = [match[0] for match in matches if len(match) == 2 and match[0].distance < match[1].distance * 0.75] 
        if len(matches) < self.min_matches: return [] 
 
        matches_using_index = [[] for _ in range(len(self.tracking_targets))] 
        for match in matches: 
            matches_using_index[match.imgIdx].append(match) 
 
        tracked = [] 
        for image_index, matches in enumerate(matches_using_index): 
            if len(matches) < self.min_matches: continue 
 
            target = self.tracking_targets[image_index] 
            points_prev = [target.keypoints[m.trainIdx].pt for m in matches]
            points_cur = [self.cur_keypoints[m.queryIdx].pt for m in matches]
            points_prev, points_cur = np.float32((points_prev, points_cur))
            H, status = cv2.findHomography(points_prev, points_cur, cv2.RANSAC, 3.0) 
            status = status.ravel() != 0

            if status.sum() < self.min_matches: continue 
 
            points_prev, points_cur = points_prev[status], points_cur[status] 
 
            x_start, y_start, x_end, y_end = target.rect 
            quad = np.float32([[x_start, y_start], [x_end, y_start], [x_end, y_end], [x_start, y_end]])
            quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)
            track = self.tracked_target(target=target, points_prev=points_prev, points_cur=points_cur, H=H, quad=quad) 
            tracked.append(track) 
 
        tracked.sort(key = lambda x: len(x.points_prev), reverse=True) 
        return tracked 
 
    # Detect features in the selected ROIs and return the keypoints and descriptors 
Example 21
Project: airtest   Author: NetEase   File: findobj.py    License: BSD 3-Clause "New" or "Revised" License
def match_and_draw(win):
        print('matching...')
        raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, status, H) 
Example 22
Project: airtest   Author: NetEase   File: find_obj.py    License: BSD 3-Clause "New" or "Revised" License
def match_and_draw(win):
        print('matching...')
        raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, status, H) 
Example 23
Project: airtest   Author: NetEase   File: image_SIFT.py    License: BSD 3-Clause "New" or "Revised" License
def _homography(src_pts,dst_pts,template_width,template_height,match_point=None):
    row,col,dim = dst_pts.shape
    if match_point:
        for i in range(row):
            match_point.append([int(dst_pts[i][0][0]),int(dst_pts[i][0][1])])
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    pts = np.float32([[0, 0], [0, template_height - 1], 
                    [template_width - 1, template_height - 1], 
                    [template_width - 1, 0]]).reshape(-1, 1, 2)
    # Find a transformation matrix that maps the query image onto the detected image
    dst = cv2.perspectiveTransform(pts, M) 
    return dst 
Example 24
Project: airtest   Author: NetEase   File: sift.py    License: BSD 3-Clause "New" or "Revised" License
def find(search_file, image_file, threshold=None):
    '''
    The threshold parameter is disabled in SIFT match.
    '''
    sch = _cv2open(search_file, 0)
    img = _cv2open(image_file, 0)

    kp_sch, des_sch = sift.detectAndCompute(sch, None)
    kp_img, des_img = sift.detectAndCompute(img, None)

    if len(kp_sch) < MIN_MATCH_COUNT or len(kp_img) < MIN_MATCH_COUNT:
        return None

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    matches = flann.knnMatch(des_sch, des_img, k=2)

    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        sch_pts = np.float32([kp_sch[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        img_pts = np.float32([kp_img[m.trainIdx].pt for m in good]).reshape(-1, 1, 2) 

        M, mask = cv2.findHomography(sch_pts, img_pts, cv2.RANSAC, 5.0)
        # matchesMask = mask.ravel().tolist()

        h, w = sch.shape
        pts = np.float32([ [0, 0], [0, h-1], [w-1, h-1], [w-1, 0] ]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        lt, br = dst[0][0], dst[2][0]
        return list(map(int, (lt[0]+w/2, lt[1]+h/2)))
    else:
        return None 
Example 25
Project: pyslam   Author: luigifreda   File: utils_geom.py    License: GNU General Public License v3.0
def check_dist_epipolar_line(kp1,kp2,F12,sigma2_kp2):
    # Epipolar line in second image l = kp1' * F12 = [a b c]
    l = np.dot(F12.T,np.array([kp1[0],kp1[1],1]))
    num = l[0]*kp2[0] + l[1]*kp2[1] + l[2]  # kp1' * F12 * kp2
    den = l[0]*l[0] + l[1]*l[1]   # a*a+b*b

    if(den==0):
    #if(den < 1e-20):
        return False

    dist_sqr = num*num/den              # squared (minimum) distance of kp2 from the epipolar line l
    return dist_sqr < 3.84 * sigma2_kp2 # 3.84 = 95% quantile of the inverse cumulative chi-square for 1 DOF (Hartley/Zisserman, p. 567)
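A quick sanity check of the 3.84 constant used above (assuming SciPy is available):

from scipy.stats import chi2
print(chi2.ppf(0.95, df=1))   # ~3.8415, the 95% quantile for 1 degree of freedom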



# fit essential matrix E with RANSAC such that:  p2.T * E * p1 = 0  where  E = [t21]x * R21
# input: kpn_ref and kpn_cur are two arrays of [Nx2] normalized coordinates of matched keypoints 
# out: a) Trc: homogeneous transformation matrix containing Rrc, trc  ('cur' frame with respect to 'ref' frame)    pr = Trc * pc 
#      b) mask_match: array of N elements, every element of which is set to 0 for outliers and to 1 for the other points (computed only in the RANSAC and LMedS methods)
# N.B.1: trc is estimated up to scale (i.e. the algorithm always returns ||trc||=1, we need a scale in order to recover a translation which is coherent with previous estimated poses)
# N.B.2: this function has problems in the following cases: [see Hartley/Zisserman Book]
# - 'geometrically degenerate correspondences', e.g. all the observed features lie on a plane (the correct model for the correspondences is a homography) or lie on a ruled quadric 
# - degenerate motions such as a pure rotation (sufficient parallax is required) or an infinitesimal viewpoint change (where the translation is almost zero)
# N.B.3: the five-point algorithm (used for estimating the Essential Matrix) seems to work well in the degenerate planar cases [Five-Point Motion Estimation Made Easy, Hartley]
# N.B.4: as reported above, in case of pure rotation, this algorithm will compute a useless fundamental matrix which cannot be decomposed to return a correct rotation 
# N.B.5: the OpenCV findEssentialMat function uses the five-point algorithm solver by D. Nister => hence it should work well in the degenerate planar cases 
Example 26
Project: pyslam   Author: luigifreda   File: utils_geom.py    License: GNU General Public License v3.0
def estimate_pose_ess_mat(kpn_ref, kpn_cur, method=cv2.RANSAC, prob=0.999, threshold=0.0003):	
    # here, the essential matrix algorithm uses the five-point algorithm solver by D. Nister (see the notes and paper above )     
    E, mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.), method=method, prob=prob, threshold=threshold)                         
    _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))   
    return poseRt(R,t.T), mask_match  # Trc, mask_match


# z rotation, input in radians 
Example 27
Project: pyslam   Author: luigifreda   File: initializer.py    License: GNU General Public License v3.0
def estimatePose(self, kpn_ref, kpn_cur):	     
        # here, the essential matrix algorithm uses the five-point algorithm solver by D. Nister (see the notes and paper above )     
        E, self.mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.), method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)                         
        _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))                                                     
        return poseRt(R,t.T)  # Trc  homogeneous transformation matrix with respect to 'ref' frame,  pr_= Trc * pc_        

    # push the first image 
Example 28
Project: pyslam   Author: luigifreda   File: visual_odometry.py    License: GNU General Public License v3.0
def estimatePose(self, kps_ref, kps_cur):	
        kp_ref_u = self.cam.undistort_points(kps_ref)	
        kp_cur_u = self.cam.undistort_points(kps_cur)	        
        self.kpn_ref = self.cam.unproject_points(kp_ref_u)
        self.kpn_cur = self.cam.unproject_points(kp_cur_u)
        if kUseEssentialMatrixEstimation:
            # the essential matrix algorithm is more robust since it uses the five-point algorithm solver by D. Nister (see the notes and paper above )
            E, self.mask_match = cv2.findEssentialMat(self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.), method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)
        else:
            # just for the hell of testing fundamental matrix fitting ;-) 
            F, self.mask_match = self.computeFundamentalMatrix(kp_cur_u, kp_ref_u)
            E = self.cam.K.T @ F @ self.cam.K    # E = K.T * F * K 
        #self.removeOutliersFromMask(self.mask)  # do not remove outliers, the last unmatched/outlier features can be matched and recognized as inliers in subsequent frames                          
        _, R, t, mask = cv2.recoverPose(E, self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.))   
        return R,t  # Rrc, trc (with respect to 'ref' frame) 
Example 29
Project: pyslam   Author: luigifreda   File: slam.py    License: GNU General Public License v3.0
def estimate_pose_by_fitting_ess_mat(self, f_ref, f_cur, idxs_ref, idxs_cur): 
        # N.B.: in order to understand the limitations of fitting an essential mat, read the comments of the method self.estimate_pose_ess_mat() 
        self.timer_pose_est.start()
        # estimate inter frame camera motion by using found keypoint matches 
        # output of the following function is:  Trc = [Rrc, trc] with ||trc||=1  where c=cur, r=ref  and  pr = Trc * pc 
        Mrc, self.mask_match = estimate_pose_ess_mat(f_ref.kpsn[idxs_ref], f_cur.kpsn[idxs_cur], 
                                                     method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)   
        #Mcr = np.linalg.inv(poseRt(Mrc[:3, :3], Mrc[:3, 3]))   
        Mcr = inv_T(Mrc)
        estimated_Tcw = np.dot(Mcr, f_ref.pose)
        self.timer_pose_est.refresh()      

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation        
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        print('# inliers: ', self.num_inliers )
        idxs_ref = idxs_ref[mask_idxs]
        idxs_cur = idxs_cur[mask_idxs]

        # if there are not enough inliers do not use the estimated pose 
        if self.num_inliers < kNumMinInliersEssentialMat:
            #f_cur.update_pose(f_ref.pose) # reset estimated pose to previous frame 
            Printer.red('Essential mat: not enough inliers!')  
        else:
            # use the estimated pose as an initial guess for the subsequent pose optimization 
            # set only the estimated rotation (essential mat computation does not provide a scale for the translation, see above) 
            #f_cur.pose[:3,:3] = estimated_Tcw[:3,:3] # copy only the rotation 
            #f_cur.pose[:,3] = f_ref.pose[:,3].copy() # override translation with ref frame translation 
            Rcw = estimated_Tcw[:3,:3] # copy only the rotation 
            tcw = f_ref.pose[:3,3]     # override translation with ref frame translation          
            f_cur.update_rotation_and_translation(Rcw, tcw)     
        return  idxs_ref, idxs_cur 
Example 30
Project: videoseg   Author: pathak22   File: dm_tracker.py    License: MIT License
def shot_homography(shotTracks, homTh):
    """
    Filter foreground points i.e. the outlier points found by fitting
    homography using RANSAC
    Input:
        shotTracks: (numFrames, numAllPoints, 2)
        fgTracks: (numFrames, numForegroundPoints, 2)
    """
    if shotTracks.ndim < 3 or shotTracks.shape[0] < 2 or homTh < 0:
        return shotTracks

    import cv2
    status = 1
    for i in range(1, shotTracks.shape[0]):
        if shotTracks[i - 1, 0, 2] > -1000:
            p1 = shotTracks[i - 1, :, 2:].astype('float')
        else:
            p1 = shotTracks[i - 1, :, :2].astype('float')
        p2 = shotTracks[i, :, :2].astype('float')
        _, new_status = cv2.findHomography(
            p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
        status = new_status * status

    fgTracks = shotTracks[:, status[:, 0] == 0, :]
    print(shotTracks.shape[0], shotTracks.shape[1], fgTracks.shape[1])
    return fgTracks