Python cv2.RANSAC Examples

The following are 30 code examples of cv2.RANSAC(), extracted from open source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
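Before the project examples, here is a minimal self-contained sketch of the most common pattern: passing cv2.RANSAC as the method argument of cv2.findHomography and reading the returned inlier mask. The point data is synthetic and purely illustrative.

import numpy as np
import cv2

# Synthetic correspondences: 20 points related by a pure translation,
# plus a few gross outliers that RANSAC should reject.
rng = np.random.default_rng(0)
src = rng.uniform(0, 100, (20, 2)).astype(np.float32)
dst = src + np.float32([10, 5])                             # inliers
dst[:4] += rng.uniform(50, 80, (4, 2)).astype(np.float32)   # outliers

H, mask = cv2.findHomography(src, dst, cv2.RANSAC, ransacReprojThreshold=3.0)
print(H)             # close to a pure translation by (10, 5)
print(mask.ravel())  # 1 for inliers, 0 for rejected outliers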
Example #1
Source File: evaluation.py    From GIFT with Apache License 2.0
def estimate_relative_pose_from_correspondence(pts1, pts2, K1, K2):
        f_avg = (K1[0, 0] + K2[0, 0]) / 2
        pts1, pts2 = np.ascontiguousarray(pts1, np.float32), np.ascontiguousarray(pts2, np.float32)

        pts_l_norm = cv2.undistortPoints(np.expand_dims(pts1, axis=1), cameraMatrix=K1, distCoeffs=None)
        pts_r_norm = cv2.undistortPoints(np.expand_dims(pts2, axis=1), cameraMatrix=K2, distCoeffs=None)

        E, mask = cv2.findEssentialMat(pts_l_norm, pts_r_norm, focal=1.0, pp=(0., 0.),
                                       method=cv2.RANSAC, prob=0.999, threshold=3.0 / f_avg)
        points, R_est, t_est, mask_pose = cv2.recoverPose(E, pts_l_norm, pts_r_norm)
        return mask[:,0].astype(bool), R_est, t_est
Example #2
Source File: dm_tracker.py    From videoseg with MIT License
def frame_homography(totalPts, homTh):
    """
    Filter foreground points i.e. the outlier points found by fitting
    homography using RANSAC
    Input:
        totalPts: (numAllPoints, 4): x0, y0, x1, y1
        fgPts: (numAllPoints, 4): x0, y0, x1, y1
    """
    if totalPts.ndim != 2 or totalPts.shape[0] < 8 or homTh < 0:
        return totalPts

    import cv2
    p1 = totalPts[:, :2].astype('float')
    p2 = totalPts[:, 2:4].astype('float')
    _, status = cv2.findHomography(
        p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
    fgPts = totalPts[status[:, 0] == 0, :]
    return fgPts 
Example #3
Source File: FocusStack.py    From focusstack with Apache License 2.0
def findHomography(image_1_kp, image_2_kp, matches):
    image_1_points = np.zeros((len(matches), 1, 2), dtype=np.float32)
    image_2_points = np.zeros((len(matches), 1, 2), dtype=np.float32)

    for i in range(0,len(matches)):
        image_1_points[i] = image_1_kp[matches[i].queryIdx].pt
        image_2_points[i] = image_2_kp[matches[i].trainIdx].pt


    homography, mask = cv2.findHomography(image_1_points, image_2_points, cv2.RANSAC, ransacReprojThreshold=2.0)

    return homography


#
#   Align the images so they overlap properly...
#
# 
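The focusstack snippet cuts off before the alignment code itself. As a rough sketch of what typically follows the findHomography call above (the helper below is illustrative, not the original focusstack code), one image is warped into the other's frame with cv2.warpPerspective:

def align_image(img_to_align, reference_img, homography):
    # warp img_to_align into the reference frame; dsize is (width, height)
    h, w = reference_img.shape[:2]
    return cv2.warpPerspective(img_to_align, homography, (w, h))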
Example #4
Source File: visual_odometry.py    From pyslam with GNU General Public License v3.0
def removeOutliersByMask(self, mask): 
        if mask is not None:    
            n = self.kpn_cur.shape[0]     
            mask_index = [ i for i,v in enumerate(mask) if v > 0]    
            self.kpn_cur = self.kpn_cur[mask_index]           
            self.kpn_ref = self.kpn_ref[mask_index]           
            if self.des_cur is not None: 
                self.des_cur = self.des_cur[mask_index]        
            if self.des_ref is not None: 
                self.des_ref = self.des_ref[mask_index]  
            if kVerbose:
                print('removed ', n-self.kpn_cur.shape[0],' outliers')                

    # fit essential matrix E with RANSAC such that:  p2.T * E * p1 = 0  where  E = [t21]x * R21
    # out: [Rrc, trc]   (with respect to 'ref' frame) 
    # N.B.1: trc is estimated up to scale (i.e. the algorithm always returns ||trc||=1, we need a scale in order to recover a translation which is coherent with previous estimated poses)
    # N.B.2: this function has problems in the following cases: [see Hartley/Zisserman Book]
    # - 'geometrical degenerate correspondences', e.g. all the observed features lie on a plane (the correct model for the correspondences is a homography) or lie on a ruled quadric 
    # - degenerate motions such as a pure rotation (a sufficient parallax is required) or an infinitesimal viewpoint change (where the translation is almost zero)
    # N.B.3: the five-point algorithm (used for estimating the Essential Matrix) seems to work well in the degenerate planar cases [Five-Point Motion Estimation Made Easy, Hartley]
    # N.B.4: as reported above, in case of pure rotation, this algorithm will compute a useless fundamental matrix which cannot be decomposed to return the rotation 
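To make N.B.1 concrete, here is a small synthetic sketch (illustrative only, not pyslam code) showing that the translation recovered from an essential matrix always comes back with unit norm, which is why a scale must be supplied from elsewhere:

import numpy as np
import cv2

# Two views of random 3D points; the true baseline has length 2.5.
pts3d = np.random.uniform(-1, 1, (50, 3)) + [0., 0., 5.]
t_true = np.array([2.5, 0., 0.])

pts1 = (pts3d[:, :2] / pts3d[:, 2:]).astype(np.float32)      # view 1, identity pose
p2cam = pts3d - t_true                                       # view 2, identity rotation
pts2 = (p2cam[:, :2] / p2cam[:, 2:]).astype(np.float32)

E, mask = cv2.findEssentialMat(pts2, pts1, focal=1.0, pp=(0., 0.),
                               method=cv2.RANSAC, prob=0.999, threshold=1e-3)
_, R, t, _ = cv2.recoverPose(E, pts2, pts1)
print(np.linalg.norm(t))  # ~1.0: the true baseline length 2.5 is not recoverable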
Example #5
Source File: ImageStitching.py    From ImageProcessingProjects with MIT License
def getHomography(self, rightKps, rightDescriptor):
        rawMatches = self.matcher.knnMatch(self.leftDescriptor, rightDescriptor, 2)
        matches = []

        for m in rawMatches:
            if(len(m)==2 and m[0].distance < m[1].distance*self.ratio):
                matches.append((m[0].trainIdx, m[0].queryIdx))

        if(len(matches) >=4):
            # print(matches)
            ptsB = np.float32([self.leftKps[i] for (_, i) in matches])
            ptsA = np.float32([rightKps[i] for (i, _) in matches])

            # ptsB = H*ptsA
            H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, self.reprojThresh)
            return H

        return None 
Example #6
Source File: Placer.py    From ImageAnalysis with MIT License
def findHomography(self, i1, i2, pairs):
        src = []
        dst = []
        for pair in pairs:
            c1 = i1.coord_list[pair[0]]
            c2 = i2.coord_list[pair[1]]
            src.append( c1 )
            dst.append( c2 )
        #H, status = cv2.findHomography(np.array([src]).astype(np.float32),
        #                               np.array([dst]).astype(np.float32),
        #                               cv2.RANSAC, 5.0)
        H, status = cv2.findHomography(np.array([src]).astype(np.float32),
                                       np.array([dst]).astype(np.float32))
        #print str(affine)
        return H

    # compare against best 'placed' image (averaging transform
    # matrices together directly doesn't do what we want) 
Example #7
Source File: local_descriptors.py    From hfnet with MIT License
def compute_homography_error(kpts1, kpts2, matches, shape2, H_gt):
    if matches.shape[0] == 0:
        return None   # no matches: consistent with the H is None case below
    kpts1 = kpts1[matches[:, 0]]
    kpts2 = kpts2[matches[:, 1]]
    H, _ = cv2.findHomography(kpts2, kpts1, cv2.RANSAC, 3.0)
    if H is None:
        return None

    w, h = shape2
    corners2 = to_homogeneous(
        np.array([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]))
    corners1_gt = np.dot(corners2, np.transpose(H_gt))
    corners1_gt = corners1_gt[:, :2] / corners1_gt[:, 2:]
    corners1 = np.dot(corners2, np.transpose(H))
    corners1 = corners1[:, :2] / corners1[:, 2:]
    mean_dist = np.mean(np.linalg.norm(corners1 - corners1_gt, axis=1))
    return mean_dist 
Example #8
Source File: StitchingFromVideo.py    From ImageProcessingProjects with MIT License
def getHomography(self, rightKps, rightDescriptor):
        rawMatches = self.matcher.knnMatch(self.leftDescriptor, rightDescriptor, 2)
        matches = []

        for m in rawMatches:
            if(len(m)==2 and m[0].distance < m[1].distance*self.ratio):
                matches.append((m[0].trainIdx, m[0].queryIdx))

        if(len(matches) >=4):
            # print(matches)
            ptsB = np.float32([self.leftKps[i] for (_, i) in matches])
            ptsA = np.float32([rightKps[i] for (i, _) in matches])

            # ptsB = H*ptsA
            H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, self.reprojThresh)
            return H

        return None 
Example #9
Source File: find_obj.py    From PyCV-time with MIT License
def match_and_draw(win):
        print('matching...')
        raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, status, H) 
Example #10
Source File: Placer.py    From ImageAnalysis with MIT License
def findGroupHomography(self, i1):
        # find the homography matrix representing the best fit against
        # all the placed neighbors.  Builds a cumulative src/dest list
        # with our src points listed once for each image pair.

        src = []
        dst = []
        for i, pairs in enumerate(i1.match_list):
            if len(pairs) < 4:
                # can't compute homography on < 4 points
                continue
            i2 = self.image_list[i]
            if not i2.placed:
                # don't consider not-yet-placed neighbors
                continue
            # add coordinate matches for this image pair
            for pair in pairs:
                c1 = i1.coord_list[pair[0]]
                c2 = i2.coord_list[pair[1]]
                src.append( c1 )
                dst.append( c2 )
        if len(src) < 4:
            # no placed neighbors, just return the identity matrix
            return np.identity(3)
        # find the homography matrix on the cumulative set of all
        # matching coordinates for all matching image pairs
        # simultaneously...
        H, status = cv2.findHomography(np.array([src]).astype(np.float32),
                                       np.array([dst]).astype(np.float32),
                                       cv2.RANSAC, 5.0)
        if H is None:
            # it's possible given a degenerate point set, the
            # homography estimator will return None
            return np.identity(3)
        return H
    
    # compare against best 'placed' image (averaging transform
    # matrices together directly doesn't do what we want) 
Example #11
Source File: asift.py    From PyCV-time with MIT License
def match_and_draw(win):
        with Timer('matching'):
            raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
            # do not draw outliers (there will be a lot of them)
            kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, None, H) 
Example #12
Source File: plane_tracker.py    From PyCV-time with MIT License
def track(self, frame):
        '''Returns a list of detected TrackedTarget objects'''
        self.frame_points, self.frame_descrs = self.detect_features(frame)
        if len(self.frame_points) < MIN_MATCH_COUNT:
            return []
        matches = self.matcher.knnMatch(self.frame_descrs, k = 2)
        matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
        if len(matches) < MIN_MATCH_COUNT:
            return []
        matches_by_id = [[] for _ in range(len(self.targets))]
        for m in matches:
            matches_by_id[m.imgIdx].append(m)
        tracked = []
        for imgIdx, matches in enumerate(matches_by_id):
            if len(matches) < MIN_MATCH_COUNT:
                continue
            target = self.targets[imgIdx]
            p0 = [target.keypoints[m.trainIdx].pt for m in matches]
            p1 = [self.frame_points[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
            status = status.ravel() != 0
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            x0, y0, x1, y1 = target.rect
            quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
            quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

            track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
            tracked.append(track)
        tracked.sort(key = lambda t: len(t.p0), reverse=True)
        return tracked 
Example #13
Source File: sift.py    From SoTu with MIT License
def filter(self, pt_qt):
        if len(pt_qt) > 0:
            pt_q, pt_t = zip(*pt_qt)
            # get the transform matrix between the matched coordinates, plus the inlier mask
            M, mask = cv2.findHomography(np.float32(pt_q).reshape(-1, 1, 2),
                                         np.float32(pt_t).reshape(-1, 1, 2),
                                         cv2.RANSAC, 3)
            return mask.ravel().tolist()
        else:
            return [] 
Example #14
Source File: plane_tracker.py    From OpenCV-Python-Tutorial with MIT License
def track(self, frame):
        '''Returns a list of detected TrackedTarget objects'''
        self.frame_points, frame_descrs = self.detect_features(frame)
        if len(self.frame_points) < MIN_MATCH_COUNT:
            return []
        matches = self.matcher.knnMatch(frame_descrs, k = 2)
        matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
        if len(matches) < MIN_MATCH_COUNT:
            return []
        matches_by_id = [[] for _ in range(len(self.targets))]
        for m in matches:
            matches_by_id[m.imgIdx].append(m)
        tracked = []
        for imgIdx, matches in enumerate(matches_by_id):
            if len(matches) < MIN_MATCH_COUNT:
                continue
            target = self.targets[imgIdx]
            p0 = [target.keypoints[m.trainIdx].pt for m in matches]
            p1 = [self.frame_points[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
            status = status.ravel() != 0
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            x0, y0, x1, y1 = target.rect
            quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
            quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

            track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
            tracked.append(track)
        tracked.sort(key = lambda t: len(t.p0), reverse=True)
        return tracked 
Example #15
Source File: tools.py    From BlindWatermark with GNU General Public License v3.0
def run(self):
        img = cv2.imread(self.ori_img)
        img2 = cv2.imread(self.attacked_img)

        height = img.shape[0]
        width  = img.shape[1]
        # Initiate ORB detector
        orb = cv2.ORB_create(128)
        MIN_MATCH_COUNT=10
        # find the keypoints and descriptors with ORB
        kp1, des1 = orb.detectAndCompute(img,None)
        kp2, des2 = orb.detectAndCompute(img2,None)

        FLANN_INDEX_KDTREE = 1   # KD-tree index is 1 (0 would select linear search)
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)



        des1 = np.float32(des1)
        des2 = np.float32(des2)

        matches = flann.knnMatch(des1,des2,k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m,n in matches:
            if m.distance < self.rate*n.distance:
                good.append(m)

        if len(good)>MIN_MATCH_COUNT:
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
            M, mask = cv2.findHomography( dst_pts,src_pts, cv2.RANSAC,5.0)
            out = cv2.warpPerspective(img2, M, (width,height)) # dsize is (width, height): columns first, then rows
            cv2.imwrite(self.outfile_name,out)
            self.num_of_good.emit(len(good),self.outfile_name)
        else :
            self.num_of_good.emit(0,'') 
Example #16
Source File: tools.py    From BlindWatermark with GNU General Public License v3.0
def recovery(ori_img,attacked_img,outfile_name = './recoveried.png',rate=0.7):
    img = cv2.imread(ori_img)
    img2 = cv2.imread(attacked_img)

    height = img.shape[0]
    width  = img.shape[1]
    # Initiate ORB detector
    orb = cv2.ORB_create(128)
    MIN_MATCH_COUNT=10
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img,None)
    kp2, des2 = orb.detectAndCompute(img2,None)

    FLANN_INDEX_KDTREE = 1   # KD-tree index is 1 (0 would select linear search)
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)



    des1 = np.float32(des1)
    des2 = np.float32(des2)

    matches = flann.knnMatch(des1,des2,k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < rate*n.distance:
            good.append(m)

    if len(good)>MIN_MATCH_COUNT:
        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
        M, mask = cv2.findHomography( dst_pts,src_pts, cv2.RANSAC,5.0)
        out = cv2.warpPerspective(img2, M, (width,height)) # dsize is (width, height): columns first, then rows
        cv2.imwrite(outfile_name,out) 
Example #17
Source File: visual_odometry.py    From Monocular-Visual-Inertial-Odometry with MIT License
def processSecondFrame(self):
		self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
		E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
		_, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
		self.frame_stage = STAGE_DEFAULT_FRAME 
		self.px_ref = self.px_cur 
Example #18
Source File: ImageUtility.py    From ImageStitch with MIT License
def getOffsetByRansac(self, kpsA, kpsB, matches, offsetEvaluate=100):
        """
        功能:通过求Ransac的方法获得偏移量(不完善)
        :param kpsA: 第一张图像的特征
        :param kpsB: 第二张图像的特征
        :param matches: 配准列表
        :param offsetEvaluate:对于Ransac求属于最小范围的个数,大于本阈值,则正确
        :return:返回(totalStatus, [dx, dy]), totalStatus 是否正确,[dx, dy]默认[0, 0]
        """
        totalStatus = False
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])
        if len(matches) == 0:
            return (totalStatus, [0, 0], 0)
        # compute the perspective transform matrix
        # NOTE: cv2.getAffineTransform requires exactly 3 point pairs and its
        # result was unused here, so the call is left commented out:
        # H1 = cv2.getAffineTransform(ptsA, ptsB)
        # print("H1")
        # print(H1)
        (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 3, 0.9)
        trueCount = int(np.sum(status))   # number of RANSAC inliers
        if trueCount >= offsetEvaluate:
            totalStatus = True
            adjustH = H.copy()
            adjustH[0, 2] = 0; adjustH[1, 2] = 0
            adjustH[2, 0] = 0; adjustH[2, 1] = 0
            return (totalStatus, [np.round(np.array(H).astype(int)[1, 2]) * (-1), np.round(np.array(H).astype(int)[0, 2]) * (-1)], adjustH)
        else:
            return (totalStatus, [0, 0], 0) 
Example #19
Source File: visual_odometry.py    From Monocular-Visual-Inertial-Odometry with MIT License
def processFrame(self, frame_id):
		self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
		E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
		_, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp = self.pp)
		absolute_scale = self.getAbsoluteScale(frame_id)
		if(absolute_scale > 0.1):
			self.cur_t = self.cur_t + absolute_scale*self.cur_R.dot(t) 
			self.cur_R = R.dot(self.cur_R)
		if(self.px_ref.shape[0] < kMinNumFeature):
			self.px_cur = self.detector.detect(self.new_frame)
			self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
		self.px_ref = self.px_cur 
Example #20
Source File: plane_tracker.py    From PyCV-time with MIT License
def track(self, frame):
        '''Returns a list of detected TrackedTarget objects'''
        self.frame_points, self.frame_descrs = self.detect_features(frame)
        if len(self.frame_points) < MIN_MATCH_COUNT:
            return []
        matches = self.matcher.knnMatch(self.frame_descrs, k = 2)
        matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * 0.75]
        if len(matches) < MIN_MATCH_COUNT:
            return []
        matches_by_id = [[] for _ in range(len(self.targets))]
        for m in matches:
            matches_by_id[m.imgIdx].append(m)
        tracked = []
        for imgIdx, matches in enumerate(matches_by_id):
            if len(matches) < MIN_MATCH_COUNT:
                continue
            target = self.targets[imgIdx]
            p0 = [target.keypoints[m.trainIdx].pt for m in matches]
            p1 = [self.frame_points[m.queryIdx].pt for m in matches]
            p0, p1 = np.float32((p0, p1))
            H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)
            status = status.ravel() != 0
            if status.sum() < MIN_MATCH_COUNT:
                continue
            p0, p1 = p0[status], p1[status]

            x0, y0, x1, y1 = target.rect
            quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]])
            quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2)

            track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad)
            tracked.append(track)
        tracked.sort(key = lambda t: len(t.p0), reverse=True)
        return tracked 
Example #21
Source File: find_obj.py    From PyCV-time with MIT License
def match_and_draw(win):
        print('matching...')
        raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, status, H) 
Example #22
Source File: coregistration.py    From eo-learn with MIT License
def check_params(self):
        if not (self.params.get('Model') in ['Euler', 'PartialAffine', 'Homography']):
            LOGGER.info("{:s}:Model set to Euler".format(self.__class__.__name__))
            self.params['Model'] = 'Euler'
        if not (self.params.get('Descriptor') in ['SIFT', 'SURF']):
            LOGGER.info("{:s}:Descriptor set to SIFT".format(self.__class__.__name__))
            self.params['Descriptor'] = 'SIFT'
        if not isinstance(self.params.get('MaxIters'), int):
            LOGGER.info("{:s}:RANSAC MaxIters set to 1000".format(self.__class__.__name__))
            self.params['MaxIters'] = 1000
        if not isinstance(self.params.get('RANSACThreshold'), float):
            LOGGER.info("{:s}:RANSAC threshold set to 7.0".format(self.__class__.__name__))
            self.params['RANSACThreshold'] = 7.0 
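For reference, a params dict that passes all the checks above unchanged (the keys are exactly the ones the method reads):

params = {
    'Model': 'Homography',     # one of 'Euler', 'PartialAffine', 'Homography'
    'Descriptor': 'SIFT',      # 'SIFT' or 'SURF'
    'MaxIters': 1000,          # int: RANSAC iteration cap
    'RANSACThreshold': 7.0,    # float: reprojection threshold
}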
Example #23
Source File: find_obj.py    From ImageAnalysis with MIT License
def match_and_draw(win):
        print('matching...')
        raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        else:
            H, status = None, None
            print('%d matches found, not enough for homography estimation' % len(p1))

        vis = explore_match(win, img1, img2, kp_pairs, status, H) 
Example #24
Source File: ImageStitching.py    From ImageProcessingProjects with MIT License
def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # compute the raw matches and initialize the list of actual
        # matches
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []

        # loop over the raw matches
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)

            # return the matches along with the homography matrix
            # and status of each matched point
            return (matches, H, status)

        # otherwise, no homography could be computed
        return None 
Example #25
Source File: dm_tracker.py    From videoseg with MIT License
def shot_homography(shotTracks, homTh):
    """
    Filter foreground points i.e. the outlier points found by fitting
    homography using RANSAC
    Input:
        shotTracks: (numFrames, numAllPoints, 2)
        fgTracks: (numFrames, numForegroundPoints, 2)
    """
    if shotTracks.ndim < 3 or shotTracks.shape[0] < 2 or homTh < 0:
        return shotTracks

    import cv2
    status = 1
    for i in range(1, shotTracks.shape[0]):
        if shotTracks[i - 1, 0, 2] > -1000:
            p1 = shotTracks[i - 1, :, 2:].astype('float')
        else:
            p1 = shotTracks[i - 1, :, :2].astype('float')
        p2 = shotTracks[i, :, :2].astype('float')
        _, new_status = cv2.findHomography(
            p1, p2, cv2.RANSAC, ransacReprojThreshold=homTh)
        status = new_status * status

    fgTracks = shotTracks[:, status[:, 0] == 0, :]
    print(shotTracks.shape[0], shotTracks.shape[1], fgTracks.shape[1])
    return fgTracks 
Example #26
Source File: slam.py    From pyslam with GNU General Public License v3.0
def estimate_pose_by_fitting_ess_mat(self, f_ref, f_cur, idxs_ref, idxs_cur): 
        # N.B.: in order to understand the limitations of fitting an essential mat, read the comments of the method self.estimate_pose_ess_mat() 
        self.timer_pose_est.start()
        # estimate inter frame camera motion by using found keypoint matches 
        # output of the following function is:  Trc = [Rrc, trc] with ||trc||=1  where c=cur, r=ref  and  pr = Trc * pc 
        Mrc, self.mask_match = estimate_pose_ess_mat(f_ref.kpsn[idxs_ref], f_cur.kpsn[idxs_cur], 
                                                     method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)   
        #Mcr = np.linalg.inv(poseRt(Mrc[:3, :3], Mrc[:3, 3]))   
        Mcr = inv_T(Mrc)
        estimated_Tcw = np.dot(Mcr, f_ref.pose)
        self.timer_pose_est.refresh()      

        # remove outliers from keypoint matches by using the mask computed with inter frame pose estimation        
        mask_idxs = (self.mask_match.ravel() == 1)
        self.num_inliers = sum(mask_idxs)
        print('# inliers: ', self.num_inliers )
        idxs_ref = idxs_ref[mask_idxs]
        idxs_cur = idxs_cur[mask_idxs]

        # if there are not enough inliers do not use the estimated pose 
        if self.num_inliers < kNumMinInliersEssentialMat:
            #f_cur.update_pose(f_ref.pose) # reset estimated pose to previous frame 
            Printer.red('Essential mat: not enough inliers!')  
        else:
            # use the estimated pose as an initial guess for the subsequent pose optimization 
            # set only the estimated rotation (essential mat computation does not provide a scale for the translation, see above) 
            #f_cur.pose[:3,:3] = estimated_Tcw[:3,:3] # copy only the rotation 
            #f_cur.pose[:,3] = f_ref.pose[:,3].copy() # override translation with ref frame translation 
            Rcw = estimated_Tcw[:3,:3] # copy only the rotation 
            tcw = f_ref.pose[:3,3]     # override translation with ref frame translation          
            f_cur.update_rotation_and_translation(Rcw, tcw)     
        return  idxs_ref, idxs_cur 
Example #27
Source File: visual_odometry.py    From pyslam with GNU General Public License v3.0
def estimatePose(self, kps_ref, kps_cur):	
        kp_ref_u = self.cam.undistort_points(kps_ref)	
        kp_cur_u = self.cam.undistort_points(kps_cur)	        
        self.kpn_ref = self.cam.unproject_points(kp_ref_u)
        self.kpn_cur = self.cam.unproject_points(kp_cur_u)
        if kUseEssentialMatrixEstimation:
            # the essential matrix algorithm is more robust since it uses the five-point algorithm solver by D. Nister (see the notes and paper above )
            E, self.mask_match = cv2.findEssentialMat(self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.), method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)
        else:
            # just for the hell of testing fundamental matrix fitting ;-) 
            F, self.mask_match = self.computeFundamentalMatrix(kp_cur_u, kp_ref_u)
            E = self.cam.K.T @ F @ self.cam.K    # E = K.T * F * K 
        #self.removeOutliersFromMask(self.mask)  # do not remove outliers, the last unmatched/outlier features can be matched and recognized as inliers in subsequent frames                          
        _, R, t, mask = cv2.recoverPose(E, self.kpn_cur, self.kpn_ref, focal=1, pp=(0., 0.))   
        return R,t  # Rrc, trc (with respect to 'ref' frame) 
Example #28
Source File: initializer.py    From pyslam with GNU General Public License v3.0
def estimatePose(self, kpn_ref, kpn_cur):	     
        # here, the essential matrix algorithm uses the five-point algorithm solver by D. Nister (see the notes and paper above )     
        E, self.mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.), method=cv2.RANSAC, prob=kRansacProb, threshold=kRansacThresholdNormalized)                         
        _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))                                                     
        return poseRt(R,t.T)  # Trc  homogeneous transformation matrix with respect to 'ref' frame,  pr_= Trc * pc_        

    # push the first image 
Example #29
Source File: utils_geom.py    From pyslam with GNU General Public License v3.0
def estimate_pose_ess_mat(kpn_ref, kpn_cur, method=cv2.RANSAC, prob=0.999, threshold=0.0003):	
    # here, the essential matrix algorithm uses the five-point algorithm solver by D. Nister (see the notes and paper above )     
    E, mask_match = cv2.findEssentialMat(kpn_cur, kpn_ref, focal=1, pp=(0., 0.), method=method, prob=prob, threshold=threshold)                         
    _, R, t, mask = cv2.recoverPose(E, kpn_cur, kpn_ref, focal=1, pp=(0., 0.))   
    return poseRt(R,t.T), mask_match  # Trc, mask_match


# z rotation, input in radians 
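The z-rotation helper itself is cut off by the snippet boundary. A minimal sketch of the standard right-handed rotation matrix such a helper conventionally returns (not necessarily pyslam's exact code):

import numpy as np

def rot_z(theta):
    # rotation about the z axis; theta in radians
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.],
                     [s,  c, 0.],
                     [0., 0., 1.]])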
Example #30
Source File: utils_geom.py    From pyslam with GNU General Public License v3.0
def check_dist_epipolar_line(kp1,kp2,F12,sigma2_kp2):
    # Epipolar line in second image l = kp1' * F12 = [a b c]
    l = np.dot(F12.T,np.array([kp1[0],kp1[1],1]))
    num = l[0]*kp2[0] + l[1]*kp2[1] + l[2]  # kp1' * F12 * kp2
    den = l[0]*l[0] + l[1]*l[1]   # a*a+b*b

    if(den==0):
    #if(den < 1e-20):
        return False

    dist_sqr = num*num/den              # squared (minimum) distance of kp2 from the epipolar line l
    return dist_sqr < 3.84 * sigma2_kp2 # value of inverse cumulative chi-square for 1 DOF (Hartley Zisserman, page 567)



# fit essential matrix E with RANSAC such that:  p2.T * E * p1 = 0  where  E = [t21]x * R21
# input: kpn_ref and kpn_cur are two arrays of [Nx2] normalized coordinates of matched keypoints 
# out: a) Trc: homogeneous transformation matrix containing Rrc, trc  ('cur' frame with respect to 'ref' frame)    pr = Trc * pc 
#      b) mask_match: array of N elements, every element of which is set to 0 for outliers and to 1 for the other points (computed only in the RANSAC and LMedS methods)
# N.B.1: trc is estimated up to scale (i.e. the algorithm always returns ||trc||=1, we need a scale in order to recover a translation which is coherent with previous estimated poses)
# N.B.2: this function has problems in the following cases: [see Hartley/Zisserman Book]
# - 'geometrical degenerate correspondences', e.g. all the observed features lie on a plane (the correct model for the correspondences is a homography) or lie on a ruled quadric 
# - degenerate motions such as a pure rotation (a sufficient parallax is required) or an infinitesimal viewpoint change (where the translation is almost zero)
# N.B.3: the five-point algorithm (used for estimating the Essential Matrix) seems to work well in the degenerate planar cases [Five-Point Motion Estimation Made Easy, Hartley]
# N.B.4: as reported above, in case of pure rotation, this algorithm will compute a useless fundamental matrix which cannot be decomposed to return a correct rotation 
# N.B.5: the OpenCV findEssentialMat function uses the five-point algorithm solver by D. Nister => hence it should work well in the degenerate planar cases
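As a usage note for the mask_match output described above, a common pattern (assuming kpn_ref and kpn_cur are the [Nx2] arrays of normalized keypoints mentioned in the comments) is:

Trc, mask_match = estimate_pose_ess_mat(kpn_ref, kpn_cur)
inliers = mask_match.ravel() == 1     # 0 = outlier, 1 = inlier
kpn_ref, kpn_cur = kpn_ref[inliers], kpn_cur[inliers]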