Python cv2.BFMatcher() Examples

The following are 30 code examples of cv2.BFMatcher(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
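As a quick orientation, here is a minimal sketch of the typical cv2.BFMatcher() workflow (a sketch with placeholder image paths, not taken from the examples below):

import cv2

# detect ORB keypoints/descriptors in two images and brute-force match them
img1 = cv2.imread('query.png', cv2.IMREAD_GRAYSCALE)   # placeholder path
img2 = cv2.imread('train.png', cv2.IMREAD_GRAYSCALE)   # placeholder path
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # Hamming norm for binary descriptors
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)  # best matches first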
Example #1
Source File: find_obj.py    From OpenCV-Python-Tutorial with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
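Note: FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants defined elsewhere in find_obj.py (and in the similar scripts below); the values used in the OpenCV samples are:

FLANN_INDEX_KDTREE = 1  # kd-tree index, for float descriptors (SIFT/SURF)
FLANN_INDEX_LSH = 6     # locality-sensitive hashing index, for binary descriptors (ORB/AKAZE/BRISK)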
Example #2
Source File: image_detect_01.py    From image-detect with MIT License
def matchAB(fileA, fileB):
    # read the image data
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # convert to grayscale
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # extract AKAZE features
    akaze = cv2.AKAZE_create()
    kpA, desA = akaze.detectAndCompute(grayA, None)
    kpB, desB = akaze.detectAndCompute(grayB, None)

    # create the BFMatcher and visualize the matches
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(desA, desB)
    matches = sorted(matches, key=lambda x: x.distance)
    matched_image = cv2.drawMatches(imgA, kpA, imgB, kpB, matches, None, flags=2)

    plt.imshow(cv2.cvtColor(matched_image, cv2.COLOR_BGR2RGB))
    plt.show() 
Example #3
Source File: frame_matching.py    From hfnet with MIT License
def baseline_sift_matching(img1, img2):
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    matches = cv2.BFMatcher().knnMatch(des1, des2, k=2)

    good = [[m] for m, n in matches if m.distance < 0.7*n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                              matchColor=(0, 255, 0), matchesMask=None,
                              singlePointColor=(255, 0, 0), flags=0)
    return img3 
Example #4
Source File: findobj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #5
Source File: find_obj.py    From PyCV-time with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #6
Source File: find_obj.py    From ImageAnalysis with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(400)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #7
Source File: descriptors.py    From hfnet with MIT License
def matching(desc1, desc2, do_ratio_test=False, cross_check=True):
    if desc1.dtype == np.bool_ and desc2.dtype == np.bool_:
        desc1, desc2 = np.packbits(desc1, axis=1), np.packbits(desc2, axis=1)
        norm = cv2.NORM_HAMMING
    else:
        desc1, desc2 = np.float32(desc1), np.float32(desc2)
        norm = cv2.NORM_L2

    if do_ratio_test:
        matches = []
        matcher = cv2.BFMatcher(norm)
        for m, n in matcher.knnMatch(desc1, desc2, k=2):
            m.distance = 1.0 if (n.distance == 0) else m.distance / n.distance
            matches.append(m)
    else:
        matcher = cv2.BFMatcher(norm, crossCheck=cross_check)
        matches = matcher.match(desc1, desc2)
    return matches_cv2np(matches) 
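The matches_cv2np helper is defined elsewhere in hfnet and not shown here; a plausible sketch, assuming it packs the cv2.DMatch list into NumPy index and distance arrays:

import numpy as np

def matches_cv2np(matches):
    # hypothetical helper: (queryIdx, trainIdx) pairs plus distances, as arrays
    matches_np = np.array([[m.queryIdx, m.trainIdx] for m in matches], dtype=int)
    distances = np.array([m.distance for m in matches], dtype=np.float32)
    return matches_np, distances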
Example #8
Source File: find_obj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #9
Source File: timing.py    From imips_open with GNU General Public License v3.0
def evalLfNet():
    # lf_net took 2:49 for 2761 images according to its own progress bar timer
    # 169/2761*2 -> 0.122 for two images
    # Add to that the following time for matching (0.48ms -> negligible):
    seq_fps = baselines.parseLFNetOuts(
        eval_set, FLAGS.baseline_num_ips)
    matcher = cv2.BFMatcher()
    times = []
    for pair in eval_set:
        folder = pair.seqname
        [a, b] = pair.indices
        forward_passes = [seq_fps['%s%s' % (folder, i)] for i in [a, b]]
        t = time.time()
        matches = matcher.match(
            forward_passes[0].descriptors, forward_passes[1].descriptors)
        times.append(time.time() - t)
        print(times[-1])
    return times 
Example #10
Source File: timing.py    From imips_open with GNU General Public License v3.0
def evalSurf():
    # SURF
    print('SURF')
    surf = cv2.xfeatures2d.SURF_create()
    matcher = cv2.BFMatcher()
    times = []
    for pair in eval_set:
        t = time.time()
        kpts = [surf.detect(im) for im in pair.im]
        srt = [sorted(kpt, key=lambda x: x.response, reverse=True) for kpt in kpts]
        srt128 = [s[:128] for s in srt]
        d = [surf.compute(im, s)[1] for im, s in zip(pair.im, srt128)]
        matches = matcher.match(d[0], d[1])
        times.append(time.time() - t)
        print(times[-1])
    return times 
Example #11
Source File: timing.py    From imips_open with GNU General Public License v3.0
def evalSift():
    # SIFT
    print('SIFT')
    sift = cv2.xfeatures2d.SIFT_create()
    matcher = cv2.BFMatcher()
    times = []
    for pair in eval_set:
        t = time.time()
        kpts = [sift.detect(im) for im in pair.im]
        srt = [sorted(kpt, key=lambda x: x.response, reverse=True) for kpt in kpts]
        srt128 = [s[:128] for s in srt]
        d = [sift.compute(im, s)[1] for im, s in zip(pair.im, srt128)]
        matches = matcher.match(d[0], d[1])
        times.append(time.time() - t)
        print(times[-1])
    return times 
Example #12
Source File: statistics.py    From dual-fisheye-video-stitching with MIT License
def BFMatch_SIFT(img1, img2):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])

    return (kp1, kp2, good) 
Example #13
Source File: feature_match.py    From dual-fisheye-video-stitching with MIT License
def BFMatch_SIFT(img1, img2):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])

    return (kp1, kp2, good) 
Example #14
Source File: picture_bow_orber.py    From douglas-quaid with GNU General Public License v3.0
def __init__(self, fe_conf: feature_extractor_conf.Default_feature_extractor_conf):
        # STD attributes
        self.fe_conf: feature_extractor_conf.Default_feature_extractor_conf = fe_conf
        self.logger = logging.getLogger(__name__)
        self.logger.info("Creation of a Picture BoW Orber")

        self.algo = cv2.ORB_create(nfeatures=fe_conf.ORB_KEYPOINTS_NB)
        # TODO : Dictionary path / Vocabulary
        self.bow_descriptor = cv2.BOWImgDescriptorExtractor(self.algo, cv2.BFMatcher(cv2.NORM_HAMMING))
        self.vocab_loaded = False
        try:
            vocab = BoWOrb_Vocabulary_Creator.load_vocab_from_file(fe_conf.BOW_VOCAB_PATH)
            self.bow_descriptor.setVocabulary(vocab)
            self.vocab_loaded = True
        except Exception as e:
            self.logger.error(f"No vocabulary file provided. Not possible to use Bow-ORB : {e}") 
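Once a vocabulary is set, the extractor can turn an image into a bag-of-words histogram; a minimal usage sketch (the instance name and image path are placeholders):

# assuming `orber` is a constructed instance with vocab_loaded == True
img = cv2.imread('some_image.png', cv2.IMREAD_GRAYSCALE)  # placeholder path
keypoints = orber.algo.detect(img, None)                  # ORB keypoints
bow_hist = orber.bow_descriptor.compute(img, keypoints)   # 1 x vocabulary-size histogram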
Example #15
Source File: keypoint_matching_contrib.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE is recommended as a fast detector:
        if check_cv_version_is_new():
            # In OpenCV 3/4, STAR/BRIEF are in the contrib module; you need to compile it separately.
            try:
                self.star_detector = cv2.xfeatures2d.StarDetector_create()
                self.brief_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
            except:
                import traceback
                traceback.print_exc()
                print("to use %s, you should build contrib with opencv3.0" % self.METHOD_NAME)
                raise NoModuleError("There is no %s module in your OpenCV environment !" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.star_detector = cv2.FeatureDetector_create("STAR")
            self.brief_extractor = cv2.DescriptorExtractor_create("BRIEF")

        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1 cv2.NORM_L2 cv2.NORM_HAMMING (not usable)
Example #16
Source File: 04_flann_feature_match.py    From Practical-Computer-Vision with MIT License
def brute_force_matcher(des1, des2):
    """
    Brute force matcher to match ORB feature descriptors
    """
    # create BFMatcher object
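    # note: NORM_HAMMING2 assumes ORB created with WTA_K == 3 or 4; the default WTA_K == 2 pairs with NORM_HAMMING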
    bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1,des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key = lambda x:x.distance)

    return matches 
Example #17
Source File: opencvhelper.py    From pyslam with GNU General Public License v3.0
def __init__(self):
        self.matcher = cv2.BFMatcher(cv2.NORM_L2) 
Example #18
Source File: opencvhelper.py    From pyslam with GNU General Public License v3.0
def __init__(self):
        self.matcher = cv2.BFMatcher(cv2.NORM_L2) 
Example #19
Source File: feature_matcher.py    From pyslam with GNU General Public License v3.0
def __init__(self, norm_type=cv2.NORM_HAMMING, cross_check = False, ratio_test=kRatioTest, type = FeatureMatcherTypes.BF):
        super().__init__(norm_type=norm_type, cross_check=cross_check, ratio_test=ratio_test, type=type)
        self.matcher = cv2.BFMatcher(norm_type, cross_check)     
        self.matcher_name = 'BfFeatureMatcher'   


# Flann Matcher 
Example #20
Source File: model.py    From ad-versarial with MIT License
def __init__(self, path_to_templates,
                 match_threshold=0.2, match_threshold_small=0.5):
        self.cv2_sift = cv2.xfeatures2d.SIFT_create()
        self.bf = cv2.BFMatcher()
        self.match_threshold = match_threshold
        self.match_threshold_small = match_threshold_small

        templates, keypoints = self.load_templates(path_to_templates)
        self.templates = templates
        self.keypoints = keypoints 
Example #21
Source File: close_kitti_loops.py    From calc2.0 with Apache License 2.0
def close_loop(db, dbkp, descr, kp):
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    kp, kp_d = kp
    db = np.concatenate(tuple(db), axis=0)
    sim = np.sum(descr * db, axis=-1) 
    
    top_k_sim_ind = np.argpartition(sim, -K)[-K:] 

    max_sim = -1.0
    i_max_sim = -1
    best_match_tuple = None

    for k in top_k_sim_ind:
        db_kp, db_kp_d = dbkp[k]
        matches = matcher.knnMatch(kp_d, db_kp_d, 2)
        good = []
        pts1 = []
        pts2 = []
        for m,n in matches:
            if m.distance < 0.7*n.distance:
                good.append(m)
                pts1.append(db_kp[m.trainIdx].pt)
                pts2.append(kp[m.queryIdx].pt)
        if len(good) > 7:
            pts1 = np.int32(pts1)
            pts2 = np.int32(pts2)
            curr_sim = sim[k]
            if curr_sim > max_sim:
                max_sim = curr_sim
                i_max_sim = k
                best_match_tuple = (kp, db_kp, good, pts1, pts2)
    
    if i_max_sim > -1:
        F, mask = cv2.findFundamentalMat(best_match_tuple[3],
                     best_match_tuple[4], cv2.FM_RANSAC)
        if F is None:
            max_sim=-1.0
            i_max_sim = -1
    return i_max_sim 
Example #22
Source File: motion_correction.py    From minian with GNU General Public License v3.0
def detect_and_correct_old(mov):
    surf = cv2.xfeatures2d.SURF_create(200)
    matcher = cv2.BFMatcher(crossCheck=True)
    detect_list = [surf.detectAndCompute(f, None) for f in mov]
    kp_list = [d[0] for d in detect_list]
    des_list = [d[1] for d in detect_list]
    match_list = []
    for des0, des1 in zip(des_list[:-1], des_list[1:]):
        match_list.append(matcher.match(des0, des1))
    matching_points = []
    for iframe, matches in enumerate(match_list):
        points0 = []
        points1 = []
        matches.sort(key=lambda ma: ma.distance, reverse=True)
        for ma in matches[:3]:
            points0.append(kp_list[iframe][ma.queryIdx].pt)
            points1.append(kp_list[iframe + 1][ma.trainIdx].pt)
        points0 = np.float32(np.array(points0))
        points1 = np.float32(np.array(points1))
        matching_points.append((points0, points1))
    trans_list = [
        cv2.getAffineTransform(pt[0], pt[1]) for pt in matching_points
    ]
    mov_correct = mov.copy()
    for iframe, trans in enumerate(trans_list):
        mov_correct[iframe + 1] = cv2.warpAffine(mov_correct[iframe], trans,
                                                 mov[0].shape[::-1])
    return mov_correct 
Example #23
Source File: stitch.py    From Image-stitcher with MIT License
def __init__(self, image1: np.ndarray, image2: np.ndarray, method: Enum=Method.SIFT, threshold=800) -> None:
        """输入两幅图像,计算其特征值
        此类用于输入两幅图像,计算其特征值,输入两幅图像分别为numpy数组格式的图像,其中的method参数要求输入SURF、SIFT或者ORB,threshold参数为特征值检测所需的阈值。

        Args:
            image1 (np.ndarray): 图像一
            image2 (np.ndarray): 图像二
            method (Enum, optional): Defaults to Method.SIFT. 特征值检测方法
            threshold (int, optional): Defaults to 800. 特征值阈值

        """

        self.image1 = image1
        self.image2 = image2
        self.method = method
        self.threshold = threshold

        self._keypoints1: List[cv2.KeyPoint] = None
        self._descriptors1: np.ndarray = None
        self._keypoints2: List[cv2.KeyPoint] = None
        self._descriptors2: np.ndarray = None

        if self.method == Method.ORB:
            # ORB descriptors are binary; matching errors out unless NORM_HAMMING is set
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            # self.matcher = cv2.BFMatcher(crossCheck=True)
            self.matcher = cv2.FlannBasedMatcher()

        self.match_points = []

        self.image_points1 = np.array([])
        self.image_points2 = np.array([]) 
Example #24
Source File: voc_obj_similarity.py    From keras-YOLOv3-model-set with MIT License
def ORB_img_similarity(img1_path,img2_path):
    """
    :param img1_path: 图片1路径
    :param img2_path: 图片2路径
    :return: 图片相似度
    """
    try:
        # read the images
        img1 = cv2.imread(img1_path)
        img2 = cv2.imread(img2_path)

        # initialize the ORB detector
        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)

        # match the feature descriptors with a brute-force matcher
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        # filter the results with kNN matching
        matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)

        # count the good matches
        good = [m for (m, n) in matches if m.distance < 0.75 * n.distance]
        similarity = len(good) / len(matches)
        return similarity

    except:
        return 0


# compute the local hash of the image -- pHash
Example #25
Source File: feature_match.py    From dual-fisheye-video-stitching with MIT License
def BFMatch_ORB(img1, img2):
    # Initiate ORB detector
    orb = cv2.ORB_create()
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1, des2)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    return (kp1, kp2, matches) 
Example #26
Source File: 04_feature_match.py    From Practical-Computer-Vision with MIT License
def brute_force_matcher(des1, des2):
    """
    Brute force matcher to match ORB feature descriptors
    """
    # create BFMatcher object
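    # note: NORM_HAMMING2 assumes ORB created with WTA_K == 3 or 4; the default WTA_K == 2 pairs with NORM_HAMMING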
    bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1,des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key = lambda x:x.distance)

    return matches 
Example #27
Source File: 08_compute_F_mat.py    From Practical-Computer-Vision with MIT License
def brute_force_matcher(des1, des2):
    """
    Brute force matcher to match ORB feature descriptors
    """
    # create BFMatcher object
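    # note: NORM_HAMMING2 assumes ORB created with WTA_K == 3 or 4; the default WTA_K == 2 pairs with NORM_HAMMING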
    bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1,des2)

    # Sort them in the order of their distance.
    matches = sorted(matches, key = lambda x:x.distance)

    return matches 
Example #28
Source File: ftlib.py    From sea_ice_drift with GNU General Public License v3.0
def _get_matches(descriptors1, descriptors2, matcher, norm, verbose):
    ''' Match descriptors using the given matcher and norm (by default BFMatcher with cv2.NORM_HAMMING) '''
    t0 = time.time()
    bf = matcher(norm)
    matches = bf.knnMatch(descriptors1, descriptors2, k=2)
    t1 = time.time()
    if verbose:
        print('Keypoints matched', t1 - t0)
    return matches 
Example #29
Source File: ftlib.py    From sea_ice_drift with GNU General Public License v3.0
def get_match_coords(keyPoints1, descriptors1,
                     keyPoints2, descriptors2,
                     matcher=cv2.BFMatcher,
                     norm=cv2.NORM_HAMMING,
                     ratio_test=0.7,
                     verbose=True,
                     **kwargs):
    ''' Filter matching keypoints and convert to X,Y coordinates
    Parameters
    ----------
        keyPoints1 : list - keypoints on img1 from find_key_points()
        descriptors1 : list - descriptors on img1 from find_key_points()
        keyPoints2 : list - keypoints on img2 from find_key_points()
        descriptors2 : list - descriptors on img2 from find_key_points()
        matcher : matcher from CV2
        norm : int - type of distance
        ratio_test : float - Lowe ratio
        verbose : bool - print some output ?
    Returns
    -------
        x1, y1, x2, y2 : coordinates of start and end of displacement [pixels]
    '''
    matches = _get_matches(descriptors1,
                           descriptors2, matcher, norm, verbose)
    x1, y1, x2, y2 = _filter_matches(matches, ratio_test,
                                     keyPoints1, keyPoints2, verbose)
    return x1, y1, x2, y2 
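The _filter_matches helper is defined elsewhere in ftlib.py and not shown here; a plausible sketch, assuming it applies the Lowe ratio test and unpacks keypoint coordinates:

import numpy as np

def _filter_matches(matches, ratio_test, keyPoints1, keyPoints2, verbose=True):
    # hypothetical helper: keep matches that pass the ratio test and
    # return the start/end coordinates of each displacement vector
    good = [m for m, n in matches if m.distance < ratio_test * n.distance]
    if verbose:
        print('Matches kept after ratio test:', len(good))
    x1 = np.array([keyPoints1[m.queryIdx].pt[0] for m in good])
    y1 = np.array([keyPoints1[m.queryIdx].pt[1] for m in good])
    x2 = np.array([keyPoints2[m.trainIdx].pt[0] for m in good])
    y2 = np.array([keyPoints2[m.trainIdx].pt[1] for m in good])
    return x1, y1, x2, y2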
Example #30
Source File: opencv_py.py    From python-urbanPlanning with MIT License
def matchSift(imgA,imgB):   
    img1 = cv2.imread(imgA, 0) 
    img2 = cv2.imread(imgB, 0)  
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)  # get SIFT keypoints and descriptors
    kp2, des2 = sift.detectAndCompute(img2, None)  
    bf = cv2.BFMatcher()  
    matches = bf.knnMatch(des1, des2, k=2)  # match by descriptors, returning the k best matches per query descriptor
    """
    .   @param k Count of best matches found per each query descriptor or less if a query descriptor has less than k possible matches in total.
    The result of matches = bf.match(des1,des2) line is a list of DMatch objects. This DMatch object has following attributes:
    DMatch.distance - Distance between descriptors. The lower, the better it is.
    DMatch.trainIdx - Index of the descriptor in train descriptors
    DMatch.queryIdx - Index of the descriptor in query descriptors
    DMatch.imgIdx - Index of the train image.
    参看:https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
    """
    print(type(matches),matches[:2],(matches[0][0].distance,matches[0][1].distance))
    good = []  
    for m, n in matches:  
        if m.distance < 0.75 * n.distance:  # with k=2 the nearest and second-nearest matches are returned; accept only when nearest/second-nearest < 0.75 (the ratio test explained by D.Lowe in his paper)
            good.append([m])  
    
    imgM = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[0:int(1*len(good)):int(0.1*len(good))], None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)  
    fig, ax=plt.subplots(figsize=(50,30))
    ax.imshow(imgM), plt.show()    
#    cv2.imshow('matchSift',imgM)
#    cv2.waitKey()
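As the docstring in this last example notes, a DMatch stores indices rather than coordinates; a minimal sketch of recovering the matched point coordinates from matchSift's kp1, kp2 and good (each entry of good is a one-element list, as drawMatchesKnn expects):

pts1 = [kp1[m[0].queryIdx].pt for m in good]  # (x, y) locations in img1
pts2 = [kp2[m[0].trainIdx].pt for m in good]  # (x, y) locations in img2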