Python cv2.drawMatchesKnn() Examples

The following are 8 code examples of cv2.drawMatchesKnn(), drawn from open-source projects. The header above each example names its source file, project, and license.
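Before the project examples, here is a minimal, self-contained sketch of the typical call pattern (the image filenames are placeholders; ORB is used so the sketch also runs on OpenCV builds without the contrib modules):

import cv2

img1 = cv2.imread('img1.png', 0)   # query image, grayscale
img2 = cv2.imread('img2.png', 0)   # train image, grayscale

orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

# k=2 returns the two best matches per query descriptor
matches = cv2.BFMatcher(cv2.NORM_HAMMING).knnMatch(des1, des2, k=2)

# Lowe's ratio test; note the [m] nesting: drawMatchesKnn expects a list of match lists
good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]

out = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imwrite('matches.png', out)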
Example #1
Source File: frame_matching.py    From hfnet with MIT License
def baseline_sift_matching(img1, img2):
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    matches = cv2.BFMatcher().knnMatch(des1, des2, k=2)

    good = [[m] for m, n in matches if m.distance < 0.7*n.distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                              matchColor=(0, 255, 0), matchesMask=None,
                              singlePointColor=(255, 0, 0), flags=0)
    return img3 
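A caveat worth adding here (not part of the hfnet source): knnMatch may return fewer than two neighbors for some descriptors, in which case the m, n unpacking in the list comprehension above raises a ValueError. A defensive variant of the ratio test:

good = [[m[0]] for m in matches
        if len(m) == 2 and m[0].distance < 0.7 * m[1].distance]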
Example #2
Source File: model.py    From ad-versarial with MIT License
def sift_pred(cv2_sift, bf, query_kp, query_des, patch,
              patch_kp=None, patch_des=None,
              template_img=None, draw_matches=False, ratio=0.6, fp=False):

    if patch_kp is None or patch_des is None:
        patch_kp, patch_des = get_keypoints(cv2_sift, patch)

    if patch_des is None:
        match_list = []
    else:
        match_list = bf.knnMatch(query_des, patch_des, k=2)
        match_list = [m for m in match_list if len(m) == 2]

    # Apply ratio test
    good = []
    score = 0.0
    for m, n in match_list:
        if m.distance < ratio * n.distance:
            good.append([m])
            if not fp:
                score += n.distance / np.maximum(m.distance, 0.01)
        else:
            if fp:
                score += np.sqrt(m.distance / n.distance - ratio)

    if draw_matches:
        template_img = resize(template_img.copy())
        if has_alpha(template_img):
            template_img = blend_white(template_img)
        if has_alpha(patch):
            patch = blend_white(patch)

        drawn_matches = cv2.drawMatchesKnn(template_img,
                                           query_kp,
                                           resize(patch),
                                           patch_kp,
                                           good, None, flags=2)

        return score, len(good), drawn_matches

    return score, len(good) 
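A hypothetical invocation of sift_pred (get_keypoints is the project's own helper; template_img and patch are placeholder images):

cv2_sift = cv2.xfeatures2d.SIFT_create()
bf = cv2.BFMatcher()
query_kp, query_des = get_keypoints(cv2_sift, template_img)
score, n_good = sift_pred(cv2_sift, bf, query_kp, query_des, patch)

With fp=False the score accumulates n.distance / m.distance over the good matches, so a highly distinctive match (m.distance much smaller than n.distance) contributes more than a borderline one.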
Example #3
Source File: image_proc.py    From onmyoji_bot with GNU General Public License v3.0
def match_img_knn(queryImage, trainingImage, thread=0):
    sift = cv2.xfeatures2d.SIFT_create()  # create the SIFT detector
    kp1, des1 = sift.detectAndCompute(queryImage, None)
    kp2, des2 = sift.detectAndCompute(trainingImage, None)
    #print(len(kp1))
    # set up the FLANN parameters
    FLANN_INDEX_KDTREE = 1
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(des1, des2, k=2)

    good = []

    # initialize the match mask
    matchesMask = [[0, 0] for i in range(len(matches))]
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7*n.distance:  # ratio test: keep a match only if its nearest distance is below 0.7 of the second-nearest
            matchesMask[i] = [1, 0]
            good.append(m)

    s = sorted(good, key=lambda x: x.distance)
    '''
    drawParams=dict(matchColor=(0,0,255),singlePointColor=(255,0,0),matchesMask=matchesMask,flags=0) # define colors for the keypoints and match lines
    resultimage=cv2.drawMatchesKnn(queryImage,kp1,trainingImage,kp2,matches,None,**drawParams) # draw the matching result
    cv2.imshow('res',resultimage)
    cv2.waitKey(0)
    '''
    #print(len(good))
    if len(good) > thread:
        maxLoc = kp2[s[0].trainIdx].pt
        #print(maxLoc)
        return (int(maxLoc[0]), int(maxLoc[1]))
    else:
        return (0, 0) 
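The KD-tree index used above suits float descriptors such as SIFT. For binary descriptors (e.g. ORB), FLANN needs LSH index parameters instead; a sketch using the parameter ranges suggested in the OpenCV documentation:

FLANN_INDEX_LSH = 6
indexParams = dict(algorithm=FLANN_INDEX_LSH,
                   table_number=6, key_size=12, multi_probe_level=1)
flann = cv2.FlannBasedMatcher(indexParams, dict(checks=50))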
Example #4
Source File: frame_matching.py    From hfnet with MIT License
def debug_matching(frame1, frame2, path_image1, path_image2, matches,
                   matches_mask, num_points, use_ratio_test):
    img1 = cv2.imread(path_image1, 0)
    img2 = cv2.imread(path_image2, 0)

    kp1 = get_ocv_kpts_from_np(frame1['keypoints'][:num_points, :])
    kp2 = get_ocv_kpts_from_np(frame2['keypoints'][:num_points, :])

    if use_ratio_test:
        img = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None,
                                 matchColor=(0, 255, 0),
                                 matchesMask=matches_mask,
                                 singlePointColor=(255, 0, 0), flags=0)
    else:
        img = cv2.drawMatches(img1, kp1, img2, kp2, matches, None,
                              matchColor=(0, 255, 0),
                              singlePointColor=(255, 0, 0), flags=0)

    img_sift = baseline_sift_matching(img1, img2)

    fig = plt.figure(figsize=(2, 1))
    fig.add_subplot(2, 1, 1)
    plt.imshow(img)
    plt.title('Custom features')
    fig.add_subplot(2, 1, 2)
    plt.imshow(img_sift)
    plt.title('SIFT')
    plt.show() 
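get_ocv_kpts_from_np is defined elsewhere in hfnet; a plausible minimal equivalent, inferred from the call site (the size argument is an arbitrary keypoint diameter required by the constructor), would be:

def get_ocv_kpts_from_np(np_kpts, size=1.0):
    # each row of np_kpts is an (x, y) coordinate
    return [cv2.KeyPoint(float(x), float(y), size) for x, y in np_kpts]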
Example #5
Source File: opencv_py.py    From python-urbanPlanning with MIT License
def matchSift(imgA,imgB):   
    img1 = cv2.imread(imgA, 0) 
    img2 = cv2.imread(imgB, 0)  
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)  # get the SIFT keypoints and descriptors
    kp2, des2 = sift.detectAndCompute(img2, None)  
    bf = cv2.BFMatcher()  
    matches = bf.knnMatch(des1, des2, k=2)  # match the images by descriptor, returning the k best matches per query descriptor
    """
    @param k Count of best matches found per query descriptor, or fewer if a query descriptor has fewer than k possible matches in total.
    The result of the matches = bf.match(des1, des2) line is a list of DMatch objects. A DMatch object has the following attributes:
    DMatch.distance - Distance between descriptors. The lower, the better.
    DMatch.trainIdx - Index of the descriptor in the train descriptors
    DMatch.queryIdx - Index of the descriptor in the query descriptors
    DMatch.imgIdx - Index of the train image.
    See: https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
    """
    print(type(matches),matches[:2],(matches[0][0].distance,matches[0][1].distance))
    good = []  
    for m, n in matches:  
        if m.distance < 0.75 * n.distance:  # with k=2 the nearest and second-nearest neighbors are returned; a match is kept only if nearest/second-nearest < 0.75 (the ratio test from D. Lowe's SIFT paper)
            good.append([m])  
    
    step = max(1, int(0.1 * len(good)))  # sample every ~10% of the good matches; max(1, ...) avoids a zero slice step when len(good) < 10
    imgM = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good[::step], None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    fig, ax=plt.subplots(figsize=(50,30))
    ax.imshow(imgM), plt.show()    
#    cv2.imshow('matchSift',imgM)
#    cv2.waitKey() 
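To make the DMatch attributes listed in the docstring concrete, a quick inspection sketch (assuming kp1, kp2, and matches from matchSift above):

m = matches[0][0]               # best match for the first query descriptor
print(m.queryIdx, m.trainIdx, m.imgIdx, m.distance)
pt_query = kp1[m.queryIdx].pt   # keypoint location in img1
pt_train = kp2[m.trainIdx].pt   # keypoint location in img2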
Example #6
Source File: trainer_matches.py    From Yugioh-bot with MIT License
def get_matches(self, train, corr):
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)
        if des1 is None or des2 is None:
            return False
        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and y2 > self.yThreshold and x2 < self.xThreshold:
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster, new_matches = self.compare_distances(train_img, cluster, good_matches)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, new_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True 
Example #7
Source File: trainer_matches.py    From Yugioh-bot with MIT License
def get_matches(self, train, corr):
        train_img = cv2.imread(train, 0)
        query_img = self.query
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()

        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(train_img, None)
        kp2, des2 = sift.detectAndCompute(query_img, None)
        if des1 is None or des2 is None:
            return False
        # create BFMatcher object
        bf = cv2.BFMatcher()
        try:
            matches = bf.knnMatch(des1, des2, k=2)
        except cv2.error:
            return False
        good_matches = []
        cluster = []
        for m, n in matches:
            img2_idx = m.trainIdx
            img1_idx = m.queryIdx
            (x1, y1) = kp1[img1_idx].pt
            (x2, y2) = kp2[img2_idx].pt
            # print("Comare %d to %d and %d to %d" % (x1,x2,y1,y2))
            if m.distance < 0.8 * n.distance and self.in_box(x2, y2):
                good_matches.append([m])
                cluster.append([int(x2), int(y2)])
        if len(cluster) <= corr:
            return False
        self.kmeans = KMeans(n_clusters=1, random_state=0).fit(cluster)
        new_cluster, new_matches = self.compare_distances(train_img, cluster, good_matches)
        if len(new_cluster) == 0 or len(new_cluster) / len(cluster) < .5:
            return False
        img3 = cv2.drawMatchesKnn(
            train_img, kp1, query_img, kp2, new_matches, None, flags=2)
        if self._debug:
            self.images.append(img3)
            self.debug_matcher(img3)
        return True 
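Example #7 differs from Example #6 only in its spatial filter: the hard-coded yThreshold/xThreshold comparison is replaced by an in_box test. A plausible implementation, inferred from the call site (self.box is hypothetical), might look like:

def in_box(self, x, y):
    left, top, right, bottom = self.box  # hypothetical stored region of interest
    return left <= x <= right and top <= y <= bottom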
Example #8
Source File: feature.py    From findit with MIT License
def get_feature_point_list(
        self, template_pic_object: np.ndarray, target_pic_object: np.ndarray
    ) -> typing.Sequence[Point]:
        """
        compare via feature matching

        :param template_pic_object:
        :param target_pic_object:
        :return:
        """
        # IMPORTANT
        # SIFT and SURF cannot be used with Python >= 3.8,
        # so we switch to the ORB detector,
        # which may be less precise now

        # Initiate ORB detector
        orb = cv2.ORB_create()

        # find the keypoints and descriptors with ORB
        template_kp, template_desc = orb.detectAndCompute(template_pic_object, None)
        target_kp, target_desc = orb.detectAndCompute(target_pic_object, None)

        # key points count
        logger.debug(f"template key point count: {len(template_kp)}")
        logger.debug(f"target key point count: {len(target_kp)}")

        # find 2 points, which are the closest
        # Matching frames amounts to finding, for each descriptor in one set (the query set),
        # its nearest neighbors in another set (the train set); here the nearest and
        # second-nearest neighbor of each descriptor are found.
        # A correct match is much closer to its first neighbor; for an incorrect match the two
        # neighbors lie at similar distances, so the distance ratio indicates match quality.
        # more details: https://blog.csdn.net/liangjiubujiu/article/details/80418079
        # flann = cv2.FlannBasedMatcher()
        # matches = flann.knnMatch(template_desc, target_desc, k=2)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        # match the feature descriptors
        matches = bf.knnMatch(template_desc, target_desc, k=1)

        # matches are something like:
        # [[<DMatch 0x12400a350>, <DMatch 0x12400a430>], [<DMatch 0x124d6a170>, <DMatch 0x124d6a450>]]

        logger.debug(f"matches num: {len(matches)}")

        # TODO here is a sample to show feature points
        # temp = cv2.drawMatchesKnn(template_pic_object, template_kp, target_pic_object, target_kp, matches, None, flags=2)
        # cv2.imshow('feature_points', temp)
        # cv2.waitKey(0)

        good = list()
        if matches:
            good = matches[0]

        # get positions
        point_list = list()
        for each in good:
            target_idx = each.trainIdx
            each_point = Point(*target_kp[target_idx].pt)
            point_list.append(each_point)

        return point_list
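A note on the k=1 / crossCheck=True combination used above: cross-checking keeps a match only when the two descriptors pick each other, which takes the place of a ratio test (and is also why k must be 1 here). Each element of matches is then a one-element list, or an empty list where cross-checking removed the candidate, so matches[0] selects only the match list of the first query descriptor. If the intent were to collect every surviving match, a flattened variant (an observation about the API, not a change to findit) would be:

good = [m[0] for m in matches if m]   # skip empty lists left by cross-checking
point_list = [Point(*target_kp[m.trainIdx].pt) for m in good]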