Python cv2.ORB_create() Examples

The following are 30 code examples of cv2.ORB_create(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the cv2 module.
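
As a quick orientation before the examples, here is a minimal sketch of the typical call pattern (file names are placeholders):

import cv2

# Load an image, detect ORB keypoints, and draw them.
img = cv2.imread('image.jpg', cv2.IMREAD_GRAYSCALE)
orb = cv2.ORB_create(nfeatures=500)  # 500 is also the default feature cap
keypoints, descriptors = orb.detectAndCompute(img, None)
out = cv2.drawKeypoints(img, keypoints, None, color=(0, 255, 0))
cv2.imwrite('orb_keypoints.jpg', out)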
Example #1
Source File: find_obj.py    From OpenCV-Python-Tutorial with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params = dict(algorithm = FLANN_INDEX_LSH,
                                table_number = 6,      # 12
                                key_size = 12,         # 20
                                multi_probe_level = 1) # 2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
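
The snippet assumes module-level FLANN index constants defined earlier in find_obj.py; in the OpenCV sample they are:

FLANN_INDEX_KDTREE = 1  # KD-tree index, for float descriptors (SIFT/SURF)
FLANN_INDEX_LSH = 6     # LSH index, for binary descriptors (ORB/AKAZE/BRISK)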
Example #2
Source File: image.py    From ImageAnalysis with MIT License
def make_detector(self):
        global detector
        
        detector_node = getNode('/config/detector', True)
        if detector_node.getString('detector') == 'SIFT':
            max_features = detector_node.getInt('sift_max_features')
            #detector = cv2.xfeatures2d.SIFT_create(nfeatures=max_features)
            detector = cv2.xfeatures2d.SIFT_create()
        elif detector_node.getString('detector') == 'SURF':
            threshold = detector_node.getFloat('surf_hessian_threshold')
            nOctaves = detector_node.getInt('surf_noctaves')
            detector = cv2.xfeatures2d.SURF_create(hessianThreshold=threshold, nOctaves=nOctaves)
        elif detector_node.getString('detector') == 'ORB':
            max_features = detector_node.getInt('orb_max_features')
            detector = cv2.ORB_create(max_features)
        elif detector_node.getString('detector') == 'Star':
            maxSize = detector_node.getInt('star_max_size')
            responseThreshold = detector_node.getInt('star_response_threshold')
            lineThresholdProjected = detector_node.getInt('star_line_threshold_projected')
            lineThresholdBinarized = detector_node.getInt('star_line_threshold_binarized')
            suppressNonmaxSize = detector_node.getInt('star_suppress_nonmax_size')
            detector = cv2.xfeatures2d.StarDetector_create(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize) 
Example #3
Source File: 08_compute_F_mat.py    From Practical-Computer-Vision with MIT License
def compute_orb_keypoints(filename):
    """
    Reads image from filename and computes ORB keypoints
    Returns image, keypoints and descriptors. 
    """
    # load image
    img = cv2.imread(filename)
    
    # create orb object
    orb = cv2.ORB_create()
    
    # set parameters 
    orb.setScoreType(cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    orb.setWTA_K(3)
    
    # detect keypoints
    kp = orb.detect(img,None)

    # for detected keypoints compute descriptors. 
    kp, des = orb.compute(img, kp)
    
    return img, kp, des
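
A note on matching these descriptors: with setWTA_K(3) each descriptor element is a 2-bit index, so per the OpenCV documentation matching should use cv2.NORM_HAMMING2 rather than cv2.NORM_HAMMING. A minimal sketch, assuming des1 and des2 come from two calls to compute_orb_keypoints:

bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)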
Example #4
Source File: 04_feature_match.py    From Practical-Computer-Vision with MIT License
def compute_orb_keypoints(filename):
    """
    Takes in filename to read and computes ORB keypoints
    Returns image, keypoints and descriptors 
    """

    img = cv2.imread(filename)
    img = cv2.pyrDown(img)
    img = cv2.pyrDown(img)
    # img = cv2.pyrDown(img)
    # img = cv2.pyrDown(img)
    # create orb object
    orb = cv2.ORB_create()
    
    # set parameters 
    orb.setScoreType(cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    orb.setWTA_K(3)
    
    kp = orb.detect(img,None)

    kp, des = orb.compute(img, kp)
    return img, kp, des
Example #5
Source File: 04_flann_feature_match.py    From Practical-Computer-Vision with MIT License
def compute_orb_keypoints(filename):
    """
    Takes in filename to read and computes ORB keypoints
    Returns image, keypoints and descriptors 
    """

    img = cv2.imread(filename)
    # create orb object
    orb = cv2.ORB_create()
    
    # set parameters 
    orb.setScoreType(cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    orb.setWTA_K(3)
    
    kp = orb.detect(img,None)

    kp, des = orb.compute(img, kp)
    return img, kp, des
Example #6
Source File: 04_orb_detections.py    From Practical-Computer-Vision with MIT License
def compute_orb_keypoints(filename):
    """
    Reads image from filename and computes ORB keypoints
    Returns image, keypoints and descriptors. 
    """
    # load image
    img = cv2.imread(filename)
    
    # create orb object
    orb = cv2.ORB_create()
    
    # set parameters 
    orb.setScoreType(cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    orb.setWTA_K(3)
    
    # detect keypoints
    kp = orb.detect(img,None)

    # for detected keypoints compute descriptors. 
    kp, des = orb.compute(img, kp)
    return img, kp, des
Example #7
Source File: test_pickle_import_export.py    From douglas-quaid with GNU General Public License v3.0
def test_pickle_import_export_ORB(self):
        algo = cv2.ORB_create(nfeatures=10)
        img = cv2.imread(str(self.test_file_path / "original.bmp"), 0)

        # compute the descriptors with ORB
        kp, des = algo.detectAndCompute(img, None)

        self.logger.debug(f"Keypoints : {kp}")
        self.logger.debug(f"Example of Keypoint")

        pickler = pickle_import_export.Pickler()
        self.logger.debug("Save to pickle ... ")
        pc = pickler.get_pickle_from_object(kp)
        self.logger.debug("Load from pickle ... ")
        kp2 = pickler.get_object_from_pickle(pc)

        for i, k in enumerate(kp):
            self.assertEqual(kp[i].response, kp2[i].response)
            self.assertEqual(kp[i].angle, kp2[i].angle)
            self.assertEqual(kp[i].class_id, kp2[i].class_id)
            self.assertEqual(kp[i].octave, kp2[i].octave)
            self.assertEqual(kp[i].pt, kp2[i].pt)
            self.assertEqual(kp[i].size, kp2[i].size) 
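
cv2.KeyPoint objects are not picklable by the standard pickle module, which is presumably what the project's Pickler works around. A common standalone workaround (a sketch under that assumption, not the project's actual code) is to flatten each keypoint into a tuple:

import pickle
import cv2

def keypoints_to_tuples(kps):
    # Each KeyPoint field is a plain picklable value.
    return [(kp.pt, kp.size, kp.angle, kp.response, kp.octave, kp.class_id)
            for kp in kps]

def tuples_to_keypoints(tuples):
    # Keyword names follow current OpenCV bindings (older ones used _size etc.).
    return [cv2.KeyPoint(x=pt[0], y=pt[1], size=size, angle=angle,
                         response=response, octave=octave, class_id=class_id)
            for (pt, size, angle, response, octave, class_id) in tuples]

payload = pickle.dumps(keypoints_to_tuples(kp))
kp_restored = tuples_to_keypoints(pickle.loads(payload))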
Example #8
Source File: picture_bow_orber.py    From douglas-quaid with GNU General Public License v3.0
def __init__(self, fe_conf: feature_extractor_conf.Default_feature_extractor_conf):
        # STD attributes
        self.fe_conf: feature_extractor_conf.Default_feature_extractor_conf = fe_conf
        self.logger = logging.getLogger(__name__)
        self.logger.info("Creation of a Picture BoW Orber")

        self.algo = cv2.ORB_create(nfeatures=fe_conf.ORB_KEYPOINTS_NB)
        # TODO : Dictionary path / Vocabulary
        self.bow_descriptor = cv2.BOWImgDescriptorExtractor(self.algo, cv2.BFMatcher(cv2.NORM_HAMMING))
        self.vocab_loaded = False
        try:
            vocab = BoWOrb_Vocabulary_Creator.load_vocab_from_file(fe_conf.BOW_VOCAB_PATH)
            self.bow_descriptor.setVocabulary(vocab)
            self.vocab_loaded = True
        except Exception as e:
            self.logger.error(f"No vocabulary file provided. Not possible to use Bow-ORB : {e}")
Example #9
Source File: pose_flow.py    From detectron2-pipeline with MIT License
def orb_matching(frame1, frame2, orb_features=10000):
    # Initiate ORB detector
    orb = cv2.ORB_create(nfeatures=orb_features, scoreType=cv2.ORB_FAST_SCORE)

    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(frame1, None)
    kp2, des2 = orb.detectAndCompute(frame2, None)

    return flann_matching((kp1, des1), (kp2, des2))


# stack all already-tracked people's info together (thanks @ZongweiZhou1)
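
flann_matching is defined elsewhere in pose_flow.py. A sketch of what an LSH-based matcher for binary ORB descriptors with Lowe's ratio test could look like (function body and parameter values are assumptions, not the project's code):

def flann_matching(kp_des_1, kp_des_2, ratio=0.7):
    (kp1, des1), (kp2, des2) = kp_des_1, kp_des_2
    FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_LSH,
                        table_number=12, key_size=12, multi_probe_level=2)
    matcher = cv2.FlannBasedMatcher(index_params, dict(checks=100))
    matches = matcher.knnMatch(des1, des2, k=2)
    # LSH can return fewer than k neighbours, so guard the ratio test.
    good = [pair[0] for pair in matches
            if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance]
    return kp1, kp2, good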
Example #10
Source File: plane_tracker.py    From OpenCV-Python-Tutorial with MIT License
def __init__(self):
        self.detector = cv2.ORB_create( nfeatures = 1000 )
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
        self.targets = []
        self.frame_points = [] 
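
flann_params here is a module-level dict; in the OpenCV plane_tracker.py sample it is the same LSH configuration shown in Example #1:

FLANN_INDEX_LSH = 6
flann_params = dict(algorithm = FLANN_INDEX_LSH,
                    table_number = 6,      # 12
                    key_size = 12,         # 20
                    multi_probe_level = 1) # 2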
Example #11
Source File: pose_tracker.py    From detectron2-pipeline with MIT License
def __init__(self, link_len=100, num=7, mag=30, match=0.2, orb_features=1000):
        self.frame_tracks = []
        self.last_pid = 0
        self.link_len = link_len
        self.num = num
        self.mag = mag
        self.match = match
        self.orb_features = orb_features
        self.orb = cv2.ORB_create(nfeatures=orb_features, scoreType=cv2.ORB_FAST_SCORE) 
Example #12
Source File: baselines.py    From sips2_open with GNU General Public License v3.0
def __init__(self, type_string):
        if type_string == 'surf':
            self.detector = cv2.xfeatures2d.SURF_create()
        elif type_string == 'sift':
            self.detector = cv2.xfeatures2d.SIFT_create()
        elif type_string == 'orb':
            self.detector = cv2.ORB_create()
        else:
            assert False 
Example #13
Source File: featureDetection.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    org_image = cv2.imread("../data/house.tiff", 1)
    '''
    SURF is better than SIFT and detects and computes features faster,
    but unfortunately both are patented (non-free in older OpenCV builds).

    As an alternative, we have ORB by OpenCV. Free. OSS.
    PARAM: nfeatures : Number of features to be detected.
                       Default value is 500.
    '''

    sift = cv2.xfeatures2d.SIFT_create()
    surf = cv2.xfeatures2d.SURF_create()
    orb = cv2.ORB_create(nfeatures=1000)

    kp_sift, des_sift = sift.detectAndCompute(org_image, None)
    kp_surf, des_surf = surf.detectAndCompute(org_image, None)
    kp_orb, des_orb = orb.detectAndCompute(org_image, None)

    org_image_sift = cv2.drawKeypoints(org_image, kp_sift, None)
    org_image_surf = cv2.drawKeypoints(org_image, kp_surf, None)
    org_image_orb = cv2.drawKeypoints(org_image, kp_orb, None)

    cv2.imshow("SIFT Features Detected", org_image_sift)
    cv2.imshow("SURF Features Detected", org_image_surf)
    cv2.imshow("ORB Features Detected", org_image_orb)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
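
A note on the licensing remark above: SIFT and SURF were patent-encumbered rather than paid, and the SIFT patent has since expired; from OpenCV 4.4 SIFT is available in the main module. A hedged compatibility sketch:

try:
    sift = cv2.SIFT_create()               # OpenCV >= 4.4
except AttributeError:
    sift = cv2.xfeatures2d.SIFT_create()   # older builds with opencv-contrib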
Example #14
Source File: tools.py    From BlindWatermark with GNU General Public License v3.0
def run(self):
        img = cv2.imread(self.ori_img)
        img2 = cv2.imread(self.attacked_img)

        height = img.shape[0]
        width  = img.shape[1]
        # Initiate ORB detector
        orb = cv2.ORB_create(128)
        MIN_MATCH_COUNT=10
        # find the keypoints and descriptors with ORB
        kp1, des1 = orb.detectAndCompute(img,None)
        kp2, des2 = orb.detectAndCompute(img2,None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        search_params = dict(checks = 50)

        flann = cv2.FlannBasedMatcher(index_params, search_params)

        # FLANN's KD-tree index expects float vectors, so cast the binary ORB descriptors
        des1 = np.float32(des1)
        des2 = np.float32(des2)

        matches = flann.knnMatch(des1,des2,k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m,n in matches:
            if m.distance < self.rate*n.distance:
                good.append(m)

        if len(good)>MIN_MATCH_COUNT:
            src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
            dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
            M, mask = cv2.findHomography( dst_pts,src_pts, cv2.RANSAC,5.0)
            out = cv2.warpPerspective(img2, M, (width,height)) # size is (width, height): columns first, then rows
            cv2.imwrite(self.outfile_name,out)
            self.num_of_good.emit(len(good),self.outfile_name)
        else :
            self.num_of_good.emit(0,'') 
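
ORB descriptors are binary (uint8), while the KD-tree index used above expects float vectors, hence the np.float32 cast. A minimal alternative sketch (not the project's code) matches the raw binary descriptors with an LSH index instead:

FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6, key_size=12, multi_probe_level=1)
flann = cv2.FlannBasedMatcher(index_params, dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)  # des1/des2 stay uint8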
Example #15
Source File: tools.py    From BlindWatermark with GNU General Public License v3.0
def recovery(ori_img,attacked_img,outfile_name = './recoveried.png',rate=0.7):
    img = cv2.imread(ori_img)
    img2 = cv2.imread(attacked_img)

    height = img.shape[0]
    width  = img.shape[1]
    # Initiate ORB detector
    orb = cv2.ORB_create(128)
    MIN_MATCH_COUNT=10
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img,None)
    kp2, des2 = orb.detectAndCompute(img2,None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # FLANN's KD-tree index expects float vectors, so cast the binary ORB descriptors
    des1 = np.float32(des1)
    des2 = np.float32(des2)

    matches = flann.knnMatch(des1,des2,k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < rate*n.distance:
            good.append(m)

    if len(good)>MIN_MATCH_COUNT:
        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
        M, mask = cv2.findHomography( dst_pts,src_pts, cv2.RANSAC,5.0)
        out = cv2.warpPerspective(img2, M, (width,height)) # size is (width, height): columns first, then rows
        cv2.imwrite(outfile_name,out) 
Example #16
Source File: thesis.py    From Image-stitcher with MIT License
def time_of_detector():
    import time
    image_ids = [3, 19, 20]
    for name in image_ids:
        image = cv2.imread("../resource/{}-left.jpg".format(name))
        for method in (cv2.xfeatures2d.SURF_create, cv2.xfeatures2d.SIFT_create, cv2.ORB_create):

            start = time.time()
            for i in range(10):
                method().detectAndCompute(image, None)
            print("Image {}, using {}".format(name, method))
            print("Time spent: ", time.time() - start)
Example #17
Source File: ImageUtility.py    From ImageStitch with MIT License
def detectAndDescribe(self, image, featureMethod):
        '''
        Computes the set of feature points of an image and returns the
        keypoints together with their descriptors.
        :param image: image to analyze
        :return: keypoints and corresponding descriptors (kps, features)
        '''
        if self.isGPUAvailable == False: # CPU mode
            if featureMethod == "sift":
                descriptor = cv2.xfeatures2d.SIFT_create()
            elif featureMethod == "surf":
                descriptor = cv2.xfeatures2d.SURF_create()
            elif featureMethod == "orb":
                descriptor = cv2.ORB_create(self.orbNfeatures, self.orbScaleFactor, self.orbNlevels, self.orbEdgeThreshold, self.orbFirstLevel, self.orbWTA_K, 0, self.orbPatchSize, self.orbFastThreshold)
            # detect keypoints and compute descriptors
            kps, features = descriptor.detectAndCompute(image, None)
            # convert the result into a NumPy array
            kps = np.float32([kp.pt for kp in kps])
        else:                           # GPU mode
            if featureMethod == "sift":
                # GPU-SIFT is not implemented yet, so fall back to the CPU version
                descriptor = cv2.xfeatures2d.SIFT_create()
                kps, features = descriptor.detectAndCompute(image, None)
                kps = np.float32([kp.pt for kp in kps])
            elif featureMethod == "surf":
                kps, features = self.npToKpsAndDescriptors(myGpuFeatures.detectAndDescribeBySurf(image, self.surfHessianThreshold, self.surfNOctaves,self.surfNOctaveLayers, self.surfIsExtended, self.surfKeypointsRatio, self.surfIsUpright))
            elif featureMethod == "orb":
                kps, features = self.npToKpsAndDescriptors(myGpuFeatures.detectAndDescribeByOrb(image, self.orbNfeatures, self.orbScaleFactor, self.orbNlevels, self.orbEdgeThreshold, self.orbFirstLevel, self.orbWTA_K, 0, self.orbPatchSize, self.orbFastThreshold, self.orbBlurForDescriptor))
        # return the keypoints and their descriptors
        return (kps, features) 
Example #18
Source File: voc_obj_similarity.py    From keras-YOLOv3-model-set with MIT License
def ORB_img_similarity(img1_path,img2_path):
    """
    :param img1_path: 图片1路径
    :param img2_path: 图片2路径
    :return: 图片相似度
    """
    try:
        # read the images
        img1 = cv2.imread(img1_path)
        img2 = cv2.imread(img2_path)

        # initialize the ORB detector
        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)

        # match descriptors with a brute-force Hamming matcher
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        # kNN matching: two nearest neighbours per descriptor
        matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)

        # apply Lowe's ratio test; the fraction of good matches is the similarity
        good = [m for (m, n) in matches if m.distance < 0.75 * n.distance]
        similarity = len(good) / len(matches)
        return similarity

    except Exception:
        return 0


# compute the perceptual hash (pHash) of an image
Example #19
Source File: picture_orber.py    From douglas-quaid with GNU General Public License v3.0
def __init__(self, fe_conf: feature_extractor_conf.Default_feature_extractor_conf):
        # STD attributes
        self.fe_conf : feature_extractor_conf.Default_feature_extractor_conf = fe_conf
        self.logger = logging.getLogger(__name__)
        self.logger.info("Creation of a Picture Orber")
        self.algo = cv2.ORB_create(nfeatures=fe_conf.ORB_KEYPOINTS_NB) 
Example #20
Source File: frame.py    From twitchslam with MIT License
def extractFeatures(img):
  orb = cv2.ORB_create()
  # detection
  pts = cv2.goodFeaturesToTrack(np.mean(img, axis=2).astype(np.uint8), 3000, qualityLevel=0.01, minDistance=7)

  # extraction
  kps = [cv2.KeyPoint(x=f[0][0], y=f[0][1], _size=20) for f in pts]
  kps, des = orb.compute(img, kps)

  # return pts and des
  return np.array([(kp.pt[0], kp.pt[1]) for kp in kps]), des 
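
The _size keyword above works with older OpenCV Python bindings; newer bindings (around OpenCV 4.5.3 onward) use size instead. A version-tolerant construction (an assumed helper, not twitchslam's code):

def make_keypoint(x, y, diameter=20):
    try:
        return cv2.KeyPoint(x=float(x), y=float(y), size=diameter)   # newer bindings
    except TypeError:
        return cv2.KeyPoint(x=float(x), y=float(y), _size=diameter)  # older bindings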
Example #21
Source File: hook.py    From stagesepx with MIT License
def __init__(self, *_, **__):
        super().__init__(*_, **__)
        self._orb = cv2.ORB_create() 
Example #22
Source File: ftlib.py    From sea_ice_drift with GNU General Public License v3.0
def find_key_points(image,
                    edgeThreshold=34,
                    nFeatures=100000,
                    nLevels=7,
                    patchSize=34,
                    **kwargs):
    ''' Initiate detector and find key points on an image
    Parameters
    ----------
        image : 2D UInt8 Numpy array - image
        edgeThreshold : int - parameter for OpenCV detector
        nFeatures : int - parameter for OpenCV detector
        nLevels : int - parameter for OpenCV detector
        patchSize : int - parameter for OpenCV detector
    Returns
    -------
        keyPoints : list - keypoints found on the image
        descriptors : list - binary descriptors of the keypoints
    '''
    if cv2.__version__.startswith('3.') or cv2.__version__.startswith('4.'):
        detector = cv2.ORB_create()
        detector.setEdgeThreshold(edgeThreshold)
        detector.setMaxFeatures(nFeatures)
        detector.setNLevels(nLevels)
        detector.setPatchSize(patchSize)
    else:
        detector = cv2.ORB()
        detector.setInt('edgeThreshold', edgeThreshold)
        detector.setInt('nFeatures', nFeatures)
        detector.setInt('nLevels', nLevels)
        detector.setInt('patchSize', patchSize)
    print('ORB detector initiated')

    keyPoints, descriptors = detector.detectAndCompute(image, None)
    print('Key points found: %d' % len(keyPoints))
    return keyPoints, descriptors 
Example #23
Source File: keypoint_matching.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        self.detector = cv2.ORB_create()
        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)  # alternatives: cv2.NORM_L1, cv2.NORM_L2 (not usable with binary ORB descriptors)
Example #24
Source File: device.py    From fitch with MIT License
def get_interest_point_list(self, *args, **kwargs) -> typing.List[cv2.KeyPoint]:
        """ find key points with ORB engine """
        p = self.screen_shot_to_object()
        orb = cv2.ORB_create(*args, **kwargs)
        return orb.detect(p, None) 
Example #25
Source File: test_db_adder.py    From douglas-quaid with GNU General Public License v3.0
def get_descriptors(self, filename="original.bmp"):
        self.algo = cv2.ORB_create(nfeatures=10)
        orb_pic = cv2.imread(str(self.test_file_path / filename), 0)

        key_points, descriptors = self.algo.detectAndCompute(orb_pic, None)

        return key_points, descriptors 
Example #26
Source File: bow_orb_vocabulary_creator.py    From douglas-quaid with GNU General Public License v3.0
def create_dict_from_folder(self, in_folder: pathlib.Path,
                                out_folder: pathlib.Path,
                                vocab_size: int,
                                nb_keypoints: int):
        # Extract the list of pictures to process
        files_list = self.get_file_list_from_folder(in_folder)

        # Define the nb of elements in the vocabulary we want
        # TODO : Define as a function of the number of pictures ?
        bow_trainer = cv2.BOWKMeansTrainer(vocab_size)
        algo = cv2.ORB_create(nfeatures=nb_keypoints)

        descriptors = None
        nb_corr_pictures = 0
        for curr_img_path in files_list:
            orb_pic = cv2.imread(str(curr_img_path))
            # arr = np.asarray(bytearray(orb_pic), dtype=np.uint8)
            # orb_pic = cv2.imdecode(arr, -1)
            # print(f"Picture converted to CV2 UMAT {type(orb_pic)}")

            key_points, descriptors = algo.detectAndCompute(orb_pic, None)

            if descriptors is not None:
                bow_trainer.add(np.float32(descriptors))
                nb_corr_pictures += 1
                if nb_corr_pictures % 20 == 0:
                    print(f"Working on pictures {nb_corr_pictures} out of {len(files_list)}")
            else:
                print(f"Descriptors not usables for pictures : {curr_img_path} are {descriptors}")

        print(f"Nb Pictures processed : {nb_corr_pictures}")
        print(f"Nb Pictures discarded : {len(files_list) - nb_corr_pictures}")

        vocab = bow_trainer.cluster().astype(descriptors.dtype)
        self.save_vocab_to_file(vocab, out_folder)

        return vocab 
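
Once the vocabulary is saved, it can be plugged into a cv2.BOWImgDescriptorExtractor (as Example #8 does) to turn a picture into a fixed-length histogram. A minimal sketch, with algo and vocab as produced above and a placeholder image path:

extractor = cv2.BOWImgDescriptorExtractor(algo, cv2.BFMatcher(cv2.NORM_HAMMING))
extractor.setVocabulary(vocab)
img = cv2.imread("some_picture.bmp", 0)       # placeholder path
keypoints = algo.detect(img, None)
bow_hist = extractor.compute(img, keypoints)  # 1 x vocab_size histogram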
Example #27
Source File: params.py    From stereo_ptam with GNU General Public License v3.0
def __init__(self, config='GFTT-BRIEF'):
        super().__init__()

        if config == 'GFTT-BRIEF':
            self.feature_detector = cv2.GFTTDetector_create(
                maxCorners=1000, minDistance=15.0, 
                qualityLevel=0.001, useHarrisDetector=False)

            self.descriptor_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
                bytes=32, use_orientation=False)

        elif config == 'ORB-BRIEF':
            self.feature_detector = cv2.ORB_create(
                nfeatures=200, scaleFactor=1.2, nlevels=1, edgeThreshold=31)

            self.descriptor_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create(
                bytes=32, use_orientation=False)
            
        else:
            raise NotImplementedError

        self.descriptor_matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)

        self.matching_cell_size = 15   # pixels
        self.matching_neighborhood = 2
        self.matching_distance = 25

        self.frustum_near = 0.1  # meters
        self.frustum_far = 50.0

        self.lc_max_inbetween_distance = 4   # meters
        self.lc_distance_threshold = 1.5
        self.lc_embedding_distance = 22.0

        self.view_image_width = 400
        self.view_image_height = 250
        self.view_camera_width = 0.1
        self.view_viewpoint_x = 0
        self.view_viewpoint_y = -1
        self.view_viewpoint_z = -10
        self.view_viewpoint_f = 2000 
Example #28
Source File: feature_match.py    From dual-fisheye-video-stitching with MIT License
def BFMatch_ORB(img1, img2):
    # Initiate ORB detector
    orb = cv2.ORB_create()
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1, des2)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    return (kp1, kp2, matches) 
Example #29
Source File: statistics.py    From dual-fisheye-video-stitching with MIT License
def BFMatch_ORB(img1, img2):
    # Initiate ORB detector
    orb = cv2.ORB_create()
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1, des2)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)

    return (kp1, kp2, matches) 
Example #30
Source File: baselines.py    From imips_open with GNU General Public License v3.0
def __init__(self, type_string):
        if type_string == 'surf':
            self.detector = cv2.xfeatures2d.SURF_create()
        elif type_string == 'sift':
            self.detector = cv2.xfeatures2d.SIFT_create()
        elif type_string == 'orb':
            self.detector = cv2.ORB_create()
        else:
            assert False