Python cv2.SURF Examples

The following are 7 code examples of cv2.SURF(), the Speeded-Up Robust Features detector from the OpenCV 2.x Python bindings. Each example comes from an open-source project; the source file and license are listed above each snippet. You may also want to check out the other available functions and classes of the cv2 module.
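Before the examples, note that the constructor differs between OpenCV releases: cv2.SURF() exists only in the 2.x bindings, while in OpenCV 3/4 SURF lives in the contrib package as cv2.xfeatures2d.SURF_create(). The snippet below is a minimal, version-agnostic sketch rather than code from any of the projects listed; 'image.jpg' is a placeholder path, and the 3/4 branch assumes an OpenCV build that includes the contrib (nonfree) modules.

import cv2

def create_surf(hessian_threshold=400):
    """Return a SURF detector for whichever OpenCV version is installed."""
    if hasattr(cv2, 'xfeatures2d'):
        # OpenCV 3/4: SURF is part of the contrib module.
        return cv2.xfeatures2d.SURF_create(hessianThreshold=hessian_threshold)
    # OpenCV 2.x: SURF is a top-level class.
    return cv2.SURF(hessian_threshold)

img = cv2.imread('image.jpg', cv2.IMREAD_GRAYSCALE)
surf = create_surf()
keypoints, descriptors = surf.detectAndCompute(img, None)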
Example #1
Source File: keypoint_matching_contrib.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE is recommended as a fast detector:
        if check_cv_version_is_new():
            # OpenCV 3/4: SURF is in the contrib module, which must be installed/compiled separately.
            try:
                self.detector = cv2.xfeatures2d.SURF_create(self.HESSIAN_THRESHOLD, upright=self.UPRIGHT)
            except Exception:
                import traceback
                traceback.print_exc()
                raise NoModuleError("There is no %s module in your OpenCV environment; the contrib module is required!" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.detector = cv2.SURF(self.HESSIAN_THRESHOLD, upright=self.UPRIGHT)

        # create FlannBasedMatcher object:
        self.matcher = cv2.FlannBasedMatcher({'algorithm': self.FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50)) 
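The detector and FLANN matcher built by init_detector are typically applied with a k-nearest-neighbour match followed by Lowe's ratio test. The following is a hedged sketch of that usage, as it might appear in another method of the same class, rather than Airtest's actual matching code; im_source and im_search are placeholder grayscale images, and detectAndCompute assumes the OpenCV 3/4 branch above.

        # Hypothetical usage of the detector/matcher created in init_detector:
        kp1, des1 = self.detector.detectAndCompute(im_source, None)
        kp2, des2 = self.detector.detectAndCompute(im_search, None)
        # knnMatch with k=2, then keep only matches passing Lowe's ratio test.
        matches = self.matcher.knnMatch(des1, des2, k=2)
        good = [m for m, n in matches if m.distance < 0.75 * n.distance]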
Example #2
Source File: findobj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
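A hedged usage sketch for init_feature (not part of the snippet above): FLANN_INDEX_KDTREE and FLANN_INDEX_LSH are module-level constants in the original script, shown here with their conventional FLANN values, and img1/img2 are placeholder grayscale images.

FLANN_INDEX_KDTREE = 1
FLANN_INDEX_LSH = 6

detector, matcher = init_feature('surf-flann')
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)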
Example #3
Source File: find_obj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #4
Source File: find_obj.py    From ImageAnalysis with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(400)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #5
Source File: find_obj.py    From PyCV-time with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
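In all four init_feature variants above, the FLANN_INDEX_LSH branch is taken for binary descriptors such as ORB, which are compared with Hamming distance rather than L2. Below is a hedged sketch of that path, under the same placeholder assumptions as before.

detector, matcher = init_feature('orb-flann')  # selects the LSH index parameters
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
raw_matches = matcher.knnMatch(desc1, desc2, k=2)
# FLANN/LSH can return fewer than k neighbours, so guard before the ratio test.
good = [pair[0] for pair in raw_matches
        if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance]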
Example #6
Source File: servoing_designed_features_quad_panda3d_env.py    From citysim3d with MIT License
def __init__(self, action_space, feature_type=None, filter_features=None,
                 max_time_steps=100, distance_threshold=4.0, **kwargs):
        """
        filter_features indicates whether to filter out key points that are not
        on the object in the current image. Key points in the target image are
        always filtered out.
        """
        SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
        ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps, distance_threshold=distance_threshold)

        lens = self.camera_node.node().getLens()
        self._observation_space.spaces['points'] = BoxSpace(np.array([-np.inf, lens.getNear(), -np.inf]),
                                                            np.array([np.inf, lens.getFar(), np.inf]))
        film_size = tuple(int(s) for s in lens.getFilmSize())
        self.mask_camera_sensor = Panda3dMaskCameraSensor(self.app, (self.skybox_node, self.city_node),
                                                          size=film_size,
                                                          near_far=(lens.getNear(), lens.getFar()),
                                                          hfov=lens.getFov())
        for cam in self.mask_camera_sensor.cam:
            cam.reparentTo(self.camera_sensor.cam)

        # Filter by default; otherwise honour the value passed by the caller (see docstring above).
        self.filter_features = True if filter_features is None else filter_features
        self._feature_type = feature_type or 'sift'
        if cv2.__version__.split('.')[0] == '3':
            from cv2.xfeatures2d import SIFT_create, SURF_create
            from cv2 import ORB_create
            if self.feature_type == 'orb':
                # https://github.com/opencv/opencv/issues/6081
                cv2.ocl.setUseOpenCL(False)
        else:
            SIFT_create = cv2.SIFT
            SURF_create = cv2.SURF
            ORB_create = cv2.ORB
        if self.feature_type == 'sift':
            self._feature_extractor = SIFT_create()
        elif self.feature_type == 'surf':
            self._feature_extractor = SURF_create()
        elif self.feature_type == 'orb':
            self._feature_extractor = ORB_create()
        else:
            raise ValueError("Unknown feature extractor %s" % self.feature_type)
        if self.feature_type == 'orb':
            self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._target_key_points = None
        self._target_descriptors = None 
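A hedged sketch of how the extractor and cross-checking matcher built in __init__ are typically used to relate a current frame to the stored target features; this is illustrative rather than the environment's actual observation code, and image/target_image are placeholder arrays.

        # Extract and store the target features once, then match each new frame against them.
        self._target_key_points, self._target_descriptors = self._feature_extractor.detectAndCompute(target_image, None)
        key_points, descriptors = self._feature_extractor.detectAndCompute(image, None)
        # With crossCheck=True, match() keeps only mutual nearest-neighbour pairs.
        matches = self._matcher.match(descriptors, self._target_descriptors)
        matched_points = [key_points[m.queryIdx].pt for m in matches]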
Example #7
Source File: adam_descriptors.py    From optimeyes with MIT License
def main():
    opencv_haystack = cv2.imread('adam.jpg')
    opencv_needle = cv2.imread('adam_rightnostril.jpg')
    ngrey = cv2.cvtColor(opencv_needle, cv2.COLOR_BGR2GRAY)
    hgrey = cv2.cvtColor(opencv_haystack, cv2.COLOR_BGR2GRAY)
    # build feature detector and descriptor extractor
    hessian_threshold = 175
    detector = cv2.SURF(hessian_threshold)
    (hkeypoints, hdescriptors) = detector.detect(hgrey, None, useProvidedKeypoints = False)
    (nkeypoints, ndescriptors) = detector.detect(ngrey, None, useProvidedKeypoints = False)

    # extract vectors of size 64 from raw descriptors numpy arrays
    rowsize = len(hdescriptors) / len(hkeypoints)
    if rowsize > 1:
        hrows = numpy.array(hdescriptors, dtype = numpy.float32).reshape((-1, rowsize))
        nrows = numpy.array(ndescriptors, dtype = numpy.float32).reshape((-1, rowsize))
        print "haystack rows shape", hrows.shape
        print "needle rows shape", nrows.shape
    else:
        print 'descriptors already come one row per keypoint'
        hrows = numpy.array(hdescriptors, dtype = numpy.float32)
        nrows = numpy.array(ndescriptors, dtype = numpy.float32)
        rowsize = len(hrows[0])

    # kNN training - learn mapping from hrow to hkeypoints index
    samples = hrows
    responses = numpy.arange(len(hkeypoints), dtype = numpy.float32)
    print "sample length", len(samples), "response length", len(responses)
    knn = cv2.KNearest()
    knn.train(samples,responses)

    # retrieve index and value through enumeration
    for i, descriptor in enumerate(nrows):
        descriptor = numpy.array(descriptor, dtype = numpy.float32).reshape((1, rowsize))
        print i, 'descriptor shape', descriptor.shape, 'sample shape', samples[0].shape
        retval, results, neigh_resp, dists = knn.find_nearest(descriptor, 1)
        res, dist =  int(results[0][0]), dists[0][0]
        print 'result', res, 'distance', dist

        if dist < 0.1:
            # draw matched keypoints in red color
            color = (0, 0, 255)
        else:
            # draw unmatched in blue color
            color = (255, 0, 0)
        # draw matched key points on haystack image
        x,y = hkeypoints[res].pt
        center = (int(x),int(y))
        cv2.circle(opencv_haystack,center,2,color,-1)
        # draw matched key points on needle image
        x,y = nkeypoints[i].pt
        center = (int(x),int(y))
        cv2.circle(opencv_needle,center,2,color,-1)

    cv2.imshow('haystack',opencv_haystack)
    cv2.imshow('needle',opencv_needle)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
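For reference, detector.detect() returning a (keypoints, descriptors) pair and cv2.KNearest/find_nearest are OpenCV 2.x-era APIs, and the script is written for Python 2 (print statements, integer division). A hedged sketch of the equivalent kNN step on OpenCV 3+/Python 3, not part of the original script, using the same float32 samples, responses, and descriptor arrays:

# Modern equivalent of the kNN training and query above (OpenCV 3+).
knn = cv2.ml.KNearest_create()
knn.train(samples, cv2.ml.ROW_SAMPLE, responses)
retval, results, neigh_resp, dists = knn.findNearest(descriptor, 1)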