Python cv2.DescriptorExtractor_create() Examples

The following are 10 code examples of cv2.DescriptorExtractor_create(), collected from open-source projects. The source file, project, and license are noted above each example.
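cv2.DescriptorExtractor_create() is part of the string-based factory API of OpenCV 2.4.x; it was removed in OpenCV 3, where each algorithm exposes its own constructor (for example cv2.ORB_create()). A minimal sketch of the 2.4-style two-step detect/compute pipeline, assuming OpenCV 2.4 and a hypothetical input image example.jpg:

import cv2

# load a test image as grayscale (example.jpg is a placeholder, not a real asset)
image = cv2.imread("example.jpg", cv2.IMREAD_GRAYSCALE)

# OpenCV 2.4 factories: the detector finds keypoints, the extractor describes them
detector = cv2.FeatureDetector_create("ORB")
extractor = cv2.DescriptorExtractor_create("ORB")

keypoints = detector.detect(image)
keypoints, descriptors = extractor.compute(image, keypoints)
print(descriptors.shape)  # one descriptor row per keypoint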
Example #1
Source File: keypoint_matching_contrib.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE (STAR) is recommended as the fast detector:
        if check_cv_version_is_new():
            # In OpenCV 3/4, STAR/BRIEF live in the contrib module, which must be built separately.
            try:
                self.star_detector = cv2.xfeatures2d.StarDetector_create()
                self.brief_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
            except:
                import traceback
                traceback.print_exc()
                print("to use %s, you should build contrib with opencv3.0" % self.METHOD_NAME)
                raise NoModuleError("There is no %s module in your OpenCV environment !" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.star_detector = cv2.FeatureDetector_create("STAR")
            self.brief_extractor = cv2.DescriptorExtractor_create("BRIEF")

        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # options: cv2.NORM_L1, cv2.NORM_L2; cv2.NORM_HAMMING is not usable here
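For context, a hedged sketch of how a STAR/BRIEF pair and BFMatcher like the ones initialized above are typically applied to two images; the image variables and the ratio test below are illustrative assumptions, not part of the Airtest source:

import cv2

# Sketch only: assumes OpenCV 3/4 with the contrib modules, and two grayscale
# images im_source and im_search already loaded.
star = cv2.xfeatures2d.StarDetector_create()
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

kp1 = star.detect(im_source, None)
kp1, des1 = brief.compute(im_source, kp1)
kp2 = star.detect(im_search, None)
kp2, des2 = brief.compute(im_search, kp2)

matcher = cv2.BFMatcher(cv2.NORM_L1)
matches = matcher.knnMatch(des1, des2, k=2)
good = [m[0] for m in matches
        if len(m) == 2 and m[0].distance < 0.75 * m[1].distance]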
Example #2
Source File: stitcher.py    From dual-fisheye-video-stitching with MIT License
def detectAndDescribe(self, image):
        # check to see if we are using OpenCV 3.X
        if int(cv2.__version__[0]) >= 3:
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)

        # otherwise, we are using OpenCV 2.4.X
        else:
            # convert the image to grayscale
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # detect keypoints in the image
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to NumPy arrays
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return (kps, features) 
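For context, a hedged sketch of how two (kps, features) pairs returned by detectAndDescribe() are typically matched and turned into a homography; the stitcher's own matching code is not part of this excerpt, so the names below are illustrative only:

import cv2
import numpy as np

# Sketch only: (kpsA, featuresA) and (kpsB, featuresB) are assumed to come
# from two calls to detectAndDescribe() on overlapping images.
matcher = cv2.DescriptorMatcher_create("BruteForce")
raw_matches = matcher.knnMatch(featuresA, featuresB, 2)

# Lowe's ratio test keeps only distinctive matches
matches = [(m[0].trainIdx, m[0].queryIdx) for m in raw_matches
           if len(m) == 2 and m[0].distance < 0.75 * m[1].distance]

if len(matches) > 4:
    ptsA = np.float32([kpsA[i] for (_, i) in matches])
    ptsB = np.float32([kpsB[i] for (i, _) in matches])
    # estimate the homography that maps image A onto image B
    H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 4.0)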
Example #3
Source File: factories.py    From imutils with MIT License
def DescriptorExtractor_create(extractor, *args, **kw_args):
        """

        :param extractor: string of the type of descriptor extractor to return
        :param args: positional arguments for extractor
        :param kw_args: keyword arguments for extractor
        :return: the descriptor extractor object
        """
        try:
            extr = _EXTRACTOR_FACTORY[extractor.upper()]
        except KeyError:
            if extractor.upper() in _CONTRIB_FUNCS:
                msg = "OpenCV needs to be compiled with opencv_contrib to support {}".format(extractor)
                raise AttributeError(msg)
            raise AttributeError("{} not a supported extractor".format(extractor))

        return extr(*args, **kw_args) 
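A hedged usage sketch of the factory above; the _EXTRACTOR_FACTORY and _CONTRIB_FUNCS dictionaries are defined elsewhere in imutils and are not reproduced here, and the grayscale image gray is a placeholder:

# Sketch only: assumes imutils is installed and OpenCV provides the
# requested algorithms.
from imutils.feature import FeatureDetector_create, DescriptorExtractor_create

detector = FeatureDetector_create("SIFT")
extractor = DescriptorExtractor_create("SIFT")   # instantiated via the factory dict

kps = detector.detect(gray)
kps, descs = extractor.compute(gray, kps)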
Example #4
Source File: CVAnalysis_old.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International
def get_sift_descriptors (image, kpts):
	"""
		Function: get_sift_descriptors
		------------------------------
		given an image and a list of keypoints, this returns
		the SIFT descriptors computed at those keypoints
	"""
	sift_descriptor = cv2.DescriptorExtractor_create('SIFT')
	return sift_descriptor.compute (image, kpts)[1]







####################################################################################################
#################[ --- FINDING BOARD_IMAGE HOMOGRAPHY FROM POINTS CORRESPONDENCES --- ]#############
#################################################################################################### 
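A hedged sketch of calling the helper above; pairing it with a Harris-corner keypoint detector follows the companion constructor shown in Example #5 below and is otherwise an assumption:

# Sketch only: assumes OpenCV 2.4, where both factory functions exist,
# and a BGR board image already loaded with cv2.imread().
gray = cv2.cvtColor(board_image, cv2.COLOR_BGR2GRAY)
kpts = cv2.FeatureDetector_create('HARRIS').detect(gray)
descriptors = get_sift_descriptors(gray, kpts)   # one 128-d SIFT row per keypoint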
Example #5
Source File: CVAnalyzer.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International
def __init__ (self):
		"""
			PUBLIC: Constructor
			-------------------
			board_image: BoardImage object, the first frame
		"""
		#=====[ Step 1: set up feature extractors	]=====
		self.corner_detector = cv2.FeatureDetector_create ('HARRIS')
		self.sift_descriptor = cv2.DescriptorExtractor_create('SIFT')










	####################################################################################################
	##############################[ --- FIND BOARD CORNER CORRESPONDENCES --- ]#########################
	#################################################################################################### 
Example #6
Source File: factories.py    From imutils with MIT License
def DescriptorExtractor_create(method):
        method = method.upper()
        if method == "ROOTSIFT":
            return RootSIFT()
        return cv2.DescriptorExtractor_create(method) 
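A hedged usage sketch of this newer imutils wrapper: "ROOTSIFT" resolves to imutils' own RootSIFT class (shown in Example #7), while any other name is forwarded to the OpenCV 2.4 factory; gray and kps are placeholders for a grayscale image and detected keypoints:

# Sketch only.
extractor = DescriptorExtractor_create("ROOTSIFT")
kps, descs = extractor.compute(gray, kps)   # RootSIFT exposes the same compute() interface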
Example #7
Source File: rootsift.py    From imutils with MIT License
def __init__(self):
		# initialize the SIFT feature extractor for OpenCV 2.4
		if is_cv2():
			self.extractor = cv2.DescriptorExtractor_create("SIFT")

		# otherwise initialize the SIFT feature extractor for OpenCV 3+
		else:
			self.extractor = cv2.xfeatures2d.SIFT_create() 
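The excerpt above only shows the constructor; the rest of the class applies the RootSIFT transform of Arandjelović and Zisserman (L1-normalize each SIFT descriptor, then take the element-wise square root). A hedged sketch of that companion compute() method, assuming numpy is imported as np:

	def compute(self, image, kps, eps=1e-7):
		# compute raw SIFT descriptors for the supplied keypoints
		kps, descs = self.extractor.compute(image, kps)
		if len(kps) == 0:
			return ([], None)

		# RootSIFT: L1-normalize each descriptor, then take the square root
		descs /= (descs.sum(axis=1, keepdims=True) + eps)
		descs = np.sqrt(descs)
		return (kps, descs)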
Example #8
Source File: CVAnalysis.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International
def get_sift_descriptors (image, kpts):
	"""
		Function: get_sift_descriptors
		------------------------------
		given an image and a list of keypoints, this returns
		the SIFT descriptors computed at those keypoints
	"""
	sift_descriptor = cv2.DescriptorExtractor_create('SIFT')
	return sift_descriptor.compute (image, kpts)[1] 
Example #9
Source File: extractor.py    From omgh with MIT License
def __init__(self, storage):
        super(SIFT_SIFT_Extractor, self).__init__(storage)
        self.STORAGE_SUB_NAME = 'sift_sift'
        self.sub_folder = self.storage.get_sub_folder(
            self.STORAGE_SUPER_NAME, self.STORAGE_SUB_NAME)
        self.storage.ensure_dir(self.sub_folder)

        self._keypoint_detector = cv2.FeatureDetector_create("SIFT")
        self._keypoint_extractor = cv2.DescriptorExtractor_create("SIFT") 
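A hedged sketch of how the SIFT detector/extractor pair initialized above is typically applied per image inside such an extractor class; the method name extract_one is an assumption, not necessarily what omgh uses:

    def extract_one(self, img_gray):
        # OpenCV 2.4 two-step pipeline: detect keypoints, then compute descriptors
        keypoints = self._keypoint_detector.detect(img_gray)
        keypoints, descriptors = self._keypoint_extractor.compute(img_gray, keypoints)
        return descriptors   # N x 128 float32 array, one row per keypoint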
Example #10
Source File: classify.py    From DoNotSnap with GNU General Public License v3.0
def main(image_file):
    image = Image.open(image_file)
    if image is None:
        print 'Could not load image "%s"' % image_file
        return

    image = np.array(image.convert('RGB'), dtype=np.uint8)
    image = image[:, :, ::-1].copy()

    winSize = (200, 200)
    stepSize = 32

    roi = extractRoi(image, winSize, stepSize)
    weight_map, mask_scale = next(roi)

    samples = [(rect, scale, cv2.cvtColor(window, cv2.COLOR_BGR2GRAY))
               for rect, scale, window in roi]

    X_test = [window for rect, scale, window in samples]
    coords = [(rect, scale) for rect, scale, window in samples]

    detector = cv2.FeatureDetector_create('SURF')       # finds SURF keypoints
    extractor = cv2.DescriptorExtractor_create('SURF')  # computes SURF descriptors

    affine = AffineInvariant(detector, extractor)

    saved = pickle.load(open('classifier.pkl', 'rb'))

    feature_transform = saved['pipe']
    model = saved['model']

    print 'Extracting Affine transform invariant features'
    affine_invariant_features = affine.transform(X_test)
    print 'Matching features with template'
    features = feature_transform.transform(affine_invariant_features)

    rects = classify(model, features, coords, weight_map, mask_scale)
    for (left, top, right, bottom) in non_max_suppression_fast(rects, 0.4):
        cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 0), 10)
        cv2.rectangle(image, (left, top), (right, bottom), (32, 32, 255), 5)

    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.show()