Python cv2.FeatureDetector_create() Examples

The following are code examples showing how to use cv2.FeatureDetector_create(). They are extracted from open source Python projects and target the OpenCV 2.4.x API, where this factory function exists (it was removed in OpenCV 3, which exposes per-algorithm constructors such as cv2.ORB_create() instead); several examples also use Python 2 print statements.
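
As a minimal sketch of the basic pattern (assuming OpenCV 2.4.x and Python 2, to match the examples below; the file name is hypothetical):

import cv2

# Load an image and convert it to grayscale; most detectors expect a single channel.
image = cv2.imread('example.jpg')                # hypothetical input file
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Create a detector by name ('FAST', 'ORB', 'SIFT', 'MSER', 'Dense', ...) and detect keypoints.
detector = cv2.FeatureDetector_create('ORB')
keypoints = detector.detect(gray)
print len(keypoints), 'keypoints found'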

Example 1
Project: pybot   Author: spillai   File: feature_detection.py    (license)
def get_dense_detector(step=4, levels=7, scale=np.sqrt(2)): 
    """
    Standalone dense detector instantiation
    """
    detector = cv2.FeatureDetector_create('Dense')
    detector.setInt('initXyStep', step)
    # detector.setDouble('initFeatureScale', 0.5)

    detector.setDouble('featureScaleMul', scale)
    detector.setInt('featureScaleLevels', levels)

    detector.setBool('varyImgBoundWithScale', True)
    detector.setBool('varyXyStepWithScale', False)

    # detector = cv2.PyramidAdaptedFeatureDetector(detector, maxLevel=4)
    return detector 
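
A short usage sketch for the helper above (the image path is hypothetical; assumes OpenCV 2.4.x):

import cv2

gray = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)   # hypothetical image path
dense = get_dense_detector(step=8, levels=4)
keypoints = dense.detect(gray)                          # regular grid of keypoints at several scales
print 'dense sampler returned', len(keypoints), 'keypoints'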
Example 2
Project: pybot   Author: spillai   File: feature_detection.py    (license)
def get_detector(detector='dense', step=4, levels=7, scale=np.sqrt(2)): 
    """ Get opencv dense-sampler or specific feature detector """
    if detector == 'dense': 
        return get_dense_detector(step=step, levels=levels, scale=scale)
    else: 
        detector = cv2.FeatureDetector_create(detector)
        return cv2.PyramidAdaptedFeatureDetector(detector, maxLevel=levels) 
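
For any name other than 'dense', the wrapper returns the named OpenCV detector wrapped in a pyramid adapter. A brief usage sketch (the detector name and the gray image are illustrative):

pyramid_fast = get_detector('FAST', levels=3)
keypoints = pyramid_fast.detect(gray)   # 'gray' is any single-channel uint8 image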
Example 3
Project: code   Author: ActiveState   File: recipe-578261.py    (MIT License)
def test_feature_detector(detector, imfname):
    image = cv2.imread(imfname)
    forb = cv2.FeatureDetector_create(detector)
    # detect() crashes if the image is not greyscale, so convert before detecting
    t1 = time.time()
    kpts = forb.detect(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    t2 = time.time()
    print detector, 'number of KeyPoint objects', len(kpts), '(time', t2-t1, ')'

    return kpts 
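
One way to drive the timing helper above over several detector names (the image path is hypothetical; cv2 and time are assumed to be imported at module level):

for name in ('FAST', 'ORB', 'MSER', 'GFTT'):
    test_feature_detector(name, 'test.jpg')   # hypothetical image path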
Example 4
Project: Automatic_Group_Photography_Enhancement   Author: Yuliang-Zou   File: facegroup.py    (license)
def obtainSimilarityScore(img1,img2):
    detector = cv2.FeatureDetector_create("SIFT")
    descriptor = cv2.DescriptorExtractor_create("SIFT")
    skp = detector.detect(img1)
    skp, sd = descriptor.compute(img1, skp)
    tkp = detector.detect(img2)
    tkp, td = descriptor.compute(img2, tkp)
    num1 = 0
    for i in range(len(sd)):
        kp_value_min = np.inf
        kp_value_2min = np.inf
        for j in range(len(td)):
            # squared Euclidean distance between the two 128-dim SIFT descriptors
            kp_value = 0
            for k in range(128):
                kp_value = (sd[i][k]-td[j][k])*(sd[i][k]-td[j][k]) + kp_value
            if kp_value < kp_value_min:
                kp_value_2min = kp_value_min
                kp_value_min = kp_value
            elif kp_value < kp_value_2min:
                # keep the second-nearest distance up to date as well
                kp_value_2min = kp_value
        # ratio test: count only matches that are clearly better than the second best
        if kp_value_min < 0.8*kp_value_2min:
            num1 = num1+1
    num2 = 0
    for i in range(len(td)):
        kp_value_min = np.inf
        kp_value_2min = np.inf
        for j in range(len(sd)):
            kp_value = 0
            for k in range(128):
                kp_value = (td[i][k]-sd[j][k])*(td[i][k]-sd[j][k]) + kp_value
            if kp_value < kp_value_min:
                kp_value_2min = kp_value_min
                kp_value_min = kp_value
            elif kp_value < kp_value_2min:
                kp_value_2min = kp_value
        if kp_value_min < 0.8*kp_value_2min:
            num2 = num2+1
    K1 = num1*1.0/len(skp)
    K2 = num2*1.0/len(tkp)
    SimilarityScore  = 100*(K1+K2)*1.0/2    
    return SimilarityScore 
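
The nested loops above compute squared Euclidean distances between 128-dimensional SIFT descriptors and apply a ratio test by hand. Not part of the original project, but the same counting can be done much faster with OpenCV's brute-force matcher; a hedged sketch, assuming OpenCV 2.4.x:

import cv2

def count_ratio_test_matches(sd, td, ratio=0.8):
    # sd, td: float32 SIFT descriptor arrays with at least two rows each.
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    # For every descriptor in sd, find its two nearest neighbours in td.
    matches = matcher.knnMatch(sd, td, k=2)
    # Ratio test on L2 distances (the loops above compare squared distances,
    # so the 0.8 threshold is not numerically identical).
    return sum(1 for best, second in matches if best.distance < ratio * second.distance)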
Example 5
Project: papacamera   Author: 340StarObserver   File: feature.py    (license)
def calculate_feature(bin_data):
	"""
	calculate the feature data of an image

	parameter :
		'bin_data' is the binary stream format of an image
	return value :
		a tuple of ( keypoints, descriptors, (height,width) )
		keypoints is like [ pt1, pt2, pt3, ... ]
		descriptors is a numpy array
	"""
	buff=numpy.frombuffer(bin_data,numpy.uint8)
	img_obj=cv2.imdecode(buff,cv2.CV_LOAD_IMAGE_GRAYSCALE)
	surf=cv2.FeatureDetector_create("SURF")
	surf.setInt("hessianThreshold",400)
	surf_extractor=cv2.DescriptorExtractor_create("SURF")
	keypoints=surf.detect(img_obj,None)
	keypoints,descriptors=surf_extractor.compute(img_obj,keypoints)
	res_keypoints=[]
	for point in keypoints:
		res_keypoints.append(point.pt)
	del buff
	del surf
	del surf_extractor
	del keypoints
	return res_keypoints,numpy.array(descriptors),img_obj.shape 
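
A possible call of the function above on an image read from disk (the path is hypothetical; assumes OpenCV 2.4.x with SURF available):

with open('photo.jpg', 'rb') as f:                       # hypothetical image file
    keypoints, descriptors, (height, width) = calculate_feature(f.read())
print len(keypoints), 'keypoints, descriptor array shape:', descriptors.shape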
Example 6
Project: wi_wacv14   Author: VChristlein   File: feat_ex.py    (license)
def __init__(self, detector_name, feat_type):
    self.feat_type = feat_type
    self.detector = cv2.FeatureDetector_create(detector_name)
    self.descriptor_ex = cv2.DescriptorExtractor_create(feat_type)
Example 7
Project: OpenCV-Bright-Spots-Eye-Detection   Author: bodhwani   File: pupil_detect.py    (license)
def find_pupil(gray_image, minsize=.1, maxsize=.5):
    detector = cv2.FeatureDetector_create('MSER')
    features_all = detector.detect(gray_image)
    features_big = [feature for feature in features_all if feature.size > gray_image.shape[0]*minsize]
    features_small = [feature for feature in features_big if feature.size < gray_image.shape[0]*maxsize]
    if len(features_small) == 0:
        return None
    features_sorted = sort_features_by_brightness(gray_image, features_small)
    pupil = features_sorted[0]
    return (int(pupil.pt[0]), int(pupil.pt[1]), int(pupil.size/2)) 
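
A usage sketch for the pupil finder above (the image paths are hypothetical; sort_features_by_brightness is defined elsewhere in the project):

import cv2

gray = cv2.imread('eye.png', cv2.IMREAD_GRAYSCALE)   # hypothetical eye image
found = find_pupil(gray)
if found is not None:
    x, y, radius = found
    cv2.circle(gray, (x, y), radius, 255, 2)          # mark the detected pupil
    cv2.imwrite('eye_annotated.png', gray)            # hypothetical output path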
Example 8
Project: DoNotSnap   Author: AVGInnovationLabs   File: classify.py    (license)
def main(image_file):
    image = Image.open(image_file)
    if image is None:
        print 'Could not load image "%s"' % image_file
        return

    image = np.array(image.convert('RGB'), dtype=np.uint8)
    image = image[:, :, ::-1].copy()

    winSize = (200, 200)
    stepSize = 32

    roi = extractRoi(image, winSize, stepSize)
    weight_map, mask_scale = next(roi)

    samples = [(rect, scale, cv2.cvtColor(window, cv2.COLOR_BGR2GRAY))
               for rect, scale, window in roi]

    X_test = [window for rect, scale, window in samples]
    coords = [(rect, scale) for rect, scale, window in samples]

    detector = cv2.FeatureDetector_create('SURF')
    extractor = cv2.DescriptorExtractor_create('SURF')

    affine = AffineInvariant(detector, extractor)

    saved = pickle.load(open('classifier.pkl', 'rb'))

    feature_transform = saved['pipe']
    model = saved['model']

    print 'Extracting Affine transform invariant features'
    affine_invariant_features = affine.transform(X_test)
    print 'Matching features with template'
    features = feature_transform.transform(affine_invariant_features)

    rects = classify(model, features, coords, weight_map, mask_scale)
    for (left, top, right, bottom) in non_max_suppression_fast(rects, 0.4):
        cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 0), 10)
        cv2.rectangle(image, (left, top), (right, bottom), (32, 32, 255), 5)

    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.show() 
Example 9
Project: PyDatSet   Author: dnlcrl   File: gtsrb.py    (license)
def _extract_feature(X, feature):
    """Performs feature extraction
        :param X:       data (rows=images, cols=pixels)
        :param feature: which feature to extract
                        - None:   no feature is extracted
                        - "gray": grayscale features
                        - "rgb":  RGB features
                        - "hsv":  HSV features
                        - "surf": SURF features
                        - "hog":  HOG features
        :returns:       X (rows=samples, cols=features)
    """

    # transform color space
    if feature == 'gray' or feature == 'surf':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2GRAY) for x in X]
    elif feature == 'hsv':
        X = [cv2.cvtColor(x, cv2.COLOR_BGR2HSV) for x in X]

    # operate on smaller image
    small_size = (32, 32)
    X = [cv2.resize(x, small_size) for x in X]

    # extract features
    if feature == 'surf':
        surf = cv2.SURF(400)
        surf.upright = True
        surf.extended = True
        num_surf_features = 36

        # create dense grid of keypoints
        dense = cv2.FeatureDetector_create("Dense")
        kp = dense.detect(np.zeros(small_size).astype(np.uint8))

        # compute keypoints and descriptors
        kp_des = [surf.compute(x, kp) for x in X]

        # the second element is descriptor: choose first num_surf_features
        # elements
        X = [d[1][:num_surf_features, :] for d in kp_des]
    elif feature == 'hog':
        # histogram of gradients
        block_size = (small_size[0] / 2, small_size[1] / 2)
        block_stride = (small_size[0] / 4, small_size[1] / 4)
        cell_size = block_stride
        num_bins = 9
        hog = cv2.HOGDescriptor(small_size, block_size, block_stride,
                                cell_size, num_bins)
        X = [hog.compute(x) for x in X]
    elif feature is not None:
        # normalize all intensities to be between 0 and 1
        X = np.array(X).astype(np.float32) / 255

        # subtract mean
        X = [x - np.mean(x) for x in X]

    X = [x.flatten() for x in X]
    return X
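
A short, hedged sketch of calling the helper above on a small list of BGR images (the paths are hypothetical; assumes OpenCV 2.4.x, where cv2.SURF and the 'Dense' detector exist):

import cv2

# hypothetical image paths; any list of BGR images works
images = [cv2.imread(p) for p in ('sign1.png', 'sign2.png')]
X_hog = _extract_feature(images, 'hog')     # flattened HOG vectors
X_gray = _extract_feature(images, 'gray')   # flattened, mean-subtracted grayscale patches
print len(X_hog), 'samples, first HOG vector length:', len(X_hog[0])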