Python cv2.SIFT Examples

The following are 25 code examples of cv2.SIFT(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
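cv2.SIFT is the OpenCV 2.x entry point for the SIFT detector/descriptor. In OpenCV 3.x it moved to the contrib package as cv2.xfeatures2d.SIFT_create(), and since OpenCV 4.4 (after the SIFT patent expired) it is back in the main module as cv2.SIFT_create(); many of the examples below branch on exactly this. A minimal version-robust sketch (the file name is a placeholder):

import cv2

def create_sift(**kwargs):
    """Best-effort SIFT factory across OpenCV 2.x/3.x/4.x (illustrative only)."""
    if hasattr(cv2, 'SIFT_create'):      # OpenCV >= 4.4: SIFT in the main module
        return cv2.SIFT_create(**kwargs)
    if hasattr(cv2, 'xfeatures2d'):      # OpenCV 3.x: SIFT in the contrib module
        return cv2.xfeatures2d.SIFT_create(**kwargs)
    return cv2.SIFT(**kwargs)            # OpenCV 2.x

sift = create_sift(edgeThreshold=10)
img = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)  # placeholder path
keypoints, descriptors = sift.detectAndCompute(img, None)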
Example #1
Source File: keypoint_matching_contrib.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE is recommended as a fast detector:
        if check_cv_version_is_new():
            # OpenCV 3/4: SIFT is in the contrib module; you need to compile it separately.
            try:
                self.detector = cv2.xfeatures2d.SIFT_create(edgeThreshold=10)
            except:
                import traceback
                traceback.print_exc()
                raise NoModuleError("There is no %s module in your OpenCV environment, you need the contrib module!" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.detector = cv2.SIFT(edgeThreshold=10)

        # create FlannBasedMatcher object:
        self.matcher = cv2.FlannBasedMatcher({'algorithm': self.FLANN_INDEX_KDTREE, 'trees': 5}, dict(checks=50)) 
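The FlannBasedMatcher built above is normally driven through knnMatch() followed by Lowe's ratio test. A hedged, self-contained sketch (random arrays stand in for real SIFT descriptors; FLANN_INDEX_KDTREE = 1 is the standard kd-tree index id and 0.75 a common ratio, neither taken from this project):

import cv2
import numpy as np

FLANN_INDEX_KDTREE = 1  # assumption: standard FLANN kd-tree index id
matcher = cv2.FlannBasedMatcher({'algorithm': FLANN_INDEX_KDTREE, 'trees': 5},
                                dict(checks=50))
des1 = np.random.rand(50, 128).astype(np.float32)  # stand-ins for SIFT descriptors
des2 = np.random.rand(80, 128).astype(np.float32)
matches = matcher.knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.75 * n.distance]  # Lowe's ratio test
print('good matches:', len(good))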
Example #2
Source File: findobj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
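init_feature() relies on module-level FLANN constants that sit outside this excerpt; in the OpenCV find_obj.py sample they are FLANN_INDEX_KDTREE = 1 and FLANN_INDEX_LSH = 6. A hedged usage sketch (assumes an OpenCV 2.x-era stack, matching the cv2.SIFT()/cv2.SURF() calls above):

FLANN_INDEX_KDTREE = 1  # assumption: values from the OpenCV find_obj.py sample
FLANN_INDEX_LSH = 6

detector, matcher = init_feature('sift-flann')  # SIFT with a FLANN kd-tree matcher
if detector is None:
    print('unknown feature type')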
Example #3
Source File: find_obj.py    From airtest with BSD 3-Clause "New" or "Revised" License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #4
Source File: find_obj.py    From PyCV-time with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #5
Source File: find_obj.py    From ImageAnalysis with MIT License
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.SIFT()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.SURF(400)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB(400)
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher 
Example #6
Source File: auto.py    From airtest with BSD 3-Clause "New" or "Revised" License
def feature_similarity(image_1,image_2,threshold=0.7):
    """SIFT Feature based image similarity
    
    @param image_1: np.array(the first input image)
    @param image_2: np.array(the second input image)
    @param threshold: float(Lowe's ratio test threshold)
    @return similarity: float(in the range [0,1]; higher means more similar)
    @return good_match_num: int(the number of good match point pairs)
    """    
    kp1, des1 = _sift_extract(image_1)
    kp2, des2 = _sift_extract(image_2)
    if len(kp1) <= len(kp2):
        num = len(kp1)
    else:
        num = len(kp2)
    if num <= 0:
        similarity, good_match_num = 0.0, 0.0
    else:
        good_match, default = _searchAndmatch(des1, des2, threshold)
        good_match_num = float(len(good_match))
        similarity = good_match_num/num
    return similarity, good_match_num 
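A hedged usage sketch of feature_similarity (the image paths are placeholders; _sift_extract and _searchAndmatch are this project's helpers, shown in later examples):

import cv2

image_1 = cv2.imread('button_a.png')  # placeholder paths
image_2 = cv2.imread('button_b.png')
similarity, good_match_num = feature_similarity(image_1, image_2, threshold=0.7)
print('similarity=%.3f from %d good matches' % (similarity, good_match_num))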
Example #7
Source File: sift.py    From Airtest with Apache License 2.0
def _init_sift():
    """Make sure that there is SIFT module in OpenCV."""
    if cv2.__version__.startswith("3."):
        # OpenCV 3.x: SIFT is in the contrib module; you need to compile it separately.
        try:
            sift = cv2.xfeatures2d.SIFT_create(edgeThreshold=10)
        except:
            print("to use SIFT, you should build contrib with opencv3.0")
            raise NoSIFTModuleError("There is no SIFT module in your OpenCV environment !")
    else:
        # OpenCV2.x, just use it.
        sift = cv2.SIFT(edgeThreshold=10)

    return sift 
Example #8
Source File: screen_finder.py    From PyCV-time with MIT License
def __init__(self):
        
        self.screen_shape = None
        self._detector = cv2.SIFT()
        self._screen_img = None
        self._screen_features = None
        
        self.clear_found() 
Example #9
Source File: __init__.py    From aircv with MIT License
def main():
    print(cv2.IMREAD_COLOR)
    print(cv2.IMREAD_GRAYSCALE)
    print(cv2.IMREAD_UNCHANGED)
    imsrc = imread('testdata/1s.png')
    imsch = imread('testdata/1t.png')
    print(brightness(imsrc))
    print(brightness(imsch))

    pt = find(imsrc, imsch)
    #mark_point(imsrc, pt)
    #show(imsrc)
    imsrc = imread('testdata/2s.png')
    imsch = imread('testdata/2t.png')
    result = find_all_template(imsrc, imsch)
    print(result)
    pts = []
    for match in result:
        pt = match["result"]
        #mark_point(imsrc, pt)
        pts.append(pt)
    # pts.sort()
    #show(imsrc)
    # print pts
    # print sorted(pts, key=lambda p: p[0])

    imsrc = imread('yl/bg_half.png')
    imsch = imread('yl/q_small.png')
    print(result)
    print('SIFT count=', sift_count(imsch))
    print(find_sift(imsrc, imsch))
    print(find_all_sift(imsrc, imsch))
    print(find_all_template(imsrc, imsch))
    print(find_all(imsrc, imsch)) 
Example #10
Source File: __init__.py    From aircv with MIT License
def find_sift(im_source, im_search, min_match_count=4):
    '''
    SIFT keypoint matching
    '''
    res = find_all_sift(im_source, im_search, min_match_count, maxcnt=1)
    if not res:
        return None
    return res[0] 
Example #11
Source File: __init__.py    From aircv with MIT License
def _sift_instance(edge_threshold=100):
    if hasattr(cv2, 'SIFT'):
        return cv2.SIFT(edgeThreshold=edge_threshold)
    return cv2.xfeatures2d.SIFT_create(edgeThreshold=edge_threshold) 
Example #12
Source File: auto.py    From airtest with BSD 3-Clause "New" or "Revised" License
def hist_similarity(image_1, image_2):
    """color hist based image similarity
    
    @param image_1: np.array(the first input image)
    @param image_2: np.array(the second input image)
    @return similarity: float(in the range [0,1]; higher means more similar)
    """
    if image_1.ndim == 2 and image_2.ndim == 2:
        hist_1 = cv2.calcHist([image_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([image_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_CORREL)
    elif image_1.ndim == 3 and image_2.ndim == 3:
        """R,G,B split"""
        b_1, g_1, r_1 = cv2.split(image_1)
        b_2, g_2, r_2 = cv2.split(image_2)
        hist_b_1 = cv2.calcHist([b_1], [0], None, [256], [0.0, 255.0])
        hist_g_1 = cv2.calcHist([g_1], [0], None, [256], [0.0, 255.0])
        hist_r_1 = cv2.calcHist([r_1], [0], None, [256], [0.0, 255.0])
        hist_b_2 = cv2.calcHist([b_2], [0], None, [256], [0.0, 255.0])
        hist_g_2 = cv2.calcHist([g_2], [0], None, [256], [0.0, 255.0])
        hist_r_2 = cv2.calcHist([r_2], [0], None, [256], [0.0, 255.0])
        similarity_b = cv2.compareHist(hist_b_1,hist_b_2,cv2.cv.CV_COMP_CORREL)
        similarity_g = cv2.compareHist(hist_g_1,hist_g_2,cv2.cv.CV_COMP_CORREL)
        similarity_r = cv2.compareHist(hist_r_1,hist_r_2,cv2.cv.CV_COMP_CORREL)
        sum_bgr = similarity_b + similarity_g + similarity_r
        similarity = sum_bgr/3.
    else:
        gray_1 = cv2.cvtColor(image_1,cv2.cv.CV_RGB2GRAY)
        gray_2 = cv2.cvtColor(image_2,cv2.cv.CV_RGB2GRAY)
        hist_1 = cv2.calcHist([gray_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([gray_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_CORREL)
    return similarity

#SIFT based similarity 
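cv2.cv.CV_COMP_CORREL above is the OpenCV 2.x constant; the cv2.cv namespace was removed in OpenCV 3, where the equivalent is cv2.HISTCMP_CORREL. A hedged single-channel sketch with synthetic data:

import cv2
import numpy as np

img_1 = np.random.randint(0, 256, (64, 64), dtype=np.uint8)  # synthetic grayscale
img_2 = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
hist_1 = cv2.calcHist([img_1], [0], None, [256], [0.0, 255.0])
hist_2 = cv2.calcHist([img_2], [0], None, [256], [0.0, 255.0])
print(cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL))  # correlation in [-1, 1]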
Example #13
Source File: auto.py    From airtest with BSD 3-Clause "New" or "Revised" License
def _homography(src_pts,dst_pts,template_width,template_height,match_point=None):
    row,col,dim = dst_pts.shape
    if match_point:
        for i in range(row):
            match_point.append([int(dst_pts[i][0][0]),int(dst_pts[i][0][1])])
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    pts = np.float32([[0, 0], [0, template_height - 1], 
                    [template_width - 1, template_height - 1], 
                    [template_width - 1, 0]]).reshape(-1, 1, 2)
    # Find a transformation matrix that maps the query image onto the target image
    dst = cv2.perspectiveTransform(pts, M) 
    return dst

#SIFT + Homography 
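A hedged sanity check for _homography: with synthetic correspondences that are a pure translation by (10, 20), the projected template corners should come back shifted by the same amount.

import cv2
import numpy as np

# Five synthetic matches: template points shifted by (10, 20) in the scene.
src = np.float32([[0, 0], [99, 0], [0, 49], [99, 49], [50, 25]]).reshape(-1, 1, 2)
dst = src + np.float32([10, 20])
corners = _homography(src, dst, 100, 50)
print(corners.reshape(-1, 2))  # expected: the four template corners + (10, 20)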
Example #14
Source File: auto.py    From airtest with BSD 3-Clause "New" or "Revised" License
def _re_cluster_center(image, centers, match_points,
                        hori_distance, veti_distance, threshold):
    """refine center
    
    @param image: np.array(input target image)
    @param match_points: list(contains points that are matched)
    @param hori_distance: int(restrict the horizontal distance between points)
    @param veti_distance: int(restrict the vertical distance between points)
    @param threshold: int(the minimum number of points belonging to one center) 
    @return new_centers: list(sorted by ascending y coordinate)
    """  
    re_centers = []
    re_match_points = _reremove(match_points)
    for i in range(len(centers)):
        sum_x, sum_y, k = 0, 0, 0
        top_left_x = centers[i][0]-int(hori_distance/2)
        top_left_y = centers[i][1]-int(veti_distance/2)
        bottom_right_x = centers[i][0] + int(hori_distance/2)
        bottom_right_y = centers[i][1] + int(veti_distance/2)
        for j in range(len(re_match_points)):
            '''Select keypoints within a certain region around the center point'''
            if (abs(centers[i][0]-re_match_points[j][0])<int(hori_distance/2) and
                abs(centers[i][1]-re_match_points[j][1])<int(veti_distance/2) and
                0 < top_left_x and 0 < top_left_y and
                bottom_right_x<image.shape[1] and bottom_right_y<image.shape[0]):
                    sum_x = sum_x + re_match_points[j][0]
                    sum_y = sum_y + re_match_points[j][1]
                    k = k + 1
        if threshold <= k  and 0 < k:
            [x, y] = [int(float(sum_x)/k), int(float(sum_y)/k)]
            re_centers.append([x, y])
    new_centers = _sort_point_list(re_centers)
    return new_centers

#SIFT extraction 
Example #15
Source File: image_SIFT.py    From airtest with BSD 3-Clause "New" or "Revised" License
def _sift_extract(image):
    sift = cv2.SIFT()
    keypoints, descriptors = sift.detectAndCompute(image, None)
    return keypoints, descriptors

#search and match keypoint_pair 
Example #16
Source File: image_SIFT.py    From airtest with BSD 3-Clause "New" or "Revised" License
def _reremove(list):
    checked = []
    for e in list:
        if e not in checked: checked.append(e)
    return checked            
        
#SIFT extraction 
Example #17
Source File: __init__.py    From uiautomator2 with MIT License
def find_sift(self, im_source, im_search, min_match_count=4):
        '''
        SIFT keypoint matching
        '''
        res = self.find_all_sift(im_source, im_search, min_match_count, maxcnt=1)
        if not res:
            return None
        return res[0] 
Example #18
Source File: __init__.py    From uiautomator2 with MIT License
def _sift_instance(self, edge_threshold=100):
        if hasattr(cv2, 'SIFT'):
            return cv2.SIFT(edgeThreshold=edge_threshold)
        return cv2.xfeatures2d.SIFT_create(edgeThreshold=edge_threshold) 
Example #19
Source File: image.py    From airtest with BSD 3-Clause "New" or "Revised" License
def find_image_position(origin='origin.png', query='query.png', outfile=None):
    '''
    find all image positions
    @return None if not found else a tuple: (origin.shape, query.shape, positions)
    might raise Exception
    '''
    img1 = cv2.imread(query, 0) # query image(small)
    img2 = cv2.imread(origin, 0) # train image(big)

    # Initiate SIFT detector
    sift = cv2.SIFT()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)
    print len(kp1), len(kp2)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    # flann
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    print len(kp1), len(kp2), 'good cnt:', len(good)

    if len(good)*1.0/len(kp1) < 0.5:
    #if len(good)<MIN_MATCH_COUNT:
        print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
        return img2.shape, img1.shape, []

    queryPts = []
    trainPts = []
    for dm in good:
        queryPts.append(kp1[dm.queryIdx])
        trainPts.append(kp2[dm.trainIdx])

    img3 = cv2.drawKeypoints(img1, queryPts)
    cv2.imwrite('image/query.png', img3)

    img3 = cv2.drawKeypoints(img2, trainPts)
    point = _middlePoint(trainPts)
    print 'position in', point

    if outfile:
        edge = 10
        top_left = (point[0]-edge, point[1]-edge)
        bottom_right = (point[0]+edge, point[1]+edge)
        cv2.rectangle(img3, top_left, bottom_right, 255, 2)
        cv2.imwrite(outfile, img3)
    return img2.shape, img1.shape, [point] 
Example #20
Source File: auto.py    From airtest with BSD 3-Clause "New" or "Revised" License
def _homography_match(source_image, template_image, src_points, dst_points,
                     num, match_point=None, option=0):
    """ SIFT+Homography match
    
    @param source_image: np.array(input source image)
    @param template_image: np.array(input template image)
    @param src_points: np.array(template image's match keypoints)
    @param dst_points: np.array(source image's match keypoints)
    @param num: int(the number of template_image's keypoints)
    @param match_point: list(if not None, save the matched points)
    @param option: int(if it is zero, the similarity method will not be 
                        used to verify the accuracy of the detected center)
    @return [center_x, center_y]: list(match center)
    """  
    [height, width] = [template_image.shape[0], template_image.shape[1]]
    dst = _homography(src_points, dst_points, width,height,match_point)
    center, count = [0.0, 0.0], 0
    if dst.any():
        for i in range(dst.shape[0]):
            if (0 <= int(dst[i][0][0]) <= source_image.shape[1] and
                0 <= int(dst[i][0][1]) <= source_image.shape[0]):
                    center = center + dst[i][0]
                    count = count + 1
        if count < 1 or (center[0] == 0.0 and center[1] == 0.0):
            return None
        else:
            center_x = int(center[0] / count)
            center_y = int(center[1] / count)
            if option == 0:
               return [center_x, center_y]
            else:               
                re_center = [center_x,center_y]
                rect_img = _region_copy(source_image,re_center,width,height,1)
                hist_value = hist_similarity(rect_img, template_image)
                if DEBUG: print "342_hist_value: ", hist_value
                rect_img2 = _region_copy(source_image,re_center,width,height, 2)
                value,kp_num = feature_similarity(rect_img2,template_image,0.7)
                if DEBUG: print "345_sift_value and kp_num: ", value, kp_num
                if (hist_value<-0.0006 and 14< kp_num<=45 and 
                    (value < 0.32 or kp_num==45)): # 0.4 >> 0.32 
                    return None
                '''rule has been obtained from the experiments'''    
                if ((value>0.39) or (kp_num<=14 and 0.34<value) or (22<=kp_num) 
                    or ((kp_num <= 9) and num <= (10*kp_num))):
                        return [center_x, center_y]
                else:
                    rect_img3 = _region_copy(source_image,re_center,height,width,2)
                    val2,kp_num2 = feature_similarity(rect_img3,template_image,0.7)
                    if DEBUG: print "356_sift_value and kp_num: ", val2, kp_num2
                    if ((0.28<val2 and value<=val2 and 13<kp_num2) or (35<kp_num2  
                        and kp_num < kp_num2) or (kp_num == 14 and kp_num2==12 
                        and 0.25 <= value)):
                            return [center_x, center_y]
                    else:
                        return None

#image match with little match keypoints 
Example #21
Source File: roomba.py    From Roomba980-Python with MIT License
def match_outlines(self, orig_image, skewed_image):
        orig_image = np.array(orig_image)
        skewed_image = np.array(skewed_image)
        try:
            surf = cv2.xfeatures2d.SURF_create(400)
        except Exception:
            surf = cv2.SIFT(400)
        kp1, des1 = surf.detectAndCompute(orig_image, None)
        kp2, des2 = surf.detectAndCompute(skewed_image, None)

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)

        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good
                                  ]).reshape(-1, 1, 2)

            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # see https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html for details
            ss = M[0, 1]
            sc = M[0, 0]
            scaleRecovered = math.sqrt(ss * ss + sc * sc)
            thetaRecovered = math.atan2(ss, sc) * 180 / math.pi
            self.log.info("MAP: Calculated scale difference: %.2f, "
                          "Calculated rotation difference: %.2f" %
                          (scaleRecovered, thetaRecovered))

            #deskew image
            im_out = cv2.warpPerspective(skewed_image, np.linalg.inv(M),
                (orig_image.shape[1], orig_image.shape[0]))
            return im_out

        else:
            self.log.warn("MAP: Not  enough  matches are found   -   %d/%d"
                          % (len(good), MIN_MATCH_COUNT))
            return skewed_image 
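The scale/rotation recovery above reads the similarity part of the homography: with the convention of the linked MathWorks example, M[0, 0] = s*cos(theta) and M[0, 1] = s*sin(theta), so s = sqrt(M[0,1]**2 + M[0,0]**2) and theta = atan2(M[0,1], M[0,0]). A hedged sanity check that builds M with that convention and recovers the known values:

import math
import numpy as np

theta = math.radians(30.0)  # known rotation
scale = 1.5                 # known scale
M = np.array([[ scale * math.cos(theta), scale * math.sin(theta), 0.0],
              [-scale * math.sin(theta), scale * math.cos(theta), 0.0],
              [ 0.0,                     0.0,                     1.0]])
ss, sc = M[0, 1], M[0, 0]
print(math.sqrt(ss * ss + sc * sc))        # ~1.5: scale recovered
print(math.atan2(ss, sc) * 180 / math.pi)  # ~30.0: rotation recovered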
Example #22
Source File: image.py    From ATX with Apache License 2.0
def find_image_position(origin='origin.png', query='query.png', outfile=None):
    '''
    find all image positions
    @return None if not found else a tuple: (origin.shape, query.shape, positions)
    might raise Exception
    '''
    img1 = cv2.imread(query, 0) # query image(small)
    img2 = cv2.imread(origin, 0) # train image(big)

    # Initiate SIFT detector
    sift = cv2.SIFT()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)
    print len(kp1), len(kp2)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 50)

    # flann
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    print len(kp1), len(kp2), 'good cnt:', len(good)

    if len(good)*1.0/len(kp1) < 0.5:
    #if len(good)<MIN_MATCH_COUNT:
        print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
        return img2.shape, img1.shape, []

    queryPts = []
    trainPts = []
    for dm in good:
        queryPts.append(kp1[dm.queryIdx])
        trainPts.append(kp2[dm.trainIdx])

    img3 = cv2.drawKeypoints(img1, queryPts)
    cv2.imwrite('image/query.png', img3)

    img3 = cv2.drawKeypoints(img2, trainPts)
    point = _middlePoint(trainPts)
    print 'position in', point

    if outfile:
        edge = 10
        top_left = (point[0]-edge, point[1]-edge)
        bottom_right = (point[0]+edge, point[1]+edge)
        cv2.rectangle(img3, top_left, bottom_right, 255, 2)
        cv2.imwrite(outfile, img3)
    return img2.shape, img1.shape, [point] 
Example #23
Source File: 04_sift_features.py    From Practical-Computer-Vision with MIT License
def compute_fast_det(img, is_nms=True, thresh = 10):
    gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Initiate FAST object with default values
    fast = cv2.FastFeatureDetector_create() #FastFeatureDetector()

    # find and draw the keypoints
    if not is_nms:
        fast.setNonmaxSuppression(0)

    fast.setThreshold(thresh)

    kp = fast.detect(img,None)
    cv2.drawKeypoints(img, kp, img, color=(255,0,0))
    
    

    # cv2.SIFT() is the OpenCV 2.x API; with the OpenCV 3.x calls used above,
    # SIFT lives in the contrib module instead:
    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)

    img = cv2.drawKeypoints(gray, kp, None)

    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show() 
Example #24
Source File: dataset_navcam.py    From DEMUD with Apache License 2.0
def extract_sift(cls, rawfilename, winsize, nbins):
    """extract_sift(rawfilename, winsize, nbins)

    Read in raw pixel data from rawfilename (.ppm).
    Create a histogram around each pixel to become
    the feature vector for that observation (pixel).
    Pickle the result and save it to filename.
    Note: does NOT update object fields.
    Follow this with a call to readin().
    """
    if cls._VL_SIFT_:
      # VLSIFT matlab 

      im  = Image.open(rawfilename)
      (width, height) = im.size

      mlab.bb_sift(N.array(im), 'temp.mat')
      sift_features = scipy.io.loadmat('temp.mat')
      kp = sift_features['f_']
      sift_features = sift_features['d_']
      sift_features  = scipy.concatenate((sift_features.transpose(), kp[2:4].transpose()), 1).transpose()

      labels = [];
      for ikp in kp.transpose():
        (x,y) = ikp[0:2]
        labels    += ['(%d,%d)' % (y,x)]
    else:
      #Opencv SIFT 
      img = cv2.imread(rawfilename)
      gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
      height, width = gray.shape

      # Computing SIFT
      sift = cv2.SIFT(edgeThreshold = 3)
      kp, des = sift.detectAndCompute(gray,None)

      labels  = []
      sift_features = N.transpose(des)
      scale_angle = []

      for ikp in kp:
        (x,y) = ikp.pt
        scale_angle.append([ikp.size/12, ikp.angle])
        labels    += ['(%d,%d)' % (y,x)]
    
      scale_angle = N.array(scale_angle)
      sift_features  = scipy.concatenate((sift_features.transpose(), scale_angle), 1).transpose()

    return (sift_features, labels, width, height) 
Example #25
Source File: servoing_designed_features_quad_panda3d_env.py    From citysim3d with MIT License
def __init__(self, action_space, feature_type=None, filter_features=None,
                 max_time_steps=100, distance_threshold=4.0, **kwargs):
        """
        filter_features indicates whether to filter out key points that are not
        on the object in the current image. Key points in the target image are
        always filtered out.
        """
        SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
        ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps, distance_threshold=distance_threshold)

        lens = self.camera_node.node().getLens()
        self._observation_space.spaces['points'] = BoxSpace(np.array([-np.inf, lens.getNear(), -np.inf]),
                                                            np.array([np.inf, lens.getFar(), np.inf]))
        film_size = tuple(int(s) for s in lens.getFilmSize())
        self.mask_camera_sensor = Panda3dMaskCameraSensor(self.app, (self.skybox_node, self.city_node),
                                                          size=film_size,
                                                          near_far=(lens.getNear(), lens.getFar()),
                                                          hfov=lens.getFov())
        for cam in self.mask_camera_sensor.cam:
            cam.reparentTo(self.camera_sensor.cam)

        self.filter_features = True if filter_features is None else filter_features
        self._feature_type = feature_type or 'sift'
        if cv2.__version__.split('.')[0] == '3':
            from cv2.xfeatures2d import SIFT_create, SURF_create
            from cv2 import ORB_create
            if self.feature_type == 'orb':
                # https://github.com/opencv/opencv/issues/6081
                cv2.ocl.setUseOpenCL(False)
        else:
            SIFT_create = cv2.SIFT
            SURF_create = cv2.SURF
            ORB_create = cv2.ORB
        if self.feature_type == 'sift':
            self._feature_extractor = SIFT_create()
        elif self.feature_type == 'surf':
            self._feature_extractor = SURF_create()
        elif self.feature_type == 'orb':
            self._feature_extractor = ORB_create()
        else:
            raise ValueError("Unknown feature extractor %s" % self.feature_type)
        if self.feature_type == 'orb':
            self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
        self._target_key_points = None
        self._target_descriptors = None
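A hedged, self-contained sketch of the cross-check matching configured above: with crossCheck=True, BFMatcher.match() keeps only mutual nearest neighbors, a common alternative to knnMatch() plus a ratio test (random arrays stand in for real SIFT descriptors):

import cv2
import numpy as np

matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
des1 = np.random.rand(40, 128).astype(np.float32)  # stand-ins for SIFT descriptors
des2 = np.random.rand(60, 128).astype(np.float32)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)
print(len(matches), 'cross-checked matches')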