Python cv2.UMat() Examples
The following are 26 code examples of cv2.UMat().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module cv2, or try the search function.
Example #1
Source File: segmentation_mask.py From Clothing-Detection with GNU General Public License v3.0 | 6 votes |
def _findContours(self):
    """Convert each binary mask in ``self.masks`` into flattened polygon contours.

    Returns a list (one entry per mask) of lists of flat [x0, y0, x1, y1, ...]
    coordinate lists, one per external contour found on that mask.
    """
    per_mask_polys = []
    for raw_mask in self.masks.detach().numpy():
        umat = cv2.UMat(raw_mask)
        found, _hierarchy = cv2_util.findContours(
            umat, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        polys = []
        for piece in found:
            # each contour must be a plain (N, 1, 2) point array — no nesting
            assert len(piece.shape) == 3
            assert piece.shape[1] == 1, "Hierarchical contours are not allowed"
            polys.append(piece.reshape(-1).tolist())
        per_mask_polys.append(polys)
    return per_mask_polys
Example #2
Source File: segmentation_mask.py From DF-Traffic-Sign-Identification with MIT License | 6 votes |
def _findContours(self):
    """Convert each binary mask in ``self.masks`` into flattened polygon contours.

    Returns a list (one entry per mask) of lists of flat [x0, y0, x1, y1, ...]
    coordinate lists, one per external contour found on that mask.
    """
    per_mask_polys = []
    for raw_mask in self.masks.detach().numpy():
        umat = cv2.UMat(raw_mask)
        found, _hierarchy = cv2.findContours(
            umat, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        polys = []
        for piece in found:
            # each contour must be a plain (N, 1, 2) point array — no nesting
            assert len(piece.shape) == 3
            assert piece.shape[1] == 1, "Hierarchical contours are not allowed"
            polys.append(piece.reshape(-1).tolist())
        per_mask_polys.append(polys)
    return per_mask_polys
Example #3
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 6 votes |
def get_name(img):
    """OCR the name field: red channel -> contrast stretch -> adaptive threshold -> Tesseract (chi_sim)."""
    print('name')
    _, _, channel = cv2.split(img)  # cv2.split converts the UMat back to a Mat
    channel = cv2.UMat(channel)
    channel = hist_equal(channel)
    channel = cv2.adaptiveThreshold(channel, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 151, 50)
    channel = img_resize(channel, 150)
    img = img_resize(img, 150)
    # PSM 7: treat the crop as a single text line
    return get_result_vary_length(channel, 'chi_sim', img, '--psm 7')
Example #4
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 6 votes |
def find_address(crop_gray, crop_org):
    """Locate the address field via template matching and return the cropped region as a UMat.

    The template image is the pixel-width-specific mask generated by generate_mask();
    the crop offsets are scaled by the module-level factor ``x``.
    """
    tpl = cv2.UMat(cv2.imread('address_mask_%s.jpg' % pixel_x, 0))
    tpl_w, tpl_h = cv2.UMat.get(tpl).shape[::-1]
    score_map = cv2.matchTemplate(crop_gray, tpl, cv2.TM_CCOEFF_NORMED)
    _, _, _, best = cv2.minMaxLoc(score_map)
    top_left = (best[0] + tpl_w, best[1] - int(20 * x))
    bottom_right = (top_left[0] + int(1700 * x), top_left[1] + int(550 * x))
    region = cv2.UMat.get(crop_org)[top_left[1] - 10:bottom_right[1], top_left[0] - 10:bottom_right[0]]
    cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)  # mark the match for debugging
    return cv2.UMat(region)
Example #5
Source File: UMatFileVideoStream.py From python-opencv-gpu-video with MIT License | 6 votes |
def __init__(self, path, queueSize=128):
    """Open *path* for threaded reading and pre-allocate UMat frame buffers."""
    self.stream = cv2.VideoCapture(path)
    self.stopped = False
    self.count = 0
    # queue of decoded frames handed to the consumer thread
    self.Q = Queue(maxsize=queueSize)
    # frame geometry is needed up front to size the UMat buffers; see
    # https://docs.opencv.org/4.1.0/d4/d15/group__videoio__flags__base.html#gaeb8dd9c89c10a5c63c139bf7c4f5704d
    self.width = int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.height = int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # this version stores images as UMat, so allocate one buffer per queue slot
    self.frames = [
        cv2.UMat(self.height, self.width, cv2.CV_8UC3) for _ in range(queueSize)
    ]
Example #6
Source File: segmentation_mask.py From EmbedMask with MIT License | 6 votes |
def _findContours(self):
    """Extract external contours from every binary mask in ``self.masks``.

    Returns one list per mask, each containing flat [x0, y0, x1, y1, ...]
    polygon coordinate lists.
    """
    results = []
    for arr in self.masks.detach().numpy():
        found, _ = cv2.findContours(
            cv2.UMat(arr), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        flattened = []
        for c in found:
            # contours must be simple (N, 1, 2) point arrays
            assert len(c.shape) == 3
            assert c.shape[1] == 1, "Hierarchical contours are not allowed"
            flattened.append(c.reshape(-1).tolist())
        results.append(flattened)
    return results
Example #7
Source File: segmentation_mask.py From maskrcnn-benchmark with MIT License | 6 votes |
def _findContours(self):
    """Extract external contours from every binary mask in ``self.masks``.

    Returns one list per mask, each containing flat [x0, y0, x1, y1, ...]
    polygon coordinate lists.
    """
    results = []
    for arr in self.masks.detach().numpy():
        found, _ = cv2_util.findContours(
            cv2.UMat(arr), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        flattened = []
        for c in found:
            # contours must be simple (N, 1, 2) point arrays
            assert len(c.shape) == 3
            assert c.shape[1] == 1, "Hierarchical contours are not allowed"
            flattened.append(c.reshape(-1).tolist())
        results.append(flattened)
    return results
Example #8
Source File: segmentation_mask.py From Parsing-R-CNN with MIT License | 6 votes |
def _findContours(self):
    """Convert each binary mask in ``self.masks`` into flattened polygon contours."""
    per_mask_polys = []
    for raw_mask in self.masks.detach().numpy():
        umat = cv2.UMat(raw_mask)
        found, _hierarchy = cv2.findContours(
            umat, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        polys = []
        for piece in found:
            # each contour must be a plain (N, 1, 2) point array — no nesting
            assert len(piece.shape) == 3
            assert piece.shape[1] == 1, "Hierarchical contours are not allowed"
            polys.append(piece.reshape(-1).tolist())
        per_mask_polys.append(polys)
    return per_mask_polys
Example #9
Source File: segmentation_mask.py From sampling-free with MIT License | 6 votes |
def _findContours(self):
    """Convert each binary mask in ``self.masks`` into flattened polygon contours."""
    per_mask_polys = []
    for raw_mask in self.masks.detach().numpy():
        umat = cv2.UMat(raw_mask)
        found, _hierarchy = cv2_util.findContours(
            umat, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        polys = []
        for piece in found:
            # each contour must be a plain (N, 1, 2) point array — no nesting
            assert len(piece.shape) == 3
            assert piece.shape[1] == 1, "Hierarchical contours are not allowed"
            polys.append(piece.reshape(-1).tolist())
        per_mask_polys.append(polys)
    return per_mask_polys
Example #10
Source File: segmentation_mask.py From DetNAS with MIT License | 6 votes |
def _findContours(self):
    """Extract external contours from every binary mask in ``self.masks``."""
    results = []
    for arr in self.masks.detach().numpy():
        found, _ = cv2_util.findContours(
            cv2.UMat(arr), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )
        flattened = []
        for c in found:
            # contours must be simple (N, 1, 2) point arrays
            assert len(c.shape) == 3
            assert c.shape[1] == 1, "Hierarchical contours are not allowed"
            flattened.append(c.reshape(-1).tolist())
        results.append(flattened)
    return results
Example #11
Source File: trackgesture.py From CNNGestureRecognizer with MIT License | 5 votes |
def binaryMask(frame, x0, y0, width, height, framecount, plot):
    """Extract a binarized hand mask from the ROI and dispatch it for prediction.

    Draws the ROI rectangle on *frame*, converts the ROI to grayscale, blurs,
    adaptive-thresholds and Otsu-thresholds it, then — depending on the module
    globals — saves the result, hands it to the recognizer on a worker thread
    every 5th frame, or visualizes a network layer.

    Returns the binary mask image (res).
    """
    global guessGesture, visualize, mod, saveImg
    cv2.rectangle(frame, (x0,y0),(x0+width,y0+height),(0,255,0),1)
    #roi = cv2.UMat(frame[y0:y0+height, x0:x0+width])
    roi = frame[y0:y0+height, x0:x0+width]
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),2)
    th3 = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,2)
    # NOTE(review): minValue is a module-level global — confirm it is defined before first call
    ret, res = cv2.threshold(th3, minValue, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        #ores = cv2.UMat.get(res)
        # run prediction off the UI thread so frame capture keeps up
        t = threading.Thread(target=myNN.guessGesture, args = [mod, res])
        t.start()
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False
    return res

#%%
# This is the new mask mode. It simply tries to remove the background content by taking an image of the
# background and subtracts it from the new frame contents of the ROI window.
# So in order to use it correctly, keep the contents of ROI window stable and without your hand in it
# and then press 'x' key. If you can see the contents of ROI window all blank then it means you are
# good to go for gesture prediction
Example #12
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def generate_mask(x):
    """Resize every field-mask template and save a pixel-width-specific copy.

    For each ID-card field reads '<field>_mask.jpg', rescales it with
    img_resize_x, and writes '<field>_mask_<pixel_x>.jpg'.  Used to generate
    the template images consumed by the find_* functions.
    """
    field_names = ('name', 'sex', 'nation', 'birth', 'year',
                   'month', 'day', 'address', 'idnum')
    for field in field_names:
        mask_pic = cv2.UMat(cv2.imread('%s_mask.jpg' % field))
        mask_pic = img_resize_x(mask_pic)
        cv2.imwrite('%s_mask_%s.jpg' % (field, pixel_x), mask_pic)
Example #13
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def img_resize_gray(imgorg):
    """Scale the image to a width of 3840*x and return (equalized-gray, resized-color).

    The 3840 base width matches the resolution the layout parameters were tuned for.
    """
    crop = imgorg
    shape = cv2.UMat.get(crop).shape
    src_h, src_w = shape[0], shape[1]
    new_h = int(src_h * 3840 * x / src_w)
    crop = cv2.resize(src=crop, dsize=(int(3840 * x), new_h),
                      interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
    return hist_equal(gray), crop
Example #14
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def find_name(crop_gray, crop_org):
    """Locate the name field via template matching and return the cropped region as a UMat."""
    tpl = cv2.UMat(cv2.imread('name_mask_%s.jpg' % pixel_x, 0))
    tpl_w, tpl_h = cv2.UMat.get(tpl).shape[::-1]
    score_map = cv2.matchTemplate(crop_gray, tpl, cv2.TM_CCOEFF_NORMED)
    _, _, _, best = cv2.minMaxLoc(score_map)
    top_left = (best[0] + tpl_w, best[1] - int(20 * x))
    bottom_right = (top_left[0] + int(700 * x), top_left[1] + int(300 * x))
    region = cv2.UMat.get(crop_org)[top_left[1] - 10:bottom_right[1], top_left[0] - 10:bottom_right[0]]
    cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)  # mark the match for debugging
    return cv2.UMat(region)
Example #15
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def find_idnum(crop_gray, crop_org):
    """Locate the ID-number field via template matching and return the cropped region as a UMat."""
    tpl = cv2.UMat(cv2.imread('idnum_mask_%s.jpg' % pixel_x, 0))
    tpl_w, tpl_h = cv2.UMat.get(tpl).shape[::-1]
    score_map = cv2.matchTemplate(crop_gray, tpl, cv2.TM_CCOEFF_NORMED)
    _, _, _, best = cv2.minMaxLoc(score_map)
    top_left = (best[0] + tpl_w, best[1] - int(20 * x))
    bottom_right = (top_left[0] + int(2300 * x), top_left[1] + int(300 * x))
    region = cv2.UMat.get(crop_org)[top_left[1] - 10:bottom_right[1], top_left[0] - 10:bottom_right[0]]
    cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)  # mark the match for debugging
    return cv2.UMat(region)
Example #16
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def get_sex(img):
    """OCR the single-character sex field from the red channel of the card crop."""
    _, _, channel = cv2.split(img)
    print('sex')
    channel = cv2.UMat(channel)
    channel = hist_equal(channel)
    channel = cv2.adaptiveThreshold(channel, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
    channel = img_resize(channel, 150)
    # exactly one character expected; PSM 10 = single character
    return get_result_fix_length(channel, 1, 'chi_sim', '--psm 10')
Example #17
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 5 votes |
def get_idnum_and_birth(img):
    """OCR the ID-number field; returns (idnum, birth) where birth is chars 6..13.

    For an 18-character Chinese ID number, positions 6..13 hold the YYYYMMDD
    birth date.
    """
    _, _, channel = cv2.split(img)
    print('idnum')
    channel = cv2.UMat(channel)
    channel = hist_equal(channel)
    channel = cv2.adaptiveThreshold(channel, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
    channel = img_resize(channel, 150)
    pil_img = Image.fromarray(cv2.UMat.get(channel).astype('uint8'))
    idnum_str = get_result_vary_length(channel, 'eng', pil_img, '--psm 8 ')
    return idnum_str, idnum_str[6:14]
Example #18
Source File: CThermal.py From Thermal_Image_Analysis with MIT License | 4 votes |
def get_spots(self, thermal_image):
    """Interactively collect spot markers on *thermal_image* and store their contours.

    Opens a HighGUI window, lets the user click spots (via CFlir.draw_spots
    mouse callback) until Enter is pressed, then thresholds the marked image
    and saves the resulting contours in ``self.spots``.
    """
    CFlir.drawing = True
    image_copy = thermal_image.copy()
    original_copy = image_copy.copy()
    # grayscale input: build a colormapped copy to draw on instead
    if len(original_copy.shape) < 3:
        cmap_copy = cv.applyColorMap(original_copy, cv.COLORMAP_JET)
    point = []         # current cursor position, filled by the mouse callback
    spot_points = []   # committed spot centers
    flag = [True]      # callback signals a completed click by setting this False
    cv.namedWindow('Image')
    cv.setMouseCallback('Image', CFlir.draw_spots, (point, flag) )
    while(1):
        image_copy = original_copy.copy()
        for i in range(0,len(spot_points)):
            cv.circle(image_copy, spot_points[i] , 5, 0, -1)
            # cmap_copy only exists for grayscale input; fall back to the original
            try:
                cv.circle(cmap_copy, spot_points[i] , 5, 0, -1)
            except:
                cv.circle(original_copy, spot_points[i] , 5, 0, -1)
        if len(point) > 0:
            cv.circle(image_copy, tuple(point) , 5, 0, -1)
            if flag[0] == False:
                spot_points.append(tuple(point))
                flag[0] = True
        cv.imshow('Image', image_copy)
        k = cv.waitKey(1) & 0xff
        # Enter (13) or keypad Enter (141) finishes spot selection
        if k == 13 or k == 141 :
            break
    CFlir.drawing = False
    cv.destroyAllWindows()
    # origi_copy = cv.UMat(origi_copy)
    if len(original_copy.shape) == 3:
        gray = cv.cvtColor(original_copy, cv.COLOR_BGR2GRAY)
    else:
        gray = cv.cvtColor(cmap_copy, cv.COLOR_BGR2GRAY)
    # the drawn spots are near-black, so an inverted threshold isolates them
    ret,thresh = cv.threshold(gray, 10, 255, cv.THRESH_BINARY_INV)
    contours, hierarchy = cv.findContours(thresh,cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE )
    self.spots = contours
Example #19
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 4 votes |
def hist_equal(img):
    """Contrast-stretch a grayscale UMat via a histogram-based lookup table.

    Finds the lowest and highest non-empty histogram bins and linearly maps
    that range onto [0, 255], clamping everything outside it. Returns a UMat.
    """
    # clahe_size = 8
    # clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(clahe_size, clahe_size))
    # result = clahe.apply(img)
    #test
    #result = cv2.equalizeHist(img)
    image = img.get()  # UMat to Mat
    # result = cv2.equalizeHist(image)
    lut = np.zeros(256, dtype = image.dtype )  # create an empty lookup table
    #lut = np.zeros(256)
    hist= cv2.calcHist([image],  # compute the image histogram
        [0],      # channel used
        None,     # no mask applied
        [256],    # it is a 1D histogram
        [0,256])
    minBinNo, maxBinNo = 0, 255
    # find the first non-empty histogram bin from the left
    for binNo, binValue in enumerate(hist):
        if binValue != 0:
            minBinNo = binNo
            break
    # find the first non-empty histogram bin from the right
    for binNo, binValue in enumerate(reversed(hist)):
        if binValue != 0:
            maxBinNo = 255-binNo
            break
    #print minBinNo, maxBinNo
    # build the lookup table: clamp below/above, linearly stretch in between
    # NOTE(review): a constant image gives minBinNo == maxBinNo and a zero division — confirm inputs
    for i,v in enumerate(lut):
        if i < minBinNo:
            lut[i] = 0
        elif i > maxBinNo:
            lut[i] = 255
        else:
            lut[i] = int(255.0*(i-minBinNo)/(maxBinNo-minBinNo)+0.5)
    # apply via cv2.LUT: image -- input image, lut -- lookup table
    #print lut
    result = cv2.LUT(image, lut)
    #print type(result)
    #showimg(result)
    return cv2.UMat(result)
Example #20
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 4 votes |
def get_result_vary_length(red, langset, org_img, custom_config=''): red_org = red # cv2.fastNlMeansDenoising(red, red, 4, 7, 35) rec, red = cv2.threshold(red, 127, 255, cv2.THRESH_BINARY_INV) image, contours, hierarchy = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # print(len(contours)) # 描边一次可以减少噪点 cv2.drawContours(red, contours, -1, (255, 255, 255), 1) color_img = cv2.cvtColor(red, cv2.COLOR_GRAY2BGR) numset_contours = [] height_list=[] width_list=[] for cnt in contours: x, y, w, h = cv2.boundingRect(cnt) height_list.append(h) # print(h,w) width_list.append(w) height_list.remove(max(height_list)) width_list.remove(max(width_list)) height_threshold = 0.70*max(height_list) width_threshold = 1.4 * max(width_list) # print('height_threshold:'+str(height_threshold)+'width_threshold:'+str(width_threshold)) big_rect=[] for cnt in contours: x, y, w, h = cv2.boundingRect(cnt) if h > height_threshold and w < width_threshold: # print(h,w) numset_contours.append((x, y, w, h)) big_rect.append((x, y)) big_rect.append((x + w, y + h)) big_rect_nparray = np.array(big_rect, ndmin=3) x, y, w, h = cv2.boundingRect(big_rect_nparray) # imgrect = cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2) # showimg(imgrect) # showimg(cv2.UMat.get(org_img)[y:y + h, x:x + w]) result_string = '' result_string += pytesseract.image_to_string(cv2.UMat.get(red_org)[y-10:y + h + 10, x-10:x + w + 10], lang=langset, config=custom_config) print(result_string) # cv2.imwrite('varylength.png', cv2.UMat.get(org_img)[y:y + h, x:x + w]) # cv2.imwrite('varylengthred.png', cv2.UMat.get(red_org)[y:y + h, x:x + w]) # numset_contours.sort(key=lambda num: num[0]) # for x, y, w, h in numset_contours: # result_string += pytesseract.image_to_string(cv2.UMat.get(color_img)[y:y + h, x:x + w], lang=langset, config=custom_config) return punc_filter(result_string)
Example #21
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 4 votes |
def get_result_fix_length(red, fix_length, langset, custom_config=''):
    """OCR a field known to contain exactly *fix_length* characters.

    Iteratively adjusts a contour-height threshold until exactly fix_length
    character boxes are found (capped at 50 attempts), sorts them left to
    right, and OCRs each box with Tesseract. Returns the concatenated string.
    """
    red_org = red
    cv2.fastNlMeansDenoising(red, red, 4, 7, 35)
    rec, red = cv2.threshold(red, 127, 255, cv2.THRESH_BINARY_INV)
    # NOTE(review): 3-value unpack is the OpenCV 3.x findContours API; OpenCV 4.x returns 2 values — confirm pinned version
    image, contours, hierarchy = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # print(len(contours))
    # drawing the contours once helps reduce noise
    cv2.drawContours(red, contours, -1, (0, 255, 0), 1)
    color_img = cv2.cvtColor(red, cv2.COLOR_GRAY2BGR)
    # for x, y, w, h in contours:
    #     imgrect = cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    #     showimg(imgrect)
    h_threshold = 54
    numset_contours = []
    calcu_cnt = 1
    # first pass with the default height threshold
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if h > h_threshold:
            numset_contours.append((x, y, w, h))
    # adjust the threshold until exactly fix_length boxes remain
    while len(numset_contours) != fix_length:
        if calcu_cnt > 50:
            print(u'计算次数过多!目前阈值为:', h_threshold)
            break
        numset_contours = []
        calcu_cnt += 1
        # NOTE(review): numset_contours was just reset, so the ">" branch below
        # can never fire and h_threshold only ever decreases — looks like the
        # comparisons were meant to run before the reset; confirm against upstream
        if len(numset_contours) > fix_length:
            h_threshold += 1
            contours_cnt = 0
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                if h > h_threshold:
                    contours_cnt += 1
                    numset_contours.append((x, y, w, h))
        if len(numset_contours) < fix_length:
            h_threshold -= 1
            contours_cnt = 0
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                if h > h_threshold:
                    contours_cnt += 1
                    numset_contours.append((x, y, w, h))
    result_string = ''
    # sort boxes left-to-right before OCR so characters come out in order
    numset_contours.sort(key=lambda num: num[0])
    for x, y, w, h in numset_contours:
        result_string += pytesseract.image_to_string(cv2.UMat.get(red_org)[y-10:y + h + 10, x-10:x + w + 10], lang=langset, config=custom_config)
        # print(new_r)
        # cv2.imwrite('fixlengthred.png', cv2.UMat.get(red_org)[y-10:y + h +10 , x-10:x + w + 10])
    print(result_string)
    return result_string
Example #22
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 4 votes |
def get_nation(img):
    """OCR the single-character nationality field from the red channel of the card crop."""
    _, _, channel = cv2.split(img)
    print('nation')
    channel = cv2.UMat(channel)
    channel = hist_equal(channel)
    channel = cv2.adaptiveThreshold(channel, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
    channel = img_resize(channel, 150)
    # exactly one character expected; PSM 10 = single character
    return get_result_fix_length(channel, 1, 'chi_sim', '--psm 10')
Example #23
Source File: idcardocr.py From idcardocr with GNU General Public License v3.0 | 4 votes |
def find_sex(crop_gray, crop_org):
    """Locate the sex field via template matching and return the cropped region as a UMat."""
    tpl = cv2.UMat(cv2.imread('sex_mask_%s.jpg' % pixel_x, 0))
    tpl_w, tpl_h = cv2.UMat.get(tpl).shape[::-1]
    score_map = cv2.matchTemplate(crop_gray, tpl, cv2.TM_CCOEFF_NORMED)
    _, _, _, best = cv2.minMaxLoc(score_map)
    top_left = (best[0] + tpl_w, best[1] - int(20 * x))
    bottom_right = (top_left[0] + int(300 * x), top_left[1] + int(300 * x))
    region = cv2.UMat.get(crop_org)[top_left[1] - 10:bottom_right[1], top_left[0] - 10:bottom_right[0]]
    cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)  # mark the match for debugging
    return cv2.UMat(region)
Example #24
Source File: findidcard.py From idcardocr with GNU General Public License v3.0 | 4 votes |
def find(self, img2_name):
    """Locate an ID card in *img2_name* via SIFT + FLANN against the card template.

    Matches keypoints between the template ('idcard_mask.jpg') and the input,
    estimates a homography with RANSAC, and warps the input so the card fills
    the template's frame. Returns the rectified color image.
    """
    print(u'进入身份证模版匹配流程...')
    img1_name = 'idcard_mask.jpg'
    MIN_MATCH_COUNT = 10
    img1 = cv2.UMat(cv2.imread(img1_name, 0))  # queryImage in Gray
    img1 = self.img_resize(img1, 640)
    # self.showimg(img1)
    #img1 = idocr.hist_equal(img1)
    img2 = cv2.UMat(cv2.imread(img2_name, 0))  # trainImage in Gray
    # print(img2.get().shape)
    img2 = self.img_resize(img2, 1920)
    #img2 = idocr.hist_equal(img2)
    img_org = cv2.UMat(cv2.imread(img2_name))  # full-color copy to warp at the end
    img_org = self.img_resize(img_org, 1920)
    # Initiate SIFT detector
    t1 = round(time.time() * 1000)
    sift = cv2.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1,None)
    kp2, des2 = sift.detectAndCompute(img2,None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks = 10)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1,des2,k=2)
    # store all the good matches as per Lowe's ratio test.
    # keep a match only if it is clearly better than the second-best candidate
    # (ratio 0.7); near-equal distances are likely noise
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    # reshape to (x, y) point arrays
    if len(good)>MIN_MATCH_COUNT:
        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
        # estimate the template->image homography; M is the transform matrix
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
        matchesMask = mask.ravel().tolist()
        # warp the input back into the template's frame using the inverse transform
        h,w = cv2.UMat.get(img1).shape
        M_r=np.linalg.inv(M)
        im_r = cv2.warpPerspective(img_org, M_r, (w,h))
        # self.showimg(im_r)
    else:
        # NOTE(review): im_r is never assigned on this path, so the return below
        # raises NameError when too few matches are found — confirm intended
        print("Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT))
        matchesMask = None
    #draw_params = dict(matchColor = (0,255,0), # draw matches in green color
    #                   singlePointColor = None,
    #                   matchesMask = matchesMask, # draw only inliers
    #                   flags = 2)
    #img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
    #plt.imshow(img3, 'gray'),plt.show()
    t2 = round(time.time() * 1000)
    print(u'查找身份证耗时:%s' % (t2 - t1))
    return im_r
Example #25
Source File: trackgesture.py From CNNGestureRecognizer with MIT License | 4 votes |
def skinMask(frame, x0, y0, width, height, framecount, plot):
    """Segment the hand in the ROI with an HSV skin-color mask and dispatch the result.

    Same contract as binaryMask: draws the ROI on *frame*, builds a grayscale
    masked image of the ROI, then saves / predicts / visualizes depending on
    the module globals. Returns the masked grayscale image.
    """
    global guessGesture, visualize, mod, saveImg
    # empirical HSV bounds for skin tones
    skin_lo = np.array([0, 50, 80])
    skin_hi = np.array([30, 200, 255])
    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # skin-color mask, cleaned up by open-like erode/dilate and smoothed
    mask = cv2.inRange(hsv, skin_lo, skin_hi)
    mask = cv2.erode(mask, skinkernel, iterations=1)
    mask = cv2.dilate(mask, skinkernel, iterations=1)
    mask = cv2.GaussianBlur(mask, (15, 15), 1)
    # keep only the skin pixels of the ROI, then drop to grayscale
    res = cv2.bitwise_and(roi, roi, mask=mask)
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        # predict off the UI thread so capture keeps up
        worker = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        worker.start()
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False
    return res

#%%
Example #26
Source File: trackgesture.py From CNNGestureRecognizer with MIT License | 4 votes |
def bkgrndSubMask(frame, x0, y0, width, height, framecount, plot):
    """Background-subtraction mask mode: diff the ROI against a stored background.

    Draws the ROI on *frame*, grayscales it, optionally refreshes the stored
    background, thresholds the absolute difference, cleans the mask, and then
    saves / predicts / visualizes depending on the module globals.
    Returns the masked grayscale ROI.

    Bug fix: the original computed GaussianBlur and erode into ``mask`` but
    each subsequent morphology call re-read ``diff``, silently discarding the
    blur and erode results — only ``dilate(diff)`` ever took effect. The steps
    now chain on ``mask`` (blur -> erode -> dilate) as clearly intended.
    """
    global guessGesture, takebkgrndSubMask, visualize, mod, bkgrnd, saveImg
    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)

    # Take background image when the user pressed 'x'
    if takebkgrndSubMask == True:
        bkgrnd = roi
        takebkgrndSubMask = False
        print("Refreshing background image for mask...")

    # Take a diff between roi & bkgrnd image contents
    diff = cv2.absdiff(roi, bkgrnd)
    _, diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    # chain the cleanup steps on the evolving mask (was: each step re-read diff)
    mask = cv2.GaussianBlur(diff, (3, 3), 5)
    mask = cv2.erode(mask, skinkernel, iterations=1)
    mask = cv2.dilate(mask, skinkernel, iterations=1)
    res = cv2.bitwise_and(roi, roi, mask=mask)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        t = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        t.start()
        #t.join()
        #myNN.update(plot)
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False
    return res

#%%