Python cv2.imshow() Examples
The following are 30 code examples of cv2.imshow().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module cv2, or try the search function.
Example #1
Source File: generate_coco_json.py From coco-json-converter with GNU General Public License v3.0 | 14 votes |
def __get_annotation__(self, mask, image=None):
    """Build a COCO-style annotation (segmentation, bbox, area) from a binary mask.

    :param mask: single-channel binary mask (uint8 expected by findContours)
    :param image: optional RGB image; when given, contours and the bounding
                  box are drawn on it and shown in a window
    :return: (segmentation polygon list, [x, y, w, h] bbox, RLE area)
    """
    # fix: OpenCV 3 returns (image, contours, hierarchy) while OpenCV 4
    # returns (contours, hierarchy); taking the last two items works on both
    contours, _ = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    segmentation = []
    for contour in contours:
        # Valid polygons have >= 6 coordinates (3 points)
        if contour.size >= 6:
            segmentation.append(contour.flatten().tolist())
    RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
    RLE = cocomask.merge(RLEs)
    # RLE = cocomask.encode(np.asfortranarray(mask))
    area = cocomask.area(RLE)
    [x, y, w, h] = cv2.boundingRect(mask)
    if image is not None:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow("", image)
        cv2.waitKey(1)
    return segmentation, [x, y, w, h], area
Example #2
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 10 votes |
def _lapulaseDetection(self, imgName):
    """Score image sharpness via the variance of the Laplacian response.

    :param imgName: file name of the image to score
    :return: blur score (higher generally means sharper)
    """
    # step1: preprocessing (grayscale + resized copy)
    img2gray, reImg = self.preImgOps(imgName)
    # step2: Laplacian operator; the variance of the response is the score
    resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
    score = resLap.var()
    # fix: the original passed the score as a second print() argument,
    # logging-style, so the literal "%s" format string was printed verbatim
    print("Laplacian score of given image is %s" % score)
    # step3: draw the score onto the image and save it (shared logic; the
    # original author noted it should be factored out of each detector)
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_lapulaseDetection_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #3
Source File: vachat.py From The-chat-room with MIT License | 10 votes |
def run(self):
    """Accept one remote video client and display its decoded frames.

    Each message is a native-size unsigned-long length header followed by a
    zlib-compressed, pickled frame. ESC closes the loop.
    """
    print("VEDIO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VEDIO client success connected...")
    buffer = "".encode("utf-8")
    # NOTE(review): native "L" size is platform-dependent; both endpoints
    # must run on matching platforms — confirm, or standardize on "=L"
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
    while True:
        # accumulate until the fixed-size length header is complete
        while len(buffer) < payload_size:
            buffer += conn.recv(81920)
        packed_size, buffer = buffer[:payload_size], buffer[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        # accumulate until the whole compressed frame has arrived
        while len(buffer) < msg_size:
            buffer += conn.recv(81920)
        zframe_data, buffer = buffer[:msg_size], buffer[msg_size:]
        frame = pickle.loads(zlib.decompress(zframe_data))
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # ESC quits
            break
Example #4
Source File: calibrate_camera.py From derplearning with MIT License | 9 votes |
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """Using a given calibration matrix, display the distorted, undistorted,
    and cropped frame for each camera capture.

    Press ESC or 'q' to exit (the original discarded waitKey's return value,
    so the loop could never be left from the keyboard).
    """
    scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ret, frame = camera.cap.read()
        assert ret
        distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        undistorted_frame = cv2.undistort(
            distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
        )
        # crop to the valid-pixel region reported by getOptimalNewCameraMatrix
        roi_x, roi_y, roi_w, roi_h = roi
        cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
        cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
        cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
        cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
        # fix: honor the pressed key so the user can quit
        key = cv2.waitKey(10) & 0xFF
        if key in (27, ord("q")):
            break
Example #5
Source File: demo_caffe.py From MobileNetv2-SSDLite with MIT License | 8 votes |
def detect(imgfile):
    """Run SSD detection on one image file and display the boxes.

    :param imgfile: path of the image to process
    :return: False when ESC was pressed (caller should stop), True otherwise
    """
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))  # HWC -> CHW layout for Caffe
    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)
    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        # clamp the label anchor so text near the image edge stays visible
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
Example #6
Source File: misc.py From Automatic-Identification-and-Counting-of-Blood-Cells with GNU General Public License v3.0 | 7 votes |
def show2(im, allobj):
    """Draw each object's bounding box on *im* and display until a key press.

    obj[1:5] are used as the two box corners (x1, y1, x2, y2).
    """
    for obj in allobj:
        top_left = (obj[1], obj[2])
        bottom_right = (obj[3], obj[4])
        cv2.rectangle(im, top_left, bottom_right, (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #7
Source File: camera_calibration.py From Advanced_Lane_Lines with MIT License | 7 votes |
def test():
    """Read the pickled calibration from disk, undistort a sample image,
    and show the original/undistorted pair side by side."""
    print("Reading the pickle file...")
    # fix: context manager guarantees the file closes even if load raises
    with open("./camera_cal.p", "rb") as pickle_file:
        dist_pickle = pickle.load(pickle_file)
    mtx = dist_pickle["mtx"]    # camera matrix
    dist = dist_pickle["dist"]  # distortion coefficients
    print("Reading the sample image...")
    img = cv2.imread('corners_founded/corners_found13.jpg')
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    # dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
    # Visualize undistortion (unused img_size local removed)
    print("Visulize the result...")
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    ax1.imshow(img), ax1.set_title('Original Image', fontsize=15)
    ax2.imshow(dst), ax2.set_title('Undistored Image', fontsize=15)
    plt.show()
Example #8
Source File: misc.py From Traffic-Signs-and-Object-Detection with GNU General Public License v3.0 | 7 votes |
def show(im, allobj, S, w, h, cellx, celly):
    """Convert S x S grid-cell predictions back to pixel boxes, draw them on
    *im* and display the result.

    obj[5] is the flat cell index, obj[1]/obj[2] the in-cell center offsets,
    obj[3]/obj[4] the square roots of the box width/height fractions.
    """
    for obj in allobj:
        col = obj[5] % S   # grid column
        row = obj[5] // S  # grid row
        centerx = (col + obj[1]) * cellx
        centery = (row + obj[2]) * celly
        ww = obj[3] ** 2 * w  # width stored as its square root
        hh = obj[4] ** 2 * h
        cv2.rectangle(
            im,
            (int(centerx - ww / 2), int(centery - hh / 2)),
            (int(centerx + ww / 2), int(centery + hh / 2)),
            (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #9
Source File: misc.py From Traffic_sign_detection_YOLO with MIT License | 7 votes |
def show(im, allobj, S, w, h, cellx, celly):
    """Convert S x S grid-cell predictions back to pixel boxes, draw them on
    *im* and display the result.

    obj[5] is the flat cell index, obj[1]/obj[2] the in-cell center offsets,
    obj[3]/obj[4] the square roots of the box width/height fractions.
    """
    for obj in allobj:
        col = obj[5] % S   # grid column
        row = obj[5] // S  # grid row
        centerx = (col + obj[1]) * cellx
        centery = (row + obj[2]) * celly
        ww = obj[3] ** 2 * w  # width stored as its square root
        hh = obj[4] ** 2 * h
        cv2.rectangle(
            im,
            (int(centerx - ww / 2), int(centery - hh / 2)),
            (int(centerx + ww / 2), int(centery + hh / 2)),
            (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #10
Source File: misc.py From Automatic-Identification-and-Counting-of-Blood-Cells with GNU General Public License v3.0 | 7 votes |
def show(im, allobj, S, w, h, cellx, celly):
    """Convert S x S grid-cell predictions back to pixel boxes, draw them on
    *im* and display the result.

    obj[5] is the flat cell index, obj[1]/obj[2] the in-cell center offsets,
    obj[3]/obj[4] the square roots of the box width/height fractions.
    """
    for obj in allobj:
        col = obj[5] % S   # grid column
        row = obj[5] // S  # grid row
        centerx = (col + obj[1]) * cellx
        centery = (row + obj[2]) * celly
        ww = obj[3] ** 2 * w  # width stored as its square root
        hh = obj[4] ** 2 * h
        cv2.rectangle(
            im,
            (int(centerx - ww / 2), int(centery - hh / 2)),
            (int(centerx + ww / 2), int(centery + hh / 2)),
            (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #11
Source File: detect.py From pedestrian-haar-based-detector with GNU General Public License v2.0 | 7 votes |
def main(): #IMG PATHS imagePath = "test3.jpg" cascPath = "cascades/haarcascade_pedestrian.xml" pplCascade = cv2.CascadeClassifier(cascPath) image = cv2.imread(imagePath) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) gray = normalize_grayimage(gray) pedestrians = pplCascade.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=10, minSize=(32,96), flags = cv2.cv.CV_HAAR_SCALE_IMAGE ) print "Found {0} ppl!".format(len(pedestrians)) #Draw a rectangle around the detected objects for (x, y, w, h) in pedestrians: cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2) cv2.imwrite("saida.jpg", image) cv2.imshow("Ppl found", image) cv2.waitKey(0) return 0
Example #12
Source File: demo_caffe_voc.py From MobileNetv2-SSDLite with MIT License | 7 votes |
def detect(imgfile):
    """Run SSD detection on one image file and display the boxes (VOC labels).

    :param imgfile: path of the image to process
    :return: False when ESC was pressed (caller should stop), True otherwise
    """
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))  # HWC -> CHW layout for Caffe
    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)
    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        # clamp the label anchor so text near the image edge stays visible
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
Example #13
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 6 votes |
def _SMD2Detection(self, imgName):
    """Gray-variance-product (SMD2) blur score.

    :param imgName: file name of the image to score
    :return: blur score
    """
    # step 1: preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    f = self._imageToMatrix(img2gray) / 255.0
    # SMD2 = sum over |f[i+1,j]-f[i,j]| * |f[i,j]-f[i,j+1]|.
    # fix: vectorized with numpy slicing instead of the original O(x*y)
    # Python double loop; the dead `score = score` no-op is dropped too.
    score = np.sum(np.abs(f[1:, :-1] - f[:-1, :-1]) *
                   np.abs(f[:-1, :-1] - f[:-1, 1:]))
    # step 3: draw the score onto the image and save it
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_SMD2Detection_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #14
Source File: objectDetectorYOLO.py From Traffic_sign_detection_YOLO with MIT License | 6 votes |
def processFrames(self):
    """Run the detector over every annotated frame (or the live video feed)
    and display predictions; 'q' or Ctrl-C stops the loop."""
    try:
        for entry in self.anotations_list:
            entry = entry.split(';')
            # either grab the next video frame or load the listed image
            if self.video:
                ret, imgcv = self.cap.read()
            else:
                imgcv = cv2.imread(
                    os.path.join('../', self.config["dataset"], entry[0]))
            result = self.tfnet.return_predict(imgcv)
            print(result)
            imgcv = self.drawBoundingBox(imgcv, result)
            cv2.imshow('detected objects', imgcv)
            if cv2.waitKey(10) == ord('q'):
                print('exitting loop')
                break
    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        print('exitting program')
Example #15
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 6 votes |
def _Variance(self, imgName):
    """Plain gray-level-variance blur score.

    :param imgName: file name of the image to score
    :return: blur score (variance of the grayscale values)
    """
    # step 1: preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    matrix = self._imageToMatrix(img2gray)
    # the score is simply the variance of the grayscale image
    score = np.var(matrix)
    # draw the score onto the image and save it (shared logic)
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_Variance_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #16
Source File: misc.py From Traffic_sign_detection_YOLO with MIT License | 6 votes |
def show2(im, allobj):
    """Draw each object's bounding box on *im* and display until a key press.

    obj[1:5] are used as the two box corners (x1, y1, x2, y2).
    """
    for obj in allobj:
        top_left = (obj[1], obj[2])
        bottom_right = (obj[3], obj[4])
        cv2.rectangle(im, top_left, bottom_right, (0, 0, 255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #17
Source File: facerec_from_webcam_faster.py From face-attendance-machine with Apache License 2.0 | 6 votes |
def face_process():
    """Locate, encode and name the faces in the module-global frame, draw the
    results onto it and display it; each stage is timed via myprint."""
    myprint("face process start", time.time())
    # Find all the faces and face encodings in the current frame of video.
    # The "cnn" model is available but "hog" is used here (faster).
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # KNN-style vote: rank * weight accumulated per class, top-1 wins
        name, dis = vote_class(face_encoding)
        face_names.append(name)
    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up: detection ran on a frame scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a filled label bar with the name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())
    # Display the resulting image
    cv2.imshow('Video', frame)
    myprint("face process end", time.time())
Example #18
Source File: esr_visualizer.py From Udacity-SDC-Radar-Driver-Micro-Challenge with MIT License | 6 votes |
def update(self, radarData):
    """Redraw the radar view: a fresh black canvas, two guide lines, and one
    circle per active track found in *radarData*.

    :param radarData: mapping with '<n>_track_range' / '<n>_track_angle'
                      entries for tracks 1..64
    """
    self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
    # fix: integer division — cv2 drawing functions require int coordinates,
    # and `/` yields floats on Python 3 (TypeError at runtime)
    half_w = self.width // 2
    cv2.line(self.img, (10, 0), (half_w - 5, self.height), (100, 255, 255))
    cv2.line(self.img, (self.width - 10, 0), (half_w + 5, self.height), (100, 255, 255))
    for track_number in range(1, 65):
        range_key = str(track_number) + '_track_range'
        if range_key in radarData:
            track_range = radarData[range_key]
            # angle shifted by 90 degrees then converted to radians
            track_angle = (float(radarData[str(track_number) + '_track_angle']) + 90.0) * math.pi / 180
            x_pos = math.cos(track_angle) * track_range * 4
            y_pos = math.sin(track_angle) * track_range * 4
            cv2.circle(self.img, (half_w + int(x_pos), self.height - int(y_pos) - 10), 5, (255, 255, 255))
            # cv2.putText(self.img, str(track_number),
            #     (half_w + int(x_pos) - 2, self.height - int(y_pos) - 10),
            #     self.font, 1, (255, 255, 255), 2)
    cv2.imshow("Radar", self.img)
    cv2.waitKey(2)
Example #19
Source File: boxing.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def draw():
    """Read each image listed in jpglist.txt, draw the labelled boxes from
    its companion .txt file, and write the annotated copy into box_path."""
    line_num = 0
    # fix: use context managers — the original never closed the list file
    # and leaked one label-file handle per image
    with open(box_path + 'jpglist.txt') as f:
        for line in f:
            line_num += 1
            print('Image:', line_num)
            name = line.strip('\n')
            img = cv2.imread(image_path + name)
            # read each coordinate and draw box (unused img_size removed)
            with open(image_path + name.strip('.jpg') + '.txt') as f_txt:
                # NOTE: the original's skip of the first ROI line was
                # commented out, so every line is processed
                for line_txt in f_txt:
                    coor = line_txt.split(',')
                    x1 = int(coor[0].strip('\''))
                    y1 = int(coor[1].strip('\''))
                    x3 = int(coor[4].strip('\''))
                    y3 = int(coor[5].strip('\''))
                    text = coor[8].strip('\n').strip('\'')
                    text_show = text + '(' + str(x1) + ',' + str(y1) + ')'
                    cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
                    # cv2.putText(img, text_show, (x1, y1 - 1),
                    #     cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            cv2.imwrite(box_path + name, img)
    # img = cv2.imshow('image', img)
    # cv2.waitKey(0)
Example #20
Source File: streaming.py From olympe with BSD 3-Clause "New" or "Revised" License | 6 votes |
def show_yuv_frame(self, window_name, yuv_frame):
    """Convert a pdraw YUV frame to BGR with OpenCV and display it."""
    # the VideoFrame.info() dictionary contains some useful information
    # such as the video resolution
    info = yuv_frame.info()
    height, width = info["yuv"]["height"], info["yuv"]["width"]
    # yuv_frame.vmeta() returns a dictionary that contains additional
    # metadata from the drone (GPS coordinates, battery percentage, ...)
    # map the pdraw pixel format onto the matching OpenCV conversion flag
    cv2_cvt_color_flag = {
        olympe.PDRAW_YUV_FORMAT_I420: cv2.COLOR_YUV2BGR_I420,
        olympe.PDRAW_YUV_FORMAT_NV12: cv2.COLOR_YUV2BGR_NV12,
    }[info["yuv"]["format"]]
    # yuv_frame.as_ndarray() is a 2D numpy array with the proper "shape",
    # i.e. (3 * height / 2, width), because it's a YUV I420 or NV12 frame
    cv2frame = cv2.cvtColor(yuv_frame.as_ndarray(), cv2_cvt_color_flag)
    cv2.imshow(window_name, cv2frame)
    cv2.waitKey(1)  # give OpenCV 1 ms to process its UI events
Example #21
Source File: esr_visualizer.py From Udacity-SDC-Radar-Driver-Micro-Challenge with MIT License | 6 votes |
def update(self, radarData):
    """Redraw the radar view: a fresh black canvas, two guide lines, and one
    circle per active track found in *radarData*.

    :param radarData: mapping with '<n>_track_range' / '<n>_track_angle'
                      entries for tracks 1..64
    """
    self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
    # fix: integer division — cv2 drawing functions require int coordinates,
    # and `/` yields floats on Python 3 (TypeError at runtime)
    half_w = self.width // 2
    cv2.line(self.img, (10, 0), (half_w - 5, self.height), (100, 255, 255))
    cv2.line(self.img, (self.width - 10, 0), (half_w + 5, self.height), (100, 255, 255))
    for track_number in range(1, 65):
        range_key = str(track_number) + '_track_range'
        if range_key in radarData:
            track_range = radarData[range_key]
            # angle shifted by 90 degrees then converted to radians
            track_angle = (float(radarData[str(track_number) + '_track_angle']) + 90.0) * math.pi / 180
            x_pos = math.cos(track_angle) * track_range * 4
            y_pos = math.sin(track_angle) * track_range * 4
            cv2.circle(self.img, (half_w + int(x_pos), self.height - int(y_pos) - 10), 5, (255, 255, 255))
            # cv2.putText(self.img, str(track_number),
            #     (half_w + int(x_pos) - 2, self.height - int(y_pos) - 10),
            #     self.font, 1, (255, 255, 255), 2)
    cv2.imshow("Radar", self.img)
    cv2.waitKey(2)
Example #22
Source File: BlurDetection.py From python-- with GNU General Public License v3.0 | 6 votes |
def _blurDetection(self, imgName):
    """Brenner-style blur score: sum of squared two-row pixel differences.

    :param imgName: file name of the image to score
    :return: blur score (sum / 10)
    """
    # step 1: preprocessing
    img2gray, reImg = self.preImgOps(imgName)
    imgMat = self._imageToMatrix(img2gray) / 255.0
    # fix: vectorized form of the original O(x*y) Python double loop:
    #   score += (imgMat[i+2, j] - imgMat[i, j])**2
    # for i in range(x-2), j in range(y-2); note the last two COLUMNS are
    # excluded here on purpose, exactly matching the original loop bounds.
    diff = imgMat[2:, :-2] - imgMat[:-2, :-2]
    score = np.sum(diff ** 2) / 10
    # step 3: draw the score onto the image and save it
    newImg = self._drawImgFonts(reImg, str(score))
    newDir = self.strDir + "/_blurDetection_/"
    if not os.path.exists(newDir):
        os.makedirs(newDir)
    newPath = newDir + imgName
    cv2.imwrite(newPath, newImg)  # save the annotated image
    cv2.imshow(imgName, newImg)
    cv2.waitKey(0)
    return score
Example #23
Source File: fivek.py From exposure with MIT License | 5 votes |
def test():
    """Endlessly fetch 2k_train batches and show the first image of each.

    The channel reversal (::-1) presumably converts RGB data to BGR for
    cv2.imshow — confirm against the provider's output format.
    """
    provider = FiveKDataProvider('2k_train')
    while True:
        batch = provider.get_next_batch(64)
        cv2.imshow('img', batch[0][0, :, :, ::-1])
        cv2.waitKey(0)
Example #24
Source File: webcam.py From facemoji with MIT License | 5 votes |
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)
    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                # newer OpenCV versions wrap the result in an array
                prediction = prediction[0]
            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)
        if key == 27:  # exit on ESC
            break
    # fix: release the capture device — the original leaked it
    vc.release()
    cv2.destroyWindow(window_name)
Example #25
Source File: label.py From derplearning with MIT License | 5 votes |
def display(self):
    """Blit the camera frame plus all status overlays into self.window and
    show it."""
    # camera frame occupies the top of the window
    self.window[: self.frame.shape[0], :, :] = self.frame
    horizon_percent = self.config["camera"]["pitch"] / self.config["camera"]["vfov"] + 0.5
    # Horizon line
    self.window[int(self.f_h * horizon_percent), :, :] = (255, 0, 255)
    # Clear status buffer
    self.window[self.f_h :, :, :] = 0
    # Draw label bar: autonomous state on top, quality below
    self.window[self.f_h : self.f_h + self.l_h // 2, :, :] = self.autonomous_bar
    self.window[self.f_h + self.l_h // 2 : self.f_h + self.l_h, :, :] = self.quality_bar
    # Draw current timestamp vertical line
    current_x = self.frame_pos(self.frame_id)
    self.window[self.f_h + self.l_h :, current_x, :] = self.bar_color(self.quality)
    # Draw zero line
    self.window[self.f_h + self.l_h + self.bhh, :, :] = (96, 96, 96)
    offset = self.f_h + self.bhh + self.l_h
    # speed (pink) and steering (cyan) traces across the full width
    self.window[self.window_speeds + offset, np.arange(self.f_w), :] = (255, 64, 255)
    self.window[self.window_steers + offset, np.arange(self.f_w), :] = (64, 255, 255)
    # frame id, timestamp (seconds mod 100), steering, speed
    text = "%05i %07.3f %06.3f %06.3f" % (
        self.frame_id,
        (self.camera_times[self.frame_id] / 1E9) % 100,
        self.camera_steers[self.frame_id],
        self.camera_speeds[self.frame_id],
    )
    font = cv2.FONT_HERSHEY_SIMPLEX
    pink = (255, 128, 255)
    offset = (0, int(self.scale * 30))
    cv2.putText(self.window, text, offset, font, self.scale, pink, 1, cv2.LINE_AA)
    cv2.imshow(self.window_name, self.window)
Example #26
Source File: calibrate_camera.py From derplearning with MIT License | 5 votes |
def live_calibrate(camera, pattern_shape, n_matches_needed):
    """Find calibration parameters as the user moves a calibration target in
    front of the camera.

    NOTE(review): the prompt says "checkerboard" but the detection uses an
    asymmetric circles grid (findCirclesGrid) — confirm which target applies.
    """
    print("Looking for %s checkerboard" % (pattern_shape,))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # reference 3-D positions of the pattern points (all on the z = 0 plane)
    example_3d = np.zeros((pattern_shape[0] * pattern_shape[1], 3), np.float32)
    example_3d[:, :2] = np.mgrid[0 : pattern_shape[1], 0 : pattern_shape[0]].T.reshape(-1, 2)
    points_3d = []
    points_2d = []
    # collect detections until we have enough matched views
    while len(points_3d) < n_matches_needed:
        ret, frame = camera.cap.read()
        assert ret
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findCirclesGrid(
            gray_frame, pattern_shape, flags=cv2.CALIB_CB_ASYMMETRIC_GRID
        )
        cv2.imshow("camera", frame)
        if ret:
            points_3d.append(example_3d.copy())
            points_2d.append(corners)
            print("Found calibration %i of %i" % (len(points_3d), n_matches_needed))
            drawn_frame = cv2.drawChessboardCorners(frame, pattern_shape, corners, ret)
            cv2.imshow("calib", drawn_frame)
        cv2.waitKey(10)
    ret, camera_matrix, distortion_coefficients, _, _ = cv2.calibrateCamera(
        points_3d, points_2d, gray_frame.shape[::-1], None, None
    )
    assert ret
    return camera_matrix, distortion_coefficients
Example #27
Source File: artist.py From exposure with MIT License | 5 votes |
def test():
    """Endlessly fetch batches from the 'C' artist set and show the first
    image of each.

    The channel reversal (::-1) presumably converts RGB data to BGR for
    cv2.imshow — confirm against the provider's output format.
    """
    provider = ArtistDataProvider('C')
    while True:
        batch = provider.get_next_batch(64)
        cv2.imshow('img', batch[0][0, :, :, ::-1])
        cv2.waitKey(0)
Example #28
Source File: pdf_sample_layer.py From exposure with MIT License | 5 votes |
def test1():
    """Visual sanity check for pdf_sample_2d: repeatedly sample a grayscale
    image used as a 2-D pdf and show the accumulated sample histogram, which
    should converge toward the source image."""
    import cv2
    batch_size = 1024
    # grayscale image (channel mean) acts as the unnormalized pdf
    img = cv2.imread('data/doggy.jpg').mean(axis=2)
    pdf_batch = np.empty(
        shape=(batch_size, img.shape[0], img.shape[1]), dtype=np.float32)
    for i in range(batch_size):
        pdf_batch[i] = img
    pdf = tf.placeholder(tf.float32, (batch_size, img.shape[0], img.shape[1]))
    noise = tf.placeholder(tf.float32, (batch_size, 1))
    with tf.Session() as sess:
        indices = pdf_sample_2d(pdf, noise)
        image_buffer = np.zeros(
            shape=(img.shape[0], img.shape[1]), dtype=np.float32)
        while True:
            # draw batch_size samples and accumulate them into the histogram
            indices_out = sess.run(
                indices,
                feed_dict={pdf: pdf_batch,
                           noise: np.random.rand(batch_size, 1)})
            for ind in indices_out:
                image_buffer[ind[0]][ind[1]] += 1
            cv2.imshow('img', image_buffer / np.max(image_buffer))
            cv2.waitKey(30)
Example #29
Source File: functions.py From 3D-HourGlass-Network with MIT License | 5 votes |
def playVideoFromAVI(s):
    """Play the video file *s* frame by frame; press 'q' to stop early."""
    cap = cv2.VideoCapture(s)
    # Check if camera opened successfully
    if not cap.isOpened():
        print("Error opening video stream or file")
    # Read until video is completed
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of stream
        # Display the resulting frame
        cv2.imshow('Frame', frame)
        # Press Q on keyboard to exit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the video capture object
    cap.release()
    # Closes all the frames
    cv2.destroyAllWindows()
Example #30
Source File: pdraw.py From olympe with BSD 3-Clause "New" or "Revised" License | 5 votes |
def yuv_frame_cb(yuv_frame):
    """Called by Olympe for each decoded YUV frame; converts it to BGR and
    displays it with OpenCV.

    :type yuv_frame: olympe.VideoFrame
    """
    # the VideoFrame.info() dictionary contains some useful information
    # such as the video resolution
    info = yuv_frame.info()
    height, width = info["yuv"]["height"], info["yuv"]["width"]
    # yuv_frame.vmeta() returns a dictionary that contains additional
    # metadata from the drone (GPS coordinates, battery percentage, ...)
    # map the pdraw pixel format onto the matching OpenCV conversion flag
    cv2_cvt_color_flag = {
        PDRAW_YUV_FORMAT_I420: cv2.COLOR_YUV2BGR_I420,
        PDRAW_YUV_FORMAT_NV12: cv2.COLOR_YUV2BGR_NV12,
    }[info["yuv"]["format"]]
    # yuv_frame.as_ndarray() is a 2D numpy array with the proper "shape",
    # i.e. (3 * height / 2, width), because it's a YUV I420 or NV12 frame
    cv2frame = cv2.cvtColor(yuv_frame.as_ndarray(), cv2_cvt_color_flag)
    cv2.imshow("Olympe Pdraw Example", cv2frame)
    cv2.waitKey(1)  # give OpenCV 1 ms to process its UI events