Python cv2.waitKey() Examples

The following code examples show how to use cv2.waitKey(). They are taken from open-source Python projects.
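As a quick orientation: cv2.waitKey(delay) pumps the HighGUI event loop and returns the code of any key pressed within delay milliseconds, or -1 if none was pressed; a delay of 0 blocks until a key arrives. A minimal sketch of the common pattern (the image path is a placeholder):

import cv2

img = cv2.imread('example.jpg')  # placeholder path; substitute any image
cv2.imshow('window', img)

# waitKey(0) blocks until a key is pressed. Masking with 0xFF keeps only
# the low byte, which makes comparisons such as ord('q') behave the same
# on platforms where waitKey returns more than 8 bits.
key = cv2.waitKey(0) & 0xFF
if key == ord('q'):
    cv2.destroyAllWindows()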

Example 1
Project: RaspberryPi-Mertracking   Author: MertArduino   File: mertracking.py   License: GNU General Public License v3.0
def live_video(camera_port=0):
        """
        Opens a window with live video.
        :param camera_port: index of the capture device (0 is usually the built-in webcam)
        :return:
        """

        video_capture = cv2.VideoCapture(camera_port)

        while True:
            # Capture frame-by-frame
            ret, frame = video_capture.read()

            # Display the resulting frame
            cv2.imshow('Video', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # When everything is done, release the capture
        video_capture.release()
        cv2.destroyAllWindows() 
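Note that the loop in Example 1 never checks ret, so a failed capture passes None to cv2.imshow. A slightly more defensive variant of the same loop (a sketch, not part of the original project):

import cv2

video_capture = cv2.VideoCapture(0)
while True:
    ret, frame = video_capture.read()
    if not ret:  # capture failed or stream ended; avoid imshow(None)
        break
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # poll the keyboard ~1 ms per frame
        break
video_capture.release()
cv2.destroyAllWindows()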
Example 2
Project: mmdetection   Author: open-mmlab   File: webcam_demo.py   License: Apache License 2.0
def main():
    args = parse_args()

    model = init_detector(
        args.config, args.checkpoint, device=torch.device('cuda', args.device))

    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

        show_result(
            img, result, model.CLASSES, score_thr=args.score_thr, wait_time=1) 
Example 3
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: objectDetectorYOLO.py   License: MIT License
def processFrames(self):
        try:
            for img in self.anotations_list:
                img = img.split(';')
                # print(img)
                # ret,imgcv = cap.read()
                if self.video:
                    ret,imgcv = self.cap.read()
                else:
                    imgcv = cv2.imread(os.path.join('../',self.config["dataset"],img[0]))
                result = self.tfnet.return_predict(imgcv)
                print(result)
                imgcv = self.drawBoundingBox(imgcv,result)        
                cv2.imshow('detected objects',imgcv)
                if cv2.waitKey(10) == ord('q'):
                    print('exiting loop')
                    break
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            print('exiting program')
Example 4
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py   License: MIT License
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 5
Project: coco-json-converter   Author: hazirbas   File: generate_coco_json.py   License: GNU General Public License v3.0
def __get_annotation__(self, mask, image=None):

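        # Note: the three-value return is the OpenCV 3.x signature; OpenCV 4.x returns (contours, hierarchy)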
        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        segmentation = []
        for contour in contours:
            # Valid polygons have >= 6 coordinates (3 points)
            if contour.size >= 6:
                segmentation.append(contour.flatten().tolist())
        RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
        RLE = cocomask.merge(RLEs)
        # RLE = cocomask.encode(np.asfortranarray(mask))
        area = cocomask.area(RLE)
        [x, y, w, h] = cv2.boundingRect(mask)

        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.drawContours(image, contours, -1, (0,255,0), 1)
            cv2.rectangle(image,(x,y),(x+w,y+h), (255,0,0), 2)
            cv2.imshow("", image)
            cv2.waitKey(1)

        return segmentation, [x, y, w, h], area 
Example 6
Project: Automatic-Identification-and-Counting-of-Blood-Cells   Author: MahmudulAlam   File: misc.py   License: GNU General Public License v3.0
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 7
Project: python--   Author: Leezhen2014   File: BlurDetection.py   License: GNU General Public License v3.0
def _blurDetection(self, imgName):

        # step 1: preprocess the image
        img2gray, reImg = self.preImgOps(imgName)
        imgMat=self._imageToMatrix(img2gray)/255.0
        x, y = imgMat.shape
        score = 0
        for i in range(x - 2):
            for j in range(y - 2):
                score += (imgMat[i + 2, j] - imgMat[i, j]) ** 2
        # step 3: draw and save the image (shared logic that should be factored out of these methods)
        score=score/10
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_blurDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
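The nested Python loop above (a Brenner-gradient focus measure) is slow on large images; the same score vectorizes to a single NumPy expression (a sketch using the same /255 normalization and /10 scaling as the method above):

import numpy as np

def brenner_score(gray):
    # Sum of squared intensity differences two rows apart, matching the
    # index ranges range(x - 2) and range(y - 2) of the loop above.
    f = gray.astype(np.float64) / 255.0
    diff = f[2:, :-2] - f[:-2, :-2]
    return np.sum(diff ** 2) / 10

The same trick applies to the SMD and SMD2 loops in the next examples.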
Example 8
Project: python--   Author: Leezhen2014   File: BlurDetection.py   License: GNU General Public License v3.0
def _SMDDetection(self, imgName):

        # step 1: preprocess the image
        img2gray, reImg = self.preImgOps(imgName)
        f=self._imageToMatrix(img2gray)/255.0
        x, y = f.shape
        score = 0
        for i in range(x - 1):
            for j in range(y - 1):
                score += np.abs(f[i+1,j]-f[i,j])+np.abs(f[i,j]-f[i,j+1])  # vertical plus horizontal absolute differences (SMD)
        # step 3: draw and save the image (shared logic that should be factored out of these methods)
        score=score/100
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_SMDDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Example 9
Project: python--   Author: Leezhen2014   File: BlurDetection.py   License: GNU General Public License v3.0
def _SMD2Detection(self, imgName):
        """
        Gray variance product (SMD2 focus measure)
        :param imgName:
        :return:
        """
        # step 1: preprocess the image
        img2gray, reImg = self.preImgOps(imgName)
        f=self._imageToMatrix(img2gray)/255.0
        x, y = f.shape
        score = 0
        for i in range(x - 1):
            for j in range(y - 1):
                score += np.abs(f[i+1,j]-f[i,j])*np.abs(f[i,j]-f[i,j+1])
        # step 3: draw and save the image (shared logic that should be factored out of these methods)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_SMD2Detection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
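For reference, the SMD2 double loop above also reduces to one vectorized expression (a sketch, assuming the same /255 normalization):

import numpy as np

def smd2_score(gray):
    # Product of vertical and horizontal absolute differences, summed
    # over the image; the slices match range(x - 1) and range(y - 1).
    f = gray.astype(np.float64) / 255.0
    dy = np.abs(f[1:, :-1] - f[:-1, :-1])  # f[i+1, j] - f[i, j]
    dx = np.abs(f[:-1, :-1] - f[:-1, 1:])  # f[i, j] - f[i, j+1]
    return np.sum(dy * dx)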
Example 10
Project: python--   Author: Leezhen2014   File: BlurDetection.py   License: GNU General Public License v3.0
def _Variance(self, imgName):
        """
        Grayscale variance
        :param imgName:
        :return:
        """
        # step 1: preprocess the image
        img2gray, reImg = self.preImgOps(imgName)
        f = self._imageToMatrix(img2gray)

        # step 3: draw and save the image (shared logic that should be factored out of these methods)
        score = np.var(f)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_Variance_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Example 11
Project: python--   Author: Leezhen2014   File: BlurDetection.py   License: GNU General Public License v3.0
def _lapulaseDetection(self, imgName):
        """
        :param imgName: name of the image file
        :return: blur-detection score
        """
        # step 1: preprocessing
        img2gray, reImg = self.preImgOps(imgName)
        # step 2: Laplacian operator to compute the score
        resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
        score = resLap.var()
        print("Laplacian %s score of given image is %s", str(score))
        # step 3: draw and save the image (shared logic that should be factored out of these methods)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_lapulaseDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        # display
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)

        # step 4: return the score
        return score 
Example 12
Project: MobileNetv2-SSDLite   Author: PINTO0309   File: demo_caffe.py   License: MIT License
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
       p1 = (box[i][0], box[i][1])
       p2 = (box[i][2], box[i][3])
       cv2.rectangle(origimg, p1, p2, (0,255,0))
       p3 = (max(p1[0], 15), max(p1[1], 15))
       title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
       cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
 
    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
Example 13
Project: MobileNetv2-SSDLite   Author: PINTO0309   File: demo_caffe_voc.py   License: MIT License
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
       p1 = (box[i][0], box[i][1])
       p2 = (box[i][2], box[i][3])
       cv2.rectangle(origimg, p1, p2, (0,255,0))
       p3 = (max(p1[0], 15), max(p1[1], 15))
       title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
       cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
 
    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
Example 14
Project: zed-python-api   Author: stereolabs   File: live_camera.py   License: MIT License
def record(cam, runtime, mat):
    vid = sl.ERROR_CODE.ERROR_CODE_FAILURE
    out = False
    while vid != sl.ERROR_CODE.SUCCESS and not out:
        filepath = input("Enter filepath name: ")
        vid = cam.enable_recording(filepath)
        print(repr(vid))
        if vid == sl.ERROR_CODE.SUCCESS:
            print("Recording started...")
            out = True
            print("Hit spacebar to stop recording: ")
            key = False
            while key != 32:  # for spacebar
                err = cam.grab(runtime)
                if err == sl.ERROR_CODE.SUCCESS:
                    cam.retrieve_image(mat)
                    cv2.imshow("ZED", mat.get_data())
                    key = cv2.waitKey(5)
                    cam.record()
        else:
            print("Help: you must enter the filepath + filename + SVO extension.")
            print("Recording not started.")
    cam.disable_recording()
    print("Recording finished.")
    cv2.destroyAllWindows() 
Example 15
Project: Super_TF   Author: Dhruv-Mohan   File: Read_classification_dataset.py   License: MIT License
def main():
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    dummy_reader = Dataset_reader_classification(filename=_DATASET_PATH_, num_classes=_CLASSES_)
    #dummy_reader.pre_process_image(writer_pre_proc)

    with tf.Session() as sess:
        init_op.run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        images, labels = dummy_reader.next_batch(_BATCH_SIZE_)
        meanimage = sess.run([dummy_reader.mean_image])[0]
        print(meanimage)
        print(images[0])
        if _SHOW_IMAGES_ :
            for image in images:
                cv2.imshow('Image', image)
                cv2.imshow('Meanimage',meanimage)
                cv2.waitKey(0)

        coord.request_stop()
        coord.join(threads) 
Example 16
Project: tf-cpn   Author: megvii-detection   File: mask.py   License: MIT License
def showMask(img_obj):
    img = cv2.imread(img_obj['fpath'])
    img_ori = img.copy()
    gtmasks = img_obj['gtmasks']
    n = len(gtmasks)
    print(img.shape)
    for i, mobj in enumerate(gtmasks):
        if not (type(mobj['mask']) is list):
            print("Pass a RLE mask")
            continue
        else:
            pts = np.round(np.asarray(mobj['mask'][0]))
            pts = pts.reshape(pts.shape[0] // 2, 2)
            pts = np.int32(pts)
            color = np.uint8(np.random.rand(3) * 255).tolist()
            cv2.fillPoly(img, [pts], color)
    cv2.addWeighted(img, 0.5, img_ori, 0.5, 0, img)
    cv2.imshow("Mask", img)
    cv2.waitKey(0) 
Example 17
Project: derplearning   Author: notkarol   File: camera.py   License: MIT License
def sense(self):
        """ Read the next video frame. If we couldn't get it, use the previous one """
        if not self.ready:
            self.__connect()

        if self.ready:
            frame = None
            ret, frame = self.cap.read()
            if ret:
                frame = util.resize(frame, (self.width, self.height))
                sensor_name = self.config['name']
                self.state[sensor_name] = frame
                if self.state['debug']:
                    cv2.imshow('frame', frame)
                    cv2.waitKey(1)                
            else:
                print("Camera: Unable to get frame")
                self.ready = False
        return self.ready 
Example 18
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py   License: MIT License
def show2(im, allobj):
    for obj in allobj:
        cv2.rectangle(im,
            (obj[1], obj[2]), 
            (obj[3], obj[4]), 
            (0,0,255),2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 19
Project: rl_3d   Author: avdmitry   File: agent_a3c.py   License: MIT License
def Test(agent):
    if (test_write_video):
        size = (640, 480)
        fps = 30.0
        fourcc = cv2.VideoWriter_fourcc(*'XVID')  # cv2.cv.CV_FOURCC(*'XVID')
        out_video = cv2.VideoWriter(path_work_dir + "test.avi", fourcc, fps, size)

    reward_total = 0
    num_episodes = 30
    while (num_episodes != 0):
        if (not env.IsRunning()):
            env.Reset()
            agent.Reset()
            print("Total reward: {}".format(reward_total))
            reward_total = 0
            num_episodes -= 1

        state_raw = env.Observation()

        state = Preprocess(state_raw)
        action = agent.Act(state)

        for _ in xrange(frame_repeat):
            if (test_display):
                cv2.imshow("frame-test", state_raw)
                cv2.waitKey(20)

            if (test_write_video):
                out_video.write(state_raw)

            reward = env.Act(action, 1)
            reward_total += reward

            if (not env.IsRunning()):
                break

            state_raw = env.Observation() 
Example 20
Project: rl_3d   Author: avdmitry   File: agent_dqn.py   License: MIT License
def Preprocess(img):
    #cv2.imshow("frame-train", img)
    #cv2.waitKey(20)
    if (channels == 1):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (resolution[1], resolution[0]))
    #cv2.imshow("frame-train", img)
    #cv2.waitKey(200)
    return np.reshape(img, resolution) 
Example 21
Project: rl_3d   Author: avdmitry   File: agent_dqn.py   License: MIT License
def Test(agent):
    if (test_write_video):
        size = (640, 480)
        fps = 30.0 #/ frame_repeat
        fourcc = cv2.VideoWriter_fourcc(*'XVID')  # cv2.cv.CV_FOURCC(*'XVID')
        out_video = cv2.VideoWriter(path_work_dir + "test.avi", fourcc, fps, size)

    reward_total = 0
    num_episodes = 30
    while (num_episodes != 0):
        if (not env.IsRunning()):
            env.Reset()
            print("Total reward: {}".format(reward_total))
            reward_total = 0
            num_episodes -= 1

        state_raw = env.Observation()

        state = Preprocess(state_raw)
        action = agent.GetAction(state)

        for _ in xrange(frame_repeat):
            # Display.
            if (test_display):
                cv2.imshow("frame-test", state_raw)
                cv2.waitKey(20)

            if (test_write_video):
                out_video.write(state_raw)

            reward = env.Act(action, 1)
            reward_total += reward

            if (not env.IsRunning()):
                break

            state_raw = env.Observation() 
Example 22
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: cv2Iterator.py   License: Apache License 2.0
def __next__(self):
        ret, frame = self._capture.read()
        if cv2.waitKey(1) & 0xFF == ord('q') or ret is not True:
            raise StopIteration
        if self._frame_resize:
            frame = cv2.resize(frame, (self._frame_resize[0], self._frame_resize[1]))
        return frame 
Example 23
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: rl_data.py   License: Apache License 2.0
def visual(X, show=True):
    X = X.transpose((0, 2, 3, 1))
    N = X.shape[0]
    n = int(math.ceil(math.sqrt(N)))
    h = X.shape[1]
    w = X.shape[2]
    buf = np.zeros((h*n, w*n, X.shape[3]), dtype=np.uint8)
    for i in range(N):
        x = i%n
        y = i//n
        buf[h*y:h*(y+1), w*x:w*(x+1), :] = X[i]
    if show:
        cv2.imshow('a', buf)
        cv2.waitKey(1)
    return buf 
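visual tiles an NCHW batch into a roughly square grid before display. A hypothetical call with random data (the shapes are illustrative, not from the project):

import numpy as np

# 16 RGB frames of 84x84 in NCHW order tile into a 4x4 mosaic,
# since n = ceil(sqrt(16)) = 4; the buffer comes back as (336, 336, 3).
batch = np.random.randint(0, 255, size=(16, 3, 84, 84), dtype=np.uint8)
mosaic = visual(batch, show=False)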
Example 24
Project: Adeept_PiCar-B   Author: adeept   File: client.py   License: GNU General Public License v3.0
def video_show():
    while True:
        frame = footage_socket.recv_string()
        img = base64.b64decode(frame)
        npimg = np.fromstring(img, dtype=np.uint8)
        source = cv2.imdecode(npimg, 1)
        cv2.imshow("Stream", source)
        cv2.waitKey(1) 
Example 25
Project: Adeept_PiCar-B   Author: adeept   File: client_reverse.py   License: GNU General Public License v3.0
def video_show():
    while True:
        frame = footage_socket.recv_string()
        img = base64.b64decode(frame)
        npimg = np.fromstring(img, dtype=np.uint8)
        source = cv2.imdecode(npimg, 1)
        cv2.imshow("Stream", source)
        cv2.waitKey(1) 
Example 26
Project: tf-cnn-lstm-ocr-captcha   Author: Luonic   File: to_tfrecords.py   License: MIT License
def augment_image(image):
    image = 255 - image
    width, height = img_size
    image = cv2.resize(image, (width, height))
    augmenter = ImageAugmenter(width, height,
                               # width and height of the image (must be the same for all images in the batch)
                               hflip=False,  # no horizontal flips
                               vflip=False,  # no vertical flips
                               scale_to_percent=(0.9, 1.05),  # scale the image to 90%-105% of its original size
                               scale_axis_equally=False,  # allow the axes to be scaled unequally (e.g. x more than y)
                               rotation_deg=2,  # rotate between -2 and +2 degrees
                               shear_deg=5,  # shear between -5 and +5 degrees
                               translation_x_px=8,  # translate between -8 and +8 px on the x-axis
                               translation_y_px=2,  # translate between -2 and +2 px on the y-axis
                               blur_radius=0,  # blur radius that will be applied between 0..blur_radius
                               noise_variance=0,
                               motion_blur_radius=0,
                               motion_blur_strength=0
                               )
    image = augmenter.augment_batch(np.array([image], dtype=np.uint8))[0]
    image *= 255
    image = 255 - image
    image = image.astype(np.uint8)
    # cv2.imshow("image", image)
    # cv2.waitKey(0)
    return image 
Example 27
Project: NAO   Author: AISTLAB   File: wsNaoVisionMT.py   License: MIT License
def run(self):
    if self._showWindows:
      cv2.namedWindow("raw")
      cv2.namedWindow("hsv")
      cv2.namedWindow("ball")
      cv2.namedWindow("gate")
      cv2.setMouseCallback("hsv",self.on_mouse)
    else:
      cv2.destroyAllWindows()
    while self._startMonitor:
      self.getRawImage()
      time.sleep(0.1)
      self.getHSV()
      self._threshBall=self.getROI(self._ball_min,self._ball_max)
      self._threshGate=self.getROI(self._gate_min,self._gate_max)
      gx1,gy1,gw1,gh1=self.getBoundingRectangle(self._threshGate.copy())
      
      self.getLines(self.getROI(self._ball_min,self._ball_max))
      
      self._gateBounding=(gx1,gy1,gw1,gh1)
      gx2,gy2,gw2,gh2=self.getNearstRectangle(self._threshGate)
      self._nearstObstacleBounding=(gx2,gy2,gw2,gh2)
      
      x,y,r=self.getBallImageInfo()
      self._ballImageInfo=(x,y,r)
      self._ballSpaceDistance=self.getSpaceBallDistance()
      
      if self._showWindows:
        cv2.circle(self._raw,(x,y),r,(255,255,0),2)
        cv2.circle(self._raw,(x,y),2,(0,255,255),2)
        cv2.rectangle(self._raw,(gx1,gy1),(gx1+gw1,gy1+gh1),(0,255,0),2)
        cv2.rectangle(self._raw,(gx2,gy2),(gx2+gw2,gy2+gh2),(0,0,255),2)
        cv2.putText(self._raw,"%.2f %.2f %.2f %.2f"%(gx2,gy2,gw2,gh2),\
                    (10,20),cv2.FONT_HERSHEY_PLAIN,1.2,(0,0,255))
        cv2.line(self._raw, self._line[0],self._line[1], (0, 0, 255),2)
        cv2.imshow("raw",self._raw)
        cv2.imshow("hsv",self._hsv)
        cv2.imshow("ball",self._threshBall)
        cv2.imshow("gate",self._threshGate)
        cv2.waitKey(10) 
Example 28
Project: NAO   Author: AISTLAB   File: nao_controller.py   License: MIT License
def run(self):
        global is_finished
        cv2.namedWindow("img")
        cv2.setMouseCallback("img", self.on_mouse)
        while True:
            self.img = self.get_img()
            cv2.imshow("img", self.img)
            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                is_finished = True
                break 
Example 29
Project: deep-nn-car   Author: scope-lab-vu   File: Controller.py   License: MIT License
def liveStream(self):
        context = zmq.Context()
        print("Connecting to DeepNNCar livestream...")
        sock = context.socket(zmq.REQ)
        sock.connect("tcp://%s:%s" %(self.deepNNCarIP,"5002"))
        while 1:
            message = "1"
            sock.send(message.encode())
            data = sock.recv()
            #data = data.decode()
            nparr = np.fromstring(data, np.uint8)
            frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            cv2.imshow('Live Feed',frame)
            cv2.waitKey(33) 
Example 30
Project: deep-nn-car   Author: scope-lab-vu   File: Client.py   License: MIT License
def liveStreamReceiver(sock,dcTuple,nullEvents):
    # receive the size of the image
    img_size = sock.recv(32)
    if not img_size:
        sock.sendall("1")
        sock.recv(10)
        sock.sendall("1")
        return (1+nullEvents)
    try:
        img_len = int(img_size)-21
    except Exception as e:
        sock.sendall("1")
        sock.recv(10)
        sock.sendall("1")
        return (1+nullEvents)
    sock.sendall(img_size)
    e=0
    data = ''
    d = "default"
    # Receive image data from rpi and store in data
    while e < img_len and len(d) != 0:
        d = sock.recv(16000)
        e += len(d)
        data += d
    nparr = np.fromstring(data, np.uint8)
    frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    cv2.imshow('Live Feed', frame)
    cv2.waitKey(100)
    #frame = SafetyManager.preprocess(frame)
    #fm = SafetyManager.measureBlurriness(frame)
    #lanedecision = SafetyManager.laneDetect(frame)
    response = "1"
    sock.sendall(response)
    return 0 
Example 31
Project: ARPET   Author: juliagarriga   File: cmt_tracker.py   License: MIT License
def track(self, out):

        cv2.namedWindow(self.window_name)

        stop = False

        while not stop:

            # Obtain new frame
            frame, t = self.frames.next()

            if frame is not None:

                found_persons = self.locate(frame)

                if found_persons:
                    # TODO: IF MORE THAN ONE PERSON IS FOUND, CHECK FOR SIMILARITIES BETWEEN THE TRACKING
                    (startX, startY, endX, endY) = found_persons[0].astype(int)

                    # Detection bounding box
                    cv2.rectangle(frame, (startX, startY), (endX, endY), (0,255,0), 2, 1)

                    rect = np.array([startX, startY, endX - startX, endY - startY])

                    stop = self.track_object(frame, out, rect)

                frame = cv2.cvtColor(cv2.resize(frame, (0, 0), fx=3, fy=3), cv2.COLOR_RGB2BGR)
                cv2.imshow(self.window_name, frame)

                # Exit if ESC pressed
                k = cv2.waitKey(1) & 0xff
                if k == 27: break

        cv2.destroyWindow(self.window_name) 
Example 32
Project: ARPET   Author: juliagarriga   File: util.py   License: MIT License
def get_rect(im, title='get_rect'):
	mouse_params = {'tl': None, 'br': None, 'current_pos': None,
		'released_once': False}

	cv2.namedWindow(title)
	cv2.moveWindow(title, 100, 100)

	def onMouse(event, x, y, flags, param):

		param['current_pos'] = (x, y)

		if param['tl'] is not None and not (flags & cv2.EVENT_FLAG_LBUTTON):
			param['released_once'] = True

		if flags & cv2.EVENT_FLAG_LBUTTON:
			if param['tl'] is None:
				param['tl'] = param['current_pos']
			elif param['released_once']:
				param['br'] = param['current_pos']

	cv2.setMouseCallback(title, onMouse, mouse_params)
	cv2.imshow(title, im)

	while mouse_params['br'] is None:
		im_draw = np.copy(im)

		if mouse_params['tl'] is not None:
			cv2.rectangle(im_draw, mouse_params['tl'],
				mouse_params['current_pos'], (255, 0, 0))

		cv2.imshow(title, im_draw)
		_ = cv2.waitKey(10)

	cv2.destroyWindow(title)

	tl = (min(mouse_params['tl'][0], mouse_params['br'][0]),
		min(mouse_params['tl'][1], mouse_params['br'][1]))
	br = (max(mouse_params['tl'][0], mouse_params['br'][0]),
		max(mouse_params['tl'][1], mouse_params['br'][1]))

	return (tl, br) 
Example 33
Project: ARPET   Author: juliagarriga   File: tracker.py   License: MIT License
def track(self, out=None):

        cv2.namedWindow(self.window_name)

        stop = False

        while not stop:

            # Obtain new frame
            frame, t = self.frames.next()

            if frame is not None:

                found_persons = self.locate(frame)

                frame = cv2.cvtColor(cv2.resize(frame, (0, 0), fx=3, fy=3), cv2.COLOR_RGB2BGR)
                cv2.imshow(self.window_name, frame)

                if out is not None:
                    out.write(frame) 

                if found_persons:
                    (startX, startY, endX, endY) = found_persons[0].astype(int)

                    # Detection bounding box
                    cv2.rectangle(frame, (startX, startY), (endX, endY), (0,255,0), 2, 1)

                    rect = np.array([startX, startY, endX - startX, endY - startY])           

                    stop = self.track_object(frame, out, rect)

                # Exit if ESC pressed
                k = cv2.waitKey(1) & 0xff
                if k == 27: break

        cv2.destroyWindow(self.window_name) 
Example 34
Project: motion-tracking   Author: dansbecker   File: crop_vis.py   License: MIT License
def show_img(img, boxes=None, window_name="Happy Dance Image", msec_to_show_for=1500, 
             save=False, filepath='None'):
    """Show an image, potentially with surrounding bounding boxes

    Args:
    ----
        img: np.ndarray
        boxes (optional): dict of bounding boxes where the keys hold the name (actual
            or predicted) and the values the coordinates of the boxes
        window_name (optional): str
        msec_to_show_for (optional): int
    """

    img_copy = img.copy() # Any drawing is inplace. Draw on copy to protect original.
    if boxes:
        color_dct = {'actual': (125, 255, 0), 'predicted': (0, 25, 255)}
        for box_type, box_coords  in boxes.items():
            cv2.rectangle(img_copy,
                          pt1=(box_coords[0], box_coords[1]),
                          pt2=(box_coords[2], box_coords[3]),
                          color=color_dct[box_type],
                          thickness=2)
    if not save: 
        cv2.imshow(window_name, img_copy)
        cv2.waitKey(msec_to_show_for)
        cv2.destroyWindow(window_name)
    else: 
        cv2.imwrite(filepath, img_copy) 
Example 35
Project: garden.facelock   Author: kivy-garden   File: __init__.py   License: MIT License
def face_recognize(self):
        cap = cv2.VideoCapture(self.index)
        
        face_cascade = cv2.CascadeClassifier(self.cascade)
        '''
        face_cascade: the Haar cascade classifier loaded from self.cascade, used below for detection.
        '''

        while(True):
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            '''
            Converts the colour frame to black and white (grayscale).
            '''
            if np.any(face_cascade.detectMultiScale(gray, 1.3, 5)):
                
                print("Cascade found")
                
                self.dispatch('on_match')
                
                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
            
            else:
                print("Not recognized")

            cv2.imshow('frame', frame)
            #Comment the above statement not to show the camera screen
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("Forcefully Closed")

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
        cap.release() 
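The for i in range(1, 5): cv2.waitKey(1) loops above are a known workaround: on some platforms cv2.destroyAllWindows() only takes effect once the GUI event loop runs again, so a few 1 ms waitKey calls are issued to pump it. Factored into a helper, the idea looks roughly like this (a sketch, not from the project):

import cv2

def close_all_windows(pump_iterations=4):
    # destroyAllWindows queues the close; pumping the event loop with a
    # few short waitKey calls lets HighGUI actually process it.
    cv2.destroyAllWindows()
    for _ in range(pump_iterations):
        cv2.waitKey(1)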
Example 36
Project: Automatic-Identification-and-Counting-of-Blood-Cells   Author: MahmudulAlam   File: misc.py   License: GNU General Public License v3.0
def show2(im, allobj):
    for obj in allobj:
        cv2.rectangle(im,
            (obj[1], obj[2]), 
            (obj[3], obj[4]), 
            (0,0,255),2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 37
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: boxing.py   License: MIT License
def draw():
    f = open(box_path + 'jpglist.txt')

    # read each image and its label
    line = f.readline()
    line_num =0
    while line:
        line_num=line_num+1
        print('Image:', line_num)
        name = line.strip('\n')
        img = cv2.imread(image_path + name)
        img_size = img.shape
        img_size = img_size[0]*img_size[1]

        # read each coordinate and draw box
        f_txt = open(image_path + name.strip('.jpg') + '.txt')
        #line_txt = f_txt.readline()  # pass the first ROI information
        line_txt = f_txt.readline()
        while line_txt:
            coor = line_txt.split(',')
            x1 = int(coor[0].strip('\''))
            y1 = int(coor[1].strip('\''))
            x3 = int(coor[4].strip('\''))
            y3 = int(coor[5].strip('\''))
            text = coor[8].strip('\n').strip('\'')
            text_show = text + '(' + str(x1) + ',' + str(y1) +')'

            cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            #cv2.putText(img, text_show, (x1, y1 - 1),
              #          cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            line_txt = f_txt.readline()
        cv2.imwrite(box_path + name, img)
        line = f.readline()
        # img = cv2.imshow('image', img)
        # cv2.waitKey(0) 
Example 38
Project: python--   Author: Leezhen2014   File: BlurDetection.py   License: GNU General Public License v3.0
def _Vollath(self,imgName):
        """
        Vollath's function (autocorrelation-based focus measure)
        :param imgName:
        :return:
        """
        # step 1: preprocess the image
        img2gray, reImg = self.preImgOps(imgName)
        f = self._imageToMatrix(img2gray)
        source=0
        x,y=f.shape
        for i in range(x-1):
            for j in range(y):
                source+=f[i,j]*f[i+1,j]
        source=source-x*y*np.mean(f)
        # step 3: draw and save the image (shared logic that should be factored out of these methods)

        newImg = self._drawImgFonts(reImg, str(source))
        newDir = self.strDir + "/_Vollath_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return source 
Example 39
Project: Face-Recognition-for-Mobile-Robot   Author: gagolucasm   File: libs.py   License: MIT License
def read_data(path):
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('re')]
    images=[]
    labels=[]
    for image_path in image_paths:
        #print image_path
        #cv2.namedWindow('Cargando fotos ...')
        imagen=cv2.imread(image_path)
        imagen=cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)
        imagenn=np.array(imagen,'uint8')
        #cv2.imshow("Cargando fotos ...",imagenn)
        #cv2.waitKey(40)
        nbr = (os.path.split(image_path)[1].split(".")[0])
        images.append(imagenn)
        labels.append(nbr)
    id=set(labels)
    #print id
    dictid={}
    pos=0
    idlabel=[]
    for i in id:
        dictid[i]=pos
        pos=pos+1
    for i in labels:
        idlabel.append(dictid[i])
    return images,idlabel,dictid 
Example 40
Project: Sigmedia-AVSR   Author: georgesterpu   File: dataset_writer.py   License: GNU General Public License v3.0
def read_bmp_dir(feature_dir, output_resolution, crop_lips=False):
    files = sorted(glob.glob(feature_dir + '/*.bmp'))
    data = []
    for file in files:
        image = imread(file)
        rows, cols, nchan = image.shape
        if crop_lips is True:
            image = image[(3*rows//5):, (1*cols//10):(9*cols//10), :]

        initial_area = np.prod(image.shape[0:2])
        desired_area = np.prod(output_resolution)

        if initial_area > desired_area:  # area better when decimating
            interp_method = cv2.INTER_AREA
        else:
            interp_method = cv2.INTER_CUBIC

        resized = cv2.resize(image, output_resolution, interpolation=interp_method)
        data.append(resized)

    video = np.asarray(data, dtype=np.float64)

    # for frame in video:
    #     frame = cv2.resize(frame, (512, 256), interpolation=cv2.INTER_CUBIC)
    #     cv2.imshow('video_stream', (frame) / 255)
    #     cv2.waitKey(30)

    video = (video - 128) / 128

    return video 
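The INTER_AREA / INTER_CUBIC switch above follows the usual OpenCV guidance: INTER_AREA averages source pixels and avoids moiré when shrinking, while INTER_CUBIC interpolates smoothly when enlarging. As a standalone helper, the same decision could look like this (a sketch; the name is illustrative):

import cv2
import numpy as np

def resize_with_best_interp(image, output_resolution):
    # Pick INTER_AREA when shrinking (decimating) and INTER_CUBIC when
    # enlarging, mirroring the area comparison in read_bmp_dir above.
    shrinking = np.prod(image.shape[:2]) > np.prod(output_resolution)
    interp = cv2.INTER_AREA if shrinking else cv2.INTER_CUBIC
    return cv2.resize(image, output_resolution, interpolation=interp)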
Example 41
Project: mtcnn-face-detect   Author: ResByte   File: webcam.py   License: MIT License
def main():
    # Capture device. Usually 0 will be webcam and 1 will be usb cam.
    video_capture = cv2.VideoCapture(0)
    video_capture.set(3, 640)
    video_capture.set(4, 480)

    minsize = 25 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # thresholds for the three detection stages
    factor = 0.709 # scale factor


    sess = tf.Session()
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
        while(True):
            ret, frame = video_capture.read()
            if not ret:
                break
            # Display the resulting frame
            img = frame[:,:,0:3]
            boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
            print(boxes)
            for i in range(boxes.shape[0]):
                pt1 = (int(boxes[i][0]), int(boxes[i][1]))
                pt2 = (int(boxes[i][2]), int(boxes[i][3]))
                
                cv2.rectangle(frame, pt1, pt2, color=(0, 255, 0))
            cv2.imshow('Video', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    video_capture.release()
    cv2.destroyAllWindows() 
Example 42
Project: vidrec   Author: leaveitout   File: vidrec.py   License: MIT License
def record_video(filename='output.avi', width=640, height=480, color=True,
                 fps=20, codec='MJPG'):

    # TODO: Add ability to select the camera
    # Open video
    cap = cv2.VideoCapture(1)

    # Define the codec and create the VideoWriter object
    # TODO: Check validity of the codec
    fourcc = cv2.VideoWriter_fourcc(*codec)
    out = cv2.VideoWriter(filename, fourcc, fps, (width, height))

    while(cap.isOpened()):
        ret, frame = cap.read()
        if ret is True:
            frame = cv2.flip(frame, 0)

            # write the flipped frame
            out.write(frame)

            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break
        else:
            break

    # Release everything if job is finished
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return 0 
Example 43
Project: selfieexpression   Author: andrewjtimmons   File: face.py   License: MIT License
def show_color_image(self):
    """ Display image on screen and close on key press. """
    cv2.imshow('img',self.color_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 44
Project: selfieexpression   Author: andrewjtimmons   File: face.py   License: MIT License
def show_grayscale_image(self):
    """ Display image on screen and close on key press. """
    cv2.imshow('img',self.grayscale_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 45
Project: selfieexpression   Author: andrewjtimmons   File: face.py   License: MIT License
def show_color_image(self):
    """ Display image on screen and close on key press. """
    cv2.imshow('img',self.color_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 46
Project: selfieexpression   Author: andrewjtimmons   File: face.py   License: MIT License
def show_grayscale_image(self):
    """ Display image on screen and close on key press. """
    cv2.imshow('img',self.grayscale_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 47
Project: Cloud-Coverage-Calculator   Author: GDave50   File: cloud_detect.py   License: GNU General Public License v3.0
def display_image(image):
    cv2.imshow("Finds", image)
    
    key = 0
    while key != 27:
        key = cv2.waitKey(30) & 0xff
    
    cv2.destroyAllWindows() 
Example 48
Project: zed-python-api   Author: stereolabs   File: read_svo.py   License: MIT License
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    init = sl.InitParameters(svo_input_filename=filepath,svo_real_time_mode=False)
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()

    key = ''
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat)
            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(1)
            saving_image(key, mat)
        else:
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()

    print_camera_information(cam)
    saving_depth(cam)
    saving_point_cloud(cam)

    cam.close()
    print("\nFINISH") 
Example 49
Project: zed-python-api   Author: stereolabs   File: live_camera.py   License: MIT License
def main():
    print("Running...")
    init = sl.InitParameters()
    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()

    print_camera_information(cam)
    print_help()

    key = ''
    while key != 113:  # for 'q' key
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.VIEW_LEFT)
            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(5)
            settings(key, cam, runtime, mat)
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH") 
Example 50
Project: zed-python-api   Author: stereolabs   File: streaming_receiver.py   License: MIT License
def main():

    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE

    if (len(sys.argv) > 1) :
        ip = sys.argv[1]
        init.set_from_stream(ip)
    else :
        print('Usage : python3 streaming_receiver.py ip')
        exit(1)

    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()

    key = ''
    print("  Quit : CTRL+C\n")
    while key != 113:
        err = cam.grab(runtime)
        if (err == sl.ERROR_CODE.SUCCESS) :
            cam.retrieve_image(mat, sl.VIEW.VIEW_LEFT)
            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(1)
        else :
            key = cv2.waitKey(1)

    cam.close()