Python cv2.imshow() Examples

The following are 27 code examples showing how to use cv2.imshow(). These examples are extracted from open source projects; the project, author, file, and license of each example are listed above it so you can trace it back to its original source.

You may also want to check out the other available functions and classes of the cv2 module.
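Before the project examples, here is a minimal, self-contained sketch of the basic cv2.imshow() pattern (the file name is a placeholder): load an image, show it in a window, wait for a key press, then close the window. Note that cv2.waitKey() is what actually pumps the GUI event loop, so imshow() without it will not render anything.

import cv2

img = cv2.imread("example.jpg")  # placeholder path; any BGR image works
if img is None:
    raise FileNotFoundError("could not read example.jpg")
cv2.imshow("Preview", img)   # creates the window on first call
cv2.waitKey(0)               # 0 blocks until a key is pressed (ESC is keycode 27)
cv2.destroyAllWindows()      # release the HighGUI windows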

Example 1
Project: The-chat-room   Author: 11ze   File: vachat.py    License: MIT License
def run(self):
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
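A portability note on this example: struct.calcsize("L") uses the platform's native long size (4 bytes on Windows and on 32-bit builds, typically 8 bytes on 64-bit Unix), so the length header only works if both peers agree. A hedged sketch of a fixed-width variant, assuming you control both the sender and the receiver:

import struct

PAYLOAD_FMT = "=L"                            # "=" selects standard sizing: always 4 bytes
payload_size = struct.calcsize(PAYLOAD_FMT)   # 4 on every platform
# receiver: msg_size = struct.unpack(PAYLOAD_FMT, packed_size)[0]
# sender:   sock.sendall(struct.pack(PAYLOAD_FMT, len(zdata)) + zdata)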
Example 2
Project: coco-json-converter   Author: hazirbas   File: generate_coco_json.py    License: GNU General Public License v3.0
def __get_annotation__(self, mask, image=None):

        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        segmentation = []
        for contour in contours:
            # Valid polygons have >= 6 coordinates (3 points)
            if contour.size >= 6:
                segmentation.append(contour.flatten().tolist())
        RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
        RLE = cocomask.merge(RLEs)
        # RLE = cocomask.encode(np.asfortranarray(mask))
        area = cocomask.area(RLE)
        [x, y, w, h] = cv2.boundingRect(mask)

        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.drawContours(image, contours, -1, (0,255,0), 1)
            cv2.rectangle(image,(x,y),(x+w,y+h), (255,0,0), 2)
            cv2.imshow("", image)
            cv2.waitKey(1)

        return segmentation, [x, y, w, h], area 
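Note that this example unpacks three values from cv2.findContours(), which matches the OpenCV 3.x signature; OpenCV 2.x and 4.x return only (contours, hierarchy). A small version-agnostic sketch:

import cv2

def find_contours_compat(mask):
    # cv2.findContours returns 3 values on OpenCV 3.x and 2 elsewhere;
    # the contours and hierarchy are always the last two.
    results = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return results[-2], results[-1]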
Example 3
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0
def _lapulaseDetection(self, imgName):
        """
        :param strdir: 文件所在的目录
        :param name: 文件名称
        :return: 检测模糊后的分数
        """
        # step1: 预处理
        img2gray, reImg = self.preImgOps(imgName)
        # step2: laplacian算子 获取评分
        resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
        score = resLap.var()
        print("Laplacian %s score of given image is %s", str(score))
        # strp3: 绘制图片并保存  不应该写在这里  抽象出来   这是共有的部分
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_lapulaseDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        # display
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)

        # step 3: return the score
        return score 
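Stripped of the file handling, the core of this blur measure is just the variance of the Laplacian response. A minimal sketch (the cutoff value is an assumption and must be tuned per dataset):

import cv2

def laplacian_blur_score(gray):
    # more edges -> larger Laplacian variance -> sharper image
    return cv2.Laplacian(gray, cv2.CV_64F).var()

# usage sketch: scores below ~100 often indicate blur, but the threshold is data-dependent
# gray = cv2.imread("photo.jpg", cv2.IMREAD_GRAYSCALE)
# print(laplacian_blur_score(gray))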
Example 4
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: detect.py    License: GNU General Public License v2.0
def main():
	#IMG PATHS
	imagePath = "test3.jpg"
	cascPath = "cascades/haarcascade_pedestrian.xml"

	pplCascade = cv2.CascadeClassifier(cascPath)
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	
	gray = normalize_grayimage(gray)
	 
	pedestrians = pplCascade.detectMultiScale(
		gray,
		scaleFactor=1.2,
		minNeighbors=10,
		minSize=(32,96),
		flags=cv2.CASCADE_SCALE_IMAGE  # was cv2.cv.CV_HAAR_SCALE_IMAGE; the cv submodule was removed in OpenCV 3
	)

	print "Found {0} ppl!".format(len(pedestrians))

	#Draw a rectangle around the detected objects
	for (x, y, w, h) in pedestrians:
		cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)

	cv2.imwrite("saida.jpg", image)
	cv2.imshow("Ppl found", image)
	cv2.waitKey(0)
	
	return 0 
Example 5
Project: MobileNetv2-SSDLite   Author: PINTO0309   File: demo_caffe.py    License: MIT License
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
       p1 = (box[i][0], box[i][1])
       p2 = (box[i][2], box[i][3])
       cv2.rectangle(origimg, p1, p2, (0,255,0))
       p3 = (max(p1[0], 15), max(p1[1], 15))
       title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
       cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
 
    k = cv2.waitKey(0) & 0xff
    # exit if ESC pressed
    if k == 27:
        return False
    return True
Example 6
Project: derplearning   Author: notkarol   File: calibrate_camera.py    License: MIT License
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """ Using a given calibration matrix, display the distorted, undistorted, and cropped frame"""
    scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ret, frame = camera.cap.read()
        assert ret
        distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        undistorted_frame = cv2.undistort(
            distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
        )
        roi_x, roi_y, roi_w, roi_h = roi
        cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
        cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
        cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
        cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
        cv2.waitKey(10) 
Example 7
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py    License: MIT License
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
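The arithmetic in show() decodes YOLO-style grid output: obj[5] is a flattened cell index, obj[1] and obj[2] are offsets within that cell, and obj[3] and obj[4] are square roots of the box size relative to the image. A worked sketch with assumed values (S=7 grid, 448x448 image, hypothetical detection tuple):

S, w, h = 7, 448, 448                       # grid size and image size (assumed)
cellx, celly = w / S, h / S                 # 64.0 x 64.0 pixels per cell
obj = ('person', 0.5, 0.5, 0.4, 0.6, 24)    # hypothetical (label, dx, dy, sqrt_w, sqrt_h, cell)
col, row = obj[5] % S, obj[5] // S          # cell 24 -> column 3, row 3
centerx = (col + obj[1]) * cellx            # (3 + 0.5) * 64 = 224.0
centery = (row + obj[2]) * celly            # 224.0
ww, hh = obj[3] ** 2 * w, obj[4] ** 2 * h   # sizes are sqrt-encoded: 71.68 x 161.28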
Example 8
Project: MobileNetv2-SSDLite   Author: PINTO0309   File: demo_caffe_voc.py    License: MIT License
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
       p1 = (box[i][0], box[i][1])
       p2 = (box[i][2], box[i][3])
       cv2.rectangle(origimg, p1, p2, (0,255,0))
       p3 = (max(p1[0], 15), max(p1[1], 15))
       title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
       cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
 
    k = cv2.waitKey(0) & 0xff
    # exit if ESC pressed
    if k == 27:
        return False
    return True
Example 9
Project: Advanced_Lane_Lines   Author: ChengZhongShen   File: camera_calibration.py    License: MIT License
def test():
	"""
	read the pickle file on disk and implement undistor on image
	show the oringal/undistort image
	"""
	print("Reading the pickle file...")
	pickle_file = open("./camera_cal.p", "rb")
	dist_pickle = pickle.load(pickle_file)
	mtx = dist_pickle["mtx"]  
	dist = dist_pickle["dist"]
	pickle_file.close()

	print("Reading the sample image...")
	img = cv2.imread('corners_founded/corners_found13.jpg')
	img_size = (img.shape[1],img.shape[0])
	dst = cv2.undistort(img, mtx, dist, None, mtx)

	# dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
	# Visualize undistortion
	print("Visulize the result...")
	f, (ax1,ax2) = plt.subplots(1,2, figsize=(20,10))
	ax1.imshow(img), ax1.set_title('Original Image', fontsize=15)
	ax2.imshow(dst), ax2.set_title('Undistored Image', fontsize=15)
	plt.show() 
Example 10
Project: Traffic-Signs-and-Object-Detection   Author: dark-archerx   File: misc.py    License: GNU General Public License v3.0
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 11
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: objectDetectorYOLO.py    License: MIT License
def processFrames(self):
        try:
            for img in self.anotations_list:
                img = img.split(';')
                # print(img)
                # ret,imgcv = cap.read()
                if self.video:
                    ret,imgcv = self.cap.read()
                else:
                    imgcv = cv2.imread(os.path.join('../',self.config["dataset"],img[0]))
                result = self.tfnet.return_predict(imgcv)
                print(result)
                imgcv = self.drawBoundingBox(imgcv,result)        
                cv2.imshow('detected objects',imgcv)
                if cv2.waitKey(10) == ord('q'):
                    print('exiting loop')
                    break
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            print('exiting program')
Example 12
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: misc.py    License: MIT License
def show2(im, allobj):
    for obj in allobj:
        cv2.rectangle(im,
            (obj[1], obj[2]), 
            (obj[3], obj[4]), 
            (0,0,255),2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 13
Project: face-attendance-machine   Author: matiji66   File: facerec_from_webcam_faster.py    License: Apache License 2.0
def face_process():
    myprint("face process start",time.time())
    # Find all the faces and face encodings in the current frame of video
    # face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
    myprint('face_locations start', time.time())
    face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
    myprint('face_locations end', time.time())
    myprint('face_encodings start', time.time())
    face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
    myprint('face_encodings end', time.time())
    face_names = []
    for face_encoding in face_encodings:
        # optimization start: KNN-style voting, accumulate rank * weight per class,
        # then sort and take the top-1
        name, dis = vote_class(face_encoding)
        # optimization end
        face_names.append(name)  # record the face name

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        myprint('putText start', time.time())
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        myprint("putText end " + name, time.time())
        # say hello and save record to file
        myprint('process_face_records start', time.time())
        process_face_records(name)
        myprint('process_face_records end', time.time())

    # Display the resulting image
    cv2.imshow('Video', frame)
    myprint("face process end", time.time()) 
Example 14
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: boxing.py    License: MIT License
def draw():
    f = open(box_path + 'jpglist.txt')

    # read each image and its label
    line = f.readline()
    line_num = 0
    while line:
        line_num += 1
        print('Image:', line_num)
        name = line.strip('\n')
        img = cv2.imread(image_path + name)
        img_size = img.shape
        img_size = img_size[0]*img_size[1]

        # read each coordinate and draw box
        f_txt = open(image_path + name.strip('.jpg') + '.txt')
        #line_txt = f_txt.readline()  # pass the first ROI information
        line_txt = f_txt.readline()
        while line_txt:
            coor = line_txt.split(',')
            x1 = int(coor[0].strip('\''))
            y1 = int(coor[1].strip('\''))
            x3 = int(coor[4].strip('\''))
            y3 = int(coor[5].strip('\''))
            text = coor[8].strip('\n').strip('\'')
            text_show = text + '(' + str(x1) + ',' + str(y1) +')'

            cv2.rectangle(img, (x1, y1), (x3, y3), (255, 0, 0), 1)
            #cv2.putText(img, text_show, (x1, y1 - 1),
              #          cv2.FONT_HERSHEY_TRIPLEX, 0.35, (0, 0, 255), 1)
            line_txt = f_txt.readline()
        cv2.imwrite(box_path + name, img)
        line = f.readline()
        # img = cv2.imshow('image', img)
        # cv2.waitKey(0) 
Example 15
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0
def _blurDetection(self, imgName):

        # step 1: image preprocessing
        img2gray, reImg = self.preImgOps(imgName)
        imgMat=self._imageToMatrix(img2gray)/255.0
        x, y = imgMat.shape
        score = 0
        for i in range(x - 2):
            for j in range(y - 2):
                score += (imgMat[i + 2, j] - imgMat[i, j]) ** 2
        # step 3: draw and save the image (shared logic; should be factored out)
        score = score / 10
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_blurDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Example 16
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0
def _SMD2Detection(self, imgName):
        """
        灰度方差乘积
        :param imgName:
        :return:
        """
        # step 1 图像的预处理
        img2gray, reImg = self.preImgOps(imgName)
        f=self._imageToMatrix(img2gray)/255.0
        x, y = f.shape
        score = 0
        for i in range(x - 1):
            for j in range(y - 1):
                score += np.abs(f[i+1,j]-f[i,j])*np.abs(f[i,j]-f[i,j+1])
        # step 3: draw and save the image (shared logic; should be factored out)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_SMD2Detection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
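The double Python loop above is slow on large images; the same SMD2 score can be computed with numpy slicing. A sketch, equivalent to the loop under the same normalization:

import numpy as np

def smd2_score(gray):
    f = gray.astype(np.float64) / 255.0
    dx = np.abs(f[1:, :-1] - f[:-1, :-1])   # |f[i+1, j] - f[i, j]|
    dy = np.abs(f[:-1, :-1] - f[:-1, 1:])   # |f[i, j] - f[i, j+1]|
    return float(np.sum(dx * dy))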
Example 17
Project: python--   Author: Leezhen2014   File: BlurDetection.py    License: GNU General Public License v3.0
def _Variance(self, imgName):
        """
               灰度方差乘积
               :param imgName:
               :return:
               """
        # step 1 图像的预处理
        img2gray, reImg = self.preImgOps(imgName)
        f = self._imageToMatrix(img2gray)

        # step 3: draw and save the image (shared logic; should be factored out)
        score = np.var(f)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_Variance_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Example 18
Project: olympe   Author: Parrot-Developers   File: streaming.py    License: BSD 3-Clause "New" or "Revised" License
def show_yuv_frame(self, window_name, yuv_frame):
        # the VideoFrame.info() dictionary contains some useful information
        # such as the video resolution
        info = yuv_frame.info()
        height, width = info["yuv"]["height"], info["yuv"]["width"]

        # yuv_frame.vmeta() returns a dictionary that contains additional
        # metadata from the drone (GPS coordinates, battery percentage, ...)

        # convert pdraw YUV flag to OpenCV YUV flag
        cv2_cvt_color_flag = {
            olympe.PDRAW_YUV_FORMAT_I420: cv2.COLOR_YUV2BGR_I420,
            olympe.PDRAW_YUV_FORMAT_NV12: cv2.COLOR_YUV2BGR_NV12,
        }[info["yuv"]["format"]]

        # yuv_frame.as_ndarray() is a 2D numpy array with the proper "shape"
        # i.e (3 * height / 2, width) because it's a YUV I420 or NV12 frame

        # Use OpenCV to convert the yuv frame to RGB
        cv2frame = cv2.cvtColor(yuv_frame.as_ndarray(), cv2_cvt_color_flag)
        # Use OpenCV to show this frame
        cv2.imshow(window_name, cv2frame)
        cv2.waitKey(1)  # yield to OpenCV's GUI loop for 1 ms
Example 19
Project: Udacity-SDC-Radar-Driver-Micro-Challenge   Author: diyjac   File: esr_visualizer.py    License: MIT License
def update(self, radarData):
        self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
        # integer division: OpenCV point coordinates must be ints under Python 3
        cv2.line(self.img, (10, 0), (self.width // 2 - 5, self.height), (100, 255, 255))
        cv2.line(self.img, (self.width - 10, 0), (self.width // 2 + 5, self.height), (100, 255, 255))

        for track_number in range(1, 65):
            if str(track_number)+'_track_range' in radarData:
                track_range = radarData[str(track_number)+'_track_range']
                track_angle = (float(radarData[str(track_number)+'_track_angle'])+90.0)*math.pi/180

                x_pos = math.cos(track_angle)*track_range*4
                y_pos = math.sin(track_angle)*track_range*4

                cv2.circle(self.img, (self.width // 2 + int(x_pos), self.height - int(y_pos) - 10), 5, (255, 255, 255))
                #cv2.putText(self.img, str(track_number), 
                #    (self.width/2 + int(x_pos)-2, self.height - int(y_pos) - 10), self.font, 1, (255,255,255), 2)

        cv2.imshow("Radar", self.img)
        cv2.waitKey(2) 
Example 20
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: evaluation.py    License: GNU General Public License v2.0
def normalize_grayimage(image):
	image = cv2.equalizeHist(image)
	#cv2.imshow("Equalized img", image)
	
	return image 
Example 21
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: detect.py    License: GNU General Public License v2.0
def normalize_grayimage(image):
	image = cv2.equalizeHist(image)
	cv2.imshow("Equalized img", image)
	
	return image 
Example 22
Project: pedestrian-haar-based-detector   Author: felipecorrea   File: tracking.py    License: GNU General Public License v2.0
def normalize_grayimage(image):
	image = cv2.equalizeHist(image)
	cv2.imshow("Equalized img", image)
	
	return image 
Example 23
Project: animal-tracking   Author: colinlaney   File: track.py    License: Creative Commons Zero v1.0 Universal
def drawFloorCrop(event, x, y, flags, params):
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h,0]
        
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0,0], [0,h], [h,h], [h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x,y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x,y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        params['croppingPolygons'][name][-1] = [x,y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(
                    params['croppingPolygons'][name],
                    (len(params['croppingPolygons'][name]),2)
                )],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon) 
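Once perspectiveMatrix[name] has been computed, it would typically be applied with cv2.warpPerspective to rectify the cropped floor into the h-by-h square defined by tetragonVerticesUpd; a one-line sketch, assuming frame is the current video frame:

warped = cv2.warpPerspective(frame, perspectiveMatrix[name], (h, h))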
Example 24
Project: The-chat-room   Author: 11ze   File: vachat.py    License: MIT License
def run(self):
        while True:
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(3)
                continue
        if self.showme:
            cv2.namedWindow('You', cv2.WINDOW_NORMAL)
        print("VEDIO client connected...")
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if self.showme:
                cv2.imshow('You', frame)
                if cv2.waitKey(1) & 0xFF == 27:
                    self.showme = False
                    cv2.destroyWindow('You')
            sframe = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(sframe)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            except:
                break
            for i in range(self.interval):
                self.cap.read() 
Example 25
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: demo.py    License: Apache License 2.0
def run_camera(args,ctx):
    assert args.batch_size == 1, "only batch size of 1 is supported"
    logging.info("Detection threshold is {}".format(args.thresh))
    camera_iter = CameraIterator(frame_resize=parse_frame_resize(args.frame_resize))  # avoid shadowing the built-in iter()
    class_names = parse_class_names(args.class_names)
    mean_pixels = (args.mean_r, args.mean_g, args.mean_b)
    data_shape = int(args.data_shape)
    batch_size = int(args.batch_size)
    detector = Detector(
        get_symbol(args.network, data_shape, num_classes=len(class_names)),
        network_path(args.prefix, args.network, data_shape),
        args.epoch,
        data_shape,
        mean_pixels,
        batch_size,
        ctx
    )
    for frame in camera_iter:
        logging.info("Frame info: shape %s type %s", frame.shape, frame.dtype)
        logging.info("Generating batch")
        data_batch = detector.create_batch(frame)
        logging.info("Detecting objects")
        detections_batch = detector.detect_batch(data_batch)
        #detections = [mx.nd.array((1,1,0.2,0.2,0.4,0.4))]
        detections = detections_batch[0]
        logging.info("%d detections", len(detections))
        for det in detections:
            obj = det.asnumpy()
            (klass, score, x0, y0, x1, y1) = obj
            if score > args.thresh:
                draw_detection(frame, obj, class_names)
        cv2.imshow('frame', frame)
        cv2.waitKey(1)  # without a waitKey call, HighGUI never repaints the window
Example 26
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: rl_data.py    License: Apache License 2.0
def visual(X, show=True):
    X = X.transpose((0, 2, 3, 1))
    N = X.shape[0]
    n = int(math.ceil(math.sqrt(N)))
    h = X.shape[1]
    w = X.shape[2]
    buf = np.zeros((h*n, w*n, X.shape[3]), dtype=np.uint8)
    for i in range(N):
        x = i%n
        y = i//n
        buf[h*y:h*(y+1), w*x:w*(x+1), :] = X[i]
    if show:
        cv2.imshow('a', buf)
        cv2.waitKey(1)
    return buf 
Example 27
Project: pynvr   Author: JFF-Bohdan   File: motion_detection.py    License: BSD 3-Clause "New" or "Revised" License
def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (21, 21), 0)

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        frameDiff = cv.absdiff(gray, self.prevFrame)

        # kernel = np.ones((5, 5), np.uint8)

        opening = cv.morphologyEx(frameDiff, cv.MORPH_OPEN, None)  # noqa
        closing = cv.morphologyEx(frameDiff, cv.MORPH_CLOSE, None)  # noqa

        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        height = np.size(th1, 0)
        width = np.size(th1, 1)

        nb = cv.countNonZero(th1)

        avg = (nb * 100) / (height * width)  # percentage of non-zero (changed) pixels in the frame

        self.prevFrame = gray

        # cv.DrawContours(currentframe, self.currentcontours, (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
        # cv.imshow("frame", current_frame)

        ret = avg > self.threshold   # if the changed-pixel percentage exceeds the threshold, report motion

        if ret:
            self.updateMotionDetectionDts()

        return ret