Python cv2.destroyAllWindows() Examples

The following code examples show how to use cv2.destroyAllWindows(). They are extracted from open source Python projects.
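
For orientation, nearly every example below follows the same pattern: create a window with cv2.imshow() (or cv2.namedWindow()), pump the event loop with cv2.waitKey(), then close every HighGUI window with cv2.destroyAllWindows(). A minimal sketch of that pattern (the file name is illustrative):

import cv2

img = cv2.imread('example.jpg')  # illustrative path; any readable image works
if img is None:
    raise IOError('could not read example.jpg')
cv2.imshow('preview', img)   # create a window and show the image
cv2.waitKey(0)               # block until any key is pressed
cv2.destroyAllWindows()      # close all OpenCV HighGUI windows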

Example 1
Project: SelfDrivingCar   Author: aguijarro   File: calibration_camera.py
def get_points():

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (7,5,0)
    objp = np.zeros((6*8,3), np.float32)
    objp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8,6), None)

        # If found, add object points, image points
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8,6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints 
Example 2
Project: Mini-Projects   Author: gaborvecsei   File: Capture_Img_To_Drive.py
def CaptureImage():
	imageName = 'DontCare.jpg' #Just a random string
	cap = cv2.VideoCapture(0)
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()

	    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # To capture the image in monochrome
	    rgbImage = frame # Note: OpenCV delivers frames in BGR order, not RGB

	    # Display the resulting frame
	    cv2.imshow('Webcam',rgbImage)
	    #Wait to press 'q' key for capturing
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        #Set the image name to the date it was captured
	        imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
	        #Save the image
	        cv2.imwrite(imageName, rgbImage)
	        break
	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	#Returns the captured image's name
	return imageName 
Example 3
Project: FaceSwap   Author: Aravind-Suresh   File: main.py    (MIT License)
def videoize(func, args, src = 0, win_name = "Cam", delim_wait = 1, delim_key = 27):
    cap = cv2.VideoCapture(src)
    while(1):
        ret, frame = cap.read()
        # To speed up processing; Almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        cv2.moveWindow(win_name, (s_w - out.shape[1]) // 2, (s_h - out.shape[0]) // 2)  # centre the window; s_w, s_h are screen-size globals defined elsewhere
        k = cv2.waitKey(delim_wait)

        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return 
Example 4
Project: MultiObjectTracker   Author: alokwhitewolf   File: tracker.py
def get_fps(source, Videolength):
	cap = cv2.VideoCapture(source)
	frame_counter = 0
	print "Calculating Frames per second . . . "

	while (True):
		# Capture frame-by-frame

		ret, frame = cap.read()
		if not ret:
			break

		frame_counter += 1
		
	cap.release()
	cv2.destroyAllWindows()
	fps = float(frame_counter) / Videolength  # convert before dividing so integer division doesn't truncate
	print "\nFPS is " +str(fps)+"\n"

	return fps

#Algorithm to check intersection of line segments
#It checks iteratively intersection between a pair of points(Last location of the vehicle) and pairs of points of another List(Pedestrian path) 
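
The intersection routine described in the comment above is not part of this excerpt. For reference, a common orientation-based test for whether two 2-D segments intersect looks like the sketch below; the helper names are illustrative, not taken from the tracker project:

def ccw(a, b, c):
    # True when the turn a -> b -> c is counter-clockwise
    return (c[1] - a[1]) * (b[0] - a[0]) > (b[1] - a[1]) * (c[0] - a[0])

def segments_intersect(p1, p2, p3, p4):
    # Proper intersection: the endpoints of each segment lie on opposite
    # sides of the other segment (collinear and touching cases not handled).
    return ccw(p1, p3, p4) != ccw(p2, p3, p4) and ccw(p1, p2, p3) != ccw(p1, p2, p4)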
Example 5
Project: PyIntroduction   Author: tody411   File: video_capture.py
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        raise IOError("Capture device could not be opened")

    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)

    while True:
        ret, image = capture.read()

        if not ret:
            continue

        cv2.imshow("Capture", image)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()


# Display the webcam video with Matplotlib (next function in the source file)
Example 6
Project: MultiObjectTracker   Author: alokwhitewolf   File: main.py
def get_fps(source, Videolength):
	cap = cv2.VideoCapture("docs/video/traffic2")
	frame_counter = 0
	print "Calculating Frames per second . . . "

	while (True):
		# Capture frame-by-frame

		ret, frame = cap.read()
		if not ret:
			break

		frame_counter += 1

	cap.release()
	cv2.destroyAllWindows()
	fps = float(frame_counter) / Videolength  # convert before dividing so integer division doesn't truncate
	print "\nFPS is " +str(fps)+"\n"

	return fps

#Algorithm to check intersection of line segments
#It checks iteratively intersection between a pair of points(Last location of the vehicle) and pairs of points of another List(Pedestrian path) 
Example 7
Project: Millennium-Eye   Author: Elysium1937   File: Falafel.py    (GNU General Public License v3.0)
def main():
    NetworkTable.setIPAddress('10.19.37.2')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    sd = NetworkTable.getTable('SmartDashboard')
    #ms_list = []
    while True:
            time.sleep(0.1)
            start_time = datetime.now()

            # returns the elapsed milliseconds since the start of the program
            vision(sd)
            dt = datetime.now() - start_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            #ms_list.append(ms)
            print ms
            #print np.mean(ms_list)
            cv2.destroyAllWindows() 
Example 8
Project: Millennium-Eye   Author: Elysium1937   File: Falafel Vision Processing.py    (GNU General Public License v3.0)
def main():
    NetworkTable.setIPAddress('10.19.37.2')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    sd = NetworkTable.getTable('SmartDashboard')
    #ms_list = []
    while True:
            time.sleep(0.1)
            start_time = datetime.now()

            # returns the elapsed milliseconds since the start of the program
            vision(sd)
            dt = datetime.now() - start_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            print ms
            cv2.destroyAllWindows() 
Example 9
Project: Machine-Learning   Author: Jegathis   File: color_quantization.py
def color_quant(input,K,output):
    img = cv2.imread(input)
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)

    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2',res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows() 
Example 10
Project: piwall-cvtools   Author: infinnovation   File: piwall.py
def hdSolidBlock(fn = "redHDSolidBlock.jpg", bgr = None):
    '''Generate test images as solid blocks of colour of known size, save to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour dimensions.  Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h,w,3),dtype="uint8")
    # Want to set all of the pixels to bgr tuple, default red, 8 bit colour
    if not bgr:
        bgr = [0,0,255]
    img[:,:] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    #cv2.imshow("zeroes", frame)
    #ch = 0xff & cv2.waitKey(10000)
    #cv2.destroyAllWindows()
    cv2.imwrite(fn, img) 
Example 11
Project: logo-detect   Author: sunbinbin1991   File: train.py
def show_cut_img(img_name):
  img = cv2.imread(img_name, 0)

  cut_img = cut(img)

  cv2.imshow('cut image', cut_img)
  cv2.waitKey(0)
  cv2.destroyAllWindows()

  return cut_img
Example 12
Project: tensorflow-yolo   Author: hjimce   File: misc.py
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Example 13
Project: cv-lane   Author: kendricktan   File: EyeCanSee.py
def get_hsv(self):
        cv2.namedWindow('hsv_extractor')
        while True:
            self.grab_frame()

            # Bottom ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Top ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Object
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

            # Mouse handler
            cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
            cv2.imshow('hsv_extractor', self.img_debug)

            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        self.stop_camera()
        cv2.destroyAllWindows()

    # Starts camera (needs to be called before run) 
Example 14
Project: rekognition-video-utils   Author: awslabs   File: opencv_utils.py
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        # read every frame so the capture position actually advances
        success, image = vidcap.read()
        if not success:
            break
        # only yield a frame every inc frames (roughly every `secs` seconds)
        if count % inc == 0:
            cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if fmt == 'PIL':
                im = Image.fromarray(cv2_im)
            #elif fmt == 'DISK':
                #cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
            else:
                im = cv2_im
            yield count, im
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25,r1:r1+25] # roi = gray[y1:y2, x1:x2] 
Example 15
Project: nelpy   Author: nelpy   File: homography.py
def pick_corrs(images, n_pts_to_pick=4):
    data = [ [[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]

    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])

    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    else:
        return [d[0] for d in data] 
Example 16
Project: faceNet_RealTime   Author: jack55436001   File: facevalid_real_time.py
def main(args):

	saveFace = None;
	cap = cv2.VideoCapture(0)
	face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
	while(True):
	    # Capture frame-by-frame
	    ret, frame = cap.read()
	    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
	    if len(faces) > 0:
	        saveFace = frame
	        break
	    # Display the resulting frame
	    cv2.imshow('frame',frame)
	    if cv2.waitKey(1) & 0xFF == ord('q'):
	        break

	# When everything done, release the capture
	cap.release()
	cv2.destroyAllWindows()
	cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg',frame)
	
	mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
	onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
	myImage = []
	for file in onlyfiles:
		isImage = None
		file = mypath + '/' + file
		isImage = imghdr.what(file)
		if isImage != None:
			myImage.append(file)

	#begin facenet
	cp.main(args,myImage); 
Example 17
Project: BlurDetection   Author: whdcumt   File: main.py
def evaluate(img_col, args):
    numpy.seterr(all='ignore')
    assert isinstance(img_col, numpy.ndarray), 'img_col must be a numpy array'
    assert img_col.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img_col.ndim)
    assert isinstance(args, argparse.Namespace), 'args must be of type argparse.Namespace not {0}'.format(type(args))
    img_gry = cv2.cvtColor(img_col, cv2.COLOR_RGB2GRAY)
    rows, cols = img_gry.shape
    crow, ccol = rows // 2, cols // 2  # integer centre so the slice bounds below stay ints
    f = numpy.fft.fft2(img_gry)
    fshift = numpy.fft.fftshift(f)
    fshift[crow-75:crow+75, ccol-75:ccol+75] = 0
    f_ishift = numpy.fft.ifftshift(fshift)
    img_fft = numpy.fft.ifft2(f_ishift)
    img_fft = 20*numpy.log(numpy.abs(img_fft))
    if args.display and not args.testing:
        cv2.destroyAllWindows()
        scripts.display('img_fft', img_fft)
        scripts.display('img_col', img_col)
        cv2.waitKey(0)
    result = numpy.mean(img_fft)
    return img_fft, result, result < args.thresh 
Example 18
Project: party-pi   Author: JustinShenk   File: play.py
def end_game(self):
        """ When everything is done, release the capture.

        """
        if not self.piCam:
            self.cam.release()
            quit_coord = (self.screenwidth // 4, self.screenheight // 3)
            try:
                draw_text(quit_coord, self.photo,
                          "Press any key to quit_", font_scale=1)
            except AttributeError:
                cv2.destroyAllWindows()
            # self.presentation(frame)
            # self.photo = self.overlayUI(self.photo)
        else:
            self.piCamera.close()

        cv2.imshow("PartyPi", self.photo)

        cv2.waitKey(0)
        cv2.destroyAllWindows() 
Example 19
Project: pycreate2   Author: MomsFriendlyRobotCompany   File: test.py
def read():
    db = shelve.open(filename)
    imgs = db['imgs']
    data = db['data']

    for i in range(len(imgs)):
        d = data[i]
        print(i, d)
        img = imgs[i]
        img = np.fromstring(img, np.uint8)
        frame = cv2.imdecode(img, 1)
        print('frame[{}] {}'.format(i, frame.shape))
        cv2.imshow('camera', frame)
        cv2.waitKey(300)

    print('bye ...')
    cv2.destroyAllWindows()
    db.close() 
Example 20
Project: zed-python   Author: stereolabs   File: live_camera.py
def record(cam, runtime, mat):
    vid = tp.PyERROR_CODE.PyERROR_CODE_FAILURE
    out = False
    while vid != tp.PyERROR_CODE.PySUCCESS and not out:
        filepath = input("Enter filepath name: ")
        vid = cam.enable_recording(filepath)
        print(repr(vid))
        if vid == tp.PyERROR_CODE.PySUCCESS:
            print("Recording started...")
            out = True
            print("Hit spacebar to stop recording: ")
            key = False
            while key != 32:  # for spacebar
                err = cam.grab(runtime)
                if err == tp.PyERROR_CODE.PySUCCESS:
                    cam.retrieve_image(mat)
                    cv2.imshow("ZED", mat.get_data())
                    key = cv2.waitKey(5)
                    cam.record()
        else:
            print("Help: you must enter the filepath + filename + SVO extension.")
            print("Recording not started.")
    cam.disable_recording()
    print("Recording finished.")
    cv2.destroyAllWindows() 
Example 21
Project: apparent-age-gender-classification   Author: danielyou0230   File: Modules.py
def debug_face_classifier(file):
	face_cascade = cv2.CascadeClassifier(xml_face_classifier)
	image = cv2.imread(file)
	
	image = imutils.resize(image, width=500)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	faces = face_cascade.detectMultiScale(gray, 1.07, 3)  # detect on the grayscale image prepared above
	print faces
	for (x, y, w, h) in faces:
		cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
		#roi_gray = gray[y:y+h, x:x+w]
		#roi_color = image[y:y+h, x:x+w]

	cv2.imshow('Image', image)
	cv2.waitKey(0)
	cv2.destroyAllWindows() 
Example 22
Project: apparent-age-gender-classification   Author: danielyou0230   File: Modules.py
def debug_face_landmark(file, output=False, output_name='output'):
	detector = dlib.get_frontal_face_detector()
	predictor = dlib.shape_predictor(dat_face_landmark)

	image = cv2.imread(file)
	image = imutils.resize(image, width=500)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	img_size = gray.shape

	faces = detector(gray, 1)
	for (i, itr_face) in enumerate(faces):
		shape = predictor(gray, itr_face)
		shape = shape_to_np(shape)
		# convert dlib's rectangle to a OpenCV-style bounding box
		# [i.e., (x, y, w, h)], then draw the face bounding box
		(x, y, w, h) = rect_to_bb(itr_face, img_size, file)
		#print "landmark: ({:d}, {:d}) ({:d}, {:d})".format(x, y, w, h)
		
		cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
		# show the face number
		cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
	 
		# loop over the (x, y)-coordinates for the facial landmarks
		# and draw them on the image
		for (x, y) in shape:
			cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

	# show the output image with the face detections + facial landmarks
	cv2.imshow(file, image)
	cv2.waitKey(0)
	if output:
		cv2.imwrite("../" + str(output_name + 1) + '.jpg', image)
	cv2.destroyAllWindows() 
Example 23
Project: FacePoseEstimation   Author: abhisharma7   File: facepose_detection.py
def image(self):

        img = cv2.imread(self.image_path)
        img = imutils.resize(img,width=min(800,img.shape[1]))
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray,(21,21),0)
        fullbody = self.HogDescriptor(gray)
        for (x,y,w,h) in fullbody:
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)

        faces = self.haar_facedetection(gray)
        for (x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = img[y:y+h, x:x+w]
            eyes = self.haar_eyedetection(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh), (0,255,0),2) 
            smile = self.haar_smilecascade(roi_gray)
            for (sx,sy,sw,sh) in smile:
                cv2.rectangle(roi_color, (sx,sy), (sx+sw,sy+sh),(0,255,0),2)
        img = self.dlib_function(img)
        cv2.imshow('img',img)
        cv2.waitKey(0) 
        cv2.destroyAllWindows() 
Example 24
Project: single_shot_multibox_detector   Author: oarriaga   File: video_demo.py
def start_video(self, model):
        camera = cv2.VideoCapture(0)
        while True:
            frame = camera.read()[1]
            if frame is None:
                continue
            image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image_array = cv2.resize(image_array, (300, 300))
            image_array = substract_mean(image_array)
            image_array = np.expand_dims(image_array, 0)
            predictions = model.predict(image_array)
            detections = detect(predictions, self.prior_boxes)
            plot_detections(detections, frame, 0.6,
                            self.arg_to_class, self.colors)
            cv2.imshow('webcam', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        camera.release()
        cv2.destroyAllWindows() 
Example 25
Project: nnp   Author: dribnet   File: fitting.py
def do_key_press(symbol, modifiers):
    global cur_vector
    print("SO: {}".format(symbol))
    if(symbol == key.R):
        if theApp.use_camera:
            theApp.set_camera_recording(not theApp.camera_recording)
    if(symbol == key.T):
        theApp.show_camera = not theApp.show_camera
    elif(symbol == key.SPACE):
        print("SPACEBAR")
        snapshot(None);
    elif(symbol == key.ESCAPE):
        print("ESCAPE")
        cv2.destroyAllWindows()
        if theApp.use_camera:
            cv2.VideoCapture(0).release()
        sys.exit(0) 
Example 26
Project: LogoDetectionInVideo   Author: nmemme   File: svm_video.py
def test(path):
	cap = cv2.VideoCapture(path)  # use the path argument
	testing = []
	while True:
		ret, frame = cap.read()
		if not ret:  # stop at the end of the video
			break
		res = cv2.resize(frame, (250, 250))
		gray_image = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
		xarr = np.squeeze(np.array(gray_image).astype(np.float32))
		m, v = cv2.PCACompute(xarr)
		arr = np.array(v)
		flat_arr = arr.ravel()
		testing.append(flat_arr)
	cap.release()
	cv2.destroyAllWindows()
	logos = svm.predict(testing)
	uniqlogos = list(set(logos))
	for i in uniqlogos:
		print(i)
Example 27
Project: action-recoginze   Author: WeiruZ   File: k-means_video.py
def cluster(frame_matrix):
    new_frame_matrix = []
    i = 0
    for frame in frame_matrix:
        print "reader {} frame".format(i)
        i += 1
        Z = frame.reshape((-1, 1))
        Z = np.float32(Z)

        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2

        ret, label, center = cv2.kmeans(Z, K, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((frame.shape))

        new_frame_matrix.append(res2)
        cv2.imshow('res2', res2)
        cv2.waitKey(1)
    cv2.destroyAllWindows() 
Example 28
Project: face-recognition   Author: pratush07   File: face_training.py
def face_train_video(train_path,subject,max_train,stream):
    cap = cv2.VideoCapture(stream)
    ret=True
    ctr = 0
    # minimum 10 frames/images per video 
    while(ctr < max_train):
        # read till end of frames
        ret, img = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  
        cv2.imshow("Recognizing Face", img)
        cv2.waitKey(10)
        cv2.imwrite( join(train_path,subject)+ "." + str(ctr) +".jpg",img) # writes image  to disk
        ctr = ctr + 1
    cap.release()
    cv2.destroyAllWindows()

# predict live feed 
Example 29
Project: trackingtermites   Author: dmrib   File: simulation.py
def simulate(self):
        """Displays termite trail recorded points at a black arena.

        Args:
            None.
        Returns:
            None.
        """
        self.video_source = video.VideoPlayer(self.params['original_video_path'], self.params['output_path'],
                                         self.params['arena_size'], [], True, 'MOG')
        simulation_length = min(len(x.trail) for x in self.termites)
        self.current_step = 0

        while self.current_step < simulation_length:
            self.background = np.zeros((self.params['arena_size'][1], self.params['arena_size'][0],
                                        3), np.uint8)
            self.draw()
            self.show()

            self.current_step += 1
            self.video_source.next_frame()

        cv2.destroyAllWindows() 
Example 30
Project: ghetto_omr   Author: pohzhiee   File: img_func.py
def splitimg(im_inp, n_row, n_col):
    #determine size of input image
    h_img, w_img = im_inp.shape[:2]
    #determine size of each cropped image (integer division keeps pixel counts whole)
    h_row = h_img // n_row
    w_col = w_img // n_col
    #declare fragmented image matrix
    img_frag = np.empty((n_row, n_col, h_row, w_col), dtype=np.uint8)
    #fragments input image and put it into matrix
    for i in range(0, n_row):
        h0 = h_row * i
        h1 = h_row * (i + 1)
        for j in range(0, n_col):
            w0 = w_col * j
            w1 = w_col * (j + 1)
            img_frag[i, j] = im_inp[h0:h1, w0:w1]
            #uncomment following lines for debugging to show image
            # cv2.imshow('image1', img_frag[i, j])
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
    return img_frag 
Example 31
Project: Interactive-object-tracking   Author: abhishekarya286   File: interaction_updated_global.py
def mask_bg(object_window,img) :
	''' This function outputs the surrounding pixels
	    Basically, image of background with masked target object'''
	global h_img,w_img
	x,y,w,h=object_window
	h_bg=h*2
	w_bg=2*w
	h_=0.5*h
	w_=0.5*w
	x_bg=int(max(x-(w_),0))
	y_bg=int(max(y-(h_),0))
	x_bg1=int(min(x_bg+w_bg,w_img-1))
	y_bg1=int(min(y_bg+h_bg,h_img-1))
	img[y:y+h,x:x+w]=0
	#print object_window
	#print x_bg,y_bg,x_bg1,y_bg1,img.shape
	bg_img=img[y_bg:y_bg1,x_bg:x_bg1]
	#cv2.imshow("masked_background",bg_img)
	#cv2.waitKey(0)
	#cv2.destroyAllWindows()
	return bg_img 
Example 32
Project: Interactive-object-tracking   Author: abhishekarya286   File: lab_global_optimisation.py
def mask_bg(object_window,img) :
	''' This function outputs the surrounding pixels
	    Basically, image of background with masked target object'''
	global h_img,w_img
	x,y,w,h=object_window
	h_bg=h*2
	w_bg=2*w
	h_=0.5*h
	w_=0.5*w
	x_bg=int(max(x-(w_),0))
	y_bg=int(max(y-(h_),0))
	x_bg1=int(min(x_bg+w_bg,w_img-1))
	y_bg1=int(min(y_bg+h_bg,h_img-1))
	img[y:y+h,x:x+w]=0
	#print object_window
	#print x_bg,y_bg,x_bg1,y_bg1,img.shape
	bg_img=img[y_bg:y_bg1,x_bg:x_bg1]
	#cv2.imshow("masked_background",bg_img)
	#cv2.waitKey(0)
	#cv2.destroyAllWindows()
	return bg_img 
Example 33
Project: AVSR-Deep-Speech   Author: pandeydivesh15   File: image_handler.py    (GNU General Public License v2.0)
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
	"""Helper function to visualize and save any image"""
	image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
	image = image.astype(np.uint8)

	if resize: 
		image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))

	cv2.imshow(name, image)
	if cv2.waitKey(0) & 0xFF == ord('q'):
		cv2.destroyAllWindows()

	if save_image:
		assert path is not None
		cv2.imwrite(path, image) 
Example 34
Project: SudokuSolver   Author: Anve94   File: helper_functions.py
def image_preview(image):
    cv2.imshow('Image preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 35
Project: SudokuSolver   Author: Anve94   File: helper_functions.py
def display_solution(square_borders, start_grid, solution, image):
    """ Writes the solution to an image and displays said image.
        Params:
            square_borders  -- A list containing the borders of all squares
            start_grid      -- A list containing the sudoku starting values
            solution        -- A list containing the sudoku solution
            image           -- The image to write to """
    cur_row = 0
    cur_col = 0
    for i, b in enumerate(square_borders):
        x, y, x2, y2 = b  # Tuple unpacking
        # Calculate bottom-left position for text (integer pixel coordinates for cv2.putText)
        text_x, text_y = ((x2 + x) // 2) - 10, ((y2 + y) // 2) + 10
        # Bottom-left corner for text position
        org = (text_x, text_y)
        # Only write text if the position was not set in the start_grid
        if start_grid[cur_row][cur_col] == 0:
            value = str(solution[cur_row][cur_col])
            cv2.putText(
                img=image,
                text=value,
                org=org,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                color=(0, 255, 0),
                thickness=2)
        cur_col += 1
        if cur_col % 9 == 0:
            cur_row += 1
            cur_col = 0

    cv2.imshow('Solution', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 36
Project: moVi   Author: netsecIITK   File: webcam.py    (Apache License 2.0)
def close(self):
        self.cap.release()
        cv2.destroyAllWindows()
        print("Closing camera") 
Example 37
Project: moVi   Author: netsecIITK   File: frame.py    (Apache License 2.0)
def close(self):
        cv2.destroyAllWindows()
        print("Closing window") 
Example 38
Project: opencv-gui-helper-tool   Author: maunesh   File: find_edges.py    (MIT License)
def main():
    parser = argparse.ArgumentParser(description='Visualizes the line for hough transform.')
    parser.add_argument('filename')

    args = parser.parse_args()

    img = cv2.imread(args.filename, cv2.IMREAD_GRAYSCALE)

    cv2.imshow('input', img)

    edge_finder = EdgeFinder(img, filter_size=13, threshold1=28, threshold2=115)

    print "Edge parameters:"
    print "GaussianBlur Filter Size: %f" % edge_finder.filterSize()
    print "Threshold1: %f" % edge_finder.threshold1()
    print "Threshold2: %f" % edge_finder.threshold2()

    (head, tail) = os.path.split(args.filename)

    (root, ext) = os.path.splitext(tail)

    smoothed_filename = os.path.join("output_images", root + "-smoothed" + ext)
    edge_filename = os.path.join("output_images", root + "-edges" + ext)

    cv2.imwrite(smoothed_filename, edge_finder.smoothedImage())
    cv2.imwrite(edge_filename, edge_finder.edgeImage())

    cv2.destroyAllWindows() 
Example 39
Project: LensCalibrator   Author: 1024jp   File: createimage.py    (MIT License)
def show_image(image, scale=1.0, window_title='Image'):
    """Display given image in a window.

    Arguments:
    image () -- Image to display.
    scale (float) -- Magnification of image.
    window_title (str) -- Title of window.
    """
    scaled_image = scale_image(image, scale)

    cv2.imshow(window_title, scaled_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 40
Project: kaggle-review   Author: daxiongshu   File: cv_util.py
def show_image(im, name='image'):
    cv2.imshow(name, im)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 41
Project: SudokuVisionSolver   Author: tusharsircar95   File: main.py
def showImage(img,caption='image'):
	cv2.imshow(caption,img)
	cv2.waitKey(0)
	cv2.destroyAllWindows()

# Matches a template of cross to detect inner grid lines and then removes them via flood filling 
Example 42
Project: deep-prior   Author: moberweger   File: realtimehandposepipeline.py    (GNU General Public License v3.0)
def threadConsumer(self):
        """
        Thread that consumes the frames, estimate the pose and display
        :return: None
        """
        
        while True:
            if self.stop.value:
                break
            try:
                frm = self.queue.get(block=False)
            except:
                if not self.stop.value:
                    continue
                else:
                    break

            startp = time.time()
            pose = self.estimatePose(frm['crop']) * self.config['cube'][2]/2. + frm['com3D']
            print("{}ms pose".format((time.time() - startp)*1000.))

            # Display the resulting frame
            starts = time.time()
            img = self.show(frm['frame'], pose, frm['M'])
            img = self.addStatusBar(img)
            cv2.imshow('frame', img)
            self.lastshow = time.time()
            self.processKey(cv2.waitKey(1) & 0xFF)
            print("{}ms display".format((time.time() - starts)*1000.))

        cv2.destroyAllWindows()
        print "Exiting consumer..."
        return True 
Example 43
Project: mtcnn   Author: daikankan   File: show_label.py
def show_bbox_landmark(list_file, path_data):
  with open(list_file, 'r') as f:
    annotations = f.readlines()
  num = len(annotations)
  print "%d pics in total" % num
  # random.shuffle(annotations)

  for line in annotations:
    line_split = line.strip().split(' ')
    print line_split[0]
    path_full = os.path.join(path_data, line_split[0])
    datum = cv2.imread(path_full)
    classes = float(line_split[1])
    bbox = [float(x) for x in line_split[2:6]]
    landmarks = [float(x) for x in line_split[6:]]
    print classes
    print bbox
    print landmarks

    (h, w, c) = datum.shape

    if (bbox[0] != -1):
      x1 = bbox[0] * w
      y1 = bbox[1] * h
      x2 = bbox[2] * w + w
      y2 = bbox[3] * h + h
      cv2.rectangle(datum, (int(x1), int(y1)), (int(x2), int(y2)),
                    (0, 255, 0), 1)

    if (landmarks[0] != -1):
      for i in range(5):
        cv2.circle(datum, (int(landmarks[i] * w), int(landmarks[i + 5] * h)),
                   2, (255, 0, 0))
    cv2.imshow(str(line_split[0]), datum)
    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example 44
Project: garden.facelock   Author: kivy-garden   File: __init__.py
def face_recognize(self):
        cap = cv2.VideoCapture(self.index)
        
        face_cascade = cv2.CascadeClassifier(self.cascade)
        '''
        face_cascade: cascade is entered here for further use.
        '''

        while(True):
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            '''
            Converts coloured video to black and white(Grayscale).
            '''
            if np.any(face_cascade.detectMultiScale(gray, 1.3, 5)):
                
                print("Cascade found")
                
                self.dispatch('on_match')
                
                cv2.destroyAllWindows()
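                # a few extra waitKey(1) calls give HighGUI time to actually close the window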
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
            
            else:
                print("Not recognized")

            cv2.imshow('frame', frame)
            #Comment the above statement not to show the camera screen
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("Forcefully Closed")

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)
                break
        cap.release() 
Example 45
Project: SummerProject_MacularDegenerationDetection   Author: WDongYuan   File: UpperBoundary.py
def ToGrayImage(path):
	image = cv2.imread(path)
	gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	# cv2.imwrite('gray_image.jpg',gray_image)
	# cv2.imshow('color_image',image)
	# cv2.imshow('gray_image',gray_image)
	# cv2.waitKey(0)                 # Waits forever for user to press any key
	# cv2.destroyAllWindows()        # Closes displayed windows
	return gray_image 
Example 46
Project: SummerProject_MacularDegenerationDetection   Author: WDongYuan   File: EdgeDetection.py
def ToGrayImage(path):
	image = cv2.imread(path)
	gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	# cv2.imwrite('gray_image.jpg',gray_image)
	# cv2.imshow('color_image',image)
	# cv2.imshow('gray_image',gray_image)
	# cv2.waitKey(0)                 # Waits forever for user to press any key
	# cv2.destroyAllWindows()        # Closes displayed windows
	return gray_image 
Example 47
Project: face   Author: MOluwole   File: face_capture.py
def __init__(self, matric_num):
        WHITE = [255, 255, 255]

        face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
        eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')

        ID = NameFind.AddName(matric_num)
        Count = 0
        cap = cv2.VideoCapture(0)  # Camera object
        self.__trainer__ = None

        if not os.path.exists('dataSet'):
            os.makedirs('dataSet')

        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the Camera to grayScale
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
            for (x, y, w, h) in faces:  # Frames  LOCATION X, Y  WIDTH, HEIGHT
                FaceImage = gray[y - int(h / 2): y + int(h * 1.5),
                            x - int(x / 2): x + int(w * 1.5)]  # The Face is isolated and cropped
                Img = (NameFind.DetectEyes(FaceImage))
                cv2.putText(gray, "FACE DETECTED", (x + (w / 2), y - 5), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)
                if Img is not None:
                    frame = Img  # Show the detected faces
                else:
                    frame = gray[y: y + h, x: x + w]
                cv2.imwrite("dataSet/" + matric_num.replace('/', '') + "." + str(ID) + "." + str(Count) + ".jpg", frame)
                Count = Count + 1
                # cv2.waitKey(300)
                cv2.imshow("CAPTURED PHOTO", frame)  # show the captured image
            cv2.imshow('Face Recognition System Capture Faces', gray)  # Show the video
            if Count == 150:
                Trainer()
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        print 'FACE CAPTURE FOR THE SUBJECT IS COMPLETE'
        cap.release()
        cv2.destroyAllWindows() 
Example 48
Project: face   Author: MOluwole   File: recognizer.py
def __init__(self):

        face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
        eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')

        recognise = cv2.face.createEigenFaceRecognizer(15, 4000)  # creating EIGEN FACE RECOGNISER
        recognise.load("Recogniser/trainingDataEigan.xml")  # Load the training data

        # -------------------------     START THE VIDEO FEED ------------------------------------------
        cap = cv2.VideoCapture(0)  # Camera object
        # cap = cv2.VideoCapture('TestVid.wmv')   # Video object
        ID = 0
        while True:
            ret, img = cap.read()  # Read the camera object
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the Camera to gray
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
            for (x, y, w, h) in faces:  # Frames  LOCATION X, Y  WIDTH, HEIGHT
                # ------------ BY CONFIRMING THE EYES ARE INSIDE THE FACE BETTER FACE RECOGNITION IS GAINED ------------------
                gray_face = cv2.resize((gray[y: y + h, x: x + w]), (110, 110))  # The Face is isolated and cropped
                eyes = eye_cascade.detectMultiScale(gray_face)
                for (ex, ey, ew, eh) in eyes:
                    ID, conf = recognise.predict(gray_face)  # Determine the ID of the photo
                    NAME = NameFind.ID2Name(ID, conf)
                    NameFind.DispID(x, y, w, h, NAME, gray)
            cv2.imshow('EigenFace Face Recognition System', gray)  # Show the video
            if cv2.waitKey(1) & 0xFF == ord('q'):  # Quit if the key is Q
                break
        cap.release()
        cv2.destroyAllWindows() 
Example 49
Project: Glidr   Author: muinmomin   File: gesture.py
def start_detect_hand(gesture_call_back=None):
    #Open Camera object
    cap = cv2.VideoCapture(0)
    #fire_img = cv2.imread('./fire.png')
    #Decrease frame size
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, frameX)
    cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, frameY)
    pink_lower_bound = [160,50,160]
    pink_upper_bound = [180,255,255]
    yellow_lower_bound = [25, 100, 120]
    yellow_upper_bound = [155, 255, 255]
    
    while(True):
    
        ret, frame = cap.read()
        #print frame[frame.shape[0]/2][frame.shape[1]/2]
        # skin color [2,50,50], [15,255,255]
        # pink color [160,50,160], [180,255,255]
        # green color [50, 100, 100], [70, 255, 255]
        
        pink_cnts = get_contour(frame=frame, lower_bound=pink_lower_bound, upper_bound=pink_upper_bound)
        yellow_cnts = get_contour(frame=frame, lower_bound=yellow_lower_bound, upper_bound=yellow_upper_bound)        
        
        
        pink_center_mass, pink_radius = augment_graph(frame=frame, contour=pink_cnts)
        yellow_center_mass, yellow_radius = augment_graph(frame=frame, contour=yellow_cnts)
        #cv2.imshow('Gesture',frame)
        if gesture_call_back:
            gesture_call_back(pink_center_mass, pink_radius, yellow_center_mass, yellow_radius)
        #close the output video by pressing 'ESC'
        k = cv2.waitKey(2) & 0xFF
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows() 
Example 50
Project: PyIntroduction   Author: tody411   File: display_image.py
def cvShowImageColor(image_file):
    image_bgr = cv2.imread(image_file)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', image_bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Grayscale image display with OpenCV (next function in the source file)