Python cv2.getTickFrequency() Examples

The following are 30 code examples of cv2.getTickFrequency(), the OpenCV function that returns the number of ticks per second of the high-resolution counter read by cv2.getTickCount(). Dividing a difference of two tick counts by this frequency converts the interval to seconds, and that pattern is how every example below measures elapsed time. Each example notes its source file, project, and license.
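Before the examples, here is a minimal standalone sketch of the timing idiom they all share; the workload line is a placeholder:

import cv2

start = cv2.getTickCount()
result = sum(i * i for i in range(1000000))  # placeholder workload
elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency()  # ticks -> seconds
print('Elapsed: %.3fs' % elapsed)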
Example #1
Source File: h5_test.py    From keras-image-segmentation with MIT License
def make_h5py():
    x_train_paths, y_train_paths = get_data('train')
    x_val_paths, y_val_paths = get_data('val')
    x_test_paths, y_test_paths = get_data('test')

    h5py_file = h5py.File(os.path.join(dir_path, 'data.h5'), 'w')
    
    start = cv2.getTickCount()
    write_data(h5py_file, 'train', x_train_paths, y_train_paths)
    elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency()  # ticks -> seconds
    print('parsing train data, Time:%.3fs' % elapsed)

    start = cv2.getTickCount()
    write_data(h5py_file, 'val', x_val_paths, y_val_paths)
    elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print('parsing val data, Time:%.3fs' % elapsed)

    start = cv2.getTickCount()
    write_data(h5py_file, 'test', x_test_paths, y_test_paths)
    elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print('parsing test data, Time:%.3fs' % elapsed)

    h5py_file.close()  # flush and close the HDF5 file
Example #2
Source File: webcam_gui.py    From PyCV-time with MIT License
def webcam_gui(filter_func, video_src=0):

    cap = cv2.VideoCapture(video_src)
    key_code = -1
    
    while key_code == -1:
        t = cv2.getTickCount()
        # read a frame and time the capture
        ret, frame = cap.read()
        if not ret:  # camera returned no frame
            break
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - t)  # capture-only frame rate
        print("Frame rate: " + str(fps))
        
        # run filter with the arguments
        frame_out = filter_func(frame)
        
        # show the image
        cv2.imshow('Press any key to exit', frame_out)
        
        # wait for the key
        key_code = cv2.waitKey(10)

    cap.release()
    cv2.destroyAllWindows() 
Example #3
Source File: predict.py    From fine-tuning with GNU General Public License v3.0
def predict(name):
    frame = cv.imread(name)
    blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)

    # Sets the input to the network
    net.setInput(blob)

    # Runs the forward pass to get output of the output layers
    outs = net.forward(getOutputsNames(net))

    # Remove the bounding boxes with low confidence
    boxes1 = postprocess(frame, outs)
    # Put efficiency information. getPerfProfile returns the overall
    # inference time (t) and the per-layer timings (layersTimes), both in ticks.
    t, _ = net.getPerfProfile()
    #label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    #cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    cv.imwrite("prediction.jpg",frame)
    return boxes1
Example #4
Source File: yolo3.py    From ai-smarthome with BSD 2-Clause "Simplified" License
def detect(self, frame):
        fconv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(fconv)
        # Create a 4D blob from a frame.
        blob = cv2.dnn.blobFromImage(frame, 1/255, (YoloV3.inpWidth, YoloV3.inpHeight), [0,0,0], 1, crop=False)
        # Sets the input to the network
        self.net.setInput(blob)
        # Runs the forward pass to get output of the output layers
        outs = self.net.forward(self.getOutputsNames())
        # Remove the bounding boxes with low confidence
        detection = self.postprocess(frame, outs, self.colors)

        # Put efficiency information. getPerfProfile returns the overall
        # inference time (t) and the per-layer timings (layersTimes), both in ticks.
        if self.drawPerformance:
            t, _ = self.net.getPerfProfile()
            label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
            cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))

        return detection 
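One detail shared by Examples #3 and #4: net.getPerfProfile() reports its overall inference time in clock ticks, not seconds, so the conversion to milliseconds works like this (a standalone restatement of the line used above):

t_ticks, _ = net.getPerfProfile()               # total inference time, in ticks
ms = t_ticks * 1000.0 / cv2.getTickFrequency()  # ticks / (ticks per second) * 1000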
Example #5
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
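This one-liner converts the raw tick count into a timestamp in seconds, so intervals can be taken by simple subtraction. A hypothetical usage sketch (process_frame and frame are placeholders, not from the project):

t0 = clock()
process_frame(frame)  # hypothetical workload, not part of the project
print('processing took %.1f ms' % ((clock() - t0) * 1000.0))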
Example #6
Source File: helpers.py    From vidpipe with GNU General Public License v3.0
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #7
Source File: timer.py    From pyslam with GNU General Public License v3.0
def pause(self): 
        now_time = cv2.getTickCount()
        self._accumulated += (now_time - self._start_time)/cv2.getTickFrequency() 
        self._is_paused = True 
Example #8
Source File: timer.py    From pyslam with GNU General Public License v3.0
def elapsed(self):
        if self._is_paused:
            self._elapsed = self._accumulated
        else:
            now = cv2.getTickCount()
            self._elapsed = self._accumulated + (now - self._start_time) / cv2.getTickFrequency()
        if self._is_verbose:
            name = self._name
            if self._is_paused:
                name += ' [paused]'
            message = 'Timer::' + name + ' - elapsed: ' + str(self._elapsed)
            timer_print(message)
        return self._elapsed 
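Examples #7 and #8 are two methods of the same Timer class; the _start_time, _accumulated, _is_paused, and _is_verbose attributes they rely on are initialized elsewhere in it. A minimal self-contained sketch of how such a pausable timer fits together (an illustrative reconstruction, not pyslam's actual code):

import cv2

class SimpleTimer:
    def __init__(self):
        self._accumulated = 0.0               # seconds accrued before the last pause
        self._is_paused = False
        self._start_time = cv2.getTickCount()

    def pause(self):
        if not self._is_paused:
            now = cv2.getTickCount()
            self._accumulated += (now - self._start_time) / cv2.getTickFrequency()
            self._is_paused = True

    def resume(self):
        if self._is_paused:
            self._start_time = cv2.getTickCount()
            self._is_paused = False

    def elapsed(self):
        # seconds of running time, excluding paused intervals
        if self._is_paused:
            return self._accumulated
        now = cv2.getTickCount()
        return self._accumulated + (now - self._start_time) / cv2.getTickFrequency()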
Example #9
Source File: common.py    From pi-tracking-telescope with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #10
Source File: common.py    From pi-tracking-telescope with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #11
Source File: common.py    From pi-tracking-telescope with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #12
Source File: common.py    From CameraCalibration with GNU General Public License v3.0
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #13
Source File: common.py    From TecoGAN with Apache License 2.0
def clock():
    return cv.getTickCount() / cv.getTickFrequency() 
Example #14
Source File: common.py    From how_do_drones_work with BSD 3-Clause "New" or "Revised" License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #15
Source File: common.py    From ImageAnalysis with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #16
Source File: test_webcam.py    From face_landmark_dnn with MIT License
def webcam_main():
    print("Camera sensor warming up...")
    vs = cv2.VideoCapture(0)
    time.sleep(2.0)

    mark_detector = MarkDetector()
    
    # loop over the frames from the video stream
    while True:
        ok, frame = vs.read()
        if not ok:  # camera returned no frame
            break
        start = cv2.getTickCount()

        frame = imutils.resize(frame, width=750, height=750)
        frame = cv2.flip(frame, 1)
        faceboxes = mark_detector.extract_cnn_facebox(frame)

        if faceboxes is not None:
            for facebox in faceboxes:
                # Detect landmarks from a 64x64 grayscale crop of the face.
                face_img = frame[facebox[1]: facebox[3],
                                    facebox[0]: facebox[2]]
                # cv2.rectangle(frame, (facebox[0], facebox[1]), (facebox[2], facebox[3]), (0, 255, 0), 2)
                face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
                face_img0 = face_img.reshape(1, CNN_INPUT_SIZE, CNN_INPUT_SIZE, 1)

                land_start_time = time.time()
                marks = mark_detector.detect_marks_keras(face_img0)
                # marks *= 255
                marks *= facebox[2] - facebox[0]
                marks[:, 0] += facebox[0]
                marks[:, 1] += facebox[1]
                # Draw Predicted Landmarks
                mark_detector.draw_marks(frame, marks, color=(255, 255, 255), thick=2)

        fps_time = (cv2.getTickCount() - start) / cv2.getTickFrequency()
        cv2.putText(frame, '%.1ffps' % (1 / fps_time), (frame.shape[1] - 65, 15), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0))
        # show the frame
        cv2.imshow("Frame", frame)
        # writer.write(frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.release()  # cv2.VideoCapture exposes release(), not stop()
Example #17
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #18
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #19
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #20
Source File: inpainting.py    From open_model_zoo with Apache License 2.0
def infer(self, image, mask):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_names[0]: image, self._input_layer_names[1]: mask})
        self.infer_time = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
        return output[self._output_layer_name] 
Example #21
Source File: estimator.py    From open_model_zoo with Apache License 2.0
def _infer(self, prep_img):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_name: prep_img})
        self.infer_time = ((cv2.getTickCount() - t0) / cv2.getTickFrequency())
        return output[self._output_layer_name][0] 
Example #22
Source File: detector.py    From open_model_zoo with Apache License 2.0
def _infer(self, prep_img):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_name: prep_img})
        self.infer_time = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
        return output 
Example #23
Source File: detector.py    From open_model_zoo with Apache License 2.0
def infer(self, image):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_name: image})
        self.infer_time = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
        return output 
Example #24
Source File: common.py    From OpenCV-Snapchat-DogFilter with BSD 3-Clause "New" or "Revised" License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #25
Source File: query_aimsim_images.py    From crossgap_il_rl with GNU General Public License v2.0
def capture_image(self):
        t_start = cv2.getTickCount()
        responses = self.client.simGetImages([
            airsim.ImageRequest("0", airsim.ImageType.DepthVis),  # depth visualization image
            airsim.ImageRequest("1", airsim.ImageType.DepthPerspective, True),  # depth in perspective projection
            airsim.ImageRequest("1", airsim.ImageType.Scene)])  # scene vision image in png format
        # print('Retrieved images: %d', len(responses))

        for response in responses:
            if response.pixels_as_float:
                self.img_f_raw = img_tools.process_float_img(response)
                self.img_f = img_tools.float_img_to_display(self.img_f_raw)
                # img_f = img_tools.displat_float_img( img_tools.process_float_img(response))
                # cv2.imshow("img_float", img_tools.displat_float_img(img_f))
            elif response.compress:  # png format
                self.img_png = img_tools.process_compress_img(response)
                # cv2.imshow("img_png", img_png)
                pass
            else:  # uncompressed array
                self.img_rgba = img_tools.process_rgba_img(response)
                # cv2.imshow("img_rgba", img_rgba)
        try:
            self.img_f = np.uint8(self.img_f)
            self.img_f_rgb = cv2.cvtColor(self.img_f, cv2.COLOR_GRAY2RGB)
            self.img_combi = np.concatenate((self.img_png, 255 - self.img_f_rgb), axis=0)
            # print(vis.shape)
            # cv2.imshow("image", self.img_combi)

        except Exception as e:
            print(e)
            # print(img_f_rgb.shape, img_png.shape)
            pass
        # print("Query image cost time = %.2f" % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))
        return self.img_combi
        # cv2.waitKey(1) 
Example #26
Source File: common.py    From OpenCV-Python-Tutorial with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #27
Source File: callbacks.py    From keras-image-segmentation with MIT License
def train_visualization_seg(self, model, epoch, path):
        # image_name_list = sorted(glob(os.path.join(self.flag.data_path,'val/IMAGE/*/frankfurt_000000_014480_leftImg8bit.png')))
        # print (image_name_list)

        image_name = path #'./result/frankfurt_000000_014480_leftImg8bit.png'
        image_height = self.flag.image_height
        image_width = self.flag.image_width
        
        imgInput = cv2.imread(image_name, self.flag.color_mode)
        imgInput = cv2.cvtColor(imgInput, cv2.COLOR_BGR2RGB)
        output_path = self.flag.output_dir
        input_data = imgInput.reshape((1,image_height,image_width,self.flag.color_mode*2+1))

        t_start = cv2.getTickCount()
        result = model.predict(input_data, 1)
        t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
        print ("[*] Predict Time: %.3f ms"%t_total)
        
        imgMask = (result[0]*255).astype(np.uint8)
        imgShow = cv2.cvtColor(imgInput, cv2.COLOR_RGB2BGR).copy()
        #cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
        # imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
        imgMaskColor = imgMask
        imgShow = cv2.addWeighted(imgShow, 0.5, imgMaskColor, 0.6, 0.0)
        output_path = os.path.join(self.flag.output_dir, '%04d_'%epoch+os.path.basename(image_name))
        mask_path = os.path.join(self.flag.output_dir, 'mask_%04d_'%epoch+os.path.basename(image_name))
        cv2.imwrite(output_path, imgShow)
        cv2.imwrite(mask_path, imgMaskColor)
        # print "SAVE:[%s]"%output_path
        # cv2.imwrite(os.path.join(output_path, 'img%04d.png'%epoch), imgShow)
        # cv2.namedWindow("show", 0)
        # cv2.resizeWindow("show", 800, 800)
        # cv2.imshow("show", imgShow)
        # cv2.waitKey(1) 
Example #28
Source File: common.py    From MachineLearning with Apache License 2.0
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #29
Source File: common.py    From Traffic-Sign-Detection with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #30
Source File: run.py    From KCF-DSST-py with MIT License
def tracker(cam, frame, bbox):
    tracker = KCFTracker(True, True, True) # (hog, fixed_Window, multi_scale)
    tracker.init(bbox, frame)
    
    while True:
        ok, frame = cam.read()
        if not ok:  # stop when the stream ends
            break

        timer = cv2.getTickCount()
        bbox = tracker.update(frame)
        bbox = list(map(int, bbox))
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Tracking success
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)

        # Put FPS
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cam.release()
    cv2.destroyAllWindows()