Python cv2.getTickCount() Examples

The following are 30 code examples of cv2.getTickCount(), collected from open-source projects. The source file, project, and license are listed above each example. You may also want to check out all available functions and classes of the cv2 module.
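cv2.getTickCount() returns the number of clock ticks elapsed since a platform-dependent reference event (for example, system startup), and cv2.getTickFrequency() returns the number of ticks per second, so dividing a tick difference by the frequency converts it to seconds. A minimal timing sketch (the timed section is only a placeholder):

import cv2

t0 = cv2.getTickCount()
# ... code to be timed goes here ...
elapsed = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
print('elapsed: %.3f s' % elapsed)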
Example #1
Source File: cross_grap_env.py    From crossgap_il_rl with GNU General Public License v2.0
def replan(self):
        print(colorize("===== Replan  =====","red"))

        if self.game_mode == "with_bias":
            start_pos = self.current_pos
        else:
            start_pos = self.current_pos + self.pos_bias
        start_pos = self.start_pos
        self.narrow_gap.approach_trajectory(start_pos,
                                       self.current_spd,
                                       self.current_acc, if_draw=0)

        self.narrow_gap.cross_ballistic_trajectory(if_draw=0)

        self.approach_time = self.narrow_gap.Tf
        self.cross_time = self.narrow_gap.t_c

        final_target_rot = self.narrow_gap.get_rotation(self.approach_time + self.cross_time)
        final_target_rot = self.transform_R_world_to_ned*final_target_rot
        if (np.linalg.det(final_target_rot) == -1):
            final_target_rot[:, 1] = final_target_rot[:, 1] * -1
        self.final_rpy = transforms3d.euler.mat2euler( final_target_rot, axes = 'sxyz')

        self.trajectory_start_t = cv2.getTickCount()
Example #2
Source File: quad_rotor_controller_tf_gym.py    From crossgap_il_rl with GNU General Public License v2.0
def control(self):
        self.current_time = (cv2.getTickCount() - self.t_start) / cv2.getTickFrequency()
        self.pid_ctrl.update_target(self.target_pos.ravel().tolist()[0], self.target_spd.ravel().tolist()[0], self.target_acc.ravel().tolist()[0], target_yaw=0)
        self.pid_ctrl.update_control()
        self.gym_obs = self.env.step(np.array([self.pid_ctrl.tar_roll, self.pid_ctrl.tar_pitch, self.pid_ctrl.tar_thrust]))
        self.refresh_state()
        r, p, y = self.pid_rpy_to_air_sim_rpy(self.pid_ctrl.tar_roll, self.pid_ctrl.tar_pitch, self.pid_ctrl.tar_yaw)
        throttle = self.pid_ctrl.tar_thrust/9.8
        if (self.if_log):
            self.logger.update("time_stamp", self.current_time)
            self.logger.update("current_pos", self.current_pos)
            self.logger.update("current_spd", self.current_spd)
            self.logger.update("current_acc", self.current_acc)
            self.logger.update("current_rot", self.current_rot)

            self.logger.update("target_pos", self.target_pos)
            self.logger.update("target_spd", self.target_spd)
            self.logger.update("target_acc", self.target_acc)
            self.logger.update("target_rot", self.target_rot)

            self.logger.update("input_roll", r)
            self.logger.update("input_pitch", p)
            self.logger.update("input_yaw", y)
            self.logger.update("input_throttle", throttle) 
Example #3
Source File: h5_test.py    From keras-image-segmentation with MIT License
def make_h5py():
    x_train_paths, y_train_paths = get_data('train')
    x_val_paths, y_val_paths = get_data('val')
    x_test_paths, y_test_paths = get_data('test')

    h5py_file = h5py.File(os.path.join(dir_path, 'data.h5'), 'w')
    
    start = cv2.getTickCount()
    write_data(h5py_file, 'train', x_train_paths, y_train_paths)
    time = (cv2.getTickCount()-start)/cv2.getTickFrequency()
    print('parsing train data, Time:%.3fs' % time)

    start = cv2.getTickCount()
    write_data(h5py_file, 'val', x_val_paths, y_val_paths)
    time = (cv2.getTickCount()-start)/cv2.getTickFrequency()
    print('parsing val data, Time:%.3fs' % time)

    start = cv2.getTickCount()
    write_data(h5py_file, 'test', x_test_paths, y_test_paths)
    time = (cv2.getTickCount()-start)/cv2.getTickFrequency()
    print('parsing test data, Time:%.3fs' % time)
Example #4
Source File: webcam_gui.py    From PyCV-time with MIT License
def webcam_gui(filter_func, video_src=0):

    cap = cv2.VideoCapture(video_src)
    key_code = -1
    
    while(key_code == -1):
        t = cv2.getTickCount()
        # read a frame
        ret, frame = cap.read()
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - t) 
        print("Frame rate: " + str(fps))
        
        # run filter with the arguments
        frame_out = filter_func(frame)
        
        # show the image
        cv2.imshow('Press any key to exit', frame_out)
        
        # wait for the key
        key_code = cv2.waitKey(10)

    cap.release()
    cv2.destroyAllWindows() 
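A hypothetical way to call webcam_gui is to pass an edge-detection filter as filter_func; the helper below is illustrative and not part of the original project:

import cv2

def edge_filter(frame):
    # Canny expects an 8-bit single-channel image; convert the BGR frame first
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.Canny(gray, 100, 200)

if __name__ == '__main__':
    webcam_gui(edge_filter)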
Example #5
Source File: common.py    From how_do_drones_work with BSD 3-Clause "New" or "Revised" License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #6
Source File: helpers.py    From vidpipe with GNU General Public License v3.0
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #7
Source File: timer.py    From pyslam with GNU General Public License v3.0
def start(self):
        self._accumulated = 0         
        self._start_time = cv2.getTickCount() 
Example #8
Source File: timer.py    From pyslam with GNU General Public License v3.0
def pause(self): 
        now_time = cv2.getTickCount()
        self._accumulated += (now_time - self._start_time)/cv2.getTickFrequency() 
        self._is_paused = True 
Example #9
Source File: timer.py    From pyslam with GNU General Public License v3.0
def resume(self): 
        if self._is_paused: # considered only if paused 
            self._start_time = cv2.getTickCount()
            self._is_paused = False 
Example #10
Source File: timer.py    From pyslam with GNU General Public License v3.0
def elapsed(self):
        if self._is_paused:
            self._elapsed = self._accumulated
        else:
            now = cv2.getTickCount()
            self._elapsed = self._accumulated + (now - self._start_time)/cv2.getTickFrequency()        
        if self._is_verbose is True:      
            name =  self._name
            if self._is_paused:
                name += ' [paused]'
            message = 'Timer::' + name + ' - elapsed: ' + str(self._elapsed) 
            timer_print(message)
        return self._elapsed 
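Examples #7 through #10 are methods of a single Timer class in pyslam's timer.py. A self-contained sketch that puts them together (the constructor and the plain print() call are assumptions filled in from the attributes the methods reference; the original uses a timer_print() helper):

import cv2

class Timer:
    def __init__(self, name='', is_verbose=False):
        # assumed constructor: initializes the attributes used by the methods above
        self._name = name
        self._is_verbose = is_verbose
        self._is_paused = False
        self._accumulated = 0
        self._elapsed = 0
        self.start()

    def start(self):
        self._accumulated = 0
        self._start_time = cv2.getTickCount()

    def pause(self):
        now_time = cv2.getTickCount()
        self._accumulated += (now_time - self._start_time) / cv2.getTickFrequency()
        self._is_paused = True

    def resume(self):
        if self._is_paused:  # considered only if paused
            self._start_time = cv2.getTickCount()
            self._is_paused = False

    def elapsed(self):
        if self._is_paused:
            self._elapsed = self._accumulated
        else:
            now = cv2.getTickCount()
            self._elapsed = self._accumulated + (now - self._start_time) / cv2.getTickFrequency()
        if self._is_verbose:
            print('Timer::%s - elapsed: %s' % (self._name, self._elapsed))
        return self._elapsed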
Example #11
Source File: common.py    From pi-tracking-telescope with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #12
Source File: common.py    From pi-tracking-telescope with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #13
Source File: common.py    From pi-tracking-telescope with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #14
Source File: common.py    From CameraCalibration with GNU General Public License v3.0
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #15
Source File: common.py    From TecoGAN with Apache License 2.0
def clock():
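    # note: this snippet presumably relies on OpenCV being imported as cv (import cv2 as cv)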
    return cv.getTickCount() / cv.getTickFrequency() 
Example #16
Source File: test_webcam.py    From face_landmark_dnn with MIT License
def webcam_main():
    print("Camera sensor warming up...")
    vs = cv2.VideoCapture(0)
    time.sleep(2.0)

    mark_detector = MarkDetector()
    
    # loop over the frames from the video stream
    while True:
        _, frame = vs.read()
        start = cv2.getTickCount()

        frame = imutils.resize(frame, width=750, height=750)
        frame = cv2.flip(frame, 1)
        faceboxes = mark_detector.extract_cnn_facebox(frame)

        if faceboxes is not None:
            for facebox in faceboxes:
                # Detect landmarks from image of 64X64 with grayscale.
                face_img = frame[facebox[1]: facebox[3],
                                    facebox[0]: facebox[2]]
                # cv2.rectangle(frame, (facebox[0], facebox[1]), (facebox[2], facebox[3]), (0, 255, 0), 2)
                face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
                face_img0 = face_img.reshape(1, CNN_INPUT_SIZE, CNN_INPUT_SIZE, 1)

                land_start_time = time.time()
                marks = mark_detector.detect_marks_keras(face_img0)
                # marks *= 255
                marks *= facebox[2] - facebox[0]
                marks[:, 0] += facebox[0]
                marks[:, 1] += facebox[1]
                # Draw Predicted Landmarks
                mark_detector.draw_marks(frame, marks, color=(255, 255, 255), thick=2)

        fps_time = (cv2.getTickCount() - start)/cv2.getTickFrequency()
        cv2.putText(frame, '%.1ffps' % (1 / fps_time), (frame.shape[1] - 65, 15), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0))
        # show the frame
        cv2.imshow("Frame", frame)
        # writer.write(frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.release()  # cv2.VideoCapture is closed with release(); it has no stop() method
Example #17
Source File: common.py    From ImageAnalysis with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #18
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #19
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #20
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #21
Source File: common.py    From PyCV-time with MIT License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #22
Source File: inpainting.py    From open_model_zoo with Apache License 2.0
def infer(self, image, mask):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_names[0]: image, self._input_layer_names[1]: mask})
        self.infer_time = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
        return output[self._output_layer_name] 
Example #23
Source File: estimator.py    From open_model_zoo with Apache License 2.0
def _infer(self, prep_img):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_name: prep_img})
        self.infer_time = ((cv2.getTickCount() - t0) / cv2.getTickFrequency())
        return output[self._output_layer_name][0] 
Example #24
Source File: detector.py    From open_model_zoo with Apache License 2.0
def _infer(self, prep_img):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_name: prep_img})
        self.infer_time = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
        return output 
Example #25
Source File: detector.py    From open_model_zoo with Apache License 2.0
def infer(self, image):
        t0 = cv2.getTickCount()
        output = self._exec_model.infer(inputs={self._input_layer_name: image})
        self.infer_time = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
        return output 
Example #26
Source File: common.py    From OpenCV-Snapchat-DogFilter with BSD 3-Clause "New" or "Revised" License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #27
Source File: Rapid_trajectory_generator.py    From crossgap_il_rl with GNU General Public License v2.0
def load_random_data(save_dir, file_size):
    sample_set = np.random.choice(range(0, file_size), file_size, replace=False)
    sample_set = np.sort(sample_set)
    # sample_set = range(file_size)
    # print(random_set)
    file_name_vec = []
    for idx in sample_set:
        name = "%s\\traj_%d.pkl" % (save_dir, idx)
        file_name_vec.append(name)

    rapid_trajectory = Rapid_trajectory_generator()
    # t_start = cv2.getTickCount()
    in_data, out_data = rapid_trajectory.load_from_file_vector(file_name_vec)
    # print("cost time  = %.2f " % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))
    return in_data, out_data 
Example #28
Source File: common.py    From airtest with BSD 3-Clause "New" or "Revised" License
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Example #29
Source File: query_aimsim_images.py    From crossgap_il_rl with GNU General Public License v2.0
def service_capture(self):
        while(1):
            t_start = cv2.getTickCount()
            self.capture_mutex.acquire()
            self.capture_state()
            self.capture_image()
            self.capture_mutex.release()


            self.cost_time = ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency())
            # self.cost_time_vector.append(cost_time)
            # self.state_vector.append(self.raw_state)
            # print("Query raw_state cost time = %.2f" % cost_time)
            # self.update["Get_data_cost_time": cost_time] 
Example #30
Source File: query_aimsim_images.py    From crossgap_il_rl with GNU General Public License v2.0
def capture_state(self):
        t_start = cv2.getTickCount()
        self.state = self.client.getMultirotorState()
        # self.collision_info = self.client.simGetCollisionInfo()
        # print("Query raw_state cost time = %.2f" % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))