Python queue.Queue() Examples
The following are 28 code examples of queue.Queue(), drawn from open-source projects. Each example notes its original project, source file, and license. You may also want to look at the other functions and classes available in the queue module.
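Before the project examples, a minimal, self-contained sketch of the core queue.Queue API (put, get, task_done, join) may help as a baseline; it is illustrative only and not taken from any of the projects below:

import queue
import threading

def worker(q):
    while True:
        item = q.get()            # blocks until an item is available
        if item is None:          # a None sentinel tells the worker to exit
            q.task_done()
            break
        print("processing", item)
        q.task_done()             # mark the item as handled, for q.join()

q = queue.Queue()
t = threading.Thread(target=worker, args=(q,), daemon=True)
t.start()
for i in range(5):
    q.put(i)
q.put(None)  # shut the worker down
q.join()     # block until every enqueued item has been task_done()'d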

Example #1
Source File: osdriver.py From multibootusb with GNU General Public License v2.0 | 7 votes |
def dd_iso_image(self, input_, output, gui_update, status_update):
    ''' Implementation for OS that use dd to write the iso image. '''
    in_file_size = os.path.getsize(input_)
    cmd = [self.dd_exe, 'if=' + input_,
           'of=' + self.physical_disk(output), 'bs=1M']
    self.dd_iso_image_add_args(cmd, input_, output)
    kw_args = {
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'shell': False,
    }
    self.add_dd_iso_image_popen_args(kw_args)
    # Note: the original passed the builtin 'input' here; 'input_' is
    # the intended parameter.
    self.dd_iso_image_prepare(input_, output, status_update)
    log('Executing => ' + str(cmd))
    dd_process = subprocess.Popen(cmd, **kw_args)
    output_q = queue.Queue()
    while dd_process.poll() is None:
        self.dd_iso_image_readoutput(dd_process, gui_update, in_file_size,
                                     output_q)
    output_lines = [output_q.get() for i in range(output_q.qsize())]
    for l in output_lines:
        log('dd: ' + l)
    return self.dd_iso_image_interpret_result(
        dd_process.returncode, output_lines)
Example #2
Source File: webCrawler.py From Learning-Concurrency-in-Python with MIT License | 6 votes |
def main():
    print("Starting our Web Crawler")
    baseUrl = input("Website > ")
    numberOfThreads = input("No Threads > ")

    linksToCrawl = queue.Queue()
    urlLock = threading.Lock()
    linksToCrawl.put(baseUrl)

    haveVisited = []
    crawlers = []
    errorLinks = []

    for i in range(int(numberOfThreads)):
        crawler = Crawler(baseUrl, linksToCrawl, haveVisited, errorLinks, urlLock)
        crawler.start()
        crawlers.append(crawler)

    for crawler in crawlers:
        crawler.join()

    print("Total Number of Pages Visited {}".format(len(haveVisited)))
    print("Total Number of Pages with Errors {}".format(len(errorLinks)))
Example #3
Source File: 8_prodcons.py From deep-learning-note with MIT License | 6 votes |
def main():
    nloops = randint(2, 20)
    q = Queue(32)
    threads = []
    # nfuncs is assumed to be range(len(funcs)) elsewhere in the module
    for i in nfuncs:
        t = MyThread(funcs[i], (q, nloops), funcs[i].__name__)
        threads.append(t)
    for i in nfuncs:
        threads[i].start()
    for i in nfuncs:
        threads[i].join()
    print("all DONE")
Example #4
Source File: rl_data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def __init__(self, batch_size, input_length, nthreads=6, web_viz=False):
    super(RLDataIter, self).__init__()
    self.batch_size = batch_size
    self.input_length = input_length
    self.env = [self.make_env() for _ in range(batch_size)]
    self.act_dim = self.env[0].action_space.n

    self.state_ = None

    self.reset()
    self.provide_data = [mx.io.DataDesc('data', self.state_.shape, np.uint8)]

    self.web_viz = web_viz
    if web_viz:
        self.queue = queue.Queue()
        self.thread = Thread(target=make_web, args=(self.queue,))
        self.thread.daemon = True
        self.thread.start()

    self.nthreads = nthreads
    if nthreads > 1:
        self.pool = multiprocessing.pool.ThreadPool(6)
Example #5
Source File: itn_helpers.py From django-payfast with MIT License | 6 votes |
def itn_handler(host, port):  # type: (str, int) -> Iterator[Queue]
    """
    Usage::

        with itn_handler(ITN_HOST, ITN_PORT) as itn_queue:
            # ...complete PayFast payment...
            itn_data = itn_queue.get(timeout=2)
    """
    server_address = (host, port)
    http_server = HTTPServer(server_address, PayFastITNHandler)
    http_server.itn_queue = Queue()  # type: ignore

    executor = ThreadPoolExecutor(max_workers=1)
    executor.submit(http_server.serve_forever)
    try:
        yield http_server.itn_queue  # type: ignore
    finally:
        http_server.shutdown()
Example #6
Source File: camera_node.py From RacingRobot with MIT License | 6 votes |
def extractInfo(self):
    try:
        while not self.exit:
            try:
                frame = self.frame_queue.get(block=True, timeout=1)
            except queue.Empty:
                print("Queue empty")
                continue
            try:
                # Publish new image
                msg = self.bridge.cv2_to_imgmsg(frame, 'rgb8')
                if not self.exit:
                    self.image_publisher.publish(msg)
            except CvBridgeError as e:
                print("Error Converting cv image: {}".format(e.message))
            self.frame_num += 1
    except Exception as e:
        print("Exception after loop: {}".format(e))
        raise
Example #7
Source File: timer_queue.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def _check_and_execute(self):
    wakeup_queue = self._wakeup_queue
    while 1:
        (next_expired_time, expired_timers) = self._get_expired_timers()
        for timer in expired_timers:
            try:
                # Note, please make timer callback effective/short
                timer()
            except Exception:
                logging.error(traceback.format_exc())

        self._reset_timers(expired_timers)

        sleep_time = _calc_sleep_time(next_expired_time)
        try:
            wakeup = wakeup_queue.get(timeout=sleep_time)
            if wakeup is TEARDOWN_SENTINEL:
                break
        except Queue.Empty:
            pass
    logging.info('TimerQueue stopped.')
Example #8
Source File: ta_data_loader.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def __init__(self, job_scheduler, event_writer):
    """
    @configs: a list like object containing a list of dict like objects.
    Each element shall implement dict.get/[] like interfaces to get the
    value for a key.
    @job_scheduler: schedules the jobs. Shall implement get_ready_jobs.
    @event_writer: write_events
    """
    self._settings = self._read_default_settings()
    self._settings["daemonize_thread"] = False
    self._event_writer = event_writer
    self._wakeup_queue = queue.Queue()
    self._scheduler = job_scheduler
    self._timer_queue = tq.TimerQueue()
    self._executor = ce.ConcurrentExecutor(self._settings)
    self._started = False
Example #9
Source File: loader.py From vergeml with MIT License | 5 votes |
def __init__(self, loader, split, ix_gen, max_items):
    """
    :param loader: Object responsible for loading samples.
    :param split: train, val or test.
    :param ix_gen: An infinite generator yielding tuples (ix, n).
    :param max_items: The maximum number of samples to have in the queue.
    """
    super().__init__()
    self.ix_gen = ix_gen
    self.outq = queue.Queue(max_items)
    self.split = split
    self.loader = loader
    self.daemon = True
Example #10
Source File: decorators.py From pkmeter with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __get__(self, inst, owner):
    if inst is None:
        return self
    key = self._func.__get__(inst, type(inst))
    if key not in self._queues:
        self._queues[key] = queue.Queue()
        self._threads[key] = threading.Thread(target=self._thread_loop, args=[inst, key])
        self._threads[key].daemon = True
        self._threads[key].start()
    return lambda *a, **k: self._queues[key].put((a, k))
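The descriptor relies on a _thread_loop method that is not part of this snippet. A minimal sketch of what such a loop might look like, assuming key is the bound method captured above (the error handling is illustrative):

def _thread_loop(self, inst, key):
    # Drain queued (args, kwargs) tuples and invoke the wrapped
    # bound method with them, one call at a time.
    while True:
        args, kwargs = self._queues[key].get()
        try:
            key(*args, **kwargs)
        except Exception:
            pass  # a real implementation would log the failure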
Example #11
Source File: utils.py From pkmeter with BSD 3-Clause "New" or "Revised" License | 5 votes |
def iter_responses(urls, data=None, timeout=30):
    responses = queue.Queue()
    threads = []
    _req = lambda url, data, timeout: responses.put(http_request(url, data, timeout))
    for url in urls:
        threads.append(threading.Thread(target=_req, args=(url, data, timeout)))
        threads[-1].daemon = True
        threads[-1].start()
    for thread in threads:
        thread.join()
    responses.put(None)
    for response in iter(responses.get, None):
        yield response
Example #12
Source File: queueOperations.py From Learning-Concurrency-in-Python with MIT License | 5 votes |
def mySubscriber(queue):
    while True:
        item = queue.get()
        if item is None:
            break
        print("{} removed {} from the queue".format(threading.current_thread(), item))
        print("Queue Size is now: {}".format(queue.qsize()))
        queue.task_done()
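A subscriber like this assumes a matching publisher that enqueues work and finishes with one None sentinel per consumer; a minimal sketch (myPublisher and num_workers are illustrative names, not from the source project):

def myPublisher(queue, items, num_workers):
    for item in items:
        queue.put(item)
    # One None sentinel per subscriber, so every consumer
    # loop sees one and breaks out.
    for _ in range(num_workers):
        queue.put(None)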
Example #13
Source File: zmirror.py From zmirror with MIT License | 5 votes |
def preload_streamed_response_content_async(requests_response_obj, buffer_queue):
    """In stream mode, pre-read the content of the remote response.

    :param requests_response_obj:
    :type buffer_queue: queue.Queue
    """
    for particle_content in requests_response_obj.iter_content(stream_transfer_buffer_size):
        try:
            buffer_queue.put(particle_content, timeout=10)
        except queue.Full:  # coverage: exclude
            traceback.print_exc()
            exit()
        if verbose_level >= 3:
            dbgprint('BufferSize', buffer_queue.qsize())
    buffer_queue.put(None, timeout=10)
    exit()
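The consuming side is not part of this snippet; presumably it reads chunks from buffer_queue until the None sentinel. A minimal sketch of such a consumer (the function name is illustrative):

def iter_buffered_content(buffer_queue):
    # Drain chunks pushed by the preload thread until the
    # None sentinel marks the end of the response body.
    while True:
        chunk = buffer_queue.get(timeout=10)
        if chunk is None:
            break
        yield chunk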
Example #14
Source File: engine.py From PickTrue with MIT License | 5 votes |
def __init__(self, queue, target):
    """
    :type queue: queue.Queue
    """
    super(StoppableThread, self).__init__()
    self.task_func = target
    self.queue = queue
    self.daemon = True
    self._stopped = False
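The run loop of StoppableThread is not shown here; a plausible sketch, assuming the thread drains the queue until its stop flag is set and that the module imports queue for queue.Empty:

def run(self):
    while not self._stopped:
        try:
            task_item = self.queue.get(timeout=1)
        except queue.Empty:
            continue  # no work yet; re-check the stop flag
        self.task_func(task_item)
        self.queue.task_done()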
Example #15
Source File: engine.py From PickTrue with MIT License | 5 votes |
def __init__(self, fetcher, num_workers=5, save_dir='.'):
    self.save_dir = save_dir
    self.num_workers = num_workers
    self._download_queue = Queue()
    self.counter = Counter()
    self.done = False
    self._stop = False
    self._all_task_add = False
    self.ensure_dir()

    def counter_wrapper(func):
        @wraps(func)
        def wrapped(task_item):
            ret = func(task_item=task_item)
            self.counter.increment_done()
            return ret
        return wrapped

    download_then_save = mk_download_save_function(fetcher)
    _dts = counter_wrapper(download_then_save)

    self._download_workers = [
        StoppableThread(self._download_queue, _dts)
        for _ in range(num_workers)
    ]
    self._start_daemons()
Example #16
Source File: channel.py From PickTrue with MIT License | 5 votes |
def __init__(self):
    self.recv_queue = Queue()
    self.send_queue = Queue()
Example #17
Source File: tumblrdownloader.py From TumblrDownloader with MIT License | 5 votes |
def _downloadimage(self, url_list):
    '''
    Download an image to the subdomain folder.
    '''
    queue = Queue()
    for url in url_list:
        queue.put(url)

    for i in range(self._threads):
        t = DownloadThread(queue, self._folder_path, self._image_prefix)
        t.start()

    queue.join()
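DownloadThread is defined elsewhere in the project; a rough sketch of such a worker, assuming each URL is marked done so that queue.join() can return (the _download helper and exact attributes are hypothetical):

class DownloadThread(threading.Thread):
    def __init__(self, queue, folder_path, image_prefix):
        super(DownloadThread, self).__init__()
        self.queue = queue
        self.folder_path = folder_path
        self.image_prefix = image_prefix
        self.daemon = True

    def run(self):
        while True:
            try:
                url = self.queue.get_nowait()
            except Empty:  # assumes "from queue import Empty" at module level
                break
            try:
                self._download(url)  # hypothetical download helper
            finally:
                self.queue.task_done()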
Example #18
Source File: bn.py From ACAN with MIT License | 5 votes |
def __init__(self, num_features, devices=None, eps=1e-5, momentum=0.1, affine=True,
             activation="leaky_relu", slope=0.01):
    """Creates a synchronized, InPlace Activated Batch Normalization module

    Parameters
    ----------
    num_features : int
        Number of feature channels in the input and output.
    devices : list of int or None
        IDs of the GPUs that will run the replicas of this module.
    eps : float
        Small constant to prevent numerical issues.
    momentum : float
        Momentum factor applied to compute running statistics.
    affine : bool
        If `True` apply learned scale and shift transformation after normalization.
    activation : str
        Name of the activation functions, one of: `leaky_relu`, `elu` or `none`.
    slope : float
        Negative slope for the `leaky_relu` activation.
    """
    super(InPlaceABNSync, self).__init__(num_features, eps, momentum, affine, activation, slope)
    self.devices = devices if devices else list(range(torch.cuda.device_count()))

    # Initialize queues
    self.worker_ids = self.devices[1:]
    self.master_queue = Queue(len(self.worker_ids))
    self.worker_queues = [Queue(1) for _ in self.worker_ids]
Example #19
Source File: output.py From Paradrop with Apache License 2.0 | 5 votes |
def startLogging(self, filePath=None, stealStdio=False, printToConsole=True):
    '''
    Begin logging. The output class is ready to go out of the box, but to
    keep a mere import from stealing stdio or making console logging
    vanish, these must be turned on manually.

    :param filePath: if provided, begin logging to the given directory. If
        not provided, do not write out logs.
    :type filePath: str
    :param stealStdio: choose to intercept stdio (including vanilla print
        statements) or allow it to pass through
    :type stealStdio: bool.
    :param printToConsole: output the results of all logging to the console.
        This is primarily a performance consideration when running in production
    :type printToConsole: bool.
    '''
    # Initialize printer thread
    self.__dict__['logpath'] = None

    if filePath is not None:
        self.__dict__['queue'] = queue.Queue()
        self.__dict__['printer'] = PrintLogThread(filePath, self.queue, LOG_NAME)
        self.__dict__['logpath'] = filePath
        self.printer.start()

    # by default, stdio gets captured. This can be toggled off
    self.stealStdio(stealStdio)
    self.logToConsole(printToConsole)

    # Override twisted logging (allows us to cleanly catch all exceptions)
    # This must come after the setattr calls so we get the wrapped object
    log.startLoggingWithObserver(self.twisted, setStdout=False)
    log.startLoggingWithObserver(self.twistedErr, setStdout=False)
Example #20
Source File: itn_helpers.py From django-payfast with MIT License | 5 votes |
def do_POST(self):  # type: () -> None
    assert self.command == 'POST'
    assert self.path == '/'

    # Parse the request body, and post to the queue.
    data = self.read_request_data()
    itn_queue = self.server.itn_queue  # type: ignore
    itn_queue  # type: Queue
    itn_queue.put(data)

    self.send_response(HTTPStatus.OK)
    self.end_headers()
Example #21
Source File: transcript_logger.py From botbuilder-python with MIT License | 5 votes |
def log_activity(self, transcript: Queue, activity: Activity) -> None:
    """Logs the activity.

    :param transcript: transcript.
    :param activity: Activity to log.
    """
    transcript.put(activity)
Example #22
Source File: vad.py From audio with BSD 2-Clause "Simplified" License | 5 votes |
def __init__(self, device=None, rate=22050, chunk=2205):
    """
    The 22050 is the librosa default, which is what our models were
    trained on. The ratio of [chunk / rate] is the amount of time between
    audio samples - for example, with these defaults, an audio fragment
    will be processed every tenth of a second.
    """
    self._rate = rate
    self._chunk = chunk
    self._device = device

    # Create a thread-safe buffer of audio data
    self._buff = queue.Queue()
    self.closed = True
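A buffer like this is typically drained by a generator method that blocks for the first chunk and then greedily collects whatever else has already arrived; a sketch of that common pattern (the method name generator and the None-sentinel convention are assumptions, not confirmed by this snippet):

def generator(self):
    # Hypothetical consumer: block for at least one chunk, then grab
    # anything else already buffered so downstream code gets fewer,
    # larger payloads. A None chunk signals that the stream closed.
    while not self.closed:
        chunk = self._buff.get()
        if chunk is None:
            return
        data = [chunk]
        while True:
            try:
                chunk = self._buff.get(block=False)
                if chunk is None:
                    return
                data.append(chunk)
            except queue.Empty:
                break
        yield b''.join(data)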
Example #23
Source File: utils.py From audio with BSD 2-Clause "Simplified" License | 5 votes |
def __init__(self, generator: Iterable, maxsize: int) -> None:
    threading.Thread.__init__(self)
    self.queue: Queue = Queue(maxsize)
    self.generator = generator
    self.daemon = True
    self.start()
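The thread's run method is not included in this snippet; a minimal sketch of the usual pattern, assuming a None sentinel marks generator exhaustion:

def run(self) -> None:
    # Move items from the generator into the bounded queue. put()
    # blocks when the queue is full, which throttles the producer.
    for item in self.generator:
        self.queue.put(item)
    self.queue.put(None)  # sentinel: the generator is exhausted

def __iter__(self):
    # Yield items until the sentinel appears.
    return iter(self.queue.get, None)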
Example #24
Source File: data_util.py From ICDAR-2019-SROIE with MIT License | 5 votes |
def start(self, workers=1, max_queue_size=10):
    def data_generator_task():
        while not self._stop_event.is_set():
            try:
                if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
                    generator_output = next(self._generator)
                    self.queue.put(generator_output)
                else:
                    time.sleep(self.wait_time)
            except Exception:
                self._stop_event.set()
                raise

    try:
        if self._use_multiprocessing:
            self.queue = multiprocessing.Queue(maxsize=max_queue_size)
            self._stop_event = multiprocessing.Event()
        else:
            self.queue = queue.Queue()
            self._stop_event = threading.Event()

        for _ in range(workers):
            if self._use_multiprocessing:
                # Reset random seed else all children processes
                # share the same seed
                np.random.seed(self.random_seed)
                thread = multiprocessing.Process(target=data_generator_task)
                thread.daemon = True
                if self.random_seed is not None:
                    self.random_seed += 1
            else:
                thread = threading.Thread(target=data_generator_task)
            self._threads.append(thread)
            thread.start()
    except:
        self.stop()
        raise
Example #25
Source File: comm.py From overhaul-distillation with MIT License | 5 votes |
def __init__(self, master_callback):
    """
    Args:
        master_callback: a callback to be invoked after having collected
            messages from slave devices.
    """
    self._master_callback = master_callback
    self._queue = queue.Queue()
    self._registry = collections.OrderedDict()
    self._activated = False
Example #26
Source File: comm.py From overhaul-distillation with MIT License | 5 votes |
def register_slave(self, identifier):
    """
    Register a slave device.

    Args:
        identifier: an identifier, usually the device id.

    Returns:
        a `SlavePipe` object which can be used to communicate with the
        master device.
    """
    if self._activated:
        assert self._queue.empty(), 'Queue is not clean before next initialization.'
        self._activated = False
        self._registry.clear()
    future = FutureResult()
    self._registry[identifier] = _MasterRegistry(future)
    return SlavePipe(identifier, self._queue, future)
Example #27
Source File: test_create_task.py From sanic with MIT License | 5 votes |
def test_create_task_with_app_arg(app):
    q = Queue()

    @app.route("/")
    def not_set(request):
        return "hello"

    async def coro(app):
        q.put(app.name)

    app.add_task(coro)

    request, response = app.test_client.get("/")
    assert q.get() == "test_create_task_with_app_arg"
Example #28
Source File: wsgi-server.py From hyper-h2 with MIT License | 5 votes |
def __init__(self):
    config = H2Configuration(client_side=False, header_encoding='utf-8')

    # Our server-side state machine.
    self.conn = H2Connection(config=config)

    # The backing transport.
    self.transport = None

    # A dictionary of ``Stream`` objects, keyed by their stream ID. This
    # makes it easy to route data to the correct WSGI application instance.
    self.streams = {}

    # A queue of data emitted by WSGI applications that has not yet been
    # sent. Each stream may only have one chunk of data in either this
    # queue or the flow_controlled_data dictionary at any one time.
    self._stream_data = asyncio.Queue()

    # Data that has been pulled off the queue that is for a stream blocked
    # behind flow control limitations. This is used to avoid spinning on
    # _stream_data queue when a stream cannot have its data sent. Data that
    # cannot be sent on the connection when it is popped off the queue gets
    # placed here until the stream flow control window opens up again.
    self._flow_controlled_data = {}

    # A reference to the loop in which this protocol runs. This is needed
    # to synchronise up with background threads.
    self._loop = asyncio.get_event_loop()

    # Any streams that have been remotely reset. We keep track of these to
    # ensure that we don't emit data from a WSGI application whose stream
    # has been cancelled.
    self._reset_streams = set()

    # Keep track of the loop sending task so we can kill it when the
    # connection goes away.
    self._send_loop_task = None