Python concurrent.futures.ThreadPoolExecutor() Examples

The following are 30 code examples of concurrent.futures.ThreadPoolExecutor(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module concurrent.futures, or try the search function.
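Before the project examples, here is a minimal, self-contained usage sketch (not taken from any project below) showing the two most common patterns, submit()/result() and map():

from concurrent.futures import ThreadPoolExecutor, as_completed

def work(n):
    # stand-in for an I/O-bound task (network call, file read, ...)
    return n * n

with ThreadPoolExecutor(max_workers=4) as executor:
    # submit() returns a Future; as_completed() yields futures as they finish
    futures = [executor.submit(work, n) for n in range(5)]
    unordered = [f.result() for f in as_completed(futures)]

    # map() keeps input order
    ordered = list(executor.map(work, range(5)))

print(sorted(unordered), ordered)  # [0, 1, 4, 9, 16] [0, 1, 4, 9, 16]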
Example #1
Source File: poolImprovement.py    From Learning-Concurrency-in-Python with MIT License    8 votes
def main():

  t1 = timeit.default_timer()
  with ProcessPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))

  print("{} Seconds Needed for ProcessPoolExecutor".format(timeit.default_timer() - t1))
  
  t2 = timeit.default_timer()
  with ThreadPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))
  print("{} Seconds Needed for ThreadPoolExecutor".format(timeit.default_timer() - t2))

  t3 = timeit.default_timer()
  for number in PRIMES:
    isPrime = is_prime(number)
    print("{} is prime: {}".format(number, isPrime))
  print("{} Seconds needed for single threaded execution".format(timeit.default_timer()-t3)) 
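This snippet assumes module-level PRIMES and is_prime definitions from elsewhere in poolImprovement.py; a sketch of what they presumably look like, modelled on the concurrent.futures documentation example:

import math

PRIMES = [
    112272535095293,
    112582705942171,
    112272535095293,
    115280095190773,
    115797848077099,
    1099726899285419]

def is_prime(n):
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, sqrt_n + 1, 2):
        if n % i == 0:
            return False
    return True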
Example #2
Source File: ars_server.py    From soccer-matlab with BSD 2-Clause "Simplified" License    7 votes
def main(unused_argv):
  servers = []
  server_creds = loas2.loas2_server_credentials()
  port = FLAGS.port
  if not FLAGS.run_on_borg:
    port = 20000 + FLAGS.server_id
  server = grpc.server(
      futures.ThreadPoolExecutor(max_workers=10), ports=(port,))
  servicer = ars_evaluation_service.ParameterEvaluationServicer(
      FLAGS.config_name, worker_id=FLAGS.server_id)
  ars_evaluation_service_pb2_grpc.add_EvaluationServicer_to_server(
      servicer, server)
  server.add_secure_port("[::]:{}".format(port), server_creds)
  servers.append(server)
  server.start()
  print("Start server {}".format(FLAGS.server_id))

  # prevent the main thread from exiting
  try:
    while True:
      time.sleep(_ONE_DAY_IN_SECONDS)
  except KeyboardInterrupt:
    for server in servers:
      server.stop(0) 
Example #3
Source File: test_runner.py    From moler with BSD 3-Clause "New" or "Revised" License    7 votes
def test_CancellableFuture_str_casting_shows_embedded_future():
    import threading
    from moler.runner import CancellableFuture
    from concurrent.futures import ThreadPoolExecutor

    def fun_with_future_result(delay):
        time.sleep(delay)
        return delay * 2

    executor = ThreadPoolExecutor()
    observer_lock = threading.Lock()
    stop_feeding = threading.Event()
    feed_done = threading.Event()
    connection_observer_future = executor.submit(fun_with_future_result, delay=0.1)
    c_future = CancellableFuture(connection_observer_future, observer_lock, stop_feeding, feed_done)
    connection_observer_future_as_str = str(connection_observer_future)
    c_future_as_str = str(c_future)
    assert c_future_as_str == "CancellableFuture({})".format(connection_observer_future_as_str)
    executor.shutdown() 
Example #4
Source File: 0004_calculate_hashes.py    From resolwe with Apache License 2.0    6 votes
def calculate_hashes(apps, schema_editor):
    """Calculate hashes for existing ReferencedPaths."""
    FileStorage = apps.get_model("storage", "FileStorage")

    with ThreadPoolExecutor(max_workers=50) as executor:
        for file_storage in FileStorage.objects.all():
            storage_locations = file_storage.storage_locations.filter(status="OK")
            # Do not calculate hash when no location with status OK exists.
            if storage_locations.count() == 0:
                continue
            best_storage_location = storage_locations.first()
            best_priority = connectors[best_storage_location.connector_name].priority
            for storage_location in storage_locations:
                priority = connectors[storage_location.connector_name].priority
                if priority < best_priority:
                    best_storage_location = storage_location
                    best_priority = priority
            storage_location = best_storage_location
            executor.submit(
                process_storage_location, file_storage, best_storage_location
            ) 
Example #5
Source File: servers.py    From Pyro5 with MIT License    6 votes
def count(self, lines):
        # use the name server's prefix lookup to get all registered wordcounters
        with locate_ns() as ns:
            all_counters = ns.list(prefix="example.dc2.wordcount.")

        # chop the text into chunks that can be distributed across the workers
        # uses futures so that it runs the counts in parallel
        # counter is selected in a round-robin fashion from list of all available counters
        with futures.ThreadPoolExecutor() as pool:
            roundrobin_counters = cycle(all_counters.values())
            tasks = []
            for chunk in grouper(200, lines):
                tasks.append(pool.submit(self.count_chunk, next(roundrobin_counters), chunk))

            # gather the results
            print("Collecting %d results (counted in parallel)..." % len(tasks))
            totals = Counter()
            for task in futures.as_completed(tasks):
                try:
                    totals.update(task.result())
                except Pyro5.errors.CommunicationError as x:
                    raise Pyro5.errors.PyroError("Something went wrong in the server when collecting the responses: "+str(x))
            return totals 
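The snippet relies on a grouper() helper defined elsewhere in the example; a minimal sketch of the assumed behaviour (splitting the lines into chunks of n items) could look like this:

from itertools import islice

def grouper(n, iterable):
    # yield successive lists of at most n items from iterable
    it = iter(iterable)
    while True:
        chunk = list(islice(it, n))
        if not chunk:
            return
        yield chunk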
Example #6
Source File: client_graphics.py    From Pyro5 with MIT License    6 votes
def __init__(self):
        self.root = tkinter.Tk()
        self.root.title("Mandelbrot (Pyro multi CPU core version)")
        canvas = tkinter.Canvas(self.root, width=res_x, height=res_y, bg="#000000")
        canvas.pack()
        self.img = tkinter.PhotoImage(width=res_x, height=res_y)
        canvas.create_image((res_x/2, res_y/2), image=self.img, state="normal")
        with locate_ns() as ns:
            mandels = ns.yplookup(meta_any={"class:mandelbrot_calc_color"})
            mandels = list(mandels.items())
        print("{0} mandelbrot calculation servers found.".format(len(mandels)))
        if not mandels:
            raise ValueError("launch at least one mandelbrot calculation server before starting this")
        self.mandels = [uri for _, (uri, meta) in mandels]
        self.pool = futures.ThreadPoolExecutor(max_workers=len(self.mandels))
        self.tasks = []
        self.start_time = time.time()
        for line in range(res_y):
            self.tasks.append(self.calc_new_line(line))
        self.root.after(100, self.draw_results)
        tkinter.mainloop() 
Example #7
Source File: invoker.py    From pywren-ibm-cloud with Apache License 2.0    6 votes
def _run_invoker_process(self, inv_id):
        """
        Run process that implements token bucket scheduling approach
        """
        logger.debug('ExecutorID {} - Invoker process {} started'.format(self.executor_id, inv_id))

        with ThreadPoolExecutor(max_workers=250) as executor:
            while True:
                try:
                    self.token_bucket_q.get()
                    job, call_id = self.pending_calls_q.get()
                except KeyboardInterrupt:
                    break
                if self.running_flag.value:
                    executor.submit(self._invoke, job, call_id)
                else:
                    break

        logger.debug('ExecutorID {} - Invoker process {} finished'.format(self.executor_id, inv_id)) 
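The docstring refers to token-bucket scheduling: the loop blocks on token_bucket_q.get() before every invocation, so another component must refill the bucket at the permitted rate. A self-contained sketch of that refill idea (names and rate are hypothetical, not taken from pywren-ibm-cloud):

import queue
import threading
import time

token_bucket_q = queue.Queue(maxsize=10)   # each item is permission for one invocation

def refill_tokens(rate_per_second=5):
    while True:
        try:
            token_bucket_q.put_nowait(object())   # add a token if the bucket has room
        except queue.Full:
            pass
        time.sleep(1.0 / rate_per_second)

threading.Thread(target=refill_tokens, daemon=True).start()

token_bucket_q.get()   # a consumer blocks here until it is allowed to invoke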
Example #8
Source File: tvdb.py    From plugin.video.kmediatorrent with GNU General Public License v3.0    6 votes
def get_all_meta(show_id):
    import xml.etree.ElementTree as ET
    from concurrent import futures
    from kmediatorrent.utils import url_get, joining

    def _get_all_meta():
        r = url_get("%s/all/%s.xml" % (show_base_url(show_id), LANG), headers=HEADERS, with_immunicity=False)
        dom = ET.fromstring(r)
        if not len(dom):
            return
        return update_image_urls(dom2dict(dom))
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        meta = pool.submit(_get_all_meta)
        banners = pool.submit(get_banners, show_id)
    meta = meta.result()
    meta["series"][0]["episodes"] = meta["episode"]
    meta = meta["series"][0]
    meta["banners"] = banners.result() or []
    return meta 
Example #9
Source File: stac_validator.py    From stac-validator with Apache License 2.0    6 votes
def run(self, concurrent=10):
        """
        Entry point.
        :param concurrent: number of threads to use
        :return: message json
        """

        children = [self.stac_file]
        logger.info(f"Using {concurrent} threads")
        while True:
            with futures.ThreadPoolExecutor(max_workers=int(concurrent)) as executor:
                future_tasks = [executor.submit(self._validate, url) for url in children]
                children = []
                for task in futures.as_completed(future_tasks):
                    message, status, new_children = task.result()
                    self.status = self._update_status(self.status, status)
                    self.message.append(message)
                    children.extend(new_children)

            if not children:
                break

        return json.dumps(self.message) 
Example #10
Source File: itn_helpers.py    From django-payfast with MIT License    6 votes
def itn_handler(host, port):  # type: (str, int) -> Iterator[Queue]
    """
    Usage::

        with itn_handler(ITN_HOST, ITN_PORT) as itn_queue:
            # ...complete PayFast payment...
            itn_data = itn_queue.get(timeout=2)
    """
    server_address = (host, port)
    http_server = HTTPServer(server_address, PayFastITNHandler)
    http_server.itn_queue = Queue()  # type: ignore

    executor = ThreadPoolExecutor(max_workers=1)
    executor.submit(http_server.serve_forever)
    try:
        yield http_server.itn_queue  # type: ignore
    finally:
        http_server.shutdown() 
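Since the body yields and the docstring shows with-statement usage, the original module presumably decorates this function with contextlib.contextmanager. A minimal sketch of the same pattern with hypothetical names (not django-payfast's actual code):

from contextlib import contextmanager
from concurrent.futures import ThreadPoolExecutor

@contextmanager
def background_server(serve, shutdown):
    executor = ThreadPoolExecutor(max_workers=1)
    executor.submit(serve)     # run the blocking server loop off the main thread
    try:
        yield
    finally:
        shutdown()             # unblock the server loop so the worker thread can exit
        executor.shutdown()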
Example #11
Source File: plugin_base.py    From sslyze with GNU Affero General Public License v3.0    6 votes
def scan_server(
        cls, server_info: "ServerConnectivityInfo", extra_arguments: Optional[_ScanCommandExtraArgumentsTypeVar] = None
    ) -> _ScanCommandResultTypeVar:
        """Utility method to run a scan command directly.

        This is useful for the test suite to run commands without using the Scanner class. It should NOT be used to
        actually run scans as this will be very slow (no multi-threading); use the Scanner class instead.
        """
        thread_pool = ThreadPoolExecutor(max_workers=5)

        all_jobs = cls.scan_jobs_for_scan_command(server_info, extra_arguments)
        all_futures = []
        for job in all_jobs:
            future = thread_pool.submit(job.function_to_call, *job.function_arguments)
            all_futures.append(future)

        result = cls.result_for_completed_scan_jobs(server_info, all_futures)
        return result 
Example #12
Source File: test_circular_buffer.py    From resolwe with Apache License 2.0    6 votes
def test_read_large_chunk(self):
        reading_bytes = 10000
        write_data = b"small"
        stream = CircularBuffer(buffer_size=11)

        def task_a():
            return stream.read(reading_bytes)

        def task_b():
            for i in range(int(reading_bytes / len(write_data)) + 2):
                stream.write(write_data)

        with ThreadPoolExecutor(max_workers=2) as executor:
            future_a = executor.submit(task_a)
            executor.submit(task_b)
        read_data = future_a.result()
        self.assertEqual(
            read_data,
            (write_data * (int(reading_bytes / len(write_data)) + 1))[:reading_bytes],
        ) 
Example #13
Source File: test_circular_buffer.py    From resolwe with Apache License 2.0    6 votes
def test_write_large_chunk(self):
        reading_bytes = 1000
        write_data = b"writinginlargechunksmuchlargerthanmybuffer"
        stream = CircularBuffer(buffer_size=11)

        def task_a():
            data = stream.read(reading_bytes)
            stream.close()
            return data

        def task_b():
            for i in range(int(reading_bytes / len(write_data)) + 2):
                stream.write(write_data)

        with ThreadPoolExecutor(max_workers=2) as executor:
            future_a = executor.submit(task_a)
            executor.submit(task_b)
        read_data = future_a.result()
        self.assertEqual(
            read_data,
            (write_data * (int(reading_bytes / len(write_data)) + 1))[:reading_bytes],
        ) 
Example #14
Source File: rendezvous.py    From tandem with Apache License 2.0    6 votes
def __init__(self, host, port):
        self._main_executor = ThreadPoolExecutor(max_workers=1)
        self._time_scheduler = TimeScheduler(self._main_executor)
        self._udp_gateway = UDPGateway(
            host,
            port,
            self._on_receive_message,
            [
                ListParametersProxy(),
                UnicodeProxy(),
                FragmentProxy(),
                RendezvousRelayProxy(),
                ReliabilityProxy(self._time_scheduler),
            ],
        )
        self._rendezvous_protocol = AgentRendezvousProtocolHandler(
            self._udp_gateway,
        ) 
Example #15
Source File: _workers_pool.py    From arctic with GNU Lesser General Public License v2.1    6 votes
def _workers_pool(self):
        if self._pool is not None:
            return self._pool

        # lazy init the workers pool
        got_initialized = False
        with type(self)._POOL_LOCK:
            if self._pool is None:
                self._pool = ThreadPoolExecutor(max_workers=self._pool_size,
                                                thread_name_prefix='AsyncArcticWorker')
                got_initialized = True

        # Call hooks outside the lock, to minimize time-under-lock
        if got_initialized:
            for hook in self._pool_update_hooks:
                hook(self._pool_size)

        return self._pool 
Example #16
Source File: runner.py    From moler with BSD 3-Clause "New" or "Revised" License    6 votes
def __init__(self, executor=None):
        """Create instance of ThreadPoolExecutorRunner class"""
        self._tick = 0.005  # Tick for sleep or partial timeout
        self._in_shutdown = False
        self._i_own_executor = False
        self._was_timeout_called = False
        self.executor = executor
        self.logger = logging.getLogger('moler.runner.thread-pool')
        self.logger.debug("created")
        atexit.register(self.shutdown)
        if executor is None:
            max_workers = 1000  # max 1000 threads in pool
            try:  # concurrent.futures  v.3.2.0 introduced prefix we like :-)
                self.executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix='ThrdPoolRunner')
            except TypeError as exc:
                if ('unexpected' in str(exc)) and ('thread_name_prefix' in str(exc)):
                    self.executor = ThreadPoolExecutor(max_workers=max_workers)
                else:
                    raise
            self.logger.debug("created own executor {!r}".format(self.executor))
            self._i_own_executor = True
        else:
            self.logger.debug("reusing provided executor {!r}".format(self.executor)) 
Example #17
Source File: test_manager.py    From resolwe with Apache License 2.0    5 votes
def test_skip_locked(self):
        rows_locked = Event()
        manager_finished = Event()

        def task_a(lock_ids=[]):
            with transaction.atomic():
                list(FileStorage.objects.select_for_update().filter(id__in=lock_ids))
                rows_locked.set()
                manager_finished.wait()
            connection.close()

        def task_b():
            rows_locked.wait()
            self.manager.process()
            manager_finished.set()
            connection.close()

        process_filestorage_mock = MagicMock()
        with patch(
            "resolwe.storage.manager.Manager._process_file_storage",
            process_filestorage_mock,
        ):
            with ThreadPoolExecutor() as executor:
                executor.submit(task_a, [self.file_storage1.id, self.file_storage2.id])
                executor.submit(task_b)
        process_filestorage_mock.assert_not_called()

        rows_locked.clear()
        manager_finished.clear()
        process_filestorage_mock = MagicMock()
        with patch(
            "resolwe.storage.manager.Manager._process_file_storage",
            process_filestorage_mock,
        ):
            with ThreadPoolExecutor() as executor:
                executor.submit(task_a, [self.file_storage1.id])
                executor.submit(task_b)
        process_filestorage_mock.assert_called_once_with(self.file_storage2) 
Example #18
Source File: imagenet.py    From tensornets with MIT License    5 votes
def load(data_dir, data_name, batch_size, resize_wh,
         crop_locs, crop_wh, total_num=None):

    files, labels = get_files(data_dir, data_name, total_num)
    total_num = len(labels)

    for batch_start in range(0, total_num, batch_size):

        data_spec = [batch_size, 1, crop_wh, crop_wh, 3]
        if isinstance(crop_locs, list):
            data_spec[1] = len(crop_locs)
        elif crop_locs == 10:
            data_spec[1] = 10
        X = np.zeros(data_spec, np.float32)

        jobs = []
        with cf.ThreadPoolExecutor(max_workers=48) as executor:
            for (k, f) in enumerate(files[batch_start:batch_start+batch_size]):
                filename = os.path.join("%s/ILSVRC2012_img_val" % data_dir, f)
                if os.path.isfile(filename):
                    jobs.append(executor.submit(
                        load_single, *(filename, resize_wh, crop_wh, crop_locs)))

        cf.wait(jobs)

        for (k, out) in enumerate(jobs):
            X[k] = out.result()

        yield X.reshape((-1, crop_wh, crop_wh, 3)), \
            labels[batch_start:batch_start+batch_size]

        del X 
Example #19
Source File: baseexecution.py    From flumine with MIT License    5 votes
def __init__(self, flumine, max_workers=MAX_WORKERS):
        self.flumine = flumine
        self._thread_pool = ThreadPoolExecutor(max_workers=max_workers)
        self._bet_id = BET_ID_START 
Example #20
Source File: petition.py    From petitions with MIT License    5 votes
def run():
    # Create the directory for saving the data
    try:
        os.mkdir(DATA_DIR)
    except FileExistsError:
        pass

    # Collect newly expired petitions and append them to the existing CSV file
    latest_id = get_latest_article_id()
    next_id = get_latest_saved_article_id() + 1

    logging.info(
        f'From {next_id} to {latest_id}: '
        f'about {latest_id - next_id} articles to go...'
    )

    # Process two requests at a time in parallel; raising the number of workers gets us blocked more often.
    with ThreadPoolExecutor(max_workers=2) as exe:
        for article in exe.map(fetch_article, range(next_id, latest_id)):
            if article is None:
                continue
            save_article(article)
            logging.info(
                f'{article["article_id"]} of {latest_id}: {article["title"]} '
                f'https://www1.president.go.kr/petitions/'
                f'{article["article_id"]}'
            ) 
Example #21
Source File: gthread.py    From jbox with MIT License    5 votes
def init_process(self):
        self.tpool = futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
        self.poller = selectors.DefaultSelector()
        self._lock = RLock()
        super(ThreadWorker, self).init_process() 
Example #22
Source File: __init__.py    From tributary with Apache License 2.0    5 votes
def pipeline(foos, foo_callbacks, foo_kwargs=None, on_data=print, on_data_kwargs=None):
    '''Pipeline a sequence of functions together via callbacks

    Args:
        foos (list of callables): list of functions to pipeline
        foo_callbacks (List[str]): list of strings indicating the callback names (kwargs of the foos)
        foo_kwargs (List[dict]): list of kwargs dicts, one per foo
        on_data (callable): callable to call at the end of the pipeline
        on_data_kwargs (dict): kwargs to pass to the on_data function
    '''
    global _EXECUTOR
    if _EXECUTOR is None:
        _EXECUTOR = ThreadPoolExecutor(max_workers=2)

    foo_kwargs = foo_kwargs or []
    on_data_kwargs = on_data_kwargs or {}

    # organize args for functional pipeline
    assembled = []
    for i, foo in enumerate(foos):
        cb = foo_callbacks[i] if i < len(foo_callbacks) else 'on_data'
        kwargs = foo_kwargs[i] if i < len(foo_kwargs) else {}
        assembled.append((foo, cb, kwargs))

    # assemble pipeline
    assembled.reverse()
    lambdas = [lambda d, f=on_data: run_submit(f, None, d, **on_data_kwargs)]
    for i, a in enumerate(assembled):
        foo, cb, kwargs = a
        function_to_call = lambdas[i]
        kwargs[cb] = function_to_call

        if i != len(assembled) - 1:
            lambdas.append(lambda d, kw=kwargs, f=foo: run_submit(f, function_to_call, d, **kw))
            lambdas[-1].__name__ = foo.__name__
        else:
            lambdas.append(lambda kw=kwargs, f=foo: run_submit(f, function_to_call, **kw))
            lambdas[-1].__name__ = foo.__name__

    # start entrypoint
    lambdas[-1]() 
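For intuition, a tiny self-contained sketch of the callback chaining this pipeline builds (plain function calls with hypothetical names; it is not tributary's API and omits the executor):

def source(on_data):
    for value in (1, 2, 3):
        on_data(value)

def double(value, on_data):
    on_data(value * 2)

# pipeline([source, double], ['on_data', 'on_data'], on_data=print) effectively wires:
source(on_data=lambda v: double(v, on_data=print))  # prints 2, 4, 6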
Example #23
Source File: utils.py    From pySmartDL with The Unlicense    5 votes
def __init__(self, max_workers):
        futures.ThreadPoolExecutor.__init__(self, max_workers)
        self._futures = [] 
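The snippet only shows __init__ of what appears to be an executor subclass that records its futures; a hypothetical continuation of that pattern (illustrative only, not pySmartDL's actual implementation) might override submit():

from concurrent import futures

class TrackingThreadPoolExecutor(futures.ThreadPoolExecutor):
    def __init__(self, max_workers):
        futures.ThreadPoolExecutor.__init__(self, max_workers)
        self._futures = []

    def submit(self, fn, *args, **kwargs):
        # remember every future so callers can inspect results or exceptions later
        future = futures.ThreadPoolExecutor.submit(self, fn, *args, **kwargs)
        self._futures.append(future)
        return future

    def get_exceptions(self):
        return [f.exception() for f in self._futures if f.exception() is not None]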
Example #24
Source File: ioloop_test.py    From tornado-zh with MIT License    5 votes
def test_add_future_stack_context(self):
        ready = threading.Event()

        def task():
            # we must wait for the ioloop callback to be scheduled before
            # the task completes to ensure that add_future adds the callback
            # asynchronously (which is the scenario in which capturing
            # the stack_context matters)
            ready.wait(1)
            assert ready.isSet(), "timed out"
            raise Exception("worker")

        def callback(future):
            self.future = future
            raise Exception("callback")

        def handle_exception(typ, value, traceback):
            self.exception = value
            self.stop()
            return True

        # stack_context propagates to the ioloop callback, but the worker
        # task just has its exceptions caught and saved in the Future.
        with futures.ThreadPoolExecutor(1) as pool:
            with ExceptionStackContext(handle_exception):
                self.io_loop.add_future(pool.submit(task), callback)
            ready.set()
        self.wait()

        self.assertEqual(self.exception.args[0], "callback")
        self.assertEqual(self.future.exception().args[0], "worker") 
Example #25
Source File: ioloop_test.py    From tornado-zh with MIT License    5 votes
def test_add_future_threads(self):
        with futures.ThreadPoolExecutor(1) as pool:
            self.io_loop.add_future(pool.submit(lambda: None),
                                    lambda future: self.stop(future))
            future = self.wait()
            self.assertTrue(future.done())
            self.assertTrue(future.result() is None) 
Example #26
Source File: gen_test.py    From tornado-zh with MIT License    5 votes
def test_completed_concurrent_future(self):
        with futures.ThreadPoolExecutor(1) as executor:
            yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                   executor.submit(lambda: None)) 
Example #27
Source File: gen_test.py    From tornado-zh with MIT License    5 votes
def test_timeout_concurrent_future(self):
        with futures.ThreadPoolExecutor(1) as executor:
            with self.assertRaises(gen.TimeoutError):
                yield gen.with_timeout(self.io_loop.time(),
                                       executor.submit(time.sleep, 0.1)) 
Example #28
Source File: ioloop_test.py    From tornado-zh with MIT License    5 votes
def test_add_future_stack_context(self):
        ready = threading.Event()

        def task():
            # we must wait for the ioloop callback to be scheduled before
            # the task completes to ensure that add_future adds the callback
            # asynchronously (which is the scenario in which capturing
            # the stack_context matters)
            ready.wait(1)
            assert ready.isSet(), "timed out"
            raise Exception("worker")

        def callback(future):
            self.future = future
            raise Exception("callback")

        def handle_exception(typ, value, traceback):
            self.exception = value
            self.stop()
            return True

        # stack_context propagates to the ioloop callback, but the worker
        # task just has its exceptions caught and saved in the Future.
        with futures.ThreadPoolExecutor(1) as pool:
            with ExceptionStackContext(handle_exception):
                self.io_loop.add_future(pool.submit(task), callback)
            ready.set()
        self.wait()

        self.assertEqual(self.exception.args[0], "callback")
        self.assertEqual(self.future.exception().args[0], "worker") 
Example #29
Source File: ioloop_test.py    From tornado-zh with MIT License    5 votes
def test_add_future_threads(self):
        with futures.ThreadPoolExecutor(1) as pool:
            self.io_loop.add_future(pool.submit(lambda: None),
                                    lambda future: self.stop(future))
            future = self.wait()
            self.assertTrue(future.done())
            self.assertTrue(future.result() is None) 
Example #30
Source File: gen_test.py    From tornado-zh with MIT License    5 votes
def test_completed_concurrent_future(self):
        with futures.ThreadPoolExecutor(1) as executor:
            yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                   executor.submit(lambda: None))