Python concurrent.futures.ProcessPoolExecutor() Examples

The following are 30 code examples of concurrent.futures.ProcessPoolExecutor(), collected from open-source projects. The original project and source file for each example are noted above it. You may also want to check out the other available functions and classes of the concurrent.futures module.
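As a quick orientation before the project-specific examples, here is a minimal, self-contained sketch of the two common usage patterns (executor.map, and executor.submit with as_completed). The function name square and the worker count of 4 are illustrative assumptions, not taken from any example below:

from concurrent.futures import ProcessPoolExecutor, as_completed

def square(n):
    # Runs in a separate worker process; the function must be picklable and defined at module level.
    return n * n

if __name__ == '__main__':
    # The __main__ guard is required on platforms that spawn worker processes (e.g. Windows and macOS).
    with ProcessPoolExecutor(max_workers=4) as executor:
        # map() preserves input order and returns results lazily.
        print(list(executor.map(square, range(10))))
        # submit()/as_completed() yields futures in completion order.
        futures = [executor.submit(square, n) for n in range(10)]
        for future in as_completed(futures):
            print(future.result())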
Example #1
Source File: poolImprovement.py    From Learning-Concurrency-in-Python with MIT License
def main():

  t1 = timeit.default_timer()
  with ProcessPoolExecutor(max_workers=4) as executor:
    for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
      print('%d is prime: %s' % (number, prime))

  print("{} Seconds Needed for ProcessPoolExecutor".format(timeit.default_timer() - t1))

  t2 = timeit.default_timer()
  with ThreadPoolExecutor(max_workers=4) as executor:
    for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
      print('%d is prime: %s' % (number, prime))
  print("{} Seconds Needed for ThreadPoolExecutor".format(timeit.default_timer() - t2))

  t3 = timeit.default_timer()
  for number in PRIMES:
    isPrime = is_prime(number)
    print("{} is prime: {}".format(number, isPrime))
  print("{} Seconds needed for single threaded execution".format(timeit.default_timer() - t3))
Example #2
Source File: ilsvrc_det.py    From gluon-cv with Apache License 2.0
def par_crop(args):
    """
    Dataset curation: crop data and transform the label format
    """
    crop_path = os.path.join(args.download_dir, './crop{:d}'.format(args.instance_size))
    if not os.path.isdir(crop_path): makedirs(crop_path)
    VID_base_path = os.path.join(args.download_dir, './ILSVRC')
    ann_base_path = os.path.join(VID_base_path, 'Annotations/DET/train/')
    sub_sets = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i')
    for sub_set in sub_sets:
        sub_set_base_path = os.path.join(ann_base_path, sub_set)
        if 'a' == sub_set:
            xmls = sorted(glob.glob(os.path.join(sub_set_base_path, '*', '*.xml')))
        else:
            xmls = sorted(glob.glob(os.path.join(sub_set_base_path, '*.xml')))
        n_imgs = len(xmls)
        sub_set_crop_path = os.path.join(crop_path, sub_set)
        with futures.ProcessPoolExecutor(max_workers=args.num_threads) as executor:
            fs = [executor.submit(crop_xml, args, xml, sub_set_crop_path, args.instance_size) for xml in xmls]
            for i, f in enumerate(futures.as_completed(fs)):
                printProgress(i, n_imgs, prefix=sub_set, suffix='Done ', barLength=80) 
Example #3
Source File: test_file_subcommand_fifo.py    From sqlitebiter with MIT License
def test_smoke_one_file(self):
        db_path = "test.sqlite"
        runner = CliRunner()

        with runner.isolated_filesystem():
            fifo_name = "jsonl_fifo"

            os.mkfifo(fifo_name)

            with ProcessPoolExecutor() as executor:
                executor.submit(fifo_writer, fifo_name)
                result = runner.invoke(cmd, ["-o", db_path, "file", fifo_name, "--format", "jsonl"])

            print_traceback(result)

            assert result.exit_code == ExitCode.SUCCESS, fifo_name

            assert SimpleSQLite(db_path).fetch_num_records("jsonl_fifo") == 8 
Example #4
Source File: eggroll.py    From FATE with Apache License 2.0
def __init__(self, eggroll_session):
        self.data_dir = os.path.join(file_utils.get_project_base_directory(), 'data')
        self.session_id = eggroll_session.get_session_id()
        self.meta_table = _DTable('__META__', '__META__', 'fragments', 10)
        self.pool = Executor()
        Standalone.__instance = self

        self.eggroll_session = eggroll_session

        self.unique_id_template = '_EggRoll_%s_%s_%s_%.20f_%d'

        eggroll_session.set_gc_table(self)
        eggroll_session.add_cleanup_task(eggroll_session.clean_duplicated_table)

        # todo: move to eggrollSession
        try:
            self.host_name = socket.gethostname()
            self.host_ip = socket.gethostbyname(self.host_name)
        except socket.gaierror as e:
            self.host_name = 'unknown'
            self.host_ip = 'unknown' 
Example #5
Source File: util.py    From yui with GNU Affero General Public License v3.0
def __init__(self, config: Config = None) -> None:
        if config is None:
            config = Config(**DEFAULT, TOKEN='asdf', CHANNELS={}, USERS={})

        Namespace._bot = self
        self.loop = asyncio.get_event_loop()
        self.call_queue: List[Call] = []
        self.api = SlackAPI(self)
        self.channels: List[PublicChannel] = []
        self.ims: List[DirectMessageChannel] = []
        self.groups: List[PrivateChannel] = []
        self.mc = aiomcache.Client(
            host=config.CACHE['HOST'], port=config.CACHE['PORT'],
        )
        self.cache: CacheMock = CacheMock(self.mc, 'YUI_TEST_')
        self.users: List[User] = [User(id='U0', team_id='T0', name='system')]
        self.responses: Dict[str, Callable] = {}
        self.config = config
        self.process_pool_executor = ProcessPoolExecutor()
        self.thread_pool_executor = ThreadPoolExecutor() 
Example #6
Source File: blizzard2013.py    From vae_tacotron with MIT License
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
  '''Preprocesses the LJ Speech dataset from a given input path into a given output directory.
    Args:
      in_dir: The directory where you have downloaded the LJ Speech dataset
      out_dir: The directory to write the output into
      num_workers: Optional number of worker processes to parallelize across
      tqdm: You can optionally pass tqdm to get a nice progress bar
    Returns:
      A list of tuples describing the training examples. This should be written to train.txt
  '''

  # We use ProcessPoolExecutor to parallelize across processes. This is just an optimization and you
  # can omit it and just call _process_utterance on each input if you want.
  executor = ProcessPoolExecutor(max_workers=num_workers)
  futures = []
  index = 1
  with open(os.path.join(in_dir, 'metadata.train'), encoding='utf-8') as f:
    for line in f:
      parts = line.strip().split('|')
      wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])
      text = parts[1]
      futures.append(executor.submit(partial(_process_utterance, out_dir, index, wav_path, text)))
      index += 1
  results = [future.result() for future in tqdm(futures)]
  return [r for r in results if r is not None] 
Example #7
Source File: ilsvrc_vid.py    From gluon-cv with Apache License 2.0
def par_crop(args, ann_base_path):
    """
    Dataset curation: crop data and transform the label format
    Parameters
    ----------
    ann_base_path: str, Annotations base path
    """
    crop_path = os.path.join(args.download_dir, './crop{:d}'.format(int(args.instance_size)))
    if not os.path.isdir(crop_path):
        makedirs(crop_path)
    sub_sets = sorted({'a', 'b', 'c', 'd', 'e'})
    for sub_set in sub_sets:
        sub_set_base_path = os.path.join(ann_base_path, sub_set)
        videos = sorted(os.listdir(sub_set_base_path))
        n_videos = len(videos)
        with futures.ProcessPoolExecutor(max_workers=args.num_threads) as executor:
            fs = [executor.submit(crop_video, args, sub_set, video, crop_path, ann_base_path) for video in videos]
            for i, f in enumerate(futures.as_completed(fs)):
                # Write progress to error so that it can be seen
                printProgress(i, n_videos, prefix=sub_set, suffix='Done ', barLength=40) 
Example #8
Source File: aiopipe.py    From bioconda-utils with MIT License
def __init__(self, threads: int = None) -> None:
        try:  # get or create loop (threads don't have one)
            #: our asyncio loop
            self.loop = asyncio.get_event_loop()
        except RuntimeError:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
        #: number of threads to use
        self.threads = threads or threads_to_use()
        #: semaphore to limit io parallelism
        self.io_sem: asyncio.Semaphore = asyncio.Semaphore(1)
        #: must never run more than one conda at the same time
        #: (used by PyPi when running skeleton)
        self.conda_sem: asyncio.Semaphore = asyncio.Semaphore(1)
        #: the filters successively applied to each item
        self.filters: List[AsyncFilter] = []
        #: executor running things in separate python processes
        self.proc_pool_executor = ProcessPoolExecutor(self.threads)

        self._shutting_down = False 
Example #9
Source File: aiopipe.py    From bioconda-utils with MIT License
def run(self) -> bool:
        """Enters the asyncio loop and manages shutdown."""
        # We need to handle KeyboardInterrupt "manually" to get clean shutdown
        # for the ProcessPoolExecutor
        self.loop.add_signal_handler(signal.SIGINT,
                                     lambda: asyncio.ensure_future(self.shutdown(signal.SIGINT)))
        try:
            task = asyncio.ensure_future(self._async_run())
            self.loop.run_until_complete(task)
            logger.warning("Finished update")
        except asyncio.CancelledError:
            pass
        except EndProcessing:
            logger.error("Terminating...")
            self.loop.run_until_complete(self.shutdown())

        for filt in self.filters:
            filt.finalize() 
Example #10
Source File: preprocess.py    From UniversalVocoding with MIT License
def preprocess(wav_dirs, out_dir, num_workers, params):
    audio_out_dir = os.path.join(out_dir, "audio")
    mel_out_dir = os.path.join(out_dir, "mels")
    os.makedirs(out_dir, exist_ok=True)
    os.makedirs(audio_out_dir, exist_ok=True)
    os.makedirs(mel_out_dir, exist_ok=True)

    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []
    wav_paths = chain.from_iterable(glob.iglob("{}/*.wav".format(dir), recursive=True) for dir in wav_dirs)
    for wav_path in wav_paths:
        fid = os.path.basename(wav_path).replace(".wav", ".npy")
        audio_path = os.path.join(audio_out_dir, fid)
        mel_path = os.path.join(mel_out_dir, fid)
        futures.append(executor.submit(partial(process_wav, wav_path, audio_path, mel_path, params)))

    metadata = [future.result() for future in tqdm(futures)]
    write_metadata(metadata, out_dir, params) 
Example #11
Source File: struc2vec.py    From struc2vec with MIT License
def simulate_walks(self,num_walks,walk_length):

		# for large graphs, execution is serial because of memory use
		if(len(self.G) > 500000):

			with ProcessPoolExecutor(max_workers=1) as executor:
				job = executor.submit(generate_random_walks_large_graphs,num_walks,walk_length,self.workers,self.G.keys())

				job.result()

		else:

			with ProcessPoolExecutor(max_workers=1) as executor:
				job = executor.submit(generate_random_walks,num_walks,walk_length,self.workers,self.G.keys())

				job.result()


		return 
Example #12
Source File: blizzard.py    From libfaceid with MIT License
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
  executor = ProcessPoolExecutor(max_workers=num_workers)
  futures = []
  index = 1
  for book in books:
    with open(os.path.join(in_dir, book, 'sentence_index.txt')) as f:
      for line in f:
        parts = line.strip().split('\t')
        if line[0] != '#' and len(parts) == 8 and float(parts[3]) > _min_confidence:
          wav_path = os.path.join(in_dir, book, 'wav', '%s.wav' % parts[0])
          labels_path = os.path.join(in_dir, book, 'lab', '%s.lab' % parts[0])
          text = parts[5]
          task = partial(_process_utterance, out_dir, index, wav_path, labels_path, text)
          futures.append(executor.submit(task))
          index += 1
  results = [future.result() for future in tqdm(futures)]
  return [r for r in results if r is not None] 
Example #13
Source File: __main__.py    From nevergrad with MIT License
def launch(experiment: str, num_workers: int = 1, seed: tp.Optional[int] = None,
           cap_index: tp.Optional[int] = None, output: tp.Optional[PathLike] = None) -> Path:
    """Launch experiment with given names and selection modulo
    cap_index can be specified to provide a limited number of settings
    """
    # create the data
    csvpath = Path(experiment + ".csv") if output is None else Path(output)
    if num_workers == 1:
        df = core.compute(experiment, cap_index=cap_index, seed=seed)
    else:
        with futures.ProcessPoolExecutor(max_workers=num_workers) as executor:
            df = core.compute(experiment, seed=seed, cap_index=cap_index, executor=executor, num_workers=num_workers)
    # save data to csv
    try:
        core.save_or_append_to_csv(df, csvpath)
    except Exception:  # pylint: disable=broad-except
        csvpath = Path(experiment + ".csv")
        print(f"Failed to save to {output}, falling back to {csvpath}")
        core.save_or_append_to_csv(df, csvpath)
    else:
        print(f"Saved data to {csvpath}")
    return csvpath 
Example #14
Source File: blizzard.py    From vae_tacotron with MIT License
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
  executor = ProcessPoolExecutor(max_workers=num_workers)
  futures = []
  index = 1
  for book in books:
    with open(os.path.join(in_dir, book, 'sentence_index.txt')) as f:
      for line in f:
        parts = line.strip().split('\t')
        if line[0] != '#' and len(parts) == 8 and float(parts[3]) > _min_confidence:
          wav_path = os.path.join(in_dir, book, 'wav', '%s.wav' % parts[0])
          labels_path = os.path.join(in_dir, book, 'lab', '%s.lab' % parts[0])
          text = parts[5]
          task = partial(_process_utterance, out_dir, index, wav_path, labels_path, text)
          futures.append(executor.submit(task))
          index += 1
  results = [future.result() for future in tqdm(futures)]
  return [r for r in results if r is not None] 
Example #15
Source File: convert2jack.py    From jack with MIT License
def convert_dataset(path, filemap, name, num_processes, max_num_support, max_tokens, is_web=True):
    with open(path, 'rb') as f:
        dataset = pickle.load(f)

    if num_processes == 1:
        instances = process((dataset, filemap, max_num_support, max_tokens, is_web), True)
    else:
        chunk_size = 1000
        executor = ProcessPoolExecutor(num_processes)
        instances = []
        i = 0
        for processed in executor.map(
                process, [(dataset[i * chunk_size:(i + 1) * chunk_size], filemap, max_num_support, max_tokens, is_web)
                          for i in range(len(dataset) // chunk_size + 1)]):
            instances.extend(processed)
            i += chunk_size
            print("%d/%d done" % (min(len(dataset), i), len(dataset)))

    return {"meta": {"source": name}, 'instances': instances} 
Example #16
Source File: struc2vec.py    From struc2vec with MIT License
def preprocess_degree_lists(self):

		with ProcessPoolExecutor(max_workers=self.workers) as executor:
			job = executor.submit(preprocess_degreeLists)
			
			job.result()

		return 
Example #17
Source File: struc2vec.py    From struc2vec with MIT License
def preprocess_parameters_random_walk(self):

		with ProcessPoolExecutor(max_workers=1) as executor:
			job = executor.submit(generate_parameters_random_walk,self.workers)

			job.result()

		return 
Example #18
Source File: test_jsonlines_reader.py    From pytablereader with MIT License
def test_normal_fifo(self, tmpdir, table_text, fifo_name, expected):
        namedpipe = str(tmpdir.join(fifo_name))

        os.mkfifo(namedpipe)

        loader = self.LOADER_CLASS(namedpipe)

        with ProcessPoolExecutor() as executor:
            executor.submit(fifo_writer, namedpipe, table_text)

            for tabledata in loader.load():
                print("[actual]\n{}".format(dumps_tabledata(tabledata)))

                assert tabledata.in_tabledata_list(expected) 
Example #19
Source File: struc2vec.py    From struc2vec with MIT License
def preprocess_neighbors_with_bfs_compact(self):

		with ProcessPoolExecutor(max_workers=self.workers) as executor:
			job = executor.submit(exec_bfs_compact,self.G,self.workers,self.calcUntilLayer)
			
			job.result()

		return 
Example #20
Source File: summarize_logs.py    From fcn with MIT License
def summarize_logs(logs_dir, keys, target_key, objective):
    assert objective in ['min', 'max']
    assert target_key in keys

    args_list = []
    for name in os.listdir(logs_dir):
        args_list.append((
            logs_dir,
            name,
            keys,
            target_key,
            objective,
        ))

    from concurrent.futures import ProcessPoolExecutor
    with ProcessPoolExecutor(max_workers=4) as executor:
        results = executor.map(_summarize_log, args_list)

    rows = []
    ignored = []
    for row, is_active, log_dir_ignored in results:
        if log_dir_ignored:
            ignored.append(log_dir_ignored)
            continue
        rows.append(row)

    rows = sorted(rows, key=lambda x: x[0], reverse=True)
    print(tabulate.tabulate(rows, headers=keys,
                            floatfmt='.3f', tablefmt='grid',
                            numalign='center', stralign='center',
                            showindex=True, disable_numparse=True))

    if not ignored:
        return

    print('Ignored logs:')
    for log_dir in ignored:
        print('  - %s' % log_dir) 
Example #21
Source File: bootstrap_portfolio.py    From PyTrendFollow with MIT License
def mp_optimize_weights(samples, data, **kw):
    return ProcessPoolExecutor().map(partial(optimize_weights, data), samples) 
Example #22
Source File: struc2vec.py    From struc2vec with MIT License
def preprocess_neighbors_with_bfs(self):

		with ProcessPoolExecutor(max_workers=self.workers) as executor:
			job = executor.submit(exec_bfs,self.G,self.workers,self.calcUntilLayer)
			
			job.result()

		return 
Example #23
Source File: throughput.py    From petastorm with Apache License 2.0
def _create_concurrent_executor(pool_type, decoders_count):
    if pool_type == WorkerPoolType.PROCESS:
        decoder_pool_executor = ProcessPoolExecutor(decoders_count)
    elif pool_type == WorkerPoolType.THREAD:
        decoder_pool_executor = ThreadPoolExecutor(decoders_count)
    else:
        raise ValueError('Unexpected pool type value: %s', pool_type)
    return decoder_pool_executor 
Example #24
Source File: preprocessor.py    From vae_tacotron2 with MIT License
def build_from_path(input_dirs, mel_dir, linear_dir, wav_dir, n_jobs=12, tqdm=lambda x: x):
	"""
	Preprocesses the speech dataset from a given input path to the given output directories

	Args:
		- input_dir: input directory that contains the files to preprocess
		- mel_dir: output directory of the preprocessed speech mel-spectrogram dataset
		- linear_dir: output directory of the preprocessed speech linear-spectrogram dataset
		- wav_dir: output directory of the preprocessed speech audio dataset
		- n_jobs: Optional, number of worker processes to parallelize across
		- tqdm: Optional, provides a nice progress bar

	Returns:
		- A list of tuples describing the training examples. This should be written to train.txt
	"""

	# We use ProcessPoolExecutor to parallelize across processes; this is just an
	# optimization and it can be omitted
	executor = ProcessPoolExecutor(max_workers=n_jobs)
	futures = []
	index = 1
	for input_dir in input_dirs:
		with open(os.path.join(input_dir, 'metadata.csv'), encoding='utf-8') as f:
			for line in f:
				parts = line.strip().split('|')
				wav_path = os.path.join(input_dir, 'wavs', '{}.wav'.format(parts[0]))
				text = parts[2]
				futures.append(executor.submit(partial(_process_utterance, mel_dir, linear_dir, wav_dir, index, wav_path, text)))
				index += 1

	return [future.result() for future in tqdm(futures) if future.result() is not None] 
Example #25
Source File: test_csv_reader.py    From pytablereader with MIT License
def test_normal_fifo(self, tmpdir, table_text, fifo_name, expected):
        namedpipe = str(tmpdir.join(fifo_name))

        os.mkfifo(namedpipe)

        loader = ptr.CsvTableFileLoader(namedpipe)

        with ProcessPoolExecutor() as executor:
            executor.submit(fifo_writer, namedpipe, table_text)

            for tabledata in loader.load():
                print(dumps_tabledata(tabledata))

                assert tabledata.in_tabledata_list(expected) 
Example #26
Source File: getshark.py    From ibeis with Apache License 2.0
def download_missing_images(parsed, num=None):
    exist_flags = ut.lmap(exists, parsed['new_fpath'])
    missing_flags = ut.not_list(exist_flags)
    print('nExist = %r / %r' % (sum(exist_flags), len(exist_flags)))
    print('nMissing = %r / %r' % (sum(missing_flags), len(exist_flags)))
    if any(missing_flags):
        missing = parsed.compress(missing_flags)
        print('Downloading missing subset')
        _iter = list(zip(missing['img_url'], missing['new_fpath']))
        if num:
            print('Only downloading {}'.format(num))

        from concurrent import futures
        ex = futures.ProcessPoolExecutor(7)
        fs = [ex.submit(ut.download_url, *args, new=True, verbose=False) for args in _iter]

        for f in ut.ProgIter(futures.as_completed(fs), length=len(_iter), label='downloading wildbook images'):
            pass

        # import multiprocessing
        # pool = multiprocessing.Pool(7)
        # res = pool.map(ut.partial(ut.download_url, new=True, verbose=False), _iter)

        # gen = ut.util_parallel.generate2(ut.download_url, zip(_iter), new=True, verbose=False)
        # for _ in gen:
        #     pass

        # _prog = ut.ProgPartial(bs=True, freq=1)
        # count = 0
        # for img_url, new_fpath in _prog(_iter, lbl='downloading wildbook images'):
        #     #url = img_url
        #     #filename = new_fpath
        #     #break
        #     try:
        #         ut.download_url(img_url, new_fpath, verbose=False, new=True)
        #         count += 1
        #         if num is not None and count > num:
        #             break
        #     except (ZeroDivisionError, IOError):
        #         pass 
Example #27
Source File: _thesis_helpers.py    From ibeis with Apache License 2.0
def draw(ChapX, expt_name, dbnames, *args):
        """
        CommandLine:
            python -m ibeis Chap3.draw nsum --dbs=GZ_Master1,PZ_Master1
            python -m ibeis Chap3.draw foregroundness --dbs=GZ_Master1,PZ_Master1 --diskshow
            python -m ibeis Chap3.draw kexpt --dbs=GZ_Master1 --diskshow

            python -m ibeis Chap4.draw importance GZ_Master1

            python -m ibeis Chap4.draw hard_cases GZ_Master1,PZ_Master1 match_state,photobomb_state
            --diskshow

        # Example:
        #     >>> # Script
        #     >>> from ibeis.scripts.thesis import *  # NOQA
        #     >>> expt_name = ut.get_argval('--expt', type_=str, pos=1)
        #     >>> dbnames = ut.get_argval(('--dbs', '--db'), type_=list, default=[])
        #     >>> Chap3.draw(expt_name, dbnames)
        """
        print('expt_name = %r' % (expt_name,))
        print('dbnames = %r' % (dbnames,))
        print('args = %r' % (args,))
        dbnames = ut.smart_cast(dbnames, list)

        if len(dbnames) > 1:
            # parallelize drawing tasks
            from concurrent import futures
            multi_args = [ut.smart_cast(a, list) for a in args]
            with futures.ProcessPoolExecutor(max_workers=6) as executor:
                list(futures.as_completed([
                    executor.submit(ChapX.draw_serial, expt_name, *fsargs)
                    for fsargs in ut.product(dbnames, *multi_args)
                ]))
            print('\n\n Completed multiple tasks')
        else:
            ChapX.draw_serial(expt_name, dbnames, *args) 
Example #28
Source File: command.py    From coconut with Apache License 2.0
def running_jobs(self, exit_on_error=True):
        """Initialize multiprocessing."""
        with self.handling_exceptions():
            if self.using_jobs:
                from concurrent.futures import ProcessPoolExecutor
                try:
                    with ProcessPoolExecutor(self.jobs) as self.executor:
                        yield
                finally:
                    self.executor = None
            else:
                yield
        if exit_on_error:
            self.exit_on_error() 
Example #29
Source File: main.py    From Playlist-Length with MIT License
def calculate_length(BASE_PATH, no_subdir, media_type, queue, cache_ob):
    if not os.path.isdir(BASE_PATH):
        return bold(red('Error: This doesn\'t seem to be a valid directory.'))

    all_files = get_all_files(BASE_PATH, no_subdir)
    max_workers = multiprocessing.cpu_count() + 1
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        sys.stdout.write('\n')
        cache = cache_ob.cache
        args = ((file, queue, cache) for file in all_files)
        result = list(
            tqdm(
                executor.map(duration, args),
                total=len(all_files),
                desc='Processing files',
            )
        )

    length = round(sum(result))

    queue.put(None)  # poison pill

    if length == 0:
        return bold(red('Seems like there are no {} files. ¯\_(ツ)_/¯'.format(media_type)))
    elif length < 60:
        minutes_string = pluralize(length, base='minute', suffix='s')
        result = 'Length of all {} is {}.'.format(media_type, minutes_string)
    else:
        hours, minutes = divmod(length, 60)
        hours_string = pluralize(hours, base='hour', suffix='s')
        minutes_string = pluralize(minutes, base='minute', suffix='s')
        result = 'Length of all {} is {} and {}.'.format(
            media_type, hours_string, minutes_string
        )
    return bold(green(result)) 
Example #30
Source File: ex5_cf_procs_cm.py    From pyplus_course with Apache License 2.0
def main():
    """
    Use a concurrent.futures process pool to simultaneously gather "show version" output from devices.
    Wait for all processes to complete. Record the amount of time required to do this.
    """
    start_time = time.time()
    max_procs = 5

    # Create the process pool
    with ProcessPoolExecutor(max_procs) as pool:

        # Create list to append the processes to
        futures = []
        for device in network_devices:
            futures.append(pool.submit(ssh_command2, device, "show version"))

        print("\n\n")
        for future in as_completed(futures):
            print("-" * 40)
            print("Result: " + future.result())
            print("-" * 40)
            print("\n\n")

        end_time = time.time()
        print(f"Finished in {end_time - start_time:.2f}")
        print("\n\n")