Python multiprocessing.JoinableQueue() Examples

The following are 30 code examples of multiprocessing.JoinableQueue(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the multiprocessing module, or try the search function.
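Before the examples, here is a minimal, self-contained sketch of the pattern they all build on: producers put() work items onto a JoinableQueue, consumers call task_done() after handling each item, and join() blocks until every item has been acknowledged. All names in this sketch are illustrative.

import multiprocessing

def square_worker(queue):
    while True:
        item = queue.get()
        if item is None:              # sentinel: no more work for this consumer
            queue.task_done()
            break
        print('processed', item * item)
        queue.task_done()             # acknowledge the item so join() can return

if __name__ == '__main__':
    queue = multiprocessing.JoinableQueue()
    consumers = [multiprocessing.Process(target=square_worker, args=(queue,))
                 for _ in range(2)]
    for p in consumers:
        p.start()
    for i in range(10):
        queue.put(i)
    queue.join()                      # blocks until every item has been task_done()
    for _ in consumers:
        queue.put(None)               # one sentinel per consumer
    for p in consumers:
        p.join()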
Example #1
Source File: imagescale-q-m.py    From python-in-practice with GNU General Public License v3.0
def scale(size, smooth, source, target, concurrency):
    canceled = False
    jobs = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    create_processes(size, smooth, jobs, results, concurrency)
    todo = add_jobs(source, target, jobs)
    try:
        jobs.join()
    except KeyboardInterrupt: # May not work on Windows
        Qtrac.report("canceling...")
        canceled = True
    copied = scaled = 0
    while not results.empty(): # Safe because all jobs have finished
        result = results.get_nowait()
        copied += result.copied
        scaled += result.scaled
    return Summary(todo, copied, scaled, canceled) 
Example #2
Source File: multiproc.py    From PynPoint with GNU General Public License v3.0
def __init__(self,
                 tasks_queue_in: multiprocessing.JoinableQueue,
                 result_queue_in: multiprocessing.JoinableQueue) -> None:
        """
        Parameters
        ----------
        tasks_queue_in : multiprocessing.queues.JoinableQueue
            The input task queue with instances of :class:`~pynpoint.util.multiproc.TaskInput`.
        result_queue_in : multiprocessing.queues.JoinableQueue
            The result task queue with instances of :class:`~pynpoint.util.multiproc.TaskResult`.

        Returns
        -------
        NoneType
            None
        """

        multiprocessing.Process.__init__(self)

        self.m_task_queue = tasks_queue_in
        self.m_result_queue = result_queue_in 
Example #3
Source File: _test_multiprocessing.py    From ironpython3 with Apache License 2.0
def test_task_done(self):
        queue = self.JoinableQueue()

        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]

        for p in workers:
            p.daemon = True
            p.start()

        for i in range(10):
            queue.put(i)

        queue.join()

        for p in workers:
            queue.put(None)

        for p in workers:
            p.join() 
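The _test_task_done worker referenced above is not included in this excerpt. In CPython's test suite it is essentially the following loop (paraphrased, not verbatim): each worker drains the queue until the None sentinel and acknowledges every real item with task_done(). The sentinels are only put after queue.join() has returned, so they need no acknowledgement themselves.

# requires: import time
@classmethod
def _test_task_done(cls, q):
    for obj in iter(q.get, None):    # stop once the None sentinel is received
        time.sleep(0.1)              # simulate some work (DELTA in the real test)
        q.task_done()                # acknowledge the processed item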
Example #4
Source File: multipca.py    From PynPoint with GNU General Public License v3.0
def __init__(self,
                 tasks_queue_in: multiprocessing.JoinableQueue,
                 num_proc: int,
                 pca_numbers: Union[np.ndarray, tuple]) -> None:
        """
        Parameters
        ----------
        tasks_queue_in : multiprocessing.queues.JoinableQueue
            Input task queue.
        num_proc : int
            Number of processors.
        pca_numbers : np.ndarray, tuple
            Principal components for which the residuals are computed.

        Returns
        -------
        NoneType
            None
        """

        super(PcaTaskCreator, self).__init__(None, tasks_queue_in, None, num_proc)

        self.m_pca_numbers = pca_numbers 
Example #5
Source File: _test_multiprocessing.py    From Fluid-Designer with GNU General Public License v3.0
def test_task_done(self):
        queue = self.JoinableQueue()

        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]

        for p in workers:
            p.daemon = True
            p.start()

        for i in range(10):
            queue.put(i)

        queue.join()

        for p in workers:
            queue.put(None)

        for p in workers:
            p.join() 
Example #6
Source File: save_article_content.py    From web_develop with GNU General Public License v3.0
def use_multiprocessing_with_queue2():
    queue = multiprocessing.JoinableQueue()
    num_consumers = multiprocessing.cpu_count() * 2
    results_queue = multiprocessing.Queue()

    for article in Article.objects.all()[5:8]:
        queue.put(article)

    for _ in range(num_consumers):
        p = multiprocessing.Process(target=save_article_result_with_queue2,
                                    args=(queue, results_queue))
        p.start()

    queue.join()

    results = []

    while 1:
        try:
            updated_article = results_queue.get(timeout=1)
        except Empty:
            break
        results.append(updated_article)
    print(len(results))
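The consumer function save_article_result_with_queue2 is not included in this excerpt. For queue.join() above to ever return, each consumer must call task_done() once per article it takes from the JoinableQueue. A hedged sketch of such a consumer (the save step and the Empty-timeout exit are assumptions; only the queue handling is implied by the code above):

from queue import Empty   # assumption: Python 3 naming of the Empty exception

def save_article_result_with_queue2(queue, results_queue):
    while True:
        try:
            article = queue.get(timeout=1)
        except Empty:                      # no more articles: stop this consumer
            break
        try:
            article.save()                 # hypothetical persistence step
            results_queue.put(article)     # report the updated article back
        finally:
            queue.task_done()              # lets queue.join() in the parent return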
Example #7
Source File: nmapprocessmanager.py    From grinder with GNU General Public License v2.0
def __init__(
        self,
        queue: JoinableQueue,
        arguments: str,
        ports: str,
        sudo: bool,
        hosts_quantity: int,
        results_pool: dict,
    ):
        Process.__init__(self)
        self.queue = queue
        self.arguments = arguments
        self.ports = ports
        self.sudo = sudo
        self.quantity = hosts_quantity
        self.results_pool = results_pool 
Example #8
Source File: save_article_content.py    From web_develop with GNU General Public License v3.0
def use_multiprocessing_with_queue2():
    queue = multiprocessing.JoinableQueue()
    num_consumers = multiprocessing.cpu_count() * 2
    results_queue = multiprocessing.Queue()

    for article in Article.objects.all():
        queue.put(article)

    for _ in range(num_consumers):
        p = multiprocessing.Process(target=save_article_result_with_queue2,
                                    args=(queue, results_queue))
        p.start()

    queue.join()

    results = []

    while 1:
        try:
            updated_article = results_queue.get(timeout=1)
        except Empty:
            break
        results.append(updated_article)
    print(len(results))
Example #9
Source File: indexq.py    From SolrClient with Apache License 2.0
def get_multi_q(self, sentinel='STOP'):
        '''
        This helps IndexQ operate in a multiprocessing environment without each process having to have its own IndexQ. It is also a handy way to deal with thread / process safety.

        This method will create and return a JoinableQueue object. Additionally, it will kick off a back-end process that will monitor the queue, dequeue items and add them to this IndexQ.

        The returned JoinableQueue object can be safely passed to multiple worker processes to populate it with data.

        To indicate that you are done writing the data to the queue, pass in the sentinel value ('STOP' by default).

        Make sure you call join_indexer() after you are done to close out the queue and join the worker.
        '''
        self.in_q = JoinableQueue()
        self.indexer_process = Process(target=self._indexer_process, args=(self.in_q, sentinel))
        self.indexer_process.daemon = False
        self.indexer_process.start()
        return self.in_q 
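A hypothetical usage sketch of the method above: iq is assumed to be an already-constructed IndexQ instance and docs_chunks an iterable of document batches; only get_multi_q(), the 'STOP' sentinel, and join_indexer() come from the docstring.

from multiprocessing import Process

def feed(q, chunk):
    # hypothetical producer: push one batch of documents onto the shared queue
    for doc in chunk:
        q.put(doc)

in_q = iq.get_multi_q()                  # JoinableQueue shared with the indexer process
producers = [Process(target=feed, args=(in_q, chunk)) for chunk in docs_chunks]
for p in producers:
    p.start()
for p in producers:
    p.join()
in_q.put('STOP')                         # default sentinel: signal that writing is done
iq.join_indexer()                        # close out the queue and join the worker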
Example #10
Source File: sqli-scanner.py    From sqli-scanner with GNU General Public License v3.0
def __init__(self, inFile, outFile, processcount=None):
        """
        Initiate controller procedure
        :param inFile: the file containing the URLs
        :param outFile: the output file, "result.txt" by default
        """
        try:
            self.urllist = deduplicate(FileReader(inFile).read()).result
            self.workerCount = int(processcount) if processcount else multiprocessing.cpu_count() * 2
            self.taskQ = multiprocessing.JoinableQueue()
            self.resultQ = multiprocessing.Queue()
            self.workers = []
            self.outfile = outFile

            self.start()
            logging.info("[+] All work done, saving file")
        except KeyboardInterrupt:
            pass
        finally:
            self.cleanup() 
Example #11
Source File: cloud.py    From barman with GNU General Public License v3.0
def _ensure_async(self):
        """
        Ensure that the asynchronous execution infrastructure is up
        and the worker process is running
        """
        if self.queue:
            return

        self.queue = multiprocessing.JoinableQueue(
            maxsize=self.worker_processes_count)
        self.result_queue = multiprocessing.Queue()
        self.errors_queue = multiprocessing.Queue()
        self.done_queue = multiprocessing.Queue()
        for process_number in range(self.worker_processes_count):
            process = multiprocessing.Process(
                target=self.worker_process_main,
                args=(process_number,))
            process.start()
            self.worker_processes.append(process) 
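worker_process_main itself is not part of this excerpt. A minimal sketch of the consumption loop such a worker typically runs, under the assumptions that a None item acts as a shutdown sentinel and that outcomes are reported on the companion queues (the _execute_job helper is hypothetical):

def worker_process_main(self, process_number):
    while True:
        job = self.queue.get()
        if job is None:                   # assumed shutdown sentinel
            self.queue.task_done()
            break
        try:
            self.result_queue.put(self._execute_job(job))   # hypothetical helper
        except Exception as exc:
            self.errors_queue.put(exc)
        finally:
            self.queue.task_done()        # acknowledge the job for a later queue.join()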
Example #12
Source File: workers.py    From vitrage with Apache License 2.0
def add_evaluator_workers(self):
        """Add evaluator workers

        Evaluator workers receive all graph updates, hence stay up to date.
        Each evaluator worker holds an enabled scenario-evaluator and processes
        every change.
        Each worker's scenario-evaluator runs different template scenarios.
        Interface to these workers is:
        submit_graph_update(..)
        submit_start_evaluations(..)
        submit_evaluators_reload_templates(..)
        """
        if self._evaluator_queues:
            raise VitrageError('add_evaluator_workers called more than once')
        workers = CONF.evaluator.workers
        queues = [multiprocessing.JoinableQueue() for i in range(workers)]
        self.add(EvaluatorWorker,
                 args=(queues, workers),
                 workers=workers)
        self._evaluator_queues = queues
        self._all_queues.extend(queues) 
Example #13
Source File: __init__.py    From cc98 with MIT License
def _producer_multi_threads(queue_task, queue_product, worker_function):
    """
    Dispatches multi-threaded tasks within this process.
    :type queue_task: multiprocessing.JoinableQueue
    :type queue_product: multiprocessing.JoinableQueue
    :type worker_function: Callable[[Any], Any]
    """
    while True:
        try:
            task = queue_task.get()
            if isinstance(task, _QueueEndSignal):  # end-of-queue signal
                # the task_done() in the finally block still runs even when we break
                break
            if isinstance(task, dict):
                result = worker_function(**task)
            elif isinstance(task, (tuple, list)):
                result = worker_function(*task)
            else:
                result = worker_function(task)

            queue_product.put((task, result))
        except:
            traceback.print_exc()
        finally:
            queue_task.task_done() 
Example #14
Source File: refactor.py    From Fluid-Designer with GNU General Public License v3.0
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None 
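The _child target passed to each process is not shown above; in lib2to3 it is essentially the following loop (paraphrased): take (args, kwargs) tasks, refactor the file, acknowledge each task with task_done(), and exit on the None sentinel that the finally block enqueues once per process.

def _child(self):
    task = self.queue.get(block=True)
    while task is not None:
        args, kwargs = task
        try:
            self.refactor_file(*args, **kwargs)   # do the actual refactoring work
        finally:
            self.queue.task_done()                # needed for queue.join() above
        task = self.queue.get(block=True)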
Example #15
Source File: save_article_content.py    From web_develop with GNU General Public License v3.0
def use_multiprocessing_with_queue():
    queue = multiprocessing.JoinableQueue()
    num_consumers = multiprocessing.cpu_count() * 2

    for article in Article.objects.all():
        queue.put(article)

    for _ in range(num_consumers):
        p = multiprocessing.Process(target=save_article_result_with_queue,
                                    args=(queue,))
        p.start()

    queue.join() 
Example #16
Source File: multiprocessing_utils.py    From warriorframework with Apache License 2.0
def create_and_start_process_with_queue(target_module, args_dict, jobs_list, output_q, p_name=''):
    """Creates python multiprocesses for the provided target module with the
    provided arguments and  starts them

    Arguments:
    1. target_module = module for which multiple processes has to be started
    2. args_list = list of arguments to be passed to the target module
    3. jobs_list = list of process created
    4. output_q  = multiprocessing.Queue object to handle returns from the target module
    """

    # This is to handle the first process, when
    # output_q will be None: create a new queue and use the
    # same queue for all subsequently started processes
    if output_q is None:
        # output_q = multiprocessing.JoinableQueue()
        output_q = multiprocessing.Manager().Queue()

    args_dict["output_q"] = output_q

    # now we need to convert the args_dict into
    # a tuple, so first create a list out of the dict
    # and then convert the list into a tuple
    args_list = []
    for _, value in args_dict.items():
        args_list.append(value)
    args_tuple = tuple(args_list)

    process = multiprocessing.Process(name=p_name, target=target_module, args=args_tuple)
    jobs_list.append(process)

    process.start()

    return process, jobs_list, output_q 
Example #17
Source File: Alarm.py    From redeem with GNU General Public License v3.0
def __init__(self):
    self.queue = JoinableQueue(10)
    self.running = False
    self.t = Thread(target=self._run, name="AlarmExecutor") 
Example #18
Source File: workers.py    From vitrage with Apache License 2.0
def _submit_and_wait(queues, payload):
        for q in queues:
            q.put(payload)
        for q in queues:
            if isinstance(q, multiprocessing.queues.JoinableQueue):
                q.join() 
Example #19
Source File: workers.py    From vitrage with Apache License 2.0
def _read_queue(self):
        LOG.debug("%s - reading queue %s",
                  self.__class__.__name__, self.worker_id)
        while True:
            try:
                next_task = self._task_queue.get()
                self.do_task(next_task)
            except Exception:
                LOG.exception("Graph may not be in sync.")
            if isinstance(self._task_queue,
                          multiprocessing.queues.JoinableQueue):
                self._task_queue.task_done() 
Example #20
Source File: save_article_content.py    From web_develop with GNU General Public License v3.0
def use_multiprocessing_with_queue():
    queue = multiprocessing.JoinableQueue()
    num_consumers = multiprocessing.cpu_count() * 2

    for article in Article.objects.all()[:4]:
        queue.put(article)

    for _ in range(num_consumers):
        p = multiprocessing.Process(target=save_article_result_with_queue,
                                    args=(queue,))
        p.start()

    queue.join() 
Example #21
Source File: diff_occ.py    From NucleoATAC with MIT License
def run_diff(args, bases = 500000):
    """run differential occupancy calling

    """
    chrs = read_chrom_sizes_from_bam(args.bam)
    pwm = PWM.open(args.pwm)
    chunks = ChunkList.read(args.bed, chromDict = chrs, min_offset = args.flank + args.upper/2 + max(pwm.up,pwm.down))
    chunks.merge()
    maxQueueSize = max(2,int(100 * bases / np.mean([chunk.length() for chunk in chunks])))
    #get fragmentsizes
    fragment_dist1 = FragmentMixDistribution(0, upper = args.upper)
    fragment_dist1.fragmentsizes = FragmentSizes(0, args.upper, vals = FragmentSizes.open(args.sizes1).get(0,args.upper))
    fragment_dist1.modelNFR()
    fragment_dist2 = FragmentMixDistribution(0, upper = args.upper)
    fragment_dist2.fragmentsizes = FragmentSizes(0, args.upper, vals = FragmentSizes.open(args.sizes2).get(0,args.upper))
    fragment_dist2.modelNFR()
    params = OccupancyParameters(fragment_dist, args.upper, args.fasta, args.pwm, sep = args.nuc_sep, min_occ = args.min_occ,
            flank = args.flank, bam = args.bam, ci = args.confidence_interval)
    sets = chunks.split(bases = bases)
    pool1 = mp.Pool(processes = max(1,args.cores-1))
    diff_handle = open(args.out + '.occdiff.bed','w')
    diff_handle.close()
    diff_queue = mp.JoinableQueue()
    diff_process = mp.Process(target = _writeDiff, args=(diff_queue, args.out))
    diff_process.start()
    nuc_dist = np.zeros(args.upper)
    for j in sets:
        tmp = pool1.map(_occHelper, zip(j,itertools.repeat(params)))
        for result in tmp:
            diff_queue.put(result[1])
    pool1.close()
    pool1.join()
    diff_queue.put('STOP')
    diff_process.join()
    pysam.tabix_compress(args.out + '.occdiff.bed', args.out + '.occdiff.bed.gz',force = True)
    shell_command('rm ' + args.out + '.occdiff.bed')
    pysam.tabix_index(args.out + '.occdiff.bed.gz', preset = "bed", force = True) 
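The _writeDiff target is not included in this excerpt. A heavily hedged sketch of the writer process it implies: append each queued result to the .occdiff.bed output until the 'STOP' sentinel arrives (the record format and write call are assumptions; only the queue, the sentinel, and the output name follow from run_diff above).

def _writeDiff(queue, out_basename):
    handle = open(out_basename + '.occdiff.bed', 'a')
    while True:
        chunk = queue.get()
        if chunk == 'STOP':        # sentinel put by run_diff after the pool finishes
            break
        handle.write(str(chunk))   # assumed: results are written as text records
    handle.close()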
Example #22
Source File: lynx_basics.py    From ethoscope with GNU General Public License v3.0
def __init__(self,queue,  *args, **kwargs):
        """
        Class to delegate the sleep deprivation connection (:class:`~ethoscope.hardware.interfaces.sleep_depriver_interface.SleepDepriverConnection`)
        to a parallel process. This way, the execution of the sleep depriver connection instructions is non-blocking.

        :param queue: A multiprocessing queue to pass instructions.
        :type queue: :class:`~multiprocessing.JoinableQueue`
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return:
        """
        self._queue = queue
        self._sleep_dep_args = args
        self._sleep_dep_kwargs = kwargs
        super(SleepDepriverSubProcess,self).__init__() 
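The run() method of this subprocess is not shown. A minimal sketch of the loop the docstring implies, assuming instructions arrive as keyword-argument dicts and a None sentinel stops the process (both assumptions; the SleepDepriverConnection method name is hypothetical):

def run(self):
    # open the hardware connection in the child process, then execute queued instructions
    connection = SleepDepriverConnection(*self._sleep_dep_args, **self._sleep_dep_kwargs)
    while True:
        instruction = self._queue.get()
        try:
            if instruction is None:               # assumed shutdown sentinel
                break
            connection.deprive(**instruction)     # hypothetical instruction call
        finally:
            self._queue.task_done()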
Example #23
Source File: refactor.py    From ironpython3 with Apache License 2.0
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None 
Example #24
Source File: refactor.py    From Imogen with MIT License
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in range(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in range(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None 
Example #25
Source File: serialHandler.py    From vPiP with Apache License 2.0
def __init__(self, config):
        self.config = config
        self.coordQueue = JoinableQueue()
        self.stepQueue = JoinableQueue()
        self.connected = False
        self.stopRequestCoord = Event()
        self.stopRequestStep = Event()
        self.startedCoord = Event()
        self.startedStepWorker = Event()
        self.serialPort = None
        self.coordWorker = None
        self.stepWorker = None 
Example #26
Source File: pyscriptexecutor.py    From grinder with GNU General Public License v2.0
def __init__(self, results_pool: dict, queue: JoinableQueue, mute: bool = False):
        """
        Initialize the process worker

        :param queue: general joinable task queue
        :param mute: bool flag for running scripts in silent mode (w/o output at all)
        :param results_pool: pool of results
        """
        Process.__init__(self)
        self.queue = queue
        self.mute = mute
        self.base_path = self._initialize_base_path()
        self.results_pool = results_pool 
Example #27
Source File: nmapprocessmanager.py    From grinder with GNU General Public License v2.0
def organize_processes(self) -> None:
        """
        Create process queue
        :return: None
        """
        queue = JoinableQueue()
        processes = []
        for _ in range(self.workers):
            freeze_support()
            process = NmapProcessing(
                queue,
                self.arguments,
                self.ports,
                self.sudo,
                len(self.hosts),
                self.results_pool,
            )
            process.daemon = True
            processes.append(process)
        for process in processes:
            try:
                process.start()
            except OSError:
                pass
        for index, host in enumerate(self.hosts):
            queue.put((index, host))
        for _ in range(self.workers):
            queue.put((None, None))
        queue.join()
        for process in processes:
            if process.is_alive():
                process.terminate() 
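The run() method of NmapProcessing (the worker side of this queue) is not part of this excerpt. For queue.join() above to return, every (index, host) pair and every (None, None) sentinel has to be acknowledged with task_done(); a hedged sketch of that loop (the scan helper and the results_pool layout are assumptions):

def run(self) -> None:
    while True:
        index, host = self.queue.get()
        try:
            if host is None:                                  # (None, None) sentinel: stop
                break
            self.results_pool[host] = self._scan_host(host)   # hypothetical scan helper
        finally:
            self.queue.task_done()                            # required so queue.join() returns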
Example #28
Source File: rle.py    From pytorch-saltnet with MIT License
def __init__(self, num_consumers=2):
        """Initialize class."""
        self._tasks = multiprocessing.JoinableQueue()
        self._results = multiprocessing.Queue()
        self._n_consumers = num_consumers

        # Initialize consumers
        self._consumers = [Consumer(self._tasks, self._results) for i in range(self._n_consumers)]
        for w in self._consumers:
            w.start()
        self.startIndex = 0 
Example #29
Source File: rle.py    From pytorch-saltnet with MIT License
def __init__(self, num_consumers=2):
        """Initialize class."""
        self._tasks = multiprocessing.JoinableQueue()
        self._results = multiprocessing.Queue()
        self._n_consumers = num_consumers

        # Initialize consumers
        self._consumers = [Consumer(self._tasks, self._results) for i in range(self._n_consumers)]
        for w in self._consumers:
            w.start() 
Example #30
Source File: refactor.py    From misp42splunk with GNU Lesser General Public License v3.0
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None