Python multiprocessing.Event() Examples

The following are 30 code examples of multiprocessing.Event(), drawn from open-source projects. The source file, project, and license for each are noted above the example. You may also want to check out all available functions and classes of the multiprocessing module.
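multiprocessing.Event() returns a process-safe flag with the same interface as threading.Event: set(), clear(), is_set() and wait(timeout=None). Most of the examples below use it either as a readiness signal or as a shutdown flag; here is a minimal, self-contained sketch of the readiness handshake:

import multiprocessing
import time

def worker(ready):
    time.sleep(0.1)   # simulate slow initialization
    ready.set()       # flip the flag; every waiter wakes up

if __name__ == '__main__':
    ready = multiprocessing.Event()   # starts cleared (is_set() == False)
    p = multiprocessing.Process(target=worker, args=(ready,))
    p.start()
    ready.wait()                      # block until the child calls set()
    print('ready:', ready.is_set())   # True
    p.join()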
Example #1
Source File: test_ringbuffer.py    From ringbuffer with Apache License 2.0
def test_wait_for_write(self):
        event = multiprocessing.Event()
        wait_count = 0

        with self.lock.for_read():

            def test():
                with self.lock.for_write():
                    self.assert_writer()
                    event.set()
                    return 'written'

            writer = self.async(test)  # note: 'async' became a reserved word in Python 3.7; this project predates that

            while not event.is_set():
                self.assert_readers(1)
                wait_count += 1
                self.lock.wait_for_write()
                self.assert_readers(1)

        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
        self.assertLessEqual(wait_count, 2) 
Example #2
Source File: server.py    From Bert-TextClassification with MIT License
def __init__(self, id, args, worker_address, sink_address):
        super().__init__()
        self.model_dir = args.model_dir
        self.config_fp = os.path.join(self.model_dir, 'bert_config.json')
        self.checkpoint_fp = os.path.join(self.model_dir, 'bert_model.ckpt')
        self.vocab_fp = os.path.join(args.model_dir, 'vocab.txt')
        self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_fp)
        self.max_seq_len = args.max_seq_len
        self.worker_id = id
        self.daemon = True
        self.model_fn = model_fn_builder(
            bert_config=modeling.BertConfig.from_json_file(self.config_fp),
            init_checkpoint=self.checkpoint_fp,
            pooling_strategy=args.pooling_strategy,
            pooling_layer=args.pooling_layer
        )
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.worker_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
        self.estimator = Estimator(self.model_fn, config=RunConfig(session_config=config))
        self.exit_flag = multiprocessing.Event()
        self.logger = set_logger('WORKER-%d' % self.worker_id)
        self.worker_address = worker_address
        self.sink_address = sink_address 
Example #3
Source File: Parser.py    From RTGraph with MIT License
def __init__(self, data_queue, store_reference=None,
                 split=Constants.csv_delimiter,
                 consumer_timeout=Constants.parser_timeout_ms):
        """

        :param data_queue: Reference to Queue where processed data will be put.
        :type data_queue: multiprocessing.Queue.
        :param store_reference: Reference to CSVProcess instance, if needed.
        :type store_reference: CSVProcess (multiprocessing.Process)
        :param split: Delimiter in incoming data.
        :type split: str.
        :param consumer_timeout: Time to wait after emptying the internal buffer before next parsing.
        :type consumer_timeout: float.
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._in_queue = multiprocessing.Queue()
        self._out_queue = data_queue
        self._consumer_timeout = consumer_timeout
        self._split = split
        self._store_reference = store_reference
        Log.d(TAG, "Process ready") 
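The _exit event created here is the usual shutdown flag for a multiprocessing.Process subclass: run() loops while the flag is clear and a stop() method sets it from the parent. A sketch of that pattern follows; the method bodies are illustrative, not RTGraph's actual code:

import multiprocessing
import queue

class ConsumerProcess(multiprocessing.Process):
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._in_queue = multiprocessing.Queue()

    def run(self):
        # Keep consuming until the parent sets the exit flag.
        while not self._exit.is_set():
            try:
                item = self._in_queue.get(timeout=0.5)
            except queue.Empty:
                continue
            self._process(item)

    def stop(self):
        self._exit.set()  # run() sees the flag on its next check and returns

    def _process(self, item):
        pass  # stand-in for the real parsing work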
Example #4
Source File: Csv.py    From RTGraph with MIT License
def __init__(self, filename=None, path=None, timeout=0.5):
        """
        Sets up the file to export the data as CSV.
        If filename is not specified, a default name based on time will be used.
        :param filename: Name of the file where data will be exported.
        :type filename: str.
        :param path: Path where data file will be saved.
        :type path: str.
        :param timeout: Time to wait after emptying the internal buffer before next write.
        :type timeout: float.
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._store_queue = multiprocessing.Queue()
        self._csv = None
        self._file = None
        self._timeout = timeout

        if filename is None:
            filename = strftime(Constants.csv_default_filename, gmtime())
        self._file = self._create_file(filename, path=path)
        Log.i(TAG, "Process ready") 
Example #5
Source File: test_validators.py    From exopy with BSD 3-Clause "New" or "Revised" License
def task():
    """Create a task to test the validators.

    """
    class Tester(CheckTask):
        """Class for testing feval validators.

        """
        feval = Str()

    root = RootTask(should_stop=Event(), should_pause=Event())
    task = Tester(name='test', database_entries={'val': 1})
    loop = LoopTask(name='Loop', task=task)
    root.add_child_task(0, loop)
    yield task
    del root.should_pause
    del root.should_stop
    gc.collect() 
Example #6
Source File: common_process.py    From loopchain with Apache License 2.0
def start(self):
        parent_conn, child_conn = multiprocessing.Pipe()
        event = multiprocessing.Event()

        self.__conn = parent_conn
        self.__run_process = multiprocessing.Process(target=self.run, args=(child_conn, event))
        self.__run_process.start()

        # To avoid defunct process
        self.__join_thread = threading.Thread(target=self.wait)
        self.__join_thread.start()

        # Without this sleep, CommonProcess is terminated with exit code SIGSEGV.
        # This may be a Python bug.
        time.sleep(conf.SLEEP_SECONDS_FOR_INIT_COMMON_PROCESS)
        event.wait() 
Example #7
Source File: data_process.py    From 3D-R2N2 with MIT License
def __init__(self, data_queue, data_paths, repeat=True):
        '''
        data_queue : Multiprocessing queue
        data_paths : list of data and label pairs used to load data
        repeat : if True, keep returning data until the exit event is set
        '''
        super(DataProcess, self).__init__()
        # Queue to transfer the loaded mini batches
        self.data_queue = data_queue
        self.data_paths = data_paths
        self.num_data = len(data_paths)
        self.repeat = repeat

        # Tuple of data shape
        self.batch_size = cfg.CONST.BATCH_SIZE
        self.exit = Event()
        self.shuffle_db_inds() 
Example #8
Source File: analyse.py    From MadMax with BSD 3-Clause "New" or "Revised" License
def flush_queue(period, run_sig,
                result_queue, result_list):
    """
    Periodically flushes result_queue into result_list so the queue doesn't fill up.

    Args:
        period: flush the result_queue to result_list every period seconds
        run_sig: terminate when the Event run_sig is cleared.
        result_queue: the queue in which results accumulate before being flushed
        result_list: the final list of results.
    """
    while run_sig.is_set():
        time.sleep(period)
        while not result_queue.empty():
            item = result_queue.get()
            result_list.append(item)


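Per the docstring, the flusher loops while run_sig is set and terminates once it is cleared. A hedged sketch of how a caller might wire it up, with illustrative values and names:

import multiprocessing
import threading

run_sig = multiprocessing.Event()
run_sig.set()                            # flusher loops while this is set
result_queue = multiprocessing.Queue()
result_list = []

flusher = threading.Thread(target=flush_queue,
                           args=(5, run_sig, result_queue, result_list))
flusher.start()
# ... worker processes put results on result_queue ...
run_sig.clear()                          # signal the flusher to stop
flusher.join()                           # late results may need a final drain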
Example #9
Source File: __init__.py    From bert-as-service with MIT License
def __init__(self, id, args, worker_address_list, sink_address, device_id, graph_path, graph_config):
        super().__init__()
        self.worker_id = id
        self.device_id = device_id
        self.logger = set_logger(colored('WORKER-%d' % self.worker_id, 'yellow'), args.verbose)
        self.max_seq_len = args.max_seq_len
        self.do_lower_case = args.do_lower_case
        self.mask_cls_sep = args.mask_cls_sep
        self.daemon = True
        self.exit_flag = multiprocessing.Event()
        self.worker_address = worker_address_list
        self.num_concurrent_socket = len(self.worker_address)
        self.sink_address = sink_address
        self.prefetch_size = args.prefetch_size if self.device_id > 0 else None  # disabled (None) for CPU workers
        self.gpu_memory_fraction = args.gpu_memory_fraction
        self.model_dir = args.model_dir
        self.verbose = args.verbose
        self.graph_path = graph_path
        self.bert_config = graph_config
        self.use_fp16 = args.fp16
        self.show_tokens_to_client = args.show_tokens_to_client
        self.no_special_token = args.no_special_token
        self.is_ready = multiprocessing.Event() 
Example #10
Source File: __init__.py    From ACE with Apache License 2.0
def start_root_lock_manager(self, uuid):
        """Starts a thread that keeps a lock open."""
        if self.single_threaded_mode:
            return

        logging.debug("starting lock manager for {}".format(uuid))

        # we use this event for a controlled shutdown
        self.lock_manager_control_event = threading.Event()

        # start a thread that sends keepalives every N seconds
        self.lock_keepalive_thread = threading.Thread(target=self.root_lock_manager_loop,
                                                           name="Lock Manager ({})".format(uuid),
                                                           args=(uuid,))
        self.lock_keepalive_thread.daemon = True # we want this thread to die if the process dies
        self.lock_keepalive_thread.start() 
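The loop body is not shown in this excerpt, but Event.wait(timeout=...) makes such a controlled shutdown cheap: the wait doubles as an interruptible sleep between keepalives. A plausible sketch of the loop, hypothetical rather than ACE's actual implementation:

def root_lock_manager_loop(self, uuid):
    # wait() returns False on timeout (time to send another keepalive)
    # and True as soon as the control event is set (time to shut down)
    while not self.lock_manager_control_event.wait(timeout=5):
        self.keepalive_root_lock(uuid)  # hypothetical keepalive call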
Example #11
Source File: test_ringbuffer.py    From ringbuffer with Apache License 2.0
def test_writer_blocks_reader(self):
        with self.lock.for_write():
            event = multiprocessing.Event()

            def test():
                self.assert_writer()

                # Caller will block until this event is set.
                event.set()

                with self.lock.for_read():
                    self.assert_readers(1)
                    return 'read'

            r = self.async(test)  # 'async' became a reserved word in Python 3.7 (see the note in Example #1)

            # Wait until we can confirm that the reader is locked out.
            event.wait()
            self.assert_writer()

        self.assertEqual('read', self.get_result(r))
        self.assert_unlocked() 
Example #12
Source File: test_formula_task.py    From exopy with BSD 3-Clause "New" or "Revised" License
def setup(self):
        self.root = RootTask(should_stop=Event(), should_pause=Event())
        self.task = FormulaTask(name='Test')
        self.root.add_child_task(0, self.task) 
Example #13
Source File: rest_service.py    From loopchain with Apache License 2.0
def run(self, conn, event: multiprocessing.Event):
        logging.debug("RestService run...")

        args = ['python3', '-m', 'loopchain', 'rest', '-p', str(self._port)]
        args += command_arguments.get_raw_commands_by_filter(
            command_arguments.Type.AMQPKey,
            command_arguments.Type.RadioStationTarget
        )
        server = CommonSubprocess(args)
        api_port = self._port + conf.PORT_DIFF_REST_SERVICE_CONTAINER
        server.set_proctitle(f"{setproctitle.getproctitle()} RestServer api_port({api_port})")
        logging.info(f'RestService run complete port {self._port}')

        # complete init
        event.set()

        command = None
        while command != "quit":
            try:
                command, param = conn.recv()
                logging.debug(f"RestService got: {param}")
            except Exception as e:
                logging.warning(f"RestService conn.recv() error: {e}")
            except KeyboardInterrupt:
                pass

        server.stop()
        logging.info("RestService Ended.") 
Example #14
Source File: Tailer.py    From mongodb_consistent_backup with Apache License 2.0
def run(self):
        if not self.enabled():
            logging.info("Oplog tailer is disabled, skipping")
            return
        logging.info("Starting oplog tailers on all replica sets (options: compression=%s, status_secs=%i)" % (self.compression(), self.status_secs))
        self.timer.start(self.timer_name)
        for shard in self.replsets:
            tail_stop   = Event()
            secondary   = self.replsets[shard].find_secondary()
            mongo_uri   = secondary['uri']
            shard_name  = mongo_uri.replset

            oplog_file  = self.prepare_oplog_files(shard_name)
            oplog_state = OplogState(self.manager, mongo_uri, oplog_file)
            thread = TailThread(
                self.backup_stop,
                tail_stop,
                mongo_uri,
                self.config,
                self.timer,
                oplog_file,
                oplog_state,
                self.do_gzip()
            )
            self.shards[shard] = {
                'stop':   tail_stop,
                'thread': thread,
                'state':  oplog_state
            }
            self.shards[shard]['thread'].start()
            while not oplog_state.get('running'):
                if self.shards[shard]['thread'].exitcode:
                    raise OperationError("Oplog tailer for %s failed with exit code %i!" % (mongo_uri, self.shards[shard]['thread'].exitcode))
                sleep(0.5) 
Example #15
Source File: recognition.py    From Piwho with MIT License
def __init__(self, dirpath=None):
        """
        Invokes the SpeakerRecognizer constructor with params.

        :param dirpath: path for the .wav files
        :type dirpath: str
        """
        self.proc = None
        self.sprecog = SpeakerRecognizer(dirpath)
        self.event = mp.Event()
        self.debug = False
        self.speaker_name = None 
Example #16
Source File: scan_generator.py    From ms_deisotope with Apache License 2.0
def __init__(self, ms_file, number_of_helpers=4,
                 ms1_peak_picking_args=None, msn_peak_picking_args=None,
                 ms1_deconvolution_args=None, msn_deconvolution_args=None,
                 extract_only_tandem_envelopes=False, ignore_tandem_scans=False,
                 ms1_averaging=0, deconvolute=True, verbose=False):
        self.ms_file = ms_file
        self.ignore_tandem_scans = ignore_tandem_scans

        self.scan_ids_exhausted_event = multiprocessing.Event()

        self._iterator = None

        self._scan_yielder_process = None
        self._deconv_process = None

        self._input_queue = None
        self._output_queue = None
        self._deconv_helpers = None
        self._order_manager = None

        self.number_of_helpers = number_of_helpers

        self.ms1_peak_picking_args = ms1_peak_picking_args
        self.msn_peak_picking_args = msn_peak_picking_args
        self.ms1_averaging = ms1_averaging

        self.deconvoluting = deconvolute
        self.ms1_deconvolution_args = ms1_deconvolution_args
        self.msn_deconvolution_args = msn_deconvolution_args
        self.extract_only_tandem_envelopes = extract_only_tandem_envelopes
        self._scan_interval_tree = None
        self.verbose = verbose
        self.log_controller = self.ipc_logger() 
Example #17
Source File: test_execution.py    From exopy with BSD 3-Clause "New" or "Revised" License
def test_root_perform_parallel_in_finalization(self):
        """Ensure that the ThreadResources release does not prevent to start
        new threads.

        """
        root = self.root

        event1 = threading.Event()
        event2 = threading.Event()
        event3 = threading.Event()

        comp = ComplexTask(name='comp')
        comp.parallel = {'activated': True, 'pool': 'test'}
        aux = CheckTask(name='signal', custom=lambda t, x: event1.set())
        wait = CheckTask(name='test', custom=lambda t, x: event2.wait())
        par = CheckTask(name='signal', custom=lambda t, x: event3.set())
        # Test creating a new thread: by priority, active_threads is released
        # later.
        par.parallel = {'activated': True, 'pool': 'test2'}
        comp.add_child_task(0, aux)
        comp.add_child_task(1, wait)
        comp.add_child_task(2, par)
        root.add_child_task(0, comp)

        t = threading.Thread(target=root.perform)
        t.start()
        event1.wait()
        assert root.resources['active_threads']['test']
        assert not root.resources['active_threads']['test2']
        event2.set()
        event3.wait()
        t.join()

        assert not root.should_pause.is_set()
        assert not root.should_stop.is_set()
        assert par.perform_called == 1
        assert aux.perform_called == 1
        assert wait.perform_called == 1
        assert not root.resources['active_threads']['test'] 
Example #18
Source File: test_execution.py    From exopy with BSD 3-Clause "New" or "Revised" License
def setup(self):
        root = RootTask()
        root.should_pause = Event()
        root.should_stop = Event()
        root.paused = Event()
        root.resumed = Event()
        root.default_path = 'toto'
        root.write_in_database('meas_name', 'M')
        root.write_in_database('meas_id', '001')
        self.root = root 
Example #19
Source File: http.py    From bert-as-service with MIT License
def __init__(self, args):
        super().__init__()
        self.args = args
        self.is_ready = Event() 
Example #20
Source File: test_common_process.py    From loopchain with Apache License 2.0
def run(self, event: threading.Event):
        event.set()

        while self.is_run():
            time.sleep(conf.SLEEP_SECONDS_IN_SERVICE_LOOP)
            self.__run_times += 1
            logging.debug("SampleThread, I have: " + str(self.__var)) 
Example #21
Source File: __init__.py    From bert-as-service with MIT License
def __init__(self, args, front_sink_addr, bert_config):
        super().__init__()
        self.port = args.port_out
        self.exit_flag = multiprocessing.Event()
        self.logger = set_logger(colored('SINK', 'green'), args.verbose)
        self.front_sink_addr = front_sink_addr
        self.verbose = args.verbose
        self.show_tokens_to_client = args.show_tokens_to_client
        self.max_seq_len = args.max_seq_len
        self.max_position_embeddings = bert_config.max_position_embeddings
        self.fixed_embed_length = args.fixed_embed_length
        self.is_ready = multiprocessing.Event() 
Example #22
Source File: __init__.py    From bert-as-service with MIT License
def __init__(self, args):
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'), args.verbose)

        self.model_dir = args.model_dir
        self.max_seq_len = args.max_seq_len
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.num_concurrent_socket = max(8, args.num_worker * 2)  # optimize concurrency for multi-clients
        self.port = args.port
        self.args = args
        self.status_args = {k: (v if k != 'pooling_strategy' else v.value) for k, v in sorted(vars(args).items())}
        self.status_static = {
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        self.processes = []
        self.logger.info('freeze, optimize and export graph, could take a while...')
        with Pool(processes=1) as pool:
            # optimize the graph, must be done in another process
            from .graph import optimize_graph
            self.graph_path, self.bert_config = pool.apply(optimize_graph, (self.args,))
        # from .graph import optimize_graph
        # self.graph_path = optimize_graph(self.args, self.logger)
        if self.graph_path:
            self.logger.info('optimized graph is stored at: %s' % self.graph_path)
        else:
            raise FileNotFoundError('graph optimization fails and returns empty result')
        self.is_ready = threading.Event() 
Example #23
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def addConsumer(self, extra_arg_list = []):
        if self.num_consumers < self.maxCores:
            # Lock internal
            self.__internalLock__.acquire()

            new_worker_num = self.next_worker_num
            logger.debug("Adding Consumer-%d" % (new_worker_num))
            self.consumer_pool_exitEvent.append(multiprocessing.Event())
            self.consumer_pool.append((new_worker_num, self.consumer_Class(
                self.consumer_task_queue, self.consumer_results_queue, self.get_num_tasks(), self.producer_pool_progress, self.consumer_pool_progress,
                self.consumer_pool_exitEvent[-1], self.killed_event, extra_arg_list), extra_arg_list))
            self.consumer_pool[-1][1].daemon = False  # Remove for debugging
            self.consumer_pool[-1][1].start()

            # Update consumer count
            self.num_consumers += 1

            # Update next worker num
            self.next_worker_num += 1

            # Release internal
            self.__internalLock__.release()

            logger.debug("Consumer-%d added" % new_worker_num)
        else:
            logger.error("Attempted to start workers beyond the maxCores setting") 
Example #24
Source File: Serial.py    From RTGraph with MIT License
def __init__(self, parser_process):
        """
        Initialises values for process.
        :param parser_process: Reference to a ParserProcess instance.
        :type parser_process: ParserProcess.
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._parser = parser_process
        self._serial = serial.Serial()
        Log.i(TAG, "Process ready") 
Example #25
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def addProducer(self, extra_arg_list = []):
        if self.num_producers < self.maxCores:
            # Lock internal
            self.__internalLock__.acquire()

            new_worker_num = self.next_worker_num
            logger.debug("Adding Producer-%d" % (new_worker_num))
            self.producer_pool_exitEvent.append(multiprocessing.Event())
            self.producer_pool.append((new_worker_num, self.producer_Class(
                self.producer_task_queue, self.producer_results_queue, self.get_num_tasks(), self.get_num_tasks(), self.producer_pool_progress,
                self.producer_pool_exitEvent[-1], self.killed_event, extra_arg_list), extra_arg_list))
            self.producer_pool[-1][1].daemon = False # Remove for debugging
            self.producer_pool[-1][1].start()

            # Update worker count
            self.num_producers += 1

            # Update next worker num
            self.next_worker_num += 1

            # Release internal
            self.__internalLock__.release()

            logger.debug("Producer-%d added" % new_worker_num)
        else:
            logger.error("Attempted to start workers beyond the maxCores setting") 
Example #26
Source File: SocketClient.py    From RTGraph with MIT License
def __init__(self, parser_process):
        """
        Initialises values for process.
        :param parser_process: Reference to a ParserProcess instance.
        :type parser_process: ParserProcess
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._parser = parser_process
        self._socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Log.i(TAG, "Process Ready") 
Example #27
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def __init__(self, maxCores, producer_Class, consumer_Class, governorOffFlag = False):
        logger.debug("mpEngine initializing")
        self.governorOffFlag = governorOffFlag
        self.maxCores = maxCores
        self.__deleting__ = False
        self.__internalLock__ = multiprocessing.Lock()
        self.killed_event = multiprocessing.Event()

        # Producers
        self.num_producers = 0
        self.next_worker_num = 0
        self.producer_Class = producer_Class
        self.producer_pool = []
        self.producer_pool_exitEvent = []
        self.producer_task_queue = multiprocessing.JoinableQueue()
        self.producer_results_queue = multiprocessing.JoinableQueue()
        self.producer_pool_progress = multiprocessing.Value('i', 0)

        # Consumers
        self.num_consumers = 0
        self.next_consumer_num = 0
        self.consumer_Class = consumer_Class
        self.consumer_pool = []
        # Note: consumer_pool_exitEvent is used both to notify a worker it should end and for the worker to notify it has done so
        self.consumer_pool_exitEvent = []
        self.consumer_task_queue = self.producer_results_queue
        self.consumer_results_queue = multiprocessing.JoinableQueue()
        self.consumer_pool_progress = multiprocessing.Value('i', 0)

        # Tasks
        self.num_tasks = multiprocessing.Value('i', 0)
        self.tasks_added = False

        # Rebalance checks
        self._rebalance_last_kick = datetime.now()
        self.rebalance_backoff_timer = 60 * 1
        self._rebalance_mem_last_kick = datetime.now()
        self.rebalance_mem_backoff_timer = 60 * 2 
Example #28
Source File: data_util.py    From ICDAR-2019-SROIE with MIT License
def start(self, workers=1, max_queue_size=10):
        def data_generator_task():
            while not self._stop_event.is_set():
                try:
                    if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
                        generator_output = next(self._generator)
                        self.queue.put(generator_output)
                    else:
                        time.sleep(self.wait_time)
                except Exception:
                    self._stop_event.set()
                    raise

        try:
            if self._use_multiprocessing:
                self.queue = multiprocessing.Queue(maxsize=max_queue_size)
                self._stop_event = multiprocessing.Event()
            else:
                self.queue = queue.Queue()
                self._stop_event = threading.Event()

            for _ in range(workers):
                if self._use_multiprocessing:
                    # Reset the random seed, else all child processes
                    # share the same seed
                    np.random.seed(self.random_seed)
                    thread = multiprocessing.Process(target=data_generator_task)
                    thread.daemon = True
                    if self.random_seed is not None:
                        self.random_seed += 1
                else:
                    thread = threading.Thread(target=data_generator_task)
                self._threads.append(thread)
                thread.start()
        except:
            self.stop()
            raise 
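The matching stop() would set _stop_event, which both the threading and multiprocessing flavors expose identically, so that data_generator_task falls out of its loop. A minimal hedged sketch; a full implementation would also have to terminate multiprocessing workers and drain the queue:

def stop(self, timeout=None):
    if self._stop_event is not None:
        self._stop_event.set()  # data_generator_task's loop condition goes False
    for thread in self._threads:
        if thread.is_alive():
            thread.join(timeout)  # works for both Thread and Process objects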
Example #29
Source File: Simulator.py    From RTGraph with MIT License
def __init__(self, parser_process):
        """
        Initialises values for process.
        :param parser_process: Reference to a ParserProcess instance.
        :type parser_process: ParserProcess.
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._period = None
        self._parser = parser_process
        Log.i(TAG, "Process Ready") 
Example #30
Source File: test_execution.py    From exopy with BSD 3-Clause "New" or "Revised" License
def test_root_perform_wait_all(self):
        """Test running a simple task waiting on all pools.

        Notes
        -----
        When started, par is executed in its own thread, which allows aux to
        run. The test waits for aux to set its flag. At that point wait should
        still be waiting, since one pool is active. After checking that, we
        set the flag on which par is waiting and let the execution complete.

        """
        root = self.root

        event1 = threading.Event()
        event2 = threading.Event()

        par = CheckTask(name='test', custom=lambda t, x: event1.wait())
        par.parallel = {'activated': True, 'pool': 'test'}
        aux = CheckTask(name='signal', custom=lambda t, x: event2.set())
        wait = CheckTask(name='wait')
        wait.wait = {'activated': True}
        root.add_child_task(0, par)
        root.add_child_task(1, aux)
        root.add_child_task(2, wait)

        t = threading.Thread(target=root.perform)
        t.start()
        event2.wait()
        sleep(1)
        assert not wait.perform_called
        assert root.resources['active_threads']['test']
        event1.set()
        t.join()

        assert not root.should_pause.is_set()
        assert not root.should_stop.is_set()
        assert par.perform_called == 1
        assert aux.perform_called == 1
        assert wait.perform_called == 1
        assert not root.resources['active_threads']['test']