Python multiprocessing.Queue() Examples

The following are 30 code examples of multiprocessing.Queue(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the multiprocessing module.
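Before the project examples, here is a minimal self-contained sketch of the pattern they all build on: one process puts items on a multiprocessing.Queue, another takes them off, and a sentinel value marks the end of the stream.

from multiprocessing import Process, Queue

def producer(q):
    for i in range(3):
        q.put(i)     # send work to the parent process
    q.put(None)      # sentinel: no more items

if __name__ == '__main__':
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    for item in iter(q.get, None):   # read until the sentinel arrives
        print(item)
    p.join()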
Example #1
Source File: master.py    From ppo-lstm-parallel with MIT License
def __init__(self, env_producer):
        self.env_name = env_producer.get_env_name()
        self.config = environments.get_config(self.env_name)
        self.worker_size = self.config["worker_num"]
        self.env_producer = env_producer
        self.queues = []
        self.w_in_queue = Queue()
        self.init_workers()
        self.session = None
        self.trainable_vars = None
        self.accum_vars = None
        self.p_opt_vars = None
        self.v_opt_vars = None
        self.assign_op = None
        self.agent = None
        self.saver = None
        self.summary_writer = None
        self.beta = 1
        self.lr_multiplier = 1.0
        self.iter_count = 1
        self.variables_file_path = "models/%s/variables.txt" % self.env_name
        self.model_path = "models/%s/model" % self.env_name
        self.initialized = False
        self.cur_step = -1
        self.start() 
Example #2
Source File: parallel_dqn_optimizer.py    From tensortrade with Apache License 2.0
def __init__(self,
                 model: 'ParallelDQNModel',
                 n_envs: int,
                 memory_queue: Queue,
                 model_update_queue: Queue,
                 done_queue: Queue,
                 discount_factor: float = 0.9999,
                 batch_size: int = 128,
                 learning_rate: float = 0.0001,
                 memory_capacity: int = 10000):
        super().__init__()

        self.model = model
        self.n_envs = n_envs
        self.memory_queue = memory_queue
        self.model_update_queue = model_update_queue
        self.done_queue = done_queue
        self.discount_factor = discount_factor
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.memory_capacity = memory_capacity 
Example #3
Source File: test_distributed.py    From sagemaker-xgboost-container with Apache License 2.0
def test_integration_rabit_synchronize():
    q = Queue()

    port, _ = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)
    expected_results = [{'idx': idx} for idx in host_list]

    for idx in host_list:
        p = Process(target=synchronize_fn, args=(host_count, port, idx == 0, idx, q))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        host_aggregated_result = q.get(timeout=10)
        for host_individual_result in host_aggregated_result:
            assert host_individual_result in expected_results
        num_responses += 1 
Example #4
Source File: test_distributed.py    From sagemaker-xgboost-container with Apache License 2.0
def test_rabit_run_all_hosts_run():
    q = Queue()

    first_port, second_port = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)
    expected_results = [idx for idx in host_list]

    for idx in host_list:
        p = Process(target=rabit_run_fn, args=(host_count, True, first_port, second_port, idx == 0, idx, q))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        response = q.get(timeout=15)
        expected_results.remove(response)
        num_responses += 1

    assert len(expected_results) == 0 
Example #5
Source File: test_distributed.py    From sagemaker-xgboost-container with Apache License 2.0
def test_rabit_run_exclude_one_host():
    q = Queue()

    first_port, second_port = find_two_open_ports()

    idx_to_exclude = 3

    host_count = 5
    host_list = range(host_count)
    expected_results = [idx for idx in host_list if idx != idx_to_exclude]

    for idx in host_list:
        p = Process(target=rabit_run_fn, args=(
            host_count, idx != idx_to_exclude, first_port, second_port, idx == 0, idx, q))
        p.start()

    num_responses = 0
    while num_responses < host_count - 1:
        response = q.get(timeout=15)
        expected_results.remove(response)
        num_responses += 1

    assert len(expected_results) == 0 
Example #6
Source File: docker.py    From pywren-ibm-cloud with Apache License 2.0
def __init__(self, docker_config):
        self.log_level = os.getenv('PYWREN_LOGLEVEL')
        self.config = docker_config
        self.name = 'docker'
        self.host = docker_config['host']
        self.queue = multiprocessing.Queue()
        self.docker_client = None
        self._is_localhost = self.host in ['127.0.0.1', 'localhost']

        if self._is_localhost:
            try:
                self.docker_client = docker.from_env()
            except Exception:
                pass

        log_msg = 'PyWren v{} init for Docker - Host: {}'.format(__version__, self.host)
        logger.info(log_msg)
        if not self.log_level:
            print(log_msg) 
Example #7
Source File: test_distributed.py    From sagemaker-xgboost-container with Apache License 2.0
def test_rabit_delay_master():
    q = Queue()

    first_port, second_port = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)
    expected_results = [idx for idx in host_list]

    for idx in host_list:
        p = Process(
            target=rabit_run_delay_master, args=(host_count, True, first_port, second_port, idx == 0, idx, q, None))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        response = q.get(timeout=20)
        expected_results.remove(response)
        num_responses += 1

    assert len(expected_results) == 0 
Example #8
Source File: test_distributed.py    From sagemaker-xgboost-container with Apache License 2.0
def test_rabit_run_fail_bad_max_retry_attempts(bad_max_retry_attempts):
    q = Queue()

    first_port, second_port = find_two_open_ports()

    host_count = 5
    host_list = range(host_count)

    for idx in host_list:
        p = Process(target=rabit_run_fail, args=(
            rabit_run_fn, host_count, True, first_port, second_port, idx == 0, idx, q, bad_max_retry_attempts))
        p.start()

    num_responses = 0
    while num_responses < host_count:
        host_result = q.get(timeout=10)
        assert "max_connect_attempts must be None or an integer greater than 0." in host_result
        num_responses += 1 
Example #9
Source File: bench.py    From OpenBench with GNU General Public License v3.0
def multiCoreBench(engine, threads):

    # Give time for any previous run to finish
    time.sleep(2)

    # Dump results into a Queue
    outqueue = multiprocessing.Queue()

    # Spawn each singleCoreBench()
    processes = [
        multiprocessing.Process(
            target=singleCoreBench,
            args=(engine, outqueue)
        ) for ii in range(threads)
    ]

    # Launch each singleCoreBench()
    for process in processes:
        process.start()

    # Wait for each thread and collect data
    return [outqueue.get() for ii in range(threads)] 
Example #10
Source File: parallel_map.py    From dataflow with Apache License 2.0
def reset_state(self):
        super(MultiThreadMapData, self).reset_state()
        if self._threads:
            self._threads[0].stop()
            for t in self._threads:
                t.join()

        self._in_queue = queue.Queue()
        self._out_queue = queue.Queue()
        self._evt = threading.Event()
        self._threads = [MultiThreadMapData._Worker(
            self._in_queue, self._out_queue, self._evt, self.map_func)
            for _ in range(self.num_thread)]
        for t in self._threads:
            t.start()

        self._guard = DataFlowReentrantGuard()

        # Call once at the beginning, to ensure inq+outq has a total of buffer_size elements
        self._fill_buffer() 
Example #11
Source File: multiproc_data.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def reset(self):
        """
        Resets the generator by stopping all processes
        """
        self.alive.value = False
        qsize = 0
        try:
            while True:
                self.queue.get(timeout=0.1)
                qsize += 1
        except QEmptyExcept:
            pass
        print("Queue size on reset: {}".format(qsize))
        for i, p in enumerate(self.proc):
            p.join()
        self.proc.clear() 
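Draining the queue before joining is deliberate: a process that has put items on a multiprocessing.Queue will not terminate until its buffered items have been flushed to the underlying pipe, so calling join() on the workers without emptying the queue first can deadlock.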
Example #12
Source File: data_process.py    From 3D-R2N2 with MIT License
def test_process():
    from multiprocessing import Queue
    from lib.config import cfg
    from lib.data_io import category_model_id_pair

    cfg.TRAIN.PAD_X = 10
    cfg.TRAIN.PAD_Y = 10

    data_queue = Queue(2)
    category_model_pair = category_model_id_pair(dataset_portion=[0, 0.1])

    data_process = ReconstructionDataProcess(data_queue, category_model_pair)
    data_process.start()
    batch_img, batch_voxel = data_queue.get()

    kill_processes(data_queue, [data_process]) 
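Note the bounded Queue(2): once two mini batches are waiting, the producer process blocks on put() until the consumer catches up, which keeps prefetching from running arbitrarily far ahead of training. This is a simple form of backpressure.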
Example #13
Source File: train.py    From yolo2-pytorch with GNU Lesser General Public License v3.0
def __init__(self, env):
        super(SummaryWorker, self).__init__()
        self.env = env
        self.config = env.config
        self.queue = multiprocessing.Queue()
        try:
            self.timer_scalar = utils.train.Timer(env.config.getfloat('summary', 'scalar'))
        except configparser.NoOptionError:
            self.timer_scalar = lambda: False
        try:
            self.timer_image = utils.train.Timer(env.config.getfloat('summary', 'image'))
        except configparser.NoOptionError:
            self.timer_image = lambda: False
        try:
            self.timer_histogram = utils.train.Timer(env.config.getfloat('summary', 'histogram'))
        except configparser.NoOptionError:
            self.timer_histogram = lambda: False
        with open(os.path.expanduser(os.path.expandvars(env.config.get('summary_histogram', 'parameters'))), 'r') as f:
            self.histogram_parameters = utils.RegexList([line.rstrip() for line in f])
        self.draw_bbox = utils.visualize.DrawBBox(env.category)
        self.draw_feature = utils.visualize.DrawFeature() 
Example #14
Source File: pipeline.py    From Turku-neural-parser-pipeline with Apache License 2.0
def add_step(self,module_name_and_params, extra_args):
        config=module_name_and_params.split()
        module_name=config[0]
        params=config[1:]

        # collect extra arguments from command line meant for this particular module
        if extra_args is not None: 
            for _name, _value in extra_args.__dict__.items():
                if _name.startswith(module_name):
                    _modname,_argname=_name.split(".",1) # for example lemmatizer_mod.gpu
                    params.append("--"+_argname)
                    params.append(str(_value))

        mod=importlib.import_module(module_name)
        step_in=self.q_out
        self.q_out=Queue(self.max_q_size) #new pipeline end
        args=mod.argparser.parse_args(params)
        process=Process(target=mod.launch,args=(args,step_in,self.q_out))
        process.daemon=True
        process.start()
        self.processes.append(process) 
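The method above chains stages together: each new step reads from the previous step's output queue and writes to a freshly created one that becomes the new pipeline end. A minimal sketch of the same queue-chaining idea, using hypothetical stage functions rather than the Turku pipeline's modules:

from multiprocessing import Process, Queue

def upper(s):
    return s.upper()

def exclaim(s):
    return s + '!'

def stage(fn, q_in, q_out):
    for item in iter(q_in.get, None):   # run until the None sentinel
        q_out.put(fn(item))
    q_out.put(None)                     # propagate the sentinel downstream

if __name__ == '__main__':
    q0, q1, q2 = Queue(), Queue(), Queue()
    for fn, q_in, q_out in ((upper, q0, q1), (exclaim, q1, q2)):
        Process(target=stage, args=(fn, q_in, q_out), daemon=True).start()
    for word in ('a', 'b', None):
        q0.put(word)
    print(list(iter(q2.get, None)))     # prints ['A!', 'B!']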
Example #15
Source File: datasources.py    From pygrametl with BSD 2-Clause "Simplified" License
def __init__(self, source, batchsize=500, queuesize=20):
        """Arguments:
            
           - source: the source to iterate
           - batchsize: the number of rows the worker process passes on in
             each batch. Must be positive. Default: 500
           - queuesize: the maximum number of batches that can wait in a queue
             between the processes. 0 means unlimited. Default: 20
        """
        if not isinstance(batchsize, int) or batchsize < 1:
            raise ValueError('batchsize must be a positive integer')
        self.__source = source
        self.__batchsize = batchsize
        self.__queue = Queue(queuesize)
        p = Process(target=self.__worker)
        p.name = "Process for ProcessSource"
        p.start() 
Example #16
Source File: log_client.py    From iSDX with Apache License 2.0
def __init__(self, address, port, authkey, input_file, debug = False, timing = False):
        self.logger = util.log.getLogger('log_client')
        self.logger.info('server: start')

        self.timing = timing

        self.address = address
        self.port = int(port)
        self.authkey = authkey

        self.input_file = input_file

        self.real_start_time = time()
        self.simulation_start_time = 0

        self.fp_thread = None
        self.fs_thread = None

        self.flow_mod_queue = Queue() 
Example #17
Source File: datasources.py    From pygrametl with BSD 2-Clause "Simplified" License
def __init__(self, seq, callee):
        """Arguments:
            
           - seq: a sequence with the elements for each of which a unique
             source must be created. the elements are given (one by one) to
             callee.
           - callee: a function f(e) that must accept elements as those in the
             seq argument. the function should return a source which then will
             be iterated by this source. the function is called once for every
             element in seq.
        """
        self.__queue = Queue()  # a multiprocessing.Queue
        if not callable(callee):
            raise TypeError('callee must be callable')
        self.__callee = callee
        for e in seq:
            # put them in a safe queue such that this object can be used from
            # different fork'ed processes
            self.__queue.put(e) 
Example #18
Source File: data_process.py    From 3D-R2N2 with MIT License
def __init__(self, data_queue, data_paths, repeat=True):
        '''
        data_queue : Multiprocessing queue
        data_paths : list of data and label pair used to load data
        repeat : if set True, return data until exit is set
        '''
        super(DataProcess, self).__init__()
        # Queue to transfer the loaded mini batches
        self.data_queue = data_queue
        self.data_paths = data_paths
        self.num_data = len(data_paths)
        self.repeat = repeat

        # Tuple of data shape
        self.batch_size = cfg.CONST.BATCH_SIZE
        self.exit = Event()
        self.shuffle_db_inds() 
Example #19
Source File: process.py    From linter-pylama with MIT License
def _add_call_item_to_queue(pending_work_items,
                            work_ids,
                            call_queue):
    """Fills call_queue with _WorkItems from pending_work_items.

    This function never blocks.

    Args:
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
            are consumed and the corresponding _WorkItems from
            pending_work_items are transformed into _CallItems and put in
            call_queue.
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems.
    """
    while True:
        if call_queue.full():
            return
        try:
            work_id = work_ids.get(block=False)
        except queue.Empty:
            return
        else:
            work_item = pending_work_items[work_id]

            if work_item.future.set_running_or_notify_cancel():
                call_queue.put(_CallItem(work_id,
                                         work_item.fn,
                                         work_item.args,
                                         work_item.kwargs),
                               block=True)
            else:
                del pending_work_items[work_id]
                continue 
Example #20
Source File: pytribe.py    From PyTribe with GNU General Public License v3.0
def _process_samples(self, queue):

		"""Continuously processes samples, updating the most recent sample
		and writing data to the log file when self._logdata is set to True

		arguments

		queue		--	a multiprocessing.Queue instance, to read samples
						from
		"""

		# keep processing until it is signalled that we should stop
		while self._processing:
			# wait for the Threading Lock to be released, then lock it
			self._lock.acquire(True)
			# read new item from the queue
			if not queue.empty():
				sample = queue.get()
			else:
				sample = None
			# release the Threading Lock
			self._lock.release()
			# update newest sample
			if sample is not None:
				# check if the new sample is the same as the current sample
				if not self._currentsample['timestamp'] == sample['timestamp']:
					# update current sample
					self._currentsample = copy.deepcopy(sample)
					# write to file if data logging is on
					if self._logdata:
						self._log_sample(sample) 
Example #21
Source File: verifier.py    From ffw with GNU General Public License v3.0
def __init__(self, config):
        self.config = config
        self.queue_sync = multiprocessing.Queue()  # connection to servermanager
        self.queue_out = multiprocessing.Queue()  # connection to servermanager
        self.serverPid = None  # pid of the server started by servermanager (not servermanager)
        self.p = None  # serverManager 
Example #22
Source File: model_common.py    From armchair-expert with MIT License
def __init__(self):
        self._read_queue = Queue()
        self._write_queue = Queue()
        self._worker = None 
Example #23
Source File: pytribe.py    From PyTribe with GNU General Public License v3.0
def _stream_samples(self, queue):

		"""Continuously polls the device, and puts all new samples in a
		Queue instance

		arguments

		queue		--	a multiprocessing.Queue instance, to put samples
						into
		"""

		# keep streaming until it is signalled that we should stop
		while self._streaming:
			# do not bother the tracker when it is calibrating
			#self._wait_while_calibrating()
			# wait for the Threading Lock to be released, then lock it
			self._lock.acquire(True)
			# get a new sample
			sample = self._tracker.get_frame()
			t1 = time.time()
			# put the sample in the Queue
			queue.put(sample)
			# release the Threading Lock
			self._lock.release()
			# Update the newest frame
			self._newestframe = copy.deepcopy(sample)
			# Calculate the clock difference
			self._clockdiff = sample['time'] - t1
			# pause for half the intersample time, to avoid an overflow
			# (but to make sure to not miss any samples)
			time.sleep(self._intsampletime/2) 
Example #24
Source File: twitter.py    From armchair-expert with MIT License
def __init__(self, read_queue: Queue, write_queue: Queue, shutdown_event: Event,
                 credentials: TwitterApiCredentials):
        ConnectorWorker.__init__(self, name='TwitterWorker', read_queue=read_queue, write_queue=write_queue,
                                 shutdown_event=shutdown_event)
        self._credentials = credentials
        self._user_stream = None
        self._api = None
        self._scraper = None
        self._scraper_thread = None
        self._logger = None 
Example #25
Source File: connector_common.py    From armchair-expert with MIT License
def __init__(self, reply_generator: ConnectorReplyGenerator, connectors_event: Event):
        self._reply_generator = reply_generator
        self._scheduler = None
        self._thread = Thread(target=self.run)
        self._write_queue = Queue()
        self._read_queue = Queue()
        self._frontends_event = connectors_event
        self._shutdown_event = Event()
        self._muted = True 
Example #26
Source File: connector_common.py    From armchair-expert with MIT License
def __init__(self, name, read_queue: Queue, write_queue: Queue, shutdown_event: Event):
        Process.__init__(self, name=name)
        self._read_queue = read_queue
        self._write_queue = write_queue
        self._shutdown_event = shutdown_event
        self._frontend = None 
Example #27
Source File: structure.py    From armchair-expert with MIT License
def __init__(self, read_queue: Queue, write_queue: Queue, use_gpu: bool = False):
        MLModelWorker.__init__(self, name='SentenceStructureModelWorker', read_queue=read_queue,
                               write_queue=write_queue,
                               use_gpu=use_gpu) 
Example #28
Source File: reaction.py    From armchair-expert with MIT License
def __init__(self, read_queue: Queue, write_queue: Queue, use_gpu: bool = False):
        MLModelWorker.__init__(self, name='AOLReactionModelWorker', read_queue=read_queue, write_queue=write_queue,
                               use_gpu=use_gpu) 
Example #29
Source File: multiprocessor.py    From svviz with MIT License
def _map_init(q):
    # allow setting a multiprocessing.Queue for the _map function
    _map.q = q 
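_map_init here is the standard Pool-initializer trick: a multiprocessing.Queue cannot be pickled into Pool.map() task arguments, so it is handed to each worker once at start-up and stored as an attribute on the worker function. A minimal sketch of how such an initializer is typically wired up; the doubling worker below is a hypothetical stand-in, not svviz's _map:

import multiprocessing

def _map_init(q):
    # runs once in every worker process when the pool starts
    _map.q = q

def _map(item):
    _map.q.put(item * 2)   # hypothetical worker: report results via the queue

if __name__ == '__main__':
    q = multiprocessing.Queue()
    with multiprocessing.Pool(2, initializer=_map_init, initargs=(q,)) as pool:
        pool.map(_map, [1, 2, 3])
    print(sorted(q.get() for _ in range(3)))   # prints [2, 4, 6]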
Example #30
Source File: model_common.py    From armchair-expert with MIT License
def __init__(self, name, read_queue: Queue, write_queue: Queue, use_gpu: bool):
        Process.__init__(self, name=name)
        self._read_queue = read_queue
        self._write_queue = write_queue
        self._use_gpu = use_gpu
        self._model = None