Python multiprocessing.Process() Examples

The following are 30 code examples of multiprocessing.Process(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the multiprocessing module, or try the search function.
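All of the examples below share one basic pattern: construct a Process with a target callable, start() it, and join() it. As a minimal orientation sketch (not taken from any of the listed projects):

import multiprocessing

def greet(name):
    # runs in the child process
    print("Hello from", name)

if __name__ == "__main__":
    p = multiprocessing.Process(target=greet, args=("worker-1",))
    p.start()  # spawn/fork the child
    p.join()   # wait for it to exit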
Example #1
Source File: test_gluon_utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_multiprocessing_download_successful():
    """ test download with multiprocessing """
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'README.md')
    process_list = []
    # test it with 10 processes
    for i in range(10):
        process_list.append(mp.Process(
            target=_download_successful, args=(tmpfile,)))
        process_list[i].start()
    for i in range(10):
        process_list[i].join()
    assert os.path.getsize(tmpfile) > 100, os.path.getsize(tmpfile)
    # check that only the one file we want is left
    pattern = os.path.join(tmp, 'README.md*')
    assert len(glob.glob(pattern)) == 1, glob.glob(pattern)
    # delete temp dir
    shutil.rmtree(tmp) 
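The `_download_successful` helper is defined elsewhere in the test file; a hypothetical sketch consistent with the assertions above (the URL is illustrative, not taken from the source):

def _download_successful(tmpfile):
    # assumes: import mxnet as mx
    # ten processes run this concurrently against the same destination
    # path; mx.gluon.utils.download must not leave partial files behind
    mx.gluon.utils.download(
        'https://raw.githubusercontent.com/apache/incubator-mxnet/master/README.md',
        path=tmpfile)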
Example #2
Source File: utils.py    From pySocialWatcher with MIT License
def trigger_request_process_and_return_response(rows_to_request):
    process_manager = Manager()
    shared_queue = process_manager.Queue()
    shared_queue_list = []
    list_process = []

    # Create one process per DataFrame row
    for index, row in rows_to_request.iterrows():
        token, account = get_token_and_account_number_or_wait()
        p = Process(target=trigger_facebook_call, args=(index, row, token, account, shared_queue))
        list_process.append(p)

    # Start all processes, wait for them to finish, then check each for
    # exceptions. Plain for-loops are used instead of map(): in Python 3,
    # map() is lazy, so map(lambda p: p.start(), ...) would never
    # actually call p.start().
    for p in list_process:
        p.start()
    for p in list_process:
        p.join()
    for p in list_process:
        check_exception(p)

    # Drain the shared queue into a regular list
    while shared_queue.qsize() != 0:
        shared_queue_list.append(shared_queue.get())
    return shared_queue_list 
Example #3
Source File: inputs.py    From inputs with BSD 3-Clause "New" or "Revised" License
def handle_input(self, event):
        """Process they keyboard input."""
        self.update_timeval()
        self.events = []
        code = self._get_event_key_code(event)

        if code in self.codes:
            new_code = self.codes[code]
        else:
            new_code = 0
        event_type = self._get_event_type(event)
        value = self._get_key_value(event, event_type)
        scan_event, key_event = self.emulate_press(
            new_code, code, value, self.timeval)

        self.events.append(scan_event)
        self.events.append(key_event)
        # End with a sync marker
        self.events.append(self.sync_marker(self.timeval))
        # We are done
        self.write_to_pipe(self.events) 
Example #4
Source File: manager.py    From Learning-Concurrency-in-Python with MIT License
def main():
  manager = mp.Manager()
  ns = manager.Namespace()
  ns.x = 1

  print(ns)
  
  process = mp.Process(target=myProcess, args=(ns,))
  process.start()
  process.join()
  print(ns) 
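`myProcess` is defined elsewhere in manager.py; a plausible sketch, given the Namespace set up above (hypothetical):

def myProcess(ns):
    # mutate the attribute on the Namespace proxy; the Manager
    # propagates the change back to the parent, so the second
    # print(ns) shows the new value
    ns.x = 2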
Example #5
Source File: mpQueue.py    From Learning-Concurrency-in-Python with MIT License
def main():
  m = multiprocessing.Manager()
  sharedQueue = m.Queue()
  sharedQueue.put(2)
  sharedQueue.put(3)
  sharedQueue.put(4)

  process1 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process1.start()

  process2 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process2.start()
  
  process3 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process3.start()
  
  process2.join()
  process1.join()
  process3.join() 
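`myTask` is likewise defined elsewhere in mpQueue.py; a sketch that fits the three puts and three consumer processes above (hypothetical):

def myTask(queue):
    # each consumer pulls exactly one of the three queued values
    value = queue.get()
    print("Process got: {}".format(value))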
Example #6
Source File: from_chinastock.py    From Financial-NLP with Apache License 2.0
def parallel_download_all_section(*arg):
    if len(arg) == 1:
        k = arg[0]
        pro = []
        for key in decode.keys():
            pro.append(multiprocessing.Process(target=download, args=(key, k)))
        for p in pro:
            p.start()
        for p in pro:
            p.join()
    elif len(arg) == 2:
        From = arg[0]
        To = arg[1]
        pro = []
        for key in decode.keys():
            pro.append(multiprocessing.Process(target=download, args=(key, From, To)))
        for p in pro:
            p.start()
        for p in pro:
            p.join()
Example #7
Source File: concurrentCalculation.py    From Learning-Concurrency-in-Python with MIT License
def main():
  print("Starting number crunching")
  t0 = time.time()
  
  procs = []

  # Here we create our processes and kick them off
  for i in range(10):
    proc = Process(target=executeProc, args=())
    procs.append(proc)
    proc.start()

  # Again we use the .join() method in order to wait for 
  # execution to finish for all of our processes
  for proc in procs:
    proc.join()

  t1 = time.time()
  totalTime = t1 - t0
  # we print out the total execution time for our 10
  # procs.
  print("Execution Time: {}".format(totalTime)) 
Example #8
Source File: pipeline.py    From Turku-neural-parser-pipeline with Apache License 2.0
def add_step(self,module_name_and_params, extra_args):
        config=module_name_and_params.split()
        module_name=config[0]
        params=config[1:]

        # collect extra arguments from command line meant for this particular module
        if extra_args is not None: 
            for _name, _value in extra_args.__dict__.items():
                if _name.startswith(module_name):
                    _modname,_argname=_name.split(".",1) # for example lemmatizer_mod.gpu
                    params.append("--"+_argname)
                    params.append(str(_value))

        mod=importlib.import_module(module_name)
        step_in=self.q_out
        self.q_out=Queue(self.max_q_size) #new pipeline end
        args=mod.argparser.parse_args(params)
        process=Process(target=mod.launch,args=(args,step_in,self.q_out))
        process.daemon=True
        process.start()
        self.processes.append(process) 
Example #9
Source File: wrappers.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def __init__(self, constructor):
    """Step environment in a separate process for lock free paralellism.

    The environment will be created in the external process by calling the
    specified callable. This can be an environment class, or a function
    creating the environment and potentially wrapping it. The returned
    environment should not access global variables.

    Args:
      constructor: Callable that creates and returns an OpenAI gym environment.

    Attributes:
      observation_space: The cached observation space of the environment.
      action_space: The cached action space of the environment.
    """
    self._conn, conn = multiprocessing.Pipe()
    self._process = multiprocessing.Process(
        target=self._worker, args=(constructor, conn))
    atexit.register(self.close)
    self._process.start()
    self._observ_space = None
    self._action_space = None 
Example #10
Source File: wrappers.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def __init__(self, constructor):
    """Step environment in a separate process for lock free paralellism.

    The environment will be created in the external process by calling the
    specified callable. This can be an environment class, or a function
    creating the environment and potentially wrapping it. The returned
    environment should not access global variables.

    Args:
      constructor: Callable that creates and returns an OpenAI gym environment.

    Attributes:
      observation_space: The cached observation space of the environment.
      action_space: The cached action space of the environment.
    """
    self._conn, conn = multiprocessing.Pipe()
    self._process = multiprocessing.Process(
        target=self._worker, args=(constructor, conn))
    atexit.register(self.close)
    self._process.start()
    self._observ_space = None
    self._action_space = None 
Example #11
Source File: log_provider.py    From Paradrop with Apache License 2.0
def attach(self):
        """
        Start listening for log messages.

        Log messages in the queue will appear like the following:
        {
            'service': 'main',
            'timestamp': '2017-01-30T15:46:23.009397536Z',
            'message': 'Something happened'
        }
        """
        if not self.listening:
            for service in self.chute.get_services():
                process = Process(target=monitor_logs,
                        args=(service.name, service.get_container_name(), self.queue))
                process.start()
                self.processes.append(process)
            self.listening = True 
Example #12
Source File: subproc_vec_env.py    From lirpg with MIT License
def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space) 
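The `worker` function lives in the same file; an abridged sketch of the command loop it implements, following the standard baselines pattern (details in lirpg may differ):

def worker(remote, parent_remote, env_fn_wrapper):
    parent_remote.close()
    env = env_fn_wrapper.x()  # unpickle and build the environment
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        elif cmd == 'close':
            remote.close()
            break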
Example #13
Source File: envs.py    From A2C with MIT License
def __init__(self, env_fns, render_interval):
        """ Minor addition to SubprocVecEnv, automatically renders environments

        envs: list of gym environments to run in subprocesses
        """
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        for remote in self.work_remotes:
            remote.close()

        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()

        self.render_interval = render_interval
        self.render_timer = 0 
Example #14
Source File: invoker.py    From pywren-ibm-cloud with Apache License 2.0
def _start_invoker_process(self):
        """
        Starts the invoker processes responsible for spawning pending calls in the background
        """
        if self.is_pywren_function or not is_unix_system():
            for inv_id in range(INVOKER_PROCESSES):
                p = Thread(target=self._run_invoker_process, args=(inv_id, ))
                self.invokers.append(p)
                p.daemon = True
                p.start()
        else:
            for inv_id in range(INVOKER_PROCESSES):
                p = Process(target=self._run_invoker_process, args=(inv_id, ))
                self.invokers.append(p)
                p.daemon = True
                p.start() 
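Marking the invokers as daemons means they are stopped automatically when the main process exits, so pending background invocations can never keep the program alive.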
Example #15
Source File: concurrency.py    From dataflow with Apache License 2.0
def ensure_proc_terminate(proc):
    """
    Make sure processes terminate when the main process exits.

    Args:
        proc (multiprocessing.Process or list)
    """
    if isinstance(proc, list):
        for p in proc:
            ensure_proc_terminate(p)
        return

    def stop_proc_by_weak_ref(ref):
        proc = ref()
        if proc is None:
            return
        if not proc.is_alive():
            return
        proc.terminate()
        proc.join()

    assert isinstance(proc, mp.Process)
    atexit.register(stop_proc_by_weak_ref, weakref.ref(proc)) 
Example #16
Source File: concurrency.py    From dataflow with Apache License 2.0
def start_proc_mask_signal(proc):
    """
    Start process(es) with SIGINT ignored.

    Args:
        proc: (mp.Process or list)

    Note:
        The signal mask is only applied when called from main thread.
    """
    if not isinstance(proc, list):
        proc = [proc]

    with mask_sigint():
        for p in proc:
            if isinstance(p, mp.Process):
                if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':
                    log_once("""
Starting a process with 'fork' method is efficient but not safe and may cause deadlock or crash.
Use 'forkserver' or 'spawn' method instead if you run into such issues.
See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods on how to set them.
""".replace("\n", ""),
'warn')  # noqa
            p.start() 
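To act on that warning, the start method can be selected once, early in the program and before any process is created (Python 3.4+):

import multiprocessing as mp

if __name__ == "__main__":
    mp.set_start_method("forkserver")  # or "spawn"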
Example #17
Source File: multiprocess_vector_env.py    From chainerrl with MIT License
def __init__(self, env_fns):
        if np.__version__ == '1.16.0':
            warnings.warn("""
NumPy 1.16.0 can cause severe memory leak in chainerrl.envs.MultiprocessVectorEnv.
We recommend using other versions of NumPy.
See https://github.com/numpy/numpy/issues/12793 for details.
""")  # NOQA

        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = \
            [Process(target=worker, args=(work_remote, env_fn))
             for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
        for p in self.ps:
            p.start()
        self.last_obs = [None] * self.num_envs
        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()
        self.closed = False 
Example #18
Source File: zookeeper_basic.py    From Zopkio with Apache License 2.0
def test_zookeeper_process_tracking():
  """
  Tests that a process registers a node correctly with ZooKeeper and that ZooKeeper deletes the node when the process terminates
  """
  # Wait for ZooKeeper to start so that the kazoo client can connect correctly
  time.sleep(5)

  kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
  zkclient = KazooClient(hosts=kazoo_connection_url)

  zkclient.start()

  zkclient.ensure_path("/my/zookeeper_test")
  # Spawn a child process which creates an ephemeral node;
  # once the process ends, the node will be deleted.
  p = Process(target=zookeeper_ephemeral_node, args=("process1",))
  p.start()
  zkclient.stop() 
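`zookeeper_ephemeral_node` is defined in the suite's helpers; a hypothetical reconstruction consistent with the test (the connection string and sleep duration are assumptions):

def zookeeper_ephemeral_node(name):
    zk = KazooClient(hosts='localhost:2181')  # assumed connection string
    zk.start()
    zk.create('/my/zookeeper_test/' + name, b'data', ephemeral=True)
    time.sleep(10)  # keep the process alive so the node stays visible
    zk.stop()       # node disappears once the session ends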
Example #19
Source File: zookeeper_cluster_tests.py    From Zopkio with Apache License 2.0
def test_zookeeper_process_tracking():
  """
  Tests that a process registers a node correctly with ZooKeeper and that ZooKeeper deletes the node when the process terminates
  """
  # Wait for ZooKeeper to start so that the kazoo client can connect correctly
  time.sleep(5)

  kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
  zkclient = KazooClient(hosts=kazoo_connection_url)

  zkclient.start()

  zkclient.ensure_path("/my/zookeeper_test")
  # Spawn a child process which creates an ephemeral node;
  # once the process ends, the node will be deleted.
  p = Process(target=zookeeper_ephemeral_node, args=("process1",))
  p.start()
  zkclient.stop() 
Example #20
Source File: zookeeper_ztestsuite_example.py    From Zopkio with Apache License 2.0
def test(self):
    """
    Tests that a process registers a node correctly with ZooKeeper and that ZooKeeper deletes the node when the process terminates
    """
    # Wait for ZooKeeper to start so that the kazoo client can connect correctly
    time.sleep(5)

    kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
    zkclient = KazooClient(hosts=kazoo_connection_url)

    zkclient.start()

    zkclient.ensure_path("/my/zookeeper_test")
    # Spawn a child process which creates an ephemeral node;
    # once the process ends, the node will be deleted.
    p = Process(target=zookeeper_ephemeral_node, args=("process1",))
    p.start()
    zkclient.stop() 
Example #21
Source File: hci.py    From bluescan with GNU General Public License v3.0
def scan_undiscoverable_dev(cls):
        import multiprocessing

        #hci_read_page_timeout()
        #hci_write_page_timeout(0x0200) # 0x0500 is more stable
        #hci_read_page_timeout()

        # p1 = multiprocessing.Process(target=job,args=(1,2))

        # range(, )

        #hci_create_connection('3C:28:6D:E0:58:F7')
        for i in range(0x000000, 0x100000):
            addr = '3c:28:6d:'+':'.join('%02x'%b for b in i.to_bytes(3, 'big', signed=False))
            print(addr)
            #print('HCI connect', addr)
            #status, bdaddr = hci_create_connection(addr)
            #hci_create_connection(addr)
            time.sleep(0.5)
            # if status == 0:
            #     print(status, bdaddr) 
Example #22
Source File: it_multiprocess.py    From sanic-prometheus with MIT License
def test_metrics_are_aggregated_between_workers(self):
        p = Process(target=launch_server)
        self._procs.append(p)
        p.start()
        sleep(1)

        for _ in range(100):
            r = request.urlopen("http://localhost:{}/test".format(TEST_PORT))
            _ = r.read()

        r = request.urlopen("http://localhost:{}/metrics".format(TEST_PORT))
        nreqs = None
        for l in r.readlines():
            l = l.decode('ascii')
            m = re.match(r"^sanic_request_count_total\{.+\}\s+(\d+)\s*", l)
            if m:
                nreqs = int(m.group(1))
                break
        self.assertIsNotNone(nreqs)
        self.assertEqual(nreqs, 100) 
Example #23
Source File: inputs.py    From inputs with BSD 3-Clause "New" or "Revised" License
def handle_input(self, ncode, wparam, lparam):
        """Process the key input."""
        value = WIN_KEYBOARD_CODES[wparam]
        scan_code = lparam.contents.scan_code
        vk_code = lparam.contents.vk_code
        self.update_timeval()

        events = []
        # Add key event
        scan_key, key_event = self.emulate_press(
            vk_code, scan_code, value, self.timeval)
        events.append(scan_key)
        events.append(key_event)

        # End with a sync marker
        events.append(self.sync_marker(self.timeval))

        # We are done
        self.write_to_pipe(events)

        return ctypes.windll.user32.CallNextHookEx(
            self.hooked, ncode, wparam, lparam) 
Example #24
Source File: inputs.py    From inputs with BSD 3-Clause "New" or "Revised" License
def handle_input(self, ncode, wparam, lparam):
        """Process the key input."""
        x_pos = lparam.contents.x_pos
        y_pos = lparam.contents.y_pos
        data = lparam.contents.mousedata

        # This is how we can distinguish mouse 1 from mouse 2
        # extrainfo = lparam.contents.extrainfo
        # The way Windows seems to do it is that there is a primary mouse
        # and all other mice report as mouse 2

        # Also useful later will be to support the flags field
        # flags = lparam.contents.flags
        # This shows if the event was from a real device or whether it
        # was injected somehow via software

        self.emulate_mouse(wparam, x_pos, y_pos, data)

        # Give back control to Windows to wait for and process the
        # next event
        return ctypes.windll.user32.CallNextHookEx(
            self.hooked, ncode, wparam, lparam) 
Example #25
Source File: inputs.py    From inputs with BSD 3-Clause "New" or "Revised" License
def handle_input(self, event):
        """Process the mouse event."""
        self.update_timeval()
        self.events = []
        code = self._get_event_type(event)

        # Deal with buttons
        self.handle_button(event, code)

        # Mouse wheel
        if code == 22:
            self.handle_scrollwheel(event)
        # Other relative mouse movements
        else:
            self.handle_relative(event)

        # Add in the absolute position of the mouse cursor
        self.handle_absolute(event)

        # End with a sync marker
        self.events.append(self.sync_marker(self.timeval))

        # We are done
        self.write_to_pipe(self.events) 
Example #26
Source File: twitch-viewer.py    From twitch-viewer with Apache License 2.0
def prepare_processes():
    global processes
    proxies = get_proxies()
    n = 0

    if len(proxies) < 1:
        print("An error has occurred while preparing the processes: "
              "not enough proxy servers; need at least 1 to function.")
        sys.exit(1)

    for proxy in proxies:
        # Preparing the process and giving it its own proxy
        processes.append(
            multiprocessing.Process(
                target=open_url, kwargs={
                    "url": get_url(), "proxy": {
                        "http": proxy}}))

        print('.', end='')

    print('')
Example #27
Source File: async_.py    From chainerrl with MIT License
def run_async(n_process, run_func):
    """Run experiments asynchronously.

    Args:
      n_process (int): number of processes
      run_func: function that will be run in parallel
    """

    processes = []

    def set_seed_and_run(process_idx, run_func):
        random_seed.set_random_seed(np.random.randint(0, 2 ** 32))
        run_func(process_idx)

    for process_idx in range(n_process):
        processes.append(mp.Process(target=set_seed_and_run, args=(
            process_idx, run_func)))

    for p in processes:
        p.start()

    for process_idx, p in enumerate(processes):
        p.join()
        if p.exitcode > 0:
            warnings.warn(
                "Process #{} (pid={}) exited with nonzero status {}".format(
                    process_idx, p.pid, p.exitcode),
                category=AbnormalExitWarning,
            )
        elif p.exitcode < 0:
            warnings.warn(
                "Process #{} (pid={}) was terminated by signal {}".format(
                    process_idx, p.pid, -p.exitcode),
                category=AbnormalExitWarning,
            ) 
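Typical usage, with a hypothetical per-process entry point:

def train(process_idx):
    print("training in process", process_idx)

run_async(4, train)  # runs train(0) .. train(3) in four processes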
Example #28
Source File: train.py    From cat-bbs with MIT License
def __init__(self, augseq, queue_source, nb_workers, queue_size=50, threaded=False):
        assert 0 < queue_size <= 10000
        self.augseq = augseq
        self.queue_source = queue_source
        self.queue_result = multiprocessing.Queue(queue_size)
        self.workers = []
        for i in range(nb_workers):
            augseq.reseed()
            if threaded:
                worker = threading.Thread(target=self._augment_images_worker, args=(self.augseq, self.queue_source, self.queue_result))
            else:
                worker = multiprocessing.Process(target=self._augment_images_worker, args=(self.augseq, self.queue_source, self.queue_result))
            worker.daemon = True
            worker.start()
            self.workers.append(worker) 
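`_augment_images_worker` is defined on the same class; a plausible sketch of its loop (hypothetical, assuming batches are plain image arrays and imgaug's augment_images API):

def _augment_images_worker(self, augseq, queue_source, queue_result):
    while True:
        batch = queue_source.get()  # blocking read from the source queue
        queue_result.put(augseq.augment_images(batch))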
Example #29
Source File: webui.py    From MPContribs with MIT License
def stop_processes():
    global processes
    for process_name in processes.keys():
        if processes[process_name]:
            if process_name != "MongodProcess":
                processes[process_name].terminate()
                time.sleep(1)
            processes[process_name] = None
    parent = psutil.Process(os.getpid())
    for child in parent.children(recursive=True):
        if child.name() == "mongod":
            child.kill()
            print("killed mongod") 
Example #30
Source File: worker.py    From ppo-lstm-parallel with MIT License
def init_workers(self):
        for i in range(self.num_gather_workers):
            rollout_size = self.config["rollout_size"] // self.num_gather_workers
            t = Process(target=make_worker, args=(i, self.env_producer,
                                                  self.worker_queue,
                                                  self.weights_queues[i],
                                                  rollout_size))
            t.start()