Python multiprocessing.Manager() Examples

The following are 30 code examples of multiprocessing.Manager(), drawn from open-source projects. Each example notes its original project, source file, and license. You may also want to check out all available functions and classes of the multiprocessing module.
Example #1
Source File: download_samples.py    From gym-malware with MIT License
def use_virustotal(args):
    """
    Use VirusTotal to download the environment malware.
    """
    m = multiprocessing.Manager()
    download_queue = m.JoinableQueue(args.nconcurrent)

    archive_procs = [
        multiprocessing.Process(
            target=download_worker_function,
            args=(download_queue, args.vtapikey))
        for i in range(args.nconcurrent)
    ]
    for w in archive_procs:
        w.start()

    for row in get_sample_hashes():
        download_queue.put(row["sha256"])

    # send one STOP sentinel per worker so every process exits its loop
    for i in range(args.nconcurrent):
        download_queue.put("STOP")

    download_queue.join()
    for w in archive_procs:
        w.join() 
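
download_worker_function is defined elsewhere in gym-malware. A minimal sketch of a compatible worker, consuming hashes until it sees the STOP sentinel (hypothetical, not the project's implementation):

def download_worker_function(download_queue, vtapikey):
    while True:
        sha256 = download_queue.get()
        try:
            if sha256 == "STOP":
                break
            # ... fetch the sample for `sha256` from VirusTotal using vtapikey ...
        finally:
            # every get() needs a matching task_done() or join() never returns
            download_queue.task_done()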
Example #2
Source File: test_shared_mem_store.py    From dgl with Apache License 2.0
def test_init():
    manager = Manager()
    return_dict = manager.dict()

    # make server init before worker
    server_init = Value('i', 0)  # flips to nonzero once the server is ready
    serv_p = Process(target=server_func, args=(2, 'test_graph1', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_init_func, args=(0, 'test_graph1', return_dict))
    work_p2 = Process(target=check_init_func, args=(1, 'test_graph1', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
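
server_func and check_init_func come from dgl's test suite; only the handshake matters here. A stripped-down sketch of the same readiness pattern with stand-in functions (hypothetical):

import time
from multiprocessing import Manager, Process, Value

def server_func(num_workers, graph_name, server_init):
    # ... create the shared store here ...
    server_init.value = 1          # tell the workers the server is ready

def check_init_func(worker_id, graph_name, return_dict):
    return_dict[worker_id] = 0     # 0 signals success back to the parent

if __name__ == '__main__':
    manager = Manager()
    return_dict = manager.dict()
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'g', server_init))
    serv_p.start()
    while server_init.value == 0:  # poll until the server flips the flag
        time.sleep(1)
    serv_p.join()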
Example #3
Source File: utils.py    From pySocialWatcher with MIT License
def trigger_request_process_and_return_response(rows_to_request):
    process_manager = Manager()
    shared_queue = process_manager.Queue()
    shared_queue_list = []
    list_process = []

    # Trigger Process in rows
    for index, row in rows_to_request.iterrows():
        token, account = get_token_and_account_number_or_wait()
        p = Process(target=trigger_facebook_call, args=(index, row, token, account, shared_queue))
        list_process.append(p)

    # Start and join the processes with explicit loops; map() is lazy in
    # Python 3 and would never actually run these calls.
    for p in list_process:
        p.start()
    for p in list_process:
        p.join()
    # Check for exceptions raised in the workers
    for p in list_process:
        check_exception(p)

    # Drain the shared queue into a regular list
    while shared_queue.qsize() != 0:
        shared_queue_list.append(shared_queue.get())
    return shared_queue_list 
Example #4
Source File: test.py    From ACE with Apache License 2.0
def initialize_unittest_logging():
    # ACE is multi-process multi-threaded
    # so we use this special logging mechanism to keep a central repository of the log events generated
    # that the original process can access

    global test_log_manager
    global test_log_sync
    global test_log_messages
    global memory_log_handler

    test_log_manager = Manager()
    atexit.register(_atexit_callback)
    test_log_sync = RLock()
    test_log_messages = test_log_manager.list()

    # the format string is the first (fmt) argument; datefmt expects a
    # strftime pattern such as '%Y-%m-%d %H:%M:%S'
    log_format = logging.Formatter('%(asctime)s %(message)s')

    memory_log_handler = MemoryLogHandler()
    memory_log_handler.setLevel(logging.DEBUG)
    memory_log_handler.setFormatter(log_format)
    logging.getLogger().addHandler(memory_log_handler) 
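
MemoryLogHandler is defined elsewhere in ACE. One plausible shape for it — a logging.Handler that appends formatted records to the shared manager list under the lock (a hypothetical sketch, not ACE's implementation):

import logging

class MemoryLogHandler(logging.Handler):
    def emit(self, record):
        # append to the manager-backed list so any process can read it
        with test_log_sync:
            test_log_messages.append(self.format(record))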
Example #5
Source File: translate.py    From flores with Creative Commons Attribution Share Alike 4.0 International
def translate_files_local(args, cmds):
    m = mp.Manager()
    gpu_queue = m.Queue()
    for i in args.cuda_visible_device_ids:
        gpu_queue.put(i)
    with mp.Pool(processes=len(args.cuda_visible_device_ids)) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(translate, [(gpu_queue, cmd) for cmd in cmds]), total=len(cmds)):
            pass 
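
translate is defined elsewhere in flores. The manager queue works as a GPU token pool: each task checks a device id out, runs, and returns it. A hedged sketch of a compatible worker, assuming each cmd is a shell command string (hypothetical):

import os
import subprocess

def translate(task):
    gpu_queue, cmd = task
    gpu_id = gpu_queue.get()               # block until a GPU is free
    try:
        env = dict(os.environ, CUDA_VISIBLE_DEVICES=str(gpu_id))
        subprocess.run(cmd, shell=True, check=True, env=env)
    finally:
        gpu_queue.put(gpu_id)              # hand the GPU back to the pool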
Example #6
Source File: perftest_endpoint.py    From sagemaker-tensorflow-serving-container with Apache License 2.0
def test(self, args, count, test_data):
        self.count = args.count * args.workers
        self.payload_kb = len(test_data[1]) / 1024.0

        manager = multiprocessing.Manager()
        error_counts = manager.dict()
        workers = []
        for i in range(args.workers):
            error_counts[i] = 0
            w = multiprocessing.Process(target=self.test_worker,
                                        args=(i, args, count, test_data, error_counts))
            workers.append(w)

        self.start_time = time.time()
        for w in workers:
            w.start()

        for w in workers:
            w.join()

        self.errors = sum(error_counts.values())
        self.end_time = time.time() 
Example #7
Source File: mpQueue.py    From Learning-Concurrency-in-Python with MIT License
def main():
  m = multiprocessing.Manager()
  sharedQueue = m.Queue()
  sharedQueue.put(2)
  sharedQueue.put(3)
  sharedQueue.put(4)

  process1 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process1.start()

  process2 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process2.start()
  
  process3 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process3.start()
  
  process2.join()
  process1.join()
  process3.join() 
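
myTask is not shown in the excerpt; a minimal stand-in that pulls one item from the shared queue (hypothetical):

def myTask(queue):
  # blocks until an item is available, so start order does not matter
  value = queue.get()
  print("got {} in {}".format(value, multiprocessing.current_process().name))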
Example #8
Source File: parallel.py    From CAMISIM with Apache License 2.0
def add_cmd_tasks(cmd_task_list, identifier=None, stdin_error_lock=mp.Manager().Lock()):
		"""
			Run several command line commands in parallel.

			@attention: use the Manager to get the lock as in this function definition !!!

			@type cmd_task_list: list of TaskCmd
			@param stdin_error_lock: acquiring the lock serializes writes to stdout and stderr

			@return: list of failed commands, dictionary (cmd, task process)
		"""
		assert isinstance(cmd_task_list, list)

		thread_task_list = []
		for cmdTask in cmd_task_list:
			assert isinstance(cmdTask, TaskCmd)
			thread_task_list.append(TaskThread(_runCmd, (cmdTask, stdin_error_lock)))

		return AsyncParallel.add_tasks(thread_task_list, identifier) 
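
The docstring's insistence on a manager lock is because a plain multiprocessing.Lock cannot be pickled and sent to pool workers, while a manager Lock is a proxy object that can. A minimal sketch of the pattern (assumes nothing from CAMISIM):

import multiprocessing as mp

def worker(msg, lock):
    with lock:                       # serialize writes to stdout across processes
        print(msg)

if __name__ == '__main__':
    lock = mp.Manager().Lock()       # picklable proxy, unlike mp.Lock()
    with mp.Pool(4) as pool:
        pool.starmap(worker, [("task %d" % i, lock) for i in range(8)])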
Example #9
Source File: manager.py    From Learning-Concurrency-in-Python with MIT License
def main():
  manager = mp.Manager()
  ns = manager.Namespace()
  ns.x = 1

  print(ns)
  
  process = mp.Process(target=myProcess, args=(ns,))
  process.start()
  process.join()
  print(ns) 
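
myProcess is not shown; a stand-in that mutates the shared namespace, so the second print reflects the child's change (hypothetical):

def myProcess(ns):
  # attribute assignment on the proxy is sent back to the manager process
  ns.x += 1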
Example #10
Source File: main.py    From Playlist-Length with MIT License
def main():
    try:
        parser = get_parser()
        args = parser.parse_args()
        if args.media_type == 'both':
            args.media_type = 'audio/video'
        globals()['media_type'] = REGEX_MAP[args.media_type]
        cache_ob = CacheUtil(args.path, args.media_type)
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        consumer = multiprocessing.Process(target=store_in_cache, args=(queue, cache_ob))
        consumer.start()
        result = calculate_length(
            args.path, args.no_subdir, args.media_type, queue, cache_ob
        )
        consumer.join()
    except KeyboardInterrupt:
        sys.stdout.write('\nPlease wait... exiting gracefully!\n')
    else:
        sys.stdout.write('\n{}\n\n'.format(result))
    finally:
        sys.exit() 
Example #11
Source File: kubernetes_executor.py    From airflow with Apache License 2.0
def __init__(self,
                 kube_config: Any,
                 task_queue: 'Queue[KubernetesJobType]',
                 result_queue: 'Queue[KubernetesResultsType]',
                 kube_client: client.CoreV1Api,
                 worker_uuid: str):
        super().__init__()
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration_pod = WorkerConfiguration(kube_config=self.kube_config).as_pod()
        self._manager = multiprocessing.Manager()
        self.watcher_queue = self._manager.Queue()
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher() 
Example #12
Source File: ModuleDnsSpoof.py    From 3vilTwinAttacker with MIT License
def scanner_network(self,gateway):
        scan = ''
        config_gateway = gateway.split('.')
        del config_gateway[-1]
        for i in config_gateway:
            scan += str(i) + '.'
        gateway = scan
        ranger = str(self.ip_range.text()).split('-')
        jobs = []
        manager = Manager()
        on_ips = manager.dict()
        for n in xrange(int(ranger[0]),int(ranger[1])):
            ip='%s{0}'.format(n)%(gateway)
            p = Process(target=self.working,args=(ip,on_ips))
            jobs.append(p)
            p.start()
        for i in jobs: i.join()
        for i in on_ips.values():
            Headers = []
            n = i.split('|')
            self.data['IPaddress'].append(n[0])
            self.data['MacAddress'].append(n[1])
            self.data['Hostname'].append('<unknown>')
            for n, key in enumerate(reversed(self.data.keys())):
                Headers.append(key)
                for m, item in enumerate(self.data[key]):
                    item = QTableWidgetItem(item)
                    item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                    self.tables.setItem(m, n, item)
        Headers = []
        for key in reversed(self.data.keys()):
            Headers.append(key)
        self.tables.setHorizontalHeaderLabels(Headers) 
Example #13
Source File: text_summary_merge.py    From nlg-yongzhuo with MIT License
def summary_multi_preprocess(doc, num=None, fs=[text_pronouns, text_teaser, mmr, text_rank, lead3, lda, lsi, nmf]):
    """
        len(fs) 个进程
    :param doc: str
    :return: list
    """
    manager = Manager()
    return_dict = manager.dict()
    jobs = []
    for i in range(len(fs)):
        p = Process(target=worker, args=(i, doc, num, fs, return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    return list(return_dict.values()) 
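
worker is defined elsewhere in nlg-yongzhuo; it presumably runs the i-th summarizer and records its output under its index. A hedged sketch (the exact call signature of each summarizer is an assumption):

def worker(i, doc, num, fs, return_dict):
    # store each summarizer's output under its index in the shared dict
    return_dict[i] = fs[i](doc)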
Example #14
Source File: multiprocess.py    From Computable with MIT License
def _import_mp():
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        # Temporarily ignore SIGINT so that the manager's server process
        # (which holds the shared Python objects and lets other processes
        # manipulate them through proxies) is not killed by Ctrl+C; the
        # communication channel between subprocesses and the main process
        # then stays usable after a KeyboardInterrupt.
        old = signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        # Restore the previous handler so the main process still gets a
        # KeyboardInterrupt exception on Ctrl+C.
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning) 
Example #15
Source File: event_loop.py    From AMS with Apache License 2.0
def __init__(self, _id):
        self.manager = Manager()

        self.event_loop_id = _id
        self.target = Target.new_target(self.event_loop_id, self.__class__.__name__)
        self.__subscribers = {}
        self.__subscribers_lock = self.manager.Lock()
        self.__publishers = {}
        self.__client = None
        self.__main_loop = None
        self.__pid = os.getpid()

        self.__topicPub = Topic()
        self.__topicPub.set_targets(Target.new_target(self.event_loop_id, EventLoop.__name__))
        self.__topicPub.set_categories(EVENT_LOOP.TOPIC.CATEGORIES.RESPONSE)

        self.__topicSub = Topic()
        self.__topicSub.set_targets(None, Target.new_target(self.event_loop_id, EventLoop.__name__))
        self.__topicSub.set_categories(EVENT_LOOP.TOPIC.CATEGORIES.REQUEST)
        self.__topicSub.set_message(EventLoopMessage)
        self.set_subscriber(self.__topicSub, self.on_event_loop_message)

        self.__user_data = None
        self.__user_will = None 
Example #16
Source File: multiprocess.py    From locality-sensitive-hashing with MIT License
def _import_mp():
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        # Temporarily ignore SIGINT so that the manager's server process
        # (which holds the shared Python objects and lets other processes
        # manipulate them through proxies) is not killed by Ctrl+C; the
        # communication channel between subprocesses and the main process
        # then stays usable after a KeyboardInterrupt.
        old = signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        # Restore the previous handler so the main process still gets a
        # KeyboardInterrupt exception on Ctrl+C.
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning) 
Example #17
Source File: base_profiler.py    From vprof with BSD 2-Clause "Simplified" License
def run_in_separate_process(func, *args, **kwargs):
    """Runs function in separate process.

    This function is used instead of a decorator, since Python multiprocessing
    module can't serialize decorated function on all platforms.
    """
    manager = multiprocessing.Manager()
    manager_dict = manager.dict()
    process = ProcessWithException(
        manager_dict, target=func, args=args, kwargs=kwargs)
    process.start()
    process.join()
    exc = process.exception
    if exc:
        raise exc
    return process.output 
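
ProcessWithException is defined elsewhere in vprof; the manager dict is the channel that carries the child's result or exception back to the parent. One way such a class could look (a hypothetical sketch, not vprof's code):

import multiprocessing

class ProcessWithException(multiprocessing.Process):
    """Stores the target's return value or exception in a manager dict."""

    def __init__(self, result_dict, target=None, args=(), kwargs=None):
        super().__init__()
        self._dict = result_dict
        self._target, self._args, self._kwargs = target, args, kwargs or {}

    def run(self):
        try:
            self._dict['output'] = self._target(*self._args, **self._kwargs)
            self._dict['exception'] = None
        except Exception as exc:   # exception must be picklable to cross over
            self._dict['exception'] = exc

    @property
    def exception(self):
        return self._dict.get('exception')

    @property
    def output(self):
        return self._dict.get('output')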
Example #18
Source File: test_shared_mem_store.py    From dgl with Apache License 2.0
def test_compute():
    manager = Manager()
    return_dict = manager.dict()

    # make server init before worker
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph3', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_compute_func, args=(0, 'test_graph3', return_dict))
    work_p2 = Process(target=check_compute_func, args=(1, 'test_graph3', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Example #19
Source File: test_shared_mem_store.py    From dgl with Apache License 2.0
def test_sync_barrier():
    manager = Manager()
    return_dict = manager.dict()

    # make server init before worker
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph4', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_sync_barrier, args=(0, 'test_graph4', return_dict))
    work_p2 = Process(target=check_sync_barrier, args=(1, 'test_graph4', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Example #20
Source File: test_multiprocessing.py    From ironpython2 with Apache License 2.0
def test_answer_challenge_auth_failure(self):
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')

Example #21
Source File: plugintest.py    From locality-sensitive-hashing with MIT License
def __init__(self):
        # per advice at:
        #    http://docs.python.org/library/multiprocessing.html#all-platforms
        self.__master = getpid()
        self.__queue = Manager().Queue()
        self.__buffer = StringIO()
        self.softspace = 0 
Example #22
Source File: mBot.py    From python-for-mbot with GNU General Public License v2.0
def start(self):
        self.manager = Manager()
        self.dict = self.manager.dict()
        self.dict.device = hid.device()
        self.dict.device.open(0x0416, 0xffff)
        #self.dict.device.hid_set_nonblocking(self.device,1)
        self.buffer = []
        self.bufferIndex = 0 
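
A caveat worth noting here: `self.dict.device = hid.device()` assigns a plain attribute on the local DictProxy object, not an entry in the shared dict, so other processes never see it (and a USB device handle would not be picklable anyway). Only key access goes through the manager:

from multiprocessing import Manager

d = Manager().dict()
d['vendor_id'] = 0x0416   # shared: stored in the manager's server process
d.device = object()       # NOT shared: plain attribute on the local proxy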
Example #23
Source File: mBot.py    From python-for-mbot with GNU General Public License v2.0
def __init__(self):
        signal.signal(signal.SIGINT, self.exit)
        self.manager = Manager()
        self.__selectors = self.manager.dict()
        self.buffer = []
        self.bufferIndex = 0
        self.isParseStart = False
        self.exiting = False
        self.isParseStartIndex = 0 
Example #24
Source File: local_executor.py    From airflow with Apache License 2.0
def start(self) -> None:
        """Starts the executor"""
        self.manager = Manager()
        self.result_queue = self.manager.Queue()
        self.workers = []
        self.workers_used = 0
        self.workers_active = 0
        self.impl = (LocalExecutor.UnlimitedParallelism(self) if self.parallelism == 0
                     else LocalExecutor.LimitedParallelism(self))

        self.impl.start() 
Example #25
Source File: dataset.py    From calamari with Apache License 2.0
def __init__(self,
                 mode: DataSetMode,
                 args: dict,
                 ):
        """ Create a dataset from memory
        Since this dataset already contains all data in the memory, this dataset may not be loaded
        Parameters
        ----------
        """
        super().__init__(mode)

        self.loaded = False
        self.lines_per_epoch = 10000
        self._samples = [{'id': '{}'.format(i)} for i in range(self.lines_per_epoch)]
        self.text_generator_params = args.get('text_generator_params', TextGeneratorParameters())
        self.line_generator_params = args.get('line_generator_params', LineGeneratorParameters())
        self.manager = Manager()
        self.data_queue = self.manager.Queue(50)
        self.data_generators = [
            LineGeneratorProcess(
                self.data_queue,
                self.text_generator_params,
                self.line_generator_params,
                "{}".format(i),
            ) for i in range(8)
        ]
        for d in self.data_generators:
            d.start() 
Example #26
Source File: Main.py    From python-in-practice with GNU General Public License v3.0
def create_variables(self):
        settings = TkUtil.Settings.Data
        self.sourceText = tk.StringVar()
        self.targetText = tk.StringVar()
        self.statusText = tk.StringVar()
        self.statusText.set("Choose or enter folders, then click Scale...")
        self.dimensionText = tk.StringVar()
        self.restore = settings.get_bool(GENERAL, RESTORE, True)
        self.total = self.copied = self.scaled = 0
        self.worker = None
        self.state = multiprocessing.Manager().Value("i", IDLE) 
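
The Value proxy gives the GUI and the worker process one shared integer flag; the worker would flip it as it runs, along these lines (hypothetical; WORKING is a stand-in constant not shown in the excerpt):

def scale_worker(state):
    state.value = WORKING    # immediately visible in the GUI process
    # ... copy and scale the images ...
    state.value = IDLE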
Example #27
Source File: test_bmuf.py    From fairseq with MIT License
def bmuf_process(self, args, iterations):
        processes = []
        results = Manager().dict()
        ctx = torch.multiprocessing.get_context("spawn")
        for rank in range(args.distributed_world_size):
            p = ctx.Process(
                target=single_gpu_training, args=(args, rank, iterations, results)
            )
            p.start()
            processes.append(p)

        for p in processes:
            p.join()
        return results 
Example #28
Source File: analysis.py    From ACE with Apache License 2.0
def _start_io_tracker():
    import multiprocessing

    global _track_io
    global _io_tracker_manager
    global _io_tracker_sync
    global _write_count
    global _read_count

    _io_tracker_manager = multiprocessing.Manager()
    _io_tracker_sync = multiprocessing.RLock()
    _write_count = _io_tracker_manager.Value('I', 0, lock=False)
    _read_count = _io_tracker_manager.Value('I', 0, lock=False)
    _track_io = True 
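
Because the counters are created with lock=False, the Value proxies do no locking of their own; the explicit RLock is what makes a read-modify-write safe. The update side would look like this (a hypothetical sketch, not ACE's code):

def _record_write():
    # unlocked manager Value: guard the read-modify-write explicitly
    with _io_tracker_sync:
        _write_count.value += 1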
Example #29
Source File: extract_feats.py    From speaker_extraction with GNU General Public License v3.0
def main(unused_argv):
    print('Extract starts ...')
    print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))

    if not os.path.exists(os.path.join(FLAGS.output_dir, FLAGS.data_type)):
        os.makedirs(os.path.join(FLAGS.output_dir, FLAGS.data_type))

    lists = open(FLAGS.list_path).readlines()

    # check whether the cmvn file for training exist, remove if exist.
    if os.path.exists(FLAGS.inputs_cmvn):
        os.remove(FLAGS.inputs_cmvn)
    if os.path.exists(FLAGS.inputs_cmvn.replace('cmvn', 'cmvn_aux')):
        os.remove(FLAGS.inputs_cmvn.replace('cmvn', 'cmvn_aux'))

    mean_var_dict = multiprocessing.Manager().dict()
    mean_var_dict_aux = multiprocessing.Manager().dict()
    pool = multiprocessing.Pool(FLAGS.num_threads)
    workers = []
    for item in lists:
        # pass the callable and its arguments separately; calling the
        # function here would execute it serially in the parent process
        workers.append(pool.apply_async(extract_mag_feats,
                                        (item, mean_var_dict, mean_var_dict_aux)))
    pool.close()
    pool.join()

    # convert the utterance level intermediates for mean and var to global mean and std, then save
    cal_global_mean_std(FLAGS.inputs_cmvn, mean_var_dict.values())
    cal_global_mean_std(FLAGS.inputs_cmvn.replace('cmvn', 'cmvn_aux'), mean_var_dict_aux.values())
    
    print(time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()))
    print('Extract ends.') 
Example #30
Source File: bcf.py    From nightmare with GNU General Public License v2.0
def record_metrics(self, input_file):    
    mgr = Manager()
    metrics_data = mgr.list()

    procs = []
    for i in range(self.metrics):
      p = Process(target=self.record_metric, args=(input_file, metrics_data))
      p.start()
      procs.append(p)

      if len(procs) >= self.procs:
        for p in procs:
          p.join()
        procs = []
    
    for p in procs:
      p.join()

    l = set()
    for metric in metrics_data:
      if self.non_uniques:
        l.add(metric.bbs)
      else:
        l.add(metric.unique_bbs)

    self.stats["min"] = min(l)
    self.stats["max"] = max(l)
    self.stats["avg"] = reduce(lambda x, y: x + y, l) / float(len(l))

    self.original_stats = dict(self.stats)
    
    self.print_statistics()