Python multiprocessing.Manager() Examples

The following are 30 code examples showing how to use multiprocessing.Manager(). The examples are extracted from open source projects; the source project, author, file, and license are noted above each example.

You may also want to check out the other available functions and classes of the multiprocessing module.
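
As a quick orientation before the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic pattern: Manager() starts a server process that hosts shared objects such as dicts and queues, and hands out proxies that worker processes can safely read and mutate.

import multiprocessing

def worker(shared_dict, key):
    # Each worker writes its result into the manager-backed dict proxy.
    shared_dict[key] = key * key

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    shared_dict = manager.dict()
    procs = [multiprocessing.Process(target=worker, args=(shared_dict, i))
             for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(dict(shared_dict))  # {0: 0, 1: 1, 2: 4, 3: 9}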

Example 1
Project: gym-malware   Author: endgameinc   File: download_samples.py    License: MIT License
def use_virustotal(args):
    """
    Use Virustotal to download the environment malware
    """
    m = multiprocessing.Manager()
    download_queue = m.JoinableQueue(args.nconcurrent)

    archive_procs = [
        multiprocessing.Process(
            target=download_worker_function,
            args=(download_queue, args.vtapikey))
        for i in range(args.nconcurrent)
    ]
    for w in archive_procs:
        w.start()

    for row in get_sample_hashes():
        download_queue.put(row["sha256"])

    # one STOP sentinel per worker so that every consumer shuts down
    for i in range(args.nconcurrent):
        download_queue.put("STOP")

    download_queue.join()
    for w in archive_procs:
        w.join() 
Example 2
Project: dgl   Author: dmlc   File: test_shared_mem_store.py    License: Apache License 2.0
def test_init():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph1', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_init_func, args=(0, 'test_graph1', return_dict))
    work_p2 = Process(target=check_init_func, args=(1, 'test_graph1', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
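
This test (like Examples 7 and 8 below, from the same dgl test file) combines two idioms: a shared Value('i', 0) flag that the parent polls until the server process reports it is ready, and a manager dict that collects each worker's exit status. A stripped-down sketch of the handshake, with a placeholder standing in for dgl's server_func:

import time
from multiprocessing import Process, Value

def server_func(ready):
    # ... expensive server setup would happen here ...
    ready.value = 1  # signal the parent that initialization is done

if __name__ == '__main__':
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(server_init,))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)  # poll until the server flips the flag
    # ... start the worker processes here ...
    serv_p.join()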
Example 3
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: mpQueue.py    License: MIT License
def main():
  m = multiprocessing.Manager()
  sharedQueue = m.Queue()
  sharedQueue.put(2)
  sharedQueue.put(3)
  sharedQueue.put(4)

  process1 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process1.start()

  process2 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process2.start()
  
  process3 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process3.start()
  
  process2.join()
  process1.join()
  process3.join() 
Example 4
Project: CAMISIM   Author: CAMI-challenge   File: parallel.py    License: Apache License 2.0
def add_cmd_tasks(cmd_task_list, identifier=None, stdin_error_lock=mp.Manager().Lock()):
		"""
			Run several command line commands in parallel.

			@attention: use the Manager to get the lock as in this function definition !!!

			@type cmd_task_list: list of TaskCmd
			@param stdin_error_lock: acquiring the lock enables writing to the stdout and stderr

			@return: list of failed commands, dictionary (cmd, task process)
		"""
		assert isinstance(cmd_task_list, list)

		thread_task_list = []
		for cmdTask in cmd_task_list:
			assert isinstance(cmdTask, TaskCmd)
			thread_task_list.append(TaskThread(_runCmd, (cmdTask, stdin_error_lock)))

		return AsyncParallel.add_tasks(thread_task_list, identifier) 
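
The docstring's warning exists because a plain multiprocessing.Lock() cannot be passed to pool workers as an argument (it may only be shared through inheritance), whereas a manager lock is a picklable proxy that can. Note also that a default argument such as mp.Manager().Lock() is evaluated once, at definition time, so the manager's server process is spawned as soon as the module is imported. A minimal sketch of the proxy behaviour, unrelated to CAMISIM's helpers:

import multiprocessing as mp

def report(lock, msg):
    with lock:  # the proxy forwards acquire/release to the manager's server process
        print(msg)

if __name__ == '__main__':
    lock = mp.Manager().Lock()  # a proxy object, safe to pass through a Pool
    with mp.Pool(2) as pool:
        pool.starmap(report, [(lock, 'a'), (lock, 'b')])
    # Using mp.Lock() here instead raises RuntimeError: Lock objects should
    # only be shared between processes through inheritance.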
Example 5
Project: pySocialWatcher   Author: maraujo   File: utils.py    License: MIT License
def trigger_request_process_and_return_response(rows_to_request):
    process_manager = Manager()
    shared_queue = process_manager.Queue()
    shared_queue_list = []
    list_process = []

    # Trigger Process in rows
    for index, row in rows_to_request.iterrows():
        token, account = get_token_and_account_number_or_wait()
        p = Process(target=trigger_facebook_call, args=(index, row, token, account, shared_queue))
        list_process.append(p)

    # Start the processes (explicit loops; a lazy map() would never run in Python 3)
    for p in list_process:
        p.start()
    # Wait for every process to finish
    for p in list_process:
        p.join()
    # Check for exceptions
    for p in list_process:
        check_exception(p)

    # Put things from shared list to normal list
    while shared_queue.qsize() != 0:
        shared_queue_list.append(shared_queue.get())
    return shared_queue_list 
Example 6
Project: vprof   Author: nvdv   File: base_profiler.py    License: BSD 2-Clause "Simplified" License
def run_in_separate_process(func, *args, **kwargs):
    """Runs function in separate process.

    This function is used instead of a decorator, since Python multiprocessing
    module can't serialize decorated function on all platforms.
    """
    manager = multiprocessing.Manager()
    manager_dict = manager.dict()
    process = ProcessWithException(
        manager_dict, target=func, args=args, kwargs=kwargs)
    process.start()
    process.join()
    exc = process.exception
    if exc:
        raise exc
    return process.output 
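
A hypothetical call site (some_slow_function and its arguments are illustrative, not part of vprof) would look like result = run_in_separate_process(some_slow_function, dataset, retries=3). The manager dict presumably carries the child's return value and any raised exception back to the parent, which is what lets the wrapper re-raise in the caller's process instead of losing the error in the child.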
Example 7
Project: dgl   Author: dmlc   File: test_shared_mem_store.py    License: Apache License 2.0
def test_compute():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph3', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_compute_func, args=(0, 'test_graph3', return_dict))
    work_p2 = Process(target=check_compute_func, args=(1, 'test_graph3', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Example 8
Project: dgl   Author: dmlc   File: test_shared_mem_store.py    License: Apache License 2.0
def test_sync_barrier():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph4', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_sync_barrier, args=(0, 'test_graph4', return_dict))
    work_p2 = Process(target=check_sync_barrier, args=(1, 'test_graph4', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Example 9
Project: ironpython2   Author: IronLanguages   File: test_multiprocessing.py    License: Apache License 2.0
def test_answer_challenge_auth_failure(self):
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')

Example 10
Project: ACE   Author: IntegralDefense   File: test.py    License: Apache License 2.0
def initialize_unittest_logging():
    # ACE is multi-process multi-threaded
    # so we use this special logging mechanism to keep a central repository of the log events generated
    # that the original process can access

    global test_log_manager
    global test_log_sync
    global test_log_messages
    global memory_log_handler

    test_log_manager = Manager()
    atexit.register(_atexit_callback)
    test_log_sync = RLock()
    test_log_messages = test_log_manager.list()

    # include a timestamp on each captured log line (Formatter's first
    # argument is the message format; datefmt alone has no effect)
    log_format = logging.Formatter('%(asctime)s %(message)s')

    memory_log_handler = MemoryLogHandler()
    memory_log_handler.setLevel(logging.DEBUG)
    memory_log_handler.setFormatter(log_format)
    logging.getLogger().addHandler(memory_log_handler) 
Example 11
Project: locality-sensitive-hashing   Author: singhj   File: multiprocess.py    License: MIT License
def _import_mp():
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        # Prevent the manager's server process (which holds Python objects
        # and lets other processes manipulate them through proxies) from
        # being interrupted by SIGINT (KeyboardInterrupt), so that the
        # communication channel between subprocesses and the main process
        # stays usable after Ctrl+C is received in the main process.
        old = signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        # Restore the old handler so the main process still receives a
        # KeyboardInterrupt exception on Ctrl+C.
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning) 
Example 12
Project: AMS   Author: CPFL   File: event_loop.py    License: Apache License 2.0
def __init__(self, _id):
        self.manager = Manager()

        self.event_loop_id = _id
        self.target = Target.new_target(self.event_loop_id, self.__class__.__name__)
        self.__subscribers = {}
        self.__subscribers_lock = self.manager.Lock()
        self.__publishers = {}
        self.__client = None
        self.__main_loop = None
        self.__pid = os.getpid()

        self.__topicPub = Topic()
        self.__topicPub.set_targets(Target.new_target(self.event_loop_id, EventLoop.__name__))
        self.__topicPub.set_categories(EVENT_LOOP.TOPIC.CATEGORIES.RESPONSE)

        self.__topicSub = Topic()
        self.__topicSub.set_targets(None, Target.new_target(self.event_loop_id, EventLoop.__name__))
        self.__topicSub.set_categories(EVENT_LOOP.TOPIC.CATEGORIES.REQUEST)
        self.__topicSub.set_message(EventLoopMessage)
        self.set_subscriber(self.__topicSub, self.on_event_loop_message)

        self.__user_data = None
        self.__user_will = None 
Example 13
Project: Computable   Author: ktraunmueller   File: multiprocess.py    License: MIT License
def _import_mp():
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        # Prevent the manager's server process (which holds Python objects
        # and lets other processes manipulate them through proxies) from
        # being interrupted by SIGINT (KeyboardInterrupt), so that the
        # communication channel between subprocesses and the main process
        # stays usable after Ctrl+C is received in the main process.
        old = signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        # Restore the old handler so the main process still receives a
        # KeyboardInterrupt exception on Ctrl+C.
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning) 
Example 14
Project: nlg-yongzhuo   Author: yongzhuo   File: text_summary_merge.py    License: MIT License
def summary_multi_preprocess(doc, num=None, fs=[text_pronouns, text_teaser, mmr, text_rank, lead3, lda, lsi, nmf]):
    """
        Runs len(fs) processes, one per summarizer function in fs.
    :param doc: str
    :return: list
    """
    manager = Manager()
    return_dict = manager.dict()
    jobs = []
    for i in range(len(fs)):
        p = Process(target=worker, args=(i, doc, num, fs, return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    return list(return_dict.values()) 
Example 15
Project: Playlist-Length   Author: karansthr   File: main.py    License: MIT License
def main():
    try:
        parser = get_parser()
        args = parser.parse_args()
        if args.media_type == 'both':
            args.media_type = 'audio/video'
        globals()['media_type'] = REGEX_MAP[args.media_type]
        cache_ob = CacheUtil(args.path, args.media_type)
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        consumer = multiprocessing.Process(target=store_in_cache, args=(queue, cache_ob))
        consumer.start()
        result = calculate_length(
            args.path, args.no_subdir, args.media_type, queue, cache_ob
        )
        consumer.join()
    except KeyboardInterrupt:
        sys.stdout.write('\nPlease wait... exiting gracefully!\n')
    else:
        sys.stdout.write('\n{}\n\n'.format(result))
    finally:
        sys.exit() 
Example 16
Project: airflow   Author: apache   File: kubernetes_executor.py    License: Apache License 2.0
def __init__(self,
                 kube_config: Any,
                 task_queue: 'Queue[KubernetesJobType]',
                 result_queue: 'Queue[KubernetesResultsType]',
                 kube_client: client.CoreV1Api,
                 worker_uuid: str):
        super().__init__()
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration_pod = WorkerConfiguration(kube_config=self.kube_config).as_pod()
        self._manager = multiprocessing.Manager()
        self.watcher_queue = self._manager.Queue()
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher() 
Example 17
def test(self, args, count, test_data):
        self.count = args.count * args.workers
        self.payload_kb = len(test_data[1]) / 1024.0

        manager = multiprocessing.Manager()
        error_counts = manager.dict()
        workers = []
        for i in range(args.workers):
            error_counts[i] = 0
            w = multiprocessing.Process(target=self.test_worker,
                                        args=(i, args, count, test_data, error_counts))
            workers.append(w)

        self.start_time = time.time()
        for w in workers:
            w.start()

        for w in workers:
            w.join()

        self.errors = sum(error_counts.values())
        self.end_time = time.time() 
Example 18
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: manager.py    License: MIT License
def main():
  manager = mp.Manager()
  ns = manager.Namespace()
  ns.x = 1

  print(ns)
  
  process = mp.Process(target=myProcess, args=(ns,))
  process.start()
  process.join()
  print(ns) 
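
One Namespace gotcha is worth noting here: rebinding an attribute (ns.x = 2) propagates between processes, but mutating a mutable value in place (for example appending to a list held in ns) does not, because the proxy only intercepts attribute assignment. A quick sketch of the safe read-modify-rebind pattern:

import multiprocessing as mp

def mutate(ns):
    ns.x += 1          # read, modify, rebind: the rebind propagates
    items = ns.items   # copy the list out of the proxy ...
    items.append(99)
    ns.items = items   # ... then rebind it so the change becomes visible

if __name__ == '__main__':
    manager = mp.Manager()
    ns = manager.Namespace()
    ns.x = 1
    ns.items = []
    p = mp.Process(target=mutate, args=(ns,))
    p.start()
    p.join()
    print(ns.x, ns.items)  # 2 [99]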
Example 19
Project: CAMISIM   Author: CAMI-challenge   File: parallel.py    License: Apache License 2.0
def runCmdParallel(cmdTaskList, maxProc=mp.cpu_count(), stdInErrLock=mp.Manager().Lock()):
	"""
		Run several command line commands in parallel.

		@attention: use the Manager to get the lock as in this function definition !!!

		@param cmdTaskList: list of command line tasks
		@type cmdTaskList: list of TaskCmd
		@param maxProc: maximum number of tasks that will be run in parallel at the same time
		@param stdInErrLock: acquiring the lock enables writing to the stdout and stderr

		@return: list of failed commands, dictionary (cmd, task process)
	"""
	assert isinstance(cmdTaskList, list)
	assert isinstance(maxProc, int)

	threadTaskList = []
	for cmdTask in cmdTaskList:
		assert isinstance(cmdTask, TaskCmd)

		threadTaskList.append(TaskThread(_runCmd, (cmdTask, stdInErrLock)))

	returnValueList = runThreadParallel(threadTaskList, maxProc)

	failList = []
	for process, task in returnValueList:
		if process.returncode != 0:
			failList.append(dict(process=process, task=task))
	if len(failList) > 0:
		return failList
	else:
		return None 
Example 20
Project: CAMISIM   Author: CAMI-challenge   File: parallel.py    License: Apache License 2.0
def runCmdSerial(cmdTaskList, verbose=False, stopWhenError=True, stdInErrLock=None):
	"""
		Run several command line commands one by one.

		@attention: Use the Manager to get the lock (mp.Manager().Lock()) if the lock shared among multiple processes!

		@param cmdTaskList: list of command line tasks
		@type cmdTaskList: list of TaskCmd
		@param stdInErrLock: acquiring the lock enables writing to the stdout and stderr
		@type stdInErrLock: multiprocessing.Lock()
	"""
	assert isinstance(cmdTaskList, list)

	counter = 0
	failList = []
	for task in cmdTaskList:
		counter += 1
		if verbose:
			msg = 'Starting "#%s" cmd: %s\n' % (counter, task.cmd)
			if stdInErrLock is not None:
				stdInErrLock.acquire()
			sys.stdout.write(msg)
			sys.stdout.flush()
			if stdInErrLock is not None:
				stdInErrLock.release()

		# run command
		process, taskCmd = _runCmd(task, stdInErrLock)

		if process.returncode != 0:
			failList.append(dict(process=process, task=task))
			if stopWhenError:
				break
	if len(failList) > 0:
		return failList
	else:
		return None 
Example 21
Project: CAMISIM   Author: CAMI-challenge   File: parallel.py    License: Apache License 2.0
def _testMisc():
	cmd = 'echo "a"; echo "b" >&2'
	lock = mp.Manager().Lock()

	# print runCmdSerial([TaskCmd(cmd)], stdInErrLock=lock, verbose=True)
	t = TaskThread(_runCmd, (TaskCmd(cmd), lock))
	print runThreadParallel([t])


# if __name__ == "__main__":
#	 pass
	# _testThread()
	# _testCmd()
	# _testMisc() 
Example 22
def sample_normalize(self, k_samples=1000, overwrite=False):
        """ Estimate the mean and std of the features from the training set
        Params:
            k_samples (int): Use this number of samples for estimation
        """
        log = LogUtil().getlogger()
        log.info("Calculating mean and std from samples")
        # if k_samples is negative then it goes through total dataset
        if k_samples < 0:
            audio_paths = self.audio_paths

        # using sample
        else:
            k_samples = min(k_samples, len(self.train_audio_paths))
            samples = self.rng.sample(self.train_audio_paths, k_samples)
            audio_paths = samples
        manager = Manager()
        return_dict = manager.dict()
        jobs = []
        for threadIndex in range(cpu_count()):
            proc = Process(target=self.preprocess_sample_normalize, args=(threadIndex, audio_paths, overwrite, return_dict))
            jobs.append(proc)
            proc.start()
        for proc in jobs:
            proc.join()

        feat = np.sum(np.vstack([item['feat'] for item in return_dict.values()]), axis=0)
        count = sum([item['count'] for item in return_dict.values()])
        feat_squared = np.sum(np.vstack([item['feat_squared'] for item in return_dict.values()]), axis=0)

        self.feats_mean = feat / float(count)
        self.feats_std = np.sqrt(feat_squared / float(count) - np.square(self.feats_mean))
        np.savetxt(
            generate_file_path(self.save_dir, self.model_name, 'feats_mean'), self.feats_mean)
        np.savetxt(
            generate_file_path(self.save_dir, self.model_name, 'feats_std'), self.feats_std)
        log.info("End calculating mean and std from samples") 
Example 23
Project: DeepLung   Author: uci-cbcl   File: detect.py    License: GNU General Public License v3.0
def mp_get_pr(conf_th, nms_th, detect_th, num_procs=64):
    start_time = time.time()
    
    num_samples = len(pbb)
    split_size = int(np.ceil(float(num_samples) / num_procs))
    num_procs = int(np.ceil(float(num_samples) / split_size))
    
    manager = mp.Manager()
    tp = manager.list(range(num_procs))
    fp = manager.list(range(num_procs))
    p = manager.list(range(num_procs))
    procs = []
    for pid in range(num_procs):
        proc = mp.Process(
            target = get_pr,
            args = (
                pbb[pid * split_size:min((pid + 1) * split_size, num_samples)],
                lbb[pid * split_size:min((pid + 1) * split_size, num_samples)],
                conf_th, nms_th, detect_th, pid, tp, fp, p))
        procs.append(proc)
        proc.start()
    
    for proc in procs:
        proc.join()

    tp = np.sum(tp)
    fp = np.sum(fp)
    p = np.sum(p)
    
    end_time = time.time()
    print('conf_th %1.1f, nms_th %1.1f, detect_th %1.1f, tp %d, fp %d, p %d, recall %f, time %3.2f' % (conf_th, nms_th, detect_th, tp, fp, p, float(tp) / p, end_time - start_time)) 
Example 24
Project: pyshgp   Author: erp12   File: search.py    License: MIT License
def __init__(self,
                 spawner: GeneSpawner,
                 evaluator: Evaluator,
                 n_proc: Optional[int] = None):
        self.manager = Manager()
        self.ns = self.manager.Namespace()
        self.ns.spawner = spawner
        self.ns.evaluator = evaluator
        self.pool = None
        if n_proc is None:
            self.pool = Pool()
        else:
            self.pool = Pool(n_proc) 
Example 25
Project: flores   Author: facebookresearch   File: translate.py    License: Creative Commons Attribution Share Alike 4.0 International
def translate_files_local(args, cmds):
    m = mp.Manager()
    gpu_queue = m.Queue()
    for i in args.cuda_visible_device_ids:
        gpu_queue.put(i)
    with mp.Pool(processes=len(args.cuda_visible_device_ids)) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(translate, [(gpu_queue, cmd) for cmd in cmds]), total=len(cmds)):
            pass 
Example 26
Project: pwnypack   Author: edibledinos   File: oracle.py    License: MIT License
def encrypt_block(oracle, block_len, block, plain, pool):
    if pool is not None:
        event_factory = multiprocessing.Manager().Event
        map_func = pool.imap_unordered
    else:
        event_factory = threading.Event
        map_func = map

    cipher = bytearray([0] * block_len)

    for i in range(block_len - 1, -1, -1):
        chunk = cipher[:]

        for k in range(i + 1, block_len):
            chunk[k] ^= block_len - i

        event = event_factory()
        f = functools.partial(check_padding_encrypt, event, oracle, block_len, chunk, block, i)

        for result in map_func(f, interruptable_iter(event, range(256))):
            if result is not None:
                cipher[i] = result[i] ^ (block_len - i)

        if not event.is_set():
            raise RuntimeError('Oracle is unstable')

    for k, p in enumerate(plain):
        cipher[k] ^= p

    return cipher 
Example 27
Project: zun   Author: openstack   File: service.py    License: Apache License 2.0
def __init__(self):
        super(CNIDaemonServiceManager, self).__init__()
        # TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
        self.manager = multiprocessing.Manager()
        registry = self.manager.dict()  # For Watcher->Server communication.
        self.add(CNIDaemonWatcherService, workers=1, args=(registry,))
        self.add(CNIDaemonServerService, workers=1, args=(registry,))
        self.register_hooks(on_terminate=self.terminate) 
Example 28
Project: misp42splunk   Author: remg427   File: event_writer.py    License: GNU Lesser General Public License v3.0
def __init__(self, process_safe=False):
        if process_safe:
            self._mgr = multiprocessing.Manager()
            self._event_queue = self._mgr.Queue(1000)
        else:
            self._event_queue = queue.Queue(1000)
        self._event_writer = threading.Thread(target=self._do_write_events)
        self._event_writer.daemon = True
        self._started = False
        self._exception = False 
Example 29
Project: misp42splunk   Author: remg427   File: event_writer.py    License: GNU Lesser General Public License v3.0
def __init__(self, process_safe=False):
        if process_safe:
            self._mgr = multiprocessing.Manager()
            self._event_queue = self._mgr.Queue(1000)
        else:
            self._event_queue = queue.Queue(1000)
        self._event_writer = threading.Thread(target=self._do_write_events)
        self._event_writer.daemon = True
        self._started = False
        self._exception = False 
Example 30
Project: 3vilTwinAttacker   Author: wi-fi-analyzer   File: ModuleDnsSpoof.py    License: MIT License
def scanner_network(self, gateway):
        scan = ''
        config_gateway = gateway.split('.')
        del config_gateway[-1]
        for i in config_gateway:
            scan += str(i) + '.'
        gateway = scan
        ranger = str(self.ip_range.text()).split('-')
        jobs = []
        manager = Manager()
        on_ips = manager.dict()
        for n in xrange(int(ranger[0]), int(ranger[1])):
            ip = '{0}{1}'.format(gateway, n)
            p = Process(target=self.working, args=(ip, on_ips))
            jobs.append(p)
            p.start()
        for i in jobs: i.join()
        for i in on_ips.values():
            Headers = []
            n = i.split('|')
            self.data['IPaddress'].append(n[0])
            self.data['MacAddress'].append(n[1])
            self.data['Hostname'].append('<unknown>')
            for n, key in enumerate(reversed(self.data.keys())):
                Headers.append(key)
                for m, item in enumerate(self.data[key]):
                    item = QTableWidgetItem(item)
                    item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                    self.tables.setItem(m, n, item)
        Headers = []
        for key in reversed(self.data.keys()):
            Headers.append(key)
        self.tables.setHorizontalHeaderLabels(Headers)