Python multiprocessing.Lock() Examples

The following are 30 code examples of multiprocessing.Lock(), collected from open-source projects. The source file, project, and license are noted above each example.
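Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: a multiprocessing.Lock() handed to worker processes so that access to a shared resource (stdout, in this toy case) is serialized.

import multiprocessing

def worker(lock, worker_id):
    # Acquiring the lock as a context manager guarantees it is released,
    # even if the guarded block raises.
    with lock:
        print('worker %d has the lock' % worker_id)

if __name__ == '__main__':
    lock = multiprocessing.Lock()
    procs = [multiprocessing.Process(target=worker, args=(lock, i))
             for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()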
Example #1
Source File: parallel.py    From CAMISIM with Apache License 2.0
def _testCmd(parallel=True):
	print('Start: Test: runCmdParallel')
	inDir = '/Users/ivan/Documents/nobackup/hsim01/562/a'
	outDir = '/Users/ivan/Documents/nobackup/hsim01/562/b'
	MUSCLE_BINARY = '/Users/ivan/Documents/work/tools/muscle/muscle3.8.31_i86darwin64'
	assert os.path.isfile(MUSCLE_BINARY), 'Binary file does not exist: %s' % MUSCLE_BINARY
	cmdListA = []
	for fileName in os.listdir(inDir):
		cmd = '%s -in %s -out %s' % (MUSCLE_BINARY, os.path.join(inDir, fileName), os.path.join(outDir, fileName))
		# print cmd
		cmdListA.append(TaskCmd(cmd, outDir))
		# break

	if parallel:
		failList = runCmdParallel(cmdListA)
	else:
		lock = mp.Lock()
		failList = runCmdSerial(cmdListA, stdInErrLock=lock)
	reportFailedCmd(failList)
	print('Stop: Test: runCmdParallel') 
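The lock handed to runCmdSerial() above exists to keep the stdout/stderr of concurrently running commands from interleaving. A hedged sketch of how a worker might use such a lock (the helper below is illustrative, not the CAMISIM implementation):

import subprocess
import sys

def run_cmd_locked(cmd, stdio_lock):
    # Hypothetical worker: run a shell command, then emit its output under
    # the shared lock so lines from parallel commands do not interleave.
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    with stdio_lock:
        sys.stdout.write(proc.stdout)
        sys.stderr.write(proc.stderr)
    return proc.returncode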
Example #2
Source File: concurrency.py    From imapfw with MIT License
def createLock(self):
        from threading import Lock

        class TLock(LockBase):
            def __init__(self, lock):
                self.lock = lock

            def __enter__(self):
                self.lock.acquire()

            def __exit__(self, t, v, tb):
                self.lock.release()

            def acquire(self):
                self.lock.acquire()

            def release(self):
                self.lock.release()

        return TLock(Lock()) 
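Because TLock defines __enter__ and __exit__, the returned object can be used either as a context manager or via the explicit acquire()/release() pair. A brief usage sketch (backend and do_work() are illustrative names):

lock = backend.createLock()

# Context-manager style: the lock is released even if do_work() raises.
with lock:
    do_work()

# Explicit style, for callers that need finer control.
lock.acquire()
try:
    do_work()
finally:
    lock.release()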
Example #3
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def removeProducer(self, noLock = False):
        if self.num_producers > 0:
            # Lock internal
            if not noLock: self.__internalLock__.acquire()

            # Remove last worker from worker pool
            (worker_num, producer, extra_arg_list) = self.producer_pool.pop()
            logger.debug("Removing Producer-%d" % worker_num)
            # Remove last worker's exitFlag
            producer_exitEvent = self.producer_pool_exitEvent.pop()

            # Set the worker's exit event
            if not producer_exitEvent.is_set():
                logger.debug("Producer-%d exitEvent SET" % worker_num)
                producer_exitEvent.set()

            # Update producer count
            self.num_producers -= 1

            # Release internal
            if not noLock: self.__internalLock__.release()
        else:
            logger.error("Attempted to remove producer from empty pool.") 
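One caveat: removeProducer() releases __internalLock__ only on the success path, so an exception raised between acquire() and release() would leave the engine permanently locked. A hedged sketch of the same structure made exception-safe with try/finally:

def removeProducer(self, noLock=False):
    if self.num_producers <= 0:
        logger.error("Attempted to remove producer from empty pool.")
        return
    if not noLock:
        self.__internalLock__.acquire()
    try:
        # ... pop the worker, set its exit event, decrement num_producers ...
        pass
    finally:
        # Released no matter what happens above.
        if not noLock:
            self.__internalLock__.release()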
Example #4
Source File: FoxTellerReleasesFilter.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def test():
	print("Test mode!")
	import logSetup
	import WebMirror.rules
	import WebMirror.Engine
	import multiprocessing
	logSetup.initLogging()

	c_lok = cookie_lock = multiprocessing.Lock()
	engine = WebMirror.Engine.SiteArchiver(cookie_lock=c_lok)
	engine.dispatchRequest(testJobFromUrl('https://www.foxteller.com/releases'))


	# import WebRequest as webfunc

	# wg = webfunc.WebGetRobust()
	# proc = FoxTellerSeriesPageFilter(pageUrl="urlllllll", pgContent="watttt", type='lolertype', dosuper=False)

	# urls = [
	# 	'https://www.foxteller.com/releases',
	# 	]
	# for url in urls:
	# 	ctnt = wg.getpage(url)
	# 	proc.content = ctnt
	# 	proc.processPage(ctnt) 
Example #5
Source File: JapTemSeriesPageFilter.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def test():
	print("Test mode!")
	import logSetup
	import WebMirror.rules
	import WebMirror.Engine
	import multiprocessing
	logSetup.initLogging()

	c_lok = cookie_lock = multiprocessing.Lock()
	engine = WebMirror.Engine.SiteArchiver(cookie_lock=c_lok)
	engine.dispatchRequest(testJobFromUrl('http://japtem.com/fanfic.php'))


	# import WebRequest as webfunc

	# wg = webfunc.WebGetRobust()
	# proc = JapTemSeriesPageFilter(pageUrl="urlllllll", pgContent="watttt", type='lolertype', dosuper=False)

	# urls = [
	# 	'http://japtem.com/fanfic.php',
	# 	]
	# for url in urls:
	# 	ctnt = wg.getpage(url)
	# 	proc.content = ctnt
	# 	proc.processPage(ctnt) 
Example #6
Source File: Testing.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def exposed_load_urls_from_file(file_path):
	'''
	Load a file of URLs, and feed them through the URL filtering system.
	'''
	with open(file_path, "r") as fp:
		content = fp.readlines()
		content = [tmp.strip() for tmp in content]

	print(content)

	with common.database.session_context() as sess:
		c_lok = cookie_lock = multiprocessing.Lock()
		engine = WebMirror.Engine.SiteArchiver(cookie_lock=c_lok, new_job_queue=None, db_interface=sess)

		job = testJobFromUrl("https://www.webnovel.com/feed/")

		engine.upsertResponseLinks(job, plain=content, debug=True)

		print(engine) 
Example #7
Source File: RRLSeriesUpdateFilter.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def test():
	print("Test mode!")
	import logSetup
	import WebMirror.rules
	import WebMirror.Engine
	import multiprocessing
	logSetup.initLogging()

	c_lok = cookie_lock = multiprocessing.Lock()
	engine = WebMirror.Engine.SiteArchiver(cookie_lock=c_lok)





	# engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fiction/3021'))
	# engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/latest-updates/'))

	# engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/best-rated/'))
	engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/latest-updates/'))
	engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/active-top-50/'))
	engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/weekly-views-top-50/'))
	engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/newest/')) 
Example #8
Source File: WattPadJsonProcessor.py    From ReadableWebProxy with BSD 3-Clause "New" or "Revised" License
def test():
	print("Test mode!")
	import logSetup
	import WebMirror.rules
	import WebMirror.Engine
	import multiprocessing
	logSetup.initLogging()

	c_lok = cookie_lock = multiprocessing.Lock()
	engine = WebMirror.Engine.SiteArchiver(cookie_lock=c_lok)



	job = testJobFromUrl(r'https://www.wattpad.com/api/v3/stories?fields%3Dstories%28id%2Ctitle%2Curl%2Cdescription%29%2Ctotal%2CnextUrl&limit=50&offset=0')
	engine.dispatchRequest(job)

	job = testJobFromUrl(r'https://www.wattpad.com/api/v3/stories?fields%3Dstories%28id%2Ctitle%2Curl%2Cdescription%29%2Ctotal%2CnextUrl&limit=50&offset=1490')
	engine.dispatchRequest(job)

	job = testJobFromUrl(r'https://www.wattpad.com/api/v3/stories?fields%3Dstories%28id%2Ctitle%2Curl%2Cdescription%29%2Ctotal%2CnextUrl&limit=50&offset=1500')
	engine.dispatchRequest(job)

	job = testJobFromUrl(r'https://www.wattpad.com/api/v3/stories?fields%3Dstories%28id%2Ctitle%2Curl%2Cdescription%29%2Ctotal%2CnextUrl&limit=50&offset=1550')
	engine.dispatchRequest(job) 
Example #9
Source File: plugin.py    From pytest-mp with MIT License
def pytest_configure(config):
    config.addinivalue_line('markers',
                            "mp_group('GroupName', strategy): test (suite) is in named "
                            "grouped w/ desired strategy: 'free' (default), 'serial', "
                            "'isolated_free', or 'isolated_serial'.")

    standard_reporter = config.pluginmanager.get_plugin('terminalreporter')
    if standard_reporter:
        from pytest_mp.terminal import MPTerminalReporter
        mp_reporter = MPTerminalReporter(standard_reporter, manager)
        config.pluginmanager.unregister(standard_reporter)
        config.pluginmanager.register(mp_reporter, 'terminalreporter')

    if config.option.use_mp is None:
        if not config.getini('mp'):
            return

    if config.option.xmlpath is not None:
        from pytest_mp.junitxml import MPLogXML
        synchronization['node_reporters'] = manager.list()
        synchronization['node_reporters_lock'] = manager.Lock()
        xmlpath = config.option.xmlpath
        config.pluginmanager.unregister(config._xml)
        config._xml = MPLogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name"), manager)
        config.pluginmanager.register(config._xml, 'mpjunitxml') 
Example #10
Source File: util.py    From atomos with BSD 3-Clause "New" or "Revised" License
def synchronized(fn):
    '''
    A decorator which acquires a lock before attempting to execute its wrapped
    function. Releases the lock in a finally clause.

    :param fn: The function to wrap.
    '''
    lock = threading.Lock()

    @functools.wraps(fn)
    def decorated(*args, **kwargs):
        lock.acquire()
        try:
            return fn(*args, **kwargs)
        finally:
            lock.release()

    return decorated 
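Usage is simple: every function decorated with @synchronized gets its own private threading.Lock, so concurrent calls to that function are serialized while differently decorated functions never contend with each other. For example:

counter = 0

@synchronized
def increment():
    # Only one thread at a time can execute this body.
    global counter
    counter += 1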
Example #11
Source File: main.py    From satellite_tracker with GNU General Public License v3.0
def __init__(self):
        self.led_queue = mp.Queue()
        self.demo_mode = mp.Lock()

        self.led_process = mp.Process(target=led_control, args=(self.led_queue, self.demo_mode,))

        self.shutting_down = False
        self.last_button_release = 0
        self.show_end_of_lines = False

        # The button has multiple functions:
        # Turn the device on when off, single press to show the end of long lines on the display,
        # double press to start demo mode, single press to stay at one animation in demo mode,
        # long press to shut down
        self.button = Button(3, hold_time=2, bounce_time=0.05)
        self.button.when_held = self.shutdown
        self.button.when_released = self.button_pressed

        self.tft = SattrackerTFT()

        self.tle_updated_time = None

        self.tracker = None  # load in start because it takes quite a long time

        self.led_array = led_array_from_constants() 
Example #12
Source File: parallel.py    From CAMISIM with Apache License 2.0
def add_cmd_tasks(cmd_task_list, identifier=None, stdin_error_lock=mp.Manager().Lock()):
		"""
			Run several command line commands in parallel.

			@attention: use the Manager to get the lock as in this function definition !!!

			@type cmd_task_list: list of TaskCmd
			@param stdin_error_lock: acquiring the lock enables writing to stdout and stderr

			@return: list of failed commands, dictionary (cmd, task process)
		"""
		assert isinstance(cmd_task_list, list)

		thread_task_list = []
		for cmdTask in cmd_task_list:
			assert isinstance(cmdTask, TaskCmd)
			thread_task_list.append(TaskThread(_runCmd, (cmdTask, stdin_error_lock)))

		return AsyncParallel.add_tasks(thread_task_list, identifier) 
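The docstring's warning matters because a plain multiprocessing.Lock() cannot be pickled into the argument list of an already-running pool, whereas a Manager().Lock() is a proxy object that can. A minimal sketch of the difference:

import multiprocessing as mp

def task(lock):
    with lock:
        print('acquired the manager lock')

if __name__ == '__main__':
    lock = mp.Manager().Lock()       # picklable proxy to a real Lock
    with mp.Pool(2) as pool:
        pool.map(task, [lock] * 4)   # fine: the proxy survives pickling
    # Passing mp.Lock() the same way raises RuntimeError: lock objects
    # should only be shared between processes through inheritance.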
Example #13
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def addProducer(self, extra_arg_list = []):
        if self.num_producers < self.maxCores:
            # Lock internal
            self.__internalLock__.acquire()

            new_worker_num = self.next_worker_num
            logger.debug("Adding Producer-%d" % (new_worker_num))
            self.producer_pool_exitEvent.append(multiprocessing.Event())
            self.producer_pool.append((new_worker_num, self.producer_Class(
                self.producer_task_queue, self.producer_results_queue, self.get_num_tasks(), self.get_num_tasks(), self.producer_pool_progress,
                self.producer_pool_exitEvent[-1], self.killed_event, extra_arg_list), extra_arg_list))
            self.producer_pool[-1][1].daemon = False # Remove for debugging
            self.producer_pool[-1][1].start()

            # Update worker count
            self.num_producers += 1

            # Update next worker num
            self.next_worker_num += 1

            # Release internal
            self.__internalLock__.release()

            logger.debug("Producer-%d added" % new_worker_num)
        else:
            logger.error("Attempted to start workers beyond the maxCores setting") 
Example #14
Source File: refactor.py    From Computable with MIT License
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None 
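The refactor() method above uses a common shutdown idiom: workers block on a JoinableQueue, and the parent enqueues one None sentinel per worker to tell it to exit. A stripped-down sketch of the same pattern:

import multiprocessing

def worker(queue, output_lock):
    while True:
        item = queue.get()
        try:
            if item is None:            # sentinel: time to exit
                return
            with output_lock:
                print('processed', item)
        finally:
            queue.task_done()           # exactly once per get()

if __name__ == '__main__':
    queue = multiprocessing.JoinableQueue()
    lock = multiprocessing.Lock()
    procs = [multiprocessing.Process(target=worker, args=(queue, lock))
             for _ in range(3)]
    for p in procs:
        p.start()
    for item in range(10):
        queue.put(item)
    for _ in procs:
        queue.put(None)                 # one sentinel per worker
    queue.join()                        # all items marked task_done()
    for p in procs:
        p.join()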
Example #15
Source File: global_writer.py    From tensorboardX with MIT License
def __init__(self, logdir=None, comment='', purge_step=None, max_queue=10,
                 flush_secs=120, filename_suffix='', write_to_disk=True, log_dir=None,
                 coalesce_process=True, **kwargs):
        """
        Initialize a GlobalSummaryWriter. The resulting instance will maintain a monotonically
        increasing ``global_step`` for the event to be written, so there is no need to pass
        the global_step when calling its member functions such as ``add_scalar()``.
        All arguments for the constructor will be passed to the ordinary ``SummaryWriter.__init__()`` directly.

        Examples::

            import multiprocessing as mp
            import numpy as np
            import time
            from tensorboardX import GlobalSummaryWriter
            w = GlobalSummaryWriter()

            def train(x):
                w.add_scalar('poolmap/1', x*np.random.randn())
                time.sleep(0.05*np.random.randint(0, 10))
                w.add_scalar('poolmap/2', x*np.random.randn())

            with mp.Pool() as pool:
                pool.map(train, range(100))

        Expected result:

        .. image:: _static/img/tensorboard/add_scalar_global.png
           :scale: 50 %

        """

        self.smw = SummaryWriter(logdir=logdir, comment=comment, purge_step=purge_step, max_queue=max_queue,
                                 flush_secs=flush_secs, filename_suffix=filename_suffix, write_to_disk=write_to_disk,
                                 log_dir=log_dir)
        self.lock = mp.Lock()
        self.scalar_tag_to_step = mp.Manager().dict()
        self.image_tag_to_step = mp.Manager().dict()
        self.histogram_tag_to_step = mp.Manager().dict()
        self.text_tag_to_step = mp.Manager().dict()
        self.audio_tag_to_step = mp.Manager().dict() 
Example #16
Source File: roi_extractor.py    From lightnet with MIT License
def main():
    global inference_lock
    from multiprocessing.dummy import Pool as ThreadPool
    import multiprocessing

    category_folders = glob.glob('%s/*' % (args.images))

    inference_lock = multiprocessing.Lock()
    cpu_n = multiprocessing.cpu_count()
    pool = ThreadPool(cpu_n)
    _ = pool.map(process, category_folders) 
Example #17
Source File: parloop.py    From eht-imaging with GNU General Public License v3.0
def __init__(self, initval=0, maxval=0):
        self.val = Value('i', initval)
        self.maxval = maxval
        self.lock = Lock() 
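This Value-plus-Lock pairing is the standard shared-counter recipe: self.val.value += 1 is a read-modify-write, so it must happen under the lock. A sketch of the methods such a counter typically adds (hypothetical names, not taken from eht-imaging):

def increment(self):
    # += on a shared Value is not atomic; guard it with the lock.
    with self.lock:
        self.val.value += 1
        return self.val.value

def value(self):
    with self.lock:
        return self.val.value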
Example #18
Source File: refactor.py    From oss-ftp with MIT License
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None 
Example #19
Source File: shared_utils.py    From async-deep-rl with Apache License 2.0
def __init__(self, initval=0):
        self.val = RawValue('i', initval)
        self.last_step_update_target = RawValue('i', initval)
        self.lock = Lock() 
Example #20
Source File: refactor.py    From PyDev.Debugger with Eclipse Public License 1.0
def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None 
Example #21
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def addConsumer(self, extra_arg_list = []):
        if self.num_consumers < self.maxCores:
            # Lock internal
            self.__internalLock__.acquire()

            new_worker_num = self.next_worker_num
            logger.debug("Adding Consumer-%d" % (new_worker_num))
            self.consumer_pool_exitEvent.append(multiprocessing.Event())
            self.consumer_pool.append((new_worker_num, self.consumer_Class(
                self.consumer_task_queue, self.consumer_results_queue, self.get_num_tasks(), self.producer_pool_progress, self.consumer_pool_progress,
                self.consumer_pool_exitEvent[-1], self.killed_event, extra_arg_list), extra_arg_list))
            self.consumer_pool[-1][1].daemon = False  # Remove for debugging
            self.consumer_pool[-1][1].start()

            # Update consumer count
            self.num_consumers += 1

            # Update next worker num
            self.next_worker_num += 1

            # Release internal
            self.__internalLock__.release()

            logger.debug("Consumer-%d added" % new_worker_num)
        else:
            logger.error("Attempted to start workers beyond the maxCores setting") 
Example #22
Source File: QASetting.py    From QUANTAXIS with MIT License
def __init__(self, uri=None):
        self.lock = Lock()

        self.mongo_uri = uri or self.get_mongo()
        self.username = None
        self.password = None

        # 加入配置文件地址 
Example #23
Source File: create2_api.py    From SenseAct with BSD 3-Clause "New" or "Revised" License
def __init__(self, port, baudrate, timeout=2.0):
        """Inits Create2SerialInterface object with device-specific parameters.

        Creates and opens a serial connection to the Create2 device. Attempts to wake
        the robot by sending additional pulses on the BRC pin.

        Args:
            port:  a string specifying the port to connect, e.g. '/dev/ttyUSB0'
            baudrate: an integer specifying the target baudrate (should always
                be 115200 on start, unless previously changed by `baud` command)
            timeout: a float specifying the timeout for the read/write block
        """
        self._serial = serial.Serial(port=port,
                                     baudrate=baudrate,
                                     bytesize=serial.EIGHTBITS,
                                     parity=serial.PARITY_NONE,
                                     stopbits=serial.STOPBITS_ONE,
                                     timeout=timeout,
                                     writeTimeout=timeout,
                                     xonxoff=False,
                                     rtscts=False,
                                     dsrdtr=False)
        # Create2 seems to support full duplex, but we still don't want two threads doing the
        # same operation at the same time (i.e. two threads both reading, or both writing)
        self._serial_read_lock = Lock()
        self._serial_write_lock = Lock()

        time.sleep(0.1)
        self.pulse()

        self._serial.flush() 
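With one lock per direction, a reader thread and a writer thread can use the full-duplex link concurrently while two readers (or two writers) still exclude each other. A hedged sketch of how read/write methods might use these locks (illustrative, not the SenseAct source):

def read(self, num_bytes):
    # Serialize readers; a concurrent writer is unaffected.
    with self._serial_read_lock:
        return self._serial.read(num_bytes)

def write(self, data):
    # Serialize writers; a concurrent reader is unaffected.
    with self._serial_write_lock:
        self._serial.write(data)
        self._serial.flush()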
Example #24
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def __del__(self):
        # Lock internal
        self.__internalLock__.acquire()
        if not self.__deleting__:
            logger.debug("Bringing down mpEngine")
            self.__deleting__ = True

            while self.num_producers > 0: self.removeProducer(True)
            while self.num_consumers > 0: self.removeConsumer(True)

            logger.debug("mpEngine down")
        # Release internal
        self.__internalLock__.release() 
Example #25
Source File: multiline.py    From PynPoint with GNU General Public License v3.0
def __init__(self,
                 data_port_in: InputPort,
                 tasks_queue_in: multiprocessing.JoinableQueue,
                 data_mutex_in: multiprocessing.Lock,
                 num_proc: int,
                 data_length: int) -> None:
        """
        Parameters
        ----------
        data_port_in : pynpoint.core.dataio.InputPort
            Input port.
        tasks_queue_in : multiprocessing.queues.JoinableQueue
            Tasks queue.
        data_mutex_in : multiprocessing.synchronize.Lock
            A mutex shared with the writer to ensure that no read and write operations happen at
            the same time.
        num_proc : int
            Number of processors.
        data_length : int
            Length of the processed data.

        Returns
        -------
        NoneType
            None
        """

        super(LineReader, self).__init__(data_port_in, tasks_queue_in, data_mutex_in, num_proc)

        self.m_data_length = data_length 
Example #26
Source File: mpEngineProdCons.py    From appcompatprocessor with Apache License 2.0
def __init__(self, maxCores, producer_Class, consumer_Class, governorOffFlag = False):
        logger.debug("mpEngine initializing")
        self.governorOffFlag = governorOffFlag
        self.maxCores = maxCores
        self.__deleting__ = False
        self.__internalLock__ = multiprocessing.Lock()
        self.killed_event = multiprocessing.Event()

        # Producers
        self.num_producers = 0
        self.next_worker_num = 0
        self.producer_Class = producer_Class
        self.producer_pool = []
        self.producer_pool_exitEvent = []
        self.producer_task_queue = multiprocessing.JoinableQueue()
        self.producer_results_queue = multiprocessing.JoinableQueue()
        self.producer_pool_progress = multiprocessing.Value('i', 0)

        # Consumers
        self.num_consumers = 0
        self.next_consumer_num = 0
        self.consumer_Class = consumer_Class
        self.consumer_pool = []
        # Note: consumer_pool_exitEvent is used both to notify a worker it should end and for the worker to notify it has done so
        self.consumer_pool_exitEvent = []
        self.consumer_task_queue = self.producer_results_queue
        self.consumer_results_queue = multiprocessing.JoinableQueue()
        self.consumer_pool_progress = multiprocessing.Value('i', 0)

        # Tasks
        self.num_tasks = multiprocessing.Value('i', 0)
        self.tasks_added = False

        # Rebalance checks
        self._rebalance_last_kick = datetime.now()
        self.rebalance_backoff_timer = 60 * 1
        self._rebalance_mem_last_kick = datetime.now()
        self.rebalance_mem_backoff_timer = 60 * 2 
Example #27
Source File: multiproc.py    From PynPoint with GNU General Public License v3.0
def __init__(self,
                 result_queue_in: multiprocessing.JoinableQueue,
                 data_out_port_in: Optional[OutputPort],
                 data_mutex_in: multiprocessing.Lock) -> None:
        """
        Parameters
        ----------
        result_queue_in : multiprocessing.queues.JoinableQueue
            The result queue.
        data_out_port_in : pynpoint.core.dataio.OutputPort, None
            The output port where the results will be stored.
        data_mutex_in : multiprocessing.synchronize.Lock
            A mutex that is shared with the :class:`~pynpoint.util.multiproc.TaskWriter` which
            ensures that read and write operations to the database do not occur simultaneously.

        Returns
        -------
        NoneType
            None
        """

        multiprocessing.Process.__init__(self)

        self.m_result_queue = result_queue_in
        self.m_data_mutex = data_mutex_in
        self.m_data_out_port = data_out_port_in 
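The shared data_mutex_in is what actually keeps database reads and writes from overlapping; a hedged sketch of how a writer's run() loop might use it (illustrative, assuming the output port exposes an append()-style write and a None poison pill ends the queue):

def run(self):
    while True:
        result = self.m_result_queue.get()
        try:
            if result is None:          # assumed poison pill: stop writing
                break
            with self.m_data_mutex:
                # Write under the mutex so readers never observe a
                # half-written block.
                self.m_data_out_port.append(result)
        finally:
            self.m_result_queue.task_done()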
Example #28
Source File: multiproc.py    From PynPoint with GNU General Public License v3.0
def __init__(self,
                 data_port_in: Optional[InputPort],
                 tasks_queue_in: multiprocessing.JoinableQueue,
                 data_mutex_in: Optional[multiprocessing.Lock],
                 num_proc: int) -> None:
        """
        Parameters
        ----------
        data_port_in : pynpoint.core.dataio.InputPort, None
            An input port which links to the data that has to be processed.
        tasks_queue_in : multiprocessing.queues.JoinableQueue
            The central task queue.
        data_mutex_in : multiprocessing.synchronize.Lock, None
            A mutex shared with the writer to ensure that no read and write operations happen at
            the same time.
        num_proc : int
            Maximum number of instances of :class:`~pynpoint.util.multiproc.TaskProcessor` that run
            simultaneously.

        Returns
        -------
        NoneType
            None
        """

        multiprocessing.Process.__init__(self)

        self.m_data_in_port = data_port_in
        self.m_task_queue = tasks_queue_in
        self.m_data_mutex = data_mutex_in
        self.m_num_proc = num_proc 
Example #29
Source File: multistack.py    From PynPoint with GNU General Public License v3.0
def __init__(self,
                 data_port_in: InputPort,
                 tasks_queue_in: multiprocessing.JoinableQueue,
                 data_mutex_in: multiprocessing.Lock,
                 num_proc: int,
                 stack_size: int,
                 result_shape: tuple) -> None:
        """
        Parameters
        ----------
        data_port_in : pynpoint.core.dataio.InputPort
            Input port.
        tasks_queue_in : multiprocessing.queues.JoinableQueue
            Tasks queue.
        data_mutex_in : multiprocessing.synchronize.Lock
            A mutex shared with the writer to ensure that no read and write operations happen at
            the same time.
        num_proc : int
            Number of processors.
        stack_size: int
            Number of images per stack.
        result_shape : tuple(int, )
            Shape of the array with the output results (usually a stack of images).

        Returns
        -------
        NoneType
            None
        """

        super(StackReader, self).__init__(data_port_in, tasks_queue_in, data_mutex_in, num_proc)

        self.m_stack_size = stack_size
        self.m_result_shape = result_shape 
Example #30
Source File: multipca.py    From PynPoint with GNU General Public License v3.0
def __init__(self,
                 result_queue_in: multiprocessing.JoinableQueue,
                 mean_out_port: Optional[OutputPort],
                 median_out_port: Optional[OutputPort],
                 weighted_out_port: Optional[OutputPort],
                 clip_out_port: Optional[OutputPort],
                 data_mutex_in: multiprocessing.Lock,
                 requirements: Tuple[bool, bool, bool, bool]) -> None:
        """
        Constructor of PcaTaskWriter.

        Parameters
        ----------
        result_queue_in : multiprocessing.queues.JoinableQueue
            Input result queue.
        mean_out_port : pynpoint.core.dataio.OutputPort
            Output port with the mean residuals. Not used if set to None.
        median_out_port : pynpoint.core.dataio.OutputPort
            Output port with the median residuals. Not used if set to None.
        weighted_out_port : pynpoint.core.dataio.OutputPort
            Output port with the noise-weighted residuals. Not used if set to None.
        clip_out_port : pynpoint.core.dataio.OutputPort
            Output port with the clipped mean residuals. Not used if set to None.
        data_mutex_in : multiprocessing.synchronize.Lock
            A mutual exclusion variable which ensures that read and write operations do not occur simultaneously.
        requirements : tuple(bool, bool, bool, bool)
            Required output residuals.

        Returns
        -------
        NoneType
            None
        """

        super(PcaTaskWriter, self).__init__(result_queue_in, None, data_mutex_in)

        self.m_mean_out_port = mean_out_port
        self.m_median_out_port = median_out_port
        self.m_weighted_out_port = weighted_out_port
        self.m_clip_out_port = clip_out_port
        self.m_requirements = requirements