Python eventlet.spawn_n() Examples

The following are 8 code examples of eventlet.spawn_n(), each taken from an open source project. The originating project, source file, and license are noted above each example.
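
Before the project examples, here is a minimal standalone sketch of the call itself (an illustration, not taken from any of the projects below). eventlet.spawn_n() behaves like eventlet.spawn(), but it is the "fire and forget" variant: it returns immediately and gives the caller no GreenThread handle, return value, or exception to collect.

import eventlet


def greet(name):
    print('hello, %s' % name)


# spawn_n() schedules greet() on a new greenthread and returns immediately.
# Unlike eventlet.spawn(), there is no handle to wait on, and any return
# value or uncaught exception from greet() is not delivered to the caller.
eventlet.spawn_n(greet, 'world')

# Yield to the hub so the spawned greenthread actually gets a chance to run.
eventlet.sleep(0)
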
Example #1
Source File: utils.py    From zun with Apache License 2.0
def spawn_n(func, *args, **kwargs):
    """Passthrough method for eventlet.spawn_n.

    This utility exists so that it can be stubbed for testing without
    interfering with the service spawns.

    It will also grab the context from the threadlocal store and add it to
    the store on the new thread.  This allows for continuity in logging the
    context when using this method to spawn a new thread.
    """
    _context = common_context.get_current()

    @functools.wraps(func)
    def context_wrapper(*args, **kwargs):
        # NOTE: If update_store is not called after spawn_n it won't be
        # available for the logger to pull from threadlocal storage.
        if _context is not None:
            _context.update_store()
        func(*args, **kwargs)

    eventlet.spawn_n(context_wrapper, *args, **kwargs) 
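
As a rough illustration of what the wrapper above buys you (not part of the zun source; the module and function names below are hypothetical, and common_context is assumed to be oslo.context, which is what the get_current() call suggests): constructing a RequestContext normally records it in thread-local storage, and update_store() re-registers it on the spawned greenthread, so log records emitted there still carry the request context.

from oslo_context import context as common_context

from myservice import utils  # hypothetical module holding the spawn_n() wrapper above


def background_task():
    # Logging done here can still resolve the request context, because
    # context_wrapper() called update_store() on this greenthread first.
    pass


def handle_request():
    # Constructing a RequestContext normally stores it in thread-local
    # storage, which is what common_context.get_current() picks up above.
    common_context.RequestContext()
    utils.spawn_n(background_task)
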
Example #2
Source File: utils.py    From masakari with Apache License 2.0
def spawn_n(func, *args, **kwargs):
    """Passthrough method for eventlet.spawn_n.

    This utility exists so that it can be stubbed for testing without
    interfering with the service spawns.

    It will also grab the context from the threadlocal store and add it to
    the store on the new thread.  This allows for continuity in logging the
    context when using this method to spawn a new thread.
    """
    _context = common_context.get_current()

    @functools.wraps(func)
    def context_wrapper(*args, **kwargs):
        # NOTE: If update_store is not called after spawn_n it won't be
        # available for the logger to pull from threadlocal storage.
        if _context is not None:
            _context.update_store()
        func(*args, **kwargs)

    eventlet.spawn_n(context_wrapper, *args, **kwargs) 
Example #3
Source File: service.py    From oslo.service with Apache License 2.0
def _child_process(self, service):
    self._child_process_handle_signal()

    # Reopen the eventlet hub to make sure we don't share an epoll
    # fd with parent and/or siblings, which would be bad
    eventlet.hubs.use_hub()

    # Close write to ensure only parent has it open
    os.close(self.writepipe)
    # Create greenthread to watch for parent to close pipe
    eventlet.spawn_n(self._pipe_watcher)

    # Reseed random number generator
    random.seed()

    launcher = Launcher(self.conf, restart_method=self.restart_method)
    launcher.launch_service(service)
    return launcher
Example #4
Source File: api.py    From st2 with Apache License 2.0
def _run_server():
    host = cfg.CONF.stream.host
    port = cfg.CONF.stream.port

    LOG.info('(PID=%s) ST2 Stream API is serving on http://%s:%s.', os.getpid(), host, port)

    max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    worker_pool = eventlet.GreenPool(max_pool_size)
    sock = eventlet.listen((host, port))

    def queue_shutdown(signal_number, stack_frame):
        eventlet.spawn_n(shutdown_server_kill_pending_requests, sock=sock,
                         worker_pool=worker_pool, wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME)

    # We register a custom SIGINT handler which allows us to kill long running active requests.
    # Note: Eventually we will support draining (waiting for short-running requests), but we
    # will still want to kill long running stream requests.
    register_stream_signal_handlers(handler_func=queue_shutdown)

    wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)
    return 0 
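
Stripped of the st2 specifics, the pattern above is: serve WSGI requests from a GreenPool, and on SIGINT hand the shutdown work to spawn_n() so the signal handler itself returns quickly. A minimal sketch of that shape follows; make_app() and shutdown() are placeholders, not st2 code.

import signal

import eventlet
from eventlet import wsgi


def make_app():
    # Hypothetical WSGI app, standing in for app.setup_app() above.
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'ok\n']
    return app


def shutdown(sock, pool):
    # Placeholder for shutdown_server_kill_pending_requests() above.
    sock.close()


def main():
    sock = eventlet.listen(('127.0.0.1', 8080))
    pool = eventlet.GreenPool(1024)

    def on_sigint(signum, frame):
        # Do the real work on a greenthread so the signal handler returns fast.
        eventlet.spawn_n(shutdown, sock, pool)

    signal.signal(signal.SIGINT, on_sigint)
    wsgi.server(sock, make_app(), custom_pool=pool)
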
Example #5
Source File: listener.py    From st2 with Apache License 2.0
def get_listener(name):
    global _stream_listener
    global _execution_output_listener

    if name == 'stream':
        if not _stream_listener:
            with transport_utils.get_connection() as conn:
                _stream_listener = StreamListener(conn)
                eventlet.spawn_n(listen, _stream_listener)
        return _stream_listener
    elif name == 'execution_output':
        if not _execution_output_listener:
            with transport_utils.get_connection() as conn:
                _execution_output_listener = ExecutionOutputListener(conn)
                eventlet.spawn_n(listen, _execution_output_listener)
        return _execution_output_listener
    else:
        raise ValueError('Invalid listener name: %s' % (name)) 
Example #6
Source File: send.py    From dino with Apache License 2.0
def do_post(self):
    is_valid, msg, json = self.validate_json(self.request, silent=False)
    if not is_valid:
        logger.error('invalid json: %s' % msg)
        raise RuntimeError('invalid json')

    if json is None:
        raise RuntimeError('no json in request')
    if not isinstance(json, dict):
        raise RuntimeError('need a dict')

    eventlet.spawn_n(self.async_post, dict(json))
Example #7
Source File: proc.py    From detox with MIT License
def startloopreport(self):
    if self.toxsession.report.tw.hasmarkup:
        eventlet.spawn_n(self.toxsession.report._loopreport)
Example #8
Source File: runtime.py    From storlets with Apache License 2.0
def communicate(self):
    try:
        self._invoke()

        if not self.srequest.has_fd:
            self._wait_for_write_with_timeout(self._input_data_write_fd)

            # We do the writing in a different thread.
            # Otherwise, we can run into the following deadlock
            # 1. middleware writes to Storlet
            # 2. Storlet reads and starts to write metadata and then data
            # 3. middleware continues writing
            # 4. Storlet continues writing and gets stuck as middleware
            #    is busy writing, but still not consuming the reader end
            #    of the Storlet writer.
            eventlet.spawn_n(self._write_input_data,
                             self._input_data_write_fd,
                             self.srequest.data_iter)

        for source in self.extra_data_sources:
            # NOTE(kota_): not sure right now if using eventlet.spawn_n is
            #              right way. GreenPool is better? I don't get
            #              whole for the dead lock described in above.
            self._wait_for_write_with_timeout(source['write_fd'])
            eventlet.spawn_n(self._write_input_data,
                             source['write_fd'],
                             source['data_iter'])

        out_md = self._read_metadata()
        self._wait_for_read_with_timeout(self.data_read_fd)

        return StorletResponse(out_md, data_fd=self.data_read_fd,
                               cancel=self._cancel)
    except Exception:
        self._close_local_side_descriptors()
        if not self.srequest.has_fd:
            self._close_input_data_descriptors()
        raise
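
The comment block above is the heart of this example: the input writer runs on its own greenthread so the middleware is never stuck writing while the storlet is stuck writing back. Below is a rough sketch of the same shape using small bounded eventlet queues in place of the pipe file descriptors; the names are illustrative, not storlets code.

from eventlet import spawn_n
from eventlet.queue import Queue


def feed(queue, data_iter):
    # Runs on its own greenthread so the caller is free to start reading
    # output while the peer is still consuming input (like _write_input_data).
    for chunk in data_iter:
        queue.put(chunk)
    queue.put(None)  # sentinel: no more input


def consume(in_queue, out_queue):
    # Stand-in for the storlet side: reads input and writes transformed output.
    while True:
        chunk = in_queue.get()
        if chunk is None:
            break
        out_queue.put(chunk.upper())
    out_queue.put(None)


def run():
    in_q = Queue(maxsize=1)    # tiny buffers, so back-pressure kicks in early
    out_q = Queue(maxsize=1)

    spawn_n(consume, in_q, out_q)
    # Writing from a separate greenthread lets this greenthread start reading
    # out_q right away, so neither side can block the other indefinitely.
    spawn_n(feed, in_q, iter([b'a', b'b', b'c']))

    results = []
    while True:
        item = out_q.get()
        if item is None:
            break
        results.append(item)
    return results
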