Python resource.RLIM_INFINITY Examples

The following are 21 code examples of resource.RLIM_INFINITY (a constant, not a callable). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module resource, or try the search function.
Example #1
Source File: core.py    From daemonocle with MIT License 7 votes vote down vote up
def _reset_file_descriptors(self):
        """Close open file descriptors and redirect standard streams."""
        if not self.close_open_files:
            # Only STDIN, STDOUT, and STDERR need resetting.
            max_fds = 3
        else:
            # Use the hard RLIMIT_NOFILE ceiling; fall back to a sane
            # default when the kernel reports the limit as unbounded.
            hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
            if hard_limit == resource.RLIM_INFINITY:
                max_fds = 2048
            else:
                max_fds = hard_limit

        for fd in range(max_fds):
            try:
                os.close(fd)
            except OSError:
                # Descriptor was not open in the first place.
                pass

        # Point the three standard streams at /dev/null.
        devnull = os.open(os.devnull, os.O_RDWR)
        for std_fd in (0, 1, 2):
            os.dup2(devnull, std_fd)
Example #2
Source File: util.py    From jbox with MIT License 6 votes vote down vote up
def get_maxfd():
    # Prefer the hard RLIMIT_NOFILE ceiling; substitute the module-level
    # MAXFD default when the kernel reports the limit as unbounded.
    hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    return MAXFD if hard_limit == resource.RLIM_INFINITY else hard_limit
Example #3
Source File: test_setup.py    From avocado-vt with GNU General Public License v2.0 6 votes vote down vote up
def _set(self):
        """Apply ulimit values requested via ``vt_ulimit_<key>`` params.

        For each configured ulimit key, remembers the current
        ``(soft, hard)`` tuple in ``self.ulimit`` (so it can be restored
        later) and then sets both soft and hard limits to the requested
        value. Unsupported values are reported via ``self.test.error``.
        """
        self.ulimit = {}
        for key in self.ulimit_options:
            set_value = self.params.get("vt_ulimit_%s" % key)
            if not set_value:
                continue
            # get default ulimit values in tuple (soft, hard)
            self.ulimit[key] = resource.getrlimit(self.ulimit_options[key])

            logging.info("Setting ulimit %s to %s." % (key, set_value))
            # Bug fix: the original only matched the misspelling
            # "ulimited". Accept the correct spelling "unlimited" while
            # keeping the historical value working for old configs.
            if set_value in ("unlimited", "ulimited"):
                set_value = resource.RLIM_INFINITY
            elif set_value.isdigit():
                set_value = int(set_value)
            else:
                self.test.error("%s is not supported for "
                                "setting ulimit %s" % (set_value, key))
            try:
                resource.setrlimit(self.ulimit_options[key],
                                   (set_value, set_value))
            except ValueError as error:
                self.test.error(str(error))
Example #4
Source File: lib.py    From edgedb with Apache License 2.0 6 votes vote down vote up
def get_max_fileno(default: int=2048):
    """Return the maximum number of open file descriptors."""
    # Hard RLIMIT_NOFILE ceiling; fall back to *default* when unbounded.
    hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    return default if hard_limit == resource.RLIM_INFINITY else hard_limit
Example #5
Source File: tools.py    From plaso with Apache License 2.0 6 votes vote down vote up
def _EnforceProcessMemoryLimit(self, memory_limit):
    """Enforces a process memory limit.

    Args:
      memory_limit (int): maximum number of bytes the process is allowed
          to allocate, where 0 represents no limit and None a default of
          4 GiB.
    """
    # The resource module is unavailable (falsy) on Windows.
    if not resource:
      return

    if memory_limit is None:
      # Default limit: 4 GiB.
      memory_limit = 4 * 1024 * 1024 * 1024
    elif memory_limit == 0:
      memory_limit = resource.RLIM_INFINITY

    resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))
Example #6
Source File: util.py    From Flask-P2P with MIT License 6 votes vote down vote up
def get_maxfd():
    """Return the hard open-files limit, or MAXFD when it is unbounded."""
    limits = resource.getrlimit(resource.RLIMIT_NOFILE)
    if limits[1] == resource.RLIM_INFINITY:
        return MAXFD
    return limits[1]
Example #7
Source File: _daemonize_unix.py    From py_daemoniker with The Unlicense 6 votes vote down vote up
def _autoclose_files(shielded=None, fallback_limit=1024):
    ''' Automatically close any open file descriptors.

    shielded is iterable of file descriptors.
    '''
    # Normalize the shielded collection.
    shielded = default_to(shielded, [])

    # getrlimit returns (soft, hard); the hard limit is always the
    # greater of the two.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

    # We cannot iterate up to RLIM_INFINITY, so pick the first finite
    # value in order of preference: hard, then soft, then the fallback.
    if hard != resource.RLIM_INFINITY:
        fdlimit = hard
    elif soft != resource.RLIM_INFINITY:
        fdlimit = soft
    else:
        fdlimit = fallback_limit

    # Skip fds 0, 1, 2 (stdin, stdout, stderr) and any shielded fds.
    ranges_to_close = _make_range_tuples(
        start = 3,
        stop = fdlimit,
        exclude = shielded
    )
    for start, stop in ranges_to_close:
        # os.closerange closes a whole span in one call.
        os.closerange(start, stop)
Example #8
Source File: daemon.py    From luscan-devel with GNU General Public License v2.0 6 votes vote down vote up
def get_maximum_file_descriptors():
    """ Return the maximum number of open file descriptors for this process.

        The process hard limit for RLIMIT_NOFILE is returned; if the
        kernel reports that limit as infinite, the module-level ``MAXFD``
        default is used instead.

        """
    _, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard_limit == resource.RLIM_INFINITY:
        return MAXFD
    return hard_limit
Example #9
Source File: process.py    From mitogen with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def increase_open_file_limit():
    """
    #549: raise :data:`resource.RLIMIT_NOFILE` from its soft limit to its
    hard limit when the two differ, to reduce the chance of hitting an
    open-files limit. Low soft limits with much higher hard limits are a
    common default configuration.
    """
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard != resource.RLIM_INFINITY:
        hard_s = str(hard)
    else:
        hard_s = '(infinity)'
        # cap in case of O(RLIMIT_NOFILE) algorithm in some subprocess.
        hard = 524288

    LOG.debug('inherited open file limits: soft=%d hard=%s', soft, hard_s)
    if soft >= hard:
        LOG.debug('max open files already set to hard limit: %d', hard)
        return

    # OS X is limited by the kern.maxfilesperproc sysctl rather than the
    # advertised unlimited hard RLIMIT_NOFILE; try the known default as a
    # second candidate instead of querying the sysctl.
    for candidate in (hard, 10240):
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (candidate, hard))
        except ValueError as e:
            LOG.debug('could not raise soft open file limit from %d to %d: %s',
                      soft, candidate, e)
        else:
            LOG.debug('raised soft open file limit from %d to %d', soft, candidate)
            break
Example #10
Source File: daemon.py    From virt-who with GNU General Public License v2.0 6 votes vote down vote up
def get_maximum_file_descriptors():
    """ Return the maximum number of open file descriptors for this process.

        Looks up the process hard limit on open file descriptors and
        substitutes the ``MAXFD`` default whenever that limit is
        reported as infinite.

        """
    hard = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    return MAXFD if hard == resource.RLIM_INFINITY else hard
Example #11
Source File: qemu_runner.py    From tracer with BSD 2-Clause "Simplified" License 5 votes vote down vote up
def __get_rlimit_func(self):
        def set_rlimits():
            # Let the traced child dump unbounded cores, but cap file
            # size so the trace log cannot grow past trace_log_limit.
            unlimited = (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
            resource.setrlimit(resource.RLIMIT_CORE, unlimited)
            log_cap = (self.trace_log_limit, self.trace_log_limit)
            resource.setrlimit(resource.RLIMIT_FSIZE, log_cap)

        return set_rlimits
Example #12
Source File: custom_runner.py    From pov_fuzzing with BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _setup_env(self):
        """Generator used as a context manager: run the target inside a
        fresh tmpfs working directory with core dumps enabled.

        Yields:
            tuple: (tmpdir, path of the first binary). On exit the temp
            directory is removed and the working directory, core rlimit
            and binary list are restored.
        """
        prefix = "/dev/shm/tracer_"
        curdir = os.getcwd()
        tmpdir = tempfile.mkdtemp(prefix=prefix)
        # dont prefilter the core
        if len(self.binaries) > 1:
            # Bug fix: open in text mode. /proc expects an ASCII hex
            # mask, and writing a str to a "wb" file raises TypeError
            # on Python 3.
            with open("/proc/self/coredump_filter", "w") as f:
                f.write("00000077")

        # allow cores to be dumped
        saved_limit = resource.getrlimit(resource.RLIMIT_CORE)
        resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
        binaries_old = []
        for binary in self.binaries:
            binaries_old.append(os.path.abspath(binary))

        self.binaries = list(binaries_old)

        os.chdir(tmpdir)

        try:
            yield (tmpdir, self.binaries[0])

        finally:
            # Restore cwd, core limit and binary paths even on error.
            assert tmpdir.startswith(prefix)
            shutil.rmtree(tmpdir)
            os.chdir(curdir)
            resource.setrlimit(resource.RLIMIT_CORE, saved_limit)
            self.binaries = binaries_old
Example #13
Source File: environment.py    From jvmquake with Apache License 2.0 5 votes vote down vote up
def core_ulimit():
    import resource
    # Remember the current core-dump limits so they can be restored.
    saved_soft, saved_hard = resource.getrlimit(resource.RLIMIT_CORE)
    unlimited = (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
    resource.setrlimit(resource.RLIMIT_CORE, unlimited)
    yield
    # Fixture body finished: put the original limits back.
    resource.setrlimit(resource.RLIMIT_CORE, (saved_soft, saved_hard))
Example #14
Source File: linux.py    From synapse with Apache License 2.0 5 votes vote down vote up
def getMaxLockedMemory():
    '''
    Returns the maximum amount of memory this process can lock
    '''
    # TODO: consider CAP_IPC_LOCK capability
    hard = resource.getrlimit(resource.RLIMIT_MEMLOCK)[1]
    if hard == resource.RLIM_INFINITY:
        # Treat "no limit" as the largest unsigned 64-bit value.
        return 2 ** 64 - 1
    return hard
Example #15
Source File: daemon.py    From dusty with MIT License 5 votes vote down vote up
def _increase_file_handle_limit():
    """Raise the open file handles permitted by the Dusty daemon process
    and its child processes. The number we choose here needs to be within
    the OS X default kernel hard limit, which is 10240."""
    target = constants.FILE_HANDLE_LIMIT
    logging.info('Increasing file handle limit to {}'.format(target))
    new_limits = (target, resource.RLIM_INFINITY)
    resource.setrlimit(resource.RLIMIT_NOFILE, new_limits)
Example #16
Source File: test_resource.py    From medicare-demo with Apache License 2.0 5 votes vote down vote up
def test_fsize_ismax(self):
        try:
            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_FSIZE)
        except AttributeError:
            # Platform does not expose RLIMIT_FSIZE; nothing to verify.
            pass
        else:
            # RLIMIT_FSIZE should be RLIM_INFINITY, which on platforms
            # with large file support is a very big number. Verify that
            # get/setrlimit round-trip it through a C long long without
            # raising a conversion error.
            self.assertEqual(resource.RLIM_INFINITY, hard_limit)
            resource.setrlimit(resource.RLIMIT_FSIZE, (soft_limit, hard_limit))
Example #17
Source File: servermanager.py    From ffw with GNU General Public License v3.0 5 votes vote down vote up
def preexec_fn():
    # Runs in the child before exec: lift any cap on core dump size.
    no_limit = (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
    resource.setrlimit(resource.RLIMIT_CORE, no_limit)
Example #18
Source File: cm_jedi.py    From nvim-completion-manager with MIT License 5 votes vote down vote up
def __init__(self, nvim):
        """Initialize the jedi completion source and cap its memory use."""
        Base.__init__(self, nvim)
        self._snippet_engine = nvim.vars['cm_completed_snippet_engine']

        # workaround for #62: cap the data segment to a third of physical
        # memory so a runaway completion cannot exhaust the host. Best
        # effort only -- failures are logged, never fatal.
        try:
            import resource
            import psutil
            mem = psutil.virtual_memory()
            # Bug fix: use integer division. mem.total / 3 is a float on
            # Python 3 and resource.setrlimit() rejects float limits
            # (the error was silently swallowed by the except below).
            resource.setrlimit(resource.RLIMIT_DATA,
                               (mem.total // 3, resource.RLIM_INFINITY))
        except Exception as ex:
            logger.exception("set RLIMIT_DATA failed. %s", ex)
Example #19
Source File: targetutils.py    From ffw with GNU General Public License v3.0 5 votes vote down vote up
def setupEnvironment(config):
    """
    Prepare the environment before the server is started.

    For example asan options, working directory, ASLR and ulimit.
    Note that for honggfuzz mode, most of this is ignored.
    """
    # Silence warnings from the ptrace library
    #logging.getLogger().setLevel(logging.ERROR)

    # Most important is to set log_path so we have access to the asan logs
    asanOpts = ""
    asanOpts += "color=never:verbosity=0:leak_check_at_exit=false:"
    asanOpts += "abort_on_error=true:log_path=" + config["temp_dir"] + "/asan"
    os.environ["ASAN_OPTIONS"] = asanOpts

    # Tell Glibc to abort on heap corruption but not dump a bunch of output
    os.environ["MALLOC_CHECK_"] = "2"

    # Check ASLR status
    if "ignore_aslr_status" in config and config["ignore_aslr_status"] is False:
        aslrStatusFile = "/proc/sys/kernel/randomize_va_space"
        d = ""
        with open(aslrStatusFile, "r") as f:
            d = f.read()

            # Bug fix: compare values, not identities. The original
            # `d is not "0"` checked object identity and was effectively
            # always true; the file content also carries a trailing
            # newline, so strip before comparing.
            if "disable_aslr_check" not in config and d.strip() != "0":
                logging.error("ASLR Enabled, please disable it:")
                logging.error(" echo 0 | sudo tee /proc/sys/kernel/randomize_va_space")
                sys.exit(1)

    # set resources
    if 'handle_corefiles' in config and config['handle_corefiles']:
        resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    # set working directory
    os.chdir(config["target_dir"])
Example #20
Source File: NTLMAuthHandler.py    From pth-toolkit with BSD 2-Clause "Simplified" License 4 votes vote down vote up
def _daemonize(self):
        """Detach into a daemon process (Python 2 era fork recipe).

        Detaches from the controlling terminal, closes inherited file
        descriptors (except stderr), frees some module references, forks,
        and lets the child continue via self._run_as_daemon().
        """
        # this code is inspired by the recipe described here:
        # http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
        setsid()

        import resource
        MAXFD = 1024
            # we close all open file descriptors != sys.stderr.fileno();
            # when the hard limit is infinite, fall back to MAXFD
        maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
        if (maxfd == resource.RLIM_INFINITY):
            maxfd = MAXFD
  
        if hasattr(sys.stderr, "fileno"):
            stderr_fileno = sys.stderr.fileno()
        else:
            # mod_wsgi replace sys.stderr with a fake file without a "fileno"
            # method
            stderr_fileno = -1
        # close every fd >= 3 (stderr is skipped via the check below)
        for fd in xrange(3, maxfd):
            if fd != stderr_fileno:
                try:
                    close_fd(fd)
                except OSError:
                    # descriptor was not open; nothing to do
                    pass

        # free up some memory
        global httplib

        del httplib
        del resource

        # print >> sys.stderr, "cleanup done"

        child_pid = fork()
        if child_pid > 0:
            # parent exits immediately; the child is reparented to init
            _exit(0)

        print >> sys.stderr, ("NTLMAuthHandler daemon spawned with pid %d"
                              % getpid())
        
        # forked processes inherits lock created by flock, so we need to
        # unlock the file here
        # flock(lockf.fileno(), LOCK_UN)
        # lockf.close()

        # we are the daemon
        self._run_as_daemon()
Example #21
Source File: coredumps.py    From hase with BSD 2-Clause "Simplified" License 4 votes vote down vote up
def __enter__(self) -> Coredump:
        """Install a custom core-dump handler for this process.

        Writes a small helper shell script, registers it with the kernel,
        widens the per-process coredump filter, and returns a ``Coredump``
        handle for the capture.

        NOTE(review): assumes HANDLER_PATH is the kernel core-pattern file
        and COREDUMP_FILTER_PATH the per-process filter file -- confirm
        where these constants are defined.
        """
        kill_command = which("kill")
        assert kill_command is not None

        # delete=False: the kernel must still find the script on disk
        # after this method returns; chmod 0o755 makes it executable.
        self.handler_script = NamedTemporaryFile(
            prefix="core_handler", delete=False, mode="w+"
        )
        os.chmod(self.handler_script.name, 0o755)
        # core-pattern entries are limited in length; keep the path short
        assert len(self.handler_script.name) < 128

        script_template = """#!/bin/sh
exec 1>>{log_path}
exec 2>&1

{kill} -SIGUSR2 "{pid}"

export PYTHONPATH={pythonpath}

exec {python} -m hase.record.coredump_handler {fifo_path} {core_file} {manifest_path} "$@"
"""

        # quote() every interpolated path so the generated shell script
        # survives spaces and shell metacharacters.
        script_content = script_template.format(
            kill=kill_command,
            pid=os.getpid(),
            python=quote(sys.executable),
            pythonpath=":".join(sys.path),
            fifo_path=quote(self.fifo_path),
            core_file=quote(self.core_file),
            log_path=quote(self.log_path),
            manifest_path=quote(self.manifest_path),
        )

        self.handler_script.write(script_content)
        self.handler_script.close()

        inf = resource.RLIM_INFINITY
        # Allow unbounded core dumps; the previous limits are saved so
        # they can be restored later.
        self.old_core_rlimit = resource.getrlimit(resource.RLIMIT_CORE)
        resource.setrlimit(resource.RLIMIT_CORE, (inf, inf))

        with open(HANDLER_PATH, "r+") as f, open(
            COREDUMP_FILTER_PATH, "w+"
        ) as filter_file:
            # keep the old pattern so it can be restored on teardown
            self.previous_pattern = f.read()
            f.seek(0)
            extra_args = " ".join(EXTRA_CORE_DUMP_PARAMETER.values())
            # the leading "|" makes the kernel pipe cores to our script
            f.write("|{} {}".format(self.handler_script.name, extra_args))

            # just dump everything into core dumps and worry later
            filter_file.write("0xff\n")
            filter_file.flush()

            return Coredump(self.core_file, self.fifo_path)