Python os.cpu_count() Examples

The following are 25 code examples of os.cpu_count(). Each example is drawn from the project and source file named above it, so you can follow those references back to the original code. You may also want to check out the other available functions and classes of the os module.
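os.cpu_count() returns the number of logical CPUs in the system, or None when the count cannot be determined, so most of the examples below pair it with some kind of fallback. As a minimal, stdlib-only sketch of that common pattern:

import os

# os.cpu_count() may return None, so fall back to a single worker.
workers = os.cpu_count() or 1
print(f'Using {workers} worker(s)')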
Example #1
Source File: __init__.py    From teleport with Apache License 2.0
def cpu_count(logical=True):
    """Return the number of logical CPUs in the system (same as
    os.cpu_count() in Python 3.4).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    if logical:
        ret = _psplatform.cpu_count_logical()
    else:
        ret = _psplatform.cpu_count_physical()
    if ret is not None and ret < 1:
        ret = None
    return ret 
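A short usage sketch of the psutil API documented above (this assumes psutil is installed; both calls may return None on exotic platforms):

import psutil

logical = psutil.cpu_count()                 # logical CPUs, same idea as os.cpu_count()
physical = psutil.cpu_count(logical=False)   # physical cores only
print(logical, physical)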
Example #2
Source File: main.py    From learning2run with MIT License
def workers(master_host, master_port, relay_socket_path, num_workers):
    # Start the relay
    master_redis_cfg = {'host': master_host, 'port': master_port}
    relay_redis_cfg = {'unix_socket_path': relay_socket_path}
    if os.fork() == 0:
        RelayClient(master_redis_cfg, relay_redis_cfg).run()
        return
    # Start the workers
    noise = SharedNoiseTable()  # Workers share the same noise
    num_workers = num_workers if num_workers else os.cpu_count()
    print('Spawning workers')
    logging.info('Spawning {} workers'.format(num_workers))
    for _ in range(num_workers):
        if os.fork() == 0:
            run_worker(relay_redis_cfg, noise=noise)
            return
    os.wait() 
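The loop above forks one worker per CPU and then waits for the children; a simplified, POSIX-only sketch of that fork-per-core pattern (spawn_workers and target are illustrative names, not part of the project):

import os

def spawn_workers(target, num_workers=None):
    num_workers = num_workers or os.cpu_count() or 1
    for _ in range(num_workers):
        if os.fork() == 0:   # child process
            target()
            os._exit(0)      # never fall back into the parent's loop
    for _ in range(num_workers):
        os.wait()            # reap each child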
Example #3
Source File: __init__.py    From vnpy_crypto with MIT License
def cpu_count(logical=True):
    """Return the number of logical CPUs in the system (same as
    os.cpu_count() in Python 3.4).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    if logical:
        ret = _psplatform.cpu_count_logical()
    else:
        ret = _psplatform.cpu_count_physical()
    if ret is not None and ret < 1:
        ret = None
    return ret 
Example #4
Source File: __init__.py    From psutil with BSD 3-Clause "New" or "Revised" License
def cpu_count(logical=True):
    """Return the number of logical CPUs in the system (same as
    os.cpu_count() in Python 3.4).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    if logical:
        ret = _psplatform.cpu_count_logical()
    else:
        ret = _psplatform.cpu_count_physical()
    if ret is not None and ret < 1:
        ret = None
    return ret 
Example #5
Source File: utils.py    From dwave-hybrid with Apache License 2.0
def cpu_count():    # pragma: no cover
    try:
        import os
        # doesn't exist in python2, and can return None
        return os.cpu_count() or 1
    except AttributeError:
        pass

    try:
        import multiprocessing
        # doesn't have to be implemented
        return multiprocessing.cpu_count()
    except NotImplementedError:
        pass

    return 1 
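The helper above exists because os.cpu_count() was only added in Python 3.4 and multiprocessing.cpu_count() may raise NotImplementedError. Targeting Python 3 only, the same fallback collapses to a one-liner:

import os

workers = os.cpu_count() or 1   # never None, never 0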
Example #6
Source File: server.py    From distex with BSD 2-Clause "Simplified" License
async def handle_request(self, reader, writer):
        req_host, req_port = writer.get_extra_info('peername')
        peername = f'{req_host}:{req_port}'
        self._logger.info(f'Connection from {peername}')
        data = await reader.readline()
        nw, port, worker_loop, func_pickle, data_pickle = data.split()
        num_workers = int(nw) or os.cpu_count()
        self._logger.info(
            f'Starting up {num_workers} processors for {peername}')

        # start processors that will connect back to the remote server
        asyncio.gather(
            *[asyncio.create_subprocess_exec(
                'distex_proc',
                '-H', req_host,
                '-p', port,
                '-l', worker_loop,
                '-f', func_pickle,
                '-d', data_pickle,
                stdout=None, stderr=None)
                for _ in range(num_workers)])

        writer.close() 
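The expression int(nw) or os.cpu_count() above treats a requested count of 0 as "use every available core". The same convention as a standalone sketch (resolve_worker_count is an illustrative name):

import os

def resolve_worker_count(requested: int) -> int:
    # 0 means one processor per core; fall back to 1 if the count is unknown.
    return requested or os.cpu_count() or 1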
Example #7
Source File: chainstart.py    From komodo-cctools-python with MIT License
def enable_mining(proxy):
    cores = os.cpu_count()
    if cores > 2:
        threads_count = cores - 2
    else:
        threads_count = 1
    tries = 0
    while True:
        try:
            proxy.setgenerate(True, threads_count)
            break
        except (RPCError, HttpError) as e:
            print(e, " Waiting chain startup\n")
            time.sleep(10)
            tries += 1
        if tries > 30:
            raise ChildProcessError("Node did not start correctly, aborting\n") 
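The thread-count logic above keeps two cores free for the node itself; a hedged sketch of the same idea that also guards against os.cpu_count() returning None (mining_threads is an illustrative name):

import os

def mining_threads(reserved: int = 2) -> int:
    cores = os.cpu_count() or 1        # cpu_count() can be None on unusual platforms
    return max(cores - reserved, 1)    # always leave at least one mining thread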
Example #8
Source File: cmd_utils.py    From godot-mono-builds with MIT License
def add_base_arguments(parser, default_help):
    import os
    from os.path import join as path_join

    home = os.environ.get('HOME')
    mono_sources_default = os.environ.get('MONO_SOURCE_ROOT', '')

    parser.add_argument('--verbose-make', action='store_true', default=False, help=default_help)
    # --jobs supports not passing an argument, in which case the 'const' is used,
    # which is the number of CPU cores on the host system.
    parser.add_argument('--jobs', '-j', nargs='?', const=str(os.cpu_count()), default='1', help=default_help)
    parser.add_argument('--configure-dir', default=path_join(home, 'mono-configs'), help=default_help)
    parser.add_argument('--install-dir', default=path_join(home, 'mono-installs'), help=default_help)

    if mono_sources_default:
        parser.add_argument('--mono-sources', default=mono_sources_default, help=default_help)
    else:
        parser.add_argument('--mono-sources', required=True)

    parser.add_argument('--mxe-prefix', default='/usr', help=default_help) 
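To make the --jobs comment concrete, here is a small argparse sketch of how nargs='?' with const behaves (the parser below is illustrative, not part of the build script):

import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--jobs', '-j', nargs='?', const=str(os.cpu_count()), default='1')

print(parser.parse_args([]).jobs)           # '1': flag omitted, the default is used
print(parser.parse_args(['-j']).jobs)       # CPU count: flag given without a value, const is used
print(parser.parse_args(['-j', '4']).jobs)  # '4': an explicit value wins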
Example #9
Source File: __init__.py    From pytorch-saltnet with MIT License
def get_num_workers(jobs):
    """
    Parameters
    ----------
    jobs : how many jobs to run in parallel. Zero or a negative value means use all CPU cores minus abs(jobs).

    Returns
    -------
    How many subprocesses to use
    """
    num_workers = jobs
    if num_workers <= 0:
        num_workers = os.cpu_count() + jobs
    if num_workers < 0 or num_workers > os.cpu_count():
        raise RuntimeError("System doesn't have so many cpu cores: {} vs {}".format(jobs, os.cpu_count()))
    return num_workers 
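Assuming an 8-core machine (os.cpu_count() == 8) purely to make the numbers concrete, the helper above behaves like this:

get_num_workers(4)    # -> 4: an explicit positive value is used as-is
get_num_workers(0)    # -> 8: zero means "use every core"
get_num_workers(-1)   # -> 7: negative values leave that many cores free
get_num_workers(9)    # raises RuntimeError: more workers than the system has cores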
Example #10
Source File: parallel.py    From tox with MIT License
def auto_detect_cpus():
    try:
        from os import sched_getaffinity  # python 3 only

        def cpu_count():
            return len(sched_getaffinity(0))

    except ImportError:
        # python 2 options
        try:
            from os import cpu_count
        except ImportError:
            from multiprocessing import cpu_count

    try:
        n = cpu_count()
    except NotImplementedError:  # pragma: no cov
        n = None  # pragma: no cov
    return n if n else 1 
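On Python 3 the same preference order can be written without the import gymnastics; a stdlib-only sketch (available_cpus is an illustrative name):

import os

def available_cpus() -> int:
    try:
        # Respects CPU affinity masks (taskset, cgroup cpusets); Linux and a few other Unixes only.
        return len(os.sched_getaffinity(0))
    except AttributeError:
        return os.cpu_count() or 1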
Example #11
Source File: setup.py    From QCustomPlot-PyQt5 with MIT License
def __build_qcustomplot_library(self):
        if WINDOWS_HOST:
            qcustomplot_static = join(self.build_temp, 'release', 'qcustomplot.lib')
        else:
            qcustomplot_static = join(self.build_temp, 'libqcustomplot.a')
        if exists(qcustomplot_static):
            return

        os.makedirs(self.build_temp, exist_ok=True)
        os.chdir(self.build_temp)
        print('Make static qcustomplot library...')
        self.spawn([self.qmake, join(ROOT, 'QCustomPlot/src/qcp-staticlib.pro')])
        # AFAIK only nmake does not support -j option
        has_multiprocess = not (WINDOWS_HOST and "nmake" in self.make)
        make_cmdline = [self.make]
        if has_multiprocess:
            make_cmdline.extend(('-j', str(os.cpu_count())))
        make_cmdline.append('release')
        self.spawn(make_cmdline)

        os.chdir(ROOT)
        self.static_lib = qcustomplot_static
        # Possibly it's hack
        qcustomplot_ext = self.extensions[0]
        qcustomplot_ext.extra_objects = [qcustomplot_static] 
Example #12
Source File: datasets.py    From renku-python with Apache License 2.0
def _add_from_urls(
        self, dataset, urls, destination, destination_names, extract, progress
    ):
        files = []
        max_workers = min(os.cpu_count() - 1, 4) or 1
        with concurrent.futures.ThreadPoolExecutor(max_workers) as executor:
            futures = {
                executor.submit(
                    self._add_from_url,
                    dataset=dataset,
                    url=url,
                    destination=destination,
                    extract=extract,
                    filename=name,
                    progress=progress
                )
                for url, name in zip(urls, destination_names)
            }

            for future in concurrent.futures.as_completed(futures):
                files.extend(future.result())

        return files 
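The executor setup above leaves one core free and caps the pool at four threads; a self-contained sketch of the same pattern that also guards against os.cpu_count() returning None (download_all and fetch are illustrative names):

import concurrent.futures
import os

def download_all(urls, fetch):
    # Leave one core free, but never use more than 4 threads or fewer than 1.
    max_workers = min((os.cpu_count() or 1) - 1, 4) or 1
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers) as executor:
        futures = {executor.submit(fetch, url) for url in urls}
        for future in concurrent.futures.as_completed(futures):
            results.append(future.result())
    return results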
Example #13
Source File: main.py    From evolution-strategies-starter with MIT License
def workers(master_host, master_port, relay_socket_path, num_workers):
    # Start the relay
    master_redis_cfg = {'host': master_host, 'port': master_port}
    relay_redis_cfg = {'unix_socket_path': relay_socket_path}
    if os.fork() == 0:
        RelayClient(master_redis_cfg, relay_redis_cfg).run()
        return
    # Start the workers
    noise = SharedNoiseTable()  # Workers share the same noise
    num_workers = num_workers if num_workers else os.cpu_count()
    logging.info('Spawning {} workers'.format(num_workers))
    for _ in range(num_workers):
        if os.fork() == 0:
            run_worker(relay_redis_cfg, noise=noise)
            return
    os.wait() 
Example #14
Source File: eris.py    From platform-resource-manager with Apache License 2.0
def init_sysmax(ctx):
    """
    Initialize historical LC tasks maximal utilization from model file
        ctx - agent context
    """
    ctx.sysmax_util = ctx.analyzer.get_lcutilmax()
    if ctx.sysmax_util == 0:
        ctx.sysmax_util = cpu_count() * 100
    if ctx.args.verbose:
        print(ctx.sysmax_util) 
Example #15
Source File: _psbsd.py    From teleport with Apache License 2.0
def cpu_count_physical():
        """Return the number of physical CPUs in the system."""
        # From the C module we'll get an XML string similar to this:
        # http://manpages.ubuntu.com/manpages/precise/man4/smp.4freebsd.html
        # We may get None in case "sysctl kern.sched.topology_spec"
        # is not supported on this BSD version, in which case we'll mimic
        # os.cpu_count() and return None.
        ret = None
        s = cext.cpu_count_phys()
        if s is not None:
            # get rid of padding chars appended at the end of the string
            index = s.rfind("</groups>")
            if index != -1:
                s = s[:index + 9]
                root = ET.fromstring(s)
                try:
                    ret = len(root.findall('group/children/group/cpu')) or None
                finally:
                    # needed otherwise it will memleak
                    root.clear()
        if not ret:
            # If logical CPUs are 1 it's obvious we'll have only 1
            # physical CPU.
            if cpu_count_logical() == 1:
                return 1
        return ret 
Example #16
Source File: index.py    From pantalaimon with Apache License 2.0
def __attrs_post_init__(self):
            self.store_path = self.store_path or self.index_path
            num_searchers = os.cpu_count()
            self.index = Index(self.index_path, num_searchers)
            self.read_semaphore = asyncio.Semaphore(num_searchers or 1)
            self.store = MessageStore(self.user, self.store_path, self.store_name) 
Example #17
Source File: threadpool.py    From soapy_power with MIT License
def __init__(self, max_workers=0, thread_name_prefix='', max_queue_size=0):
        #super().__init__(max_workers or os.cpu_count() or 1, thread_name_prefix)
        super().__init__(max_workers or os.cpu_count() or 1)
        self.max_queue_size = max_queue_size or self._max_workers * 10
        if self.max_queue_size > 0:
            self._work_queue = queue.Queue(self.max_queue_size)
        self.max_queue_size_reached = 0 
Example #18
Source File: __init__.py    From teleport with Apache License 2.0
def cpu_num(self):
            """Return what CPU this process is currently running on.
            The returned number should be <= psutil.cpu_count()
            and <= len(psutil.cpu_percent(percpu=True)).
            It may be used in conjunction with
            psutil.cpu_percent(percpu=True) to observe the system
            workload distributed across CPUs.
            """
            return self._proc.cpu_num()

    # Linux, macOS, Windows, Solaris, AIX 
Example #19
Source File: _psaix.py    From teleport with Apache License 2.0
def cpu_count_logical():
    """Return the number of logical CPUs in the system."""
    try:
        return os.sysconf("SC_NPROCESSORS_ONLN")
    except ValueError:
        # mimic os.cpu_count() behavior
        return None 
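os.cpu_count() itself draws on comparable platform facilities; where os.sysconf is available, the same counter can be queried directly (a stdlib-only sketch):

import os

# Number of processors currently online, i.e. the value mimicked above.
if hasattr(os, 'sysconf') and 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
    print(os.sysconf('SC_NPROCESSORS_ONLN'))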
Example #20
Source File: setup.py    From QCustomPlot-PyQt5 with MIT License
def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    # parallel code
    N = os.cpu_count() # number of parallel compilations
    import multiprocessing.pool
    def _single_compile(obj):
        try: src, ext = build[obj]
        except KeyError: return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
    # convert to list, imap is evaluated on-demand
    list(multiprocessing.pool.ThreadPool(N).imap(_single_compile, objects))
    return objects 
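The same thread-per-core compilation idea as a self-contained sketch with a context-managed pool (compile_all and compile_one are illustrative names):

import os
from multiprocessing.pool import ThreadPool

def compile_all(tasks, compile_one):
    with ThreadPool(os.cpu_count() or 1) as pool:
        # imap is lazy, so force it with list() to make sure every task runs.
        return list(pool.imap(compile_one, tasks))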
Example #21
Source File: _pssunos.py    From teleport with Apache License 2.0
def cpu_count_logical():
    """Return the number of logical CPUs in the system."""
    try:
        return os.sysconf("SC_NPROCESSORS_ONLN")
    except ValueError:
        # mimic os.cpu_count() behavior
        return None 
Example #22
Source File: _pslinux.py    From teleport with Apache License 2.0
def cpu_count_logical():
    """Return the number of logical CPUs in the system."""
    try:
        return os.sysconf("SC_NPROCESSORS_ONLN")
    except ValueError:
        # as a second fallback we try to parse /proc/cpuinfo
        num = 0
        with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
            for line in f:
                if line.lower().startswith(b'processor'):
                    num += 1

        # unknown format (e.g. amrel/sparc architectures), see:
        # https://github.com/giampaolo/psutil/issues/200
        # try to parse /proc/stat as a last resort
        if num == 0:
            search = re.compile(r'cpu\d')
            with open_text('%s/stat' % get_procfs_path()) as f:
                for line in f:
                    line = line.split(' ')[0]
                    if search.match(line):
                        num += 1

        if num == 0:
            # mimic os.cpu_count()
            return None
        return num 
Example #23
Source File: compatibility.py    From pex with Apache License 2.0
def cpu_count():
      # The set of CPUs accessible to the current process (pid 0).
      cpu_set = os.sched_getaffinity(0)
      return len(cpu_set) 
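Unlike os.cpu_count(), os.sched_getaffinity(0) reflects CPU pinning (taskset, cgroup cpusets), so inside a constrained container it can report fewer CPUs than the machine actually has. A small stdlib-only comparison sketch:

import os

total = os.cpu_count() or 1
usable = len(os.sched_getaffinity(0)) if hasattr(os, 'sched_getaffinity') else total
print(f'{usable} of {total} CPUs available to this process')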
Example #24
Source File: _utils.py    From aws-data-wrangler with Apache License 2.0
def ensure_cpu_count(use_threads: bool = True) -> int:
    """Get the number of cpu cores to be used.

    Note
    ----
    In case of `use_threads=True` the number of threads that can be spawned is obtained from os.cpu_count().

    Parameters
    ----------
    use_threads : bool
            True to enable multi-core utilization, False to disable.

    Returns
    -------
    int
        Number of cpu cores to be used.

    Examples
    --------
    >>> from awswrangler._utils import ensure_cpu_count
    >>> ensure_cpu_count(use_threads=True)
    4
    >>> ensure_cpu_count(use_threads=False)
    1

    """
    cpus: int = 1
    if use_threads is True:
        cpu_cnt: Optional[int] = os.cpu_count()
        if cpu_cnt is not None:
            cpus = cpu_cnt if cpu_cnt > cpus else cpus
    return cpus 
Example #25
Source File: test_linux.py    From psutil with BSD 3-Clause "New" or "Revised" License
def test_against_nproc(self):
        num = int(sh("nproc --all"))
        self.assertEqual(psutil.cpu_count(logical=True), num)
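A rough standalone equivalent of that test, runnable on a typical Linux machine where no CPUs have been taken offline (the two values can differ otherwise):

import os
import subprocess

nproc_all = int(subprocess.check_output(['nproc', '--all']))
print(nproc_all, os.cpu_count())   # normally the same number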