Python humanfriendly.format_size() Examples
The following are 19 code examples showing how to use humanfriendly.format_size(). They are extracted from open source projects; the project, author, and file noted above each example identify the original source.
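Before the project examples, here is a minimal usage sketch (not taken from any of the projects below); the printed values follow the behavior documented by humanfriendly:

import humanfriendly

# format_size() renders a byte count as a human-readable string.
print(humanfriendly.format_size(10))                 # '10 bytes'
print(humanfriendly.format_size(4 * 1000 ** 3))      # '4 GB' (decimal units by default)
print(humanfriendly.format_size(1024, binary=True))  # '1 KiB' (IEC binary units)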
Example 1
Project: dottorrent-gui Author: kz26 File: gui.py License: GNU General Public License v3.0
def initializeTorrent(self):
    self.torrent = dottorrent.Torrent(self.inputEdit.text())
    try:
        t_info = self.torrent.get_info()
    except Exception as e:
        self.torrent = None
        self._showError(str(e))
        return
    ptail = os.path.split(self.torrent.path)[1]
    if self.inputMode == 'file':
        self._statusBarMsg(
            "{}: {}".format(ptail, humanfriendly.format_size(
                t_info[0], binary=True)))
    else:
        self._statusBarMsg(
            "{}: {} files, {}".format(
                ptail, t_info[1], humanfriendly.format_size(
                    t_info[0], binary=True)))
    self.pieceSizeComboBox.setCurrentIndex(0)
    self.updatePieceCountLabel(t_info[2], t_info[3])
    self.pieceCountLabel.show()
    self.createButton.setEnabled(True)
Example 2
Project: espnet Author: espnet File: model_summary.py License: Apache License 2.0
def model_summary(model: torch.nn.Module) -> str:
    message = "Model structure:\n"
    message += str(model)
    num_params = get_human_readable_count(
        sum(p.numel() for p in model.parameters() if p.requires_grad)
    )
    message += "\n\nModel summary:\n"
    message += f" Class Name: {model.__class__.__name__}\n"
    message += f" Number of parameters: {num_params}\n"
    num_bytes = humanfriendly.format_size(
        sum(
            p.numel() * to_bytes(p.dtype)
            for p in model.parameters()
            if p.requires_grad
        )
    )
    message += f" Size: {num_bytes}\n"
    dtype = next(iter(model.parameters())).dtype
    message += f" Type: {dtype}"
    return message
Example 3
Project: od-database Author: simon987 File: reddit_bot.py License: MIT License
def format_stats(stats):
    result = " \n"
    result += "File types | Count | Total Size\n"
    result += ":-- | :-- | :-- \n"
    counter = 0
    for mime in stats["ext_stats"]:
        result += mime[2]
        result += " | " + str(mime[1])
        result += " | " + humanfriendly.format_size(mime[0]) + " \n"
        counter += 1
        if counter >= 3:
            break
    result += "**Total** | **" + str(stats["total_count"]) + "** | **"
    result += humanfriendly.format_size(stats["total_size"]) + "** \n\n"
    return result
Example 4
Project: TC-ResNet Author: hyperconnect File: trainer.py License: Apache License 2.0
def log_step_message(self, header, losses, speeds, comparative_loss, batch_size, is_training, tag=""):
    def get_loss_color(old_loss: float, new_loss: float):
        if old_loss < new_loss:
            return "red"
        else:
            return "green"

    def get_log_color(is_training: bool):
        if is_training:
            return {"color": "blue", "attrs": ["bold"]}
        else:
            return {"color": "yellow", "attrs": ["underline"]}

    self.last_loss.setdefault(tag, comparative_loss)
    loss_color = get_loss_color(self.last_loss.get(tag, 0), comparative_loss)
    self.last_loss[tag] = comparative_loss

    model_size = hf.format_size(self.model.total_params*4)
    total_params = hf.format_number(self.model.total_params)

    loss_desc, loss_val = self.build_info_step_message(losses, "{:7.4f}")
    header_desc, header_val = self.build_duration_step_message(header)
    speed_desc, speed_val = self.build_info_step_message(speeds, "{:4.0f}")

    with utils.format_text(loss_color) as fmt:
        loss_val_colored = fmt(loss_val)
        msg = (
            f"[{tag}] {header_desc}: {header_val}\t"
            f"{speed_desc}: {speed_val} ({self.input_shape};{batch_size})\t"
            f"{loss_desc}: {loss_val_colored} "
            f"| {model_size} {total_params}")

    with utils.format_text(**get_log_color(is_training)) as fmt:
        self.log.info(fmt(msg))
Example 5
Project: clusterman Author: Yelp File: status.py License: Apache License 2.0
def _write_summary(manager: PoolManager) -> None:
    print('Cluster statistics:')
    total_cpus = manager.cluster_connector.get_resource_total('cpus')
    total_mem = format_size(manager.cluster_connector.get_resource_total('mem') * 1000000)
    total_disk = format_size(manager.cluster_connector.get_resource_total('disk') * 1000000)
    total_gpus = manager.cluster_connector.get_resource_total('gpus')
    allocated_cpus = manager.cluster_connector.get_resource_allocation('cpus')
    allocated_mem = format_size(manager.cluster_connector.get_resource_allocation('mem') * 1000000)
    allocated_disk = format_size(manager.cluster_connector.get_resource_allocation('disk') * 1000000)
    allocated_gpus = manager.cluster_connector.get_resource_allocation('gpus')
    print(f'\tCPU allocation: {allocated_cpus:.1f} CPUs allocated to tasks, {total_cpus:.1f} total')
    print(f'\tMemory allocation: {allocated_mem} memory allocated to tasks, {total_mem} total')
    print(f'\tDisk allocation: {allocated_disk} disk space allocated to tasks, {total_disk} total')
    print(f'\tGPUs allocation: {allocated_gpus} GPUs allocated to tasks, {total_gpus} total')
Example 6
Project: zimfarm Author: openzim File: utils.py License: GNU General Public License v3.0
def format_size(value):
    return humanfriendly.format_size(value, binary=True)
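For context (this snippet is not part of the zimfarm source), a small comparison of the default decimal units with binary=True, with outputs as documented by humanfriendly:

import humanfriendly

# Default units are decimal (1 KB = 1000 bytes); binary=True switches to IEC units (1 KiB = 1024 bytes).
print(humanfriendly.format_size(1000))               # '1 KB'
print(humanfriendly.format_size(1024))               # '1.02 KB'
print(humanfriendly.format_size(1024, binary=True))  # '1 KiB'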
Example 7
Project: dottorrent-gui Author: kz26 File: gui.py License: GNU General Public License v3.0
def setupUi(self, MainWindow):
    super().setupUi(MainWindow)
    self.torrent = None
    self.MainWindow = MainWindow
    self.actionImportProfile.triggered.connect(self.import_profile)
    self.actionExportProfile.triggered.connect(self.export_profile)
    self.actionAbout.triggered.connect(self.showAboutDialog)
    self.actionQuit.triggered.connect(self.MainWindow.close)
    self.fileRadioButton.toggled.connect(self.inputModeToggle)
    self.fileRadioButton.setChecked(True)
    self.directoryRadioButton.toggled.connect(self.inputModeToggle)
    self.browseButton.clicked.connect(self.browseInput)
    self.batchModeCheckBox.stateChanged.connect(self.batchModeChanged)
    self.inputEdit.dragEnterEvent = self.inputDragEnterEvent
    self.inputEdit.dropEvent = self.inputDropEvent
    self.pasteButton.clicked.connect(self.pasteInput)
    self.pieceCountLabel.hide()
    self.pieceSizeComboBox.addItem('Auto')
    for x in PIECE_SIZES[1:]:
        self.pieceSizeComboBox.addItem(
            humanfriendly.format_size(x, binary=True))
    self.pieceSizeComboBox.currentIndexChanged.connect(
        self.pieceSizeChanged)
    self.privateTorrentCheckBox.stateChanged.connect(
        self.privateTorrentChanged)
    self.commentEdit.textEdited.connect(
        self.commentEdited)
    self.sourceEdit.textEdited.connect(
        self.sourceEdited)
    self.md5CheckBox.stateChanged.connect(
        self.md5Changed)
    self.progressBar.hide()
    self.createButton.setEnabled(False)
    self.createButton.clicked.connect(self.createButtonClicked)
    self.cancelButton.hide()
    self.cancelButton.clicked.connect(self.cancel_creation)
    self.resetButton.clicked.connect(self.reset)
    self._statusBarMsg('Ready')
Example 8
Project: dottorrent-gui Author: kz26 File: gui.py License: GNU General Public License v3.0
def updatePieceCountLabel(self, ps, pc):
    ps = humanfriendly.format_size(ps, binary=True)
    self.pieceCountLabel.setText("{} pieces @ {} each".format(pc, ps))
Example 9
Project: fossor Author: linkedin File: buddyinfo.py License: BSD 2-Clause "Simplified" License
def _get_column_sizes_human_readable(self):
    column_count = self._get_columns_len()
    page_size = self._getpagesize()
    column_sizes = []
    for c in range(0, column_count):
        size = humanfriendly.format_size((2**c) * page_size, binary=True)
        column_sizes.append(size)
    return ' '.join(column_sizes)
Example 10
Project: apex-sigma-core Author: lu-ci File: status.py License: GNU General Public License v3.0
async def status(cmd, pld):
    """
    :param cmd: The command object referenced in the command.
    :type cmd: sigma.core.mechanics.command.SigmaCommand
    :param pld: The payload with execution data and details.
    :type pld: sigma.core.mechanics.payload.CommandPayload
    """
    uptime_set = arrow.utcnow().float_timestamp - cmd.bot.start_time.float_timestamp
    processed = round(cmd.bot.queue.processed / uptime_set, 3)
    os_icon, os_color = get_os_icon()
    general_text = f'Latency: **{int(cmd.bot.latency * 1000)}ms**'
    general_text += f'\nPlatform: **{sys.platform.upper()}**'
    general_text += f'\nStarted: **{arrow.get(psutil.boot_time()).humanize()}**'
    cpu_clock = psutil.cpu_freq()
    cpu_clock = round(cpu_clock.current, 2) if cpu_clock else '???'
    cpu_text = f'Count: **{psutil.cpu_count()} ({psutil.cpu_count(logical=False)})**'
    cpu_text += f'\nUsage: **{psutil.cpu_percent()}%**'
    cpu_text += f'\nClock: **{cpu_clock} MHz**'
    avail_mem = psutil.virtual_memory().available
    total_mem = psutil.virtual_memory().total
    used_mem = humanfriendly.format_size(total_mem - avail_mem, binary=True)
    total_mem = humanfriendly.format_size(total_mem, binary=True)
    sigma_mem = humanfriendly.format_size(psutil.Process(os.getpid()).memory_info().rss, binary=True)
    mem_text = f'Me: **{sigma_mem}**'
    mem_text += f'\nUsed: **{used_mem}**'
    mem_text += f'\nTotal: **{total_mem}**'
    response = discord.Embed(color=os_color)
    response.set_author(name=socket.gethostname(), icon_url=os_icon)
    response.add_field(name='General', value=general_text)
    response.add_field(name='CPU', value=cpu_text)
    response.add_field(name='Memory', value=mem_text)
    if cmd.bot.cfg.dsc.bot:
        shard_latency = get_shard_latency(cmd.bot.latencies, pld.msg.guild.shard_id)
        verbose_description = f'Shard: #{pld.msg.guild.shard_id} | '
        verbose_description += f'Latency: {shard_latency}ms | '
        verbose_description += f'Activity: {processed} ev/s'
        response.description = verbose_description
    await pld.msg.channel.send(embed=response)
Example 11
Project: MMNet Author: hyperconnect File: trainer.py License: Apache License 2.0
def log_step_message(self, header, losses, speeds, comparative_loss, batch_size, is_training, tag=""):
    def get_loss_color(old_loss: float, new_loss: float):
        if old_loss < new_loss:
            return "red"
        else:
            return "green"

    def get_log_color(is_training: bool):
        if is_training:
            return {"color": "blue", "attrs": ["bold"]}
        else:
            return {"color": "yellow", "attrs": ["underline"]}

    self.last_loss.setdefault(tag, comparative_loss)
    loss_color = get_loss_color(self.last_loss.get(tag, 0), comparative_loss)
    self.last_loss[tag] = comparative_loss

    model_size = hf.format_size(self.model.total_params*4)
    total_params = hf.format_number(self.model.total_params)

    loss_desc, loss_val = self.build_info_step_message(losses, "{:7.4f}")
    header_desc, header_val = self.build_duration_step_message(header)
    speed_desc, speed_val = self.build_info_step_message(speeds, "{:4.0f}")

    with utils.format_text(loss_color) as fmt:
        loss_val_colored = fmt(loss_val)
        msg = (
            f"[{tag}] {header_desc}: {header_val}\t"
            f"{speed_desc}: {speed_val} ({self.args.width},{self.args.height};{batch_size})\t"
            f"{loss_desc}: {loss_val_colored} "
            f"| {model_size} {total_params}")

    with utils.format_text(**get_log_color(is_training)) as fmt:
        self.log.info(fmt(msg))
Example 12
Project: kafka-utils Author: Yelp File: main.py License: Apache License 2.0
def human_throttle(throttle):
    if throttle is None:
        return "N/A"
    return humanfriendly.format_size(int(throttle), binary=True)
Example 13
Project: apt-smart Author: martin68 File: cli.py License: MIT License
def report_available_mirrors(updater):
    """Print the available mirrors to the terminal (in a human friendly format)."""
    if connected_to_terminal() or os.getenv('TRAVIS') == 'true':  # make Travis CI test this code
        # https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
        have_bandwidth = any(c.bandwidth for c in updater.ranked_mirrors)
        have_last_updated = any(c.last_updated is not None for c in updater.ranked_mirrors)
        column_names = ["Rank", "Mirror URL", "Available?", "Updating?"]
        if have_last_updated:
            column_names.append("Last updated")
        if have_bandwidth:
            column_names.append("Bandwidth")
        data = []
        long_mirror_urls = {}
        if os.getenv('TRAVIS') == 'true' and updater.url_char_len < 50:
            updater.url_char_len = 50
        for i, candidate in enumerate(updater.ranked_mirrors, start=1):
            if len(candidate.mirror_url) <= updater.url_char_len:
                stripped_mirror_url = candidate.mirror_url
            else:  # the mirror_url is too long, strip it
                stripped_mirror_url = candidate.mirror_url[:updater.url_char_len - 3]
                stripped_mirror_url = stripped_mirror_url + "..."
                long_mirror_urls[i] = candidate.mirror_url  # store it, output as full afterwards
            row = [i, stripped_mirror_url,
                   "Yes" if candidate.is_available else "No",
                   "Yes" if candidate.is_updating else "No"]
            if have_last_updated:
                row.append("Up to date" if candidate.last_updated == 0 else (
                    "%s behind" % format_timespan(candidate.last_updated, max_units=1)
                    if candidate.last_updated
                    else "Unknown"
                ))
            if have_bandwidth:
                row.append("%s/s" % format_size(round(candidate.bandwidth, 0))
                           if candidate.bandwidth else "Unknown")
            data.append(row)
        output(format_table(data, column_names=column_names))
        if long_mirror_urls:
            output(u"Full URLs which are too long to be shown in above table:")
            for key in long_mirror_urls:
                output(u"%i: %s", key, long_mirror_urls[key])
    else:
        output(u"\n".join(
            candidate.mirror_url for candidate in updater.ranked_mirrors
            if candidate.is_available and not candidate.is_updating
        ))
Example 14
Project: apt-smart Author: martin68 File: http.py License: MIT License
def fetch_worker(url):
    """
    Fetch the given URL for :func:`fetch_concurrent()`.

    :param url: The URL to fetch (a string).
    :returns: A tuple of three values:

              1. The URL that was fetched (a string).
              2. The data that was fetched (a string or :data:`None`).
              3. The number of seconds it took to fetch the URL (a number).
    """
    # Ignore Control-C instead of raising KeyboardInterrupt because (due to a
    # quirk in multiprocessing) this can cause the parent and child processes
    # to get into a deadlock kind of state where only Control-Z will get you
    # your precious terminal back; super annoying IMHO.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    timer = Timer()
    try:
        data = fetch_url(url, retry=False)
    except Exception as e:
        logger.debug("Failed to fetch %s! (%s)", url, e)
        data = None
    else:
        kbps = format_size(round(len(data) / timer.elapsed_time, 2))
        logger.debug("Downloaded %s at %s per second.", url, kbps)
    return url, data, timer.elapsed_time
Example 15
Project: ue4-docker Author: adamrehn File: info.py License: MIT License
def _formatSize(size):
    return humanfriendly.format_size(size, binary=True)
Example 16
Project: zimfarm Author: openzim File: uploader.py License: GNU General Public License v3.0
def display_stats(filesize, started_on, ended_on=None):
    ended_on = ended_on or now()
    duration = (ended_on - started_on).total_seconds()
    if humanfriendly:
        hfilesize = humanfriendly.format_size(filesize, binary=True)
        hduration = humanfriendly.format_timespan(duration, max_units=2)
        speed = humanfriendly.format_size(filesize / duration)
        msg = f"uploaded {hfilesize} in {hduration} ({speed}/s)"
    else:
        hfilesize = filesize / 2 ** 20  # size in MiB
        speed = filesize / 1000000 / duration  # MB/s
        duration = duration / 60  # in mn
        msg = f"uploaded {hfilesize:.3}MiB in {duration:.1}mn ({speed:.3}MBps)"
    logger.info(f"[stats] {msg}")
Example 17
Project: console Author: laincloud File: views.py License: MIT License
def render_proc_data(cls, appname, proc_lain_conf, proc_status=None, is_portal=False, client=None):
    data = {
        'procname': proc_lain_conf.name,
        'proctype': proc_lain_conf.type.name,
        'image': proc_lain_conf.image,
        'numinstances': proc_lain_conf.num_instances,
        'cpu': proc_lain_conf.cpu,
        'memory': proc_lain_conf.memory,
        'persistentdirs': proc_lain_conf.volumes,
        'dnssearchs': proc_lain_conf.dns_search,
        'ports': [{'portnumber': p.port, 'porttype': p.type.name}
                  for p in proc_lain_conf.port.values()],
        'mountpoints': proc_lain_conf.mountpoint,
        'httpsonly': proc_lain_conf.https_only,
        'user': proc_lain_conf.user,
        'workingdir': proc_lain_conf.working_dir,
        'entrypoint': proc_lain_conf.entrypoint,
        'cmd': proc_lain_conf.cmd,
        'envs': proc_lain_conf.env,
        'pods': [],
        'depends': [],
        'url': reverse('api_proc', kwargs={'appname': appname, 'procname': proc_lain_conf.name}),
        'logs': proc_lain_conf.logs,
        'lasterror': '',
    }
    if proc_status and isinstance(proc_status['Status'], dict):
        pods, depends = [], []
        last_error = ''
        pods_meta = proc_status['Status']['Pods']
        if pods_meta is not None:
            # handle the situation when proc is portal
            if is_portal:
                for client_name, pods_info in pods_meta.iteritems():
                    if client and client != client_name:
                        continue
                    for pod in pods_info:
                        pods.append(ProcApi.render_pod_data(pod))
                        last_error = pod['LastError']
            else:
                for pod in pods_meta:
                    pods.append(ProcApi.render_pod_data(pod))
                last_error = proc_status['Status']['LastError']
        data['pods'] = pods
        data['depends'] = depends
        data['lasterror'] = last_error
        # patch num_instances / cpu / memory spec in deploy to LainConf
        try:
            data['numinstances'] = proc_status[
                'Status']['Spec']['NumInstances']
            data['cpu'] = int(proc_status['Status']['Spec'][
                'Pod']['Containers'][0]['CpuLimit'])
            data['memory'] = humanfriendly.format_size(
                int(proc_status['Status']['Spec']['Pod']['Containers'][0]['MemoryLimit']))
            data['image'] = proc_status['Status'][
                'Spec']['Pod']['Containers'][0]['Image']
        except:
            pass
    return data
Example 18
Project: bincopy Author: eerimoq File: bincopy.py License: MIT License
def info(self):
    """Return a string of human readable information about the binary
    file.

    .. code-block:: python

       >>> print(binfile.info())
       Data ranges:

           0x00000100 - 0x00000140 (64 bytes)

    """
    info = ''

    if self._header is not None:
        if self._header_encoding is None:
            header = ''
            for b in bytearray(self.header):
                if chr(b) in string.printable:
                    header += chr(b)
                else:
                    header += f'\\x{b:02x}'
        else:
            header = self.header
        info += f'Header: "{header}"\n'

    if self.execution_start_address is not None:
        info += (f'Execution start address: '
                 f'0x{self.execution_start_address:08x}\n')

    info += 'Data ranges:\n\n'

    for address, data in self._segments:
        minimum_address = address
        size = len(data)
        maximum_address = (minimum_address + size // self.word_size_bytes)
        info += 4 * ' '
        info += (f'0x{minimum_address:08x} - 0x{maximum_address:08x} '
                 f'({format_size(size, binary=True)})\n')

    return info
Example 19
Project: ue4-docker Author: adamrehn File: ResourceMonitor.py License: MIT License
def run(self):
    '''
    The resource monitor loop itself
    '''

    # Determine which filesystem the Docker daemon uses for storing its data directory
    dockerInfo = DockerUtils.info()
    rootDir = dockerInfo['DockerRootDir']

    # If we cannot access the Docker data directory (e.g. when the daemon is in a Moby VM), don't report disk space
    reportDisk = os.path.exists(rootDir)

    # Sample the CPU usage using an interval of 1 second the first time to prime the system
    # (See: <https://psutil.readthedocs.io/en/latest/#psutil.cpu_percent>)
    psutil.cpu_percent(1.0)

    # Loop until asked to stop
    while True:

        # Check that the thread has not been asked to stop
        with self._lock:
            if self._shouldStop == True:
                return

        # Format the timestamp for the current time in ISO 8601 format (albeit without the "T" separator)
        isoTime = datetime.datetime.now().replace(microsecond=0).isoformat(' ')

        # We format data sizes using binary units (KiB, MiB, GiB, etc.)
        formatSize = lambda size: humanfriendly.format_size(size, binary=True, keep_width=True)

        # Format the current quantity of available disk space on the Docker data directory's filesystem
        diskSpace = formatSize(shutil.disk_usage(rootDir).free) if reportDisk == True else 'Unknown'

        # Format the current quantity of available system memory
        physicalMemory = formatSize(psutil.virtual_memory().free)
        virtualMemory = formatSize(psutil.swap_memory().free)

        # Format the current CPU usage levels
        cpu = psutil.cpu_percent()

        # Report the current levels of our available resources
        self._logger.info('[{}] [Available disk: {}] [Available memory: {} physical, {} virtual] [CPU usage: {:.2f}%]'.format(
            isoTime,
            diskSpace,
            physicalMemory,
            virtualMemory,
            cpu
        ), False)

        # Sleep until the next sampling interval
        time.sleep(self._interval)