Python math.ceil() Examples
The following are 30 code examples of math.ceil(), drawn from real open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the math module, or try the search function.
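Before the project examples, a minimal sketch of the function itself: math.ceil(x) returns the smallest integer greater than or equal to x (an int in Python 3).

import math

math.ceil(4.2)    # 5
math.ceil(-4.2)   # -4: rounds toward positive infinity, unlike int() truncation
math.ceil(7 / 3)  # 3: the usual "how many batches of size 3 for 7 items" idiom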

Example #1
Source File: HalvesRainbow.py From BiblioPixelAnimations with MIT License
def step(self, amt=1):
    center = float(self._maxLed) / 2
    # floor/ceil straddle the physical center when the strip has an odd length
    center_floor = math.floor(center)
    center_ceil = math.ceil(center)

    if self._centerOut:
        self.layout.fill(
            self.palette(self._step),
            int(center_floor - self._current),
            int(center_floor - self._current))
        self.layout.fill(
            self.palette(self._step),
            int(center_ceil + self._current),
            int(center_ceil + self._current))
    else:
        self.layout.fill(
            self.palette(self._step),
            int(self._current),
            int(self._current))
        self.layout.fill(
            self.palette(self._step),
            int(self._maxLed - self._current),
            int(self._maxLed - self._current))

    self._step += amt + self._rainbowInc

    # wrap back to the start once the fill reaches the center
    if self._current == center_floor:
        self._current = self._minLed
    else:
        self._current += amt
Example #2
Source File: group_sampler.py From mmdetection with Apache License 2.0
def __iter__(self):
    indices = []
    for i, size in enumerate(self.group_sizes):
        if size == 0:
            continue
        indice = np.where(self.flag == i)[0]
        assert len(indice) == size
        np.random.shuffle(indice)
        num_extra = int(np.ceil(size / self.samples_per_gpu)
                        ) * self.samples_per_gpu - len(indice)
        indice = np.concatenate(
            [indice, np.random.choice(indice, num_extra)])
        indices.append(indice)
    indices = np.concatenate(indices)
    indices = [
        indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
        for i in np.random.permutation(
            range(len(indices) // self.samples_per_gpu))
    ]
    indices = np.concatenate(indices)
    indices = indices.astype(np.int64).tolist()
    assert len(indices) == self.num_samples
    return iter(indices)
Example #3
Source File: progressbar.py From multibootusb with GNU General Public License v2.0
def _format_widgets(self):
    result = []
    expanding = []
    width = self.term_width

    for index, widget in enumerate(self.widgets):
        if isinstance(widget, widgets.WidgetHFill):
            result.append(widget)
            expanding.insert(0, index)
        else:
            widget = widgets.format_updatable(widget, self)
            result.append(widget)
            width -= len(widget)

    count = len(expanding)
    while count:
        portion = max(int(math.ceil(width * 1. / count)), 0)
        index = expanding.pop()
        count -= 1

        widget = result[index].update(self, portion)
        width -= len(widget)
        result[index] = widget

    return result
Example #4
Source File: utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def save_image(data, epoch, image_size, batch_size, output_dir, padding=2):
    """ save image """
    data = data.asnumpy().transpose((0, 2, 3, 1))
    datanp = np.clip(
        (data - np.min(data)) * (255.0 / (np.max(data) - np.min(data))),
        0, 255).astype(np.uint8)
    x_dim = min(8, batch_size)
    y_dim = int(math.ceil(float(batch_size) / x_dim))
    height, width = int(image_size + padding), int(image_size + padding)
    grid = np.zeros((height * y_dim + 1 + padding // 2,
                     width * x_dim + 1 + padding // 2, 3), dtype=np.uint8)
    k = 0
    for y in range(y_dim):
        for x in range(x_dim):
            if k >= batch_size:
                break
            start_y = y * height + 1 + padding // 2
            end_y = start_y + height - padding
            start_x = x * width + 1 + padding // 2
            end_x = start_x + width - padding
            np.copyto(grid[start_y:end_y, start_x:end_x, :], datanp[k])
            k += 1
    imageio.imwrite(
        '{}/fake_samples_epoch_{}.png'.format(output_dir, epoch), grid)
Example #5
Source File: gaussian_moments.py From DOTA_models with Apache License 2.0
def compute_a(sigma, q, lmbd, verbose=False):
    # Python 2 source: note xrange and the print statement.
    lmbd_int = int(math.ceil(lmbd))
    if lmbd_int == 0:
        return 1.0

    a_lambda_first_term_exact = 0
    a_lambda_second_term_exact = 0
    for i in xrange(lmbd_int + 1):
        coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
        s1, s2 = 0, 0
        for j in xrange(i + 1):
            coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
            s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
            s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
        a_lambda_first_term_exact += coef_i * s1
        a_lambda_second_term_exact += coef_i * s2

    a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
                      q * a_lambda_second_term_exact)
    if verbose:
        print "A: by binomial expansion {} = {} + {}".format(
            a_lambda_exact,
            (1.0 - q) * a_lambda_first_term_exact,
            q * a_lambda_second_term_exact)
    return _to_np_float64(a_lambda_exact)
Example #6
Source File: common_layers.py From fine-lm with MIT License
def make_even_size(x):
    """Pad x to be even-sized on axis 1 and 2, but only if necessary."""
    x_shape = x.get_shape().as_list()
    assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
    shape = [dim if dim is not None else -1 for dim in x_shape]
    new_shape = x_shape  # To make sure constant shapes remain constant.
    if x_shape[1] is not None:
        new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
    if x_shape[2] is not None:
        new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
    if shape[1] % 2 == 0 and shape[2] % 2 == 0:
        return x
    if shape[1] % 2 == 0:
        x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
        x.set_shape(new_shape)
        return x
    if shape[2] % 2 == 0:
        x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
        x.set_shape(new_shape)
        return x
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
    x.set_shape(new_shape)
    return x
Example #7
Source File: get_references_web.py From fine-lm with MIT License
def main(_):
    shard_urls = fetch.get_urls_for_shard(FLAGS.urls_dir, FLAGS.shard_id)
    num_groups = int(math.ceil(len(shard_urls) / fetch.URLS_PER_CLIENT))

    tf.logging.info("Launching get_references_web_single_group sequentially for "
                    "%d groups in shard %d. Total URLs: %d",
                    num_groups, FLAGS.shard_id, len(shard_urls))
    command_prefix = FLAGS.command.split() + [
        "--urls_dir=%s" % FLAGS.urls_dir,
        "--shard_id=%d" % FLAGS.shard_id,
        "--debug_num_urls=%d" % FLAGS.debug_num_urls,
    ]
    with utils.timing("all_groups_fetch"):
        for i in range(num_groups):
            command = list(command_prefix)
            out_dir = os.path.join(FLAGS.out_dir, "process_%d" % i)
            command.append("--out_dir=%s" % out_dir)
            command.append("--group_id=%d" % i)
            try:
                # Even on 1 CPU, each group should finish within an hour.
                sp.check_call(command, timeout=60 * 60)
            except sp.TimeoutExpired:
                tf.logging.error("Group %d timed out", i)
Example #8
Source File: display_methods.py From indras_net with GNU General Public License v3.0
def __init__(self, title, varieties, width, height, anim=True,
             data_func=None, is_headless=False, legend_pos=4):
    """
    Setup a scatter plot.
    varieties contains the different types of entities to show in
    the plot, which will get assigned different colors
    """
    global anim_func

    self.scats = None
    self.anim = anim
    self.data_func = data_func
    self.s = ceil(4096 / width)
    self.headless = is_headless

    fig, ax = plt.subplots()
    ax.set_xlim(0, width)
    ax.set_ylim(0, height)
    self.create_scats(varieties)
    ax.legend(loc=legend_pos)
    ax.set_title(title)
    plt.grid(True)
    if anim and not self.headless:
        anim_func = animation.FuncAnimation(fig,
                                            self.update_plot,
                                            frames=1000,
                                            interval=500,
                                            blit=False)
Example #9
Source File: section_output.py From clikit with MIT License
def add_content(self, content):  # type: (str) -> None
    for line_content in content.split("\n"):
        self._lines += (
            math.ceil(
                len(self.remove_format(line_content).replace("\t", " "))
                / self._terminal.width
            )
            or 1
        )
        self._content.append(line_content)
        self._content.append("\n")
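The ceil here converts a character count into a terminal row count; the `or 1` keeps empty lines counting as one row. A worked instance with assumed numbers:

import math

math.ceil(200 / 80)       # 3 rows for a 200-character line in an 80-column terminal
math.ceil(0 / 80) or 1    # 1: an empty line still occupies a row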
Example #10
Source File: time.py From clikit with MIT License
def format_time(secs):  # type: (int) -> str
    for fmt in _TIME_FORMATS:
        if secs > fmt[0]:
            continue

        if len(fmt) == 2:
            return fmt[1]

        return "{} {}".format(math.ceil(secs / fmt[2]), fmt[1])
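_TIME_FORMATS itself is not shown on this page; the code implies 2-tuples of (threshold, label) and 3-tuples of (threshold, label, divisor). A self-contained sketch with a hypothetical table of that shape:

import math

# Hypothetical table in the shape format_time() expects (not clikit's actual values):
_TIME_FORMATS = [(0, "< 1 sec"), (59, "secs", 1), (3600, "mins", 60)]

def format_time(secs):
    for fmt in _TIME_FORMATS:
        if secs > fmt[0]:
            continue
        if len(fmt) == 2:
            return fmt[1]
        return "{} {}".format(math.ceil(secs / fmt[2]), fmt[1])

print(format_time(90))  # "2 mins": ceil(90 / 60) == 2 with the table above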
Example #11
Source File: img.py From vergeml with MIT License
def resize_image(img, width, height, method, mode, bg_color=(0, 0, 0, 0)):
    # Some code from:
    # https://github.com/charlesthk/python-resize-image/blob/master/resizeimage/resizeimage.py
    # Thank you!

    img = img.copy()
    pil_method = getattr(Image, method.upper())

    if mode == 'fill':
        img = img.resize((width, height), pil_method)
    elif mode == 'aspect-fill':
        w, h = img.size
        ratio = max(width / w, height / h)
        nsize = (int(math.ceil(w * ratio)), int(math.ceil(h * ratio)))
        img = img.resize(nsize, pil_method)
        w, h = img.size
        left = (w - width) / 2
        top = (h - height) / 2
        right = w - left
        bottom = h - top
        rect = (int(math.ceil(x)) for x in (left, top, right, bottom))
        img = img.crop(rect)
    elif mode == 'aspect-fit':
        img.thumbnail((width, height), pil_method)
        background = Image.new('RGBA', (width, height), bg_color)
        img_position = (
            int(math.ceil((width - img.width) / 2)),
            int(math.ceil((height - img.height) / 2))
        )
        background.paste(img, img_position)
        img = background.convert('RGB')

    return img
Example #12
Source File: dkt.py From dkt with MIT License
def round_to_multiple(x, base):
    return int(base * math.ceil(float(x) / base))
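A quick usage sketch; the float() cast matters under Python 2, where 7 / 5 would otherwise truncate before the ceil:

round_to_multiple(7, 5)    # 10: 5 * ceil(1.4)
round_to_multiple(10, 5)   # 10: exact multiples are unchanged
round_to_multiple(0.2, 1)  # 1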
Example #13
Source File: plotting.py From cat-bbs with MIT License
def __init__(self, titles, increasing, save_to_fp):
    assert len(titles) == len(increasing)
    n_plots = len(titles)
    self.titles = titles
    self.increasing = dict([(title, incr) for title, incr
                            in zip(titles, increasing)])
    self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"]
    self.nb_points_max = 500
    self.save_to_fp = save_to_fp
    self.start_batch_idx = 0
    self.autolimit_y = False
    self.autolimit_y_multiplier = 5

    #self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20))
    nrows = max(1, int(math.sqrt(n_plots)))
    ncols = int(math.ceil(n_plots / nrows))
    width = ncols * 10
    height = nrows * 10
    self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols,
                                       figsize=(width, height))

    if nrows == 1 and ncols == 1:
        self.axes = [self.axes]
    else:
        self.axes = self.axes.flat

    title_to_ax = dict()
    for idx, (title, ax) in enumerate(zip(self.titles, self.axes)):
        title_to_ax[title] = ax
    self.title_to_ax = title_to_ax

    self.fig.tight_layout()
    self.fig.subplots_adjust(left=0.05)
Example #14
Source File: fun.py From cyberdisc-bot with MIT License
async def quoteboard(self, ctx: Context, page: int = 1):
    """Show a leaderboard of users with the most quotes."""
    users = ""
    current = 1
    start_from = (page - 1) * 10

    async with self.bot.pool.acquire() as connection:
        page_count = ceil(
            await connection.fetchval(
                "SELECT count(DISTINCT author_id) FROM quotes"
            ) / 10
        )
        # The original guard read "if 1 > page > page_count", which can never
        # be true; the intended out-of-range check is:
        if not 1 <= page <= page_count:
            return await ctx.send(":no_entry_sign: Invalid page number")
        for result in await connection.fetch(
            "SELECT author_id, COUNT(author_id) as quote_count FROM quotes "
            "GROUP BY author_id ORDER BY quote_count DESC LIMIT 10 OFFSET $1",
            start_from,
        ):
            author, quotes = result.values()
            users += f"{start_from + current}. <@{author}> - {quotes}\n"
            current += 1

    embed = Embed(colour=Colour(0xAE444A))
    embed.add_field(name=f"Page {page}/{page_count}", value=users)
    embed.set_author(name="Quotes Leaderboard", icon_url=CYBERDISC_ICON_URL)
    await ctx.send(embed=embed)
Example #15
Source File: network.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def _anchor_component(self, height, width):
    # just to get the shape right
    # height = int(math.ceil(self._im_info.data[0, 0] / self._feat_stride[0]))
    # width = int(math.ceil(self._im_info.data[0, 1] / self._feat_stride[0]))
    anchors, anchor_length = generate_anchors_pre(
        height, width,
        self._feat_stride, self._anchor_scales, self._anchor_ratios)
    self._anchors = Variable(torch.from_numpy(anchors).cuda())
    self._anchor_length = anchor_length
Example #16
Source File: atomic.py From comet-commonsense with Apache License 2.0
def select_partial_dataset(data_opts, data):
    num_selections = math.ceil(data_opts.kr * len(data))
    return random.sample(data, num_selections)
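Using ceil for the sample count guarantees at least one item survives any nonzero keep ratio. With assumed values for the kr option:

import math

math.ceil(0.25 * 10)  # 3: keep ratio 0.25 over 10 items
math.ceil(0.01 * 10)  # 1: any nonzero ratio keeps at least one item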
Example #17
Source File: supersets.py From iSDX with Apache License 2.0
def recompute_all_supersets(self, pctrl):
    self.logger.debug("~Recomputing all Supersets...")

    self.rulecounts = self.recompute_rulecounts(pctrl)
    # get all sets of participants advertising the same prefix
    peer_sets = get_prefix2part_sets(pctrl)
    peer_sets = clear_inactive_parts(peer_sets, self.rulecounts.keys())
    peer_sets = removeSubsets(peer_sets)

    self.supersets = minimize_ss_rules_greedy(peer_sets, self.rulecounts,
                                              self.max_initial_bits)

    # impose an ordering on each superset by converting sets to lists
    for i in range(len(self.supersets)):
        self.supersets[i] = list(self.supersets[i])

    # if there is more than one superset, set the id size appropriately
    self.id_size = 1
    if len(self.supersets) > 1:
        self.id_size = int(math.ceil(math.log(len(self.supersets), 2)))

    # fix the mask size based on the id size
    self.mask_size = self.max_bits - self.id_size

    # in the unlikely case that there are more participants for a prefix
    # than can fit in the mask, truncate the list of participants
    # (this may still be very broken)
    for superset in self.supersets:
        if len(superset) > self.mask_size:
            self.logger.warn('Superset too big! Dropping participants.')
            del(superset[self.mask_size:])

    self.logger.debug("done.~")
    self.logger.debug("Supersets: >> " + str(self.supersets))
Example #18
Source File: ss_lib.py From iSDX with Apache License 2.0
def bitsRequired(supersets):
    """ How many bits are needed to represent any set in this construction? """
    if supersets is None:
        return 0

    logM = 1
    if len(supersets) > 1:
        logM = math.ceil(math.log(len(supersets), 2))
    maxS = max(len(superset) for superset in supersets)

    return int(logM + maxS)
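A usage sketch with made-up supersets: four supersets need ceil(log2(4)) == 2 ID bits, and the largest set below has five members, so five mask bits come on top:

bitsRequired([{1, 2}, {3}, {4, 5, 6, 7, 8}, {9}])  # int(2.0 + 5) == 7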
Example #19
Source File: data.py From Neural-LP with MIT License
def _count_batch(self, samples, batch_size):
    # Python 2 idiom: zip() returns a list here. Under Python 3 this
    # would need list(zip(*samples))[0].
    relations = zip(*samples)[0]
    relations_counts = Counter(relations)
    num_batches = [ceil(1. * x / batch_size) for x in relations_counts.values()]
    return int(sum(num_batches))
Example #20
Source File: wrappers.py From mmdetection with Apache License 2.0
def forward(self, x):
    if x.numel() == 0 and torch.__version__ <= '1.4':
        out_shape = list(x.shape[:2])
        for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size),
                                 _pair(self.padding), _pair(self.stride),
                                 _pair(self.dilation)):
            o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
            o = math.ceil(o) if self.ceil_mode else math.floor(o)
            out_shape.append(o)
        empty = NewEmptyTensorOp.apply(x, out_shape)
        return empty

    return super().forward(x)
Example #21
Source File: group_sampler.py From mmdetection with Apache License 2.0
def __init__(self, dataset, samples_per_gpu=1):
    assert hasattr(dataset, 'flag')
    self.dataset = dataset
    self.samples_per_gpu = samples_per_gpu
    self.flag = dataset.flag.astype(np.int64)
    self.group_sizes = np.bincount(self.flag)
    self.num_samples = 0
    for i, size in enumerate(self.group_sizes):
        self.num_samples += int(np.ceil(
            size / self.samples_per_gpu)) * self.samples_per_gpu
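The ceil-then-multiply pattern rounds each group up to a whole number of GPU batches. A worked instance with hypothetical numbers:

import numpy as np

size, samples_per_gpu = 13, 4
int(np.ceil(size / samples_per_gpu)) * samples_per_gpu
# 16: the 13 real indices get 3 duplicated ones as padding in __iter__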
Example #22
Source File: group_sampler.py From mmdetection with Apache License 2.0
def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None):
    _rank, _num_replicas = get_dist_info()
    if num_replicas is None:
        num_replicas = _num_replicas
    if rank is None:
        rank = _rank
    self.dataset = dataset
    self.samples_per_gpu = samples_per_gpu
    self.num_replicas = num_replicas
    self.rank = rank
    self.epoch = 0

    assert hasattr(self.dataset, 'flag')
    self.flag = self.dataset.flag
    self.group_sizes = np.bincount(self.flag)

    self.num_samples = 0
    for i, j in enumerate(self.group_sizes):
        self.num_samples += int(
            math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                      self.num_replicas)) * self.samples_per_gpu
    self.total_size = self.num_samples * self.num_replicas
Example #23
Source File: group_sampler.py From mmdetection with Apache License 2.0
def __iter__(self):
    # deterministically shuffle based on epoch
    g = torch.Generator()
    g.manual_seed(self.epoch)

    indices = []
    for i, size in enumerate(self.group_sizes):
        if size > 0:
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            indice = indice[list(torch.randperm(int(size),
                                                generator=g))].tolist()
            extra = int(
                math.ceil(
                    size * 1.0 / self.samples_per_gpu / self.num_replicas)
            ) * self.samples_per_gpu * self.num_replicas - len(indice)
            # pad indice
            tmp = indice.copy()
            for _ in range(extra // size):
                indice.extend(tmp)
            indice.extend(tmp[:extra % size])
            indices.extend(indice)

    assert len(indices) == self.total_size

    indices = [
        indices[j] for i in list(
            torch.randperm(
                len(indices) // self.samples_per_gpu, generator=g))
        for j in range(i * self.samples_per_gpu,
                       (i + 1) * self.samples_per_gpu)
    ]

    # subsample
    offset = self.num_samples * self.rank
    indices = indices[offset:offset + self.num_samples]
    assert len(indices) == self.num_samples

    return iter(indices)
Example #24
Source File: gen_noisy.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess, model):
    dataset = 'cifar'
    # with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples = []
    adv_y_samples = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        # note: batches come from the module-level "mnist" object even
        # though the output file is labeled 'cifar'
        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        x_batch_adv = attack.perturb(x_batch, y_batch, sess)
        if (ibatch == 0):
            adv_x_samples = x_batch_adv
            adv_y_samples = y_batch
        else:
            adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis=0)
            adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis=0)

    if (args.attack == 'xent'):
        atck = 'pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif (args.attack == 'cw_pgd'):
        atck = 'cw_pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
        f = open(os.path.join(args.log_dir, "custom.p"), "w")
    # Python 2-style text-mode pickle; Python 3 would need mode "wb"
    pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
    f.close()
Example #25
Source File: gen_whitebox_adv.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess, model):
    dataset = 'cifar'
    # with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples = []
    adv_y_samples = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        x_batch_adv = attack.perturb(x_batch, y_batch, sess)
        if (ibatch == 0):
            adv_x_samples = x_batch_adv
            adv_y_samples = y_batch
        else:
            adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis=0)
            adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis=0)

    if (args.attack == 'xent'):
        atck = 'pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif (args.attack == 'cw_pgd'):
        atck = 'cw_pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
        f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
    f.close()
Example #26
Source File: gen_whitebox_adv.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def evaluate_checkpoint(sess, model):
    dataset = 'mnist'
    # with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples = []
    adv_y_samples = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        dict_nat = {model.x_input: x_batch,
                    model.y_input: y_batch}  # built but never used below

        x_batch_adv = attack.perturb(x_batch, y_batch, sess)
        if (ibatch == 0):
            adv_x_samples = x_batch_adv
            adv_y_samples = y_batch
        else:
            adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis=0)
            adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis=0)

    if (args.attack == 'xent'):
        atck = 'pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    elif (args.attack == 'cw_pgd'):
        atck = 'cw_pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "w")
    else:
        f = open(os.path.join(args.log_dir, "custom.p"), "w")
    pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
    f.close()
Example #27
Source File: utils.py From pinax-documents with MIT License
def convert_bytes(bytes):
    bytes = float(bytes)
    if bytes >= 1099511627776:
        size, srepr = bytes / 1099511627776, "TB"
    elif bytes >= 1073741824:
        size, srepr = bytes / 1073741824, "GB"
    elif bytes >= 1048576:
        size, srepr = bytes / 1048576, "MB"
    elif bytes >= 1024:
        size, srepr = bytes / 1024, "KB"
    else:
        size, srepr = bytes, " bytes"
    return "%d%s" % (math.ceil(size), srepr)
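A usage sketch; math.ceil means the displayed size always rounds up:

convert_bytes(1536)   # '2KB': 1.5 KB rounds up
convert_bytes(500)    # '500 bytes'
convert_bytes(10**9)  # '954MB': just under the 1073741824-byte GB threshold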
Example #28
Source File: models.py From pinax-documents with MIT License
def percentage(self):
    return int(math.ceil((float(self.bytes_used) / self.bytes_total) * 100))
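Worked arithmetic under assumed values: with bytes_used = 1 and bytes_total = 3, the quota reads 34 percent, so usage is never under-reported:

import math

int(math.ceil((1.0 / 3) * 100))  # 34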
Example #29
Source File: audio_transfer_learning.py From sklearn-audio-transfer-learning with ISC License
def extract_features_wrapper(paths, path2gt, model='vggish', save_as=False):
    """Wrapper function for extracting features (MusiCNN, VGGish or OpenL3) per batch.
       If a save_as string argument is passed, the features will be saved in
       the specified file.
    """
    if model == 'vggish':
        feature_extractor = extract_vggish_features
    elif model == 'openl3' or model == 'musicnn':
        feature_extractor = extract_other_features
    else:
        raise NotImplementedError('Current implementation only supports MusiCNN, VGGish and OpenL3 features')

    batch_size = config['batch_size']
    first_batch = True
    for batch_id in tqdm(range(ceil(len(paths) / batch_size))):
        batch_paths = paths[(batch_id) * batch_size:(batch_id + 1) * batch_size]
        [x, y, refs] = feature_extractor(batch_paths, path2gt, model)
        if first_batch:
            [X, Y, IDS] = [x, y, refs]
            first_batch = False
        else:
            X = np.concatenate((X, x), axis=0)
            Y = np.concatenate((Y, y), axis=0)
            IDS = np.concatenate((IDS, refs), axis=0)

    if save_as:  # save data to file
        # create a directory where to store the extracted training features
        audio_representations_folder = DATA_FOLDER + 'audio_representations/'
        if not os.path.exists(audio_representations_folder):
            os.makedirs(audio_representations_folder)
        np.savez(audio_representations_folder + save_as, X=X, Y=Y, IDS=IDS)
        print('Audio features stored: ', save_as)

    return [X, Y, IDS]
Example #30
Source File: __init__.py From Random-Erasing with Apache License 2.0
def eta(self):
    return int(ceil(self.avg * self.remaining))
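A closing sketch of the same rounding-up idea, with assumed progress-bar state: an average of 0.4 seconds per step and 12 steps remaining report 5 seconds left rather than truncating to 4:

from math import ceil

int(ceil(0.4 * 12))  # 5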