Python numpy.ceil() Examples

The following are 30 code examples of numpy.ceil(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the numpy module, or try the search function.
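
As a quick refresher before the project examples: numpy.ceil() rounds each element up to the nearest integer but returns a float array, so results are almost always cast with int() or .astype(int) before being used as counts, sizes, or indices. A minimal standalone sketch (illustrative only, not taken from any project below):

import numpy as np

x = np.array([-1.7, -0.2, 0.2, 1.5, 2.0])
print(np.ceil(x))  # [-1. -0.  1.  2.  2.] -- note the float dtype

# the single most common pattern in the examples below: round a division up
n_batches = int(np.ceil(100 / 32.0))
print(n_batches)  # 4 -- three full batches of 32 plus one partial batch of 4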

Example #1
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License (8 votes)

def get_deep_representations(model, X, batch_size=256):
    """
    TODO
    :param model:
    :param X:
    :param batch_size:
    :return:
    """
    # last hidden layer is always at index -4
    output_dim = model.layers[-4].output.shape[-1].value
    get_encoding = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-4].output]
    )

    n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
    output = np.zeros(shape=(len(X), output_dim))
    for i in range(n_batches):
        output[i * batch_size:(i + 1) * batch_size] = \
            get_encoding([X[i * batch_size:(i + 1) * batch_size], 0])[0]
    return output
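
The int(np.ceil(...)) on the batch count is the standard way to make sure a final partial batch is still processed; numpy's slice clipping means the last slice simply comes up short. A minimal sketch of just that pattern, with made-up sizes rather than the project's data:

import numpy as np

n_samples, batch_size = 1000, 256
n_batches = int(np.ceil(n_samples / float(batch_size)))  # 4, not 3
for i in range(n_batches):
    lo, hi = i * batch_size, (i + 1) * batch_size
    # numpy clips the final slice 768:1024 down to 768:1000 automatically
    print(lo, min(hi, n_samples))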
Example #2
Source File: tile_images.py From HardRLWithYoutube with MIT License (6 votes)

def tile_images(img_nhwc):
    """
    Tile N images into one big PxQ image
    (P,Q) are chosen to be as close as possible, and if N
    is square, then P=Q.

    input: img_nhwc, list or array of images, ndim=4 once turned into array
        n = batch index, h = height, w = width, c = channel
    returns:
        bigim_HWc, ndarray with ndim=3
    """
    img_nhwc = np.asarray(img_nhwc)
    N, h, w, c = img_nhwc.shape
    H = int(np.ceil(np.sqrt(N)))
    W = int(np.ceil(float(N) / H))
    img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0] * 0 for _ in range(N, H * W)])
    img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
    img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
    img_Hh_Ww_c = img_HhWwc.reshape(H * h, W * w, c)
    return img_Hh_Ww_c
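
H = ceil(sqrt(N)) followed by W = ceil(N / H) is how tile_images picks a near-square grid, padding in at most H*W - N blank tiles. A standalone check of the shapes it produces (illustrative, not project code):

import numpy as np

for N in (1, 10, 16):
    H = int(np.ceil(np.sqrt(N)))
    W = int(np.ceil(float(N) / H))
    print(N, '->', H, 'x', W)  # 1 -> 1 x 1, 10 -> 4 x 3 (two blank tiles), 16 -> 4 x 4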
Example #3
Source File: mnist_projector_generate.py From deep-learning-note with MIT License (6 votes)

def create_sprite_image(images):
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # the sprite is one big square image tiled from all the small images
    m = int(np.ceil(np.sqrt(images.shape[0])))

    # initialize the final big image with all ones
    sprite_image = np.ones((img_h * m, img_w * m))

    for i in range(m):
        for j in range(m):
            # compute the index of the current small image
            cur = i * m + j
            if cur < images.shape[0]:
                # copy the small image into the final sprite image
                sprite_image[i * img_h:(i + 1) * img_h, j * img_w:(j + 1) * img_w] = images[cur]

    return sprite_image

# load the mnist data with one_hot=False, so each label is a single digit rather than a vector
Example #4
Source File: vaegan_mxnet.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 (6 votes)

def visual(title, X, activation):
    '''create a grid of images and save it as a final image
    title : grid image name
    X : array of images
    '''
    assert len(X.shape) == 4

    X = X.transpose((0, 2, 3, 1))
    if activation == 'sigmoid':
        X = np.clip((X) * (255.0), 0, 255).astype(np.uint8)
    elif activation == 'tanh':
        X = np.clip((X + 1.0) * (255.0 / 2.0), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n * X.shape[1]), int(n * X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    cv2.imwrite('%s.jpg' % (title), buff)
Example #5
Source File: tlib.py From TOPFARM with GNU Affero General Public License v3.0 (6 votes)

def move(self, P0, P1):
    """
    Move a point P0 to a new legal location

    :param P0: ndarray[2]
    :param P1: ndarray[2]
    :return: ndarray[2]
    """
    x_dist, y_dist = P1 - P0
    tdist = np.sqrt(y_dist**2 + x_dist**2)
    if self.is_in(P1):
        return P1
    else:
        x_steps = int(np.sign(x_dist) * np.ceil(abs(x_dist / self.dx)))  #, self.max_step
        y_steps = int(np.sign(y_dist) * np.ceil(abs(y_dist / self.dy)))  #, self.max_step
        i0, j0 = self.locate_ij(P0)
        P2 = self.locate_xy(i0, j0)
        P_off = P2 - P0
        self.loop_i = 0
        i1, j1 = self.valid_move(i0, j0, x_steps, y_steps, P_off)
        P2 = self.locate_xy(i1, j1) + P_off
        return P2
Example #6
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 (6 votes)

def curve_length(self, start=None, end=None, precision=0.01):
    '''
    Calculates the length of the curve by dividing the curve up
    into pieces of parameterized-length <precision>.
    '''
    if start is None: start = self.t[0]
    if end is None: end = self.t[-1]
    from scipy import interpolate
    if self.order == 1:
        # we just want to add up along the steps...
        ii = [ii for (ii, t) in enumerate(self.t) if start < t and t < end]
        ts = np.concatenate([[start], self.t[ii], [end]])
        xy = np.vstack([[self(start)], self.coordinates[:, ii].T, [self(end)]])
        return np.sum(np.sqrt(np.sum((xy[1:] - xy[:-1])**2, axis=1)))
    else:
        t = np.linspace(start, end, int(np.ceil((end - start) / precision)))
        dt = t[1] - t[0]
        dx = interpolate.splev(t, self.splrep[0], der=1)
        dy = interpolate.splev(t, self.splrep[1], der=1)
        return np.sum(np.sqrt(dx**2 + dy**2)) * dt
Example #7
Source File: group_sampler.py From mmdetection with Apache License 2.0 (6 votes)

def __iter__(self):
    indices = []
    for i, size in enumerate(self.group_sizes):
        if size == 0:
            continue
        indice = np.where(self.flag == i)[0]
        assert len(indice) == size
        np.random.shuffle(indice)
        num_extra = int(np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu - len(indice)
        indice = np.concatenate(
            [indice, np.random.choice(indice, num_extra)])
        indices.append(indice)
    indices = np.concatenate(indices)
    indices = [
        indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
        for i in np.random.permutation(
            range(len(indices) // self.samples_per_gpu))
    ]
    indices = np.concatenate(indices)
    indices = indices.astype(np.int64).tolist()
    assert len(indices) == self.num_samples
    return iter(indices)
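
Here int(np.ceil(size / samples_per_gpu)) * samples_per_gpu rounds each group up to a whole number of per-GPU batches, and the shortfall is filled by re-sampling existing indices. The rounding in isolation (made-up numbers, assuming Python 3's true division as in mmdetection):

import numpy as np

size, samples_per_gpu = 10, 4
padded = int(np.ceil(size / samples_per_gpu)) * samples_per_gpu
num_extra = padded - size
print(padded, num_extra)  # 12 2 -- two indices get duplicated to fill the last batch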
Example #8
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0 (6 votes)

def plot_images(imgs, targets, paths=None, fname='images.jpg'):
    # Plots training images overlaid with targets
    imgs = imgs.cpu().numpy()
    targets = targets.cpu().numpy()
    # targets = targets[targets[:, 1] == 21]  # plot only one class

    fig = plt.figure(figsize=(10, 10))
    bs, _, h, w = imgs.shape  # batch size, _, height, width
    bs = min(bs, 16)  # limit plot to 16 images
    ns = np.ceil(bs ** 0.5)  # number of subplots

    for i in range(bs):
        boxes = xywh2xyxy(targets[targets[:, 0] == i, 2:6]).T
        boxes[[0, 2]] *= w
        boxes[[1, 3]] *= h
        plt.subplot(ns, ns, i + 1).imshow(imgs[i].transpose(1, 2, 0))
        plt.plot(boxes[[0, 2, 2, 0, 0]], boxes[[1, 1, 3, 3, 1]], '.-')
        plt.axis('off')
        if paths is not None:
            s = Path(paths[i]).name
            plt.title(s[:min(len(s), 40)], fontdict={'size': 8})  # limit to 40 characters

    fig.tight_layout()
    fig.savefig(fname, dpi=200)
    plt.close()
Example #9
Source File: misc.py From disentangling_conditional_gans with MIT License (6 votes)

def create_image_grid(images, grid_size=None):
    assert images.ndim == 3 or images.ndim == 4
    num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]

    if grid_size is not None:
        grid_w, grid_h = tuple(grid_size)
    else:
        grid_w = max(int(np.ceil(np.sqrt(num))), 1)
        grid_h = max((num - 1) // grid_w + 1, 1)

    grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
    for idx in range(num):
        x = (idx % grid_w) * img_w
        y = (idx // grid_w) * img_h
        grid[..., y : y + img_h, x : x + img_w] = images[idx]
    return grid
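
Note that grid_h here uses (num - 1) // grid_w + 1, the pure-integer "ceil division" idiom, which for positive ints agrees with int(np.ceil(num / grid_w)) without a float round trip. A quick standalone check (not project code):

import numpy as np

for num, grid_w in [(10, 4), (16, 4), (1, 3)]:
    assert (num - 1) // grid_w + 1 == int(np.ceil(num / grid_w))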
Example #10
Source File: utils.py From DeepLung with GNU General Public License v3.0 (6 votes)

def split4(data, max_stride, margin):
    splits = []
    data = torch.Tensor.numpy(data)
    _, c, z, h, w = data.shape

    w_width = np.ceil(float(w / 2 + margin) / max_stride).astype('int') * max_stride
    h_width = np.ceil(float(h / 2 + margin) / max_stride).astype('int') * max_stride
    pad = int(np.ceil(float(z) / max_stride) * max_stride) - z
    leftpad = pad // 2  # integer division so np.pad gets an int; the original relied on Python 2's "/"
    pad = [[0, 0], [0, 0], [leftpad, pad - leftpad], [0, 0], [0, 0]]
    data = np.pad(data, pad, 'constant', constant_values=-1)
    data = torch.from_numpy(data)

    splits.append(data[:, :, :, :h_width, :w_width])
    splits.append(data[:, :, :, :h_width, -w_width:])
    splits.append(data[:, :, :, -h_width:, :w_width])
    splits.append(data[:, :, :, -h_width:, -w_width:])

    return torch.cat(splits, 0)
Example #11
Source File: utils.py From DeepLung with GNU General Public License v3.0 (6 votes)

def split8(data, max_stride, margin):
    splits = []
    if isinstance(data, np.ndarray):
        c, z, h, w = data.shape
    else:
        _, c, z, h, w = data.size()

    z_width = np.ceil(float(z / 2 + margin) / max_stride).astype('int') * max_stride
    w_width = np.ceil(float(w / 2 + margin) / max_stride).astype('int') * max_stride
    h_width = np.ceil(float(h / 2 + margin) / max_stride).astype('int') * max_stride
    for zz in [[0, z_width], [-z_width, None]]:
        for hh in [[0, h_width], [-h_width, None]]:
            for ww in [[0, w_width], [-w_width, None]]:
                if isinstance(data, np.ndarray):
                    splits.append(data[np.newaxis, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])
                else:
                    splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])

    if isinstance(data, np.ndarray):
        return np.concatenate(splits, 0)
    else:
        return torch.cat(splits, 0)
Example #12
Source File: utils.py From DeepLung with GNU General Public License v3.0 (6 votes)

def split32(data, max_stride, margin):
    splits = []
    _, c, z, h, w = data.size()

    z_width = np.ceil(float(z / 2 + margin) / max_stride).astype('int') * max_stride
    w_width = np.ceil(float(w / 4 + margin) / max_stride).astype('int') * max_stride
    h_width = np.ceil(float(h / 4 + margin) / max_stride).astype('int') * max_stride

    # integer division keeps the slice indices ints; the original relied on Python 2's "/"
    w_pos = [w * 3 // 8 - w_width // 2, w * 5 // 8 - w_width // 2]
    h_pos = [h * 3 // 8 - h_width // 2, h * 5 // 8 - h_width // 2]

    for zz in [[0, z_width], [-z_width, None]]:
        for hh in [[0, h_width], [h_pos[0], h_pos[0] + h_width], [h_pos[1], h_pos[1] + h_width], [-h_width, None]]:
            for ww in [[0, w_width], [w_pos[0], w_pos[0] + w_width], [w_pos[1], w_pos[1] + w_width], [-w_width, None]]:
                splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])

    return torch.cat(splits, 0)
Example #13
Source File: visualization.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License (6 votes)

def _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, font, color='black', thickness=4):
    draw = ImageDraw.Draw(image)
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    draw.line([(left, top), (left, bottom), (right, bottom),
               (right, top), (left, top)], width=thickness, fill=color)
    text_bottom = bottom
    # Reverse list and print from bottom to top.
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    draw.rectangle(
        [(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)],
        fill=color)
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        display_str,
        fill='black',
        font=font)
    return image
Example #14
Source File: utils.py From DeepLung with GNU General Public License v3.0 (6 votes)

def split64(data, max_stride, margin):
    splits = []
    _, c, z, h, w = data.size()

    z_width = np.ceil(float(z / 4 + margin) / max_stride).astype('int') * max_stride
    w_width = np.ceil(float(w / 4 + margin) / max_stride).astype('int') * max_stride
    h_width = np.ceil(float(h / 4 + margin) / max_stride).astype('int') * max_stride

    # integer division keeps the slice indices ints; the original relied on Python 2's "/"
    z_pos = [z * 3 // 8 - z_width // 2, z * 5 // 8 - z_width // 2]
    w_pos = [w * 3 // 8 - w_width // 2, w * 5 // 8 - w_width // 2]
    h_pos = [h * 3 // 8 - h_width // 2, h * 5 // 8 - h_width // 2]

    for zz in [[0, z_width], [z_pos[0], z_pos[0] + z_width], [z_pos[1], z_pos[1] + z_width], [-z_width, None]]:
        for hh in [[0, h_width], [h_pos[0], h_pos[0] + h_width], [h_pos[1], h_pos[1] + h_width], [-h_width, None]]:
            for ww in [[0, w_width], [w_pos[0], w_pos[0] + w_width], [w_pos[1], w_pos[1] + w_width], [-w_width, None]]:
                splits.append(data[:, :, zz[0]:zz[1], hh[0]:hh[1], ww[0]:ww[1]])

    return torch.cat(splits, 0)
Example #15
Source File: discriminator.py From SSGAN-Tensorflow with MIT License (6 votes)

def __call__(self, input):
    with tf.variable_scope(self.name, reuse=self._reuse):
        if not self._reuse:
            print('\033[93m' + self.name + '\033[0m')
        _ = input
        num_channel = [32, 64, 128, 256, 256, 512]
        num_layer = np.ceil(np.log2(min(_.shape.as_list()[1:3]))).astype(np.int)
        for i in range(num_layer):
            ch = num_channel[i] if i < len(num_channel) else 512
            _ = conv2d(_, ch, self._is_train, info=not self._reuse,
                       norm=self._norm_type, name='conv{}'.format(i + 1))
        _ = conv2d(_, int(num_channel[i] / 4), self._is_train, k=1, s=1,
                   info=not self._reuse, norm='None', name='conv{}'.format(i + 2))
        _ = conv2d(_, self._num_class + 1, self._is_train, k=1, s=1,
                   info=not self._reuse, activation_fn=None, norm='None',
                   name='conv{}'.format(i + 3))
        _ = tf.squeeze(_)
        if not self._reuse:
            log.info('discriminator output {}'.format(_.shape.as_list()))
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        return tf.nn.sigmoid(_), _
Example #16
Source File: __init__.py From Automatic-Identification-and-Counting-of-Blood-Cells with GNU General Public License v3.0 (5 votes)

def constructor(self, meta, FLAGS):

    def _to_color(indx, base):
        """ return (b, r, g) tuple """
        base2 = base * base
        b = 2 - indx / base2
        r = 2 - (indx % base2) / base
        g = 2 - (indx % base2) % base
        return (b * 127, r * 127, g * 127)

    if 'labels' not in meta:
        misc.labels(meta, FLAGS)  # we're not loading from a .pb so we do need to load the labels
    assert len(meta['labels']) == meta['classes'], (
        'labels.txt and {} indicate' + ' '
        'inconsistent class numbers'
    ).format(meta['model'])

    # assign a color for each label
    colors = list()
    base = int(np.ceil(pow(meta['classes'], 1. / 3)))
    for x in range(len(meta['labels'])):
        colors += [_to_color(x, base)]
    meta['colors'] = colors
    self.fetch = list()
    self.meta, self.FLAGS = meta, FLAGS

    # over-ride the threshold in meta if FLAGS has it.
    if FLAGS.threshold > 0.0:
        self.meta['thresh'] = FLAGS.threshold
Example #17
Source File: test_librosa_compatibility.py From audio with BSD 2-Clause "Simplified" License (5 votes)

def test_phase_vocoder(complex_specgrams, rate, hop_length):
    # Due to the cumulative sum, numerical error when using torch.float32 causes
    # the bottom-right values of the stretched spectrogram to not match librosa.
    complex_specgrams = complex_specgrams.type(torch.float64)
    phase_advance = torch.linspace(0, np.pi * hop_length,
                                   complex_specgrams.shape[-3], dtype=torch.float64)[..., None]

    complex_specgrams_stretch = F.phase_vocoder(complex_specgrams, rate=rate, phase_advance=phase_advance)

    # == Test shape
    expected_size = list(complex_specgrams.size())
    expected_size[-2] = int(np.ceil(expected_size[-2] / rate))

    assert complex_specgrams.dim() == complex_specgrams_stretch.dim()
    assert complex_specgrams_stretch.size() == torch.Size(expected_size)

    # == Test values
    index = [0] * (complex_specgrams.dim() - 3) + [slice(None)] * 3
    mono_complex_specgram = complex_specgrams[index].numpy()
    mono_complex_specgram = mono_complex_specgram[..., 0] + \
        mono_complex_specgram[..., 1] * 1j
    expected_complex_stretch = librosa.phase_vocoder(mono_complex_specgram,
                                                     rate=rate,
                                                     hop_length=hop_length)

    complex_stretch = complex_specgrams_stretch[index].numpy()
    complex_stretch = complex_stretch[..., 0] + 1j * complex_stretch[..., 1]

    assert np.allclose(complex_stretch, expected_complex_stretch, atol=1e-5)
Example #18
Source File: plot.py From HardRLWithYoutube with MIT License (5 votes)

def smooth_reward_curve(x, y):
    halfwidth = int(np.ceil(len(x) / 60))  # Halfwidth of our smoothing convolution
    k = halfwidth
    xsmoo = x
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / \
        np.convolve(np.ones_like(y), np.ones(2 * k + 1), mode='same')
    return xsmoo, ysmoo
Example #19
Source File: KeplerLike1.py From EXOSIMS with BSD 3-Clause "New" or "Revised" License (5 votes)

def gen_radius(self, n):
    """Generate planetary radius values in Earth radius

    Samples a radius distribution defined as log-uniform in each of 9 radius bins
    with fixed occurrence rates.

    Args:
        n (integer):
            Number of samples to generate

    Returns:
        astropy Quantity array:
            Planet radius values in units of Earth radius

    """
    n = self.gen_input_check(n)
    # get number of samples per bin
    nsamp = np.ceil(n * self.Rvals / np.sum(self.Rvals)).astype(int)

    # generate random radii in each bin
    logRs = np.log(self.Rs)
    Rp = np.concatenate([np.exp(np.random.uniform(low=logRs[j], high=logRs[j + 1],
                                                  size=nsamp[j])) for j in range(len(self.Rvals))])

    # select n random elements from Rp
    ind = np.random.choice(len(Rp), size=n, replace=len(Rp) < n)
    Rp = Rp[ind]

    return Rp * u.earthRad
Example #20
Source File: train_deeplab3D.py From pytorch-mri-segmentation-3D with MIT License (5 votes)

def outS(i):
    """Given the shape of the input image as i,i,3 in the deeplab-resnet model,
    this function returns j such that the shape of the output blob is j,j,21
    """
    j = int(i)
    j = (j + 1) // 2  # integer division; the original relied on Python 2's "/"
    j = int(np.ceil((j + 1) / 2.0))
    j = (j + 1) // 2
    return j
Example #21
Source File: functional.py From torch-toolbox with BSD 3-Clause "New" or "Revised" License (5 votes)

def poisson_noise(img):
    imgtype = img.dtype
    img = img.astype(np.float32) / 255.0
    vals = len(np.unique(img))
    vals = 2 ** np.ceil(np.log2(vals))
    noisy = 255 * \
        np.clip(np.random.poisson(img.astype(np.float32) * vals) / float(vals), 0, 1)
    return noisy.astype(imgtype)
Example #22
Source File: plot.py From lirpg with MIT License (5 votes)

def smooth_reward_curve(x, y):
    halfwidth = int(np.ceil(len(x) / 60))  # Halfwidth of our smoothing convolution
    k = halfwidth
    xsmoo = x
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / \
        np.convolve(np.ones_like(y), np.ones(2 * k + 1), mode='same')
    return xsmoo, ysmoo
Example #23
Source File: roi_pool_py.py From cascade-rcnn_Pytorch with MIT License (5 votes)

def forward(self, features, rois):
    batch_size, num_channels, data_height, data_width = features.size()
    num_rois = rois.size()[0]
    outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

    for roi_ind, roi in enumerate(rois):
        batch_ind = int(roi[0].data[0])
        roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
            roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
        roi_width = max(roi_end_w - roi_start_w + 1, 1)
        roi_height = max(roi_end_h - roi_start_h + 1, 1)
        bin_size_w = float(roi_width) / float(self.pooled_width)
        bin_size_h = float(roi_height) / float(self.pooled_height)

        for ph in range(self.pooled_height):
            hstart = int(np.floor(ph * bin_size_h))
            hend = int(np.ceil((ph + 1) * bin_size_h))
            hstart = min(data_height, max(0, hstart + roi_start_h))
            hend = min(data_height, max(0, hend + roi_start_h))
            for pw in range(self.pooled_width):
                wstart = int(np.floor(pw * bin_size_w))
                wend = int(np.ceil((pw + 1) * bin_size_w))
                wstart = min(data_width, max(0, wstart + roi_start_w))
                wend = min(data_width, max(0, wend + roi_start_w))

                is_empty = (hend <= hstart) or (wend <= wstart)
                if is_empty:
                    outputs[roi_ind, :, ph, pw] = 0
                else:
                    data = features[batch_ind]
                    outputs[roi_ind, :, ph, pw] = torch.max(
                        torch.max(data[:, hstart:hend, wstart:wend], 1)[0], 2)[0].view(-1)

    return outputs
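
The floor/ceil pair is what makes the pooling bins cover every ROI pixel: each bin starts at floor(pw * bin_size) and ends at ceil((pw + 1) * bin_size), so adjacent bins may overlap by a pixel but never leave a gap. A small standalone trace (made-up sizes, not the project's tensors):

import numpy as np

roi_width, pooled_width = 7, 2
bin_size = float(roi_width) / pooled_width  # 3.5
for pw in range(pooled_width):
    wstart = int(np.floor(pw * bin_size))
    wend = int(np.ceil((pw + 1) * bin_size))
    print(pw, (wstart, wend))  # 0 (0, 4) then 1 (3, 7): pixel 3 is shared, none are missed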
Example #24
Source File: roibatchLoader.py From cascade-rcnn_Pytorch with MIT License (5 votes)

def __init__(self, roidb, ratio_list, ratio_index, batch_size, num_classes, training=True, normalize=None):
    self._roidb = roidb
    self._num_classes = num_classes
    # we make the height of the image consistent with trim_height, trim_width
    self.trim_height = cfg.TRAIN.TRIM_HEIGHT
    self.trim_width = cfg.TRAIN.TRIM_WIDTH
    self.max_num_box = cfg.MAX_NUM_GT_BOXES
    self.training = training
    self.normalize = normalize
    self.ratio_list = ratio_list
    self.ratio_index = ratio_index
    self.batch_size = batch_size
    self.data_size = len(self.ratio_list)

    # given the ratio_list, we want to make the ratio the same for each batch.
    self.ratio_list_batch = torch.Tensor(self.data_size).zero_()
    num_batch = int(np.ceil(len(ratio_index) / batch_size))
    for i in range(num_batch):
        left_idx = i * batch_size
        right_idx = min((i + 1) * batch_size - 1, self.data_size - 1)

        if ratio_list[right_idx] < 1:
            # for ratio < 1, we preserve the leftmost in each batch.
            target_ratio = ratio_list[left_idx]
        elif ratio_list[left_idx] > 1:
            # for ratio > 1, we preserve the rightmost in each batch.
            target_ratio = ratio_list[right_idx]
        else:
            # for ratios crossing 1, we make it 1.
            target_ratio = 1

        self.ratio_list_batch[left_idx:(right_idx + 1)] = target_ratio
Example #25
Source File: kaggle_mnist_input.py From tensorflow-alexnet with MIT License (5 votes)

def load_mnist_train(validation_size=2000, batch_size=128):
    download_train()

    data = pd.read_csv(FLAGS.train_path)

    images = data.iloc[:, 1:].values
    images = images.astype(np.float)
    images = np.multiply(images, 1.0 / 255.0)

    image_size = images.shape[1]
    image_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8)
    images = images.reshape(-1, image_width, image_height, 1)

    labels_flat = data[[0]].values.ravel()
    labels_count = np.unique(labels_flat).shape[0]
    labels = dense_to_one_hot(labels_flat, labels_count)
    labels = labels.astype(np.uint8)

    validation_images = images[:validation_size]
    validation_labels = labels[:validation_size]
    train_images = images[validation_size:]
    train_labels = labels[validation_size:]

    # list() so the ranges can be appended to under Python 3 (zip returns an iterator there)
    train_range = list(zip(range(0, len(train_images), batch_size),
                           range(batch_size, len(train_images), batch_size)))
    if len(train_images) % batch_size > 0:
        train_range.append((train_range[-1][1], len(train_images)))

    validation_indices = np.arange(len(validation_images))

    return train_images, train_labels, train_range, validation_images, validation_labels, validation_indices
Example #26
Source File: tlib.py From TOPFARM with GNU Affero General Public License v3.0 (5 votes)

def locate_ij(self, P):
    """
    Find the closest control point to the reference point P

    :param P: tuple, list or array
        The reference point
    :return: [i, j] indices in self.positions
    """
    a = np.argmin([(x - P[0])**2 + (y - P[1])**2 for x, y in self.positions])
    return int(np.ceil(a / self.nx)), a % self.nx
Example #27
Source File: tlib.py From TOPFARM with GNU Affero General Public License v3.0 (5 votes)

def init_values(self):
    """
    Initialise the basic information about the domain
    """
    self.x_min = self.polygon[:, 0].min()
    self.x_max = self.polygon[:, 0].max()
    self.y_min = self.polygon[:, 1].min()
    self.y_max = self.polygon[:, 1].max()
    self.lx = self.x_max - self.x_min
    self.ly = self.y_max - self.y_min
    self.nx = int(np.ceil(self.lx / self.dx))
    self.ny = int(np.ceil(self.ly / self.dy))
Example #28
Source File: trainer.py From ACAN with MIT License (5 votes)

def train(self):
    """Do training, you can overload this function according to your need."""
    self.print("Log dir: {}".format(self.logdir))

    # Calculate total step
    self.n_train = len(self.trainset)
    self.steps_per_epoch = np.ceil(self.n_train / self.batch_size).astype(np.int32)
    self.verbose = min(self.verbose, self.steps_per_epoch)
    self.n_steps = self.max_epochs * self.steps_per_epoch

    # calculate model parameters memory
    para = sum([np.prod(list(p.size())) for p in self.net.parameters()])
    memory = para * 4 / 1000 / 1000
    self.print('Model {} : params: {:4f}M'.format(self.net._get_name(), memory))
    self.print('###### Experiment Parameters ######')
    for k, v in self.params.items():
        self.print('{0:<22s} : {1:}'.format(k, v))
    self.print("{0:<22s} : {1:} ".format('trainset sample', self.n_train))

    # GO!!!!!!!!!
    start_time = time.time()
    self.train_total_time = 0
    self.time_sofar = 0
    for epoch in range(self.start_epoch, self.max_epochs + 1):
        # Decay Learning Rate
        self.scheduler.step()
        # Train one epoch
        total_loss = self.train_epoch(epoch)
        torch.cuda.empty_cache()
        # Evaluate the model
        if self.eval_freq and epoch % self.eval_freq == 0:
            acc = self.eval(epoch)
            torch.cuda.empty_cache()

    self.print("Finished training! Best epoch {} best acc {}".format(self.best_epoch, self.best_acc))
    self.print("Spend time: {:.2f}h".format((time.time() - start_time) / 3600))
Example #29
Source File: depthest_trainer.py From ACAN with MIT License (5 votes)

def train(self):
    torch.backends.cudnn.benchmark = True
    if self.logdir:
        self.writer = SummaryWriter(self.logdir)
    else:
        raise Exception("Log dir doesn't exist!")

    # Calculate total step
    self.n_train = len(self.trainset)
    self.steps_per_epoch = np.ceil(self.n_train / self.batch_size).astype(np.int32)
    self.verbose = min(self.verbose, self.steps_per_epoch)
    self.n_steps = self.max_epochs * self.steps_per_epoch
    self.print("{0:<22s} : {1:} ".format('trainset sample', self.n_train))

    # calculate model parameters memory
    para = sum([np.prod(list(p.size())) for p in self.net.parameters()])
    memory = para * 4 / (1024**2)
    self.print('Model {} : params: {:,}, Memory {:.3f}MB'.format(self.net._get_name(), para, memory))

    # GO!!!!!!!!!
    start_time = time.time()
    self.train_total_time = 0
    self.time_sofar = 0
    for epoch in range(self.start_epoch, self.max_epochs + 1):
        # Train one epoch
        total_loss = self.train_epoch(epoch)
        torch.cuda.empty_cache()
        # Decay Learning Rate
        if self.params.scheduler in ['step', 'plateau']:
            self.scheduler.step()
        # Evaluate the model
        if self.eval_freq and epoch % self.eval_freq == 0:
            measures = self.eval(epoch)
            torch.cuda.empty_cache()
            for k in sorted(list(measures.keys())):
                self.writer.add_scalar(k, measures[k], epoch)

    self.print("Finished training! Best epoch {} best acc {:.4f}".format(self.best_epoch, self.best_acc))
    self.print("Spend time: {:.2f}h".format((time.time() - start_time) / 3600))
    net_type = type(self.net).__name__
    best_pkl = os.path.join(self.logdir, '{}_{:03d}.pkl'.format(net_type, self.best_epoch))
    modify = os.path.join(self.logdir, 'best.pkl')
    shutil.copyfile(best_pkl, modify)
    return
Example #30
Source File: generator.py From SSGAN-Tensorflow with MIT License (5 votes)

def __call__(self, input):
    if self._deconv_type == 'bilinear':
        from ops import bilinear_deconv2d as deconv2d
    elif self._deconv_type == 'nn':
        from ops import nn_deconv2d as deconv2d
    elif self._deconv_type == 'transpose':
        from ops import deconv2d
    else:
        raise NotImplementedError
    with tf.variable_scope(self.name, reuse=self._reuse):
        if not self._reuse:
            print('\033[93m' + self.name + '\033[0m')
        _ = tf.reshape(input, [input.get_shape().as_list()[0], 1, 1, -1])
        _ = fc(_, 1024, self._is_train, info=not self._reuse, norm='None', name='fc')
        for i in range(int(np.ceil(np.log2(max(self._h, self._w))))):
            _ = deconv2d(_, max(self._c, int(_.get_shape().as_list()[-1] / 2)),
                         self._is_train, info=not self._reuse, norm=self._norm_type,
                         name='deconv{}'.format(i + 1))
        _ = deconv2d(_, self._c, self._is_train, k=1, s=1, info=not self._reuse,
                     activation_fn=tf.tanh, norm='None', name='deconv{}'.format(i + 2))
        _ = tf.image.resize_bilinear(_, [self._h, self._w])
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        return _