Python numpy.round() Examples
The following are 30 code examples of numpy.round(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
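Before diving in, note that numpy.round() rounds halfway cases to the nearest even value ("banker's rounding") rather than away from zero, and takes an optional decimals argument. A minimal sketch of this behavior:

import numpy as np

# np.round rounds ties to the nearest even value ("banker's rounding"),
# so 0.5 and 2.5 both round down while 1.5 rounds up.
print(np.round([0.5, 1.5, 2.5]))          # [0. 2. 2.]

# The decimals argument rounds to a given number of decimal places;
# negative values round to tens, hundreds, etc.
print(np.round(3.14159, 2))               # 3.14
print(np.round([1234.5678], -2))          # [1200.]

# The result keeps the input's float dtype; cast explicitly for integers.
print(np.round(2.7).astype(np.int64))     # 3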
Example #1
Source File: timestepper.py From pywr with GNU General Public License v3.0 | 6 votes |
def next(self, ):
    self._current = current = self._next

    if current.index >= len(self._periods):
        raise StopIteration()

    # Increment to next timestep
    next_index = current.index + 1
    if next_index >= len(self._periods):
        # The final time-step is one offset beyond the end of the model.
        # Here we compute its delta and create the object.
        final_period = current.period + self.offset
        delta = final_period.end_time - final_period.start_time
        delta = np.round(delta.total_seconds())
        delta = delta / SECONDS_IN_DAY
        self._next = _core.Timestep(final_period, next_index, delta)
    else:
        self._next = _core.Timestep(self._periods[next_index], next_index,
                                    self._deltas[next_index])

    # Return this timestep
    return current
Example #2
Source File: RegNet2020.py From Pytorch-Networks with MIT License | 6 votes |
def forward(self, x):
    for layer in self.children():
        x = layer(x)
    return x

# @staticmethod
# def complexity(cx, w_in, w_out, stride, bm, gw, se_r):
#     w_b = int(round(w_out * bm))
#     g = w_b // gw
#     cx = net.complexity_conv2d(cx, w_in, w_b, 1, 1, 0)
#     cx = net.complexity_batchnorm2d(cx, w_b)
#     cx = net.complexity_conv2d(cx, w_b, w_b, 3, stride, 1, g)
#     cx = net.complexity_batchnorm2d(cx, w_b)
#     if se_r:
#         w_se = int(round(w_in * se_r))
#         cx = SE.complexity(cx, w_b, w_se)
#     cx = net.complexity_conv2d(cx, w_b, w_out, 1, 1, 0)
#     cx = net.complexity_batchnorm2d(cx, w_out)
#     return cx
Example #3
Source File: RegNet2020.py From Pytorch-Networks with MIT License | 6 votes |
def __init__(self, w_in, w_out, stride, bm, gw, se_r):
    super(BottleneckTransform, self).__init__()
    w_b = int(round(w_out * bm))
    g = w_b // gw
    self.a = nn.Conv2d(w_in, w_b, 1, stride=1, padding=0, bias=False)
    self.a_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
    self.a_relu = nn.ReLU(inplace=True)
    self.b = nn.Conv2d(w_b, w_b, 3, stride=stride, padding=1, groups=g, bias=False)
    self.b_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
    self.b_relu = nn.ReLU(inplace=True)
    if se_r:
        w_se = int(round(w_in * se_r))
        self.se = SE(w_b, w_se)
    self.c = nn.Conv2d(w_b, w_out, 1, stride=1, padding=0, bias=False)
    self.c_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
    self.c_bn.final_bn = True
Example #4
Source File: doa.py From FRIDA with MIT License | 6 votes |
def compute_mode(self):
    """
    Pre-compute mode vectors from candidate locations (in spherical
    coordinates).
    """
    if self.num_loc is None:
        raise ValueError('Lookup table appears to be empty. \
            Run build_lookup().')

    self.mode_vec = np.zeros((self.max_bin, self.M, self.num_loc),
                             dtype='complex64')

    if (self.nfft % 2 == 1):
        raise ValueError('Signal length must be even.')
    f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
        * 1j * 2 * np.pi

    for i in range(self.num_loc):
        p_s = self.loc[:, i]
        for m in range(self.M):
            p_m = self.L[:, m]
            if (self.mode == 'near'):
                dist = np.linalg.norm(p_m - p_s, axis=1)
            if (self.mode == 'far'):
                dist = np.dot(p_s, p_m)
            # tau = np.round(self.fs*dist/self.c)  # discrete - jagged
            tau = self.fs * dist / self.c  # "continuous" - smoother
            self.mode_vec[:, m, i] = np.exp(f * tau)
Example #5
Source File: visualization.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 6 votes |
def draw_bounding_boxes(image, gt_boxes, im_info):
    num_boxes = gt_boxes.shape[0]
    gt_boxes_new = gt_boxes.copy()
    gt_boxes_new[:, :4] = np.round(gt_boxes_new[:, :4].copy() / im_info[2])
    disp_image = Image.fromarray(np.uint8(image[0]))

    for i in range(num_boxes):
        this_class = int(gt_boxes_new[i, 4])
        disp_image = _draw_single_box(disp_image,
                                      gt_boxes_new[i, 0],
                                      gt_boxes_new[i, 1],
                                      gt_boxes_new[i, 2],
                                      gt_boxes_new[i, 3],
                                      'N%02d-C%02d' % (i, this_class),
                                      FONT,
                                      color=STANDARD_COLORS[this_class % NUM_COLORS])

    image[0, :] = np.array(disp_image)
    return image
Example #6
Source File: FakeCatalog.py From EXOSIMS with BSD 3-Clause "New" or "Revised" License | 6 votes |
def inverse_method(self, N, d):
    t = np.linspace(1e-3, 0.999, N)
    f = np.log(t / (1 - t))
    f = f / f[0]
    psi = np.pi * f
    cosPsi = np.cos(psi)
    sinTheta = (np.abs(cosPsi) + (1 - np.abs(cosPsi)) * np.random.rand(len(cosPsi)))
    theta = np.arcsin(sinTheta)
    theta = np.pi - theta + (2 * theta - np.pi) * np.round(np.random.rand(len(t)))
    cosPhi = cosPsi / sinTheta
    phi = np.arccos(cosPhi) * (-1) ** np.round(np.random.rand(len(t)))
    coords = SkyCoord(phi * u.rad, (np.pi / 2 - theta) * u.rad,
                      d * np.ones(len(phi)) * u.pc)
    return coords
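One idiom worth calling out in this example: np.round(np.random.rand(n)) yields a random array of 0s and 1s, i.e. a per-element coin flip, which is used here to randomly mirror the sampled angles. A tiny illustration:

import numpy as np

np.random.seed(0)
flips = np.round(np.random.rand(8))   # each entry is 0.0 or 1.0 with ~equal probability
print(flips)                          # e.g. [1. 1. 1. 1. 0. 1. 0. 1.]
signs = (-1) ** flips                 # maps the flips to random +1/-1 signs
print(signs)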
Example #7
Source File: Embed.py From pytorch_NER_BiLSTM_CNN_CRF with Apache License 2.0 | 6 votes |
def _uniform_embed(self, embed_dict, words_dict):
    """
    :param embed_dict:
    :param words_dict:
    """
    print("loading pre_train embedding by uniform for out of vocabulary.")
    embeddings = np.zeros((int(self.words_count), int(self.dim)))
    inword_list = {}
    for word in words_dict:
        if word in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]],
                                                    dtype='float32')
            inword_list[words_dict[word]] = 1
            self.exact_count += 1
        elif word.lower() in embed_dict:
            embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]],
                                                    dtype='float32')
            inword_list[words_dict[word]] = 1
            self.fuzzy_count += 1
        else:
            self.oov_count += 1
    uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6)  # uniform
    for i in range(len(words_dict)):
        if i not in inword_list and i != self.padID:
            embeddings[i] = uniform_col
    final_embed = torch.from_numpy(embeddings).float()
    return final_embed
Example #8
Source File: test.py From StructEngPy with MIT License | 6 votes |
def cantilever_beam_test():
    # FEModel Test
    model = FEModel()
    model.add_node(0, 0, 0)
    model.add_node(2, 1, 1)
    E = 1.999e11
    mu = 0.3
    A = 4.265e-3
    J = 9.651e-8
    I3 = 6.572e-5
    I2 = 3.301e-6
    rho = 7849.0474
    model.add_beam(0, 1, E, mu, A, I2, I3, J, rho)
    model.set_node_force(1, (0, 0, -1e6, 0, 0, 0))
    model.set_node_restraint(0, [True] * 6)
    model.assemble_KM()
    model.assemble_f()
    model.assemble_boundary()
    solve_linear(model)
    print(np.round(model.d_, 6))
    print("The result of node 1 should be about [0.12879,0.06440,-0.32485,-0.09320,0.18639,0]")
Example #9
Source File: net_utils.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def vis_det_and_mask(im, class_name, dets, masks, thresh=0.8):
    """Visual debugging of detections."""
    num_dets = np.minimum(10, dets.shape[0])
    colors_mask = random_colors(num_dets)
    colors_bbox = np.round(np.random.rand(num_dets, 3) * 255)
    # sort rois according to the coordinates, draw upper bbox first
    draw_mask = np.zeros(im.shape[:2], dtype=np.uint8)

    for i in range(1):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        mask = masks[i, :, :]
        full_mask = unmold_mask(mask, bbox, im.shape)

        score = dets[i, -1]
        if score > thresh:
            word_width = len(class_name)
            cv2.rectangle(im, bbox[0:2], bbox[2:4], colors_bbox[i], 2)
            cv2.rectangle(im, bbox[0:2], (bbox[0] + 18 + word_width * 8, bbox[1] + 15),
                          colors_bbox[i], thickness=cv2.FILLED)
            apply_mask(im, full_mask, draw_mask, colors_mask[i], 0.5)
            draw_mask += full_mask
            cv2.putText(im, '%s' % (class_name), (bbox[0] + 5, bbox[1] + 12),
                        cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), thickness=1)
    return im
Example #10
Source File: kalman_filter.py From kalman_filter_multi_object_tracking with MIT License | 6 votes |
def predict(self):
    """Predict state vector u and variance of uncertainty P (covariance).
    where,
        u: previous state vector
        P: previous covariance matrix
        F: state transition matrix
        Q: process noise matrix
    Equations:
        u'_{k|k-1} = Fu'_{k-1|k-1}
        P_{k|k-1} = FP_{k-1|k-1} F.T + Q
        where,
            F.T is F transpose
    Args:
        None
    Return:
        vector of predicted state estimate
    """
    # Predicted state estimate
    self.u = np.round(np.dot(self.F, self.u))
    # Predicted estimate covariance
    self.P = np.dot(self.F, np.dot(self.P, self.F.T)) + self.Q
    self.lastResult = self.u  # same last predicted result
    return self.u
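The docstring equations are the standard Kalman prediction step. Below is a hedged, self-contained numeric sketch of the same two equations using a hypothetical 2-state constant-velocity model (not the tracker's actual matrices):

import numpy as np

dt = 1.0
F = np.array([[1., dt],          # state transition: position advances by velocity
              [0., 1.]])
u = np.array([[10.], [2.]])      # state: position 10, velocity 2
P = np.eye(2)                    # covariance of the current estimate
Q = 0.01 * np.eye(2)             # process noise

u_pred = np.round(np.dot(F, u))          # u'_{k|k-1} = F u'_{k-1|k-1}
P_pred = np.dot(F, np.dot(P, F.T)) + Q   # P_{k|k-1} = F P_{k-1|k-1} F.T + Q
print(u_pred.ravel())                    # [12.  2.]
print(P_pred)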
Example #11
Source File: net_utils.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def vis_detections(im, class_name, dets, thresh=0.8):
    """Visual debugging of detections."""
    for i in range(np.minimum(10, dets.shape[0])):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        score = dets[i, -1]
        if score > thresh:
            cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
            cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15),
                        cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 255), thickness=1)
    return im

# Borrow from matterport mask R-CNN implementation
Example #12
Source File: blob.py From cascade-rcnn_Pytorch with MIT License | 6 votes |
def prep_im_for_blob(im, pixel_means, pixel_stds, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    im /= 255.0
    im -= pixel_means
    im /= pixel_stds
    # im = im[:, :, ::-1]
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    # if np.round(im_scale * im_size_max) > max_size:
    #     im_scale = float(max_size) / float(im_size_max)
    # im = imresize(im, im_scale)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
Example #13
Source File: models.py From neuropythy with GNU Affero General Public License v3.0 | 6 votes |
def cortex_to_angle(self, x, y):
    iterX = hasattr(x, '__iter__')
    iterY = hasattr(y, '__iter__')
    jarr = None
    if iterX and iterY:
        if len(x) != len(y):
            raise RuntimeError('Arguments x and y must be the same length!')
        jarr = self._java_object.cortexToAngle(to_java_doubles(x), to_java_doubles(y))
    elif iterX:
        jarr = self._java_object.cortexToAngle(to_java_doubles(x),
                                               to_java_doubles([y for i in x]))
    elif iterY:
        jarr = self._java_object.cortexToAngle(to_java_doubles([x for i in y]),
                                               to_java_doubles(y))
    else:
        return self._java_object.cortexToAngle(x, y)
    dat = np.asarray([[c for c in r] for r in jarr])
    a = dat[:, 2]
    a = np.round(np.abs(a))
    a[a > 3] = 0
    dat[:, 2] = a
    return dat
Example #14
Source File: box_list_ops_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def test_convert_to_normalized_and_back(self):
    coordinates = np.random.uniform(size=(100, 4))
    coordinates = np.round(np.sort(coordinates) * 200)
    coordinates[:, 2:4] += 1
    coordinates[99, :] = [0, 0, 201, 201]
    img = tf.ones((128, 202, 202, 3))

    boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32))
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(img)[1],
                                                     tf.shape(img)[2])
    boxlist = box_list_ops.to_absolute_coordinates(boxlist,
                                                   tf.shape(img)[1],
                                                   tf.shape(img)[2])

    with self.test_session() as sess:
        out = sess.run(boxlist.get())
        self.assertAllClose(out, coordinates)
Example #15
Source File: nav_env.py From DOTA_models with Apache License 2.0 | 6 votes |
def raw_valid_fn_vec(self, xyt):
    """Returns if the given set of nodes is valid or not."""
    height = self.traversible.shape[0]
    width = self.traversible.shape[1]
    x = np.round(xyt[:, [0]]).astype(np.int32)
    y = np.round(xyt[:, [1]]).astype(np.int32)
    is_inside = np.all(np.concatenate((x >= 0, y >= 0,
                                       x < width, y < height), axis=1), axis=1)
    x = np.minimum(np.maximum(x, 0), width - 1)
    y = np.minimum(np.maximum(y, 0), height - 1)
    ind = np.ravel_multi_index((y, x), self.traversible.shape)
    is_traversible = self.traversible.ravel()[ind]
    is_valid = np.all(np.concatenate((is_inside[:, np.newaxis], is_traversible),
                                     axis=1), axis=1)
    return is_valid
Example #16
Source File: map_utils.py From DOTA_models with Apache License 2.0 | 6 votes |
def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
    """Projects points to map, returns how many points are present at each
    location."""
    num_points = np.zeros((map.size[1], map.size[0]))
    vertex_ = vertex[:, :2] - map.origin
    vertex_ = np.round(vertex_ / map.resolution).astype(np.int)
    if ignore_points_outside_map:
        good_ind = np.all(np.array([vertex_[:, 1] >= 0, vertex_[:, 1] < map.size[1],
                                    vertex_[:, 0] >= 0, vertex_[:, 0] < map.size[0]]),
                          axis=0)
        vertex_ = vertex_[good_ind, :]
        if wt is not None:
            wt = wt[good_ind, :]
    if wt is None:
        np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
    else:
        assert(wt.shape[0] == vertex.shape[0]), \
            'number of weights should be same as vertices.'
        np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
    return num_points
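A side note on np.add.at, which this example relies on: it performs unbuffered accumulation, so repeated indices each contribute, whereas a plain fancy-indexed += counts duplicates only once. A minimal sketch of the difference:

import numpy as np

counts = np.zeros((2, 2))
ys = np.array([0, 0, 1])
xs = np.array([1, 1, 0])

# Unbuffered: the duplicate (0, 1) index is accumulated twice.
np.add.at(counts, (ys, xs), 1)
print(counts)        # [[0. 2.] [1. 0.]]

# Buffered +=: the duplicate index only counts once.
counts2 = np.zeros((2, 2))
counts2[ys, xs] += 1
print(counts2)       # [[0. 1.] [1. 0.]]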
Example #17
Source File: preprocess.py From padasip with MIT License | 6 votes |
def test_lda(self):
    """
    Linear Discriminant Analysis
    """
    np.random.seed(100)
    N = 150
    classes = np.array(["1", "a", 3])
    cols = 4
    x = np.random.random((N, cols))  # random data
    labels = np.random.choice(classes, size=N)  # random labels
    # LDA components
    out = pa.preprocess.LDA_discriminants(x, labels)
    self.assertEqual(np.round(np.array(out).mean(), 5), 0.01298)
    # LDA analysis
    new_x = pa.preprocess.LDA(x, labels, n=2)
    self.assertEqual(np.round(np.array(new_x).mean(), 5), -0.50907)
    self.assertEqual(new_x.shape, (150, 2))
Example #18
Source File: image.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def resize(im, short, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param short: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return: resized image (NDArray) and scale (float)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(short) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
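The scaling logic deserves a trace: the short side is scaled to short, and if np.round(im_scale * im_size_max) would exceed max_size, the scale is recomputed from the long side instead. A sketch of just that arithmetic, with hypothetical image sizes:

import numpy as np

def compute_scale(h, w, short=600, max_size=1000):
    size_min, size_max = min(h, w), max(h, w)
    scale = float(short) / float(size_min)
    if np.round(scale * size_max) > max_size:   # long side would exceed the cap
        scale = float(max_size) / float(size_max)
    return scale

print(compute_scale(480, 640))    # 1.25 -> 600 x 800, within the cap
print(compute_scale(300, 1200))   # 0.8333... -> the 2.0 short-side scale is capped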
Example #19
Source File: transforms.py From torch-toolbox with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_params(fov_range, anglex_ranges, angley_ranges, anglez_ranges,
               shear_ranges, translate, scale_ranges, img_size):
    """Get parameters for ``perspective`` for a random perspective transform.

    Returns:
        sequence: params to be passed to the perspective transformation
    """
    fov = 90 + random.uniform(-fov_range, fov_range)
    anglex = random.uniform(anglex_ranges[0], anglex_ranges[1])
    angley = random.uniform(angley_ranges[0], angley_ranges[1])
    anglez = random.uniform(anglez_ranges[0], anglez_ranges[1])
    shear = random.uniform(shear_ranges[0], shear_ranges[1])

    max_dx = translate[0] * img_size[1]
    max_dy = translate[1] * img_size[0]
    translations = (np.round(random.uniform(-max_dx, max_dx)),
                    np.round(random.uniform(-max_dy, max_dy)))
    scale = (random.uniform(1 / scale_ranges[0], scale_ranges[0]),
             random.uniform(1 / scale_ranges[1], scale_ranges[1]))
    return fov, anglex, angley, anglez, shear, translations, scale
Example #20
Source File: transforms.py From torch-toolbox with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_params(img, scale, ratio):
    """Get parameters for ``crop`` for a random sized crop.

    Args:
        img (CV Image): Image to be cropped.
        scale (tuple): range of size of the origin size cropped
        ratio (tuple): range of aspect ratio of the origin aspect ratio cropped

    Returns:
        tuple: params (i, j, h, w) to be passed to ``crop`` for a random
            sized crop.
    """
    area = img.shape[0] * img.shape[1]

    for attempt in range(10):
        target_area = random.uniform(*scale) * area
        aspect_ratio = random.uniform(*ratio)

        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))

        if w <= img.shape[1] and h <= img.shape[0]:
            i = random.randint(0, img.shape[0] - h)
            j = random.randint(0, img.shape[1] - w)
            return i, j, h, w

    # Fallback to central crop
    in_ratio = img.shape[1] / img.shape[0]
    if (in_ratio < min(ratio)):
        w = img.shape[1]
        h = int(round(w / min(ratio)))
    elif (in_ratio > max(ratio)):
        h = img.shape[0]
        w = int(round(h * max(ratio)))
    else:  # whole image
        w = img.shape[1]
        h = img.shape[0]
    i = (img.shape[0] - h) // 2
    j = (img.shape[1] - w) // 2
    return i, j, h, w
Example #21
Source File: transforms.py From torch-toolbox with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_params(img, scale, ratio, value=0):
    """Get parameters for ``erase`` for a random erasing.

    Args:
        img (Tensor): Tensor image of size (C, H, W) to be erased.
        scale: range of proportion of erased area against input image.
        ratio: range of aspect ratio of erased area.

    Returns:
        tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
    """
    img_c, img_h, img_w = img.shape
    area = img_h * img_w

    for attempt in range(10):
        erase_area = random.uniform(scale[0], scale[1]) * area
        aspect_ratio = random.uniform(ratio[0], ratio[1])

        h = int(round(math.sqrt(erase_area * aspect_ratio)))
        w = int(round(math.sqrt(erase_area / aspect_ratio)))

        if h < img_h and w < img_w:
            i = random.randint(0, img_h - h)
            j = random.randint(0, img_w - w)
            if isinstance(value, numbers.Number):
                v = value
            elif isinstance(value, torch._six.string_classes):
                v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
            elif isinstance(value, (list, tuple)):
                v = torch.tensor(value, dtype=torch.float32).view(-1, 1, 1).expand(-1, h, w)
            return i, j, h, w, v

    # Return original image
    return 0, 0, img_h, img_w, img
Example #22
Source File: generate_anchors.py From cascade-rcnn_Pytorch with MIT License | 5 votes |
def _ratio_enum(anchor, ratios):
    """
    Enumerate a set of anchors for each aspect ratio wrt an anchor.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    size = w * h
    size_ratios = size / ratios
    ws = np.round(np.sqrt(size_ratios))
    hs = np.round(ws * ratios)
    anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
    return anchors
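Since _whctrs and _mkanchors are not shown here, the following is a hedged, self-contained sketch of the same ratio enumeration with minimal helpers modeled on the standard Faster R-CNN generate_anchors code; np.round keeps widths and heights integral while approximately preserving anchor area:

import numpy as np

def _whctrs(anchor):
    # Convert (x1, y1, x2, y2) to width, height, and center.
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    return w, h, x_ctr, y_ctr

def _mkanchors(ws, hs, x_ctr, y_ctr):
    # Build (x1, y1, x2, y2) anchors around a shared center.
    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    return np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
                      x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))

base_anchor = np.array([0, 0, 15, 15])    # 16x16 base anchor
ratios = np.array([0.5, 1.0, 2.0])
w, h, x_ctr, y_ctr = _whctrs(base_anchor)
size_ratios = (w * h) / ratios
ws = np.round(np.sqrt(size_ratios))       # widths rounded to whole pixels
hs = np.round(ws * ratios)
print(_mkanchors(ws, hs, x_ctr, y_ctr))
# [[-3.5  2.  18.5 13. ]
#  [ 0.   0.  15.  15. ]
#  [ 2.5 -3.  12.5 18. ]]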
Example #23
Source File: depth_utils.py From DOTA_models with Apache License 2.0 | 5 votes |
def bin_points(XYZ_cms, map_size, z_bins, xy_resolution):
    """Bins points into xy-z bins
    XYZ_cms is ... x H x W x 3
    Outputs is ... x map_size x map_size x (len(z_bins)+1)
    """
    sh = XYZ_cms.shape
    XYZ_cms = XYZ_cms.reshape([-1, sh[-3], sh[-2], sh[-1]])
    n_z_bins = len(z_bins) + 1
    map_center = (map_size - 1.) / 2.

    counts = []
    isvalids = []
    for XYZ_cm in XYZ_cms:
        isnotnan = np.logical_not(np.isnan(XYZ_cm[:, :, 0]))
        X_bin = np.round(XYZ_cm[:, :, 0] / xy_resolution + map_center).astype(np.int32)
        Y_bin = np.round(XYZ_cm[:, :, 1] / xy_resolution + map_center).astype(np.int32)
        Z_bin = np.digitize(XYZ_cm[:, :, 2], bins=z_bins).astype(np.int32)

        isvalid = np.array([X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size,
                            Z_bin >= 0, Z_bin < n_z_bins, isnotnan])
        isvalid = np.all(isvalid, axis=0)

        ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
        ind[np.logical_not(isvalid)] = 0
        count = np.bincount(ind.ravel(), isvalid.ravel().astype(np.int32),
                            minlength=map_size * map_size * n_z_bins)
        count = np.reshape(count, [map_size, map_size, n_z_bins])
        counts.append(count)
        isvalids.append(isvalid)
    counts = np.array(counts).reshape(list(sh[:-3]) + [map_size, map_size, n_z_bins])
    isvalids = np.array(isvalids).reshape(list(sh[:-3]) + [sh[-3], sh[-2], 1])
    return counts, isvalids
Example #24
Source File: expt_design.py From ernest with Apache License 2.0 | 5 votes |
def _get_training_points(self):
    '''Enumerate all the training points given the params for experiment design'''
    mcs_range = xrange(self.mcs_min, self.mcs_max + 1)
    scale_min = float(self.parts_min) / float(self.total_parts)
    scale_max = float(self.parts_max) / float(self.total_parts)
    scale_range = np.linspace(scale_min, scale_max, self.num_parts_interpolate)

    for scale in scale_range:
        for mcs in mcs_range:
            if np.round(scale * self.total_parts) >= self.cores_per_mc * mcs:
                yield [scale, mcs]
Example #25
Source File: graph_utils.py From DOTA_models with Apache License 2.0 | 5 votes |
def label_nodes_with_class(nodes_xyt, class_maps, pix):
    """
    Returns:
        class_maps__: one-hot class_map for each class.
        node_class_label: one-hot class_map for each class,
            nodes_xyt.shape[0] x n_classes
    """
    # Assign each pixel to a node.
    selem = skimage.morphology.disk(pix)
    class_maps_ = class_maps * 1.
    for i in range(class_maps.shape[2]):
        class_maps_[:, :, i] = skimage.morphology.dilation(class_maps[:, :, i] * 1, selem)
    class_maps__ = np.argmax(class_maps_, axis=2)
    class_maps__[np.max(class_maps_, axis=2) == 0] = -1

    # For each node pick out the label from this class map.
    x = np.round(nodes_xyt[:, [0]]).astype(np.int32)
    y = np.round(nodes_xyt[:, [1]]).astype(np.int32)
    ind = np.ravel_multi_index((y, x), class_maps__.shape)
    node_class_label = class_maps__.ravel()[ind][:, 0]

    # Convert to one hot versions.
    class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool)
    node_class_label_one_hot = np.zeros((node_class_label.shape[0],
                                         class_maps.shape[2]), dtype=np.bool)
    for i in range(class_maps.shape[2]):
        class_maps_one_hot[:, :, i] = class_maps__ == i
        node_class_label_one_hot[:, i] = node_class_label == i
    return class_maps_one_hot, node_class_label_one_hot
Example #26
Source File: platypus.py From pywr with GNU General Public License v3.0 | 5 votes |
def evaluate(self, solution):
    logger.info('Evaluating solution ...')

    for ivar, var in enumerate(self.model_variables):
        j = slice(self.model_variable_map[ivar], self.model_variable_map[ivar + 1])
        x = np.array(solution[j])
        assert len(x) == var.double_size + var.integer_size
        if var.double_size > 0:
            var.set_double_variables(np.array(x[:var.double_size]))

        if var.integer_size > 0:
            ints = np.round(np.array(x[-var.integer_size:])).astype(np.int32)
            var.set_integer_variables(ints)

    self.run_stats = self.model.run()

    objectives = []
    for r in self.model_objectives:
        sign = 1.0 if r.is_objective == 'minimise' else -1.0
        value = r.aggregated_value()
        objectives.append(sign * value)

    constraints = []
    for c in self.model_constraints:
        x = c.aggregated_value()
        if c.is_double_bounded_constraint:
            # Double bounded recorder is translated to two platypus constraints.
            constraints.extend([x, x])
        else:
            constraints.append(x)

    # Return values to the solution
    logger.info(f'Evaluation completed in {self.run_stats.time_taken:.2f} seconds '
                f'({self.run_stats.speed:.2f} ts/s).')

    if len(constraints) > 0:
        return objectives, constraints
    else:
        return objectives
Example #27
Source File: timestepper.py From pywr with GNU General Public License v3.0 | 5 votes |
def setup(self):
    periods = self.datetime_index
    # Compute length of each period
    deltas = periods.to_timestamp(how='e') - periods.to_timestamp(how='s')
    # Round to nearest second
    deltas = np.round(deltas.total_seconds())
    # Convert to days
    deltas = deltas / SECONDS_IN_DAY
    self._periods = periods
    self._deltas = deltas
    self.reset()
    self._dirty = False
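A hedged sketch of what this delta computation produces, using a small hypothetical pandas period index; rounding to the nearest second absorbs the nanosecond shortfall of end-of-period timestamps:

import numpy as np
import pandas as pd

SECONDS_IN_DAY = 86400
periods = pd.period_range('2020-01-01', periods=3, freq='D')
deltas = periods.to_timestamp(how='e') - periods.to_timestamp(how='s')
print(deltas.total_seconds())   # each entry ~86399.999999999 (end stamps fall 1ns short)
deltas = np.round(deltas.total_seconds()) / SECONDS_IN_DAY
print(deltas)                   # [1.0, 1.0, 1.0]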
Example #28
Source File: run_audio_attack.py From Black-Box-Audio with MIT License | 5 votes |
def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000,
              np.array(np.clip(np.round(audio), -2**15, 2**15 - 1), dtype=np.int16))
    print('output dB', db(audio))
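The clip/round/cast chain is the standard way to convert float samples into valid 16-bit PCM. A standalone sketch with a generated tone, assuming wav is scipy.io.wavfile as in this project:

import numpy as np
import scipy.io.wavfile as wav

fs = 16000
t = np.arange(fs) / fs                              # one second of samples
audio = 0.3 * 2**15 * np.sin(2 * np.pi * 440 * t)   # 440 Hz tone scaled into int16 range

# Round to integers, clip into the valid int16 range, then cast.
pcm = np.array(np.clip(np.round(audio), -2**15, 2**15 - 1), dtype=np.int16)
wav.write('tone.wav', fs, pcm)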
Example #29
Source File: RegNet2020.py From Pytorch-Networks with MIT License | 5 votes |
def quantize_float(f, q):
    """Converts a float to closest non-zero int divisible by q."""
    return int(round(f / q) * q)
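Usage note: because Python 3's round() ties to even, midpoint inputs can quantize in either direction. For example, using the function above:

print(quantize_float(31.5, 8))   # round(3.9375) * 8 = 32
print(quantize_float(20.0, 8))   # round(2.5) ties to even -> 2 * 8 = 16
print(quantize_float(28.0, 8))   # round(3.5) ties to even -> 4 * 8 = 32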
Example #30
Source File: gen_synthetic_single.py From DOTA_models with Apache License 2.0 | 5 votes |
def GenerateSample(filename, code_shape, layer_depth):
    # {0, +1} binary codes.
    # No conversion since the output file is expected to store
    # codes using {0, +1} codes (and not {-1, +1}).
    code = synthetic_model.GenerateSingleCode(code_shape)
    code = np.round(code)

    # Reformat the code so as to be compatible with what is generated
    # by the image encoder.
    # The image encoder generates a tensor of size:
    # iteration_count x batch_size x height x width x iteration_depth.
    # Here: batch_size = 1
    if code_shape[-1] % layer_depth != 0:
        raise ValueError('Number of layers is not an integer')
    height = code_shape[0]
    width = code_shape[1]
    code = code.reshape([1, height, width, -1, layer_depth])
    code = np.transpose(code, [3, 0, 1, 2, 4])

    int_codes = code.astype(np.int8)
    exported_codes = np.packbits(int_codes.reshape(-1))

    output = io.BytesIO()
    np.savez_compressed(output, shape=int_codes.shape, codes=exported_codes)
    with tf.gfile.FastGFile(filename, 'wb') as code_file:
        code_file.write(output.getvalue())