Python numpy.round() Examples

The following are 30 code examples showing how to use numpy.round(). They are extracted from open source projects; the project, author, file, and license are listed above each example where available, so you can go to the original source file for full context.


You may also want to check out all available functions and classes of the numpy module.
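Before looking at the project examples, here is a minimal, self-contained sketch of the basic numpy.round() behaviour (the array values below are illustrative):

import numpy as np

# Round an array to a fixed number of decimals.
a = np.array([1.234, 5.678])
print(np.round(a, 2))             # [1.23 5.68]

# numpy.round uses round-half-to-even ("banker's rounding"),
# so exact .5 ties go to the nearest even value.
print(np.round([0.5, 1.5, 2.5]))  # [0. 2. 2.]

# A negative decimals argument rounds to tens, hundreds, and so on.
print(np.round(1234, -2))         # 1200

Note that many of the examples below combine np.round() with an astype() call when integer indices or pixel coordinates are needed.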

Example 1
def draw_bounding_boxes(image, gt_boxes, im_info):
  num_boxes = gt_boxes.shape[0]
  gt_boxes_new = gt_boxes.copy()
  gt_boxes_new[:,:4] = np.round(gt_boxes_new[:,:4].copy() / im_info[2])
  disp_image = Image.fromarray(np.uint8(image[0]))

  for i in range(num_boxes):
    this_class = int(gt_boxes_new[i, 4])
    disp_image = _draw_single_box(disp_image, 
                                gt_boxes_new[i, 0],
                                gt_boxes_new[i, 1],
                                gt_boxes_new[i, 2],
                                gt_boxes_new[i, 3],
                                'N%02d-C%02d' % (i, this_class),
                                FONT,
                                color=STANDARD_COLORS[this_class % NUM_COLORS])

  image[0, :] = np.array(disp_image)
  return image 
Example 2
Project: FRIDA   Author: LCAV   File: doa.py    License: MIT License
def compute_mode(self):
        """
        Pre-compute mode vectors from candidate locations (in spherical 
        coordinates).
        """
        if self.num_loc is None:
            raise ValueError('Lookup table appears to be empty. \
                Run build_lookup().')
        self.mode_vec = np.zeros((self.max_bin,self.M,self.num_loc), 
            dtype='complex64')
        if (self.nfft % 2 == 1):
            raise ValueError('Signal length must be even.')
        f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
            * 1j * 2 * np.pi
        for i in range(self.num_loc):
            p_s = self.loc[:, i]
            for m in range(self.M):
                p_m = self.L[:, m]
                if (self.mode == 'near'):
                    dist = np.linalg.norm(p_m - p_s, axis=1)
                if (self.mode == 'far'):
                    dist = np.dot(p_s, p_m)
                # tau = np.round(self.fs*dist/self.c) # discrete - jagged
                tau = self.fs * dist / self.c  # "continuous" - smoother
                self.mode_vec[:, m, i] = np.exp(f * tau) 
Example 3
Project: StructEngPy   Author: zhuoju36   File: test.py    License: MIT License
def cantilever_beam_test():
    #FEModel Test
    model=FEModel()
    model.add_node(0,0,0)
    model.add_node(2,1,1)
    E=1.999e11
    mu=0.3
    A=4.265e-3
    J=9.651e-8
    I3=6.572e-5
    I2=3.301e-6
    rho=7849.0474
    
    model.add_beam(0,1,E,mu,A,I2,I3,J,rho)
    model.set_node_force(1,(0,0,-1e6,0,0,0))
    model.set_node_restraint(0,[True]*6)
    model.assemble_KM()
    model.assemble_f()
    model.assemble_boundary()
    solve_linear(model)
    print(np.round(model.d_,6))
    print("The result of node 1 should be about [0.12879,0.06440,-0.32485,-0.09320,0.18639,0]") 
Example 4
Project: kalman_filter_multi_object_tracking   Author: srianant   File: kalman_filter.py    License: MIT License
def predict(self):
        """Predict state vector u and variance of uncertainty P (covariance).
            where,
            u: previous state vector
            P: previous covariance matrix
            F: state transition matrix
            Q: process noise matrix
        Equations:
            u'_{k|k-1} = Fu'_{k-1|k-1}
            P_{k|k-1} = FP_{k-1|k-1} F.T + Q
            where,
                F.T is F transpose
        Args:
            None
        Return:
            vector of predicted state estimate
        """
        # Predicted state estimate
        self.u = np.round(np.dot(self.F, self.u))
        # Predicted estimate covariance
        self.P = np.dot(self.F, np.dot(self.P, self.F.T)) + self.Q
        self.lastResult = self.u  # same last predicted result
        return self.u 
Example 5
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: Embed.py    License: Apache License 2.0
def _uniform_embed(self, embed_dict, words_dict):
        """
        :param embed_dict:
        :param words_dict:
        """
        print("loading pre_train embedding by uniform for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6)  # uniform
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = uniform_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed 
Example 6
Project: neuropythy   Author: noahbenson   File: models.py    License: GNU Affero General Public License v3.0
def cortex_to_angle(self, x, y):
        iterX = hasattr(x, '__iter__')
        iterY = hasattr(y, '__iter__')
        jarr = None
        if iterX and iterY:
            if len(x) != len(y):
                raise RuntimeError('Arguments x and y must be the same length!')
            jarr = self._java_object.cortexToAngle(to_java_doubles(x), to_java_doubles(y))
        elif iterX:
            jarr = self._java_object.cortexToAngle(to_java_doubles(x),
                                                   to_java_doubles([y for i in x]))
        elif iterY:
            jarr = self._java_object.cortexToAngle(to_java_doubles([x for i in y]),
                                                   to_java_doubles(y))
        else:
            return self._java_object.cortexToAngle(x, y)
        dat = np.asarray([[c for c in r] for r in jarr])
        a = dat[:,2]
        a = np.round(np.abs(a))
        a[a > 3] = 0
        dat[:,2] = a
        return dat 
Example 7
Project: padasip   Author: matousc89   File: preprocess.py    License: MIT License
def test_lda(self):
        """
        Linear Discriminant Analysis
        """
        np.random.seed(100) 
        N = 150 
        classes = np.array(["1", "a", 3]) 
        cols = 4
        x = np.random.random((N, cols)) # random data
        labels = np.random.choice(classes, size=N) # random labels
        # LDA components
        out = pa.preprocess.LDA_discriminants(x, labels)
        self.assertEqual(np.round(np.array(out).mean(), 5), 0.01298)
        # LDA analysis
        new_x = pa.preprocess.LDA(x, labels, n=2)  
        self.assertEqual(np.round(np.array(new_x).mean(), 5), -0.50907)
        self.assertEqual(new_x.shape, (150, 2)) 
Example 8
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: image.py    License: Apache License 2.0
def resize(im, short, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param short: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return: resized image (NDArray) and scale (float)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(short) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale 
Example 9
Project: DOTA_models   Author: ringringyi   File: map_utils.py    License: Apache License 2.0
def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
  """Projects points to map, returns how many points are present at each
  location."""
  num_points = np.zeros((map.size[1], map.size[0]))
  vertex_ = vertex[:, :2] - map.origin
  vertex_ = np.round(vertex_ / map.resolution).astype(np.int)
  if ignore_points_outside_map:
    good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1],
                                vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]),
                      axis=0)
    vertex_ = vertex_[good_ind, :]
    if wt is not None:
      wt = wt[good_ind, :]
  if wt is None:
    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
  else:
    assert(wt.shape[0] == vertex.shape[0]), \
      'number of weights should be same as vertices.'
    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
  return num_points 
Example 10
Project: DOTA_models   Author: ringringyi   File: nav_env.py    License: Apache License 2.0
def raw_valid_fn_vec(self, xyt):
    """Returns if the given set of nodes is valid or not."""
    height = self.traversible.shape[0]
    width = self.traversible.shape[1]
    x = np.round(xyt[:,[0]]).astype(np.int32)
    y = np.round(xyt[:,[1]]).astype(np.int32)
    is_inside = np.all(np.concatenate((x >= 0, y >= 0,
                                       x < width, y < height), axis=1), axis=1)
    x = np.minimum(np.maximum(x, 0), width-1)
    y = np.minimum(np.maximum(y, 0), height-1)
    ind = np.ravel_multi_index((y,x), self.traversible.shape)
    is_traversible = self.traversible.ravel()[ind]

    is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible),
                                     axis=1), axis=1)
    return is_valid 
Example 11
Project: DOTA_models   Author: ringringyi   File: box_list_ops_test.py    License: Apache License 2.0
def test_convert_to_normalized_and_back(self):
    coordinates = np.random.uniform(size=(100, 4))
    coordinates = np.round(np.sort(coordinates) * 200)
    coordinates[:, 2:4] += 1
    coordinates[99, :] = [0, 0, 201, 201]
    img = tf.ones((128, 202, 202, 3))

    boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32))
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(img)[1],
                                                     tf.shape(img)[2])
    boxlist = box_list_ops.to_absolute_coordinates(boxlist,
                                                   tf.shape(img)[1],
                                                   tf.shape(img)[2])

    with self.test_session() as sess:
      out = sess.run(boxlist.get())
      self.assertAllClose(out, coordinates) 
Example 12
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: blob.py    License: MIT License
def prep_im_for_blob(im, pixel_means, pixel_stds, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    
    im = im.astype(np.float32, copy=False)
    im /= 255.0
    im -= pixel_means
    im /= pixel_stds
    # im = im[:, :, ::-1]
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    # if np.round(im_scale * im_size_max) > max_size:
    #     im_scale = float(max_size) / float(im_size_max)
    # im = imresize(im, im_scale)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)

    return im, im_scale 
Example 13
Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: net_utils.py    License: MIT License
def vis_det_and_mask(im, class_name, dets, masks, thresh=0.8):
    """Visual debugging of detections."""
    num_dets = np.minimum(10, dets.shape[0])
    colors_mask = random_colors(num_dets)
    colors_bbox = np.round(np.random.rand(num_dets, 3) * 255)
    # sort rois according to the coordinates, draw upper bbox first
    draw_mask = np.zeros(im.shape[:2], dtype=np.uint8)

    for i in range(1):
        bbox = tuple(int(np.round(x)) for x in dets[i, :4])
        mask = masks[i, :, :]
        full_mask = unmold_mask(mask, bbox, im.shape)

        score = dets[i, -1]
        if score > thresh:
            word_width = len(class_name)
            cv2.rectangle(im, bbox[0:2], bbox[2:4], colors_bbox[i], 2)
            cv2.rectangle(im, bbox[0:2], (bbox[0] + 18 + word_width*8, bbox[1]+15), colors_bbox[i], thickness=cv2.FILLED)
            apply_mask(im, full_mask, draw_mask, colors_mask[i], 0.5)
            draw_mask += full_mask
            cv2.putText(im, '%s' % (class_name), (bbox[0]+5, bbox[1] + 12), cv2.FONT_HERSHEY_PLAIN,
                        1.0, (255,255,255), thickness=1)
    return im 
Example 14
Project: Pytorch-Networks   Author: HaiyangLiu1997   File: RegNet2020.py    License: MIT License
def __init__(self, w_in, w_out, stride, bm, gw, se_r):
        super(BottleneckTransform, self).__init__()
        w_b = int(round(w_out * bm))
        g = w_b // gw
        self.a = nn.Conv2d(w_in, w_b, 1, stride=1, padding=0, bias=False)
        self.a_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
        self.a_relu = nn.ReLU(inplace=True)
        self.b = nn.Conv2d(w_b, w_b, 3, stride=stride, padding=1, groups=g, bias=False)
        self.b_bn = nn.BatchNorm2d(w_b, eps=1e-5, momentum=0.1)
        self.b_relu = nn.ReLU(inplace=True)
        if se_r:
            w_se = int(round(w_in * se_r))
            self.se = SE(w_b, w_se)
        self.c = nn.Conv2d(w_b, w_out, 1, stride=1, padding=0, bias=False)
        self.c_bn = nn.BatchNorm2d(w_out, eps=1e-5, momentum=0.1)
        self.c_bn.final_bn = True 
Example 15
Project: Pytorch-Networks   Author: HaiyangLiu1997   File: RegNet2020.py    License: MIT License
def forward(self, x):
        for layer in self.children():
            x = layer(x)
        return x

    # @staticmethod
    # def complexity(cx, w_in, w_out, stride, bm, gw, se_r):
    #     w_b = int(round(w_out * bm))
    #     g = w_b // gw
    #     cx = net.complexity_conv2d(cx, w_in, w_b, 1, 1, 0)
    #     cx = net.complexity_batchnorm2d(cx, w_b)
    #     cx = net.complexity_conv2d(cx, w_b, w_b, 3, stride, 1, g)
    #     cx = net.complexity_batchnorm2d(cx, w_b)
    #     if se_r:
    #         w_se = int(round(w_in * se_r))
    #         cx = SE.complexity(cx, w_b, w_se)
    #     cx = net.complexity_conv2d(cx, w_b, w_out, 1, 1, 0)
    #     cx = net.complexity_batchnorm2d(cx, w_out)
    #     return cx 
Example 16
Project: pywr   Author: pywr   File: timestepper.py    License: GNU General Public License v3.0
def next(self, ):
        self._current = current = self._next

        if current.index >= len(self._periods):
            raise StopIteration()

        # Increment to next timestep
        next_index = current.index + 1
        if next_index >= len(self._periods):
            # The final time-step is one offset beyond the end of the model.
            # Here we compute its delta and create the object. 
            final_period = current.period + self.offset
            delta = final_period.end_time - final_period.start_time
            delta = np.round(delta.total_seconds())
            delta = delta / SECONDS_IN_DAY
            self._next = _core.Timestep(final_period, next_index, delta)
        else:
            self._next = _core.Timestep(self._periods[next_index], next_index, self._deltas[next_index])

        # Return this timestep
        return current 
Example 17
Project: EXOSIMS   Author: dsavransky   File: FakeCatalog.py    License: BSD 3-Clause "New" or "Revised" License
def inverse_method(self,N,d):
        
        t = np.linspace(1e-3,0.999,N)
        f = np.log( t / (1 - t) )
        f = f/f[0]
        
        psi= np.pi*f
        cosPsi = np.cos(psi)
        sinTheta = ( np.abs(cosPsi) + (1-np.abs(cosPsi))*np.random.rand(len(cosPsi)))
        
        theta = np.arcsin(sinTheta)
        theta = np.pi-theta + (2*theta - np.pi)*np.round(np.random.rand(len(t)))
        cosPhi = cosPsi/sinTheta
        phi = np.arccos(cosPhi)*(-1)**np.round(np.random.rand(len(t)))
        
        coords = SkyCoord(phi*u.rad,(np.pi/2-theta)*u.rad,d*np.ones(len(phi))*u.pc)

        return coords 
Example 18
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000, np.array(np.clip(np.round(audio), -2**15, 2**15-1), dtype=np.int16))
    print('output dB', db(audio)) 
Example 19
def prep_im_for_blob(im, pixel_means, target_size, max_size):
  """Mean subtract and scale an image for use in a blob."""
  im = im.astype(np.float32, copy=False)
  im -= pixel_means
  im_shape = im.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])
  im_scale = float(target_size) / float(im_size_min)
  # Prevent the biggest axis from being more than MAX_SIZE
  if np.round(im_scale * im_size_max) > max_size:
    im_scale = float(max_size) / float(im_size_max)
  im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                  interpolation=cv2.INTER_LINEAR)

  return im, im_scale 
Example 20
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Example 21
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Example 22
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

        for roi_ind, roi in enumerate(rois):
            batch_ind = int(roi[0].data[0])
            roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
                roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
            roi_width = max(roi_end_w - roi_start_w + 1, 1)
            roi_height = max(roi_end_h - roi_start_h + 1, 1)
            bin_size_w = float(roi_width) / float(self.pooled_width)
            bin_size_h = float(roi_height) / float(self.pooled_height)

            for ph in range(self.pooled_height):
                hstart = int(np.floor(ph * bin_size_h))
                hend = int(np.ceil((ph + 1) * bin_size_h))
                hstart = min(data_height, max(0, hstart + roi_start_h))
                hend = min(data_height, max(0, hend + roi_start_h))
                for pw in range(self.pooled_width):
                    wstart = int(np.floor(pw * bin_size_w))
                    wend = int(np.ceil((pw + 1) * bin_size_w))
                    wstart = min(data_width, max(0, wstart + roi_start_w))
                    wend = min(data_width, max(0, wend + roi_start_w))

                    is_empty = (hend <= hstart) or (wend <= wstart)
                    if is_empty:
                        outputs[roi_ind, :, ph, pw] = 0
                    else:
                        data = features[batch_ind]
                        outputs[roi_ind, :, ph, pw] = torch.max(
                            torch.max(data[:, hstart:hend, wstart:wend], 1)[0], 2)[0].view(-1)

        return outputs 
Example 23
def _ratio_enum(anchor, ratios):
  """
  Enumerate a set of anchors for each aspect ratio wrt an anchor.
  """

  w, h, x_ctr, y_ctr = _whctrs(anchor)
  size = w * h
  size_ratios = size / ratios
  ws = np.round(np.sqrt(size_ratios))
  hs = np.round(ws * ratios)
  anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
  return anchors 
Example 24
Project: dustmaps   Author: gregreen   File: bh.py    License: GNU General Public License v2.0
def _lb2RN_northcap(self, l, b):
        R = 100. + (90. - b) * np.sin(np.radians(l)) / 0.3
        N = 100. + (90. - b) * np.cos(np.radians(l)) / 0.3
        return np.round(R).astype('i4'), np.round(N).astype('i4') 
Example 25
Project: dustmaps   Author: gregreen   File: bh.py    License: GNU General Public License v2.0
def _lb2RN_southcap(self, l, b):
        R = 100. + (90. + b) * np.sin(np.radians(l)) / 0.3
        N = 100. + (90. + b) * np.cos(np.radians(l)) / 0.3
        return np.round(R).astype('i4'), np.round(N).astype('i4') 
Example 26
Project: dustmaps   Author: gregreen   File: bh.py    License: GNU General Public License v2.0
def _lb2RN_mid(self, l, b):
        R = (np.abs(b) - 10.) / 0.6
        N = (np.mod(l, 360.) + 0.15) / 0.3 - 1
        return np.round(R).astype('i4'), np.round(N).astype('i4') 
Example 27
Project: StructEngPy   Author: zhuoju36   File: test.py    License: MIT License
def simply_supported_beam_test():
    #FEModel Test
    model=FEModel()

    E=1.999e11
    mu=0.3
    A=4.265e-3
    J=9.651e-8
    I3=6.572e-5
    I2=3.301e-6
    rho=7849.0474
    
    model.add_node(0,0,0)
    model.add_node(0.5,1,0.5)
    model.add_node(1,2,1)
    
    model.add_beam(0,1,E,mu,A,I2,I3,J,rho)
    model.add_beam(1,2,E,mu,A,I2,I3,J,rho)

    model.set_node_force(1,(0,0,-1e6,0,0,0))
    model.set_node_restraint(2,[False,False,True]+[False]*3)
    model.set_node_restraint(0,[True]*3+[False]*3)

    model.assemble_KM()
    model.assemble_f()
    model.assemble_boundary()
    solve_linear(model)
    print(np.round(model.d_,6))
    print("The result of node 1 should be about [0.00796,0.00715,-0.02296,-0.01553,-0.03106,-0.01903]") 
Example 28
Project: StructEngPy   Author: zhuoju36   File: test.py    License: MIT License
def simply_released_beam_test():
    #FEModel Test
    model=FEModel()

    E=1.999e11
    mu=0.3
    A=4.265e-3
    J=9.651e-8
    I3=6.572e-5
    I2=3.301e-6
    rho=7849.0474
    
    model.add_node(0,0,0)
    model.add_node(0.5,1,0.5)
    model.add_node(1,2,1)
    
    model.add_beam(0,1,E,mu,A,I2,I3,J,rho)
    model.add_beam(1,2,E,mu,A,I2,I3,J,rho)

    model.set_node_force(1,(0,0,-1e6,0,0,0))
    model.set_node_restraint(2,[True]*6)
    model.set_node_restraint(0,[True]*6)
    
    model.set_beam_releases(0,[True]*6,[False]*6)
    model.set_beam_releases(1,[False]*6,[True]*6)
    
    model.assemble_KM()
    model.assemble_f()
    model.assemble_boundary()
    solve_linear(model)
    print(np.round(model.d_,6))
    print("The result of node 1 should be about [0.00445,0.00890,-0.02296,-0.01930,-0.03860,-0.01930]") 
Example 29
Project: mmdetection   Author: open-mmlab   File: regnet.py    License: Apache License 2.0
def generate_regnet(self,
                        initial_width,
                        width_slope,
                        width_parameter,
                        depth,
                        divisor=8):
        """Generates per block width from RegNet parameters.

        Args:
            initial_width ([int]): Initial width of the backbone
            width_slope ([float]): Slope of the quantized linear function
            width_parameter ([int]): Parameter used to quantize the width.
            depth ([int]): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            list, int: return a list of widths of each stage and the number of
                stages
        """
        assert width_slope >= 0
        assert initial_width > 0
        assert width_parameter > 1
        assert initial_width % divisor == 0
        widths_cont = np.arange(depth) * width_slope + initial_width
        ks = np.round(
            np.log(widths_cont / initial_width) / np.log(width_parameter))
        widths = initial_width * np.power(width_parameter, ks)
        widths = np.round(np.divide(widths, divisor)) * divisor
        num_stages = len(np.unique(widths))
        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
        return widths, num_stages 
Example 30
Project: mmdetection   Author: open-mmlab   File: regnet.py    License: Apache License 2.0
def quantize_float(number, divisor):
        """Converts a float to closest non-zero int divisible by divior.

        Args:
            number (int): Original number to be quantized.
            divisor (int): Divisor used to quantize the number.

        Returns:
            int: quantized number that is divisible by divisor.
        """
        return int(round(number / divisor) * divisor)