Python numpy.round() Examples

The following code examples show how to use numpy.round(). They are taken from open source Python projects.
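
Before the project examples, here is a minimal, self-contained sketch of numpy.round() itself (not taken from any of the projects below). Note two behaviors that surprise many readers: NumPy rounds halfway values to the nearest even value ("banker's rounding"), and the decimals argument may be negative:

import numpy as np

print(np.round(2.5))                # 2.0 -- halfway values go to the nearest even value
print(np.round(1.5))                # 2.0
print(np.round([1.234, 5.678], 2))  # [1.23 5.68]
print(np.round(1234.5, -1))         # 1230.0 -- negative decimals round to tens, hundreds, ...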

Example 1
Project: FRIDA   Author: LCAV   File: doa.py    MIT License
def compute_mode(self):
        """
        Pre-compute mode vectors from candidate locations (in spherical 
        coordinates).
        """
        if self.num_loc is None:
            raise ValueError('Lookup table appears to be empty. '
                             'Run build_lookup().')
        self.mode_vec = np.zeros((self.max_bin,self.M,self.num_loc), 
            dtype='complex64')
        if (self.nfft % 2 == 1):
            raise ValueError('Signal length must be even.')
        f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
            * 1j * 2 * np.pi
        for i in range(self.num_loc):
            p_s = self.loc[:, i]
            for m in range(self.M):
                p_m = self.L[:, m]
                if (self.mode == 'near'):
                    dist = np.linalg.norm(p_m - p_s, axis=1)
                if (self.mode == 'far'):
                    dist = np.dot(p_s, p_m)
                # tau = np.round(self.fs*dist/self.c) # discrete - jagged
                tau = self.fs * dist / self.c  # "continuous" - smoother
                self.mode_vec[:, m, i] = np.exp(f * tau) 
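
The commented-out line above is the interesting design note in this example: rounding the delay to whole samples ("discrete - jagged") quantizes the phase, while the fractional delay kept here varies smoothly with distance. A small illustration, with hypothetical values for the sample rate, speed of sound and distances:

import numpy as np

fs, c = 16000.0, 343.0               # hypothetical sample rate (Hz) and speed of sound (m/s)
dist = np.array([0.50, 0.51, 0.52])  # hypothetical distances (m)
print(fs * dist / c)                 # approx. [23.32 23.79 24.26] -- smooth fractional delays
print(np.round(fs * dist / c))       # [23. 24. 24.] -- jagged after rounding
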
Example 2
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: visualization.py    MIT License
def draw_bounding_boxes(image, gt_boxes, im_info):
  num_boxes = gt_boxes.shape[0]
  gt_boxes_new = gt_boxes.copy()
  gt_boxes_new[:,:4] = np.round(gt_boxes_new[:,:4].copy() / im_info[2])
  disp_image = Image.fromarray(np.uint8(image[0]))

  for i in range(num_boxes):
    this_class = int(gt_boxes_new[i, 4])
    disp_image = _draw_single_box(disp_image, 
                                gt_boxes_new[i, 0],
                                gt_boxes_new[i, 1],
                                gt_boxes_new[i, 2],
                                gt_boxes_new[i, 3],
                                'N%02d-C%02d' % (i, this_class),
                                FONT,
                                color=STANDARD_COLORS[this_class % NUM_COLORS])

  image[0, :] = np.array(disp_image)
  return image 
Example 3
Project: kipet   Author: salvadorgarciamunoz   File: TemplateBuilder.py    GNU General Public License v3.0
def add_feed_times(self, times):
        """Add measurement times to the model

        Args:
            times (array_like): feeding points

        Returns:
            None

        """
        for t in times:
            t = round(t, 6)  # round to 6 digits: generated times otherwise carry spurious digits due to mixed data types (CS)
            self._feed_times.add(t)
            self._meas_times.add(t)  # added here to avoid double addition CS

Example 4
Project: DataComp   Author: Cojabi   File: stats.py    Apache License 2.0
def calc_conf_inv(zipper, feat_subset, df_names):
    """
    Calculates the confidence intervals of means for numerical features.

    :param zipper: Zipper created from a DataCollection.
    :param feat_subset: An iterable of features for which the confidence intervals shall be calculated.
    :param df_names: Names of the dataframes in the DataCollection
    :return:
    """
    confs = dict()

    for key in feat_subset:
        # confidence interval of the mean for this feature, one entry per dataset
        confs[key] = [np.round(conf_interval(z), 2) for z in zipper[key]]

    # collect the intervals into a dataframe indexed by feature
    counts = pd.DataFrame(confs).transpose()
    counts.index.name = "features"
    counts.columns = [name+"conf." for name in df_names]

    return counts 
Example 5
Project: StructEngPy   Author: zhuoju36   File: test.py    MIT License
def cantilever_beam_test():
    #FEModel Test
    model=FEModel()
    model.add_node(0,0,0)
    model.add_node(2,1,1)
    E=1.999e11
    mu=0.3
    A=4.265e-3
    J=9.651e-8
    I3=6.572e-5
    I2=3.301e-6
    rho=7849.0474
    
    model.add_beam(0,1,E,mu,A,I2,I3,J,rho)
    model.set_node_force(1,(0,0,-1e6,0,0,0))
    model.set_node_restraint(0,[True]*6)
    model.assemble_KM()
    model.assemble_f()
    model.assemble_boundary()
    solve_linear(model)
    print(np.round(model.d_,6))
    print("The result of node 1 should be about [0.12879,0.06440,-0.32485,-0.09320,0.18639,0]") 
Example 6
Project: kalman_filter_multi_object_tracking   Author: srianant   File: kalman_filter.py    MIT License
def predict(self):
        """Predict state vector u and variance of uncertainty P (covariance).
            where,
            u: previous state vector
            P: previous covariance matrix
            F: state transition matrix
            Q: process noise matrix
        Equations:
            u'_{k|k-1} = Fu'_{k-1|k-1}
            P_{k|k-1} = FP_{k-1|k-1} F.T + Q
            where,
                F.T is F transpose
        Args:
            None
        Return:
            vector of predicted state estimate
        """
        # Predicted state estimate
        self.u = np.round(np.dot(self.F, self.u))
        # Predicted estimate covariance
        self.P = np.dot(self.F, np.dot(self.P, self.F.T)) + self.Q
        self.lastResult = self.u  # same last predicted result
        return self.u 
Example 7
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: Embed.py    Apache License 2.0
def _uniform_embed(self, embed_dict, words_dict):
        """
        :param embed_dict:
        :param words_dict:
        """
        print("loading pre_train embedding by uniform for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        uniform_col = np.random.uniform(-0.25, 0.25, int(self.dim)).round(6)  # uniform
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = uniform_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed 
Example 8
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: image.py    Apache License 2.0
def resize(im, short, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param short: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return: resized image (NDArray) and scale (float)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(short) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale 
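
The scale arithmetic in resize() is easy to verify by hand. A sketch with a hypothetical 480x640 image, short=600 and max_size=1000: the short side fixes the scale, and the rounded long side stays under the cap, so no correction is applied:

import numpy as np

im_shape = (480, 640)
short, max_size = 600, 1000
im_scale = float(short) / float(min(im_shape))       # 1.25
if np.round(im_scale * max(im_shape)) > max_size:    # round(800.0) <= 1000
    im_scale = float(max_size) / float(max(im_shape))
print(im_scale)                                      # 1.25 -> the output would be 600 x 800
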
Example 9
Project: LHMP   Author: hydrogo   File: gr4j_cemaneige.py    GNU General Public License v3.0
def interaction(river_name, path_to_scheme, path_to_observations,\
    X1, X2, X3, X4, X5, X6):

    # simulate our modeled hydrograph
    data = dataframe_construction(path_to_scheme)
    data['Qsim'] = simulation(data, [X1, X2, X3, X4, X5, X6])

    # read observations
    obs = pd.read_csv(path_to_observations, index_col=0, parse_dates=True,
                      squeeze=True, header=None, names=['Date', 'Qobs'])

    # concatenate data
    data = pd.concat([data, obs], axis=1)

    # calculate efficiency criterion
    # slice data only for observational period and drop NA values
    data_for_obs = data.ix[obs.index, ['Qsim', 'Qobs']].dropna()  # note: .ix was removed from pandas; .loc is the modern equivalent
    eff = NS(data_for_obs['Qobs'], data_for_obs['Qsim'])

    # plot
    ax = data.ix[obs.index, ['Qsim', 'Qobs']].plot(figsize=(10, 7), style=['b-', 'k.'])
    ax.set_title(river_name + ' daily runoff modelling, ' + 'Nash-Sutcliffe efficiency: {}'.format(np.round(eff, 2))) 
Example 10
Project: LHMP   Author: hydrogo   File: simhyd_cemaneige.py    GNU General Public License v3.0
def interaction(river_name, path_to_scheme, path_to_observations,\
    INSC, COEFF, SQ, SMSC, SUB, CRAK, K, etmul, DELAY, X_m, X5, X6):

    # simulate our modeled hydrograph
    data = dataframe_construction(path_to_scheme)
    data['Qsim'] = simulation(data, [INSC, COEFF, SQ, SMSC, SUB, CRAK, K,\
    etmul, DELAY, X_m, X5, X6])

    # read observations
    obs = pd.read_csv(path_to_observations, index_col=0, parse_dates=True,
                      squeeze=True, header=None, names=['Date', 'Qobs'])

    # concatenate data
    data = pd.concat([data, obs], axis=1)

    # calculate efficiency criterion
    # slice data only for observational period and drop NA values
    data_for_obs = data.ix[obs.index, ['Qsim', 'Qobs']].dropna()  # note: .ix was removed from pandas; .loc is the modern equivalent
    eff = NS(data_for_obs['Qobs'], data_for_obs['Qsim'])

    # plot
    ax = data.ix[obs.index, ['Qsim', 'Qobs']].plot(figsize=(10, 7), style=['b-', 'k.'])
    ax.set_title(river_name + ' daily runoff modelling, ' + 'Nash-Sutcliffe efficiency: {}'.format(np.round(eff, 2))) 
Example 11
Project: DOTA_models   Author: ringringyi   File: map_utils.py    Apache License 2.0
def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
  """Projects points to map, returns how many points are present at each
  location."""
  num_points = np.zeros((map.size[1], map.size[0]))
  vertex_ = vertex[:, :2] - map.origin
  vertex_ = np.round(vertex_ / map.resolution).astype(np.int)  # np.int was removed in recent NumPy; plain int works
  if ignore_points_outside_map:
    good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1],
                                vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]),
                      axis=0)
    vertex_ = vertex_[good_ind, :]
    if wt is not None:
      wt = wt[good_ind, :]
  if wt is None:
    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
  else:
    assert(wt.shape[0] == vertex.shape[0]), \
      'number of weights should be same as vertices.'
    np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
  return num_points 
Example 12
Project: DOTA_models   Author: ringringyi   File: nav_env.py    Apache License 2.0
def raw_valid_fn_vec(self, xyt):
    """Returns if the given set of nodes is valid or not."""
    height = self.traversible.shape[0]
    width = self.traversible.shape[1]
    x = np.round(xyt[:,[0]]).astype(np.int32)
    y = np.round(xyt[:,[1]]).astype(np.int32)
    is_inside = np.all(np.concatenate((x >= 0, y >= 0,
                                       x < width, y < height), axis=1), axis=1)
    x = np.minimum(np.maximum(x, 0), width-1)
    y = np.minimum(np.maximum(y, 0), height-1)
    ind = np.ravel_multi_index((y,x), self.traversible.shape)
    is_traversible = self.traversible.ravel()[ind]

    is_valid = np.all(np.concatenate((is_inside[:,np.newaxis], is_traversible),
                                     axis=1), axis=1)
    return is_valid 
Example 13
Project: DOTA_models   Author: ringringyi   File: box_list_ops_test.py    Apache License 2.0
def test_convert_to_normalized_and_back(self):
    coordinates = np.random.uniform(size=(100, 4))
    coordinates = np.round(np.sort(coordinates) * 200)
    coordinates[:, 2:4] += 1
    coordinates[99, :] = [0, 0, 201, 201]
    img = tf.ones((128, 202, 202, 3))

    boxlist = box_list.BoxList(tf.constant(coordinates, tf.float32))
    boxlist = box_list_ops.to_normalized_coordinates(boxlist,
                                                     tf.shape(img)[1],
                                                     tf.shape(img)[2])
    boxlist = box_list_ops.to_absolute_coordinates(boxlist,
                                                   tf.shape(img)[1],
                                                   tf.shape(img)[2])

    with self.test_session() as sess:
      out = sess.run(boxlist.get())
      self.assertAllClose(out, coordinates) 
Example 14
Project: pypriv   Author: soeaver   File: transforms.py    MIT License
def multi_scale(im, scales=(480, 576, 688, 864, 1200), max_sizes=(800, 1000, 1200, 1500, 1800), image_flip=False):
    im_size_min = np.min(im.shape[0:2])
    im_size_max = np.max(im.shape[0:2])

    scale_ims = []
    scale_ratios = []
    for i in xrange(len(scales)):  # xrange is Python 2; use range under Python 3
        scale_ratio = float(scales[i]) / float(im_size_min)
        if np.round(scale_ratio * im_size_max) > float(max_sizes[i]):
            scale_ratio = float(max_sizes[i]) / float(im_size_max)
        resize_im = cv2.resize(im, None, None, fx=scale_ratio, fy=scale_ratio,
                               interpolation=cv2.INTER_LINEAR)
        scale_ims.append(resize_im)
        scale_ratios.append(scale_ratio)
        if image_flip:
            scale_ims.append(cv2.resize(im[:, ::-1], None, None, fx=scale_ratio, fy=scale_ratio,
                                        interpolation=cv2.INTER_LINEAR))
            scale_ratios.append(-scale_ratio)

    return scale_ims, scale_ratios 
Example 15
Project: ReinforcementLearningBookExamples   Author: Shawn-Guo-CN   File: 2GridWorld_Ch3.py    GNU General Public License v3.0
def value_estimate_with_bellman_optimal_equation(gridworld, world_size=5, discount=0.9):
    values_est = np.zeros((world_size, world_size))
    while True:
        new_values_est = np.zeros((world_size, world_size))
        for i in range(0, world_size):
            for j in range(0, world_size):
                tmp_values_increment = []
                for action in ['U', 'D', 'L', 'R']:
                    gridworld.set_agent_location(i, j)
                    reward = gridworld.take_action(action)
                    new_pos = gridworld.get_agent_location()
                    tmp_values_increment.append(reward + discount * values_est[new_pos[0], new_pos[1]])
                new_values_est[i, j] = np.max(tmp_values_increment)
        if np.sum(np.abs(values_est - new_values_est)) < 1e-4:
            break
        values_est = new_values_est
    draw_image('bellman optimal equation', np.round(new_values_est, decimals=2)) 
Example 16
Project: RNASEqTool   Author: armell   File: r_binding.py    MIT License
def deseq_gene_expression_normalization(df_data):
    rpy2.robjects.pandas2ri.activate()
    df_data = df_data.dropna()

    r_data_set = robjects.conversion.py2ri(df_data)
    base = importr("base")
    deseq = importr("DESeq")
    bio_generics = importr("BiocGenerics")
    rdiv = robjects.r.get('/')

    conds = base.factor(base.c(base.colnames(r_data_set)))

    cds = deseq.newCountDataSet(base.round(r_data_set), conds)
    res_est = bio_generics.estimateSizeFactors(cds)

    normalized = base.t(rdiv(base.t(bio_generics.counts(res_est)), bio_generics.sizeFactors(res_est)))
    rpy2.robjects.pandas2ri.deactivate()
    res = Result()
    res.frame = pd.DataFrame(numpy.round(numpy.matrix(normalized)), index=normalized.rownames, columns=normalized.colnames)
    res.package = "DESeq"
    res.version = deseq.__version__

    return res 
Example 17
Project: lung_nodule_classifier   Author: xairc   File: prepare.py    MIT License
def resample(imgs, spacing, new_spacing,order=2):
    if len(imgs.shape)==3:
        new_shape = np.round(imgs.shape * spacing / new_spacing)
        true_spacing = spacing * imgs.shape / new_shape
        resize_factor = new_shape / imgs.shape
        imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
        return imgs, true_spacing
    elif len(imgs.shape)==4:
        n = imgs.shape[-1]
        newimg = []
        for i in range(n):
            slice = imgs[:,:,:,i]
            newslice,true_spacing = resample(slice,spacing,new_spacing)
            newimg.append(newslice)
        newimg=np.transpose(np.array(newimg),[1,2,3,0])
        return newimg,true_spacing
    else:
        raise ValueError('wrong shape') 
Example 18
Project: lung_nodule_classifier   Author: xairc   File: prepare.py    MIT License
def load_itk_image(filename):
    with open(filename) as f:
        contents = f.readlines()
        line = [k for k in contents if k.startswith('TransformMatrix')][0]
        transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')
        transformM = np.round(transformM)
        if np.any( transformM!=np.array([1,0,0, 0, 1, 0, 0, 0, 1])):
            isflip = True
        else:
            isflip = False

    itkimage = sitk.ReadImage(filename)
    numpyImage = sitk.GetArrayFromImage(itkimage)
     
    numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
    numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
     
    return numpyImage, numpyOrigin, numpySpacing, isflip 
Example 19
Project: HAPI   Author: MAfarrag   File: DistRRM.py    MIT License
def DistMAXBAS(FPL,SPMAXBAS, q_uz):
    
    MAXBAS = np.nanmax(SPMAXBAS)
    FPLArray = FPL.ReadAsArray()
    rows = FPL.RasterYSize
    cols = FPL.RasterXSize
    NoDataValue = np.float32(FPL.GetRasterBand(1).GetNoDataValue())
    FPLArray[FPLArray == NoDataValue] = np.nan # replace novalue cells by nan
    
    MaxFPL = np.nanmax(FPLArray)
    MinFPL = np.nanmin(FPLArray)
#    resize_fun = lambda x: np.round(((((x - min_dist)/(max_dist - min_dist))*(1*maxbas - 1)) + 1), 0)
    resize_fun = lambda x: ((((x - MinFPL)/(MaxFPL - MinFPL))*(1*MAXBAS - 1)) + 1)
    
    NormalizedFPL = resize_fun(FPLArray)
    
    for x in range(rows):
        for y in range(cols):
            if not np.isnan(FPLArray[x,y]):  # skip NoData (nan) cells
                q_uz[x,y,:] = Routing.TriangularRouting(q_uz[x,y,:], NormalizedFPL[x,y]) 
    
    return q_uz 
Example 20
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License
def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000, np.array(np.clip(np.round(audio), -2**15, 2**15-1), dtype=np.int16))
    print('output dB', db(audio)) 
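
The clip-before-cast pattern matters here: casting an out-of-range float straight to np.int16 does not saturate (the result wraps or is platform-dependent), so samples are rounded and pinned to the int16 range first. A minimal sketch with hypothetical sample values:

import numpy as np

audio = np.array([40000.4, -40000.4, 123.6])
ints = np.array(np.clip(np.round(audio), -2**15, 2**15 - 1), dtype=np.int16)
print(ints)  # [ 32767 -32768    124]
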
Example 21
Project: PEAKachu   Author: tbischler   File: deseq2.py    ISC License
def run_deseq2(self, exp_lib_list, ctr_lib_list, size_factors,
                   pairwise_replicates):
        self._count_df = np.round(self._count_df, decimals=0)
        self._count_df = self._count_df.astype(int)
        conds = ["exp"] * len(exp_lib_list) + ["ctr"] * len(ctr_lib_list)
        if pairwise_replicates:
            samples = [str(sample) for sample in (
                       list(range(1, len(exp_lib_list) + 1)) +
                       list(range(1, len(ctr_lib_list) + 1)))]
            colData = robjects.DataFrame({
                    "conditions": robjects.StrVector(conds),
                    "samples": robjects.StrVector(samples)})
            design = Formula('~ samples + conditions')
        else:
            colData = robjects.DataFrame(
                    {"conditions": robjects.StrVector(conds)})
            design = Formula('~ conditions')
        r_count_df = robjects.DataFrame(self._count_df)
        r_count_df.colnames = robjects.rinterface.NULL
        dds = r.DESeqDataSetFromMatrix(countData=r_count_df,
                                       colData=colData, design=design)
        if size_factors is None:
            dds = r.estimateSizeFactors(dds)
        else:
            assign_sf = r["sizeFactors<-"]
            dds = assign_sf(object=dds, value=robjects.FloatVector(
                size_factors))
        dds = r.estimateDispersions(dds, quiet=True)
        dds = r.nbinomWaldTest(dds, quiet=True)
        size_factors = pd.Series(r.sizeFactors(dds),
                                 index=self._count_df.columns)
        results = r.results(dds, contrast=robjects.StrVector(
            ("conditions", "exp", "ctr")), altHypothesis="greater")
        with localconverter(robjects.default_converter + pandas2ri.converter):
            results_df = robjects.conversion.rpy2py(
                r['as.data.frame'](results))
        results_df.index = self._count_df.index
        return(results_df, size_factors) 
Example 22
Project: PEAKachu   Author: tbischler   File: deseq2.py    ISC License
def calc_size_factors(self):
        self._count_df = np.round(self._count_df, decimals=0)
        self._count_df = self._count_df.astype(int)
        r_count_df = robjects.DataFrame(self._count_df)
        r_count_df.colnames = robjects.rinterface.NULL
        r_size_factors = r.estimateSizeFactorsForMatrix(r_count_df)
        return pd.Series(r_size_factors, index=self._count_df.columns) 
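
Both PEAKachu methods above open with the same idiom: DESeq2 operates on raw integer counts, so any fractional values in the count table are rounded to zero decimals and then cast to int. A minimal sketch of that idiom on a hypothetical count table (np.round dispatches to DataFrame.round):

import numpy as np
import pandas as pd

count_df = pd.DataFrame({"exp_1": [10.4, 3.6], "ctr_1": [7.5, 0.2]})
print(np.round(count_df, decimals=0).astype(int))
#    exp_1  ctr_1
# 0     10      8
# 1      4      0
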
Example 23
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: blob.py    MIT License
def prep_im_for_blob(im, pixel_means, target_size, max_size):
  """Mean subtract and scale an image for use in a blob."""
  im = im.astype(np.float32, copy=False)
  im -= pixel_means
  im_shape = im.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])
  im_scale = float(target_size) / float(im_size_min)
  # Prevent the biggest axis from being more than MAX_SIZE
  if np.round(im_scale * im_size_max) > max_size:
    im_scale = float(max_size) / float(im_size_max)
  im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                  interpolation=cv2.INTER_LINEAR)

  return im, im_scale 
Example 24
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test.py    MIT License
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Example 25
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test_train.py    MIT License
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Example 26
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: roi_pool_py.py    MIT License
def forward(self, features, rois):
        batch_size, num_channels, data_height, data_width = features.size()
        num_rois = rois.size()[0]
        outputs = Variable(torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)).cuda()

        for roi_ind, roi in enumerate(rois):
            batch_ind = int(roi[0].data[0])
            roi_start_w, roi_start_h, roi_end_w, roi_end_h = np.round(
                roi[1:].data.cpu().numpy() * self.spatial_scale).astype(int)
            roi_width = max(roi_end_w - roi_start_w + 1, 1)
            roi_height = max(roi_end_h - roi_start_h + 1, 1)
            bin_size_w = float(roi_width) / float(self.pooled_width)
            bin_size_h = float(roi_height) / float(self.pooled_height)

            for ph in range(self.pooled_height):
                hstart = int(np.floor(ph * bin_size_h))
                hend = int(np.ceil((ph + 1) * bin_size_h))
                hstart = min(data_height, max(0, hstart + roi_start_h))
                hend = min(data_height, max(0, hend + roi_start_h))
                for pw in range(self.pooled_width):
                    wstart = int(np.floor(pw * bin_size_w))
                    wend = int(np.ceil((pw + 1) * bin_size_w))
                    wstart = min(data_width, max(0, wstart + roi_start_w))
                    wend = min(data_width, max(0, wend + roi_start_w))

                    is_empty = (hend <= hstart) or (wend <= wstart)
                    if is_empty:
                        outputs[roi_ind, :, ph, pw] = 0
                    else:
                        data = features[batch_ind]
                        outputs[roi_ind, :, ph, pw] = torch.max(
                            torch.max(data[:, hstart:hend, wstart:wend], 1)[0], 2)[0].view(-1)

        return outputs 
Example 27
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: generate_anchors.py    MIT License
def _ratio_enum(anchor, ratios):
  """
  Enumerate a set of anchors for each aspect ratio wrt an anchor.
  """

  w, h, x_ctr, y_ctr = _whctrs(anchor)
  size = w * h
  size_ratios = size / ratios
  ws = np.round(np.sqrt(size_ratios))
  hs = np.round(ws * ratios)
  anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
  return anchors 
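
A worked check of the arithmetic, assuming the usual 16x16 base anchor (size = 256) and ratios of 0.5, 1 and 2. Note that 23 * 0.5 = 11.5 rounds to 12, since NumPy rounds halfway cases to even:

import numpy as np

size = 16.0 * 16.0                   # 256
ratios = np.array([0.5, 1.0, 2.0])
size_ratios = size / ratios          # [512. 256. 128.]
ws = np.round(np.sqrt(size_ratios))  # [23. 16. 11.]
hs = np.round(ws * ratios)           # [12. 16. 22.]
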
Example 28
Project: kipet   Author: salvadorgarciamunoz   File: TemplateBuilder.py    GNU General Public License v3.0
def add_huplc_data(self, data): #added for the inclusion of h/uplc data CS
        """Add HPLC or UPLC data

                Args:
                    data (DataFrame): DataFrame with measurement times as
                                      indices and wavelengths as columns.

                Returns:
                    None
        """
        if isinstance(data, pd.DataFrame):
            dfDhat = pd.DataFrame(index=self._feed_times, columns=data.columns)
            for t in self._feed_times:
                if t not in data.index:  # for points that are the same in original meas times and feed times
                    dfDhat.loc[t] = [0.0 for n in range(len(data.columns))]
            dfallDhat = data.append(dfDhat)
            dfallDhat.sort_index(inplace=True)
            dfallDhat.index = dfallDhat.index.to_series().apply(
                lambda x: np.round(x, 6))  # time from data rounded to 6 digits
            ##############Filter out NaN###############
            count = 0
            for j in dfallDhat.index:
                if count >= 1 and count < len(dfallDhat.index):
                    if dfallDhat.index[count] == dfallDhat.index[count - 1]:
                        dfallDhat = dfallDhat.dropna()
                count += 1
            ###########################################
            self._huplc_data = dfallDhat
        else:
            raise RuntimeError('HUPLC data format not supported. Try pandas.DataFrame')
        Dhat = np.array(dfallDhat)
        for t in range(len(dfallDhat.index)):
            for l in range(len(dfallDhat.columns)):
                if Dhat[t, l] >= 0:
                    pass
                else:
                    self._is_Dhat_deriv = True
        if self._is_Dhat_deriv == True:
            print(
                "Warning! Since Dhat-matrix contains negative values Kipet is assuming a derivative of C has been inputted") 
Example 29
Project: kipet   Author: salvadorgarciamunoz   File: TemplateBuilder.py    GNU General Public License v3.0
def add_smoothparam_data(self, data): #added for mutable smoothing parameter option CS
        """Add HPLC or UPLC data

                Args:
                    data (DataFrame): DataFrame with measurement times as
                                      indices and wavelengths as columns.

                Returns:
                    None
        """
        if isinstance(data, pd.DataFrame):
            dfPs = pd.DataFrame(index=self._feed_times, columns=data.columns)
            for t in self._feed_times:
                if t not in data.index:  # for points that are the same in original meas times and feed times
                    dfPs.loc[t] = [0.0 for n in range(len(data.columns))]
            dfallPs = data.append(dfPs)
            dfallPs.sort_index(inplace=True)
            dfallPs.index = dfallPs.index.to_series().apply(
                lambda x: np.round(x, 6))  # time from data rounded to 6 digits
            ##############Filter out NaN###############
            count = 0
            for j in dfallPs.index:
                if count >= 1 and count < len(dfallPs.index):
                    if dfallPs.index[count] == dfallPs.index[count - 1]:
                        dfallPs = dfallPs.dropna()
                count += 1
            ###########################################
            self._smoothparam_data = dfallPs
        else:
            raise RuntimeError('Smooth parameter data format not supported. Try pandas.DataFrame')
        Ps = np.array(dfallPs) 
Example 30
Project: kipet   Author: salvadorgarciamunoz   File: TemplateBuilder.py    GNU General Public License v3.0
def add_concentration_data(self, data):
        """Add concentration data

        Args:
            data (DataFrame): DataFrame with measurement times as
                              indices and concentrations as columns.

        Returns:
            None

        """
        if isinstance(data, pd.DataFrame):
            dfc = pd.DataFrame(index=self._feed_times, columns=data.columns)
            for t in self._feed_times:
                if t not in data.index:  # for points that are the same in original meas times and feed times
                    dfc.loc[t] = [0.0 for n in range(len(data.columns))]
            dfallc = data.append(dfc)
            dfallc.sort_index(inplace=True)
            dfallc.index = dfallc.index.to_series().apply(
                lambda x: np.round(x, 6))  # time from data rounded to 6 digits
            ##############Filter out NaN###############
            count = 0
            for j in dfallc.index:
                if count >= 1 and count < len(dfallc.index):
                    if dfallc.index[count] == dfallc.index[count - 1]:
                        dfallc = dfallc.dropna()
                count += 1
            ###########################################
            self._concentration_data = dfallc
        else:
            raise RuntimeError('Concentration data format not supported. Try pandas.DataFrame')
        C = np.array(dfallc)
        for t in range(len(dfallc.index)):
            for l in range(len(dfallc.columns)):
                if C[t, l] >= 0:
                    pass
                else:
                    self._is_C_deriv = True
        if self._is_C_deriv == True:
            print(
                "Warning! Since C-matrix contains negative values Kipet is assuming a derivative of C has been inputted") 
Example 31
Project: kipet   Author: salvadorgarciamunoz   File: TemplateBuilder.py    GNU General Public License v3.0
def add_huplcmeasurement_times(self, times): #added for additional huplc times that are on a different time scale CS
        """Add H/UPLC measurement times to the model

        Args:
            times (array_like): measurement points

        Returns:
            None

        """
        for t in times:
            t = round(t, 6)  # round to 6 digits: generated times otherwise carry spurious digits due to mixed data types (CS)
            self._huplcmeas_times.add(t) 
Example 32
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: blob.py    MIT License
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob."""
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)

    return im, im_scale 
Example 33
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: test.py    MIT License
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
      im (ndarray): a color image in BGR order
    Returns:
      blob (ndarray): a data blob holding an image pyramid
      im_scale_factors (list): list of image scales (relative to im) used
        in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.FLAGS2["pixel_means"]

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.FLAGS2["test_scales"]:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.FLAGS.test_max_size:
            im_scale = float(cfg.FLAGS.test_max_size) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors) 
Example 34
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: ds_utils.py    MIT License
def unique_boxes(boxes, scale=1.0):
    """Return indices of unique boxes."""
    v = np.array([1, 1e3, 1e6, 1e9])
    hashes = np.round(boxes * scale).dot(v)
    _, index = np.unique(hashes, return_index=True)
    return np.sort(index) 
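
Rounding makes boxes that differ by less than half a unit hash to the same value, so they collapse to a single index. A quick usage sketch, assuming unique_boxes() above is in scope:

import numpy as np

boxes = np.array([[10.2, 20.1, 30.0, 40.0],
                  [10.0, 20.0, 30.0, 40.0],   # same box as above after rounding
                  [15.0, 25.0, 35.0, 45.0]])
print(unique_boxes(boxes))  # [0 2]
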
Example 35
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: proposal_target_layer.py    MIT License
def proposal_target_layer(rpn_rois, rpn_scores, gt_boxes, _num_classes):
    """
    Assign object detection proposals to ground-truth targets. Produces proposal
    classification labels and bounding-box regression targets.
    """

    # Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
    # (i.e., rpn.proposal_layer.ProposalLayer), or any other source
    all_rois = rpn_rois
    all_scores = rpn_scores

    # Include ground-truth boxes in the set of candidate rois
    if cfg.FLAGS.proposal_use_gt:
        zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
        all_rois = np.vstack(
            (all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
        )
        # not sure if this is a wise appending, but anyway I am not using it
        all_scores = np.vstack((all_scores, zeros))

    num_images = 1
    rois_per_image = cfg.FLAGS.batch_size / num_images
    fg_rois_per_image = np.round(cfg.FLAGS.proposal_fg_fraction * rois_per_image)

    # Sample rois with classification labels and bounding box regression
    # targets
    labels, rois, roi_scores, bbox_targets, bbox_inside_weights = _sample_rois(
        all_rois, all_scores, gt_boxes, fg_rois_per_image,
        rois_per_image, _num_classes)

    rois = rois.reshape(-1, 5)
    roi_scores = roi_scores.reshape(-1)
    labels = labels.reshape(-1, 1)
    bbox_targets = bbox_targets.reshape(-1, _num_classes * 4)
    bbox_inside_weights = bbox_inside_weights.reshape(-1, _num_classes * 4)
    bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)

    return rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights 
Example 36
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: generate_anchors.py    MIT License
def _ratio_enum(anchor, ratios):
    """
    Enumerate a set of anchors for each aspect ratio wrt an anchor.
    """

    w, h, x_ctr, y_ctr = _whctrs(anchor)
    size = w * h
    size_ratios = size / ratios
    ws = np.round(np.sqrt(size_ratios))
    hs = np.round(ws * ratios)
    anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
    return anchors 
Example 37
Project: DataComp   Author: Cojabi   File: stats.py    Apache License 2.0
def calc_mean_diff_conf(zipper, feat_subset):
    """
    Calculates the confidence intervals for numerical features.

    :param zipper: Zipper created from a DataCollection.
    :param feat_subset: An iterable of features for which the confidence intervals shall be calculated.
    :return:
    """
    # initialize dictionary which stores the conf_invs
    conf_invs = dict()

    for feat in feat_subset:  # run through all variables

        # initiate dict in dict for d1 vs d2, d2 vs d3 etc. per feature
        conf_invs[feat] = dict()

        for i in range(len(zipper[feat]) - 1):  # select dataset1
            for j in range(i + 1, len(zipper[feat])):  # select dataset2

                # only calculate score if there are values in each dataset
                if zipper[feat][i] and zipper[feat][j]:
                    interval = np.round(calc_mean_diff(zipper[feat][i], zipper[feat][j]), 2)

                    # indicator = True if 0 is not in the interval
                    if interval[0] >= 0 or interval[1] <= 0:
                        flag = True
                    else:
                        flag = False

                    conf_invs[feat][i + 1, j + 1] = (interval, flag)

                # if one or both sets are empty
                else:
                    conf_invs[feat][i + 1, j + 1] = (np.nan, np.nan)

    return conf_invs 
Example 38
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0
def diff_mean_conf_formula(n1, n2, mean1, mean2, var1, var2, rnd=2):
    """
    Calculates the confidence interval for the difference of means between two features.

    :param n1: Sample size of sample 1
    :param n2: Sample size of sample 2
    :param mean1: Mean of sample 1
    :param mean2: Mean of sample 2
    :param var1: Variance of sample 1
    :param var2: Variance of sample 2
    :param rnd: Number of decimal places the result shall be rounded to. Default is 2.
    :return: Confidence interval given as a list: [interval start, interval end]
    """
    # estimate common variance
    s2 = ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 - 1 + n2 - 1)

    # estimate standard deviation
    sd = np.sqrt(s2 * (1 / n1 + 1 / n2))

    # calculate difference in means
    diff = mean1 - mean2

    # set z value. 1.96 is standard for a 95% significance level
    z = 1.96  # t.ppf((1+0.95) / 2, len(series1)-1+len(series2)-1)

    start = diff - z * sd
    end = diff + z * sd

    return [np.round(start, rnd), np.round(end, rnd)] 
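
A worked example with hypothetical samples, assuming the function above is in scope: n1 = n2 = 50, means 10 and 9, both variances 4. Then s2 = 4, sd = sqrt(4 * (1/50 + 1/50)) = 0.4, and the interval is 1 +/- 1.96 * 0.4:

print(diff_mean_conf_formula(50, 50, 10.0, 9.0, 4.0, 4.0))  # [0.22, 1.78]
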
Example 39
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0
def diff_prop_conf_formula(count1, n1, count2, n2, rnd=2):
    """
    Calculates the Agresti/Caffo confidence interval for the difference of proportions between two features.

    :param count1: Number of successes in sample 1
    :param n1: Sample size of sample 1
    :param count2: Number of successes in sample 2
    :param n2: Sample size of sample 2
    :param rnd: Number of decimal places the result shall be rounded to. Default is 2.
    :return: Confidence interval given as a list: [interval start, interval end]
    """

    # Agresti/Caffo adjustment: add one success and two observations to each sample
    n1m = n1 + 2
    n2m = n2 + 2
    prop1 = (count1 + 1) / n1m
    prop2 = (count2 + 1) / n2m


    # calculate combined standard error
    se = np.sqrt(prop1 * (1 - prop1) / n1m + prop2 * (1 - prop2) / n2m)

    # calculate difference in means
    diff = prop1 - prop2

    # set z value. 1.96 is standard for a 95% significance level
    z = 1.96  # t.ppf((1+0.95) / 2, len(series1)-1+len(series2)-1)

    start = diff - z * se
    end = diff + z * se

    return [np.round(start, rnd), np.round(end, rnd)] 
Example 40
Project: StructEngPy   Author: zhuoju36   File: test.py    MIT License
def simply_supported_beam_test():
    #FEModel Test
    model=FEModel()

    E=1.999e11
    mu=0.3
    A=4.265e-3
    J=9.651e-8
    I3=6.572e-5
    I2=3.301e-6
    rho=7849.0474
    
    model.add_node(0,0,0)
    model.add_node(0.5,1,0.5)
    model.add_node(1,2,1)
    
    model.add_beam(0,1,E,mu,A,I2,I3,J,rho)
    model.add_beam(1,2,E,mu,A,I2,I3,J,rho)

    model.set_node_force(1,(0,0,-1e6,0,0,0))
    model.set_node_restraint(2,[False,False,True]+[False]*3)
    model.set_node_restraint(0,[True]*3+[False]*3)

    model.assemble_KM()
    model.assemble_f()
    model.assemble_boundary()
    solve_linear(model)
    print(np.round(model.d_,6))
    print("The result of node 1 should be about [0.00796,0.00715,-0.02296,-0.01553,-0.03106,-0.01903]") 
Example 41
Project: StructEngPy   Author: zhuoju36   File: test.py    MIT License
def simply_released_beam_test():
    #FEModel Test
    model=FEModel()

    E=1.999e11
    mu=0.3
    A=4.265e-3
    J=9.651e-8
    I3=6.572e-5
    I2=3.301e-6
    rho=7849.0474
    
    model.add_node(0,0,0)
    model.add_node(0.5,1,0.5)
    model.add_node(1,2,1)
    
    model.add_beam(0,1,E,mu,A,I2,I3,J,rho)
    model.add_beam(1,2,E,mu,A,I2,I3,J,rho)

    model.set_node_force(1,(0,0,-1e6,0,0,0))
    model.set_node_restraint(2,[True]*6)
    model.set_node_restraint(0,[True]*6)
    
    model.set_beam_releases(0,[True]*6,[False]*6)
    model.set_beam_releases(1,[False]*6,[True]*6)
    
    model.assemble_KM()
    model.assemble_f()
    model.assemble_boundary()
    solve_linear(model)
    print(np.round(model.d_,6))
    print("The result of node 1 should be about [0.00445,0.00890,-0.02296,-0.01930,-0.03860,-0.01930]") 
Example 42
Project: ML_from_scratch   Author: jarfa   File: test_util.py    Apache License 2.0
def rounded_list(array, digits=6):
    """
    Because the unittest module won't do almost equal
    comparisons of numpy arrays or even lists :(
    """
    return list(np.round(array, digits)) 
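
Typical use in a test, assuming a unittest.TestCase context (the self here is hypothetical):

self.assertEqual(rounded_list(np.array([1 / 3, 2 / 3])),
                 [0.333333, 0.666667])
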
Example 43
Project: kalman_filter_multi_object_tracking   Author: srianant   File: kalman_filter.py    MIT License
def correct(self, b, flag):
        """Correct or update state vector u and variance of uncertainty P (covariance).
        where,
        u: predicted state vector u
        A: matrix in observation equations
        b: vector of observations
        P: predicted covariance matrix
        Q: process noise matrix
        R: observation noise matrix
        Equations:
            C = AP_{k|k-1} A.T + R
            K_{k} = P_{k|k-1} A.T(C.Inv)
            u'_{k|k} = u'_{k|k-1} + K_{k}(b_{k} - Au'_{k|k-1})
            P_{k|k} = P_{k|k-1} - K_{k}(CK.T)
            where,
                A.T is A transpose
                C.Inv is C inverse
        Args:
            b: vector of observations
            flag: if "true" prediction result will be updated else detection
        Return:
            predicted state vector u
        """

        if not flag:  # update using prediction
            self.b = self.lastResult
        else:  # update using detection
            self.b = b
        C = np.dot(self.A, np.dot(self.P, self.A.T)) + self.R
        K = np.dot(self.P, np.dot(self.A.T, np.linalg.inv(C)))

        self.u = np.round(self.u + np.dot(K, (self.b - np.dot(self.A,
                                                              self.u))))
        self.P = self.P - np.dot(K, np.dot(C, K.T))
        self.lastResult = self.u
        return self.u 
Example 44
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: Embed.py    Apache License 2.0
def info(self):
        """
        :return:
        """
        total_count = self.exact_count + self.fuzzy_count
        print("Words count {}, Embed dim {}.".format(self.words_count, self.dim))
        print("Exact count {} / {}".format(self.exact_count, self.words_count))
        print("Fuzzy count {} / {}".format(self.fuzzy_count, self.words_count))
        print("  INV count {} / {}".format(total_count, self.words_count))
        print("  OOV count {} / {}".format(self.oov_count, self.words_count))
        print("  OOV radio ===> {}%".format(np.round((self.oov_count / self.words_count) * 100, 2)))
        print(40 * "*") 
Example 45
Project: ConvLSTM   Author: XingguangZhang   File: extract_flow.py    MIT License
def compute_TVL1(prev, curr, bound=15):
    """Compute the TV-L1 optical flow."""
    TVL1 = cv2.optflow.DualTVL1OpticalFlow_create()
    flow = TVL1.calc(prev, curr, None)
    assert flow.dtype == np.float32

    flow = (flow + bound) * (255.0 / (2*bound))
    flow = np.round(flow).astype(int)
    flow[flow >= 255] = 255
    flow[flow <= 0] = 0

    return flow 
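
The affine map sends flow values in [-bound, bound] onto [0, 255] before rounding, and anything outside that range saturates at the ends. A quick numeric check with bound = 15 (note that 127.5 rounds to 128 under round-half-to-even):

import numpy as np

bound = 15
flow = np.array([-20.0, -15.0, 0.0, 15.0, 20.0])
flow = (flow + bound) * (255.0 / (2 * bound))
flow = np.round(flow).astype(int)
flow[flow >= 255] = 255
flow[flow <= 0] = 0
print(flow)  # [  0   0 128 255 255]
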
Example 46
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: anchor.py    Apache License 2.0
def _ratio_enum(anchor, ratios):
        """
        Enumerate a set of anchors for each aspect ratio wrt an anchor.
        """
        w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)
        size = w * h
        size_ratios = size / ratios
        ws = np.round(np.sqrt(size_ratios))
        hs = np.round(ws * ratios)
        anchors = AnchorGenerator._mkanchors(ws, hs, x_ctr, y_ctr)
        return anchors 
Example 47
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0
def test_round_ceil_floor():
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5.543
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]= 2

    test = mx.sym.round(data) + mx.sym.ceil(data) +  mx.sym.floor(data)
    exe_test = test.bind(default_context(), args=[arr_data])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
    assert_almost_equal(out, npout) 
Example 48
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
    return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)] 
Example 49
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
    return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)] 
Example 50
Project: LHMP   Author: hydrogo   File: hbv.py    GNU General Public License v3.0
def interaction(river_name, path_to_scheme, path_to_observations,\
    parBETA, parCET, parFC, parK0, parK1, parK2, parLP, parMAXBAS,\
    parPERC, parUZL, parPCORR, parTT, parCFMAX, parSFCF, parCFR, parCWH):

    # simulate our modeled hydrograph
    data = dataframe_construction(path_to_scheme)
    data['Qsim'] = simulation(data, [parBETA, parCET, parFC, parK0, parK1,\
    parK2, parLP, parMAXBAS, parPERC, parUZL, parPCORR, parTT, parCFMAX,\
    parSFCF, parCFR, parCWH])

    # read observations
    obs = pd.read_csv(path_to_observations, index_col=0, parse_dates=True,
                      squeeze=True, header=None, names=['Date', 'Qobs'])

    # concatenate data
    data = pd.concat([data, obs], axis=1)

    # calculate efficiency criterion
    # slice data only for observational period and drop NA values
    data_for_obs = data.ix[obs.index, ['Qsim', 'Qobs']].dropna()  # note: .ix was removed from pandas; .loc is the modern equivalent
    eff = NS(data_for_obs['Qobs'], data_for_obs['Qsim'])

    # plot
    ax = data.ix[obs.index, ['Qsim', 'Qobs']].plot(figsize=(10, 7), style=['b-', 'k.'])
    ax.set_title(river_name + ' daily runoff modelling, ' + 'Nash-Sutcliffe efficiency: {}'.format(np.round(eff, 2)))
    #text_pos = np.max(data['Qobs'])
    #ax.text(obs.index[100], text_pos, 'NS: {}'.format(np.round(eff, 2)), size=14) 
Example 51
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: anchor.py    Apache License 2.0
def _generate_anchors(self, stride, base_size, ratios, scales, alloc_size):
        """Pre-generate all anchors."""
        # generate same shapes on every location
        px, py = (base_size - 1) * 0.5, (base_size - 1) * 0.5
        base_sizes = []
        for r in ratios:
            for s in scales:
                size = base_size * base_size / r
                ws = np.round(np.sqrt(size))
                w = (ws * s - 1) * 0.5
                h = (np.round(ws * r) * s - 1) * 0.5
                base_sizes.append([px - w, py - h, px + w, py + h])
        base_sizes = np.array(base_sizes)  # (N, 4)

        # propagate to all locations by shifting offsets
        height, width = alloc_size
        offset_x = np.arange(0, width * stride, stride)
        offset_y = np.arange(0, height * stride, stride)
        offset_x, offset_y = np.meshgrid(offset_x, offset_y)
        offsets = np.stack((offset_x.ravel(), offset_y.ravel(),
                            offset_x.ravel(), offset_y.ravel()), axis=1)
        # broadcast_add (1, N, 4) + (M, 1, 4)
        anchors = (base_sizes.reshape((1, -1, 4)) + offsets.reshape((-1, 1, 4)))
        anchors = anchors.reshape((1, 1, height, width, -1)).astype(np.float32)
        return anchors

Example 52
Project: DOTA_models   Author: ringringyi   File: graph_utils.py    Apache License 2.0
def label_nodes_with_class(nodes_xyt, class_maps, pix):
  """
  Returns: 
    class_maps__: one-hot class_map for each class.
    node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes
  """
  # Assign each pixel to a node.
  selem = skimage.morphology.disk(pix)
  class_maps_ = class_maps*1.
  for i in range(class_maps.shape[2]):
    class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem)
  class_maps__ = np.argmax(class_maps_, axis=2)
  class_maps__[np.max(class_maps_, axis=2) == 0] = -1

  # For each node pick out the label from this class map.
  x = np.round(nodes_xyt[:,[0]]).astype(np.int32)
  y = np.round(nodes_xyt[:,[1]]).astype(np.int32)
  ind = np.ravel_multi_index((y,x), class_maps__.shape)
  node_class_label = class_maps__.ravel()[ind][:,0]

  # Convert to one hot versions.
  class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool)  # np.bool was removed in recent NumPy; plain bool works
  node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=np.bool)
  for i in range(class_maps.shape[2]):
    class_maps_one_hot[:,:,i] = class_maps__ == i 
    node_class_label_one_hot[:,i] = node_class_label == i
  return class_maps_one_hot, node_class_label_one_hot 
Example 53
Project: DOTA_models   Author: ringringyi   File: depth_utils.py    Apache License 2.0
def bin_points(XYZ_cms, map_size, z_bins, xy_resolution):
  """Bins points into xy-z bins
  XYZ_cms is ... x H x W x3
  Outputs is ... x map_size x map_size x (len(z_bins)+1)
  """
  sh = XYZ_cms.shape
  XYZ_cms = XYZ_cms.reshape([-1, sh[-3], sh[-2], sh[-1]])
  n_z_bins = len(z_bins)+1
  map_center = (map_size-1.)/2.
  counts = []
  isvalids = []
  for XYZ_cm in XYZ_cms:
    isnotnan = np.logical_not(np.isnan(XYZ_cm[:,:,0]))
    X_bin = np.round(XYZ_cm[:,:,0] / xy_resolution + map_center).astype(np.int32)
    Y_bin = np.round(XYZ_cm[:,:,1] / xy_resolution + map_center).astype(np.int32)
    Z_bin = np.digitize(XYZ_cm[:,:,2], bins=z_bins).astype(np.int32)

    isvalid = np.array([X_bin >= 0, X_bin < map_size, Y_bin >= 0, Y_bin < map_size,
                        Z_bin >= 0, Z_bin < n_z_bins, isnotnan])
    isvalid = np.all(isvalid, axis=0)

    ind = (Y_bin * map_size + X_bin) * n_z_bins + Z_bin
    ind[np.logical_not(isvalid)] = 0
    count = np.bincount(ind.ravel(), isvalid.ravel().astype(np.int32),
                         minlength=map_size*map_size*n_z_bins)
    count = np.reshape(count, [map_size, map_size, n_z_bins])
    counts.append(count)
    isvalids.append(isvalid)
  counts = np.array(counts).reshape(list(sh[:-3]) + [map_size, map_size, n_z_bins])
  isvalids = np.array(isvalids).reshape(list(sh[:-3]) + [sh[-3], sh[-2], 1])
  return counts, isvalids 
Example 54
Project: DOTA_models   Author: ringringyi   File: gen_synthetic_single.py    Apache License 2.0
def GenerateSample(filename, code_shape, layer_depth):
  # {0, +1} binary codes.
  # No conversion since the output file is expected to store
  # codes using {0, +1} codes (and not {-1, +1}).
  code = synthetic_model.GenerateSingleCode(code_shape)
  code = np.round(code)

  # Reformat the code so as to be compatible with what is generated
  # by the image encoder.
  # The image encoder generates a tensor of size:
  # iteration_count x batch_size x height x width x iteration_depth.
  # Here: batch_size = 1
  if code_shape[-1] % layer_depth != 0:
    raise ValueError('Number of layers is not an integer')
  height = code_shape[0]
  width = code_shape[1]
  code = code.reshape([1, height, width, -1, layer_depth])
  code = np.transpose(code, [3, 0, 1, 2, 4])

  int_codes = code.astype(np.int8)
  exported_codes = np.packbits(int_codes.reshape(-1))

  output = io.BytesIO()
  np.savez_compressed(output, shape=int_codes.shape, codes=exported_codes)
  with tf.gfile.FastGFile(filename, 'wb') as code_file:
    code_file.write(output.getvalue()) 
Example 55
Project: OpenFermion-Cirq   Author: quantumlib   File: fermionic_simulation.py    Apache License 2.0
def _canonicalize_weight(w):
    if w == 0:
        return (0, 0)
    if cirq.is_parameterized(w):
        return (cirq.PeriodicValue(abs(w), 2 * sympy.pi), sympy.arg(w))
    period = 2 * np.pi
    return (np.round((w % period) if (w == np.real(w)) else
        (abs(w) % period) * w / abs(w), 8), 0) 
Example 56
Project: pypriv   Author: soeaver   File: transforms.py    MIT License
def scale(im, short_size=256, max_size=1e5, interp=cv2.INTER_LINEAR):
    """ support gray im; interp: cv2.INTER_LINEAR (default) or cv2.INTER_NEAREST; """
    im_size_min = np.min(im.shape[0:2])
    im_size_max = np.max(im.shape[0:2])
    scale_ratio = float(short_size) / float(im_size_min)
    if np.round(scale_ratio * im_size_max) > float(max_size):
        scale_ratio = float(max_size) / float(im_size_max)

    scale_im = cv2.resize(im, None, None, fx=scale_ratio, fy=scale_ratio, interpolation=interp)

    return scale_im, scale_ratio 
Example 57
Project: ReinforcementLearningBookExamples   Author: Shawn-Guo-CN   File: 2GridWorld_Ch3.py    GNU General Public License v3.0
def value_estimate_with_bellman_equation(gridworld, world_size=5, discount=0.9):
    values_est = np.zeros((world_size, world_size))
    while True:
        new_values_est = np.zeros((world_size, world_size))
        for i in range(0, world_size):
            for j in range(0, world_size):
                for action in ['U', 'D', 'L', 'R']:
                    gridworld.set_agent_location(i, j)
                    reward = gridworld.take_action(action)
                    new_pos = gridworld.get_agent_location()
                    new_values_est[i, j] += 0.25 * (reward + discount * values_est[new_pos[0], new_pos[1]])
        if np.sum(np.abs(values_est - new_values_est)) < 1e-4:
            break
        values_est = new_values_est
    draw_image('bellman equation', np.round(new_values_est, decimals=2)) 
Example 58
Project: pytac   Author: dls-controls   File: test_units.py    Apache License 2.0 5 votes vote down vote up
def test_pp_conversion_to_physics_3_points():
    pchip_uc = PchipUnitConv([1, 3, 5], [1, 3, 6])
    assert pchip_uc.eng_to_phys(1) == 1
    assert numpy.round(pchip_uc.eng_to_phys(2), 4) == 1.8875
    assert pchip_uc.eng_to_phys(3) == 3
    assert numpy.round(pchip_uc.eng_to_phys(4), 4) == 4.3625
    assert pchip_uc.eng_to_phys(5) == 6 
Example 59
Project: ImageQA   Author: codedecde   File: plotAttention.py    MIT License 5 votes vote down vote up
def plotAttention (image_file, question, alpha, smooth=True):
    
    ## Parameters
    #
    # image_file : Path to image file.
    # question   : List of question string words (tokenised)
    # alpha      : NP array of size (len(question), 196) or List of len(question) NP vectors of shape (196, )
    # smooth     : If True, upsample the attention map for smoother display
    #

    img = LoadImage(image_file)
    n_words = len(question) + 1
    w = int(np.round(np.sqrt(n_words)))
    h = int(np.ceil(np.float32(n_words) / w))

    plt.subplot(w, h, 1)
    plt.imshow(img)
    plt.axis('off')

    for ii in range(alpha.shape[0]):
        plt.subplot(w, h, ii+2)
        lab = question[ii]
        plt.text(0, 1, lab, backgroundcolor='white', fontsize=13)
        plt.text(0, 1, lab, color='black', fontsize=13)
        plt.imshow(img)
        if smooth:
            alpha_img = skimage.transform.pyramid_expand(alpha[ii].reshape(14,14), upscale=32)
        else:
            alpha_img = skimage.transform.resize(alpha[ii].reshape(14,14), [img.shape[0], img.shape[1]])
        plt.imshow(alpha_img, alpha=0.8)
        plt.set_cmap(cm.Greys_r)
        plt.axis('off') 
Example 60
Project: RNASEqTool   Author: armell   File: r_binding.py    MIT License 5 votes vote down vote up
def edger_gene_expression_normalization(df_data):
    rpy2.robjects.pandas2ri.activate()
    df_data = df_data.dropna()
    r_data_set = robjects.conversion.py2ri(df_data)


    edger = importr("edgeR")
    base = importr("base")
    mult = robjects.r.get('*')

    factors = base.factor(base.c(base.colnames(r_data_set)))
    dge = edger.DGEList(counts=r_data_set, group=factors)
    y = edger.calcNormFactors(dge)
    y = edger.estimateCommonDisp(y)

    # y[0] is the counts matrix and y[1][2] holds the size factors,
    # accessed by position; rpy2 handles the mapping between Python's
    # 0-based and R's 1-based indexing.
    normalized = mult(y[0], y[1][2])

    rpy2.robjects.pandas2ri.deactivate()

    print("preparing result")
    res = Result()
    res.frame = pd.DataFrame(numpy.round(numpy.matrix(normalized)), columns=normalized.colnames, index=normalized.rownames)
    res.package = "edgeR"
    res.version = edger.__version__

    return res 
Example 61
Project: GCN-VAE-opinion   Author: zxj32   File: generate_distribution.py    MIT License 5 votes vote down vote up
def get_omega(b, u):
    W = 2.0
    a = 0.5
    d = 1.0 - b - u
    r = W * b / u
    s = W * d / u
    alpha = r + W * a
    beta = s + W * (1.0 - a)
    return np.round(alpha + beta) 
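As a worked check: with b = 0.6 and u = 0.2 we get d = 0.2, r = 6, s = 2, alpha = 7 and beta = 3, so the function returns np.round(10.0):

print(get_omega(0.6, 0.2))       # 10.0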
Example 62
Project: HAPI   Author: MAfarrag   File: Inputs.py    MIT License 5 votes vote down vote up
def mycolor(x,min_old,max_old,min_new, max_new):
    """
    # =============================================================================
    #  mycolor(x,min_old,max_old,min_new, max_new)
    # =============================================================================
    this function transforms a value from a linear scale to a logarithmic scale
    between the logarithmic values of both boundaries
    inputs:
        1-x:
            [float] new value needed to be transformed to a logarithmic scale
        2-min_old:
            [float] min old value in normal scale
        3-max_old:
            [float] max old value in normal scale
        4-min_new:
            [float] min new value in normal scale
        5-max_new:
            [float] max new value in normal scale
    output:
        1-y:
            [int] integer number between the max_new and min_new boundaries
    """
    
    # get the boundaries of the logarithmic scale
    if min_old== 0.0:
        min_old_log=-7
    else:
        min_old_log=np.log(min_old)
        
    max_old_log=np.log(max_old)    
    
    if x==0:
        x_log=-7
    else:
        x_log=np.log(x)
    
    y=int(np.round(rescale(x_log,min_old_log,max_old_log,min_new,max_new)))
    
    return y 
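mycolor calls a rescale helper that is not shown in this snippet. A plausible linear stand-in (an assumption, not HAPI's actual implementation) and a quick call:

import numpy as np

def rescale(value, old_min, old_max, new_min, new_max):
    # hypothetical linear rescaling between two ranges
    return new_min + (value - old_min) * (new_max - new_min) / (old_max - old_min)

# map x=10 from the range (0, 1000] onto integer colours 0..255 on a log scale
print(mycolor(10, 0.0, 1000.0, 0, 255))   # 171 under these assumptions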
Example 63
Project: HAPI   Author: MAfarrag   File: Routing.py    MIT License 5 votes vote down vote up
def muskingum(inflow,Qinitial,k,x,dt):
    """
    ===========================================================
     muskingum(inflow,Qinitial,k,x,dt)
    ===========================================================
    
    inputs:
    ----------
            1- inflow
            2- Qinitial initial value for outflow
            3- k travelling time (hours)
            4- x surface nonlinearity coefficient (0,0.5)
            5- dt delta t 
    Outputs:
    ----------
   """ 
    c1=(dt-2*k*x)/(2*k*(1-x)+dt)
    c2=(dt+2*k*x)/(2*k*(1-x)+dt)
    c3=(2*k*(1-x)-dt)/(2*k*(1-x)+dt)
    
#    if c1 + c2 + c3 != 1:
#        raise ValueError("sum of c1, c2 & c3 is not 1")
    
    outflow=np.ones_like(inflow)*np.nan    
    outflow[0]=Qinitial
    
    for i in range(1,len(inflow)):
        outflow[i]=c1*inflow[i]+c2*inflow[i-1]+c3*outflow[i-1]
    
#    outflow[outflow<0]=0
    outflow=np.round(outflow,4)
    return outflow 
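A quick routing run on a toy hydrograph (the inflow values and parameters below are illustrative only):

import numpy as np

inflow = np.array([0., 5., 20., 15., 10., 5., 0.])
outflow = muskingum(inflow, inflow[0], k=2.0, x=0.2, dt=1.0)
print(outflow)                   # routed hydrograph, rounded to 4 decimals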
Example 65
Project: MLP-Numpy-Implementation-Gradient-Descent-Backpropagation   Author: EsterHlav   File: support.py    MIT License 5 votes vote down vote up
def getBestShift(img):
    # helper function for preprocessMNIST
    cy,cx = ndimage.measurements.center_of_mass(img)

    rows,cols = img.shape
    shiftx = np.round(cols/2.0-cx).astype(int)
    shifty = np.round(rows/2.0-cy).astype(int)

    return shiftx,shifty 
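A tiny check of the centring logic: a single bright pixel in the upper-left corner of a 28x28 image should be shifted by (9, 9) toward the centre:

import numpy as np
from scipy import ndimage

img = np.zeros((28, 28))
img[5, 5] = 1.0                  # all mass at row 5, column 5
print(getBestShift(img))         # (9, 9)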
Example 66
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: minibatch.py    MIT License 4 votes vote down vote up
def get_minibatch(roidb, num_classes):
  """Given a roidb, construct a minibatch sampled from it."""
  num_images = len(roidb)
  # Sample random scales to use for each image in this batch
  random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                  size=num_images)
  assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
    'num_images ({}) must divide BATCH_SIZE ({})'. \
    format(num_images, cfg.TRAIN.BATCH_SIZE)

  # Get the input image blob, formatted for caffe
  im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

  blobs = {'data': im_blob}

  assert len(im_scales) == 1, "Single batch only"
  assert len(roidb) == 1, "Single batch only"
  
  # gt boxes: (x1, y1, x2, y2, cls)
  #if cfg.TRAIN.USE_ALL_GT:
    # Include all ground truth boxes
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
  #else:
    # For the COCO ground truth boxes, exclude the ones that are ''iscrowd'' 
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
  #gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
  #gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
  #gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
  boxes = roidb[0]['boxes'] * im_scales[0]
  batch_ind = 0 * np.ones((boxes.shape[0], 1))
  boxes = np.hstack((batch_ind, boxes))
  DEDUP_BOXES=1./16.
  if DEDUP_BOXES > 0:
    v = np.array([1,1e3, 1e6, 1e9, 1e12])
    hashes = np.round(boxes * DEDUP_BOXES).dot(v)
    _, index, inv_index = np.unique(hashes, return_index=True,
                                    return_inverse=True)
    boxes = boxes[index, :]
  
  blobs['boxes'] = boxes
  blobs['im_info'] = np.array(
    [im_blob.shape[1], im_blob.shape[2], im_scales[0]],
    dtype=np.float32)
  blobs['labels'] = roidb[0]['labels']

  return blobs 
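The DEDUP_BOXES block above hashes each box by quantizing its coordinates to the stride-16 feature grid and dotting with powers of 10^3, so boxes landing in the same grid cells collapse to one. The trick in isolation:

import numpy as np

boxes = np.array([[0., 10., 20., 30., 40.],
                  [0., 10.5, 20., 30., 40.],    # same stride-16 cells as row 0
                  [0., 50., 60., 70., 80.]])
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(boxes * (1. / 16.)).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)
print(boxes[index, :])           # the near-duplicate row is dropped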
Example 67
Project: kipet   Author: salvadorgarciamunoz   File: TemplateBuilder.py    GNU General Public License v3.0 4 votes vote down vote up
def add_spectral_data(self, data):
        """Add spectral data

        Args:
            data (DataFrame): DataFrame with measurement times as
                              indices and wavelengths as columns.

        Returns:
            None

        """
        if isinstance(data, pd.DataFrame):
            # add zero rows for feed times that are not in original measurements in D-matrix (CS):
            df = pd.DataFrame(index=self._feed_times, columns=data.columns)
            for t in self._feed_times:
                if t not in data.index:  # for points that are the same in original measurement times and feed times (CS)
                    df.loc[t] = [0.0 for n in range(len(data.columns))]
            dfall = data.append(df)
            dfall.sort_index(inplace=True)
            dfall.index = dfall.index.to_series().apply(lambda x: np.round(x, 6))  # time from data rounded to 6 digits
            # Filter out NaN rows for points shared by the original measurement times and feed times (CS)
            count = 0
            for j in dfall.index:
                if count >= 1 and count < len(dfall.index):
                    if dfall.index[count] == dfall.index[count - 1]:
                        dfall = dfall.dropna()
                count += 1
            self._spectral_data = dfall
        else:
            raise RuntimeError('Spectral data format not supported. Try pandas.DataFrame')

        D = np.array(dfall)

        # flag derivative data if any measurement fails the D >= 0 check
        if not (D >= 0).all():
            self._is_D_deriv = True
        if self._is_D_deriv:
            print(
                "Warning! Since the D-matrix contains negative values, Kipet assumes a derivative of D has been input") 
Example 68
Project: FRIDA   Author: LCAV   File: generators.py    MIT License 4 votes vote down vote up
def gen_far_field_ir(doa, R, fs):
    """
    This function generates the impulse responses for all microphones for
    K sources in the far field.

    :param doa: (nd-array) The sources direction of arrivals. This should
                be a (D-1)xK array where D is the dimension (2 or 3) and K
                is the number of sources
    :param R: the locations of the microphones
    :param fs: sampling frequency

    :return ir: (ndarray) A KxMxL array containing all the fractional delay
                filters between each source (axis 0) and microphone (axis 1)
                L is the length of the filter
    """

    # make sure these guys are nd-arrays
    doa = np.array(doa)

    if doa.ndim == 0:
        doa = np.array([[doa]])

    elif doa.ndim == 1:
        doa = np.array([doa])

    # the number of microphones
    M = R.shape[1]
    dim = R.shape[0]

    # the number of sources
    K = doa.shape[1]

    # convert the spherical coordinates to unit propagation vectors
    p_vec = -unit_vec(doa)

    # the delays are the inner product between unit vectors and mic locations
    # set zero delay at earliest microphone
    delays = np.dot(p_vec.T, R) / pra.constants.get('c')
    delays -= delays.min()

    # figure out the maximal length of the impulse responses
    L = pra.constants.get('frac_delay_length')
    t_max = delays.max()
    D = int(L + np.ceil(np.abs(t_max * fs)))

    # the impulse response filter bank
    fb = np.zeros((K, M, D))

    # create all the impulse responses
    for k in range(K):
        for m in range(M):
            t = delays[k, m]
            delay_s = t * fs
            delay_i = int(np.round(delay_s))
            delay_f = delay_s - delay_i
            fb[k, m, delay_i:delay_i + L] += pra.fractional_delay(delay_f)

    return fb 
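The np.round call here splits a non-integer sample delay into an integer offset plus a fractional remainder in [-0.5, 0.5), which is then handled by the fractional-delay filter. The split alone:

import numpy as np

delay_s = 12.7                   # delay in samples
delay_i = int(np.round(delay_s))
delay_f = delay_s - delay_i
print(delay_i, np.round(delay_f, 2))   # 13 -0.3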
Example 69
Project: mmdetection   Author: open-mmlab   File: fcn_mask_head.py    Apache License 2.0 4 votes vote down vote up
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class+1, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape: original image size
            scale_factor: image rescaling factor
            rescale (bool): if True, return masks at the original image size

        Returns:
            list[list]: encoded masks
        """
        if isinstance(mask_pred, torch.Tensor):
            mask_pred = mask_pred.sigmoid().cpu().numpy()
        assert isinstance(mask_pred, np.ndarray)
        # when enabling mixed precision training, mask_pred may be float16
        # numpy array
        mask_pred = mask_pred.astype(np.float32)

        cls_segms = [[] for _ in range(self.num_classes - 1)]
        bboxes = det_bboxes.cpu().numpy()[:, :4]
        labels = det_labels.cpu().numpy() + 1

        if rescale:
            img_h, img_w = ori_shape[:2]
        else:
            img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
            img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
            scale_factor = 1.0

        for i in range(bboxes.shape[0]):
            if not isinstance(scale_factor, (float, np.ndarray)):
                scale_factor = scale_factor.cpu().numpy()
            bbox = (bboxes[i, :] / scale_factor).astype(np.int32)
            label = labels[i]
            w = max(bbox[2] - bbox[0] + 1, 1)
            h = max(bbox[3] - bbox[1] + 1, 1)

            if not self.class_agnostic:
                mask_pred_ = mask_pred[i, label, :, :]
            else:
                mask_pred_ = mask_pred[i, 0, :, :]
            im_mask = np.zeros((img_h, img_w), dtype=np.uint8)

            bbox_mask = mmcv.imresize(mask_pred_, (w, h))
            bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype(
                np.uint8)
            im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask
            rle = mask_util.encode(
                np.array(im_mask[:, :, np.newaxis], order='F'))[0]
            cls_segms[label - 1].append(rle)

        return cls_segms 
Example 70
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: Load_Pretrained_Embed.py    Apache License 2.0 4 votes vote down vote up
def load_pretrained_emb_zeros(path, text_field_words_dict, pad=None, set_padding=False):
    print("loading pre_train embedding by zeros......")
    if not isinstance(text_field_words_dict, dict):
        text_field_words_dict = convert_list2dict(text_field_words_dict)
    if pad is not None:
        padID = text_field_words_dict[pad]
    embedding_dim = -1
    with open(path, encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split(' ')
            if len(line_split) == 1:
                embedding_dim = line_split[0]
                break
            elif len(line_split) == 2:
                embedding_dim = line_split[1]
                break
            else:
                embedding_dim = len(line_split) - 1
                break
    f.close()
    word_count = len(text_field_words_dict)
    print('The number of wordsDict is {} \nThe dim of pretrained embedding is {}'.format(str(word_count),
                                                                                           str(embedding_dim)))
    embeddings = np.zeros((int(word_count), int(embedding_dim)))
    iv_num = 0
    oov_num = 0
    with open(path, encoding='utf-8') as f:
        lines = f.readlines()
        # lines = tqdm.tqdm(lines)
        for line in lines:
            values = line.strip().split(' ')
            if len(values) == 1 or len(values) == 2:
                continue
            index = text_field_words_dict.get(values[0])  # digit or None
            if index is not None:  # `if index` would wrongly skip word id 0
                iv_num += 1
                vector = np.array([float(i) for i in values[1:]], dtype='float32')
                embeddings[index] = vector

    f.close()
    oov_num = word_count - iv_num
    print("iv_num {} oov_num {} oov_radio {:.4f}%".format(iv_num, oov_num, round((oov_num / word_count) * 100, 4)))
    return torch.from_numpy(embeddings).float() 
Example 71
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: Load_Pretrained_Embed.py    Apache License 2.0 4 votes vote down vote up
def load_pretrained_emb_avg(path, text_field_words_dict, pad=None, set_padding=False):
    print("loading pre_train embedding by avg......")
    if not isinstance(text_field_words_dict, dict):
        text_field_words_dict = convert_list2dict(text_field_words_dict)
    assert pad is not None, "pad not allow with None"
    padID = text_field_words_dict[pad]
    embedding_dim = -1
    with open(path, encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split(' ')
            if len(line_split) == 1:
                embedding_dim = line_split[0]
                break
            elif len(line_split) == 2:
                embedding_dim = line_split[1]
                break
            else:
                embedding_dim = len(line_split) - 1
                break
    f.close()
    word_count = len(text_field_words_dict)
    print('The number of wordsDict is {} \nThe dim of pretrained embedding is {}\n'.format(str(word_count),
                                                                                           str(embedding_dim)))
    embeddings = np.zeros((int(word_count), int(embedding_dim)))

    inword_list = {}
    with open(path, encoding='utf-8') as f:
        lines = f.readlines()
        lines = tqdm.tqdm(lines)
        for line in lines:
            lines.set_description("Processing")
            values = line.strip().split(" ")
            if len(values) == 1 or len(values) == 2:
                continue
            index = text_field_words_dict.get(values[0])  # digit or None
            if index is not None:  # `if index` would wrongly skip word id 0
                vector = np.array([float(i) for i in values[1:]], dtype='float32')
                embeddings[index] = vector
                inword_list[index] = 1
    f.close()
    print("oov words initial by avg embedding, maybe take a while......")
    sum_col = np.sum(embeddings, axis=0) / len(inword_list)     # avg
    for i in range(len(text_field_words_dict)):
        if i not in inword_list and i != padID:
            embeddings[i] = sum_col

    OOVWords = word_count - len(inword_list)
    oov_radio = np.round(OOVWords / word_count, 6)
    print("All Words = {}, InWords = {}, OOVWords = {}, OOV Radio={}".format(
        word_count, len(inword_list), OOVWords, oov_radio))

    return torch.from_numpy(embeddings).float() 
Example 72
Project: pytorch_NER_BiLSTM_CNN_CRF   Author: bamtercelboo   File: Load_Pretrained_Embed.py    Apache License 2.0 4 votes vote down vote up
def load_pretrained_emb_uniform(path, text_field_words_dict, pad=None, set_padding=False):
    print("loading pre_train embedding by uniform......")
    if not isinstance(text_field_words_dict, dict):
        text_field_words_dict = convert_list2dict(text_field_words_dict)
    assert pad is not None, "pad not allow with None"
    padID = text_field_words_dict[pad]
    embedding_dim = -1
    with open(path, encoding='utf-8') as f:
        for line in f:
            line_split = line.strip().split(' ')
            if len(line_split) == 1:
                embedding_dim = line_split[0]
                break
            elif len(line_split) == 2:
                embedding_dim = line_split[1]
                break
            else:
                embedding_dim = len(line_split) - 1
                break
    f.close()
    word_count = len(text_field_words_dict)
    print('The number of wordsDict is {} \nThe dim of pretrained embedding is {}\n'.format(str(word_count),
                                                                                           str(embedding_dim)))
    embeddings = np.zeros((int(word_count), int(embedding_dim)))

    inword_list = {}
    with open(path, encoding='utf-8') as f:
        lines = f.readlines()
        lines = tqdm.tqdm(lines)
        for line in lines:
            lines.set_description("Processing")
            values = line.strip().split(" ")
            if len(values) == 1 or len(values) == 2:
                continue
            index = text_field_words_dict.get(values[0])  # digit or None
            if index is not None:  # `if index` would wrongly skip word id 0
                vector = np.array([float(i) for i in values[1:]], dtype='float32')
                embeddings[index] = vector
                inword_list[index] = 1
    f.close()
    print("oov words initial by uniform embedding, maybe take a while......")
    # sum_col = np.sum(embeddings, axis=0) / len(inword_list)     # avg
    uniform_col = np.random.uniform(-0.25, 0.25, int(embedding_dim)).round(6)    # uniform
    for i in range(len(text_field_words_dict)):
        if i not in inword_list and i != padID:
            embeddings[i] = uniform_col

    OOVWords = word_count - len(inword_list)
    oov_radio = np.round(OOVWords / word_count, 6)
    print("All Words = {}, InWords = {}, OOVWords = {}, OOV Radio={}".format(
        word_count, len(inword_list), OOVWords, oov_radio))

    return torch.from_numpy(embeddings).float() 
Example 73
Project: numpynet   Author: uptake   File: test_activation.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def test_tanh(self):
        activation = nnc.Activation("tanh")

        # Compare scalars
        self.assertEqual(activation.function(1), math.tanh(1))
        self.assertEqual(activation.function(0), math.tanh(0))

        # Compare vectors
        vect = np.array([1, 2, 3])
        true_values = np.array(
            [0.7615941559557649, 0.9640275800758169, 0.9950547536867305]
        )
        self.assertTrue(np.array_equal(activation.function(vect), true_values))

        # Compare matrices
        matrix = np.array([[0.2, 0.0000001, 3], [-0.5, 1000, -10]])
        true_values = np.array(
            [[0.19737532, 0.0000001, 0.99505475], [-0.46211716, 1.0, -1.0]]
        )
        self.assertTrue(
            np.array_equal(np.round(activation.function(matrix), 8), true_values)
        )

        # Test derivative and scalars
        self.assertEqual(activation._tanh(1, deriv=True), 1 - math.tanh(1) ** 2)
        self.assertEqual(activation._tanh(0, deriv=True), 1 - math.tanh(0) ** 2)

        # Test derivative and vectors
        true_values = np.array(
            [0.41997434161402614, 0.07065082485316443, 0.009866037165440211]
        )
        self.assertTrue(np.array_equal(activation._tanh(vect, deriv=True), true_values))

        # Test derivative and matrices
        true_values = np.array(
            [[0.96104298, 1.0, 0.00986604], [0.78644773, 0.0, 0.00000001]]
        )
        self.assertTrue(
            np.array_equal(
                np.round(activation._tanh(matrix, deriv=True), 8), true_values
            )
        ) 
Example 74
Project: numpynet   Author: uptake   File: examples.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def make_checkerboard_training_set(
    num_points=0, noise=0.0, randomize=True, x_min=0.0, x_max=1.0, y_min=0.0, y_max=1.0
):
    """
    Makes a binary array like a checkerboard (to work on an xor like problem)
    :param num_points: (int) The number of points you want in your training set
    :param noise: (float) percent to bit-flip in the training data, allows it to be imperfect
    :param randomize: (bool) True if you want the locations to be random, False if you want an ordered grid
    :param x_min: (float) minimum x of the 2D domain
    :param x_max: (float) maximum x of the 2D domain
    :param y_min: (float) minimum y of the 2D domain
    :param y_max: (float) maximum y of the 2D domain
    :return:
    """
    log.out.info("Generating target data.")
    # Select coordinates to do an XOR like operation on
    coords = []
    bools = []
    if randomize:
        for i in range(num_points):
            # Add num_points randomly
            coord_point = np.random.random(2)
            coord_point[0] = coord_point[0] * (x_max - x_min) + x_min
            coord_point[1] = coord_point[1] * (y_max - y_min) + y_min
            coords.append(coord_point)
    else:
        x_points = np.linspace(x_min, x_max, int(np.sqrt(num_points)))
        y_points = np.linspace(y_min, y_max, int(np.sqrt(num_points)))
        for i in range(int(np.sqrt(num_points))):
            for j in range(int(np.sqrt(num_points))):
                # Add grid points in row-major order
                coord_point = [x_points[i], y_points[j]]
                coords.append(coord_point)
    # Assign an xor boolean value to the coordinates
    for coord_point in coords:
        bool_point = np.array(
            [np.round(coord_point[0]) % 2, np.round(coord_point[1]) % 2]
        ).astype(bool)
        bools.append(np.logical_xor(bool_point[0], bool_point[1]))
    # If noisy then bit flip
    if noise > 0.0:
        for i in range(len(bools)):
            if np.random.random() < noise:
                bools[i] = np.logical_not(bools[i])
    # Build training vectors
    train_in = None
    train_out = None
    for i, coord in enumerate(coords):
        # Need to initialize the arrays
        if i == 0:
            train_in = np.array([coord])
            train_out = np.array([[bools[i]]])
        else:
            train_in = np.append(train_in, np.array([coord]), axis=0)
            train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)

    train_out = train_out.T
    return train_in, train_out 
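The labelling loop relies on np.round to snap each coordinate to its nearest integer before taking parity, which is what makes the board's squares alternate. In isolation:

import numpy as np

pt = [0.8, 2.3]                  # rounds to (1, 2) -> parities (odd, even)
parity = np.array([np.round(pt[0]) % 2, np.round(pt[1]) % 2]).astype(bool)
print(np.logical_xor(parity[0], parity[1]))   # True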
Example 75
Project: DOTA_models   Author: ringringyi   File: graph_utils.py    Apache License 2.0 4 votes vote down vote up
def label_nodes_with_class_geodesic(nodes_xyt, class_maps, pix, traversible,
                                    ff_cost=1., fo_cost=1., oo_cost=1.,
                                    connectivity=4):
  """Labels nodes in nodes_xyt with class labels using geodesic distance as
  defined by traversible from class_maps.
  Inputs:
    nodes_xyt
    class_maps: counts for each class.
    pix: distance threshold to consider close enough to target.
    traversible: binary map of whether traversible or not.
  Output:
    labels: For each node in nodes_xyt returns a label of the class or -1 if
    unlabelled.
  """
  g, nodes = convert_traversible_to_graph(traversible, ff_cost=ff_cost,
                                          fo_cost=fo_cost, oo_cost=oo_cost,
                                          connectivity=connectivity)

  class_dist = np.zeros_like(class_maps*1.)
  n_classes = class_maps.shape[2]
  if False:
    # Assign each pixel to a class based on number of points.
    selem = skimage.morphology.disk(pix)
    class_maps_ = class_maps*1.
    class_maps__ = np.argmax(class_maps_, axis=2)
    class_maps__[np.max(class_maps_, axis=2) == 0] = -1

  # Label nodes with classes.
  for i in range(n_classes):
    # class_node_ids = np.where(class_maps__.ravel() == i)[0]
    class_node_ids = np.where(class_maps[:,:,i].ravel() > 0)[0]
    dist_i = get_distance_node_list(g, class_node_ids, 'to', weights='wts')
    class_dist[:,:,i] = np.reshape(dist_i, class_dist[:,:,i].shape)
  class_map_geodesic = (class_dist <= pix)
  class_map_geodesic = np.reshape(class_map_geodesic, [-1, n_classes])

  # For each node pick out the label from this class map.
  x = np.round(nodes_xyt[:,[0]]).astype(np.int32)
  y = np.round(nodes_xyt[:,[1]]).astype(np.int32)
  ind = np.ravel_multi_index((y,x), class_dist[:,:,0].shape)
  node_class_label = class_map_geodesic[ind[:,0],:]
  class_map_geodesic = class_dist <= pix
  return class_map_geodesic, node_class_label 
Example 76
Project: pyalcs   Author: ParrotPrediction   File: Agent.py    MIT License 4 votes vote down vote up
def _evaluate(self,
                  env,
                  n_trials: int,
                  func: Callable,
                  decay: bool = False) -> Tuple:
        """
        Runs the classifier in desired strategy (see `func`) and collects
        metrics.

        Parameters
        ----------
        env:
            OpenAI Gym environment
        n_trials: int
            maximum number of trials
        func: Callable
            Function accepting three parameters: env, steps already made,
             current trial
        decay: bool
            Whether the epsilon is decaying through the whole experiment

        Returns
        -------
        tuple
            population of classifiers and metrics
        """
        current_trial = 0
        steps = 0

        metrics: List = []
        while current_trial < n_trials:
            steps_in_trial, reward = func(env, steps, current_trial)
            steps += steps_in_trial

            # collect user metrics
            if current_trial % self.get_cfg().metrics_trial_frequency == 0:
                m = basic_metrics(current_trial, steps_in_trial, reward)

                user_metrics = self.get_cfg().user_metrics_collector_fcn
                if user_metrics is not None:
                    m.update(user_metrics(self.get_population(), env))

                metrics.append(m)

            # Print last metric roughly every 10% of trials
            # (assumes np.round(n_trials / 10) is non-zero, i.e. n_trials >= 6)
            if current_trial % np.round(n_trials / 10) == 0:
                logger.info(metrics[-1])

            if decay:
                # Gradually decrease the epsilon
                self.get_cfg().epsilon -= 1 / n_trials
                if self.get_cfg().epsilon < 0.01:
                    self.get_cfg().epsilon = 0.01

            current_trial += 1

        return self.get_population(), metrics 
Example 77
Project: HAPI   Author: MAfarrag   File: Routing.py    MIT License 4 votes vote down vote up
def muskingum(inflow,Qinitial,k,x,dt):
    """
    ===========================================================
     muskingum(inflow,Qinitial,k,x,dt)
    ===========================================================
    
    inputs:
    ----------
        1-inflow:
            [numpy array] time series of inflow hydrograph
        2-Qinitial:
            [numeric] initial value for outflow
        3-k:
            [numeric] travelling time (hours)
        4-x:
            [numeric] surface nonlinearity coefficient (0,0.5)
        5-dt:
            [numeric] delta t
        
    Outputs:
    ----------
        1-outflow:
            [numpy array] time series of routed hydrograph
    
    Examples:
    ----------
    pars[10]=k
    pars[11]=x
    p2[0]=1  # hourly time step
    q_routed = Routing.muskingum(q_uz,q_uz[0],pars[10],pars[11],p2[0]) 
   """ 
   
    c1=(dt-2*k*x)/(2*k*(1-x)+dt)
    c2=(dt+2*k*x)/(2*k*(1-x)+dt)
    c3=(2*k*(1-x)-dt)/(2*k*(1-x)+dt)
    
#    if c1+c2+c3!=1:
#        raise("sim of c1,c2 & c3 is not 1")
    
    outflow=np.ones_like(inflow)*np.nan    
    outflow[0]=Qinitial
    
    for i in range(1,len(inflow)):
        outflow[i]=c1*inflow[i]+c2*inflow[i-1]+c3*outflow[i-1]
    
    outflow=np.round(outflow,4)
    
    return outflow 
Example 78
Project: HAPI   Author: MAfarrag   File: Routing.py    MIT License 4 votes vote down vote up
def TriangularRouting(q, maxbas=1):
    """
    ==========================================================
         TriangularRouting(q, maxbas=1)
    ==========================================================
    This function implements the transfer function using a triangular 
    function
    
    Inputs:
    ----------
        1-q:
            [numpy array] time series of discharge hydrographs
        2-maxbas:
            [integer] number of time steps that the triangular routing function
            is going to divide the discharge into, based on the weights
            generated from this function, min value is 1 and default value is 1
    
    Outputs:
    ----------
        1-q_r:
            [numpy array] time series of routed hydrograph
    
    Examples:
    ----------
        q_sim=TriangularRouting(np.array(q_sim), parameters[-1])
    """
    # input data validation
    assert maxbas >= 1, 'maxbas value has to be at least 1'
    
    # round maxbas to the nearest integer
    maxbas = int(round(maxbas, 0))
    
    # get the weights
    w = Tf(maxbas)
    
    # route the discharge signal
    q_r = np.zeros_like(q, dtype='float64')
    q_temp = q
    for w_i in w:
        q_r += q_temp*w_i
        q_temp = np.insert(q_temp, 0, 0.0)[:-1]

    return q_r 
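TriangularRouting depends on a helper Tf(maxbas) that is not reproduced in this snippet; it should return maxbas triangular weights summing to 1. A plausible stand-in (an assumption, not HAPI's exact code):

import numpy as np
from scipy.integrate import quad

def Tf(maxbas):
    # hypothetical triangular weighting function: integrate a unit-area
    # triangle over maxbas equal sub-intervals so the weights sum to 1
    wfunc = lambda x: np.interp(x, [0, maxbas / 2.0, maxbas], [0, 2.0 / maxbas, 0])
    return np.array([quad(wfunc, i, i + 1)[0] for i in range(maxbas)])

print(np.round(Tf(4), 4))        # [0.125 0.375 0.375 0.125]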
Example 80
Project: HAPI   Author: MAfarrag   File: Routing.py    MIT License 4 votes vote down vote up
def TriangularRouting(q, maxbas=1):
    """
    ==========================================================
         TriangularRouting(q, maxbas=1)
    ==========================================================
    This function implements the transfer function using a triangular 
    function
    
    Inputs:
    ----------
        1-q:
            [numpy array] time series of discharge hydrographs
        2-maxbas:
            [integer] number of time steps that the triangular routing function
            is going to divide the discharge into, based on the weights
            generated from this function, min value is 1 and default value is 1
    
    Outputs:
    ----------
        1-q_r:
            [numpy array] time series of routed hydrograph
    
    Examples:
    ----------
        q_sim=TriangularRouting(np.array(q_sim), parameters[-1])
    """
    # input data validation
    assert maxbas >= 1, 'maxbas value has to be at least 1'
    
    # round maxbas to the nearest integer
    maxbas = int(round(maxbas, 0))
    
    # get the weights
    w = Tf(maxbas)
    
    # route the discharge signal
    q_r = np.zeros_like(q, dtype='float64')
    q_temp = np.float32(q)
    for w_i in w:
        q_r += q_temp*w_i
        q_temp = np.insert(q_temp, 0, 0.0)[:-1]

    return q_r