Python numpy.hsplit() Examples

The following are 30 code examples showing how to use numpy.hsplit(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module numpy, or try the search function.

Example 1
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    License: MIT License 6 votes vote down vote up
def calculate_diff_stress(self, x, u, nu, side=1):
        """
        Derivative of the Von Mises stress with respect to the densities x.

        Takes the displacements u and Young's modulus nu; the element side
        length may be supplied (default: 1). Returns 0 when the stress field
        is numerically zero everywhere.
        """
        n_loads = float(u.shape[1])
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        # Average elastic response over all load cases.
        EBu = sum(EB.dot(u[:, j][self.edofMat]) for j in range(u.shape[1]))
        s11, s22, s12 = numpy.hsplit((EBu * rho / n_loads).T, 3)
        drho = self.diff_penalized_densities(x)
        ds11, ds22, ds12 = numpy.hsplit(
            ((1 - rho) * drho * EBu / n_loads).T, 3)
        vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
        if abs(vm_stress).sum() <= 1e-8:
            return 0
        # Chain rule through the square root of the Von Mises expression.
        dvm_stress = (2 * s11 * ds11 - ds11 * s22 - s11 * ds22 +
                      2 * s22 * ds22 + 6 * s12 * ds12) / (2. * vm_stress)
        return dvm_stress
Example 2
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    License: MIT License 6 votes vote down vote up
def calculate_diff_stress(self, x, u, nu, side=1):
        """
        Derivative of the Von Mises stress with respect to the densities x.

        Takes the displacements u and Young's modulus nu; the element side
        length may be supplied (default: 1). Returns 0 when the stress field
        is numerically zero everywhere.
        """
        n_loads = float(u.shape[1])
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        # Average elastic response over all load cases.
        EBu = sum(EB.dot(u[:, j][self.edofMat]) for j in range(u.shape[1]))
        s11, s22, s12 = numpy.hsplit((EBu * rho / n_loads).T, 3)
        drho = self.diff_penalized_densities(x)
        ds11, ds22, ds12 = numpy.hsplit(
            ((1 - rho) * drho * EBu / n_loads).T, 3)
        vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
        if abs(vm_stress).sum() <= 1e-8:
            return 0
        # Chain rule through the square root of the Von Mises expression.
        dvm_stress = (2 * s11 * ds11 - ds11 * s22 - s11 * ds22 +
                      2 * s22 * ds22 + 6 * s12 * ds12) / (2. * vm_stress)
        return dvm_stress
Example 3
Project: EyerissF   Author: jneless   File: Pooling.py    License: GNU Lesser General Public License v2.1 6 votes vote down vote up
def MAXPooling(Array, activation=1, ksize=2):
    """Max-pool a 2-D array with non-overlapping ksize x ksize windows.

    Each window is reduced to its maximum; the pooled array is divided by
    ``activation`` and truncated to int.

    Raises AssertionError if either dimension is not divisible by ksize.
    """
    assert len(Array) % ksize == 0
    # Also validate the columns: the original only checked rows and let
    # np.hsplit fail with an opaque error on ragged input.
    assert len(Array[0]) % ksize == 0

    pooled_rows = []
    # Integer division: np.vsplit/np.hsplit expect an int section count
    # (the old float arguments only worked via implicit truncation).
    for band in np.vsplit(Array, len(Array) // ksize):
        row_maxima = [block.max()
                      for block in np.hsplit(band, len(band[0]) // ksize)]
        pooled_rows.append(np.array(row_maxima))

    return np.array(np.array(pooled_rows) / activation, dtype=int)
Example 4
Project: vnpy_crypto   Author: birforce   File: test_vecm.py    License: MIT License 6 votes vote down vote up
def test_var_rep():
    """Check the VAR representation (A matrices) against reference results."""
    if debug_mode:
        if "VAR repr. A" not in to_test:  # pragma: no cover
            return
        print("\n\nVAR REPRESENTATION", end="")
    for ds in datasets:
        for dt in ds.dt_s_list:
            if debug_mode:
                print("\n" + dt_s_tup_to_string(dt) + ": ", end="")

            res = results_sm[ds][dt]
            res_exog = results_sm_exog[ds][dt]
            res_exog_coint = results_sm_exog_coint[ds][dt]

            err_msg = build_err_msg(ds, dt, "VAR repr. A")
            obtained = res.var_rep
            obtained_exog = res_exog.var_rep
            obtained_exog_coint = res_exog_coint.var_rep
            lag_order = obtained.shape[0]
            # Reference stores the stacked A matrices side by side.
            desired = np.hsplit(results_ref[ds][dt]["est"]["VAR A"], lag_order)
            assert_allclose(obtained, desired, rtol, atol, False, err_msg)
            # The exogenous variants must reproduce the same representation.
            if res_exog.exog is not None:
                assert_equal(obtained_exog, obtained, "WITH EXOG" + err_msg)
            if res_exog_coint.exog_coint is not None:
                assert_equal(obtained_exog_coint, obtained,
                             "WITH EXOG_COINT" + err_msg)
Example 5
Project: refinedet.pytorch   Author: dd604   File: ds_utils.py    License: MIT License 6 votes vote down vote up
def bbox_overlaps(bboxes, ref_bboxes):
    """
    Compute IoU between every bbox and every ref_bbox.

    ref_bboxes: N x 4 (x1, y1, x2, y2);
    bboxes: K x 4

    return: K x N matrix of IoU values in [0, 1]
    """
    refx1, refy1, refx2, refy2 = np.vsplit(np.transpose(ref_bboxes), 4)
    x1, y1, x2, y2 = np.hsplit(bboxes, 4)

    minx = np.maximum(refx1, x1)
    miny = np.maximum(refy1, y1)
    maxx = np.minimum(refx2, x2)
    maxy = np.minimum(refy2, y2)

    # Clamp widths/heights at zero: for disjoint boxes the raw extents are
    # negative and their product could masquerade as a positive overlap
    # (e.g. two fully separated boxes previously produced IoU 1.0).
    iw = np.maximum(maxx - minx + 1, 0)
    ih = np.maximum(maxy - miny + 1, 0)
    inter_area = iw * ih
    ref_area = (refx2 - refx1 + 1) * (refy2 - refy1 + 1)
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    iou = inter_area / (ref_area + area - inter_area)

    return iou
Example 6
Project: ml-fairness-gym   Author: google   File: attention_allocation.py    License: Apache License 2.0 6 votes vote down vote up
def _sample_incidents(rng, params):
  """Generates new crime incident occurrences across locations.

  Args:
    rng: A numpy RandomState() object acting as a random number generator.
    params: A Params instance for this environment.

  Returns:
    incidents_occurred: a list of integers of number of incidents for each
      location that could be discovered by attention.
    reported_incidents: a list of integers of a number of incidents reported
      directly.
  """
  rates = params.incident_rates
  draws = []
  for i in range(params.n_locations):
    # One Poisson draw per channel: (discoverable, directly reported).
    draws.append(rng.poisson([
        rates[i] * params.discovered_incident_weight,
        rates[i] * params.reported_incident_weight,
    ]))
  incidents_occurred, reported_incidents = np.hsplit(np.asarray(draws), 2)
  return incidents_occurred.flatten(), reported_incidents.flatten()
Example 7
Project: bayesmark   Author: uber   File: space_test.py    License: Apache License 2.0 6 votes vote down vote up
def test_joint_space_warp_missing(args):
    """Warping a partial point leaves non-fixed dimensions as NaN blocks."""
    meta, X, _, fixed_vars = args

    space = sp.JointSpace(meta)

    X_w = space.warp([fixed_vars])
    assert X_w.dtype == sp.WARPED_DTYPE

    # Every warped coordinate is inside its bounds, or NaN (missing var).
    lower, upper = space.get_bounds().T
    assert np.all(np.isnan(X_w) | (lower <= X_w))
    assert np.all(np.isnan(X_w) | (X_w <= upper))

    for param, xx in zip(space.param_list, np.hsplit(X_w, space.blocks[:-1])):
        (xx,) = xx
        if param not in fixed_vars:
            assert np.all(np.isnan(xx))
            continue
        x_orig = space.spaces[param].unwarp(xx).item()
        space.spaces[param].validate(x_orig)
        assert close_enough(x_orig, fixed_vars[param])

        # Round trip: warping the fixed value reproduces this block.
        x_w2 = space.spaces[param].warp(fixed_vars[param])
        assert close_enough(xx, x_w2)
Example 8
Project: BigGAN-TPU-TensorFlow   Author: Octavian-ai   File: inception_score.py    License: MIT License 6 votes vote down vote up
def test_debug(self):
		"""Split a saved image grid into tiles and score them with Inception.

		Reads ./temp/dump.png — assumed to be a grid_n x grid_n mosaic of
		equally sized square images (TODO confirm) — cuts it into individual
		tiles, runs TF-GAN's Inception scorer on them, and writes the raw
		logits out to ./temp/inception_logits.png for inspection.
		"""
		image = imageio.imread("./temp/dump.png")
		grid_n = 6
		img_size = image.shape[1] // grid_n
		img_ch = image.shape[-1]

		# Cut the mosaic into rows, then each row into tiles, and flatten
		# the grid into a batch of [grid_n*grid_n] images.
		images = np.vsplit(image, grid_n)
		images = [np.hsplit(i, grid_n) for i in images]
		images = np.reshape(np.array(images), [grid_n*grid_n, img_size, img_size, img_ch])

		with tf.Graph().as_default():
			with tf.Session() as sess:
				v_images_placeholder = tf.placeholder(dtype=tf.float32)
				v_images = tf.contrib.gan.eval.preprocess_image(v_images_placeholder)
				v_logits = tf.contrib.gan.eval.run_inception(v_images)
				v_score = tf.contrib.gan.eval.classifier_score_from_logits(v_logits)
				score, logits = sess.run([v_score, v_logits], feed_dict={v_images_placeholder:images})


		imageio.imwrite("./temp/inception_logits.png", logits) 
Example 9
Project: Systematic-LEDs   Author: not-matt   File: main.py    License: MIT License 6 votes vote down vote up
def visualize_wave(self, y):
        """Effect that flashes to the beat with scrolling coloured bits.

        On a detected beat the whole strip is filled with the flash colour
        and a wipe is (re)started; otherwise the previous frame is decayed
        and the wave colour is painted in from both ends of the strip.
        Note: ``y`` (the audio sample) is not read here — beat state comes
        from ``self.current_freq_detects``.
        """
        if self.current_freq_detects["beat"]:
            # Beat: fill all three channels with the flash colour.
            output = np.zeros((3,config.settings["devices"][self.board]["configuration"]["N_PIXELS"]))
            output[0][:]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_flash"])[0]
            output[1][:]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_flash"])[1]
            output[2][:]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_flash"])[2]
            # Restart the wipe at the configured length.
            self.wave_wipe_count = config.settings["devices"][self.board]["effect_opts"]["Wave"]["wipe_len"]
        else:
            output = np.copy(self.prev_output)
            #for i in range(len(self.prev_output)):
            #    output[i] = np.hsplit(self.prev_output[i],2)[0]
            # Fade the previous frame (note: this overwrites the copy above).
            output = np.multiply(self.prev_output,config.settings["devices"][self.board]["effect_opts"]["Wave"]["decay"])
            # Paint the wave colour from both ends toward the middle.
            for i in range(self.wave_wipe_count):
                output[0][i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[0]
                output[0][-i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[0]
                output[1][i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[1]
                output[1][-i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[1]
                output[2][i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[2]
                output[2][-i]=colour_manager.colour(config.settings["devices"][self.board]["effect_opts"]["Wave"]["color_wave"])[2]
            #output = np.concatenate([output,np.fliplr(output)], axis=1)
            # Cap the wipe at half the strip, then advance it for next frame.
            if self.wave_wipe_count > config.settings["devices"][self.board]["configuration"]["N_PIXELS"]//2:
                self.wave_wipe_count = config.settings["devices"][self.board]["configuration"]["N_PIXELS"]//2
            self.wave_wipe_count += config.settings["devices"][self.board]["effect_opts"]["Wave"]["wipe_speed"]
        return output 
Example 10
def load_digits_and_labels(big_image):
    """Return all digit cells from the 'big' image plus a label per cell.

    The source image is a grid of SIZE_IMAGE x SIZE_IMAGE digit tiles in
    which each digit class occupies an equal, contiguous run of cells.
    """
    # Load the 'big' grayscale image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Integer division: np.hsplit/np.vsplit expect an int section count
    # (the old float arguments only worked via implicit truncation).
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        digits.extend(np.hsplit(row, number_rows))
    digits = np.array(digits)

    # One label per cell. np.repeat requires integer repeats; the previous
    # float division raises TypeError on modern numpy.
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Example 11
def load_digits_and_labels(big_image):
    """Return all digit cells from the 'big' image plus a label per cell.

    The source image is a grid of SIZE_IMAGE x SIZE_IMAGE digit tiles in
    which each digit class occupies an equal, contiguous run of cells.
    """
    # Load the 'big' grayscale image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Integer division: np.hsplit/np.vsplit expect an int section count
    # (the old float arguments only worked via implicit truncation).
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        digits.extend(np.hsplit(row, number_rows))
    digits = np.array(digits)

    # One label per cell. np.repeat requires integer repeats; the previous
    # float division raises TypeError on modern numpy.
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Example 12
def load_digits_and_labels(big_image):
    """Return all digit cells from the 'big' image plus a label per cell.

    The source image is a grid of SIZE_IMAGE x SIZE_IMAGE digit tiles in
    which each digit class occupies an equal, contiguous run of cells.
    """
    # Load the 'big' grayscale image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Integer division: np.hsplit/np.vsplit expect an int section count
    # (the old float arguments only worked via implicit truncation).
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        digits.extend(np.hsplit(row, number_rows))
    digits = np.array(digits)

    # One label per cell. np.repeat requires integer repeats; the previous
    # float division raises TypeError on modern numpy.
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Example 13
def load_digits_and_labels(big_image):
    """Return all digit cells from the 'big' image plus a label per cell.

    The source image is a grid of SIZE_IMAGE x SIZE_IMAGE digit tiles in
    which each digit class occupies an equal, contiguous run of cells.
    """
    # Load the 'big' grayscale image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Integer division: np.hsplit/np.vsplit expect an int section count
    # (the old float arguments only worked via implicit truncation).
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        digits.extend(np.hsplit(row, number_rows))
    digits = np.array(digits)

    # One label per cell. np.repeat requires integer repeats; the previous
    # float division raises TypeError on modern numpy.
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Example 14
def load_digits_and_labels(big_image):
    """Return all digit cells from the 'big' image plus a label per cell.

    The source image is a grid of SIZE_IMAGE x SIZE_IMAGE digit tiles in
    which each digit class occupies an equal, contiguous run of cells.
    """
    # Load the 'big' grayscale image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Integer division: np.hsplit/np.vsplit expect an int section count
    # (the old float arguments only worked via implicit truncation).
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        digits.extend(np.hsplit(row, number_rows))
    digits = np.array(digits)

    # One label per cell. np.repeat requires integer repeats; the previous
    # float division raises TypeError on modern numpy.
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Example 15
def load_digits_and_labels(big_image):
    """Return all digit cells from the 'big' image plus a label per cell.

    The source image is a grid of SIZE_IMAGE x SIZE_IMAGE digit tiles in
    which each digit class occupies an equal, contiguous run of cells.
    """
    # Load the 'big' grayscale image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Integer division: np.hsplit/np.vsplit expect an int section count
    # (the old float arguments only worked via implicit truncation).
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        digits.extend(np.hsplit(row, number_rows))
    digits = np.array(digits)

    # One label per cell. np.repeat requires integer repeats; the previous
    # float division raises TypeError on modern numpy.
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels
Example 16
Project: altanalyze   Author: nsalomonis   File: cluster_corr.py    License: Apache License 2.0 6 votes vote down vote up
def find_closest_cluster(query, ref, min_correlation=-1):
    """
    For each collection in query, identifies the collection in ref that is most similar

    query and ref are both dictionaries of CellCollections, keyed by a "partition id"

    Returns a generator over the best matches for each query collection that
    meet the min_correlation threshold; each item pairs the id of the query
    collection with the id of its best match in ref.
    """
    query_centroids, query_ids = compute_centroids(query)
    ref_centroids, ref_ids = compute_centroids(ref)
    print('number of reference partions %d, number of query partions %d'
          % (len(ref_ids), len(query_ids)))
    combined = np.concatenate((ref_centroids, query_centroids), axis=1)
    all_correlations = np.corrcoef(combined, rowvar=False)

    # all_correlations is everything-vs-everything; keep only query-vs-ref,
    # i.e. the top-right nref x nquery corner of the matrix.
    nref = len(ref)
    top_rows = np.vsplit(all_correlations, (nref,))[0]
    corr = np.hsplit(top_rows, (nref,))[1]
    best_match = zip(range(corr.shape[1]), np.argmax(corr, 0))
    # best_match holds array indices, not ids, and ignores the threshold;
    # translate and filter on the way out.
    return ((query_ids[q], ref_ids[r])
            for q, r in best_match
            if corr[r, q] >= min_correlation)
Example 17
Project: Python_DIC   Author: ChrisEberl   File: initData.py    License: Apache License 2.0 6 votes vote down vote up
def openCoordinates(directory, nbInstances, nbImages):
    """Load per-instance, per-image coordinate and strain grids from CSV.

    Reads <directory>/coordinates.csv, splits it column-wise per instance and
    row-wise per image. Returns (zi, zi_strainX, zi_strainY) lists with one
    entry per instance, or (None, None, None) if the file is missing or its
    shape does not divide evenly.
    """
    zi = []
    zi_strainX = []
    zi_strainY = []
    coordinatesFile = getData.testReadFile(directory + '/coordinates.csv')
    if coordinatesFile is None:
        return None, None, None

    instanceCoordinates = np.hsplit(coordinatesFile, nbInstances)
    for instance in range(nbInstances):
        try:
            imageCoordinates = np.asarray(
                np.vsplit(instanceCoordinates[instance], nbImages))
        except ValueError:
            # Row count not divisible by nbImages -> malformed file.
            # (Was a bare `except:`, which also hid unrelated errors.)
            return None, None, None
        # Columns 0:100 hold coordinates, 100:200 strain X, 200:300 strain Y.
        zi.append(imageCoordinates[:, :, 0:100])
        zi_strainX.append(imageCoordinates[:, :, 100:200])
        zi_strainY.append(imageCoordinates[:, :, 200:300])
    return zi, zi_strainX, zi_strainY
Example 18
Project: python-urbanPlanning   Author: richieBao   File: rf_NDVIEvolution.py    License: MIT License 6 votes vote down vote up
def trainBlock(array,row,col):
    """Tile a 2-D array into row x col blocks and stack them as samples.

    Trims the array so both dimensions divide evenly, splits it into blocks,
    shows one sample block with matplotlib, and returns the stacked blocks.
    """
    arrayShape=array.shape
    print(arrayShape)
    rowPara=divmod(arrayShape[1],row)  # divmod(a,b) gives (a // b, a % b)
    colPara=divmod(arrayShape[0],col)
    extractArray=array[:colPara[0]*col,:rowPara[0]*row]  # trim the remainder so the array splits evenly
#    print(extractArray.shape)
    hsplitArray=np.hsplit(extractArray,rowPara[0])
    vsplitArray=flatten_lst([np.vsplit(subArray,colPara[0]) for subArray in hsplitArray])
    dataBlock=flatten_lst(vsplitArray)
    print("样本量:%s"%(len(dataBlock)))  # number of blocks == number of samples
    
    '''显示查看其中一个样本'''     
    # Display one sample block for visual inspection.
    subShow=dataBlock[-10]
    print(subShow,'\n',subShow.max(),subShow.std())
    fig=plt.figure(figsize=(20, 12))
    ax=fig.add_subplot(111)
    plt.xticks([x for x in range(subShow.shape[0]) if x%400==0])
    plt.yticks([y for y in range(subShow.shape[1]) if y%200==0])
    ax.imshow(subShow)    
    
    dataBlockStack=np.append(dataBlock[:-1],[dataBlock[-1]],axis=0) # convert the list of blocks into one array
    print(dataBlockStack.shape)
    return dataBlockStack 
Example 19
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    License: MIT License 5 votes vote down vote up
def calculate_principle_stresses(self, x, u, nu, side=1):
        """
        Calculate the principle stresses in the x, y, and shear directions.

        Returns the three stress components as separate column arrays.
        """
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        n_loads = u.shape[1]
        # Average the elastic response over all load cases.
        stress = sum(EB.dot(u[:, j][self.edofMat]) for j in range(n_loads))
        stress *= rho / float(n_loads)
        return numpy.hsplit(stress.T, 3)
Example 20
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    License: MIT License 5 votes vote down vote up
def calculate_principle_stresses(self, x, u, nu, side=1):
        """
        Calculate the principle stresses in the x, y, and shear directions.

        Returns the three stress components as separate column arrays.
        """
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        n_loads = u.shape[1]
        # Average the elastic response over all load cases.
        stress = sum(EB.dot(u[:, j][self.edofMat]) for j in range(n_loads))
        stress *= rho / float(n_loads)
        return numpy.hsplit(stress.T, 3)
Example 21
Project: OpenCV-Python-Tutorial   Author: makelove   File: digits.py    License: MIT License 5 votes vote down vote up
def split2d(img, cell_size, flatten=True):
    """Cut img into a grid of cell_size tiles; optionally flatten the grid."""
    rows, cols = img.shape[:2]
    cell_w, cell_h = cell_size
    # Row bands first, then each band into cells.
    grid = np.array([np.hsplit(band, cols // cell_w)
                     for band in np.vsplit(img, rows // cell_h)])
    if flatten:
        grid = grid.reshape(-1, cell_h, cell_w)
    return grid
Example 22
Project: EyerissF   Author: jneless   File: Hive.py    License: GNU Lesser General Public License v2.1 5 votes vote down vote up
def __ReverseFmapReuse__(self, Psum, PsumNum):
        """Regroup partial sums from fmap-reuse layout back into per-image maps.

        Splits Psum into PsumNum-wide column groups, then for each of the
        PsumNum outputs gathers column x of every group into one matrix and
        hands the list of matrices to __SetReturnImgs__.
        """
        SubMap = np.hsplit(Psum, int(np.shape(Psum)[1] / PsumNum))
        l = []
        m = []
        for x in range(0, PsumNum):
            for y in range(len(SubMap)):
                # [np.newaxis] turns the extracted row into a column vector
                l.append(np.transpose(np.array(SubMap[y][:, x])[np.newaxis]))
            m.append(np.hstack(l))
            l = []

        # self.__SetReturnImgs__(np.array(m))
        self.__SetReturnImgs__(m)
Example 23
Project: EyerissF   Author: jneless   File: Hive.py    License: GNU Lesser General Public License v2.1 5 votes vote down vote up
def __ReverseFilterReuse__(self, Psum, PsumNum):
        """Split the partial-sum matrix into PsumNum equal column blocks."""
        parts = np.hsplit(Psum, PsumNum)
        self.__SetReturnImgs__(list(parts))
Example 24
Project: EyerissF   Author: jneless   File: PE.py    License: GNU Lesser General Public License v2.1 5 votes vote down vote up
def __Conv__(self):
        """Dispatch a 1-D convolution according to image/filter multiplicity.

        Three cases: single image + single filter runs directly; single
        filter is reused across several images; single image is reused
        across several filters.
        NOTE(review): when both ImageNum > 1 and FilterNum > 1 no branch
        matches and the method implicitly returns None — confirm callers
        never hit that combination.
        """
        ImageRow = self.ImageRow
        FilterWeight = self.FilterWeight
        ImageNum = self.ImageNum
        FilterNum = self.FilterNum

        l = list()
        if FilterNum == 1 and ImageNum == 1:

            # Single image and single filter: run the convolution directly
            return self.__Conv1d__(ImageRow, FilterWeight)
        else:
            # Single filter: reuse it across all images
            if FilterNum == 1:
                # Split horizontally into the original images' rows
                pics = np.hsplit(ImageRow, ImageNum)
                # Convolve each piece
                for x in pics:
                    # Stash each convolution result in l
                    l.append(self.__Conv1d__(x, FilterWeight))
                    # Combine the results in l into one new matrix
                    # (stacked horizontally)
                    result = np.hstack(np.array(l))
                # Return the combined result
                return result

            # Single image: reuse it across all filters
            if ImageNum == 1:

                # Reshape FilterWeight into a (weights_per_filter, FilterNum) matrix
                FilterWeight = np.reshape(FilterWeight, (int(FilterWeight.size / FilterNum), FilterNum))
                flts = np.array(FilterWeight.T)

                for x in flts:
                    l.append(self.__Conv1d__(ImageRow, x))
                result = np.array(l)
                result = result.T
                result = np.reshape(result, (1, result.size))
                return result 
Example 25
Project: VSUA-Captioning   Author: ltguo19   File: dataloader.py    License: MIT License 5 votes vote down vote up
def get_box_feat(self, image_id):
        """Build a 5-D normalized geometry feature for every box of image_id.

        Features per box: center-x/w, center-y/h, width/w, height/h,
        area/(w*h); optionally L2-normalized per row.
        """
        image = self.sg_box_info[int(image_id)]
        x1, y1, x2, y2 = np.hsplit(image['boxes'], 4)
        # Bug fix: `image` is already the per-image record (see the 'boxes'
        # access above), so indexing it with image_id again would fail.
        h, w = image['image_h'], image['image_w']
        iw, ih = x2 - x1 + 1, y2 - y1 + 1
        box_feat = np.hstack((0.5 * (x1 + x2) / w, 0.5 * (y1 + y2) / h,
                              iw / w, ih / h, iw * ih / (w * h)))
        if self.norm_box_feat:
            box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
        return box_feat
Example 26
Project: monasca-analytics   Author: openstack   File: svc.py    License: Apache License 2.0 5 votes vote down vote up
def _generate_train_test_sets(self, samples, ratio_train):
        """Split samples into train/test feature matrices and label vectors.

        The last column of `samples` holds the label; `ratio_train` is the
        fraction of leading rows used for training.
        """
        n_train = int(len(samples) * ratio_train)

        # Peel the label column off the features.
        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:n_train])
        X_train_label = np.array(labels[:n_train]).ravel()
        X_test = np.array(data[n_train:])
        X_test_label = np.array(labels[n_train:]).ravel()
        return X_train, X_train_label, X_test, X_test_label
Example 27
Project: monasca-analytics   Author: openstack   File: decision_tree.py    License: Apache License 2.0 5 votes vote down vote up
def _generate_train_test_sets(self, samples, ratio_train):
        """Split samples into train/test feature matrices and label vectors.

        The last column of `samples` holds the label; `ratio_train` is the
        fraction of leading rows used for training.
        """
        n_train = int(len(samples) * ratio_train)

        # Peel the label column off the features.
        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:n_train])
        X_train_label = np.array(labels[:n_train]).ravel()
        X_test = np.array(data[n_train:])
        X_test_label = np.array(labels[n_train:]).ravel()
        return X_train, X_train_label, X_test, X_test_label
Example 28
Project: monasca-analytics   Author: openstack   File: logistic_regression.py    License: Apache License 2.0 5 votes vote down vote up
def _generate_train_test_sets(self, samples, ratio_train):
        """Split samples into train/test feature matrices and label vectors.

        The last column of `samples` holds the label; `ratio_train` is the
        fraction of leading rows used for training.
        """
        n_train = int(len(samples) * ratio_train)

        # Peel the label column off the features.
        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:n_train])
        X_train_label = np.array(labels[:n_train]).ravel()
        X_test = np.array(data[n_train:])
        X_test_label = np.array(labels[n_train:]).ravel()
        return X_train, X_train_label, X_test, X_test_label
Example 29
Project: monasca-analytics   Author: openstack   File: random_forest_classifier.py    License: Apache License 2.0 5 votes vote down vote up
def _generate_train_test_sets(self, samples, ratio_train):
        """Split samples into train/test feature matrices and label vectors.

        The last column of `samples` holds the label; `ratio_train` is the
        fraction of leading rows used for training.
        """
        n_train = int(len(samples) * ratio_train)

        # Peel the label column off the features.
        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:n_train])
        X_train_label = np.array(labels[:n_train]).ravel()
        X_test = np.array(data[n_train:])
        X_test_label = np.array(labels[n_train:]).ravel()
        return X_train, X_train_label, X_test, X_test_label
Example 30
Project: AAT   Author: husthuaan   File: dataloader.py    License: MIT License 5 votes vote down vote up
def __getitem__(self, index):
        """This function returns a tuple that is further passed to collate_fn

        Loads the attention features (optionally augmented with normalized
        box geometry and sorted by box area), the fc feature, and the
        caption sequences for the image at `index`.
        """
        ix = index #self.split_ix[index]
        if self.use_att:
            att_feat = self.att_loader.get(str(self.info['images'][ix]['id']))
            # Reshape to K x C
            att_feat = att_feat.reshape(-1, att_feat.shape[-1])
            if self.norm_att_feat:
                att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True)
            if self.use_box:
                box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
                # divided by image width and height
                x1,y1,x2,y2 = np.hsplit(box_feat, 4)
                h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
                box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
                if self.norm_box_feat:
                    box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
                att_feat = np.hstack([att_feat, box_feat])
                # sort the features by the size of boxes
                att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True))
        else:
            # Placeholder when attention features are disabled.
            att_feat = np.zeros((1,1,1), dtype='float32')
        if self.use_fc:
            fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id']))
        else:
            # Placeholder when fc features are disabled.
            fc_feat = np.zeros((1), dtype='float32')
        if hasattr(self, 'h5_label_file'):
            seq = self.get_captions(ix, self.seq_per_img)
        else:
            seq = None
        return (fc_feat,
                att_feat, seq,
                ix)