Python numpy.vsplit() Examples

The following are 30 code examples of numpy.vsplit(), collected from open-source projects. You can go to the original project or source file via the attribution line above each example. You may also want to check out the other available functions and classes of the numpy module.
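Before the examples, here is a minimal standalone sketch of what numpy.vsplit() does: it splits an array into sub-arrays along axis 0 (rows), taking either a number of equal sections or a list of row indices.

import numpy as np

a = np.arange(12).reshape(4, 3)

# Split into two equal row blocks:
top, bottom = np.vsplit(a, 2)        # each has shape (2, 3)

# Split at explicit row indices:
head, tail = np.vsplit(a, [1])       # shapes (1, 3) and (3, 3)

print(top.shape, bottom.shape, head.shape, tail.shape)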
Example #1
Source File: svm_handwritten_digits_recognition_preprocessing_hog.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE  # integer division: split counts must be ints in Python 3
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
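A usage sketch for the function above, assuming the stock OpenCV digits sample image (a 2000x1000 grid of 20x20-pixel handwritten digits) and the module constants SIZE_IMAGE = 20 and NUMBER_CLASSES = 10; the file path is hypothetical:

SIZE_IMAGE = 20        # assumed cell size in pixels
NUMBER_CLASSES = 10    # assumed: digits 0-9

digits, labels = load_digits_and_labels('digits.png')  # hypothetical path
print(digits.shape, labels.shape)  # (5000, 20, 20) (5000,) for the stock sample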
Example #2
Source File: knn_handwritten_digits_recognition_k_training_testing_preprocessing_hog.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #3
Source File: knn_handwritten_digits_recognition_k_training_testing_preprocessing.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #4
Source File: svm_handwritten_digits_recognition_preprocessing_hog_c_gamma.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """ Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #5
Source File: inception_score.py    From BigGAN-TPU-TensorFlow with MIT License
def test_debug(self):
		image = imageio.imread("./temp/dump.png")
		grid_n = 6
		img_size = image.shape[1] // grid_n
		img_ch = image.shape[-1]

		images = np.vsplit(image, grid_n)
		images = [np.hsplit(i, grid_n) for i in images]
		images = np.reshape(np.array(images), [grid_n*grid_n, img_size, img_size, img_ch])

		with tf.Graph().as_default():
			with tf.Session() as sess:
				v_images_placeholder = tf.placeholder(dtype=tf.float32)
				v_images = tf.contrib.gan.eval.preprocess_image(v_images_placeholder)
				v_logits = tf.contrib.gan.eval.run_inception(v_images)
				v_score = tf.contrib.gan.eval.classifier_score_from_logits(v_logits)
				score, logits = sess.run([v_score, v_logits], feed_dict={v_images_placeholder:images})


		imageio.imwrite("./temp/inception_logits.png", logits) 
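The grid-splitting step above, in isolation: np.vsplit cuts the (H, W, C) grid image into rows of tiles, and np.hsplit cuts each row into individual tiles. A minimal sketch, with a hypothetical 32-pixel tile size:

import numpy as np

grid_n = 6
tile = 32  # hypothetical tile size
image = np.zeros((grid_n * tile, grid_n * tile, 3))

tiles = [np.hsplit(row, grid_n) for row in np.vsplit(image, grid_n)]
tiles = np.reshape(np.array(tiles), [grid_n * grid_n, tile, tile, 3])
print(tiles.shape)  # (36, 32, 32, 3)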
Example #6
Source File: NKDE.py    From Conditional_Density_Estimation with MIT License
def _build_model(self, X, Y):
    # save mean and std of data for normalization
    self.x_std = np.std(X, axis=0)
    self.x_mean = np.mean(X, axis=0)
    self.y_mean = np.mean(Y, axis=0)
    self.y_std = np.std(Y, axis=0)

    self.n_train_points = X.shape[0]

    # lazy learner - just store training data
    self.X_train = self._normalize_x(X)
    self.Y_train = Y

    # prepare Gaussians centered in the Y points
    self.locs_array = np.vsplit(Y, self.n_train_points)
    self.log_kernel = multivariate_normal(mean=np.ones(self.ndim_y)).logpdf

    # select / properly initialize bandwidth and epsilon
    if isinstance(self.bandwidth, (int, float)):
      self.bandwidth = self.y_std * self.bandwidth

    if self.param_selection == 'normal_reference':
      self.bandwidth = self._normal_reference()
    elif self.param_selection == 'cv_ml':
      self.bandwidth, self.epsilon = self._cv_ml() 
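In isolation, the np.vsplit call above turns the (n, d) training targets into a list of n row vectors of shape (1, d), one Gaussian center per training point:

import numpy as np

Y = np.random.rand(4, 2)
locs_array = np.vsplit(Y, Y.shape[0])
print(len(locs_array), locs_array[0].shape)  # 4 (1, 2)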
Example #7
Source File: simple_data_set.py    From pywsd with MIT License
def split_train_dev_test(X,y,train_per,dev_per,test_per):
    if(train_per + dev_per + test_per > 1):
        print "Train Dev Test split should sum to one"
        return
    dim = y.shape[0]
    split1 = int(dim*train_per)
    if(dev_per ==0):
        train_y,test_y = np.vsplit(y,[split1])
        dev_y = np.array([])
        train_X = X[0:split1,:]
        dev_X = np.array([])
        test_X = X[split1:,:]

    else:
        split2 = int(dim*(train_per+dev_per))
        print(split2)
        train_y,dev_y,test_y = np.vsplit(y,(split1,split2))
        train_X = X[0:split1,:]
        dev_X = X[split1:split2,:]
        test_X = X[split2:,:]
    return train_y,dev_y,test_y,train_X,dev_X,test_X 
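A quick sketch of the splitter above; note that y must be 2-D, since np.vsplit requires at least two dimensions:

import numpy as np

X = np.random.rand(10, 3)
y = np.arange(10).reshape(-1, 1)
train_y, dev_y, test_y, train_X, dev_X, test_X = \
    split_train_dev_test(X, y, 0.6, 0.2, 0.2)
print(train_y.shape, dev_y.shape, test_y.shape)  # (6, 1) (2, 1) (2, 1)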
Example #8
Source File: knn_handwritten_digits_recognition_introduction.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #9
Source File: util.py    From gbdxtools with MIT License
def rev(self, lng, lat, z=None, _type=np.int32):
        if z is None:
            z = self._default_z

        if all(isinstance(var, (int, float, tuple)) for var in [lng, lat]):
            lng, lat = (np.array([lng]), np.array([lat]))
        if not all(isinstance(var, np.ndarray) for var in [lng, lat]):
            raise ValueError("lng, lat inputs must be of type int, float, tuple or numpy.ndarray")
        if not isinstance(z, np.ndarray):
            z = np.zeros_like(lng) + z
        coord = np.dstack([lng, lat, z])
        offset, scale = np.vsplit(self._offscl, 2)
        normed = coord * scale + offset
        X = self._rpc(normed)
        result = np.rollaxis(np.inner(self._A, X) / np.inner(self._B, X), 0, 3)
        rev_offset, rev_scale = np.vsplit(self._px_offscl_rev, 2)
        # needs to return x/y
        return np.rint(np.rollaxis(result * rev_scale + rev_offset, 2)).squeeze().astype(_type)[::-1]
Example #10
Source File: knn_handwritten_digits_recognition_k_training_testing.py    From Mastering-OpenCV-4-with-Python with MIT License
def load_digits_and_labels(big_image):
    """Returns all the digits from the 'big' image and creates the corresponding labels for each image"""

    # Load the 'big' image containing all the digits:
    digits_img = cv2.imread(big_image, 0)

    # Get all the digit images from the 'big' image:
    number_rows = digits_img.shape[1] // SIZE_IMAGE
    rows = np.vsplit(digits_img, digits_img.shape[0] // SIZE_IMAGE)

    digits = []
    for row in rows:
        row_cells = np.hsplit(row, number_rows)
        for digit in row_cells:
            digits.append(digit)
    digits = np.array(digits)

    # Create the labels for each image:
    labels = np.repeat(np.arange(NUMBER_CLASSES), len(digits) // NUMBER_CLASSES)
    return digits, labels 
Example #11
Source File: ds_utils.py    From refinedet.pytorch with MIT License
def bbox_overlaps(bboxes, ref_bboxes):
    """
    ref_bboxes: N x 4;
    bboxes: K x 4

    return: K x N
    """
    refx1, refy1, refx2, refy2 = np.vsplit(np.transpose(ref_bboxes), 4)
    x1, y1, x2, y2 = np.hsplit(bboxes, 4)
    
    minx = np.maximum(refx1, x1)
    miny = np.maximum(refy1, y1)
    maxx = np.minimum(refx2, x2)
    maxy = np.minimum(refy2, y2)
    
    inter_area = np.maximum(maxx - minx + 1, 0) * np.maximum(maxy - miny + 1, 0)  # clamp at 0 for disjoint boxes
    ref_area = (refx2 - refx1 + 1) * (refy2 - refy1 + 1)
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    iou = inter_area / (ref_area + area - inter_area)
    
    return iou 
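A small check of the IoU helper above, assuming boxes in (x1, y1, x2, y2) corner format with inclusive pixel coordinates:

import numpy as np

bboxes = np.array([[0., 0., 9., 9.],
                   [5., 5., 14., 14.]])        # K = 2
ref_bboxes = np.array([[0., 0., 9., 9.]])      # N = 1
print(bbox_overlaps(bboxes, ref_bboxes))
# [[1.        ]
#  [0.14285714]]  -- a K x N matrix of IoU values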
Example #12
Source File: mapmaker.py    From pysplit with BSD 3-Clause "New" or "Revised" License
def random_colors(number_ofcolors):
    """
    Generate random RGB tuples.

    Parameters
    ----------
    number_ofcolors : int
        Number of tuples to generate

    Returns
    -------
    colors : list of tuples of floats
        List of ``len(number_ofcolors)``, the requested random colors
    """
    color_tmp = np.random.rand(number_ofcolors, 3)
    color_tmp = np.vsplit(color_tmp, number_ofcolors)
    colors = []
    for c in color_tmp:
        colors.append(c[0])

    return colors 
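Here np.vsplit is used only to turn the (n, 3) array into n single-row pieces; each c[0] is then a flat RGB triple:

colors = random_colors(3)
print(len(colors), colors[0].shape)  # 3 (3,)

The same result could be had with list(color_tmp), since iterating a 2-D array yields its rows.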
Example #13
Source File: rf_NDVIEvolution.py    From python-urbanPlanning with MIT License
def trainBlock(array,row,col):
    arrayShape=array.shape
    print(arrayShape)
    rowPara=divmod(arrayShape[1],row)  #divmod(a, b) returns the quotient and the remainder of a divided by b
    colPara=divmod(arrayShape[0],col)
    extractArray=array[:colPara[0]*col,:rowPara[0]*row]  #trim the surplus so the array divides evenly
#    print(extractArray.shape)
    hsplitArray=np.hsplit(extractArray,rowPara[0])
    vsplitArray=flatten_lst([np.vsplit(subArray,colPara[0]) for subArray in hsplitArray])
    dataBlock=flatten_lst(vsplitArray)
    print("样本量:%s"%(len(dataBlock)))  #此时切分的块数据量,就为样本数据量
    
    '''display one of the samples'''
    subShow=dataBlock[-10]
    print(subShow,'\n',subShow.max(),subShow.std())
    fig=plt.figure(figsize=(20, 12))
    ax=fig.add_subplot(111)
    plt.xticks([x for x in range(subShow.shape[0]) if x%400==0])
    plt.yticks([y for y in range(subShow.shape[1]) if y%200==0])
    ax.imshow(subShow)    
    
    dataBlockStack=np.append(dataBlock[:-1],[dataBlock[-1]],axis=0) #convert the list to an array
    print(dataBlockStack.shape)
    return dataBlockStack 
Example #14
Source File: Pooling.py    From EyerissF with GNU Lesser General Public License v2.1
def MAXPooling(Array,activation=1, ksize=2):
    assert len(Array) % ksize == 0

    V2list = np.vsplit(Array, len(Array) // ksize)

    VerticalElements = list()
    HorizontalElements = list()

    for x in V2list:
        H2list = np.hsplit(x, len(x[0]) // ksize)
        HorizontalElements.clear()
        for y in H2list:
            # y should be a two-two square
            HorizontalElements.append(y.max())
        VerticalElements.append(np.array(HorizontalElements))

    return np.array(np.array(VerticalElements)/activation,dtype=int) 
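A worked example of the pooling routine above: 2x2 max pooling over a 4x4 input halves each spatial dimension:

import numpy as np

a = np.array([[ 1,  2,  3,  4],
              [ 5,  6,  7,  8],
              [ 9, 10, 11, 12],
              [13, 14, 15, 16]])
print(MAXPooling(a))
# [[ 6  8]
#  [14 16]]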
Example #15
Source File: array.py    From dislib with Apache License 2.0
def _split_block(block, tl_shape, reg_shape, out_blocks):
    """ Splits a block into new blocks following the ds-array typical scheme
    with a top left block, regular blocks in the middle and remainder blocks
    at the edges """
    vsplit = range(tl_shape[0], block.shape[0], reg_shape[0])
    hsplit = range(tl_shape[1], block.shape[1], reg_shape[1])

    for i, rows in enumerate(np.vsplit(block, vsplit)):
        for j, cols in enumerate(np.hsplit(rows, hsplit)):
            out_blocks[i][j] = cols 
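A minimal sketch of the splitter above, treating it as a plain function for illustration, assuming a 5x5 block, a 2x2 top-left block and 2x2 regular blocks, which leaves 1-pixel remainders at the right and bottom edges:

import numpy as np

block = np.arange(25).reshape(5, 5)
out_blocks = [[None] * 3 for _ in range(3)]   # 3 x 3 grid of output blocks
_split_block(block, (2, 2), (2, 2), out_blocks)
print(out_blocks[0][0].shape, out_blocks[2][2].shape)  # (2, 2) (1, 1)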
Example #16
Source File: test_mesh.py    From geoist with MIT License
def test_z_split_x():
    "model.split along x vs numpy.vsplit splits the z array correctly"
    area = [-1000., 1000., -2000., 0.]
    shape = (20, 21)
    xp, yp = gridder.regular(area, shape)
    zp = 100*np.arange(xp.size)
    model = PointGrid(area, zp, shape)
    subshape = (2, 1)
    submodels = model.split(subshape)
    temp = np.vsplit(np.reshape(zp, shape), subshape[0])
    diff = []
    for i in range(subshape[0]):
        diff.append(np.all((submodels[i].z - temp[i].ravel()) == 0.))
    assert np.alltrue(diff) 
Example #17
Source File: test_quantity_non_ufuncs.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def test_vsplit(self):
        self.check(np.vsplit, [1]) 
Example #18
Source File: LST.py    From python-urbanPlanning with MIT License
def trainBlock(self,array,row,col):
        arrayShape=array.shape
        print(arrayShape)
        rowPara=divmod(arrayShape[1],row)  #divmod(a, b) returns the quotient and the remainder of a divided by b
        colPara=divmod(arrayShape[0],col)
        extractArray=array[:colPara[0]*col,:rowPara[0]*row]  #trim the surplus so the array divides evenly
    #    print(extractArray.shape)
        hsplitArray=np.hsplit(extractArray,rowPara[0])
        flatten_lst=lambda lst: [m for n_lst in lst for m in flatten_lst(n_lst)] if type(lst) is list else [lst]
        vsplitArray=flatten_lst([np.vsplit(subArray,colPara[0]) for subArray in hsplitArray])
        dataBlock=flatten_lst(vsplitArray)
        print("样本量:%s"%(len(dataBlock)))  #此时切分的块数据量,就为样本数据量
        
        '''display one of the samples'''
        subShow=dataBlock[-2]
        print(subShow,'\n',subShow.max(),subShow.std())
        fig=plt.figure(figsize=(20, 12))
        ax=fig.add_subplot(111)
        plt.xticks([x for x in range(subShow.shape[0]) if x%400==0])
        plt.yticks([y for y in range(subShow.shape[1]) if y%200==0])
        ax.imshow(subShow)    
        
        dataBlockStack=np.append(dataBlock[:-1],[dataBlock[-1]],axis=0) #convert the list to an array
        print(dataBlockStack.shape)
        return dataBlockStack    
    
#Main program: data preparation / preprocessing
Example #19
Source File: digits.py    From PyCV-time with MIT License
def split2d(img, cell_size, flatten=True):
    h, w = img.shape[:2]
    sx, sy = cell_size
    cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
    cells = np.array(cells)
    if flatten:
        cells = cells.reshape(-1, sy, sx)
    return cells 
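A usage sketch for split2d above, assuming a 100x100 image and 20x20 cells:

import numpy as np

img = np.zeros((100, 100), dtype=np.uint8)
cells = split2d(img, (20, 20))
print(cells.shape)  # (25, 20, 20)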
Example #20
Source File: digits.py    From PyCV-time with MIT License
def split2d(img, cell_size, flatten=True):
    h, w = img.shape[:2]
    sx, sy = cell_size
    cells = [np.hsplit(row, w//sx) for row in np.vsplit(img, h//sy)]
    cells = np.array(cells)
    if flatten:
        cells = cells.reshape(-1, sy, sx)
    return cells 
Example #21
Source File: test_validation.py    From twitter-stock-recommendation with MIT License
def test_validation_curve_cv_splits_consistency():
    n_samples = 100
    n_splits = 5
    X, y = make_classification(n_samples=100, random_state=0)

    scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=OneTimeSplitter(n_splits=n_splits,
                                                  n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless, the
    # `split` is called for each parameter, the following should produce
    # identical results for param setting 1 and param setting 2 as both have
    # the same C value.
    assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
                                         2))

    scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits, shuffle=True))

    # For scores2, compare the 1st and 2nd parameter's scores
    # (Since the C value for 1st two param setting is 0.1, they must be
    # consistent unless the train test folds differ between the param settings)
    assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
                                         2))

    scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits))

    # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
    assert_array_almost_equal(np.array(scores3), np.array(scores1)) 
Example #22
Source File: NPE.py    From Neural-Photo-Editor with MIT License
def paint_latents( event ):
   global r, Z, output,painted_rects,MASK,USER_MASK,RECON 

   # Get extent of latent paintbrush
   x1, y1 = ( event.x - d.get() ), ( event.y - d.get() )
   x2, y2 = ( event.x + d.get() ), ( event.y + d.get() )
   
   selected_widget = event.widget
   
   # Paint in latent space and update Z
   painted_rects.append(event.widget.create_rectangle( x1, y1, x2, y2, fill = rb(color.get()),outline = rb(color.get()) ))
   r[max((y1-bd),0):min((y2-bd),r.shape[0]),max((x1-bd),0):min((x2-bd),r.shape[1])] = color.get()/255.0
   Z = np.asarray([np.mean(o) for v in [np.hsplit(h,Z.shape[0])\
                                                         for h in np.vsplit((r),Z.shape[1])]\
                                                         for o in v]).reshape(Z.shape[0],Z.shape[1])
   if SAMPLE_FLAG:
        update_photo(None,output)
        update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
   else:     
        DELTA = model.sample_at(np.float32([Z.flatten()]))[0]-to_tanh(np.float32(RECON))
        MASK=scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA),axis=0),np.ones((64,64))],axis=0),0.7)
        # D = dampen(to_tanh(np.float32(RECON)),MASK*DELTA+(1-MASK)*ERROR)
        D = MASK*DELTA+(1-MASK)*ERROR
        IM = np.uint8(from_tanh(to_tanh(RECON)+D))
        update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
        update_photo(IM,output)  

# Scroll to lighten or darken an image patch 
Example #23
Source File: data_utils.py    From neural-symbolic-machines with Apache License 2.0
def convert_batch_to_seqs(batch):
  array = batch.tensor
  sequence_length = batch.sequence_length
  seqs = np.vsplit(array, array.shape[0])
  result = []
  for seq, seq_len in zip(seqs, sequence_length):
    result.append(list(seq[0][:seq_len]))
  return result 
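A minimal sketch of the unbatching helper above; `Batch` here is a hypothetical stand-in for any object with `tensor` and `sequence_length` attributes:

import collections
import numpy as np

Batch = collections.namedtuple('Batch', ['tensor', 'sequence_length'])
b = Batch(tensor=np.array([[1, 2, 3],
                           [4, 5, 0]]),
          sequence_length=[3, 2])
print(convert_batch_to_seqs(b))  # [[1, 2, 3], [4, 5]] (elements are numpy scalars)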
Example #24
Source File: eval_speech.py    From end2end_AU_speech with MIT License
def estimate_one_audio_seq(model, audio_seq, small_mem=False):
    if isinstance(model, str):
        model = C.load_model(model)
    # set up 2 cases: if the model is recurrent or static
    if is_recurrent(model):
        n = audio_seq.shape[0]
        NNN = 125
        if n > NNN and small_mem:
            nseqs = n//NNN + 1
            indices = []
            for i in range(nseqs-1):
                indices.append(NNN*i + NNN)
            input_seqs = np.vsplit(audio_seq, indices)
            outputs = []
            for seq in input_seqs:
                output = model.eval({model.arguments[0]:[seq]})[0]
                outputs.append(output)
            output = np.concatenate(outputs)
        else:
            output = model.eval({model.arguments[0]:[audio_seq]})[0]
    else:
        output = model.eval({model.arguments[0]: audio_seq})
    return output


#----------------------- feed sequence ------------------------- 
Example #25
Source File: tools.py    From GSTools with GNU Lesser General Public License v3.0
def rotate_mesh(dim, angles, x, y, z):
    """Rotate axes.

    For 3d, the yaw, pitch, and roll angles are alpha, beta, and gamma:
    an intrinsic rotation whose Tait-Bryan angles are alpha, beta, gamma
    about the axes x, y, z.
    """
    if dim == 1:
        return x, y, z
    if dim == 2:
        # extract 2d rotation matrix
        rot_mat = r3d_z(angles[0])[0:2, 0:2]
        pos_tuple = np.vstack((x, y))
        pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 2)
        x = pos_tuple[0].reshape(np.shape(x))
        y = pos_tuple[1].reshape(np.shape(y))
        return x, y, z
    if dim == 3:
        alpha = angles[0]
        beta = angles[1]
        gamma = angles[2]
        rot_mat = np.dot(np.dot(r3d_x(gamma), r3d_y(beta)), r3d_z(alpha))
        pos_tuple = np.vstack((x, y, z))
        pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 3)
        x = pos_tuple[0].reshape(np.shape(x))
        y = pos_tuple[1].reshape(np.shape(y))
        z = pos_tuple[2].reshape(np.shape(z))
        return x, y, z
    return None 
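A quick 2d check of rotate_mesh above, rotating the point (1, 0) by 90 degrees about z; this assumes it runs in the module's context where r3d_z is defined, and that r3d_z follows the standard counter-clockwise convention:

import numpy as np

x, y, z = np.array([1.0]), np.array([0.0]), np.array([0.0])
xr, yr, zr = rotate_mesh(2, [np.pi / 2], x, y, z)
print(np.round(xr, 6), np.round(yr, 6))  # expect [0.] [1.]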
Example #26
Source File: tools.py    From GSTools with GNU Lesser General Public License v3.0
def unrotate_mesh(dim, angles, x, y, z):
    """Rotate axes in order to implement rotation.

    For 3d, the yaw, pitch, and roll angles are alpha, beta, and gamma:
    an intrinsic rotation whose Tait-Bryan angles are alpha, beta, gamma
    about the axes x, y, z.
    """
    if dim == 1:
        return x, y, z
    if dim == 2:
        # extract 2d rotation matrix
        rot_mat = r3d_z(-angles[0])[0:2, 0:2]
        pos_tuple = np.vstack((x, y))
        pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 2)
        x = pos_tuple[0].reshape(np.shape(x))
        y = pos_tuple[1].reshape(np.shape(y))
        return x, y, z
    if dim == 3:
        alpha = -angles[0]
        beta = -angles[1]
        gamma = -angles[2]
        rot_mat = np.dot(np.dot(r3d_z(alpha), r3d_y(beta)), r3d_x(gamma))
        pos_tuple = np.vstack((x, y, z))
        pos_tuple = np.vsplit(np.dot(rot_mat, pos_tuple), 3)
        x = pos_tuple[0].reshape(np.shape(x))
        y = pos_tuple[1].reshape(np.shape(y))
        z = pos_tuple[2].reshape(np.shape(z))
        return x, y, z
    return None 
Example #27
Source File: text2mat.py    From hypertools with MIT License
def _transform(vmodel, tmodel, x):
    split = np.cumsum([len(xi) for xi in x])[:-1]
    if vmodel is not None:
        x = np.vsplit(vmodel.transform(np.vstack(x).ravel()).toarray(), split)
    if tmodel is not None:
        if isinstance(tmodel, Pipeline):
            x = np.vsplit(tmodel.transform(np.vstack(x).ravel()), split)
        else:
            x = np.vsplit(tmodel.transform(np.vstack(x)), split)
    return [xi for xi in x] 
Example #28
Source File: test_validation.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_validation_curve_cv_splits_consistency():
    n_samples = 100
    n_splits = 5
    X, y = make_classification(n_samples=100, random_state=0)

    scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=OneTimeSplitter(n_splits=n_splits,
                                                  n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless, the
    # `split` is called for each parameter, the following should produce
    # identical results for param setting 1 and param setting 2 as both have
    # the same C value.
    assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
                                         2))

    scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits, shuffle=True))

    # For scores2, compare the 1st and 2nd parameter's scores
    # (Since the C value for 1st two param setting is 0.1, they must be
    # consistent unless the train test folds differ between the param settings)
    assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
                                         2))

    scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
                               'C', [0.1, 0.1, 0.2, 0.2],
                               cv=KFold(n_splits=n_splits))

    # OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
    assert_array_almost_equal(np.array(scores3), np.array(scores1)) 
Example #29
Source File: CVFeatures.py    From videofeatures with MIT License
def computeFeaturesForVideoDataset(self, dataloader, pickle_path=None):
    """
    Computes Feature Vectors for the video dataset provided via a dataloader object
    :param dataloader: gulpIO Dataloader object which represents a dataset
    :param pickle_path: (optional) if provided the features are pickeled to the specified location
    :return: (features, labels) - features as ndarray of shape (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor) and labels of videos
    """
    assert isinstance(dataloader, DataLoader)

    feature_batch_list = []
    labels = []
    n_batches = len(dataloader)
    for i, (data_batch, label_batch) in enumerate(dataloader):
      assert data_batch.ndim == 5
      n_frames = data_batch.shape[1]

      frames_batch = data_batch.reshape(
        (data_batch.shape[0] * n_frames, data_batch.shape[2], data_batch.shape[3], data_batch.shape[4]))
      frames_batch = frames_batch.astype('float32')

      feature_batch = self.computeFeatures(frames_batch)
      assert feature_batch.ndim == 2
      feature_batch = feature_batch.reshape((data_batch.shape[0], data_batch.shape[1], -1, feature_batch.shape[1]))

      feature_batch_list.append(feature_batch)
      labels.extend(label_batch)
      print("batch %i of %i" % (i, n_batches))

    features = np.concatenate(feature_batch_list, axis=0)
    assert features.shape[0] == len(labels) and features.ndim == 4

    if pickle_path:
      df = pd.DataFrame(data={'labels': labels, 'features': np.vsplit(features, features.shape[0])})
      print('Dumped feature dataframe to', pickle_path)
      df.to_pickle(pickle_path)

    return features, labels 
Example #30
Source File: CNNFeatures.py    From videofeatures with MIT License
def computeFeaturesForVideoDataset(self, dataloader, pickle_path=None):
    """
    Computes Feature Vectors for the video dataset provided via a dataloader object
    :param dataloader: gulpIO Dataloader object which represents a dataset
    :param pickle_path: (optional) if provided the features are pickeled to the specified location
    :return: (features, labels) - features as ndarray of shape (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor) and labels of videos
    """
    assert isinstance(dataloader, DataLoader)

    feature_batch_list = []
    labels = []
    n_batches = len(dataloader)
    for i, (data_batch, label_batch) in enumerate(dataloader):
      assert data_batch.ndim == 5
      n_frames = data_batch.shape[1]

      frames_batch = data_batch.reshape(
        (data_batch.shape[0] * n_frames, data_batch.shape[2], data_batch.shape[3], data_batch.shape[4]))
      frames_batch = frames_batch.astype('float32')

      feature_batch = self.computeFeatures(frames_batch)
      assert feature_batch.ndim == 2
      feature_batch = feature_batch.reshape((data_batch.shape[0], data_batch.shape[1], feature_batch.shape[1]))

      feature_batch_list.append(feature_batch)
      labels.extend(label_batch)
      print("batch %i of %i" % (i, n_batches))

    features = np.concatenate(feature_batch_list, axis=0)

    # reshape features to (n_videos, n_frames, n_descriptors_per_image, n_dim_descriptor)
    features = features.reshape((features.shape[0], features.shape[1], 1, features.shape[2]))
    assert features.shape[0] == len(labels) and features.ndim == 4

    # store as pandas dataframe
    if pickle_path:
      df = pd.DataFrame(data={'labels': labels, 'features': np.vsplit(features, features.shape[0])})
      print('Dumped feature dataframe to', pickle_path)
      df.to_pickle(pickle_path)

    return features, labels