Python numba.int32() Examples
The following are 30 code examples of numba.int32(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numba, or try the search function.
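Before the examples, here is a minimal sketch (not taken from any of the projects below) of the two ways numba.int32 shows up in them: as a Numba type used to build an explicit @njit signature, and as a cast applied to a value inside compiled code, with np.int32 appearing alongside it as the matching NumPy dtype.

import numba
import numpy as np

@numba.njit(numba.int32(numba.float64))
def truncate(x):
    # cast a float to a 32-bit integer inside nopython code
    return numba.int32(x)

arr = np.empty(4, dtype=np.int32)   # NumPy dtype used alongside the Numba type
print(truncate(3.9), arr.dtype)     # -> 3 int32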
Example #1
Source File: numba_functions.py From GridCal with GNU General Public License v3.0 | 6 votes |
def csc_diagonal_from_array(m, array):
    """
    Build the CSC arrays of a diagonal matrix from a 1-D array.
    :param m: size of the (m x m) matrix
    :param array: values to place on the diagonal
    :return: indices, indptr, data (CSC arrays)
    """
    indptr = np.empty(m + 1, dtype=nb.int32)
    indices = np.empty(m, dtype=nb.int32)
    data = np.empty(m, dtype=nb.complex128)
    for i in range(m):
        indptr[i] = i
        indices[i] = i
        data[i] = array[i]
    indptr[m] = m
    return indices, indptr, data
Example #2
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 6 votes |
def csc_diagonal_from_array(m, array):
    """
    Build the CSC arrays of a diagonal matrix from a 1-D array.
    :param m: size of the (m x m) matrix
    :param array: values to place on the diagonal
    :return: indices, indptr, data (CSC arrays)
    """
    indptr = np.empty(m + 1, dtype=np.int32)
    indices = np.empty(m, dtype=np.int32)
    data = np.empty(m, dtype=np.float64)
    for i in range(m):
        indptr[i] = i
        indices[i] = i
        data[i] = array[i]
    indptr[m] = m
    return indices, indptr, data
Example #3
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 6 votes |
def csc_diagonal(m, value=1.0):
    """
    Build CSC diagonal matrix of the given value
    :param m: size
    :param value: value
    :return: CSC matrix
    """
    indptr = np.empty(m + 1, dtype=np.int32)
    indices = np.empty(m, dtype=np.int32)
    data = np.empty(m, dtype=np.float64)
    for i in range(m):
        indptr[i] = i
        indices[i] = i
        data[i] = value
    indptr[m] = m
    return indices, indptr, data
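A hypothetical usage sketch (not part of GridCal), assuming csc_diagonal above is importable: the three arrays it returns can be handed straight to scipy.sparse.csc_matrix to materialise the matrix.

import numpy as np
from scipy.sparse import csc_matrix

m = 4
indices, indptr, data = csc_diagonal(m, value=2.5)   # function from Example #3
D = csc_matrix((data, indices, indptr), shape=(m, m))
print(D.toarray())                                   # 4x4 array with 2.5 on the diagonal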
Example #4
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 6 votes |
def csc_sprealloc_f(An, Aindptr, Aindices, Adata, nzmax):
    """
    Change the max # of entries a sparse matrix can hold.
    :param An: number of columns
    :param Aindptr: csc column pointers
    :param Aindices: csc row indices
    :param Adata: csc data
    :param nzmax: new maximum number of entries
    :return: indices, data, nzmax
    """
    if nzmax <= 0:
        nzmax = Aindptr[An]

    length = min(nzmax, len(Aindices))
    Ainew = np.empty(nzmax, dtype=nb.int32)
    for i in range(length):
        Ainew[i] = Aindices[i]

    length = min(nzmax, len(Adata))
    Axnew = np.empty(nzmax, dtype=nb.float64)
    for i in range(length):
        Axnew[i] = Adata[i]

    return Ainew, Axnew, nzmax
Example #5
Source File: augmentations.py From dataset_agnostic_segmentation with MIT License | 6 votes |
def apply(self, image, boxes, meta_image):
    if np.random.rand() < self.skip_gray_prob:
        return image, boxes, meta_image
    # tic = time.time()
    gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # toc1 = time.time() - tic
    # print ('Gray Scale: %4.3f' % toc1)
    if np.random.rand() > self.apply_prob:
        gray_scale = expand(gray_scale)
        return gray_scale, boxes, meta_image
    kernel = np.ones((5, 5), np.uint8)
    gray_scale = augment_boxes(gray_scale, boxes.astype(np.int32), kernel, self.dialation_prob)
    # toc2 = time.time() - tic - toc1
    # print('Agument: %4.3f' % toc2)
    # gray_scale = np.repeat(gray_scale[:,:, np.newaxis], 3, axis=2)
    gray_scale = expand(gray_scale)
    q = 0.4 + 0.2*np.random.rand()
    gray_scale = np.array(q*gray_scale + (1-q)*image, dtype=np.uint8)
    # print('expand: %4.3f' % toc3)
    return gray_scale, boxes, meta_image
Example #6
Source File: augmentations.py From dataset_agnostic_segmentation with MIT License | 5 votes |
def apply(self, image, boxes, meta_image):
    mins = boxes.min(0).astype(np.int32) - 5
    maxs = boxes.max(0).astype(np.int32) + 5
    new_image = image[mins[1]:maxs[3], mins[0]:maxs[2], :]
    if new_image.shape[0] < 10 or new_image.shape[1] < 10:
        black_image = np.zeros(shape=image.shape, dtype=image.dtype)
        black_image[mins[1]:maxs[3], mins[0]:maxs[2], :] = image[mins[1]:maxs[3], mins[0]:maxs[2], :]
        return black_image, boxes, meta_image  # meta_image added here for a consistent return signature
    new_boxes = boxes - np.array([mins[0], mins[1], mins[0], mins[1]])
    image = None
    return new_image, new_boxes, meta_image
Example #7
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 5 votes |
def csc_sub_matrix_rows(An, Anz, Ap, Ai, Ax, rows):
    """
    Get CSC arbitrary sub-matrix
    :param An: number of rows
    :param Anz: number of non-zero entries
    :param Ap: Column pointers
    :param Ai: Row indices
    :param Ax: Data
    :param rows: row indices to keep
    :return: CSC sub-matrix (n, new_col_ptr, new_row_ind, new_val)
    """
    n_rows = len(rows)
    n = 0
    p = 0
    Bx = np.zeros(Anz, dtype=np.float64)
    Bi = np.empty(Anz, dtype=np.int32)
    Bp = np.empty(An + 1, dtype=np.int32)

    Bp[p] = 0

    for j in range(An):  # for each column selected ...
        i = 0
        for r in rows:
            for k in range(Ap[j], Ap[j + 1]):  # for each row of the column j of A...
                if Ai[k] == r:
                    Bx[n] = Ax[k]  # store the value
                    Bi[n] = i  # row index in the new matrix
                    n += 1
            i += 1

        if i == 0:
            i += 1

        p += 1
        Bp[p] = n

    Bp[p] = n
    return n, Bp, Bi[:n], Bx[:n]


# @nb.njit("f8[:, :](i8, i8, i4[:], i4[:], f8[:])")
Example #8
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 5 votes |
def csc_sub_matrix(Am, Anz, Ap, Ai, Ax, rows, cols):
    """
    Get CSC arbitrary sub-matrix
    :param Am: number of rows
    :param Anz: number of non-zero entries
    :param Ap: Column pointers
    :param Ai: Row indices
    :param Ax: Data
    :param rows: row indices to keep
    :param cols: column indices to keep
    :return: CSC sub-matrix (n, new_col_ptr, new_row_ind, new_val)
    """
    n_cols = len(cols)

    Bx = np.zeros(Anz, dtype=np.float64)
    Bi = np.empty(Anz, dtype=np.int32)
    Bp = np.empty(n_cols + 1, dtype=np.int32)

    n = 0
    p = 0
    Bp[p] = 0

    for j in cols:  # for each column selected ...
        i = 0
        for r in rows:
            for k in range(Ap[j], Ap[j + 1]):  # for each row of the column j of A...
                if Ai[k] == r:
                    Bx[n] = Ax[k]  # store the value
                    Bi[n] = i  # row index in the new matrix
                    i += 1
                    n += 1

        if i == 0:
            i += 1

        p += 1
        Bp[p] = n

    Bp[p] = n
    return n, Bp, Bi[:n], Bx[:n]
Example #9
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 5 votes |
def coo_to_csc(m, n, Ti, Tj, Tx, nz):
    """
    C = compressed-column form of a triplet matrix T. The columns of C are
    not sorted, and duplicate entries may be present in C.
    @param T: triplet matrix
    @return: Cm, Cn, Cp, Ci, Cx
    """
    Cm, Cn, Cp, Ci, Cx, nz = csc_spalloc_f(m, n, nz)  # allocate result

    w = np.zeros(n, dtype=nb.int32)  # get workspace

    for k in range(nz):
        w[Tj[k]] += 1  # column counts

    csc_cumsum_i(Cp, w, n)  # column pointers

    for k in range(nz):
        p = w[Tj[k]]
        w[Tj[k]] += 1
        Ci[p] = Ti[k]  # A(i,j) is the pth entry in C
        # if Cx is not None:
        Cx[p] = Tx[k]

    return Cm, Cn, Cp, Ci, Cx
Example #10
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 5 votes |
def csc_add_ff(Am, An, Aindptr, Aindices, Adata, Bm, Bn, Bindptr, Bindices, Bdata, alpha, beta):
    """
    C = alpha*A + beta*B
    @param A: column-compressed matrix
    @param B: column-compressed matrix
    @param alpha: scalar alpha
    @param beta: scalar beta
    @return: C=alpha*A + beta*B, null on error (Cm, Cn, Cp, Ci, Cx)
    """
    nz = 0

    m, anz, n, Bp, Bx = Am, Aindptr[An], Bn, Bindptr, Bdata
    bnz = Bp[n]

    w = np.zeros(m, dtype=nb.int32)
    x = xalloc(m)  # get workspace

    Cm, Cn, Cp, Ci, Cx, Cnzmax = csc_spalloc_f(m, n, anz + bnz)  # allocate result

    for j in range(n):
        Cp[j] = nz  # column j of C starts here
        nz = csc_scatter_f(Aindptr, Aindices, Adata, j, alpha, w, x, j + 1, Ci, nz)  # alpha*A(:,j)
        nz = csc_scatter_f(Bindptr, Bindices, Bdata, j, beta, w, x, j + 1, Ci, nz)  # beta*B(:,j)

        for p in range(Cp[j], nz):
            Cx[p] = x[Ci[p]]

    Cp[n] = nz  # finalize the last column of C

    return Cm, Cn, Cp, Ci, Cx  # success; free workspace, return C
Example #11
Source File: csc_numba.py From GridCal with GNU General Public License v3.0 | 5 votes |
def ialloc(n):
    return np.zeros(n, dtype=nb.int32)
Example #12
Source File: numbalib.py From soapy with GNU General Public License v3.0 | 5 votes |
def zoom(data, zoomArray):
    """
    2-D zoom interpolation using purely python - fast if compiled with numba.
    Both the array to zoom and the output array are required as arguments, the
    zoom level is calculated from the size of the new array.

    Parameters:
        array (ndarray): The 2-D array to zoom
        zoomArray (ndarray): The array to place the calculation

    Returns:
        interpArray (ndarray): A pointer to the calculated ``zoomArray''
    """
    for i in numba.prange(zoomArray.shape[0]):
        x = i*numba.float32(data.shape[0]-1) / (zoomArray.shape[0] - 0.99999999)
        x1 = numba.int32(x)

        for j in range(zoomArray.shape[1]):
            y = j*numba.float32(data.shape[1]-1) / (zoomArray.shape[1] - 0.99999999)
            y1 = numba.int32(y)

            xGrad1 = data[x1+1, y1] - data[x1, y1]
            a1 = data[x1, y1] + xGrad1*(x-x1)

            xGrad2 = data[x1+1, y1+1] - data[x1, y1+1]
            a2 = data[x1, y1+1] + xGrad2*(x-x1)

            yGrad = a2 - a1
            zoomArray[i,j] = a1 + yGrad*(y-y1)

    return zoomArray
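A hypothetical usage sketch, assuming the zoom function above is in scope: soapy compiles it with Numba, and wrapping it in numba.njit(parallel=True) lets the numba.prange loop and the numba.int32/numba.float32 casts run in nopython mode. The array sizes below are made up for illustration.

import numba
import numpy as np

zoom_jit = numba.njit(parallel=True)(zoom)   # compile the pure-python kernel

data = np.random.rand(16, 16).astype(np.float32)
zoomed = np.empty((64, 64), dtype=np.float32)
zoom_jit(data, zoomed)                       # result is written into (and returned as) zoomed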
Example #13
Source File: numbalib.py From soapy with GNU General Public License v3.0 | 5 votes |
def rotate(data, interpArray, rotation_angle):
    for i in range(interpArray.shape[0]):
        for j in range(interpArray.shape[1]):

            i1 = i - (interpArray.shape[0] / 2. - 0.5)
            j1 = j - (interpArray.shape[1] / 2. - 0.5)
            x = i1 * numpy.cos(rotation_angle) - j1 * numpy.sin(rotation_angle)
            y = i1 * numpy.sin(rotation_angle) + j1 * numpy.cos(rotation_angle)

            x += data.shape[0] / 2. - 0.5
            y += data.shape[1] / 2. - 0.5

            if x >= data.shape[0] - 1:
                x = data.shape[0] - 1.1
            x1 = numpy.int32(x)

            if y >= data.shape[1] - 1:
                y = data.shape[1] - 1.1
            y1 = numpy.int32(y)

            xGrad1 = data[x1 + 1, y1] - data[x1, y1]
            a1 = data[x1, y1] + xGrad1 * (x - x1)

            xGrad2 = data[x1 + 1, y1 + 1] - data[x1, y1 + 1]
            a2 = data[x1, y1 + 1] + xGrad2 * (x - x1)

            yGrad = a2 - a1
            interpArray[i, j] = a1 + yGrad * (y - y1)

    return interpArray
Example #14
Source File: numbalib.py From soapy with GNU General Public License v3.0 | 5 votes |
def bilinear_interp_numba_inbounds(data, xCoords, yCoords, interpArray):
    """
    2-D interpolation using purely python - fast if compiled with numba
    This version also accepts a parameter specifying how much of the array
    to operate on. This is useful for multi-threading applications.

    **NO BOUNDS CHECKS ARE PERFORMED - IF COORDS REFERENCE OUT-OF-BOUNDS
    ELEMENTS THEN MYSTERIOUS SEGFAULTS WILL OCCUR!!!**

    Parameters:
        array (ndarray): The 2-D array to interpolate
        xCoords (ndarray): A 1-D array of x-coordinates
        yCoords (ndarray): A 2-D array of y-coordinates
        interpArray (ndarray): The array to place the calculation

    Returns:
        interpArray (ndarray): A pointer to the calculated ``interpArray''
    """
    jRange = range(yCoords.shape[0])
    for i in range(xCoords.shape[0]):
        x = xCoords[i]
        x1 = numba.int32(x)

        for j in jRange:
            y = yCoords[j]
            y1 = numba.int32(y)

            xGrad1 = data[x1 + 1, y1] - data[x1, y1]
            a1 = data[x1, y1] + xGrad1 * (x - x1)

            xGrad2 = data[x1 + 1, y1 + 1] - data[x1, y1 + 1]
            a2 = data[x1, y1 + 1] + xGrad2 * (x - x1)

            yGrad = a2 - a1
            interpArray[i, j] = a1 + yGrad * (y - y1)

    return interpArray
Example #15
Source File: numbalib.py From soapy with GNU General Public License v3.0 | 5 votes |
def bilinear_interp_numba(data, xCoords, yCoords, interpArray):
    """
    2-D interpolation using purely python - fast if compiled with numba
    This version also accepts a parameter specifying how much of the array
    to operate on. This is useful for multi-threading applications.
    Bounds are checked to ensure no out-of-bounds access is attempted, to
    avoid mysterious seg-faults.

    Parameters:
        array (ndarray): The 2-D array to interpolate
        xCoords (ndarray): A 1-D array of x-coordinates
        yCoords (ndarray): A 2-D array of y-coordinates
        interpArray (ndarray): The array to place the calculation

    Returns:
        interpArray (ndarray): A pointer to the calculated ``interpArray''
    """
    jRange = range(yCoords.shape[0])
    for i in numba.prange(xCoords.shape[0]):
        x = xCoords[i]
        if x >= data.shape[0] - 1:
            x = data.shape[0] - 1 - 1e-9
        x1 = numba.int32(x)

        for j in jRange:
            y = yCoords[j]
            if y >= data.shape[1] - 1:
                y = data.shape[1] - 1 - 1e-9
            y1 = numba.int32(y)

            xGrad1 = data[x1 + 1, y1] - data[x1, y1]
            a1 = data[x1, y1] + xGrad1 * (x - x1)

            xGrad2 = data[x1 + 1, y1 + 1] - data[x1, y1 + 1]
            a2 = data[x1, y1 + 1] + xGrad2 * (x - x1)

            yGrad = a2 - a1
            interpArray[i, j] = a1 + yGrad * (y - y1)

    return interpArray
Example #16
Source File: wfslib.py From soapy with GNU General Public License v3.0 | 5 votes |
def zoomtoefield(data, zoomArray):
    """
    2-D zoom interpolation using purely python - fast if compiled with numba.
    Both the array to zoom and the output array are required as arguments, the
    zoom level is calculated from the size of the new array.

    Parameters:
        array (ndarray): The 2-D array to zoom
        zoomArray (ndarray): The array to place the calculation

    Returns:
        interpArray (ndarray): A pointer to the calculated ``zoomArray''
    """
    for i in numba.prange(zoomArray.shape[0]):
        x = i * numba.float32(data.shape[0] - 1) / (zoomArray.shape[0] - 0.99999999)
        x1 = numba.int32(x)

        for j in range(zoomArray.shape[1]):
            y = j * numba.float32(data.shape[1] - 1) / (zoomArray.shape[1] - 0.99999999)
            y1 = numba.int32(y)

            xGrad1 = data[x1 + 1, y1] - data[x1, y1]
            a1 = data[x1, y1] + xGrad1 * (x - x1)

            xGrad2 = data[x1 + 1, y1 + 1] - data[x1, y1 + 1]
            a2 = data[x1, y1 + 1] + xGrad2 * (x - x1)

            yGrad = a2 - a1
            phase_value = (a1 + yGrad * (y - y1))
            zoomArray[i, j] = numpy.exp(1j * phase_value)

    return zoomArray
Example #17
Source File: augmentations.py From dataset_agnostic_segmentation with MIT License | 5 votes |
def pick_random_size(self):
    ratio = self._low_bound + (self._high_bound - self._low_bound) * np.random.rand()
    tw = np.int32(ratio * self._tw)
    th = np.int32(ratio * self._th)
    x0 = np.random.randint(0, self._tw)
    y0 = np.random.randint(0, self._th)
    while not (x0 + tw < self._tw and y0 + th < self._th):
        x0 = np.random.randint(0, self._tw)
        y0 = np.random.randint(0, self._th)
    return tw, th, x0, y0
Example #18
Source File: csr.py From skan with BSD 3-Clause "New" or "Revised" License | 5 votes |
def csr_to_nbgraph(csr, node_props=None):
    if node_props is None:
        node_props = np.broadcast_to(1., csr.shape[0])
        node_props.flags.writeable = True
    return NBGraph(csr.indptr, csr.indices, csr.data,
                   np.array(csr.shape, dtype=np.int32), node_props)
Example #19
Source File: augmentations.py From dataset_agnostic_segmentation with MIT License | 5 votes |
def apply(self, image, boxes, meta_image):
    if np.random.rand() < self._apply_prob:
        aug_img = self.augment_boxes(image, boxes.astype(np.int32), self.slant_prob)
        return aug_img, boxes, meta_image
    return image, boxes, meta_image
Example #20
Source File: augmentations.py From dataset_agnostic_segmentation with MIT License | 5 votes |
def box_filter(boxes, limits, size, border=20):
    z = PartilPage.sq_size(limits, size)
    # Pick boxes that fall inside new image boundaries
    good_idx = np.where((boxes[:, 0] > z[0]) & (boxes[:, 1] > z[1]) & (boxes[:, 2] < z[2]) & (boxes[:, 3] < z[3]))[0]
    # If boundaries are empty...
    if good_idx.shape[0] < 1:
        return [], (0, 0, 0, 0)
    good_boxes = boxes[good_idx, :]
    limits_of_good_boxes = np.concatenate((good_boxes[:, :2].min(0), good_boxes[:, 2:].max(0)))
    new_z = np.array([max(limits_of_good_boxes[0] - border, 0),
                      max(limits_of_good_boxes[1] - border, 0),
                      min(limits_of_good_boxes[2] + border, limits[1]),
                      min(limits_of_good_boxes[3] + border, limits[0])]).astype(np.int32)
    return good_idx, new_z
Example #21
Source File: nonrigid.py From suite2p with GNU General Public License v3.0 | 5 votes |
def nfloor(y):
    return math.floor(y)  # np.int32(np.floor(y))
Example #22
Source File: nonrigid.py From suite2p with GNU General Public License v3.0 | 5 votes |
def map_coordinates(I, yc, xc, Y):
    """
    bilinear transform of image with ycoordinates yc and xcoordinates xc to Y

    Parameters
    -------------
    I : int16 or float32, 2D array
        size [Ly x Lx]
    yc : 2D array
        size [Ly x Lx], new y coordinates
    xc : 2D array
        size [Ly x Lx], new x coordinates

    Returns
    -----------
    Y : float32, 2D array
        size [Ly x Lx], shifted I
    """
    Ly, Lx = I.shape
    yc_floor = yc.copy().astype(np.int32)
    xc_floor = xc.copy().astype(np.int32)
    yc -= yc_floor
    xc -= xc_floor
    for i in range(yc_floor.shape[0]):
        for j in range(yc_floor.shape[1]):
            yf = min(Ly-1, max(0, yc_floor[i,j]))
            xf = min(Lx-1, max(0, xc_floor[i,j]))
            yf1 = min(Ly-1, yf+1)
            xf1 = min(Lx-1, xf+1)
            y = yc[i,j]
            x = xc[i,j]
            Y[i,j] = (np.float32(I[yf, xf]) * (1 - y) * (1 - x) +
                      np.float32(I[yf, xf1]) * (1 - y) * x +
                      np.float32(I[yf1, xf]) * y * (1 - x) +
                      np.float32(I[yf1, xf1]) * y * x)
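A hypothetical usage sketch, assuming map_coordinates above is in scope: passing coordinate grids shifted by half a pixel resamples the image into the preallocated output Y. It also runs (slowly) as plain Python, which makes it easy to sanity-check before compiling.

import numpy as np

I = np.random.rand(32, 32).astype(np.float32)
yy, xx = np.meshgrid(np.arange(32, dtype=np.float32),
                     np.arange(32, dtype=np.float32), indexing='ij')
Y = np.zeros_like(I)
map_coordinates(I, yy + 0.5, xx + 0.5, Y)   # Y now holds the half-pixel-shifted image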
Example #23
Source File: rp_trees.py From pynndescent with BSD 2-Clause "Simplified" License | 5 votes |
def convert_tree_format(tree, data_size):
    n_nodes, n_leaves = num_nodes_and_leaves(tree)
    is_sparse = False
    if tree.hyperplanes[0].ndim == 1:
        # dense hyperplanes
        hyperplane_dim = dense_hyperplane_dim(tree.hyperplanes)
        hyperplanes = np.zeros((n_nodes, hyperplane_dim), dtype=np.float32)
    else:
        # sparse hyperplanes
        is_sparse = True
        hyperplane_dim = sparse_hyperplane_dim(tree.hyperplanes)
        hyperplanes = np.zeros((n_nodes, 2, hyperplane_dim), dtype=np.float32)
        hyperplanes[:, 0, :] = -1

    offsets = np.zeros(n_nodes, dtype=np.float32)
    children = np.int32(-1) * np.ones((n_nodes, 2), dtype=np.int32)
    indices = np.int32(-1) * np.ones(data_size, dtype=np.int32)
    if is_sparse:
        recursive_convert_sparse(
            tree, hyperplanes, offsets, children, indices, 0, 0, len(tree.children) - 1
        )
    else:
        recursive_convert(
            tree, hyperplanes, offsets, children, indices, 0, 0, len(tree.children) - 1
        )
    return FlatTree(hyperplanes, offsets, children, indices, tree.leaf_size)
Example #24
Source File: rp_trees.py From pynndescent with BSD 2-Clause "Simplified" License | 5 votes |
def get_leaves_from_tree(tree):
    n_leaves = 0
    for i in range(len(tree.children)):
        if tree.children[i][0] == -1 and tree.children[i][1] == -1:
            n_leaves += 1

    result = -1 * np.ones((n_leaves, tree.leaf_size), dtype=np.int32)
    leaf_index = 0
    for i in range(len(tree.indices)):
        if tree.children[i][0] == -1 or tree.children[i][1] == -1:
            leaf_size = tree.indices[i].shape[0]
            result[leaf_index, :leaf_size] = tree.indices[i]
            leaf_index += 1

    return result
Example #25
Source File: rp_trees.py From pynndescent with BSD 2-Clause "Simplified" License | 5 votes |
def make_sparse_tree(inds, indptr, spdata, rng_state, leaf_size=30, angular=False):
    indices = np.arange(indptr.shape[0] - 1).astype(np.int32)

    hyperplanes = numba.typed.List.empty_list(sparse_hyperplane_type)
    offsets = numba.typed.List.empty_list(offset_type)
    children = numba.typed.List.empty_list(children_type)
    point_indices = numba.typed.List.empty_list(point_indices_type)

    if angular:
        make_sparse_angular_tree(
            inds, indptr, spdata, indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )
    else:
        make_sparse_euclidean_tree(
            inds, indptr, spdata, indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )

    return FlatTree(hyperplanes, offsets, children, point_indices, leaf_size)
Example #26
Source File: rp_trees.py From pynndescent with BSD 2-Clause "Simplified" License | 5 votes |
def make_dense_tree(data, rng_state, leaf_size=30, angular=False):
    indices = np.arange(data.shape[0]).astype(np.int32)

    hyperplanes = numba.typed.List.empty_list(dense_hyperplane_type)
    offsets = numba.typed.List.empty_list(offset_type)
    children = numba.typed.List.empty_list(children_type)
    point_indices = numba.typed.List.empty_list(point_indices_type)

    if angular:
        make_angular_tree(
            data, indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )
    else:
        make_euclidean_tree(
            data, indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )
    # print("Completed a tree")

    result = FlatTree(hyperplanes, offsets, children, point_indices, leaf_size)
    # print("Tree type is:", numba.typeof(result))
    return result
Example #27
Source File: utils.py From PPGNet with MIT License | 5 votes |
def gen_gaussian_map(centers, shape, sigma):
    centers = np.float32(centers)
    sigma = np.float32(sigma)
    accumulate_confid_map = np.zeros(shape, dtype=np.float32)

    y_range = np.arange(accumulate_confid_map.shape[0], dtype=np.int32)
    x_range = np.arange(accumulate_confid_map.shape[1], dtype=np.int32)
    xx, yy = np.meshgrid(x_range, y_range)
    accumulate_confid_map = apply_gaussian(accumulate_confid_map, centers, xx, yy, sigma)
    accumulate_confid_map[accumulate_confid_map > 1.0] = 1.0

    return accumulate_confid_map
Example #28
Source File: _crown_dalponteCIRC_numba.py From pycrown with GNU General Public License v3.0 | 4 votes |
def get_neighbourhood(radius):
    """ creates list of row and column coordinates for circular indexing
    around a central pixel and for different distances from the centre

    Parameters
    ----------
    radius : int
        radius of circular kernel

    Returns
    -------
    ndarray
        array of column coordinates _relative_ to the central pixel
    ndarray
        array of row coordinates _relative_ to the central pixel
    ndarray
        indices for splitting the array of row/column coordinates into
        different distances from the centre
    """
    # build a circular kernel
    xy = np.arange(-radius, radius+1).reshape(radius*2+1, 1)
    kernel = xy**2 + xy.reshape(1, radius*2+1)**2

    # numba v0.39 doesn't support np.unique, so use a workaround
    sfkernel = np.sort(kernel.flatten())
    unique = list(sfkernel[:1])
    for x in sfkernel:
        if x != unique[-1]:
            unique.append(x)
    nums = unique[1:]

    start = 1
    for num in range(len(nums)):
        if nums[num] >= radius**2:
            continue
        n1, n0 = np.where(kernel == nums[num])
        if start:
            neighbours_x = list(n1.astype(np.int32))
            neighbours_y = list(n0.astype(np.int32))
            breaks = [len(n0)]
            start = 0
        else:
            neighbours_x += list(n1.astype(np.int32))
            neighbours_y += list(n0.astype(np.int32))
            breaks.append(len(n0))

    breaks = np.array(breaks, dtype=np.int32)
    neighbours_x = np.array(neighbours_x, dtype=np.int32) - radius
    neighbours_y = np.array(neighbours_y, dtype=np.int32) - radius

    return neighbours_x, neighbours_y, breaks
Example #29
Source File: rp_trees.py From pynndescent with BSD 2-Clause "Simplified" License | 4 votes |
def make_sparse_angular_tree(
    inds, indptr, data, indices,
    hyperplanes, offsets, children, point_indices,
    rng_state, leaf_size=30,
):
    if indices.shape[0] > leaf_size:
        (
            left_indices,
            right_indices,
            hyperplane,
            offset,
        ) = sparse_angular_random_projection_split(inds, indptr, data, indices, rng_state)

        make_sparse_angular_tree(
            inds, indptr, data, left_indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )
        left_node_num = len(point_indices) - 1

        make_sparse_angular_tree(
            inds, indptr, data, right_indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )
        right_node_num = len(point_indices) - 1

        hyperplanes.append(hyperplane)
        offsets.append(offset)
        children.append((np.int32(left_node_num), np.int32(right_node_num)))
        point_indices.append(np.array([-1], dtype=np.int32))
    else:
        hyperplanes.append(np.array([[-1.0], [-1.0]], dtype=np.float64))
        offsets.append(-np.inf)
        children.append((np.int32(-1), np.int32(-1)))
        point_indices.append(indices)
Example #30
Source File: rp_trees.py From pynndescent with BSD 2-Clause "Simplified" License | 4 votes |
def make_angular_tree(
    data, indices,
    hyperplanes, offsets, children, point_indices,
    rng_state, leaf_size=30,
):
    if indices.shape[0] > leaf_size:
        (
            left_indices,
            right_indices,
            hyperplane,
            offset,
        ) = angular_random_projection_split(data, indices, rng_state)

        make_angular_tree(
            data, left_indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )
        left_node_num = len(point_indices) - 1

        make_angular_tree(
            data, right_indices,
            hyperplanes, offsets, children, point_indices,
            rng_state, leaf_size,
        )
        right_node_num = len(point_indices) - 1

        hyperplanes.append(hyperplane)
        offsets.append(offset)
        children.append((np.int32(left_node_num), np.int32(right_node_num)))
        point_indices.append(np.array([-1], dtype=np.int32))
    else:
        hyperplanes.append(np.array([-1.0], dtype=np.float32))
        offsets.append(-np.inf)
        children.append((np.int32(-1), np.int32(-1)))
        point_indices.append(indices)

    return