Python numpy.dstack() Examples
The following are 30 code examples of numpy.dstack(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module numpy, or try the search function.
Example #1
Source File: test_color.py From snowy with MIT License | 6 votes |
def test_luminance():
    """Visually compare skimage and snowy luminance/sobel pipelines side by side."""
    source = sn.load('tests/sobel_input.png')[:, :, :3]

    def small_gray(luma):
        # Replicate a single-channel image to 3 channels, then shrink for display.
        return sn.resize(np.dstack([luma, luma, luma]), width=256)

    panels = [
        small_gray(rgb2gray(source)),
        small_gray(sn.rgb_to_luminance(source)),
        small_gray(skimage_sobel(source)),
        small_gray(sn.compute_sobel(sn.rgb_to_luminance(source))),
    ]
    sn.show(np.hstack(panels))
Example #2
Source File: lattice_test.py From lattice with Apache License 2.0 | 6 votes |
def _GetTrainingInputsAndLabels(self, config): """Generates training inputs and labels. Args: config: Dictionary with config for this unit test. Returns: Tuple `(training_inputs, training_labels, raw_training_inputs)` where `training_inputs` and `training_labels` are data for training and `raw_training_inputs` are representation of training_inputs for visualisation. """ raw_training_inputs = config["x_generator"]( num_points=config["num_training_records"], lattice_sizes=config["lattice_sizes"]) if isinstance(raw_training_inputs, tuple): # This means that raw inputs are 2-d mesh grid. Convert them into list of # 2-d points. training_inputs = list(np.dstack(raw_training_inputs).reshape((-1, 2))) else: training_inputs = raw_training_inputs training_labels = [config["y_function"](x) for x in training_inputs] return training_inputs, training_labels, raw_training_inputs
Example #3
Source File: lucidDream.py From pyLucid with MIT License | 6 votes |
def thin_plate_transform(x,y,offw,offh,imshape,shift_l=-0.05,shift_r=0.05,num_points=5,offsetMatrix=False):
    """Build a random thin-plate-spline warp field over an image grid.

    Picks `num_points` random control points from (x, y), jitters them by a
    random fraction (in [shift_l, shift_r]) of offw/offh, fits an OpenCV
    thin-plate-spline transform between the original and jittered points,
    and evaluates the warp at every pixel of an (imh, imw) grid.

    Returns the (imh, imw, 2) warped coordinate grid, plus the per-pixel
    displacement relative to the identity grid when offsetMatrix is True.
    """
    # Randomly choose control points among the provided coordinates.
    rand_p=np.random.choice(x.size,num_points,replace=False)
    movingPoints=np.zeros((1,num_points,2),dtype='float32')
    fixedPoints=np.zeros((1,num_points,2),dtype='float32')

    movingPoints[:,:,0]=x[rand_p]
    movingPoints[:,:,1]=y[rand_p]
    # Jitter each control point by a random offset drawn from
    # [shift_l, shift_r], scaled by offw/offh.
    fixedPoints[:,:,0]=movingPoints[:,:,0]+offw*(np.random.rand(num_points)*(shift_r-shift_l)+shift_l)
    fixedPoints[:,:,1]=movingPoints[:,:,1]+offh*(np.random.rand(num_points)*(shift_r-shift_l)+shift_l)

    # Fit the TPS transform with trivial one-to-one point matches.
    tps=cv2.createThinPlateSplineShapeTransformer()
    good_matches=[cv2.DMatch(i,i,0) for i in range(num_points)]
    tps.estimateTransformation(movingPoints,fixedPoints,good_matches)

    # Evaluate the spline at every pixel coordinate of the full image grid.
    imh,imw=imshape
    x,y=np.meshgrid(np.arange(imw),np.arange(imh))
    x,y=x.astype('float32'),y.astype('float32')

    newxy=tps.applyTransformation(np.dstack((x.ravel(),y.ravel())))[1]
    newxy=newxy.reshape([imh,imw,2])

    if offsetMatrix:
        # Also return the per-pixel displacement from the identity grid.
        return newxy,newxy-np.dstack((x,y))
    else:
        return newxy
Example #4
Source File: image_process.py From Advanced_Lane_Lines with MIT License | 6 votes |
def color_grid_thresh(img, s_thresh=(170,255), sx_thresh=(20, 100)):
    """Binary lane-pixel mask: an S-channel color threshold OR'd with an
    x-gradient (Sobel) threshold on the L channel of the HLS image.

    img is expected in RGB order; both thresholds are inclusive ranges.
    """
    work = np.copy(img)

    # Split out the HLS lightness and saturation channels.
    hls = cv2.cvtColor(work, cv2.COLOR_RGB2HLS)
    lightness = hls[:, :, 1]
    saturation = hls[:, :, 2]

    # x-derivative of lightness, made absolute and scaled to 0-255.
    grad_x = cv2.Sobel(lightness, cv2.CV_64F, 1, 0)
    grad_x = np.absolute(grad_x)
    grad_x = np.uint8(255 * grad_x / np.max(grad_x))

    grad_mask = np.zeros_like(grad_x)
    grad_mask[(grad_x >= sx_thresh[0]) & (grad_x <= sx_thresh[1])] = 1

    color_mask = np.zeros_like(saturation)
    color_mask[(saturation >= s_thresh[0]) & (saturation <= s_thresh[1])] = 1

    # A pixel counts as lane if either test fires. For a visual check of
    # pixel provenance, one could instead stack:
    # np.dstack((np.zeros_like(grad_mask), grad_mask, color_mask)) * 255
    return grad_mask | color_mask
Example #5
Source File: misc.py From bop_toolkit with MIT License | 6 votes |
def depth_im_to_dist_im(depth_im, K):
    """Converts a depth image to a distance image.

    The input stores per-pixel Z coordinates (Kinect-style); the output
    stores per-pixel Euclidean distances from the camera center, obtained
    by back-projecting each pixel through the intrinsic matrix K.

    :param depth_im: hxw ndarray, Z coordinate of the 3D point projecting
      to each pixel, or 0 if there is no such 3D point.
    :param K: 3x3 ndarray with an intrinsic camera matrix.
    :return: hxw float64 ndarray with the distance image (0 where depth is 0).
    """
    height, width = depth_im.shape[0], depth_im.shape[1]
    xs, ys = np.meshgrid(np.arange(width), np.arange(height))

    # Pinhole back-projection: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy.
    Xs = np.multiply(xs - K[0, 2], depth_im) * (1.0 / K[0, 0])
    Ys = np.multiply(ys - K[1, 2], depth_im) * (1.0 / K[1, 1])

    # ||(X, Y, Z)|| accumulated in float64 for accuracy.
    # (np.linalg.norm over a dstack of the planes is equivalent but slower.)
    squared = (Xs.astype(np.float64) ** 2
               + Ys.astype(np.float64) ** 2
               + depth_im.astype(np.float64) ** 2)
    return np.sqrt(squared)
Example #6
Source File: test_merge_execute.py From mars with Apache License 2.0 | 6 votes |
def testDStackExecution(self):
    """dstack of two mars tensors must match numpy.dstack for 1-D and 2-D inputs."""
    # (input shape, (lhs chunk size, rhs chunk size)) cases.
    for shape, chunk_sizes in (((10,), (4, 4)), ((10, 20), (3, 4))):
        lhs_data = np.random.rand(*shape)
        rhs_data = np.random.rand(*shape)

        lhs = tensor(lhs_data, chunk_size=chunk_sizes[0])
        rhs = tensor(rhs_data, chunk_size=chunk_sizes[1])
        stacked = dstack([lhs, rhs])

        res = self.executor.execute_tensor(stacked, concat=True)[0]
        expected = np.dstack([lhs_data, rhs_data])
        self.assertTrue(np.array_equal(res, expected))
Example #7
Source File: formatters.py From pyswarms with MIT License | 6 votes |
def compute_history_3d(self, pos_history):
    """Append a fitness column to a 2D position history.

    Parameters
    ----------
    pos_history : numpy.ndarray
        Two-dimensional position matrix history of shape
        :code:`(iterations, n_particles, 2)`

    Returns
    -------
    numpy.ndarray
        3D position matrix of shape :code:`(iterations, n_particles, 3)`,
        where the third column is the fitness at each position.
    """
    # Evaluate the objective on every iteration's particle positions.
    per_iteration = [self.func(positions) for positions in pos_history]
    fitness = np.array(per_iteration)
    return np.dstack((pos_history, fitness))
Example #8
Source File: population.py From ibllib with MIT License | 6 votes |
def _symmetrize_correlograms(correlograms): """Return the symmetrized version of the CCG arrays.""" n_clusters, _, n_bins = correlograms.shape assert n_clusters == _ # We symmetrize c[i, j, 0]. # This is necessary because the algorithm in correlograms() # is sensitive to the order of identical spikes. correlograms[..., 0] = np.maximum( correlograms[..., 0], correlograms[..., 0].T) sym = correlograms[..., 1:][..., ::-1] sym = np.transpose(sym, (1, 0, 2)) return np.dstack((sym, correlograms))
Example #9
Source File: image_process.py From Advanced_Lane_Lines with MIT License | 6 votes |
def draw_lane_fit(undist, warped, Minv, left_fitx, right_fitx, ploty):
    """Paint the detected lane polygon back onto the undistorted image.

    The lane polygon is drawn in warped (bird's-eye) space, unwarped with
    the inverse perspective matrix Minv, then alpha-blended over undist.
    """
    # Blank 3-channel canvas in warped space.
    blank = np.zeros_like(warped).astype(np.uint8)
    canvas = np.dstack((blank, blank, blank))

    # Assemble the polygon: left boundary top-to-bottom, right bottom-to-top,
    # in the (x, y) point format cv2.fillPoly expects.
    left_pts = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    right_pts = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    polygon = np.hstack((left_pts, right_pts))

    cv2.fillPoly(canvas, np.int_([polygon]), (0, 255, 0))

    # Back to camera perspective and blend with the source image.
    unwarped = cv2.warpPerspective(canvas, Minv, (undist.shape[1], undist.shape[0]))
    return cv2.addWeighted(undist, 1, unwarped, 0.3, 0)
Example #10
Source File: plotting.py From rl_algorithms with MIT License | 6 votes |
def plot_cost_to_go_mountain_car(env, estimator, num_tiles=20):
    """Surface plot of the mountain-car "cost to go" (-max Q) over state space."""
    low, high = env.observation_space.low, env.observation_space.high
    positions = np.linspace(low[0], high[0], num=num_tiles)
    velocities = np.linspace(low[1], high[1], num=num_tiles)
    X, Y = np.meshgrid(positions, velocities)

    # Cost-to-go is the negated best action value at each (position, velocity).
    Z = np.apply_along_axis(
        lambda state: -np.max(estimator.predict(state)), 2, np.dstack([X, Y]))

    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(111, projection='3d')
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                           cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
    ax.set_xlabel('Position')
    ax.set_ylabel('Velocity')
    ax.set_zlabel('Value')
    ax.set_title("Mountain \"Cost To Go\" Function")
    fig.colorbar(surf)
    plt.show()
Example #11
Source File: pix2pix.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License | 6 votes |
def train(self):
    """Run the pix2pix training loop.

    Repeatedly samples random image files from self.path, splits each image
    into a (condition | target) pair of halves, runs one discriminator and
    one generator update per iteration, logs the losses every 10 iterations,
    and checkpoints the session every 100 iterations.
    """
    # Fix: the original bound the file list to the name `list`, shadowing
    # the builtin; also use len() instead of calling __len__ directly.
    filenames = os.listdir(self.path)
    nums_file = len(filenames)
    saver = tf.train.Saver()
    for i in range(10000):
        # Sample a random mini-batch of file indices (with replacement).
        rand_select = np.random.randint(0, nums_file, [self.batch_size])
        INPUTS = np.zeros([self.batch_size, self.img_h, self.img_w, 3])
        INPUTS_CONDITION = np.zeros([self.batch_size, self.img_h, self.img_w, 3])
        for j in range(self.batch_size):
            img = np.array(Image.open(self.path + filenames[rand_select[j]]))
            img_h, img_w = img.shape[0], img.shape[1]
            # Left half is the condition, right half the target; both are
            # resized, normalized to [-1, 1] and replicated to 3 channels.
            INPUT_CON = misc.imresize(img[:, :img_w//2], [self.img_h, self.img_w]) / 127.5 - 1.0
            INPUTS_CONDITION[j] = np.dstack((INPUT_CON, INPUT_CON, INPUT_CON))
            INPUT = misc.imresize(img[:, img_w//2:], [self.img_h, self.img_w]) / 127.5 - 1.0
            INPUTS[j] = np.dstack((INPUT, INPUT, INPUT))
        # Alternate discriminator and generator updates on the same batch.
        self.sess.run(self.opt_dis, feed_dict={self.inputs: INPUTS, self.inputs_condition: INPUTS_CONDITION})
        self.sess.run(self.opt_gen, feed_dict={self.inputs: INPUTS, self.inputs_condition: INPUTS_CONDITION})
        if i % 10 == 0:
            [G_LOSS, D_LOSS] = self.sess.run([self.g_loss, self.d_loss], feed_dict={self.inputs: INPUTS, self.inputs_condition: INPUTS_CONDITION})
            print("Iteration: %d, d_loss: %f, g_loss: %f"%(i, D_LOSS, G_LOSS))
        if i % 100 == 0:
            saver.save(self.sess, "./save_para//model.ckpt")
Example #12
Source File: style_transfer_realtime.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License | 6 votes |
def mapping(img):
    """Linearly rescale img so its values span exactly [0, 255]."""
    lo, hi = np.min(img), np.max(img)
    return 255.0 * (img - lo) / (hi - lo)

# def read_data(path, batch_size):
#     filenames = os.listdir(path)
#     filenames_len = filenames.__len__()
#     rand_select = np.random.randint(0, filenames_len, [batch_size])
#     batch_data = np.zeros([batch_size, 256, 256, 3])
#     for i in range(batch_size):
#         img = np.array(Image.open(path + filenames[rand_select[i]]).resize([256, 256]))
#         try:
#             if img.shape.__len__() == 3:
#                 batch_data[i, :, :, :] = img[:256, :256, :3]
#             else:
#                 batch_data[i, :, :, :] = np.dstack((img, img, img))[:256, :256, :]
#         except:
#             img = np.array(Image.open(path + filenames[0]))
#             batch_data[i, :, :, :] = img[:256, :256, :3]
#     return batch_data
Example #13
Source File: test_hlayers.py From aboleth with Apache License 2.0 | 6 votes |
def test_concat(make_data):
    """Concatenating two identical input layers doubles the feature axis."""
    x, _, X = make_data

    # Two input layers replicating the data over 3 samples, concatenated.
    layer_x = ab.InputLayer('X', n_samples=3)
    layer_y = ab.InputLayer('Y', n_samples=3)
    cat = ab.Concat(layer_x, layer_y)
    F, KL = cat(X=x, Y=x)

    tc = tf.test.TestCase()
    with tc.test_session():
        joined = F.eval()
        original = X.eval()
        # Same leading (samples, rows) dims, doubled feature dimension.
        assert joined.shape == original.shape[0:2] + (2 * original.shape[2],)
        assert np.all(joined == np.dstack((original, original)))
        # Deterministic layers contribute no KL penalty.
        assert KL.eval() == 0.0
Example #14
Source File: stylize.py From Chinese-Character-and-Calligraphic-Image-Processing with MIT License | 6 votes |
def mapping(img):
    """Stretch the dynamic range of img linearly onto [0, 255]."""
    span = np.max(img) - np.min(img)
    return 255.0 * (img - np.min(img)) / span

# def read_data(path, batch_size):
#     filenames = os.listdir(path)
#     filenames_len = filenames.__len__()
#     rand_select = np.random.randint(0, filenames_len, [batch_size])
#     batch_data = np.zeros([batch_size, 256, 256, 3])
#     for i in range(batch_size):
#         img = np.array(Image.open(path + filenames[rand_select[i]]).resize([256, 256]))
#         try:
#             if img.shape.__len__() == 3:
#                 batch_data[i, :, :, :] = img[:256, :256, :3]
#             else:
#                 batch_data[i, :, :, :] = np.dstack((img, img, img))[:256, :256, :]
#         except:
#             img = np.array(Image.open(path + filenames[0]))
#             batch_data[i, :, :, :] = img[:256, :256, :3]
#     return batch_data
Example #15
Source File: tiles.py From argus-freesound with MIT License | 6 votes |
def merge(self, tiles: List[np.ndarray], dtype=np.float32):
    """Blend a list of tiles back into one padded image.

    Each tile is multiplied by the weight window, accumulated at its crop
    location, and the sum is normalized by the accumulated weights before
    being cropped back to the original image size.

    Raises ValueError when the number of tiles does not match the number
    of stored crops.
    """
    if len(tiles) != len(self.crops):
        raise ValueError

    n_channels = 1 if tiles[0].ndim == 2 else tiles[0].shape[2]
    padded_shape = (
        self.image_height + self.margin_bottom + self.margin_top,
        self.image_width + self.margin_right + self.margin_left,
        n_channels,
    )

    accum = np.zeros(padded_shape, dtype=np.float64)
    weight_sum = np.zeros(padded_shape, dtype=np.float64)
    weights = np.dstack([self.weight] * n_channels)

    for tile, (x, y, tile_width, tile_height) in zip(tiles, self.crops):
        region = (slice(y, y + tile_height), slice(x, x + tile_width))
        accum[region] += tile * weights
        weight_sum[region] += weights

    # Avoid division by zero in areas no tile covered.
    weight_sum = np.clip(weight_sum, a_min=np.finfo(weight_sum.dtype).eps, a_max=None)
    normalized = np.divide(accum, weight_sum).astype(dtype)
    return self.crop_to_orignal_size(normalized)
Example #16
Source File: test_function_base.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def test_extended_axis(self):
    """np.percentile over a tuple of axes must match flattening those axes."""
    o = np.random.normal(size=(71, 23))
    x = np.dstack([o] * 10)
    # Reducing over the two replicated axes equals the scalar percentile of o,
    # regardless of where the replication axis sits.
    assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
    x = np.rollaxis(x, -1, 0)
    assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
    x = x.swapaxes(0, 1).copy()
    assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
    x = x.swapaxes(0, 1).copy()

    # Reducing over every axis is equivalent to axis=None.
    assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),
                 np.percentile(x, [25, 60], axis=None))
    # A one-element axis tuple behaves like the plain integer axis.
    assert_equal(np.percentile(x, [25, 60], axis=(0,)),
                 np.percentile(x, [25, 60], axis=0))

    d = np.arange(3 * 5 * 7 * 11).reshape(3, 5, 7, 11)
    np.random.shuffle(d)
    # Various axis tuples on a 4-D array, checked against explicit slicing
    # plus flattening of the reduced axes.
    assert_equal(np.percentile(d, 25,  axis=(0, 1, 2))[0],
                 np.percentile(d[:,:,:, 0].flatten(), 25))
    assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],
                 np.percentile(d[:,:, 1,:].flatten(), [10, 90]))
    # Negative axes in the tuple are normalized before reduction.
    assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],
                 np.percentile(d[:,:, 2,:].flatten(), 25))
    assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],
                 np.percentile(d[2,:,:,:].flatten(), 25))
    assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],
                 np.percentile(d[2, 1,:,:].flatten(), 25))
    assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],
                 np.percentile(d[2,:,:, 1].flatten(), 25))
    assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],
                 np.percentile(d[2,:, 2,:].flatten(), 25))
Example #17
Source File: deconvolution.py From OpenCV-Python-Tutorial with MIT License | 5 votes |
def blur_edge(img, d=31):
    """Blur only the border region of img, leaving the interior untouched.

    Pixels at least d away from every edge keep their original value; the
    outermost pixels are fully replaced by a wrap-padded Gaussian blur,
    with a linear ramp in between.
    """
    h, w = img.shape[:2]

    # Blur with wrap-around padding so the border blur sees the opposite edge.
    padded = cv2.copyMakeBorder(img, d, d, d, d, cv2.BORDER_WRAP)
    blurred = cv2.GaussianBlur(padded, (2*d+1, 2*d+1), -1)[d:-d, d:-d]

    # Per-pixel distance to the nearest image edge, capped at d and
    # normalized to [0, 1]: 0 on the border, 1 in the interior.
    y, x = np.indices((h, w))
    dist = np.dstack([x, w-x-1, y, h-y-1]).min(-1)
    ramp = np.minimum(np.float32(dist)/d, 1.0)

    return img*ramp + blurred*(1-ramp)
Example #18
Source File: io.py From snowy with MIT License | 5 votes |
def extract_rgb(image: np.ndarray) -> np.ndarray:
    """Extract the RGB planes from an RGBA image.

    Note that this returns a copy. If you wish to obtain a view that
    allows mutating pixels, simply use slicing instead. For example, to
    invert the colors of an image while leaving alpha intact, you can do:
    <code>myimage[:,:,:3] = 1.0 - myimage[:,:,:3]</code>.
    """
    assert len(image.shape) == 3 and image.shape[2] >= 3
    channels = np.dsplit(image, image.shape[2])
    return np.dstack(channels[:3])
Example #19
Source File: io.py From snowy with MIT License | 5 votes |
def from_planar(image: np.ndarray) -> np.ndarray:
    """Convert a channel-major (planar) image to a row-major one.

    Stacks the leading-axis planes along a new depth axis. This creates a
    copy, not a view.
    """
    assert len(image.shape) == 3
    planes = [plane for plane in image]
    return np.dstack(planes)
Example #20
Source File: test_dist.py From snowy with MIT License | 5 votes |
def test_cpcf():
    """Smoke-test closest-point coordinate fields and images derived from them."""
    w, h = 500, 500

    def show(im):
        snowy.show(snowy.resize(im, height=100, filter=None))

    # Solid color planes used to tint the shapes.
    yellow = np.full((w, h, 3), (1, 1, 0))
    red = np.full((w, h, 3), (1, 0, 0))

    # Blue frame: zero out everything except a t-pixel-wide border.
    blue_border = np.full((w, h, 3), (0, 0, 1))
    t = 5; blue_border[t:h-t,t:w-t] *= 0

    # Two tinted circles plus the border, clamped to [0, 1].
    c0 = create_circle(w, h, 0.3) * yellow * 100000
    c1 = create_circle(w, h, 0.07, 0.8, 0.8) * red * 10000
    circles = np.clip(c0 + c1 + blue_border, 0, 1)

    # Non-black pixels form the mask used for SDF and CPCF generation.
    r, g, b = circles.swapaxes(0, 2)
    luma = snowy.reshape(r + g + b)
    mask = luma != 0.0

    sdf = snowy.unitize(np.abs(snowy.generate_sdf(mask)))
    cpcf = snowy.generate_cpcf(mask)

    # Voronoi-style image: each pixel takes the color of its closest point.
    voronoi = np.empty(circles.shape)
    np.copyto(voronoi, snowy.dereference_coords(circles, cpcf))

    # Replicate the single-channel images to 3 channels for display.
    luma = np.dstack([luma, luma, luma])
    sdf = np.dstack([sdf, sdf, sdf])
    final = np.hstack([circles, luma, sdf, voronoi])
    final = snowy.resize(final, height=400)
    show(final)
Example #21
Source File: make_handcrafted_33_features.py From wsdm19cup with MIT License | 5 votes |
def create_question_hash(self, train_df, test_df):
    """Map every distinct question title to a unique integer id.

    Titles are gathered from both title columns of both dataframes; ids
    are assigned in order of first appearance.
    """
    train_titles = np.dstack([train_df["title1_zh"], train_df["title2_zh"]]).flatten()
    test_titles = np.dstack([test_df["title1_zh"], test_df["title2_zh"]]).flatten()

    combined = pd.DataFrame(np.append(train_titles, test_titles))[0].drop_duplicates()
    combined.reset_index(inplace=True, drop=True)

    # Invert: title -> positional id.
    return pd.Series(combined.index.values, index=combined.values).to_dict()
Example #22
Source File: basis_functions.py From revrand with Apache License 2.0 | 5 votes |
def grad(self, X, lenscale=None):
    r"""
    Get the gradients of this basis w.r.t.\ the length scales.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale: scalar or ndarray, optional
        scalar or array of shape (d,) length scales (one for each dimension
        of X). If not input, this uses the value of the initial length
        scale.

    Returns
    -------
    ndarray:
        of shape (N, 2*nbases[, d]) where d is number of lenscales (if not
        ARD, i.e. scalar lenscale, this is just a 2D array). This is
        :math:`\partial \Phi(\mathbf{X}) / \partial \mathbf{l}`
    """
    N, D = X.shape
    # Column vector of length scales; _check_dim validates/broadcasts them.
    lenscale = self._check_dim(D, lenscale)[:, np.newaxis]

    WX = np.dot(X, self.W / lenscale)
    sinWX = - np.sin(WX)
    cosWX = np.cos(WX)

    dPhi = []
    for i, l in enumerate(lenscale):
        # Chain rule on W / l: d(WX)/dl_i = -X_i * W_i / l_i^2.
        dWX = np.outer(X[:, i], - self.W[i, :] / l**2)
        dPhi.append(np.hstack((dWX * sinWX, dWX * cosWX)) / np.sqrt(self.n))

    # ARD case: stack per-dimension gradients along a third axis;
    # isotropic case: return the single 2D gradient directly.
    return np.dstack(dPhi) if len(lenscale) != 1 else dPhi[0]
Example #23
Source File: basis_functions.py From revrand with Apache License 2.0 | 5 votes |
def grad(self, X, lenscale=None):
    r"""
    Get the gradients of this basis w.r.t.\ the length scale.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale: float, optional
        the length scale (scalar) of the RBFs to apply to X. If not input,
        this uses the value of the initial length scale.

    Returns
    -------
    ndarray:
        of shape (N, D) where D is number of centres. This is
        :math:`\partial \Phi(\mathbf{X}) / \partial l`
    """
    N, d = X.shape
    lenscale = self._check_dim(d, lenscale)

    # Basis values at the current length scale, reused in the derivative.
    Phi = self.transform(X, lenscale)
    dPhi = []
    for i, l in enumerate(lenscale):
        # Per-dimension distance between X and the centres self.C, with
        # the 1/l**2 length-scale factor folded into both operands.
        ldist = cdist(X[:, [i]] / l**2, self.C[:, [i]] / l**2, 'euclidean')
        # Sigmoidal-basis derivative uses the Phi * (1 - Phi) factor.
        dPhi.append(- ldist * Phi * (1 - Phi))

    # ARD: stack per-dimension gradients; isotropic: single 2D gradient.
    return np.dstack(dPhi) if len(lenscale) != 1 else dPhi[0]
Example #24
Source File: basis_functions.py From revrand with Apache License 2.0 | 5 votes |
def grad(self, X, lenscale=None):
    r"""
    Get the gradients of this basis w.r.t.\ the length scale.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale: scalar or ndarray, optional
        scalar or array of shape (d,) length scales (one for each dimension
        of X). If not input, this uses the value of the initial length
        scale.

    Returns
    -------
    ndarray:
        of shape (N, D) where D is number of RBF centres. This is
        :math:`\partial \Phi(\mathbf{X}) / \partial l`
    """
    N, d = X.shape
    lenscale = self._check_dim(d, lenscale)

    # Basis values at the current length scale, reused in the derivative.
    Phi = self.transform(X, lenscale)
    dPhi = []
    for i, l in enumerate(lenscale):
        # Per-dimension squared distance between X and the centres self.C,
        # with the 1/l**3 length-scale factor folded into both operands.
        ldist = cdist(X[:, [i]] / l**3, self.C[:, [i]] / l**3, 'sqeuclidean')
        dPhi.append(Phi * ldist)

    # ARD: stack per-dimension gradients; isotropic: single 2D gradient.
    return np.dstack(dPhi) if len(lenscale) != 1 else dPhi[0]
Example #25
Source File: plotting.py From rl_algorithms with MIT License | 5 votes |
def plot_value_function(V, title="Value Function"):
    """
    Plots the value function as a surface plot.

    V maps (player_sum, dealer_showing, usable_ace) keys to values; two
    surfaces are drawn, one for each usable-ace case.
    """
    # Bounds of the state space, derived from the keys present in V.
    min_x = min(k[0] for k in V.keys())
    max_x = max(k[0] for k in V.keys())
    min_y = min(k[1] for k in V.keys())
    max_y = max(k[1] for k in V.keys())

    x_range = np.arange(min_x, max_x + 1)
    y_range = np.arange(min_y, max_y + 1)
    X, Y = np.meshgrid(x_range, y_range)

    # Find value for all (x, y) coordinates, split by the usable-ace flag.
    Z_noace = np.apply_along_axis(lambda _: V[(_[0], _[1], False)], 2, np.dstack([X, Y]))
    Z_ace = np.apply_along_axis(lambda _: V[(_[0], _[1], True)], 2, np.dstack([X, Y]))

    def plot_surface(X, Y, Z, title):
        # Render one 3D surface with a fixed [-1, 1] color scale.
        fig = plt.figure(figsize=(20, 10))
        ax = fig.add_subplot(111, projection='3d')
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                               cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
        ax.set_xlabel('Player Sum')
        ax.set_ylabel('Dealer Showing')
        ax.set_zlabel('Value')
        ax.set_title(title)
        ax.view_init(ax.elev, -120)
        fig.colorbar(surf)
        plt.show()

    plot_surface(X, Y, Z_noace, "{} (No Usable Ace)".format(title))
    plot_surface(X, Y, Z_ace, "{} (Usable Ace)".format(title))
Example #26
Source File: ILI9341.py From Adafruit_Python_ILI9341 with MIT License | 5 votes |
def image_to_data(image):
    """Convert a PIL image to a flat list of 16-bit 565 RGB bytes
    (high byte then low byte, per pixel).

    NumPy is much faster at doing this. NumPy code provided by:
    Keith (https://www.blogger.com/profile/02555547344016007163)
    """
    rgb = np.array(image.convert('RGB')).astype('uint16')
    # Pack 8-8-8 RGB into 5-6-5: RRRRRGGG GGGBBBBB.
    packed = ((rgb[:,:,0] & 0xF8) << 8) | ((rgb[:,:,1] & 0xFC) << 3) | (rgb[:,:,2] >> 3)
    # Interleave high and low bytes per pixel, then flatten.
    return np.dstack(((packed >> 8) & 0xFF, packed & 0xFF)).flatten().tolist()
Example #27
Source File: test_rastersource_getsetdata_basic.py From buzzard with Apache License 2.0 | 5 votes |
def dst_arr(rast):
    """Array to write in raster and to compare against.

    One plane per channel: (row index + col index) offset by the channel
    number, with the main diagonal poisoned by the nodata value if set.
    """
    planes = [np.add(*rast.fp.meshgrid_raster) + i for i in range(len(rast))]
    arr = np.dstack(planes).astype(rast.dtype)
    if rast.nodata is not None:
        arr[np.diag_indices(arr.shape[0])] = rast.nodata
    return arr
Example #28
Source File: test_footprint.py From buzzard with Apache License 2.0 | 5 votes |
def test_coord_conv(fps):
    """Shape and dtype behaviour of raster<->spatial coordinate conversion."""
    coords = np.dstack(fps.AI.meshgrid_spatial)
    # Conversions preserve the (h, w, 2) coordinate array shape.
    assert fps.AI.raster_to_spatial(coords).shape == coords.shape
    assert fps.AI.spatial_to_raster(coords).shape == coords.shape
    # The requested output dtype is honored, with or without a custom op.
    assert fps.AI.spatial_to_raster(coords, dtype='float16').dtype == np.float16
    assert fps.AI.spatial_to_raster(coords, dtype='float16', op=42).dtype == np.float16
Example #29
Source File: test_rastersource_resampling.py From buzzard with Apache License 2.0 | 5 votes |
def rast(request, ds):
    """Fixture for the dataset creation.

    request.param is (driver, channel_count, dtype, nodata); the raster is
    filled with TIF_VALUES in every channel, yielded to the test, and torn
    down afterwards (closed for in-memory drivers, deleted for file ones).
    """
    fp = TIF_FP
    driver, channel_count, dtype, nodata = request.param
    if driver == 'numpy':
        # Wrap a pre-built ndarray directly; data is stacked up front.
        rast = ds.awrap_numpy_raster(
            fp,
            np.dstack([TIF_VALUES.copy().astype(dtype=dtype)] * channel_count),
            channels_schema=dict(nodata=nodata),
            sr=None,
            mode='r',
        )
    elif driver == 'MEM':
        # In-memory GDAL raster; bands are filled one by one.
        rast = ds.acreate_raster(
            '', fp, dtype, channel_count, channels_schema=dict(nodata=nodata), driver='MEM',
        )
        for band_id in range(1, len(rast) + 1):
            rast.set_data(TIF_VALUES, band=band_id)
    else:
        # File-backed raster in a uniquely named temp path.
        path = '{}/{}.tif'.format(tempfile.gettempdir(), uuid.uuid4())
        rast = ds.acreate_raster(
            path, fp, dtype, channel_count, channels_schema=dict(nodata=nodata), driver=driver
        )
        for band_id in range(1, len(rast) + 1):
            rast.set_data(TIF_VALUES, band=band_id)
    yield rast
    # Teardown: in-memory rasters are closed, file-backed ones deleted.
    if driver in {'numpy', 'MEM'}:
        rast.close()
    else:
        rast.delete()
Example #30
Source File: _footprint.py From buzzard with Apache License 2.0 | 5 votes |
def meshgrid_raster_in(self, other, dtype=None, op=np.floor): """Compute raster coordinate matrices of `self` in `other` referential Parameters ---------- other: Footprint .. dtype: None or convertible to np.dtype Output dtype If None: Use buzz.env.default_index_dtype op: None or function operating on a vector Function to apply before casting output to dtype If None: Do not transform data before casting Returns ------- (x, y): (np.ndarray, np.ndarray) Raster coordinate matrices with shape = self.shape with dtype = dtype """ # Check other parameter if not isinstance(other, self.__class__): raise TypeError('other should be a Footprint') # pragma: no cover # Check dtype parameter if dtype is None: dtype = env.default_index_dtype else: dtype = conv.dtype_of_any_downcast(dtype) # Check op parameter if not np.issubdtype(dtype, np.integer): op = None xy = other.spatial_to_raster(np.dstack(self.meshgrid_spatial), dtype=dtype, op=op) return xy[..., 0], xy[..., 1]