Python skimage.io.imshow() Examples
The following are 14 code examples of skimage.io.imshow(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module skimage.io.
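As a quick orientation before the examples, here is a minimal sketch of how skimage.io.imshow() is typically called. It is not taken from any of the projects below, and the file name is a placeholder; io.imshow() queues an image for display through the active I/O plugin (matplotlib in the default configuration), and io.show() renders the queued figures.

from skimage import io

img = io.imread("example.png")  # "example.png" is a hypothetical path
io.imshow(img)                  # queue the image for display
io.show()                       # render the queued image(s)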

Example #1
Source Project: Pic-Numero | Author: oduwa | File: RAG_threshold.py | License: MIT License

def experiment_with_parameters():
    img = misc.imread("wheat.png")
    compactness_values = [30, 50, 70, 100, 200, 300, 500, 700, 1000]
    n_segments_values = [3, 4, 5, 6, 7, 8, 9, 10]

    for compactness_val in compactness_values:
        for n in n_segments_values:
            labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
            out1 = color.label2rgb(labels1, img, kind='overlay')

            fig, ax = plt.subplots()
            ax.imshow(out1, interpolation='nearest')
            ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
            plt.savefig("RAG/c{}_k{}.png".format(compactness_val, n))
            plt.close(fig)
Example #2
Source Project: deep-learning-note | Author: wdxtub | File: 8_kmeans_pca.py | License: MIT License

def plot_n_image(X, n):
    """
    plot first n images
    n has to be a square number
    """
    pic_size = int(np.sqrt(X.shape[1]))
    grid_size = int(np.sqrt(n))

    first_n_images = X[:n, :]

    fig, ax_array = plt.subplots(nrows=grid_size, ncols=grid_size,
                                 sharey=True, sharex=True, figsize=(8, 8))

    for r in range(grid_size):
        for c in range(grid_size):
            ax_array[r, c].imshow(first_n_images[grid_size * r + c].reshape((pic_size, pic_size)))
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
Example #3
Source Project: c3d-pytorch | Author: DavideA | File: predict.py | License: MIT License

def get_sport_clip(clip_name, verbose=True):
    """
    Loads a clip to be fed to C3D for classification.
    TODO: should I remove mean here?

    Parameters
    ----------
    clip_name: str
        the name of the clip (subfolder in 'data').
    verbose: bool
        if True, shows the unrolled clip (default is True).

    Returns
    -------
    Tensor
        a pytorch batch (n, ch, fr, h, w).
    """
    clip = sorted(glob(join('data', clip_name, '*.png')))
    clip = np.array([resize(io.imread(frame), output_shape=(112, 200), preserve_range=True) for frame in clip])
    clip = clip[:, :, 44:44 + 112, :]  # crop centrally

    if verbose:
        clip_img = np.reshape(clip.transpose(1, 0, 2, 3), (112, 16 * 112, 3))
        io.imshow(clip_img.astype(np.uint8))
        io.show()

    clip = clip.transpose(3, 0, 1, 2)  # ch, fr, h, w
    clip = np.expand_dims(clip, axis=0)  # batch axis
    clip = np.float32(clip)

    return torch.from_numpy(clip)
Example #4
Source Project: DCGAN-LSGAN-WGAN-GP-DRAGAN-Tensorflow-2 | Author: LynnHo | File: basic.py | License: MIT License

def imshow(image):
    """Show a [-1.0, 1.0] image."""
    iio.imshow(dtype.im2uint(image))
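The dtype.im2uint helper used here (and in Examples #5, #12 and #13) is defined in the author's own utility module, which is not reproduced on this page. For readability, a rough stand-in with the behavior the docstring implies, mapping a float image in [-1.0, 1.0] to uint8, might look like the sketch below; this is an assumption, not the project's actual implementation.

import numpy as np

def im2uint(image):
    # Assumed behavior only: clip to [-1, 1] and rescale to the uint8 range [0, 255].
    image = np.clip(image, -1.0, 1.0)
    return ((image + 1.0) * 127.5).astype(np.uint8)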
Example #5
Source Project: CycleGAN-Tensorflow-2 | Author: LynnHo | File: basic.py | License: MIT License

def imshow(image):
    """Show a [-1.0, 1.0] image."""
    iio.imshow(dtype.im2uint(image))
Example #6
Source Project: VAE-Tensorflow | Author: LynnHo | File: basic.py | License: MIT License

def imshow(image):
    """Show a [-1.0, 1.0] image."""
    iio.imshow(im2float(image))
Example #7
Source Project: Pic-Numero | Author: oduwa | File: RAG_threshold.py | License: MIT License

def main():
    img = misc.imread("wheat.png")

    # labels1 = segmentation.slic(img, compactness=100, n_segments=9)
    labels1 = segmentation.slic(img, compactness=50, n_segments=4)
    out1 = color.label2rgb(labels1, img, kind='overlay')
    print(labels1.shape)

    g = graph.rag_mean_color(img, labels1)
    labels2 = graph.cut_threshold(labels1, g, 29)
    out2 = color.label2rgb(labels2, img, kind='overlay')

    # get roi
    # logicalIndex = (labels2 != 1)
    # gray = rgb2gray(img);
    # gray[logicalIndex] = 0;

    plt.figure()
    io.imshow(out1)

    plt.figure()
    io.imshow(out2)

    io.show()
Example #8
Source Project: Pic-Numero | Author: oduwa | File: RAG_threshold.py | License: MIT License

def spectral_cluster(filename, compactness_val=30, n=6):
    img = misc.imread(filename)
    labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
    out1 = color.label2rgb(labels1, img, kind='overlay',
                           colors=['red', 'green', 'blue', 'cyan', 'magenta', 'yellow'])

    fig, ax = plt.subplots()
    ax.imshow(out1, interpolation='nearest')
    ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
    plt.show()
Example #9
Source Project: brain_segmentation | Author: naldeborgh7575 | File: Segmentation_Models.py | License: MIT License

def predict_image(self, test_img, show=False):
    '''
    predicts classes of input image
    INPUT   (1) str 'test_image': filepath to image to predict on
            (2) bool 'show': True to show the results of prediction, False to return prediction
    OUTPUT  (1) if show == False: array of predicted pixel classes for the center 208 x 208 pixels
            (2) if show == True: displays segmentation results
    '''
    imgs = io.imread(test_img).astype('float').reshape(5, 240, 240)
    plist = []

    # create patches from an entire slice
    for img in imgs[:-1]:
        if np.max(img) != 0:
            img /= np.max(img)
        p = extract_patches_2d(img, (33, 33))
        plist.append(p)
    patches = np.array(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3])))

    # predict classes of each pixel based on model
    full_pred = self.model_comp.predict_classes(patches)
    fp1 = full_pred.reshape(208, 208)
    if show:
        io.imshow(fp1)
        plt.show()
    else:
        return fp1
Example #10
Source Project: deep-high-resolution-net.TensorFlow | Author: VXallset | File: dataset.py | License: MIT License

def draw_points_on_img(img, point_ver, point_hor, point_class):
    for i in range(len(point_class)):
        if point_class[i] != 3:
            rr, cc = draw.circle(point_ver[i], point_hor[i], 10, (256, 192))
            # draw.set_color(img, [rr, cc], [0., 0., 0.], alpha=5)
            img[rr, cc, :] = 0
    # io.imshow(img)
    # io.show()
    return img
Example #11
Source Project: deep-high-resolution-net.TensorFlow | Author: VXallset | File: dataset.py | License: MIT License

def mytest():
    tfrecord_file = '../dataset/train.tfrecords'
    filename_queue = tf.train.string_input_producer([tfrecord_file], num_epochs=None)
    image_name, image, keypoints_ver, keypoints_hor, keypoints_class = decode_tfrecord(filename_queue)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            # while not coord.should_stop():
            for i in range(10):
                img_name, img, point_ver, point_hor, point_class = sess.run(
                    [image_name, image, keypoints_ver, keypoints_hor, keypoints_class])
                print(img_name, point_hor, point_ver, point_class)
                for i in range(len(point_class)):
                    if point_class[i] > 0:
                        rr, cc = draw.circle(point_ver[i], point_hor[i], 10, (256, 192))
                        img[rr, cc, :] = 0
                io.imshow(img)
                io.show()
        except tf.errors.OutOfRangeError:
            print('Done reading')
        finally:
            coord.request_stop()
Example #12
Source Project: DCGAN-LSGAN-WGAN-GP-DRAGAN-Pytorch | Author: LynnHo | File: basic.py | License: MIT License

def imshow(image):
    """Show a [-1.0, 1.0] image."""
    iio.imshow(dtype.im2uint(image))
Example #13
Source Project: AttGAN-Tensorflow | Author: LynnHo | File: basic.py | License: MIT License

def imshow(image):
    """Show a [-1.0, 1.0] image."""
    iio.imshow(dtype.im2uint(image))
Example #14
Source Project: brain_segmentation | Author: naldeborgh7575 | File: Segmentation_Models.py | License: MIT License

def show_segmented_image(self, test_img, modality='t1c', show=False):
    '''
    Creates an image of original brain with segmentation overlay
    INPUT   (1) str 'test_img': filepath to test image for segmentation, including file extension
            (2) str 'modality': imaging modality to use as background. defaults to t1c. options: (flair, t1, t1c, t2)
            (3) bool 'show': If true, shows output image. defaults to False.
    OUTPUT  (1) if show is True, shows image of segmentation results
            (2) if show is False, returns segmented image.
    '''
    modes = {'flair': 0, 't1': 1, 't1c': 2, 't2': 3}

    segmentation = self.predict_image(test_img, show=False)
    img_mask = np.pad(segmentation, (16, 16), mode='edge')
    ones = np.argwhere(img_mask == 1)
    twos = np.argwhere(img_mask == 2)
    threes = np.argwhere(img_mask == 3)
    fours = np.argwhere(img_mask == 4)

    test_im = io.imread(test_img)
    test_back = test_im.reshape(5, 240, 240)[-2]
    # overlay = mark_boundaries(test_back, img_mask)
    gray_img = img_as_float(test_back)

    # adjust gamma of image
    image = adjust_gamma(color.gray2rgb(gray_img), 0.65)
    sliced_image = image.copy()
    red_multiplier = [1, 0.2, 0.2]
    yellow_multiplier = [1, 1, 0.25]
    green_multiplier = [0.35, 0.75, 0.25]
    blue_multiplier = [0, 0.25, 0.9]

    # change colors of segmented classes
    for i in xrange(len(ones)):
        sliced_image[ones[i][0]][ones[i][1]] = red_multiplier
    for i in xrange(len(twos)):
        sliced_image[twos[i][0]][twos[i][1]] = green_multiplier
    for i in xrange(len(threes)):
        sliced_image[threes[i][0]][threes[i][1]] = blue_multiplier
    for i in xrange(len(fours)):
        sliced_image[fours[i][0]][fours[i][1]] = yellow_multiplier

    if show:
        io.imshow(sliced_image)
        plt.show()
    else:
        return sliced_image