Python pylab.imshow() Examples
The following are 30 code examples of pylab.imshow().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module pylab, or try the search function.
Example #1
Source File: View.py From Deep-Spying with Apache License 2.0 | 9 votes |
def plot_confusion_matrix(self, matrix, labels):
    """Render *matrix* as an annotated confusion-matrix heat map.

    :param matrix: 2-D array of confusion values
    :param labels: class names used to label both axes
    """
    # Skip all plotting work when neither saving nor showing is requested.
    if not self.to_save and not self.to_show:
        return
    pylab.figure()
    pylab.imshow(matrix, interpolation='nearest', cmap=pylab.cm.jet)
    pylab.title("Confusion Matrix")
    # Write each cell's value at its own grid position.
    for row_idx, row in enumerate(matrix):
        for col_idx, cell in enumerate(row):
            pylab.annotate("%.1f" % cell, xy=(col_idx, row_idx),
                           horizontalalignment='center',
                           verticalalignment='center',
                           fontsize=9)
    pylab.colorbar()
    tick_positions = np.arange(len(labels))
    pylab.xticks(tick_positions, labels)
    pylab.yticks(tick_positions, labels)
    pylab.ylabel('Expected label')
    pylab.xlabel('Predicted label')
Example #2
Source File: image_ocr.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #3
Source File: rectify.py From facade-segmentation with MIT License | 6 votes |
def plot_rectified(self):
    """Display the rectified image with its detected vertical/horizontal lines."""
    import pylab
    pylab.title('rectified')
    pylab.imshow(self.rectified)

    # Vertical lines in green, horizontal lines in red; each endpoint is
    # mapped back through the inverse rectification transform.
    for p0, p1 in self.vlines:
        p0 = self.inv_transform(p0)
        p1 = self.inv_transform(p1)
        pylab.plot((p0[0], p1[0]), (p0[1], p1[1]), c='green')

    for p0, p1 in self.hlines:
        p0 = self.inv_transform(p0)
        p1 = self.inv_transform(p1)
        pylab.plot((p0[0], p1[0]), (p0[1], p1[1]), c='red')

    pylab.axis('image')
    pylab.grid(c='yellow', lw=1)
    pylab.plt.yticks(np.arange(0, self.l, 100.0))
    pylab.xlim(0, self.w)
    pylab.ylim(self.l, 0)
Example #4
Source File: rectify.py From facade-segmentation with MIT License | 6 votes |
def _extract_lines(img, edges=None, mask=None, min_line_length=20, max_line_gap=3):
    """Detect line segments in *img* via a probabilistic Hough transform.

    :param img: RGB image; only used when *edges* is not supplied
    :param edges: optional precomputed boolean edge map
    :param mask: optional boolean mask ANDed into the edge map
    :param min_line_length: minimum accepted segment length (pixels)
    :param max_line_gap: maximum gap bridged within one segment (pixels)
    :return: numpy array of ((x0, y0), (x1, y1)) segment endpoints
    """
    # Module-level counter shared across calls (used for numbering debug output).
    global __i__
    __i__ += 1

    if edges is None:
        edges = canny(rgb2grey(img))
    if mask is not None:
        edges = edges & mask

    segments = probabilistic_hough_line(edges,
                                        line_length=min_line_length,
                                        line_gap=max_line_gap)
    return np.array(segments)
Example #5
Source File: model.py From facade-segmentation with MIT License | 6 votes |
def plot(self, overlay_alpha=0.5):
    """Show every feature layer as a colorized overlay on the base image.

    :param overlay_alpha: opacity of the label overlay (0 transparent, 1 opaque)
    """
    import pylab as pl

    # Arrange the layers in a near-square grid of subplots.
    rows = int(sqrt(self.layers()))
    cols = int(ceil(self.layers() / rows))

    for idx in range(rows * cols):
        pl.subplot(rows, cols, idx + 1)
        pl.axis('off')
        # Trailing grid cells beyond the last layer stay blank.
        if idx >= self.layers():
            continue
        pl.title('{}({})'.format(self.labels[idx], idx))
        pl.imshow(self.image)
        pl.imshow(colorize(self.features[idx].argmax(0),
                           colors=np.array([[0, 0, 255],
                                            [0, 255, 255],
                                            [255, 255, 0],
                                            [255, 0, 0]])),
                  alpha=overlay_alpha)
Example #6
Source File: import_labelme.py From facade-segmentation with MIT License | 6 votes |
def plot(self):
    """Plot each label layer over the color image (debugging aid).

    :return: the matplotlib figure holding one subplot per layer
    """
    import pylab as pl

    aspect = self.nrows / float(self.ncols)
    figure_width = 6  # inches
    rows = max(1, int(np.sqrt(self.nlayers)))
    cols = int(np.ceil(self.nlayers / rows))
    # Four-entry RGBA palette sampled evenly from the jet colormap.
    # noinspection PyUnresolvedReferences
    pallette = dict(enumerate(pl.cm.jet(np.linspace(0, 1, 4), bytes=True)))
    fig, axes = pl.subplots(rows, cols)
    fig.set_size_inches(6 * cols, 6 * rows)
    axes = axes.flatten()
    for layer_idx, label in enumerate(self.label_names):
        pl.sca(axes[layer_idx])
        pl.title(label)
        pl.imshow(self.color_data)
        pl.imshow(colorize(self.label_data[:, :, layer_idx], pallette), alpha=0.5)
    return fig
Example #7
Source File: utils_2dfmc.py From msaf with MIT License | 6 votes |
def compute_ffmc2d(X):
    """Computes the 2D-Fourier Magnitude Coefficients."""
    # 2-D FFT followed by the magnitude spectrum.
    spectrum = magnitude(scipy.fftpack.fft2(X))
    # Center the spectrum and flatten it to a 1-D vector.
    shifted = scipy.fftpack.fftshift(spectrum).flatten()
    # The magnitude spectrum is symmetric, so keep only the first half
    # (plus the center sample) and drop the redundant mirror image.
    return shifted[:shifted.shape[0] // 2 + 1]
Example #8
Source File: image_ocr.py From pCVR with Apache License 2.0 | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #9
Source File: megafacade.py From facade-segmentation with MIT License | 6 votes |
def _plot_background(self, bgimage):
    """Draw *bgimage* cropped to this facade, or widen the axes to fit it.

    :param bgimage: full rectified image, or None to only adjust the view
    """
    import pylab as pl

    # Extent of this facade within the rectified image.
    left, right = self.facade_left, self.facade_right
    top, bottom = 0, self.mega_facade.rectified.shape[0]

    if bgimage is not None:
        # Show only the slice of the image behind this facade.
        pl.imshow(bgimage[top:bottom, left:right],
                  extent=(left, right, bottom, top))
    else:
        # No image: grow the current axis limits to contain the facade.
        y0, y1 = pl.ylim()
        x0, x1 = pl.xlim()
        x0 = min(x0, left)
        x1 = max(x1, right)
        y0 = min(y0, top)
        y1 = max(y1, bottom)
        pl.xlim(x0, x1)
        pl.ylim(y1, y0)
Example #10
Source File: image_ocr.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #11
Source File: image_ocr.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #12
Source File: image_ocr.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #13
Source File: rnnrbm.py From bachbot with MIT License | 6 votes |
def generate(self, filename, show=True):
    """Generate a sample sequence, save it as a MIDI file, and optionally
    plot the resulting piano-roll.

    filename : string
        A MIDI file will be created at this location.
    show : boolean
        If True, a piano-roll of the generated sequence will be shown.
    """
    piano_roll = self.generate_function()
    midiwrite(filename, piano_roll, self.r, self.dt)
    if show:
        # x-axis spans the sequence duration in seconds; y-axis spans
        # the model's MIDI pitch range self.r.
        extent = (0, self.dt * len(piano_roll)) + self.r
        pylab.figure()
        pylab.imshow(piano_roll.T,
                     origin='lower',
                     aspect='auto',
                     interpolation='nearest',
                     cmap=pylab.cm.gray_r,
                     extent=extent)
        pylab.xlabel('time (s)')
        pylab.ylabel('MIDI note number')
        pylab.title('generated piano-roll')
Example #14
Source File: image_ocr.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #15
Source File: image_ocr.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #16
Source File: image_ocr.py From DeepLearning_Wavelet-LSTM with MIT License | 6 votes |
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: save weights and visualize decoded OCR samples.

    Saves the model weights for this epoch, reports edit distance on a
    256-word batch, then renders `num_display_words` inputs with their
    ground truth and decoded text to e<epoch>.png.

    :param epoch: zero-based epoch index, used in output file names
    :param logs: metrics dict supplied by Keras (unused; default changed
                 from the mutable `{}` anti-pattern to None — no behavior
                 change since it is never read)
    """
    self.model.save_weights(
        os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
    self.show_edit_distance(256)
    word_batch = next(self.text_img_gen)[0]
    res = decode_batch(self.test_func,
                       word_batch['the_input'][0:self.num_display_words])
    # Narrow inputs are shown two per row; wide ones get a full row each.
    if word_batch['the_input'][0].shape[0] < 256:
        cols = 2
    else:
        cols = 1
    for i in range(self.num_display_words):
        pylab.subplot(self.num_display_words // cols, cols, i + 1)
        # Channel axis position depends on the backend's image data format.
        if K.image_data_format() == 'channels_first':
            the_input = word_batch['the_input'][i, 0, :, :]
        else:
            the_input = word_batch['the_input'][i, :, :, 0]
        pylab.imshow(the_input.T, cmap='Greys_r')
        pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' %
                     (word_batch['source_str'][i], res[i]))
    fig = pylab.gcf()
    fig.set_size_inches(10, 13)
    pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
    pylab.close()
Example #17
Source File: fusion_pca.py From ImageFusion with MIT License | 5 votes |
def plot(self):
    """Show the two source images next to their fused result."""
    plt.figure(0)
    plt.gray()
    # Panels 1-2: input images; panel 3: the fusion result.
    panels = (self._images[0], self._images[1], self._fusionImage)
    for offset, image in enumerate(panels):
        plt.subplot(1, 3, offset + 1)
        plt.imshow(image)
    plt.show()
Example #18
Source File: sample_convnade.py From NADE with BSD 3-Clause "New" or "Revised" License | 5 votes |
def main():
    """Sample from a trained Conv Deep NADE model; optionally save and plot."""
    parser = buildArgsParser()
    args = parser.parse_args()

    # Load experiment hyperparameters; fall back to the parent directory
    # (some experiments store hyperparams.json one level up).
    try:
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(args.experiment, "hyperparams.json"))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed by the fallback.
        hyperparams = smartutils.load_dict_from_json_file(
            pjoin(args.experiment, '..', "hyperparams.json"))

    model = load_model(args.experiment)
    print(str(model))

    with Timer("Generating {} samples from Conv Deep NADE".format(args.count)):
        sample = model.build_sampling_function(seed=args.seed)
        samples, probs = sample(args.count, return_probs=True,
                                ordering_seed=args.seed)

    if args.out is not None:
        outfile = pjoin(args.experiment, args.out)
        with Timer("Saving {0} samples to '{1}'".format(args.count, outfile)):
            np.save(outfile, samples)

    if args.view:
        import pylab as plt
        from convnade import vizu

        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(hyperparams["dataset"]))

        # Show the sampled binary images and the per-pixel probabilities
        # in two separate figures.
        plt.figure()
        data = vizu.concatenate_images(samples, shape=image_shape,
                                       border_size=1, clim=(0, 1))
        plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
        plt.title("Samples")

        plt.figure()
        data = vizu.concatenate_images(probs, shape=image_shape,
                                       border_size=1, clim=(0, 1))
        plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
        plt.title("Probs")

        plt.show()
Example #19
Source File: fusion_dwb.py From ImageFusion with MIT License | 5 votes |
def plot(self):
    """Show the two source images next to their fused result."""
    plt.figure(0)
    plt.gray()
    # Panels 1-2: input images; panel 3: the fusion result.
    panels = (self._images[0], self._images[1], self._fusionImage)
    for offset, image in enumerate(panels):
        plt.subplot(1, 3, offset + 1)
        plt.imshow(image)
    plt.show()
Example #20
Source File: handdetector.py From LSPS with GNU General Public License v3.0 | 5 votes |
def estimateHandsize(self, contours, com, cube=(250, 250, 250), tol=0.):
    """
    Estimate hand size from contours
    :param contours: contours of hand
    :param com: center of mass
    :param cube: default cube
    :param tol: tolerance to be added to all sides
    :return: metric cube for cropping (x, y, z)
    """
    x, y, w, h = cv2.boundingRect(contours)

    # Back-project the bounding box from pixels to metric units with the
    # pinhole model (X = u * Z / f), using the depth at the center of mass.
    xstart = (com[0] - w / 2.) * com[2] / self.fx
    xend = (com[0] + w / 2.) * com[2] / self.fx
    ystart = (com[1] - h / 2.) * com[2] / self.fy
    yend = (com[1] + h / 2.) * com[2] / self.fy

    # Average the two metric extents to get one cube side, plus tolerance.
    side = ((xend - xstart) + (yend - ystart)) / 2.
    cube = (side + tol, side + tol, side + tol)
    return cube
Example #21
Source File: util.py From face-magnet with Apache License 2.0 | 5 votes |
def drawModel(mfeat, mode="black", parts=True):
    """Draw the HOG weights of an object model, one subplot per pyramid level.

    :param mfeat: list of HOG feature arrays, one per level
    :param mode: "white" or "black" rendering style
    :param parts: when True, outline each part box at every level
    """
    level_colors = ["r", "g", "b"]
    import drawHOG

    num_levels = len(mfeat)
    # Stack the subplots along the image's longer dimension.
    if mfeat[0].shape[0] > mfeat[0].shape[1]:
        sy, sx = 1, num_levels
    else:
        sy, sx = num_levels, 1

    for lvl in range(num_levels):
        pylab.subplot(sy, sx, lvl + 1)
        if mode == "white":
            drawHOG9(mfeat[lvl])
        elif mode == "black":
            img = drawHOG.drawHOG(mfeat[lvl])
            pylab.axis("off")
            pylab.imshow(img, cmap=pylab.cm.gray, interpolation="nearest")
        if parts == True:
            # Level l carries a 2^l x 2^l grid of part boxes.
            for bx in range(0, 2 ** lvl):
                for by in range(0, 2 ** lvl):
                    boxHOG(mfeat[0].shape[1] * bx,
                           mfeat[0].shape[0] * by,
                           mfeat[0].shape[1],
                           mfeat[0].shape[0],
                           level_colors[lvl],
                           5 - lvl)
Example #22
Source File: epipolar.py From dfc2019 with MIT License | 5 votes |
def show_rectified_images(rimg1, rimg2):
    """Display two rectified images side by side with epipolar scan lines.

    Horizontal green lines are drawn at the same rows in both panels so
    rectification quality can be judged by eye.
    """
    ax = pl.subplot(121)
    pl.imshow(rimg1, cmap=cm.gray)
    # Hack to get the lines span on the left image
    # http://stackoverflow.com/questions/6146290/plotting-a-line-over-several-graphs
    for row in range(1, rimg1.shape[0], int(rimg1.shape[0] / 20)):
        pl.axhline(y=row, color='g', xmin=0, xmax=1.2, clip_on=False)

    pl.subplot(122)
    pl.imshow(rimg2, cmap=cm.gray)
    for row in range(1, rimg1.shape[0], int(rimg1.shape[0] / 20)):
        pl.axhline(y=row, color='g')
Example #23
Source File: generate_figs.py From discrete_sieve with Apache License 2.0 | 5 votes |
def save_digit(z, filename, cmap=pylab.cm.gray):
    """Render a flattened 28x28 digit image to results/<filename>.pdf.

    :param z: array with 784 entries (reshaped to 28x28)
    :param filename: output file name without directory or extension
    :param cmap: colormap to use (defaults to grayscale)
    """
    pylab.clf()
    pylab.axis('off')
    # Pixel values are displayed over a fixed [-1, 1] range.
    digit = z.reshape((28, 28))
    pylab.imshow(digit, interpolation='nearest', cmap=cmap, vmin=-1, vmax=1)
    pylab.savefig('results/' + filename + '.pdf')
    pylab.clf()
Example #24
Source File: generate_figs.py From discrete_sieve with Apache License 2.0 | 5 votes |
def stack_digit(zs, filename):
    """Stack digit images vertically and save to results/<filename>.pdf.

    :param zs: iterable of flattened digit arrays (28 columns after reshape)
    :param filename: output file name without directory or extension
    """
    pylab.clf()
    # Frameless 1x3-inch figure whose single axis fills the whole canvas.
    fig = pylab.figure(frameon=False)
    fig.set_size_inches(1, 3)
    ax = pylab.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    stacked = np.vstack(zs).reshape((-1, 28))
    ax.imshow(stacked, interpolation='nearest', cmap=pylab.cm.gray)
    fig.savefig('results/' + filename + '.pdf')
    pylab.close('all')
Example #25
Source File: plot_kl_analysis.py From SelfTarget with MIT License | 5 votes |
def plotHeatMap(data, col='KL without null', label=''):
    """Plot a heat map of median KL divergence between sample pairs.

    :param data: DataFrame whose columns include pairwise comparison
                 columns containing *col* in their names
    :param col: substring selecting which comparison columns to use
    :param label: unused; kept for interface compatibility
    """
    # Compute and collate medians over the selected comparison columns.
    sel_cols = [x for x in data.columns if col in x]
    cmp_meds = data[sel_cols].median(axis=0)
    # Order samples by their cell line's position in this fixed list.
    # (Removed a dead `samples = sortSampleNames(...)` assignment whose
    # result was immediately overwritten below.)
    cell_lines = ['CHO', 'E14TG2A', 'BOB','RPE1', 'HAP1','K562','eCAS9','TREX2']
    sample_idxs = [(cell_lines.index(parseSampleName(x)[0]), x)
                   for x in getUniqueSamples(sel_cols)]
    sample_idxs.sort()
    samples = [x[1] for x in sample_idxs]
    N = len(samples)

    # Fill a symmetric N x N matrix of medians, one cell per sample pair.
    meds = np.zeros((N, N))
    for colname in sel_cols:
        dir1, dir2 = getDirsFromFilename(colname.split('$')[-1])
        idx1, idx2 = samples.index(dir1), samples.index(dir2)
        meds[idx1, idx2] = cmp_meds[colname]
        meds[idx2, idx1] = cmp_meds[colname]

    for i in range(N):
        print(' '.join(['%.2f' % x for x in meds[i, :]]))
    print(np.median(meds[:, :-4], axis=0))

    # Display in Heatmap
    PL.figure(figsize=(5, 5))
    PL.imshow(meds, cmap='hot_r', vmin=0.0, vmax=3.0, interpolation='nearest')
    PL.colorbar()
    PL.xticks(range(N))
    PL.yticks(range(N))
    PL.title("Median KL")  # between %d mutational profiles (for %s with >%d mutated reads)" % (col, len(data), label, MIN_READS)
    ax1 = PL.gca()
    ax1.set_yticklabels([getSimpleName(x) for x in samples], rotation='horizontal')
    ax1.set_xticklabels([getSimpleName(x) for x in samples], rotation='vertical')
    PL.subplots_adjust(left=0.25, right=0.95, top=0.95, bottom=0.25)
    PL.show(block=False)
    saveFig('median_kl_heatmap_cell_lines')
Example #26
Source File: iris_recognition.py From GmdhPy with MIT License | 5 votes |
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Draw a confusion matrix with iris class names on both axes.

    :param cm: square confusion-matrix array
    :param title: figure title
    :param cmap: matplotlib colormap for the heat map
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # Label ticks with iris target names; x labels rotated for space.
    marks = np.arange(len(iris.target_names))
    plt.xticks(marks, iris.target_names, rotation=45)
    plt.yticks(marks, iris.target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
Example #27
Source File: celllab_cts.py From landlab with MIT License | 5 votes |
def update_plot(self):
    """Plot the current node state grid."""
    plt.clf()
    if self.gridtype == "rast":
        # Raster grid: reshape the flat node-state vector and show it.
        raster = self.ca.grid.node_vector_to_raster(self.ca.node_state)
        plt.imshow(raster, interpolation="None", origin="lower",
                   cmap=self._cmap)
    else:
        # Hex grid: delegate to the grid's own hex plotting routine.
        self.ca.grid.hexplot(self.ca.node_state, color_map=self._cmap)
    plt.draw()
    plt.pause(0.001)
Example #28
Source File: read_dense.py From hfnet with MIT License | 5 votes |
def main():
    """Read a COLMAP depth/normal map pair and display the clipped depth map."""
    # Read depth and normal maps corresponding to the same image.
    depth_map = read_array(
        "path/to/dense/stereo/depth_maps/image1.jpg.photometric.bin")
    normal_map = read_array(
        "path/to/dense/stereo/normal_maps/image1.jpg.photometric.bin")

    # Clamp depths to the 5th-95th percentile range to suppress outliers
    # before visualization.
    min_depth, max_depth = np.percentile(depth_map, [5, 95])
    depth_map[depth_map < min_depth] = min_depth
    depth_map[depth_map > max_depth] = max_depth

    plt.imshow(depth_map)
    plt.show()
Example #29
Source File: eigenfaces.py From machine-learning with GNU General Public License v3.0 | 5 votes |
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of portraits"""
    pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for idx in range(n_row * n_col):
        pl.subplot(n_row, n_col, idx + 1)
        # Each flattened image is restored to its h x w shape for display.
        pl.imshow(images[idx].reshape((h, w)), cmap=pl.cm.gray)
        pl.title(titles[idx], size=12)
        pl.xticks(())
        pl.yticks(())


# plot the result of the prediction on a portion of the test set
Example #30
Source File: TensorFlowInterface.py From IntroToDeepLearning with MIT License | 5 votes |
def plotFields(layer, fieldShape=None, channel=None, figOffset=1, cmap=None, padding=0.01):
    """Plot a grid of a layer's receptive fields plus a second figure with
    their total absolute input dependency.

    :param layer: object exposing a `.W` weight tensor, or the tensor itself
    :param fieldShape: [h, w] used to reshape fully connected weights
    :param channel: for conv layers, show only this input channel
    :param figOffset: figure number for the grid (figOffset+1 for the sum)
    :param cmap: matplotlib colormap
    :param padding: spacing between grid cells
    """
    # Receptive Fields Summary
    # Accept either an object exposing .W or a raw weight tensor.
    # Narrowed from a bare `except:` so only the missing-attribute case
    # triggers the fallback; other errors propagate.
    try:
        W = layer.W
    except AttributeError:
        W = layer
    wp = W.eval().transpose()

    if len(np.shape(wp)) < 4:  # Fully connected layer, has no shape
        fields = np.reshape(wp, list(wp.shape[0:-1]) + fieldShape)
    else:  # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features * channels, iy, ix])

    # Near-square grid large enough to hold every field.
    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0] / float(perRow)))

    fig = mpl.figure(figOffset)
    mpl.clf()
    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig, 111, nrows_ncols=(perRow, perColumn),
                     axes_pad=padding, cbar_mode='single')
    for i in range(0, np.shape(fields)[0]):
        im = grid[i].imshow(fields[i], cmap=cmap)
        grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    # Second figure: total absolute contribution of each input position.
    # (Removed a large commented-out "old way" tiling implementation.)
    mpl.figure(figOffset + 1)
    mpl.clf()
    mpl.imshow(np.sum(np.abs(fields), 0), cmap=cmap)
    mpl.title('%s Total Absolute Input Dependency' % layer.name)
    mpl.colorbar()