Python skimage.data.camera() Examples
The following are 5 code examples of skimage.data.camera().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module skimage.data, or try the search function.
Example #1
Source File: MR.py From mr_saliency with GNU General Public License v2.0 | 5 votes |
def MR_showsuperpixel(self, img=None):
    """Display the superpixel segmentation of an image.

    Reads the image through the class's preprocessing helper, computes the
    superpixel labels, and shows the image with superpixel boundaries marked.

    Parameters
    ----------
    img : array-like, optional
        Input image in OpenCV BGR channel order. When None, falls back to
        the skimage ``camera()`` test image.
    """
    # Fix: the original used `img == None`, which is both unidiomatic and
    # broken for numpy arrays (elementwise comparison -> ambiguous truth value).
    if img is None:
        # NOTE(review): camera() is grayscale in recent skimage releases, so
        # COLOR_RGB2BGR may fail there — confirm the pinned skimage version.
        img = cv2.cvtColor(camera(), cv2.COLOR_RGB2BGR)
    # Name-mangled calls reach private helpers on the MR_saliency base class.
    img = self._MR_saliency__MR_readimg(img)
    labels = self._MR_saliency__MR_superpixel(img)
    plt.axis('off')
    plt.imshow(mark_boundaries(img, labels))
    plt.show()
Example #2
Source File: test_graphs.py From pygsp with BSD 3-Clause "New" or "Revised" License | 5 votes |
def setUpClass(cls):
    """Build the shared, expensive test fixtures once per test class.

    Prepares the pygsp Logo graph (with its Fourier basis precomputed), a
    seeded random signal on that graph, and a heavily downsampled float
    version of the skimage camera image.
    """
    logo_graph = graphs.Logo()
    logo_graph.compute_fourier_basis()
    rng = np.random.RandomState(42)  # fixed seed keeps tests deterministic
    cls._G = logo_graph
    cls._rs = rng
    cls._signal = rng.uniform(size=logo_graph.N)
    # Every 16th pixel in each axis -> small image, fast tests.
    cls._img = img_as_float(data.camera()[::16, ::16])
Example #3
Source File: test_plotting.py From pygsp with BSD 3-Clause "New" or "Revised" License | 5 votes |
def setUpClass(cls):
    """Cache a small float test image shared by every test in the class."""
    # Take every 16th pixel in each axis so plotting tests stay fast.
    downsampled = data.camera()[::16, ::16]
    cls._img = img_as_float(downsampled)
Example #4
Source File: profile_torchkbnufft.py From torchkbnufft with MIT License | 4 votes |
def run_all_profiles():
    """Profile torchkbnufft on a standard golden-angle radial problem.

    Builds a 256x256 complex test image from the skimage camera picture and a
    405-spoke golden-angle radial k-space trajectory (2-factor oversampling,
    spoke length 512), then runs the profiler on CPU and CUDA with sparse
    matrices off, sparse matrices on, and the Toeplitz pathway.
    """
    print('running profiler...')

    spokelength = 512
    nspokes = 405
    ncoil = 15

    print('problem size (radial trajectory, 2-factor oversampling):')
    print('number of coils: {}'.format(ncoil))
    print('number of spokes: {}'.format(nspokes))
    print('spokelength: {}'.format(spokelength))

    # Create an example image to run on. Fix: `np.complex` was a deprecated
    # alias for the builtin `complex` and was removed in NumPy 1.24 — use the
    # explicit dtype instead.
    image = np.array(Image.fromarray(camera()).resize((256, 256)))
    image = image.astype(np.complex128)
    im_size = image.shape
    # Stack real/imag parts into a channel axis, then add batch and coil dims.
    image = np.stack((np.real(image), np.imag(image)))
    image = torch.tensor(image).unsqueeze(0).unsqueeze(0)

    # Golden-angle radial k-space trajectory: each spoke is the previous one
    # rotated by the golden angle (~111.25 degrees).
    ga = np.deg2rad(180 / ((1 + np.sqrt(5)) / 2))
    kx = np.zeros(shape=(spokelength, nspokes))
    ky = np.zeros(shape=(spokelength, nspokes))
    ky[:, 0] = np.linspace(-np.pi, np.pi, spokelength)
    for i in range(1, nspokes):
        kx[:, i] = np.cos(ga) * kx[:, i - 1] - np.sin(ga) * ky[:, i - 1]
        ky[:, i] = np.sin(ga) * kx[:, i - 1] + np.cos(ga) * ky[:, i - 1]

    ky = np.transpose(ky)
    kx = np.transpose(kx)

    ktraj = np.stack((ky.flatten(), kx.flatten()), axis=0)
    ktraj = torch.tensor(ktraj).unsqueeze(0)

    # Trivial (all-ones) sensitivity maps, one per coil.
    smap_sz = (1, ncoil, 2) + im_size
    smap = torch.ones(*smap_sz)

    # Same six configurations as before, in the same order:
    # for each device, sparse mats off, sparse mats on, then Toeplitz.
    for device_name in ('cpu', 'cuda'):
        device = torch.device(device_name)
        profile_torchkbnufft(image, ktraj, smap, im_size, device=device,
                             sparse_mats_flag=False)
        profile_torchkbnufft(image, ktraj, smap, im_size, device=device,
                             sparse_mats_flag=True)
        profile_torchkbnufft(image, ktraj, smap, im_size, device=device,
                             use_toep=True)
Example #5
Source File: MR.py From mr_saliency with GNU General Public License v2.0 | 4 votes |
def MR_boundary_saliency(self, img=None):
    """Visualize per-boundary saliency maps alongside the integrated result.

    Computes a saliency map seeded from each of the four image boundaries
    (up, right, low, left) using the manifold-ranking helpers on the base
    class, then shows them in a 3x2 grid together with the original image
    and the final integrated saliency map.

    Parameters
    ----------
    img : array-like, optional
        Input image in OpenCV BGR channel order. When None, falls back to
        the skimage ``camera()`` test image.
    """
    # Fix: the original used `img == None`, which is both unidiomatic and
    # broken for numpy arrays (elementwise comparison -> ambiguous truth value).
    if img is None:
        img = cv2.cvtColor(camera(), cv2.COLOR_RGB2BGR)

    lab_img = self._MR_saliency__MR_readimg(img)
    labels = self._MR_saliency__MR_superpixel(lab_img)
    up, right, low, left = self._MR_saliency__MR_boundary_indictor(labels)
    aff = self._MR_saliency__MR_affinity_matrix(lab_img, labels)

    # One saliency image per boundary seed; `1 - ...` flips the ranking so
    # high values mean salient. Order matches the subplot layout below.
    boundary_imgs = []
    for indicator in (up, right, low, left):
        sal = 1 - self._MR_saliency__MR_saliency(aff, indicator)
        filled = self._MR_saliency__MR_fill_superpixel_with_saliency(labels, sal)
        boundary_imgs.append(filled.astype(np.uint8))

    plt.subplot(3, 2, 1)
    plt.title('original')  # fix: was misspelled 'orginal'
    plt.axis('off')
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

    for pos, (title, bimg) in enumerate(
            zip(('up', 'right', 'low', 'left'), boundary_imgs), start=2):
        plt.subplot(3, 2, pos)
        plt.title(title)
        plt.axis('off')
        plt.imshow(bimg, 'gray')

    plt.subplot(3, 2, 6)
    plt.title('integrated')
    plt.axis('off')
    saliency_map = MR_debuger().saliency(img).astype(np.uint8)
    plt.imshow(saliency_map, 'gray')

    plt.show()