Python sklearn.feature_extraction.image.extract_patches_2d() Examples

The following are 7 code examples of sklearn.feature_extraction.image.extract_patches_2d(), drawn from open-source projects. You can go to the original project or source file via the attribution line above each example. You may also want to check out all available functions/classes of the module sklearn.feature_extraction.image.
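Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the array, variable names and sizes are illustrative only) of the basic call: patch_size is given as (patch_height, patch_width), max_patches switches from exhaustive extraction of all overlapping patches to random sampling, and the companion function reconstruct_from_patches_2d averages overlapping patches back into an image of the original size.

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d

rng = np.random.RandomState(0)
img = rng.rand(64, 64, 3)  # any 2-D image, optionally with a trailing channel axis

# all overlapping 8x8 patches: shape (57 * 57, 8, 8, 3)
all_patches = extract_patches_2d(img, (8, 8))

# a random sample of 10 patches: shape (10, 8, 8, 3)
some_patches = extract_patches_2d(img, (8, 8), max_patches=10, random_state=rng)

# overlapping patches can be averaged back into an image of the original size
recon = reconstruct_from_patches_2d(all_patches, img.shape)
print(all_patches.shape, some_patches.shape, recon.shape)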
Example #1
Source File: patch_preprocessor.py    From aiexamples with Apache License 2.0
def preprocess(self, image):
    # extract a single random crop from the image; note that sklearn's
    # extract_patches_2d takes the patch size as (patch_height, patch_width)
    return extract_patches_2d(image, (self.height, self.width), max_patches=1)[0]
Example #2
Source File: DataLoader.py    From rank-ordered-autoencoder with GNU General Public License v3.0
def iterate_cifar(shapeInput, batch_size, shuffle=False, train=True):
    # iterator over patches of the cifar10 data set.
    files = []
    if train:
        for j in range(1, 6):
            files.append('data_batch_'+str(j))
    else:
        # the test set lives in a single file, so append it once
        files.append('test_batch')
    data_idxs = np.random.permutation(len(files))
    data = []
    labels = []
    for j in range(len(files)):
        data_idx = j
        if shuffle:
            data_idx = data_idxs[j]
        file = files[data_idx]
        # note: hardcoded local path to the CIFAR-10 python batches
        batch = unpickle('C:\\Paul\\cifar-10-batches-py\\'+file)
        ls = batch['labels']
        idxs = np.random.permutation(len(batch['data']))
        for i in range(len(batch['data'])):
            if shuffle:
                idx = idxs[i]
            else:
                idx = i
            # each CIFAR row stores the R, G and B planes back to back; stack them into a 32x32x3 image
            stackedArray = np.dstack((batch['data'][idx][0:1024].reshape(32, 32),
                                      batch['data'][idx][1024:1024 * 2].reshape(32, 32),
                                      batch['data'][idx][1024 * 2:1024 * 3].reshape(32, 32)))
            patches = image.extract_patches_2d(stackedArray, (shapeInput[0], shapeInput[1]), max_patches=1)
            #max = patches.max()+1.e-6
            patches = patches.astype(np.float32) / 256.0
            data.append(patches)
            labels.append(ls[idx])
            if len(data)>=batch_size:
                array = np.asarray(data).reshape(-1, shapeInput[0]*shapeInput[1]*3)
                data = []
                labels = []
                #print(len(dict['data'])*len(files)*patches.shape[0])
                yield array 
Example #3
Source File: Segmentation_Models.py    From brain_segmentation with MIT License
def predict_image(self, test_img, show=False):
        '''
        predicts classes of input image
        INPUT   (1) str 'test_img': filepath to the image to predict on
                (2) bool 'show': True to show the results of prediction, False to return prediction
        OUTPUT  (1) if show == False: array of predicted pixel classes for the center 208 x 208 pixels
                (2) if show == True: displays segmentation results
        '''
        imgs = io.imread(test_img).astype('float').reshape(5,240,240)
        plist = []

        # create patches from an entire slice
        for img in imgs[:-1]:
            if np.max(img) != 0:
                img /= np.max(img)
            p = extract_patches_2d(img, (33,33))
            plist.append(p)
        patches = np.array(list(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3]))))  # list() keeps this Python 3 compatible

        # predict classes of each pixel based on model
        full_pred = self.model_comp.predict_classes(patches)
        fp1 = full_pred.reshape(208,208)
        if show:
            io.imshow(fp1)
            plt.show()
        else:
            return fp1 
Example #4
Source File: patch_library.py    From brain_segmentation with MIT License
def slice_to_patches(self, filename):
        '''
        Converts an image to a list of patches with a stride length of 1. Use as input for image prediction.
        INPUT: str 'filename': path to image to be converted to patches
        OUTPUT: list of patched versions of the input image.
        '''
        slices = io.imread(filename).astype('float').reshape(5,240,240)[:-1]
        plist=[]
        for img in slices:
            if np.max(img) != 0:
                img /= np.max(img)
            p = extract_patches_2d(img, (h, w))  # h, w: patch height and width
            plist.append(p)
        return np.array(list(zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3]))))
Example #5
Source File: denoising.py    From sparselandtools with MIT License
def denoise(self, image, sigma=3, multiplier=10, n_iter=15, patch_size=8, noise_gain=1.15):
        # promote values to super
        self.noise_gain = noise_gain
        self.sigma = sigma

        # error handling
        if image.shape[0] != image.shape[1]:
            raise ValueError("Image must be square!")

        # set initial values (sigma was already stored above)
        self.image = image
        self.multiplier = multiplier
        self.n_iter = n_iter
        self.patch_size = patch_size

        # compute further values
        self.image_size = image.shape[0]

        # prepare K-SVD
        patches = extract_patches_2d(self.image, (self.patch_size, self.patch_size))
        Y = np.array([p.reshape(self.patch_size**2) for p in patches]).T

        # iterate K-SVD
        for itr in range(self.n_iter):
            self.sparse_coding(Y)
            self.dictionary_update(Y)

        # reconstruct image
        # this was translated from the Matlab code in Michael Elad's book
        # cf. Elad, M. (2010). Sparse and redundant representations:
        # from theory to applications in signal and image processing. New York: Springer.
        out = np.zeros(image.shape)
        weight = np.zeros(image.shape)
        logging.info("reconstructing")
        i = j = 0
        for k in range((self.image_size - self.patch_size + 1) ** 2):
            patch = np.reshape(np.matmul(self.dictionary.matrix, self.alphas[:, k]), (self.patch_size, self.patch_size))
            out[j:j + self.patch_size, i:i + self.patch_size] += patch
            weight[j:j + self.patch_size, i:i + self.patch_size] += 1
            if i < self.image_size - self.patch_size:
                i += 1
            else:
                i = 0
                j += 1
        out = np.divide(out + self.multiplier * self.image, weight + self.multiplier)
        return out, self.dictionary, self.alphas 
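As an aside (not part of the sparselandtools project), the accumulation loop above performs the same uniform averaging of overlapping patches that sklearn's reconstruct_from_patches_2d implements; the extra multiplier term then blends that average with the noisy input, so reconstruct_from_patches_2d is not a drop-in replacement here. A toy sketch of the plain averaging step, with illustrative names and under the assumption that the patches are left unmodified:

import numpy as np
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d

image = np.random.rand(32, 32)
patches = extract_patches_2d(image, (8, 8))
# in the denoiser above, each patch would be replaced by its sparse approximation here
averaged = reconstruct_from_patches_2d(patches, image.shape)
assert np.allclose(averaged, image)  # averaging unmodified overlapping patches recovers the image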
Example #6
Source File: patch_matcher.py    From image-analogies with MIT License
def make_patch_grid(x, patch_size, patch_stride=1):
    '''x shape: (num_channels, rows, cols)'''
    x = x.transpose(2, 1, 0)
    patches = extract_patches_2d(x, (patch_size, patch_size))
    x_w, x_h, x_c = x.shape
    num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
    patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
    patches = patches.transpose((0, 1, 4, 2, 3))
    #patches = np.rollaxis(patches, -1, 2)
    return patches 
Example #7
Source File: data.py    From ALISTA with MIT License
def dir2tfrecords_cs (data_dir, out_path, Phi, patch_size, patches_per_image, suffix):
    Phi = Phi.astype (np.float32)
    if isinstance (patch_size, int):
        patch_size = (patch_size, patch_size)  # square patches when a single int is given

    if not out_path.endswith(".tfrecords"):
        out_path += ".tfrecords"
    writer = tf.python_io.TFRecordWriter (out_path)
    for fn in tqdm (glob.glob (os.path.join (data_dir, "*." + suffix))) :
        """Read images (and convert to grayscale)."""
        im = Image.open (fn)
        if im.mode == 'RGB':
            im = im.convert ('L')
        im = np.asarray (im)

        """Extract patches."""
        patches = extract_patches_2d (im, patch_size)
        perm = np.random.permutation (len (patches))
        patches = patches [perm [:patches_per_image]]

        """Vectorize patches."""
        fs = patches.reshape (len (patches), -1)

        """Demean and normalize."""
        fs = fs - np.mean (fs, axis=1, keepdims=True)
        fs = (fs / 255.0).astype (np.float32)

        """Measure the signal using sensing matrix `Phi`."""
        ys = np.transpose (Phi.dot (np.transpose (fs)))

        """Write singals and measurements to tfrecords file."""
        for y, f in zip (ys, fs):
            yraw = y.tostring ()
            fraw = f.tostring ()
            example = tf.train.Example (features=tf.train.Features (
                feature={
                    'y': _bytes_feature (yraw),
                    'f': _bytes_feature (fraw)
                }
            ))

            writer.write (example.SerializeToString ())

    writer.close ()