Python scipy.ndimage.imread() Examples

The following are 30 code examples of scipy.ndimage.imread(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.ndimage, or try the search function.
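For quick reference, here is a minimal sketch of the call these examples revolve around. Note that scipy.ndimage.imread() was deprecated in SciPy 1.0.0 and removed in SciPy 1.2.0, so on recent installs imageio.imread() is the usual near drop-in replacement; the file name below is hypothetical:

import numpy as np

try:
    # Works on SciPy < 1.2 only (deprecated since SciPy 1.0.0).
    from scipy.ndimage import imread
    img = imread('example.png', mode='RGB')          # (H, W, 3) uint8 ndarray
except ImportError:
    # On newer SciPy installs, imageio provides a compatible reader.
    import imageio
    img = np.asarray(imageio.imread('example.png'))  # RGB for typical PNG/JPEG

print(img.shape, img.dtype)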
Example #1
Source File: spectrum_painter.py    From spectrum_painter with MIT License
def convert_image(self, filename):
        pic = img.imread(filename)
        # Set FFT size to be double the image size so that the edge of the spectrum stays clear
        # preventing some bandfilter artifacts
        self.NFFT = 2*pic.shape[1]

        # Repeat image lines until each one comes often enough to reach the desired line time
        ffts = (np.flipud(np.repeat(pic[:, :, 0], self.repetitions, axis=0) / 16.)**2.) / 256.

        # Embed image in center bins of the FFT
        fftall = np.zeros((ffts.shape[0], self.NFFT))
        startbin = int(self.NFFT/4)
        fftall[:, startbin:(startbin+pic.shape[1])] = ffts

        # Generate random phase vectors for the FFT bins, this is important to prevent high peaks in the output
        # The phases won't be visible in the spectrum
        phases = 2*np.pi*np.random.rand(*fftall.shape)
        rffts = fftall * np.exp(1j*phases)

        # Perform the FFT per image line, then concatenate them to form the final signal
        timedata = np.fft.ifft(np.fft.ifftshift(rffts, axes=1), axis=1) / np.sqrt(float(self.NFFT))
        linear = timedata.flatten()
        linear = linear / np.max(np.abs(linear))
        return linear 
Example #2
Source File: test_snapping.py    From PReMVOS with MIT License
def do_seq(seq):
  files = sorted(glob.glob(preds_path + seq + "/*.pickle"))
  for f in files:
    pred_path = f
    im_path, superpixel_path, out_path = convert_path(f)
    im = imread(im_path)
    pred = pickle.load(open(pred_path, "rb"))  # pickles must be opened in binary mode on Python 3
    superpixels = loadmat(superpixel_path)["superpixels"]
    res = apply_snapping(superpixels, pred).astype("uint8") * 255
    # before = numpy.argmax(pred, axis=2)
    dir_ = "/".join(out_path.split("/")[:-1])
    mkdir_p(dir_)
    imsave(out_path, res)
    print(out_path)

    #TODO: compute iou as well

    # plt.imshow(before)
    # plt.figure()
    # plt.imshow(res)
    # plt.show() 
Example #3
Source File: generate_example_images.py    From ViolenceDetection with Apache License 2.0
def save(fp, image, quality=75):
    image_jpg = compress_to_jpg(image, quality=quality)
    image_jpg_decompressed = decompress_jpg(image_jpg)

    # If the image file already exists and is (practically) identical,
    # then don't save it again to avoid polluting the repository with tons
    # of image updates.
    # Note that we have to compare here the results AFTER jpg compression
    # and then decompression. Otherwise we would compare two images of which
    # image (1) has never been compressed while image (2) was compressed and
    # then decompressed.
    if os.path.isfile(fp):
        image_saved = ndimage.imread(fp, mode="RGB")
        #print("arrdiff", arrdiff(image_jpg_decompressed, image_saved))
        same_shape = (image_jpg_decompressed.shape == image_saved.shape)
        d_avg = arrdiff(image_jpg_decompressed, image_saved) if same_shape else -1
        if same_shape and d_avg <= 1.0:
            print("[INFO] Did not save image '%s', because the already saved image is basically identical (d_avg=%.8f)" % (fp, d_avg,))
            return
        else:
            print("[INFO] Saving image '%s'..." % (fp,))

    with open(fp, "wb") as f:  # JPEG data is binary, so open in "wb" mode
        f.write(image_jpg) 
Example #4
Source File: prep_data.py    From recipe-summarization with MIT License
def load_images(img_dims):
    """Load all images into a dictionary with filename as the key and numpy image array as the value."""
    image_list = {}
    for root, dirnames, filenames in os.walk(config.path_img):
        for filename in filenames:
            if re.search(r"\.(jpg|jpeg|png|bmp|tiff)$", filename):
                filepath = os.path.join(root, filename)
                try:
                    image = ndimage.imread(filepath, mode="RGB")
                except OSError:
                    print('Could not load image {}'.format(filepath))
                    continue  # skip unreadable files instead of reusing a stale image
                image_resized = misc.imresize(image, img_dims)
                if np.random.random() > 0.5:
                    # Flip horizontally with probability 50%
                    image_resized = np.fliplr(image_resized)
                image_list[filename.split('.')[0]] = image_resized
    print('Loaded {:,} images from disk'.format(len(image_list)))
    return image_list 
Example #5
Source File: imgaug.py    From ViolenceDetection with Apache License 2.0
def quokka(size=None):
    """
    Returns an image of a quokka as a numpy array.

    Parameters
    ----------
    size : None or float or tuple of two ints, optional(default=None)
        Size of the output image. Input into scipy.misc.imresize.
        Usually expected to be a tuple (H, W), where H is the desired height
        and W is the width. If None, then the image will not be resized.

    Returns
    -------
    img : (H,W,3) ndarray
        The image array of dtype uint8.

    """
    img = ndimage.imread(QUOKKA_FP, mode="RGB")
    if size is not None:
        img = misc.imresize(img, size)
    return img 
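A short usage sketch for the helper above, assuming imgaug's bundled QUOKKA_FP image is present on disk:

img_full = quokka()                  # full-resolution (H, W, 3) uint8 array
img_small = quokka(size=(128, 128))  # resized via scipy.misc.imresize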
Example #6
Source File: OMNIGLOTClassifier.py    From DeROL with MIT License
def LoadImgAsPoints(self, fn):
        if(fn in self.image_points_cache):
            return self.image_points_cache[fn]

        I = imread(fn, flatten=True)
        I = np.asarray(imresize(I, size=self.image_size), dtype=np.float32)
        I[I<255] = 0
        I = np.array(I, dtype=bool)
        I = np.logical_not(I)
        (row, col) = I.nonzero()
        D = np.array([row, col])
        D = np.transpose(D)
        D = D.astype(float)
        n = D.shape[0]
        mean = np.mean(D, axis=0)
        for i in range(n):
            D[i, :] = D[i, :] - mean

        self.image_points_cache[fn] = D
        return D 
Example #7
Source File: eval_youtube_nonfull.py    From PReMVOS with MIT License
def eval_sequence(gt_folder, recog_folder):
  seq = gt_folder.split("/")[-7] + "_" + gt_folder.split("/")[-5]
  gt_files = sorted(glob.glob(gt_folder + "/*.jpg"))

  #checks
  #if not gt_files[0].endswith("00001.jpg"):
  #  print "does not start with 00001.jpg!", gt_files[0]
  #indices = [int(f.split("/")[-1][:-4]) for f in gt_files]
  #if not (numpy.diff(indices) == 10).all():
  #  print "no spacing of 10:", gt_files

  gt_files = gt_files[1:]
  recog_folder_seq = recog_folder + seq + "/"
  print(recog_folder_seq, end=' ')
  recog_files = [gt_file.replace(gt_folder, recog_folder_seq).replace(".jpg", ".png") for gt_file in gt_files]

  ious = []
  for gt_file, recog_file in zip(gt_files, recog_files):
    gt = imread(gt_file) / 255
    recog = imread(recog_file) / 255
    iou = compute_iou_for_binary_segmentation(recog, gt)
    ious.append(iou)
  return numpy.mean(ious) 
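compute_iou_for_binary_segmentation() is defined elsewhere in PReMVOS; as a point of reference, here is a minimal sketch of what such a helper typically computes (an assumption, not the project's actual implementation):

import numpy

def compute_iou_for_binary_segmentation(recog, gt):
    # Intersection-over-union of two binary masks; two empty masks count as a match.
    recog = recog.astype(bool)
    gt = gt.astype(bool)
    union = numpy.logical_or(recog, gt).sum()
    if union == 0:
        return 1.0
    return numpy.logical_and(recog, gt).sum() / float(union)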
Example #8
Source File: eval_youtube.py    From PReMVOS with MIT License
def eval_sequence(gt_folder, recog_folder):
  seq = gt_folder.split("/")[-7] + "_" + gt_folder.split("/")[-5]
  gt_files = sorted(glob.glob(gt_folder + "/*.jpg"))

  gt_files = gt_files[1:]
  recog_folder_seq = recog_folder + seq + "/"
  
  #for full dataset
  recog_files = []
  for gt_file in gt_files:
    idx = int(gt_file.split("/")[-1].replace(".jpg", ""))
    ending = "frame%04d.png" % idx
    recog_file = recog_folder_seq + ending
    recog_files.append(recog_file)

  ious = []
  for gt_file, recog_file in zip(gt_files, recog_files):
    gt = imread(gt_file) / 255
    recog = imread(recog_file) / 255
    iou = compute_iou_for_binary_segmentation(recog, gt)
    ious.append(iou)
  return numpy.mean(ious) 
Example #9
Source File: combine_single_object_predictions_crf.py    From PReMVOS with MIT License
def run_multiclass_crf(seq, fn, posteriors, softmax_scale, sxy1, compat1, sxy2, compat2, srgb):
  im_fn = DAVIS2017_DIR + "JPEGImages/480p/" + seq + "/" + fn.replace(".pickle", ".jpg")
  im = imread(im_fn)
  nlabels = posteriors.shape[-1]

  im = numpy.ascontiguousarray(im)
  pred = numpy.ascontiguousarray(posteriors.swapaxes(0, 2).swapaxes(1, 2))

  d = dcrf.DenseCRF2D(im.shape[1], im.shape[0], nlabels)  # width, height, nlabels
  unaries = unary_from_softmax(pred, scale=softmax_scale)
  d.setUnaryEnergy(unaries)

  d.addPairwiseGaussian(sxy=sxy1, compat=compat1)
  d.addPairwiseBilateral(sxy=sxy2, srgb=srgb, rgbim=im, compat=compat2)
  processed = d.inference(12)
  res = numpy.argmax(processed, axis=0).reshape(im.shape[:2])
  return res 
Example #10
Source File: data_utils.py    From LSH_Memory with Apache License 2.0
def crawl_directory(directory, augment_with_rotations=False, first_label=0):
  """Crawls data directory and returns stuff."""
  label_idx = first_label
  images = []
  labels = []
  info = []

  # traverse root directory
  for root, _, files in os.walk(directory):
    logging.info('Reading files from %s', root)

    for file_name in files:
      full_file_name = os.path.join(root, file_name)
      img = imread(full_file_name, flatten=True)
      for idx, angle in enumerate([0, 90, 180, 270]):
        if not augment_with_rotations and idx > 0:
          break

        images.append(imrotate(img, angle))
        labels.append(label_idx + idx)
        info.append(full_file_name)

    if len(files) == 20:
      label_idx += 4 if augment_with_rotations else 1
  return images, labels, info 
Example #11
Source File: crf_youtube.py    From PReMVOS with MIT License
def do_seq(seq, model, save=True):
  preds_path = preds_path_prefix + model + "/valid/"
  files = sorted(glob.glob(preds_path + seq + "/*.pickle"))
  for f in files:
    pred_path = f
    im_path, out_path = convert_path(f)
    pred = pickle.load(open(pred_path, "rb"))  # pickles must be opened in binary mode on Python 3
    im = imread(im_path)
    res = apply_crf(im, pred).astype("uint8") * 255
    # before = numpy.argmax(pred, axis=2)
    if save:
      dir_ = "/".join(out_path.split("/")[:-1])
      mkdir_p(dir_)
      imsave(out_path, res)

    print(out_path) 
Example #12
Source File: multi_mnist.py    From tf-attend-infer-repeat with MIT License
def read_image(path, max_intensity):
    image = nd.imread(path, mode="L")
    image = np.asarray(image, dtype=np.float32) / 255.0
    img_min, img_max = image.min(), image.max()

    if img_min != img_max:
        if img_min > 0.0:
            image -= img_min
        if img_max > 0.0:
            image /= img_max
        if max_intensity < 1.0:
            image *= max_intensity
    else:
        if img_max > max_intensity:
            image = np.ones_like(image) * max_intensity

    return image 
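A hedged usage sketch (the file path is hypothetical): the helper returns a float32 array rescaled into [0, max_intensity]:

img = read_image('digit.png', max_intensity=0.9)
assert 0.0 <= img.min() and img.max() <= 0.9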
Example #13
Source File: dataset.py    From Unet_pytorch with MIT License
def __getitem__(self, idx):
    if self.train:
      img_path, gt_path = self.train_set_path[idx]

      img = imread(img_path)
      img = img[0:self.nRow, 0:self.nCol]
      img = np.atleast_3d(img).transpose(2, 0, 1).astype(np.float32)
      img = (img - img.min()) / (img.max() - img.min())
      img = torch.from_numpy(img).float()

      gt = imread(gt_path)[0:self.nRow, 0:self.nCol]
      gt = np.atleast_3d(gt).transpose(2, 0, 1)
      gt = gt / 255.0
      gt = torch.from_numpy(gt).float()

      return img, gt 
Example #14
Source File: load_data.py    From kaggle-galaxies with BSD 3-Clause "New" or "Revised" License
def load_images_from_jpg(subset="train", downsample_factor=None, normalise=True, from_ram=False):
    if from_ram:
        pattern = "/dev/shm/images_%s_rev1/*.jpg"
    else:
        pattern = "data/raw/images_%s_rev1/*.jpg"
    paths = glob.glob(pattern % subset)
    paths.sort() # alphabetic ordering is used everywhere.
    for path in paths:
        # img = ndimage.imread(path)
        img = skimage.io.imread(path)
        if normalise:
            img = img.astype('float32') / 255.0 # normalise and convert to float

        if downsample_factor is None:
            yield img
        else:
            yield img[::downsample_factor, ::downsample_factor] 
Example #15
Source File: generate_encoded_submission.py    From kaggle-carvana-2017 with MIT License
def encoder(in_queue, threshold, generated_masks, time_counts):
    while True:
        img_name, mask_img_path = in_queue.get()

        if img_name is None:
            break

        t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
        mask_img = ndimage.imread(mask_img_path, mode='L')
        mask_img[mask_img <= threshold] = 0
        mask_img[mask_img > threshold] = 1
        time_counts['time_read'].append(time.perf_counter() - t0)

        t0 = time.perf_counter()
        rle = rle_encode(mask_img)
        time_counts['time_rle'].append(time.perf_counter() - t0)

        t0 = time.perf_counter()
        rle_string = rle_to_string(rle)
        time_counts['time_stringify'].append(time.perf_counter() - t0)

        generated_masks.append((img_name, rle_string)) 
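rle_encode() and rle_to_string() live elsewhere in the repository; here is a minimal sketch of the usual Kaggle-style run-length encoding they most likely implement (an assumption, not necessarily the kaggle-carvana-2017 source):

import numpy as np

def rle_encode(mask):
    # Kaggle-style RLE: pixels are 1-indexed and numbered top to bottom,
    # then left to right, hence the column-major (Fortran-order) flatten.
    pixels = np.concatenate([[0], mask.flatten(order='F'), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]  # turn (start, end+1) pairs into (start, length)
    return runs

def rle_to_string(rle):
    return ' '.join(str(x) for x in rle)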
Example #16
Source File: test_io.py    From GraphicDesignPatternByPython with MIT License
def test_imread():
    lp = os.path.join(os.path.dirname(__file__), 'dots.png')
    with suppress_warnings() as sup:
        # PIL causes a Py3k ResourceWarning
        sup.filter(message="unclosed file")
        sup.filter(DeprecationWarning)
        img = ndi.imread(lp, mode="RGB")
    assert_array_equal(img.shape, (300, 420, 3))

    with suppress_warnings() as sup:
        # PIL causes a Py3k ResourceWarning
        sup.filter(message="unclosed file")
        sup.filter(DeprecationWarning)
        img = ndi.imread(lp, flatten=True)
    assert_array_equal(img.shape, (300, 420))

    with open(lp, 'rb') as fobj:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning)
            img = ndi.imread(fobj, mode="RGB")
        assert_array_equal(img.shape, (300, 420, 3)) 
Example #17
Source File: download_data.py    From arc-pytorch with MIT License
def omniglot_folder_to_NDarray(path_im):
    alphbts = os.listdir(path_im)
    ALL_IMGS = []

    for alphbt in alphbts:
        chars = os.listdir(os.path.join(path_im, alphbt))
        for char in chars:
            img_filenames = os.listdir(os.path.join(path_im, alphbt, char))
            char_imgs = []
            for img_fn in img_filenames:
                fn = os.path.join(path_im, alphbt, char, img_fn)
                I = imread(fn)
                I = np.invert(I)
                char_imgs.append(I)
            ALL_IMGS.append(char_imgs)

    return np.array(ALL_IMGS) 
Example #18
Source File: 1_notmnist.py    From udacity-deep-learning with GNU General Public License v3.0
def plot_samples(data_folders, sample_size, title=None):
    fig = plt.figure()
    if title: fig.suptitle(title, fontsize=16, fontweight='bold')
    for folder in data_folders:
        image_files = os.listdir(folder)
        image_sample = random.sample(image_files, sample_size)
        for image in image_sample:
            image_file = os.path.join(folder, image)
            ax = fig.add_subplot(len(data_folders), sample_size, sample_size * data_folders.index(folder) +
                                 image_sample.index(image) + 1)
            image = mpimg.imread(image_file)
            ax.imshow(image)
            ax.set_axis_off()

    plt.show() 
Example #19
Source File: data_utils.py    From hands-detection with MIT License
def crawl_directory(directory, augment_with_rotations=False,
                    first_label=0):
  """Crawls data directory and returns stuff."""
  label_idx = first_label
  images = []
  labels = []
  info = []

  # traverse root directory
  for root, _, files in os.walk(directory):
    logging.info('Reading files from %s', root)
    fileflag = 0
    for file_name in files:
      full_file_name = os.path.join(root, file_name)
      img = imread(full_file_name, flatten=True)
      for i, angle in enumerate([0, 90, 180, 270]):
        if not augment_with_rotations and i > 0:
          break

        images.append(imrotate(img, angle))
        labels.append(label_idx + i)
        info.append(full_file_name)

      fileflag = 1

    if fileflag:
      label_idx += 4 if augment_with_rotations else 1

  return images, labels, info 
Example #20
Source File: data_utils.py    From object_detection_kitti with Apache License 2.0
def crawl_directory(directory, augment_with_rotations=False,
                    first_label=0):
  """Crawls data directory and returns stuff."""
  label_idx = first_label
  images = []
  labels = []
  info = []

  # traverse root directory
  for root, _, files in os.walk(directory):
    logging.info('Reading files from %s', root)
    fileflag = 0
    for file_name in files:
      full_file_name = os.path.join(root, file_name)
      img = imread(full_file_name, flatten=True)
      for i, angle in enumerate([0, 90, 180, 270]):
        if not augment_with_rotations and i > 0:
          break

        images.append(imrotate(img, angle))
        labels.append(label_idx + i)
        info.append(full_file_name)

      fileflag = 1

    if fileflag:
      label_idx += 4 if augment_with_rotations else 1

  return images, labels, info 
Example #21
Source File: data_utils.py    From HumanRecognition with MIT License
def crawl_directory(directory, augment_with_rotations=False,
                    first_label=0):
  """Crawls data directory and returns stuff."""
  label_idx = first_label
  images = []
  labels = []
  info = []

  # traverse root directory
  for root, _, files in os.walk(directory):
    logging.info('Reading files from %s', root)
    fileflag = 0
    for file_name in files:
      full_file_name = os.path.join(root, file_name)
      img = imread(full_file_name, flatten=True)
      for i, angle in enumerate([0, 90, 180, 270]):
        if not augment_with_rotations and i > 0:
          break

        images.append(imrotate(img, angle))
        labels.append(label_idx + i)
        info.append(full_file_name)

      fileflag = 1

    if fileflag:
      label_idx += 4 if augment_with_rotations else 1

  return images, labels, info 
Example #22
Source File: ingest_utils.py    From neon with Apache License 2.0
def resize_image(image, img_save_path, img_reshape):
    im = imread(image)
    if img_reshape is not None:
        im = imresize(im, img_reshape)
    imsave(img_save_path, im)
    return img_save_path 
Example #23
Source File: data_utils.py    From Machine-Learning-with-TensorFlow-1.x with MIT License
def load_class(folder, image_size, pixel_depth):
    image_files = os.listdir(folder)
    num_of_images = len(image_files)
    dataset = np.ndarray(shape=(num_of_images, image_size, image_size),
                         dtype=np.float32)
    image_index = 0
    print('Started loading images from: ' + folder)
    for index, image in enumerate(image_files):

        sys.stdout.write('Loading image %d of %d\r' % (index + 1, num_of_images))
        sys.stdout.flush()

        image_file = os.path.join(folder, image)

        try:
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[image_index, :, :] = image_data
            image_index += 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    print('Finished loading data from: ' + folder)

    return dataset[0:image_index, :, :] 
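The (pixel - pixel_depth / 2) / pixel_depth normalization maps raw grayscale values into roughly [-0.5, 0.5]; a quick sanity check, assuming pixel_depth = 255.0 as in the notMNIST tutorials:

pixel_depth = 255.0
print((0 - pixel_depth / 2) / pixel_depth)    # -0.5
print((255 - pixel_depth / 2) / pixel_depth)  #  0.5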
Example #24
Source File: 1_notmnist.py    From udacity-deep-learning with GNU General Public License v3.0
def load_letter(folder, min_num_images):
    """Load the data for a single letter label."""
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')

    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))

    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset 
Example #25
Source File: videos.py    From LipNet with MIT License
def from_frames(self, path):
        frames_path = sorted([os.path.join(path, x) for x in os.listdir(path)])
        frames = [ndimage.imread(frame_path) for frame_path in frames_path]
        self.handle_type(frames)
        return self 
Example #26
Source File: process_data.py    From GPPVAE with Apache License 2.0
def import_data(size=128):

    files = []
    orients = ["00F", "30L", "30R", "45L", "45R", "60L", "60R", "90L", "90R"]
    for orient in orients:
        _files = glob.glob(os.path.join(data_dir, "*/*_%s.jpg" % orient))
        files = files + _files
    files = sp.sort(files)

    D1id = []
    D2id = []
    Did = []
    Rid = []
    Y = sp.zeros([len(files), size, size, 3], dtype=sp.uint8)
    for _i, _file in enumerate(files):
        y = imread(_file)
        y = imresize(y, size=[size, size], interp="bilinear")
        Y[_i] = y
        fn = _file.split(".jpg")[0]
        fn = fn.split("/")[-1]
        did1, did2, rid = fn.split("_")
        Did.append(did1 + "_" + did2)
        Rid.append(rid)
    Did = sp.array(Did, dtype="|S100")
    Rid = sp.array(Rid, dtype="|S100")

    RV = {"Y": Y, "Did": Did, "Rid": Rid}
    return RV 
Example #27
Source File: main.py    From DeeplearningAI_AndrewNg with MIT License
def predict_image(self, image_path):
        image = np.array(ndimage.imread(image_path, flatten=False))
        my_image = scipy.misc.imresize(image, size=(64, 64)).reshape((1, 64 * 64 * 3))
        my_image = my_image/255.0
        self.load_weight()

        Y_prediction = self.__sigmoid(np.dot(self.w.T, my_image.T) + self.b)

        return Y_prediction 
Example #28
Source File: main.py    From DeeplearningAI_AndrewNg with MIT License
def predict_standard(self, image_path):
        print("==============在测试集的准确率=================")
        predict(self.test_x, self.test_y, self.parameters)
        print("==============预测一张图片=================")
        image = np.array(ndimage.imread(image_path, flatten=False))
        my_image = scipy.misc.imresize(image, size=(64, 64)).reshape((64 * 64 * 3, 1))
        my_predicted_image = predict(X=my_image, y=[1], parameters=self.parameters)
        print("这%s一只猫" % "是" if my_predicted_image == 1 else "不是")
        plt.imshow(image) 
Example #29
Source File: main.py    From DeeplearningAI_AndrewNg with MIT License
def predict_with_keras(self, image_path):
        image = np.array(ndimage.imread(image_path, flatten=False))
        image_flatten = scipy.misc.imresize(image, size=(64, 64)).reshape((64*64*3, 1))
        result = np.squeeze(self.model.predict(image_flatten.T))
        print("这%s一只猫" % "是" if result==1 else "不是") 
Example #30
Source File: notmnist_prepare_data.py    From deep-learning-samples with The Unlicense
def load_letter(folder, min_num_images):
  """Load the data for a single letter label."""
  image_files = os.listdir(folder)
  dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
  print(folder)
  num_images = 0
  for image in image_files:
    image_file = os.path.join(folder, image)
    try:
      image_data = (ndimage.imread(image_file).astype(float) -
                    pixel_depth / 2) / pixel_depth
      if image_data.shape != (image_size, image_size):
        raise Exception('Unexpected image shape: %s' % str(image_data.shape))
      dataset[num_images, :, :] = image_data
      num_images = num_images + 1
    except IOError as e:
      print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')

  dataset = dataset[0:num_images, :, :]
  if num_images < min_num_images:
    raise Exception('Many fewer images than expected: %d < %d' %
                    (num_images, min_num_images))

  print('Full dataset tensor:', dataset.shape)
  print('Mean:', np.mean(dataset))
  print('Standard deviation:', np.std(dataset))
  return dataset