Python keras.preprocessing.image.ImageDataGenerator() Examples

The following are 21 code examples of keras.preprocessing.image.ImageDataGenerator(), drawn from open-source projects; the original project and source file are noted above each example. You may also want to check out the other available functions and classes of the module keras.preprocessing.image.
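For orientation, here is a minimal, self-contained sketch of the typical workflow: configure an ImageDataGenerator, call fit() on the training array when featurewise statistics are enabled, and pull augmented batches from flow(). The array shapes and augmentation values are illustrative only, not taken from any of the projects below.

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

# Dummy data: 32 RGB images of size 10x10 (channels_last) with integer labels.
x = np.random.random((32, 10, 10, 3))
y = np.arange(32)

datagen = ImageDataGenerator(
    featurewise_center=True,             # subtract the dataset mean from each image
    featurewise_std_normalization=True,  # divide by the dataset standard deviation
    rotation_range=20,
    horizontal_flip=True)

datagen.fit(x)  # required because featurewise statistics are enabled

# flow() yields (batch_x, batch_y) tuples indefinitely.
batch_x, batch_y = next(datagen.flow(x, y, batch_size=8))
print(batch_x.shape, batch_y.shape)  # (8, 10, 10, 3) (8,)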
Example #1
Source File: auto_dataset.py    From aetros-cli with MIT License
def get_image_data_augmentor_from_dataset(dataset):
    from keras.preprocessing.image import ImageDataGenerator
    dataset_config = dataset['config']

    augShearRange = float(get_option(dataset_config, 'augShearRange', 0.1))
    augZoomRange = float(get_option(dataset_config, 'augZoomRange', 0.1))
    augHorizontalFlip = bool(get_option(dataset_config, 'augHorizontalFlip', False))
    augVerticalFlip = bool(get_option(dataset_config, 'augVerticalFlip', False))
    augRotationRange = float(get_option(dataset_config, 'augRotationRange', 0.2))

    return ImageDataGenerator(
        rotation_range=augRotationRange,
        shear_range=augShearRange,
        zoom_range=augZoomRange,
        horizontal_flip=augHorizontalFlip,
        vertical_flip=augVerticalFlip
    ) 
Example #2
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_image_data_generator_invalid_data(self):
        generator = image.ImageDataGenerator(
            featurewise_center=True,
            samplewise_center=True,
            featurewise_std_normalization=True,
            samplewise_std_normalization=True,
            zca_whitening=True,
            data_format='channels_last')
        # Test fit with invalid data
        with pytest.raises(ValueError):
            x = np.random.random((3, 10, 10))
            generator.fit(x)

        # Test flow with invalid data
        with pytest.raises(ValueError):
            x = np.random.random((32, 10, 10))
            generator.flow(np.arange(x.shape[0])) 
Example #3
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_image_data_generator_with_validation_split(self):
        for test_images in self.all_test_images:
            img_list = []
            for im in test_images:
                img_list.append(image.img_to_array(im)[None, ...])

            images = np.vstack(img_list)
            generator = image.ImageDataGenerator(validation_split=0.5)
            seq = generator.flow(images, np.arange(images.shape[0]),
                                 shuffle=False, batch_size=3,
                                 subset='validation')
            x, y = seq[0]
            assert list(y) == [0, 1, 2]
            seq = generator.flow(images, np.arange(images.shape[0]),
                                 shuffle=False, batch_size=3,
                                 subset='training')
            x2, y2 = seq[0]
            assert list(y2) == [4, 5, 6]

            with pytest.raises(ValueError):
                generator.flow(images, np.arange(images.shape[0]),
                               shuffle=False, batch_size=3,
                               subset='foo') 
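The same validation_split mechanism also works with directory data: flow_from_directory() accepts the same subset argument, so one generator can serve both partitions. A hedged sketch, assuming a placeholder 'data/train' directory organized into one sub-folder per class:

from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)

# 'data/train' is a placeholder path, not part of the test above.
train_gen = datagen.flow_from_directory('data/train',
                                        target_size=(128, 128),
                                        batch_size=32,
                                        class_mode='categorical',
                                        subset='training')
val_gen = datagen.flow_from_directory('data/train',
                                      target_size=(128, 128),
                                      batch_size=32,
                                      class_mode='categorical',
                                      subset='validation')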
Example #4
Source File: train.py    From Dog-Cat-Classifier with Apache License 2.0
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Generate augmented data on the fly:
    # this can improve results, at the cost of a longer training run.

    # To train without augmentation, use this instead:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(
        featurewise_center=False, samplewise_center=False,
        featurewise_std_normalization=False, samplewise_std_normalization=False,
        zca_whitening=False, rotation_range=0,
        width_shift_range=0.1, height_shift_range=0.1,
        horizontal_flip=True, vertical_flip=False)
    generated_data.fit(X)
    import numpy
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0]//8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model 
Example #5
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_directory_iterator_class_mode_input(self, tmpdir):
        tmpdir.join('class-1').mkdir()

        # save the images in the paths
        count = 0
        for test_images in self.all_test_images:
            for im in test_images:
                filename = str(tmpdir / 'class-1' / 'image-{}.jpg'.format(count))
                im.save(filename)
                count += 1

        # create iterator
        generator = image.ImageDataGenerator()
        dir_iterator = generator.flow_from_directory(str(tmpdir), class_mode='input')
        batch = next(dir_iterator)

        # check if input and output have the same shape
        assert(batch[0].shape == batch[1].shape)
        # check if the input and output images are not the same numpy array
        input_img = batch[0][0]
        output_img = batch[1][0]
        output_img[0][0][0] += 1
        assert(input_img[0][0][0] != output_img[0][0][0]) 
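class_mode='input' makes the iterator yield each image as both input and target, which is the usual setup for training an autoencoder straight from a directory. A hedged sketch of that use, assuming a placeholder 'data/images' directory and an already compiled autoencoder model:

from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)

# 'data/images' and `autoencoder` are placeholders, not part of the test above.
it = datagen.flow_from_directory('data/images',
                                 target_size=(64, 64),
                                 batch_size=16,
                                 class_mode='input')
autoencoder.fit_generator(it, steps_per_epoch=it.samples // 16, epochs=5)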
Example #6
Source File: data_Keras.py    From U-net with MIT License
def __init__(
        self, train_path="../data_set/train", label_path="../data_set/label", merge_path="../data_set/merge",
        aug_merge_path="../data_set/aug_merge", aug_train_path="../data_set/aug_train",
        aug_label_path="../data_set/aug_label", img_type="tif"
):

    # Use glob to collect all images of type img_type from the given paths
    self.train_imgs = glob.glob(train_path + "/*." + img_type)  # training set
    self.label_imgs = glob.glob(label_path + "/*." + img_type)  # labels
    self.train_path = train_path
    self.label_path = label_path
    self.merge_path = merge_path
    self.img_type = img_type
    self.aug_merge_path = aug_merge_path
    self.aug_train_path = aug_train_path
    self.aug_label_path = aug_label_path
    self.slices = len(self.train_imgs)
    self.datagen = ImageDataGenerator(
        rotation_range=0.2,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.05,
        zoom_range=0.05,
        horizontal_flip=True,
        fill_mode='nearest')
Example #7
Source File: generator.py    From keras-image-segmentation with MIT License
def pre_processing(img):
    # Random saturation and value (brightness) jitter (0.9 ~ 1.1 scale)
    rand_s = random.uniform(0.9, 1.1)
    rand_v = random.uniform(0.9, 1.1)

    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)

    tmp = np.ones_like(img[:, :, 1]) * 255
    img[:, :, 1] = np.where(img[:, :, 1] * rand_s > 255, tmp, img[:, :, 1] * rand_s)
    img[:, :, 2] = np.where(img[:, :, 2] * rand_v > 255, tmp, img[:, :, 2] * rand_v)

    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)

    # Center and scale the image to the range [-1, 1]
    return img / 127.5 - 1


# Get ImageDataGenerator arguments (options) depending on the mode (train, val, test)
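In this project the pre_processing function above is meant to be handed to Keras through the preprocessing_function argument, which ImageDataGenerator applies to every image after augmentation. A hedged sketch of that wiring, with illustrative augmentation values rather than the project's actual per-mode options:

from keras.preprocessing.image import ImageDataGenerator

# Illustrative arguments; the real options depend on the train/val/test mode
# mentioned in the comment above.
data_gen_args = dict(
    preprocessing_function=pre_processing,  # applied to each image after augmentation
    horizontal_flip=True,
    width_shift_range=0.1,
    height_shift_range=0.1)

datagen = ImageDataGenerator(**data_gen_args)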
Example #8
Source File: test_multi.py    From DeepFashion with Apache License 2.0
def evaluate_test_dataset():
    ## Test
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    test_generator = test_datagen.flow_from_directory(
        dataset_test_path,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='sparse',                                                                           # Binary to Multi classification changes
        save_to_dir=None,
        shuffle=False)

    scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)

    logging.debug('model.metrics_names {}'.format(model.metrics_names))
    logging.debug('scores {}'.format(scores)) 
Example #9
Source File: 420.py    From facial_expressions with Apache License 2.0
def image_samplewise(x, swise_center, swise_std_norm, save_path):
    datagen = ImageDataGenerator(samplewise_center=swise_center, samplewise_std_normalization=swise_std_norm)
    for batch in datagen.flow(x, batch_size=1,
                          save_to_dir=save_path, save_prefix='cat', save_format='jpeg'):
        break  # otherwise the generator would loop indefinitely

#Function to rescale the image 
Example #10
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_batch_standardize(self):
        # ImageDataGenerator.standardize should work on batches
        for test_images in self.all_test_images:
            img_list = []
            for im in test_images:
                img_list.append(image.img_to_array(im)[None, ...])

            images = np.vstack(img_list)
            generator = image.ImageDataGenerator(
                featurewise_center=True,
                samplewise_center=True,
                featurewise_std_normalization=True,
                samplewise_std_normalization=True,
                zca_whitening=True,
                rotation_range=90.,
                width_shift_range=0.1,
                height_shift_range=0.1,
                shear_range=0.5,
                zoom_range=0.2,
                channel_shift_range=0.,
                brightness_range=(1, 5),
                fill_mode='nearest',
                cval=0.5,
                horizontal_flip=True,
                vertical_flip=True)
            generator.fit(images, augment=True)

            transformed = np.copy(images)
            for i, im in enumerate(transformed):
                transformed[i] = generator.random_transform(im)
            transformed = generator.standardize(transformed) 
Example #11
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_image_data_generator_fit(self):
        generator = image.ImageDataGenerator(
            featurewise_center=True,
            samplewise_center=True,
            featurewise_std_normalization=True,
            samplewise_std_normalization=True,
            zca_whitening=True,
            zoom_range=(0.2, 0.2),
            data_format='channels_last')
        # Test grayscale
        x = np.random.random((32, 10, 10, 1))
        generator.fit(x)
        # Test RGB
        x = np.random.random((32, 10, 10, 3))
        generator.fit(x)
        # Test more samples than dims
        x = np.random.random((32, 4, 4, 1))
        generator.fit(x)
        generator = image.ImageDataGenerator(
            featurewise_center=True,
            samplewise_center=True,
            featurewise_std_normalization=True,
            samplewise_std_normalization=True,
            zca_whitening=True,
            data_format='channels_first')
        # Test grayscale
        x = np.random.random((32, 1, 10, 10))
        generator.fit(x)
        # Test RGB
        x = np.random.random((32, 3, 10, 10))
        generator.fit(x)
        # Test more samples than dims
        x = np.random.random((32, 1, 4, 4))
        generator.fit(x) 
Example #12
Source File: data.py    From U-net-segmentation with GNU General Public License v2.0
def __init__(self, train_path="data/train/image", label_path="data/train/label", merge_path="data/merge",
                 aug_merge_path="data/aug_merge", aug_train_path="data/aug_train",
                 aug_label_path="data/aug_label", img_type="tif"):

        """
        Use glob to collect all images of type img_type from the given paths
        """

        self.train_imgs = glob.glob(train_path + "/*." + img_type)  # training set
        self.label_imgs = glob.glob(label_path + "/*." + img_type)  # labels
        self.train_path = train_path
        self.label_path = label_path
        self.merge_path = merge_path
        self.img_type = img_type
        self.aug_merge_path = aug_merge_path
        self.aug_train_path = aug_train_path
        self.aug_label_path = aug_label_path
        self.slices = len(self.train_imgs)
        self.datagen = ImageDataGenerator(
            rotation_range=0.2,
            width_shift_range=0.05,
            height_shift_range=0.05,
            shear_range=0.05,
            zoom_range=0.05,
            horizontal_flip=True,
            fill_mode='nearest') 
Example #13
Source File: image_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_image_data_generator_with_split_value_error(self):
        with pytest.raises(ValueError):
            generator = image.ImageDataGenerator(validation_split=5) 
Example #14
Source File: 302_WU.py    From facial_expressions with Apache License 2.0
def picaugment(indir, newdir, augsize = 10):
    
    # Using keras ImageDataGenerator to generate random images
    datagen = ImageDataGenerator(
        featurewise_std_normalization=False,
        rotation_range = 20,
        width_shift_range = 0.10,
        height_shift_range = 0.10,
        shear_range = 0.1,
        zoom_range = 0.1,
        horizontal_flip = True)
    
    piclist = os.listdir(indir)
    if not os.path.exists(newdir):
        os.makedirs(newdir)
    
    for i in range(0,len(piclist)):
        img = load_img(os.path.join(indir, piclist[i]))
        x = img_to_array(img)
        x = x.reshape((1,) + x.shape)
        j = 0
        for batch in datagen.flow(x, batch_size = 1, save_to_dir = newdir, 
                              save_prefix = piclist[i].split('.')[0]):
            j += 1
            if j >= augsize:
                break 
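A hypothetical invocation of the helper above; the directory names and count are placeholders, not part of the original script.

# Write up to 5 augmented copies of every image in 'raw_faces' to 'augmented_faces'.
picaugment('raw_faces', 'augmented_faces', augsize=5)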
Example #15
Source File: extractor.py    From keras-examples with MIT License
def save_bottleneck_features():
    """VGG16に訓練画像、バリデーション画像を入力し、
    ボトルネック特徴量(FC層の直前の出力)をファイルに保存する"""

    # Load the VGG16 model with its pre-trained ImageNet weights
    # The fully-connected (FC) layers are not needed, so include_top=False
    model = VGG16(include_top=False, weights='imagenet')
    model.summary()

    # Configure the data generator
    datagen = ImageDataGenerator(rescale=1.0 / 255)

    # Create a generator for the training set
    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='rgb',
        classes=classes,
        class_mode='categorical',
        batch_size=batch_size,
        shuffle=False)

    # Feed the generated images into VGG16 and save its output to a file
    bottleneck_features_train = model.predict_generator(generator, nb_train_samples)
    np.save(os.path.join(result_dir, 'bottleneck_features_train.npy'),
            bottleneck_features_train)

    # Create a generator for the validation set
    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_rows, img_cols),
        color_mode='rgb',
        classes=classes,
        class_mode='categorical',
        batch_size=batch_size,
        shuffle=False)

    # Feed the generated images into VGG16 and save its output to a file
    bottleneck_features_validation = model.predict_generator(generator, nb_val_samples)
    np.save(os.path.join(result_dir, 'bottleneck_features_validation.npy'),
            bottleneck_features_validation) 
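The .npy files written above are typically reloaded in a later step to train a small classifier on top of the frozen VGG16 features. A hypothetical follow-up (not part of extractor.py), reusing the result_dir variable from that script:

import os
import numpy as np

# Load the bottleneck features saved by save_bottleneck_features().
train_features = np.load(os.path.join(result_dir, 'bottleneck_features_train.npy'))
val_features = np.load(os.path.join(result_dir, 'bottleneck_features_validation.npy'))

# Each row is the VGG16 output for one input image; a small fully-connected
# model can now be trained on these arrays instead of the raw images.
print(train_features.shape, val_features.shape)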
Example #16
Source File: 420.py    From facial_expressions with Apache License 2.0
def image_rescale(x, resc, shear_range, zoom_range, h_flip, save_path):
    datagen = ImageDataGenerator(
        rescale=resc,
        shear_range=shear_range,
        zoom_range=zoom_range,
        horizontal_flip=h_flip)
    for batch in datagen.flow(x, batch_size=1,
                          save_to_dir=save_path, save_prefix='cat', save_format='jpeg'):
        break  # otherwise the generator would loop indefinitely

#Flip the color of pixels (black to white and vice versa) 
Example #17
Source File: 420.py    From facial_expressions with Apache License 2.0
def image_featurewise(x, fwise_center, fwise_std_norm, save_path):
    datagen = ImageDataGenerator(featurewise_center=fwise_center, featurewise_std_normalization=fwise_std_norm)
    for batch in datagen.flow(x, batch_size=1,
                          save_to_dir=save_path, save_prefix='cat', save_format='jpeg'):
        break  # otherwise the generator would loop indefinitely

#Function to apply ZCA whitening to the image 
Example #18
Source File: 420.py    From facial_expressions with Apache License 2.0
def image_rotation(x, rotation_range, save_path):
    datagen = ImageDataGenerator(rotation_range=rotation_range)
    for batch in datagen.flow(x, batch_size=1,
                          save_to_dir=save_path, save_prefix='cat', save_format='jpeg'):
        break  # otherwise the generator would loop indefinitely
   
#Function to shift height and width 
Example #19
Source File: 420.py    From facial_expressions with Apache License 2.0
def image_flip(x, h_flip, v_flip, save_path):
    datagen = ImageDataGenerator(horizontal_flip=h_flip, vertical_flip=v_flip)
    for batch in datagen.flow(x, batch_size=1,
                          save_to_dir=save_path, save_prefix='cat', save_format='jpeg'):
        break  # otherwise the generator would loop indefinitely
    
#Function to shift features in the image 
Example #20
Source File: 420.py    From facial_expressions with Apache License 2.0
def image_size_shift(x, w_shift, h_shift, save_path):
    datagen = ImageDataGenerator(width_shift_range=w_shift, height_shift_range=h_shift)
    for batch in datagen.flow(x, batch_size=1,
                          save_to_dir=save_path, save_prefix='cat', save_format='jpeg'):
        break  # otherwise the generator would loop indefinitely

#Function to shift channel in the image 
Example #21
Source File: 420.py    From facial_expressions with Apache License 2.0
def image_channel_shift(x, c_shift, save_path):
    datagen = ImageDataGenerator(channel_shift_range=c_shift)
    for batch in datagen.flow(x, batch_size=1,
                          save_to_dir=save_path, save_prefix='cat', save_format='jpeg'):
        break  # otherwise the generator would loop indefinitely

#Function to flip the image
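Finally, a hypothetical driver for the 420.py helpers shown above; the image path and output directory are placeholders. Each helper writes a single augmented copy per call, because its flow() loop breaks after the first batch.

import os
from keras.preprocessing.image import load_img, img_to_array

# Load one face image and add a batch dimension, since datagen.flow() expects
# a 4D array of shape (samples, height, width, channels).
img = load_img('faces/example.jpg')   # placeholder path
x = img_to_array(img)
x = x.reshape((1,) + x.shape)

# Write one rotated variant of the image into the output directory.
os.makedirs('faces_augmented', exist_ok=True)
image_rotation(x, rotation_range=30, save_path='faces_augmented')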