Python tensorflow.keras.utils.to_categorical() Examples

The following are 30 code examples of tensorflow.keras.utils.to_categorical(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.utils, or try the search function.
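
Before the examples, a quick sketch of what to_categorical() does: it converts an array of integer class labels into a one-hot encoded array of shape input_shape + (num_classes,), inferring num_classes as max(label) + 1 when it is not given, and returning float32 by default. The toy labels below are illustrative.

import numpy as np
from tensorflow.keras.utils import to_categorical

labels = np.array([0, 2, 1, 2])           # integer class labels
one_hot = to_categorical(labels)          # num_classes inferred as max + 1 = 3
print(one_hot.shape, one_hot.dtype)       # (4, 3) float32
print(one_hot[1])                         # [0. 0. 1.]

# pass num_classes explicitly when a class may be absent from this batch
print(to_categorical(labels, num_classes=4).shape)   # (4, 4)
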
Example #1
Source File: test_cfproto.py    From alibi with Apache License 2.0
def tf_keras_iris(tf_keras_iris_model, tf_keras_iris_ae):
    X, y = load_iris(return_X_y=True)
    X = (X - X.mean(axis=0)) / X.std(axis=0)  # scale dataset

    idx = 145
    X_train, y_train = X[:idx, :], y[:idx]
    # y_train = to_categorical(y_train) # TODO: fine to leave as is?

    # set random seed
    np.random.seed(1)
    tf.set_random_seed(1)  # TF1-style API; in TF 2.x use tf.random.set_seed(1)

    model = tf_keras_iris_model
    model.fit(X_train, y_train, batch_size=128, epochs=500, verbose=0)

    ae, enc, _ = tf_keras_iris_ae
    ae.fit(X_train, X_train, batch_size=32, epochs=100, verbose=0)

    return X_train, model, ae, enc 
Example #2
Source File: test_preprocessor.py    From MIScnn with GNU General Public License v3.0
def test_PREPROCESSOR_patchwisecrop_skipBlanks(self):
        sample_list = self.data_io3D.get_indiceslist()
        pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=1,
                          analysis="patchwise-crop", patch_shape=(4,4,4))
        pp.patchwise_skip_blanks = True
        batches = pp.run(sample_list[0:3], training=True, validation=False)
        sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
        sample.seg_data = to_categorical(sample.seg_data,
                                         num_classes=sample.classes)
        ready_data = pp.analysis_patchwise_crop(sample, data_aug=False)
        self.assertEqual(len(ready_data), 1)
        self.assertEqual(ready_data[0][0].shape, (4,4,4,1))
        self.assertEqual(ready_data[0][1].shape, (4,4,4,3))

    #-------------------------------------------------#
    #            Analysis: Patchwise-grid             #
    #-------------------------------------------------# 
Example #3
Source File: test_preprocessor.py    From MIScnn with GNU General Public License v3.0
def test_PREPROCESSOR_patchwisegrid_2D(self):
        sample_list = self.data_io2D.get_indiceslist()
        pp = Preprocessor(self.data_io2D, data_aug=None, batch_size=1,
                          analysis="patchwise-grid", patch_shape=(4,4))
        batches = pp.run(sample_list[0:1], training=False, validation=False)
        self.assertEqual(len(batches), 16)
        sample = self.data_io2D.sample_loader(sample_list[0], load_seg=True)
        sample.seg_data = to_categorical(sample.seg_data,
                                         num_classes=sample.classes)
        pp = Preprocessor(self.data_io2D, data_aug=None, batch_size=1,
                          analysis="patchwise-grid", patch_shape=(5,5))
        ready_data = pp.analysis_patchwise_grid(sample, data_aug=False,
                                                training=True)
        self.assertEqual(len(ready_data), 16)
        self.assertEqual(ready_data[0][0].shape, (5,5,1))
        self.assertEqual(ready_data[0][1].shape, (5,5,3)) 
Example #4
Source File: shapelets.py    From tslearn with BSD 2-Clause "Simplified" License
def _preprocess_labels(self, y):
        self.classes_ = unique_labels(y)
        n_labels = len(self.classes_)
        if n_labels == 1:
            raise ValueError("Classifier can't train when only one class "
                             "is present.")
        if self.classes_.dtype in [numpy.int32, numpy.int64]:
            self.label_to_ind_ = {int(lab): ind
                                  for ind, lab in enumerate(self.classes_)}
        else:
            self.label_to_ind_ = {lab: ind
                                  for ind, lab in enumerate(self.classes_)}
        y_ind = numpy.array(
            [self.label_to_ind_[lab] for lab in y]
        )
        y_ = to_categorical(y_ind)
        if n_labels == 2:
            y_ = y_[:, 1:]  # Keep only indicator of positive class
        return y_ 
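
A standalone illustration (not part of tslearn) of the binary branch above: with two classes, to_categorical produces two complementary columns, so slicing y_[:, 1:] keeps a single 0/1 indicator of the positive class.

import numpy as np
from tensorflow.keras.utils import to_categorical

y_ind = np.array([0, 1, 1, 0])
y_ = to_categorical(y_ind)      # shape (4, 2); the two columns sum to 1 per row
y_pos = y_[:, 1:]               # shape (4, 1); indicator of the positive class
print(y_pos.ravel())            # [0. 1. 1. 0.]
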
Example #5
Source File: test_supervised.py    From ivis with GNU General Public License v2.0
def test_svm_score_samples():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target

    supervision_metric = 'categorical_hinge'
    ivis_iris = Ivis(k=15, batch_size=16, epochs=5,
                     supervision_metric=supervision_metric)

    # Correctly formatted one-hot labels train successfully
    y = to_categorical(y)
    embeddings = ivis_iris.fit_transform(x, y)

    y_pred = ivis_iris.score_samples(x)

    loss_name = ivis_iris.model_.loss['supervised'].__name__
    assert losses.get(loss_name).__name__ == losses.get(supervision_metric).__name__
    assert ivis_iris.model_.layers[-1].activation.__name__ == 'linear'
    assert ivis_iris.model_.layers[-1].kernel_regularizer is not None
    assert ivis_iris.model_.layers[-1].output_shape[-1] == y.shape[-1] 
Example #6
Source File: mine-13.8.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def load_eval_dataset(self):
        """Pre-load test data for evaluation
        """
        (_, _), (x_test, self.y_test) = \
                self.args.dataset.load_data()
        image_size = x_test.shape[1]
        x_test = np.reshape(x_test,
                            [-1, image_size, image_size, 1])
        x_test = x_test.astype('float32') / 255
        x_eval = np.zeros([x_test.shape[0],
                          *self.train_gen.input_shape])
        for i in range(x_eval.shape[0]):
            x_eval[i] = center_crop(x_test[i])

        self.y_test = to_categorical(self.y_test)
        self.x_test = x_eval 
Example #7
Source File: test_preprocessor.py    From MIScnn with GNU General Public License v3.0
def test_PREPROCESSOR_patchwisegrid_3D(self):
        sample_list = self.data_io3D.get_indiceslist()
        pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=1,
                          analysis="patchwise-grid", patch_shape=(4,4,4))
        batches = pp.run(sample_list[0:1], training=False, validation=False)
        self.assertEqual(len(batches), 64)
        sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
        sample.seg_data = to_categorical(sample.seg_data,
                                         num_classes=sample.classes)
        pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=1,
                          analysis="patchwise-grid", patch_shape=(5,5,5))
        ready_data = pp.analysis_patchwise_grid(sample, data_aug=False,
                                                training=True)
        self.assertEqual(len(ready_data), 64)
        self.assertEqual(ready_data[0][0].shape, (5,5,5,1))
        self.assertEqual(ready_data[0][1].shape, (5,5,5,3)) 
Example #8
Source File: fcn-12.3.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def segment_objects(self, image, normalized=True):
        """Run segmentation prediction for a given image
    
        Arguments:
            image (tensor): Image loaded in a numpy tensor.
                RGB components range is [0.0, 1.0]
            normalized (Bool): Use normalized=True for 
                pixel-wise categorical prediction. False if 
                segmentation will be displayed in RGB
                image format.
        """

        from tensorflow.keras.utils import to_categorical
        image = np.expand_dims(image, axis=0)
        segmentation = self.fcn.predict(image)
        segmentation = np.squeeze(segmentation, axis=0)
        segmentation = np.argmax(segmentation, axis=-1)
        segmentation = to_categorical(segmentation,
                                      num_classes=self.n_classes)
        if not normalized:
            segmentation = segmentation * 255
        segmentation = segmentation.astype('uint8')
        return segmentation 
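
The argmax-then-to_categorical pattern above turns a soft per-pixel class distribution into a hard one-hot segmentation mask. A minimal sketch with a made-up 2x2 prediction, independent of the book's FCN code:

import numpy as np
from tensorflow.keras.utils import to_categorical

# fake (height, width, n_classes) softmax output: 2x2 image, 3 classes
soft = np.array([[[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]],
                 [[0.3, 0.3, 0.4], [0.9, 0.05, 0.05]]])
hard = np.argmax(soft, axis=-1)               # (2, 2) integer class map
mask = to_categorical(hard, num_classes=3)    # (2, 2, 3), now strictly one-hot
print(mask[0, 0])                             # [1. 0. 0.]
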
Example #9
Source File: test_preprocessor.py    From MIScnn with GNU General Public License v3.0
def test_PREPROCESSOR_fullimage_2D(self):
        sample_list = self.data_io2D.get_indiceslist()
        pp = Preprocessor(self.data_io2D, data_aug=None, batch_size=2,
                          analysis="fullimage")
        batches = pp.run(sample_list[0:3], training=True, validation=False)
        self.assertEqual(len(batches), 2)
        batches = pp.run(sample_list[0:1], training=False, validation=False)
        self.assertEqual(len(batches), 1)
        sample = self.data_io2D.sample_loader(sample_list[0], load_seg=True)
        sample.seg_data = to_categorical(sample.seg_data,
                                         num_classes=sample.classes)
        ready_data = pp.analysis_fullimage(sample, data_aug=False,
                                           training=True)
        self.assertEqual(len(ready_data), 1)
        self.assertEqual(ready_data[0][0].shape, (16,16,1))
        self.assertEqual(ready_data[0][1].shape, (16,16,3)) 
Example #10
Source File: test_preprocessor.py    From MIScnn with GNU General Public License v3.0
def test_PREPROCESSOR_fullimage_3D(self):
        sample_list = self.data_io3D.get_indiceslist()
        pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=2,
                          analysis="fullimage")
        batches = pp.run(sample_list[0:3], training=True, validation=False)
        self.assertEqual(len(batches), 2)
        batches = pp.run(sample_list[0:1], training=False, validation=False)
        self.assertEqual(len(batches), 1)
        sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
        sample.seg_data = to_categorical(sample.seg_data,
                                         num_classes=sample.classes)
        ready_data = pp.analysis_fullimage(sample, data_aug=False,
                                           training=True)
        self.assertEqual(len(ready_data), 1)
        self.assertEqual(ready_data[0][0].shape, (16,16,16,1))
        self.assertEqual(ready_data[0][1].shape, (16,16,16,3)) 
Example #11
Source File: test_dataaugmentation.py    From MIScnn with GNU General Public License v3.0
def setUpClass(self):
        np.random.seed(1234)
        # Create 2D data
        img2D = np.random.rand(1, 16, 16, 1) * 255
        self.img2D = img2D.astype(int)
        seg2D = np.random.rand(1, 16, 16, 1) * 3
        self.seg2D = seg2D.astype(int)
        self.seg2D = to_categorical(self.seg2D, num_classes=3)
        # Create 3D data
        img3D = np.random.rand(1, 16, 16, 16, 1) * 255
        self.img3D = img3D.astype(int)
        seg3D = np.random.rand(1, 16, 16, 16, 1) * 3
        self.seg3D = seg3D.astype(int)
        self.seg3D = to_categorical(self.seg3D, num_classes=3)

    #-------------------------------------------------#
    #                Base Functionality               #
    #-------------------------------------------------#
    # Class Creation 
Example #12
Source File: test_datagenerator.py    From MIScnn with GNU General Public License v3.0
def test_DATAGENERATOR_consistency(self):
        pp_fi = Preprocessor(self.data_io, batch_size=1, data_aug=None,
                             prepare_subfunctions=False, prepare_batches=False,
                             analysis="fullimage")
        data_gen = DataGenerator(self.sample_list, pp_fi,
                                 training=True, shuffle=False, iterations=None)
        i = 0
        for batch in data_gen:
            sample = self.data_io.sample_loader(self.sample_list[i],
                                                load_seg=True)
            self.assertTrue(np.array_equal(batch[0][0], sample.img_data))
            seg = to_categorical(sample.seg_data, num_classes=3)
            self.assertTrue(np.array_equal(batch[1][0], seg))
            i += 1

    # Iteration fixation test 
Example #13
Source File: utils.py    From alibi with Apache License 2.0
def fashion_mnist_dataset():
    """
    Load and prepare Fashion MNIST dataset.
    """

    (x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
    x_train = x_train.astype('float32') / 255
    x_train = np.reshape(x_train, x_train.shape + (1,))
    y_train = to_categorical(y_train)

    return {
        'X_train': x_train,
        'y_train': y_train,
        'X_test': x_test,
        'y_test': y_test,
        'preprocessor': None,
        'metadata': {'name': 'fashion_mnist'},
    } 
Example #14
Source File: test_subfunctions.py    From MIScnn with GNU General Public License v3.0
def test_SUBFUNCTIONS_postprocessing(self):
        ds = dict()
        for i in range(0, 10):
            img = np.random.rand(16, 16, 16) * 255
            img = img.astype(int)
            seg = np.random.rand(16, 16, 16) * 3
            seg = seg.astype(int)
            sample = (img, seg)
            ds["TEST.sample_" + str(i)] = sample
        io_interface = Dictionary_interface(ds, classes=3, three_dim=True)
        self.tmp_dir = tempfile.TemporaryDirectory(prefix="tmp.miscnn.")
        tmp_batches = os.path.join(self.tmp_dir.name, "batches")
        dataio = Data_IO(io_interface, input_path="", output_path="",
                         batch_path=tmp_batches, delete_batchDir=False)
        sf = [Resize((9,9,9)), Normalization(), Clipping(min=-1.0, max=0.0)]
        pp = Preprocessor(dataio, batch_size=1, prepare_subfunctions=False,
                          analysis="patchwise-grid", subfunctions=sf,
                          patch_shape=(4,4,4))
        sample_list = dataio.get_indiceslist()
        for index in sample_list:
            sample = dataio.sample_loader(index)
            for sf in pp.subfunctions:
                sf.preprocessing(sample, training=False)
            pp.cache["shape_" + str(index)] = sample.img_data.shape
            sample.seg_data = np.random.rand(9, 9, 9) * 3
            sample.seg_data = sample.seg_data.astype(int)
            sample.seg_data = to_categorical(sample.seg_data, num_classes=3)
            data_patches = pp.analysis_patchwise_grid(sample, training=True,
                                                      data_aug=False)
            seg_list = []
            for i in range(0, len(data_patches)):
                seg_list.append(data_patches[i][1])
            seg = np.stack(seg_list, axis=0)
            self.assertEqual(seg.shape, (27,4,4,4,3))
            pred = pp.postprocessing(index, seg)
            self.assertEqual(pred.shape, (16,16,16))
        self.tmp_dir.cleanup()

    # Run prepare subfunction of Preprocessor 
Example #15
Source File: dataset_tools.py    From DeepMusicClassification with MIT License
def preprocess_data(X, y, width, height):
    """ Reshapes arrays and converts labels to one-hot arrays and splits to train and test """
    X = np.array(X).reshape(-1, width, height, 1)
    y = np.array(y)

    label_encoder = LabelEncoder()
    y = label_encoder.fit_transform(y)
    y = to_categorical(y)

    X = X / 255.0  # normalize pixel values

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42)

    return X_train, X_test, y_train, y_test 
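
to_categorical() expects integer labels, which is why the genre strings above are first mapped to integers with LabelEncoder. A minimal sketch of that two-step encoding (the genre names are made up):

from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical

genres = ['rock', 'jazz', 'rock', 'pop']
encoder = LabelEncoder()
y_int = encoder.fit_transform(genres)   # [2, 0, 2, 1]; classes sorted alphabetically
y_onehot = to_categorical(y_int)        # shape (4, 3)
print(encoder.classes_)                 # ['jazz' 'pop' 'rock']
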
Example #16
Source File: nn.py    From bugbug with Mozilla Public License 2.0
def fit(self, X, y):
        X_dict = numpy_to_dict(X)

        y = to_categorical(y)

        self.model = self.model_creator(X_dict, y)

        for (epochs, batch_size) in self.fit_params:
            self.model.fit(X_dict, y, epochs=epochs, batch_size=batch_size, verbose=1)

        return self 
Example #17
Source File: main.py    From AugMix_TF2 with MIT License
def get_cifar_data(num_classes=10):
    """Loads cifar-10 data. Normalize the images and do one-hot encoding for labels"""

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    x_train = x_train.astype(np.float32) / 255.
    x_test = x_test.astype(np.float32) / 255.

    y_train_cat = to_categorical(y_train, num_classes=num_classes).astype(np.float32)
    y_test_cat = to_categorical(y_test, num_classes=num_classes).astype(np.float32)

    return x_train, y_train, x_test, y_test, y_train_cat, y_test_cat


########################################################################### 
Example #18
Source File: test_preprocessor.py    From MIScnn with GNU General Public License v3.0
def test_PREPROCESSOR_patchwisecrop_3D(self):
        sample_list = self.data_io3D.get_indiceslist()
        pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=1,
                          analysis="patchwise-crop", patch_shape=(4,4,4))
        batches = pp.run(sample_list[0:3], training=True, validation=False)
        self.assertEqual(len(batches), 3)
        batches = pp.run(sample_list[0:1], training=False, validation=False)
        self.assertEqual(len(batches), 64)
        sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
        sample.seg_data = to_categorical(sample.seg_data,
                                         num_classes=sample.classes)
        ready_data = pp.analysis_patchwise_crop(sample, data_aug=False)
        self.assertEqual(len(ready_data), 1)
        self.assertEqual(ready_data[0][0].shape, (4,4,4,1))
        self.assertEqual(ready_data[0][1].shape, (4,4,4,3)) 
Example #19
Source File: test_preprocessor.py    From MIScnn with GNU General Public License v3.0
def test_PREPROCESSOR_patchwisecrop_2D(self):
        sample_list = self.data_io2D.get_indiceslist()
        pp = Preprocessor(self.data_io2D, data_aug=None, batch_size=1,
                          analysis="patchwise-crop", patch_shape=(4,4))
        batches = pp.run(sample_list[0:3], training=True, validation=False)
        self.assertEqual(len(batches), 3)
        batches = pp.run(sample_list[0:1], training=False, validation=False)
        self.assertEqual(len(batches), 16)
        sample = self.data_io2D.sample_loader(sample_list[0], load_seg=True)
        sample.seg_data = to_categorical(sample.seg_data,
                                         num_classes=sample.classes)
        ready_data = pp.analysis_patchwise_crop(sample, data_aug=False)
        self.assertEqual(len(ready_data), 1)
        self.assertEqual(ready_data[0][0].shape, (4,4,1))
        self.assertEqual(ready_data[0][1].shape, (4,4,3)) 
Example #20
Source File: 训练.py    From Semantic-segmentation-of-remote-sensing-images with Apache License 2.0
def generateValidData(batch_size,data=[]):
    while True:  
        valid_data = []  
        valid_label = []  
        batch = 0  
        for i in (range(len(data))):  
            url = data[i]
            batch += 1  
            img = load_img(filepath + 'train/' + url)
            img = img_to_array(img)  
            valid_data.append(img)  
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h,))  
            valid_label.append(label)  
            if batch % batch_size==0:  
                valid_data = np.array(valid_data)  
                valid_label = np.array(valid_label).flatten()  
                valid_label = labelencoder.transform(valid_label)  
                valid_label = to_categorical(valid_label, num_classes=n_label)
                valid_label = valid_label.reshape((batch_size,img_w,img_h,n_label))
                yield (valid_data,valid_label)  
                valid_data = []  
                valid_label = []  
                batch = 0
                

# define the model (network architecture)
Example #21
Source File: 训练.py    From Semantic-segmentation-of-remote-sensing-images with Apache License 2.0
def generateData(batch_size,data=[]):
    while True:  
        train_data = []  
        train_label = []  
        batch = 0  
        for i in (range(len(data))): 
            url = data[i]
            batch += 1 
            img = load_img(filepath + 'train/' + url)
            img = img_to_array(img) 
            train_data.append(img)  
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h,))  
            train_label.append(label)  
            if batch % batch_size==0: 
                train_data = np.array(train_data)  
                train_label = np.array(train_label).flatten()      # flatten
                train_label = labelencoder.transform(train_label)  
                train_label = to_categorical(train_label, num_classes=n_label)  # one-hot encode the output labels
                train_label = train_label.reshape((batch_size,img_w,img_h,n_label))
                yield (train_data,train_label)  
                train_data = []  
                train_label = []  
                batch = 0  

                


# generate test data
Example #22
Source File: conftest.py    From snn_toolbox with MIT License
def _dataset():

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train / 255
    x_test = x_test / 255

    axis = 1 if keras.backend.image_data_format() == 'channels_first' else -1
    x_train = np.expand_dims(x_train, axis)
    x_test = np.expand_dims(x_test, axis)

    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)

    return x_train, y_train, x_test, y_test 
Example #23
Source File: batcher.py    From bootcamp with Apache License 2.0
def __init__(self, speakers_list):
        from tensorflow.keras.utils import to_categorical
        self.speaker_ids = sorted(speakers_list)
        self.int_speaker_ids = list(range(len(self.speaker_ids)))
        self.map_speakers_to_index = dict([(k, v) for (k, v) in zip(self.speaker_ids, self.int_speaker_ids)])
        self.map_index_to_speakers = dict([(v, k) for (k, v) in zip(self.speaker_ids, self.int_speaker_ids)])
        self.speaker_categories = to_categorical(self.int_speaker_ids, num_classes=len(self.speaker_ids)) 
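
Because the integer speaker IDs above are exactly range(n), the resulting speaker_categories matrix is the n-by-n identity: one one-hot row per speaker. A quick check of that property:

import numpy as np
from tensorflow.keras.utils import to_categorical

n = 4
cats = to_categorical(range(n), num_classes=n)
assert np.array_equal(cats, np.eye(n))  # one-hot rows of range(n) form the identity
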
Example #24
Source File: data_generator.py    From Advanced-Deep-Learning-with-Keras with MIT License
def _dataset(self):
        """Load dataset and normalize it
        """
        dataset = self.args.dataset
        if self.args.train:
            (self.data, self.label), (_, _) = dataset.load_data()
        else:
            (_, _), (self.data, self.label) = dataset.load_data()

        if self.args.dataset == mnist:
            self.n_channels = 1
        else:
            self.n_channels = self.data.shape[3]

        image_size = self.data.shape[1]
        side = image_size - self.crop_size
        self.input_shape = [side, side, self.n_channels]

        # from sparse label to categorical
        self.n_labels = len(np.unique(self.label))
        self.label = to_categorical(self.label)

        # reshape and normalize input images
        orig_shape = [-1, image_size, image_size, self.n_channels]
        self.data = np.reshape(self.data, orig_shape)
        self.data = self.data.astype('float32') / 255
        self.indexes = [i for i in range(self.data.shape[0])] 
Example #25
Source File: example_mnist_prune.py    From qkeras with Apache License 2.0
def main():
    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == "channels_first":
      x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
      x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
      input_shape = (1, img_rows, img_cols)
    else:
      x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
      x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
      input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train /= 255
    x_test /= 255
    print("x_train shape:", x_train.shape)
    print(x_train.shape[0], "train samples")
    print(x_test.shape[0], "test samples")

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, num_classes)
    y_test = to_categorical(y_test, num_classes)

    pruning_params = {
        "pruning_schedule":
            pruning_schedule.ConstantSparsity(0.75, begin_step=2000, frequency=100)
    }
    
    if prune_whole_model:
        model = build_model(input_shape)
        model = prune.prune_low_magnitude(model, **pruning_params)
    else:
        model = build_layerwise_model(input_shape, **pruning_params)

    train_and_save(model, x_train, y_train, x_test, y_test) 
Example #26
Source File: deep_classifier.py    From nlp-journey with Apache License 2.0
def load_data_from_scratch(self, test_size=0.2, max_len=100):
        assert self.train_file_path is not None, 'file must not be none '
        stopwords = load_en_stopwords()
        with open(self.train_file_path, 'r', encoding='utf-8') as file:
            lines = file.readlines()
        lines = [line.strip() for line in lines]
        lines = [line.split('##') for line in lines]
        x = [line[0] for line in lines]
        x = [line.split() for line in x]
        data = [word for xx in x for word in xx]
        y = [line[1] for line in lines]  # label is the second '##'-separated field

        counter = Counter(data)
        vocab = [k for k, v in counter.items() if v >= 5]

        word_index = {k: v for v, k in enumerate(vocab)}

        max_sentence_length = max([len(words) for words in x])
        max_len = max_len if max_sentence_length > max_len else max_sentence_length

        x_data = [[word_index[word] for word in words if word in word_index.keys() and word not in stopwords] for words
                  in x]
        x_data = pad_sequences(x_data, maxlen=max_len)

        y_data = to_categorical(y)

        x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=test_size)
        return x_train, y_train, x_test, y_test, word_index


# cnn 
Example #27
Source File: cgan-mnist-4.3.1.py    From Advanced-Deep-Learning-with-Keras with MIT License
def build_and_train_models():
    # load MNIST dataset
    (x_train, y_train), (_, _) = mnist.load_data()

    # reshape data for CNN as (28, 28, 1) and normalize
    image_size = x_train.shape[1]
    x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
    x_train = x_train.astype('float32') / 255

    num_labels = np.amax(y_train) + 1
    y_train = to_categorical(y_train)

    model_name = "cgan_mnist"
    # network parameters
    # the latent or z vector is 100-dim
    latent_size = 100
    batch_size = 64
    train_steps = 40000
    lr = 2e-4
    decay = 6e-8
    input_shape = (image_size, image_size, 1)
    label_shape = (num_labels, )

    # build discriminator model
    inputs = Input(shape=input_shape, name='discriminator_input')
    labels = Input(shape=label_shape, name='class_labels')

    discriminator = build_discriminator(inputs, labels, image_size)
    # [1] or original paper uses Adam, 
    # but discriminator converges easily with RMSprop
    optimizer = RMSprop(lr=lr, decay=decay)
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()

    # build generator model
    input_shape = (latent_size, )
    inputs = Input(shape=input_shape, name='z_input')
    generator = build_generator(inputs, labels, image_size)
    generator.summary()

    # build adversarial model = generator + discriminator
    optimizer = RMSprop(lr=lr*0.5, decay=decay*0.5)
    # freeze the weights of discriminator during adversarial training
    discriminator.trainable = False
    outputs = discriminator([generator([inputs, labels]), labels])
    adversarial = Model([inputs, labels],
                        outputs,
                        name=model_name)
    adversarial.compile(loss='binary_crossentropy',
                        optimizer=optimizer,
                        metrics=['accuracy'])
    adversarial.summary()

    # train discriminator and adversarial networks
    models = (generator, discriminator, adversarial)
    data = (x_train, y_train)
    params = (batch_size, latent_size, train_steps, num_labels, model_name)
    train(models, data, params) 
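
A side note on the example above: num_labels = np.amax(y_train) + 1 mirrors exactly how to_categorical() infers num_classes when none is passed, so the one-hot width always matches num_labels. A small sketch of that equivalence:

import numpy as np
from tensorflow.keras.utils import to_categorical

y = np.array([3, 0, 9, 1])
num_labels = np.amax(y) + 1       # 10
y_cat = to_categorical(y)         # num_classes inferred the same way: max + 1
assert y_cat.shape[-1] == num_labels
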
Example #28
Source File: get_data.py    From qkeras with Apache License 2.0
def get_data(dataset_name, fast=False):
  """Returns dataset from tfds."""
  ds_train = tfds.load(name=dataset_name, split="train", batch_size=-1)
  ds_test = tfds.load(name=dataset_name, split="test", batch_size=-1)

  dataset = tfds.as_numpy(ds_train)
  x_train, y_train = dataset["image"].astype(np.float32), dataset["label"]

  dataset = tfds.as_numpy(ds_test)
  x_test, y_test = dataset["image"].astype(np.float32), dataset["label"]

  if len(x_train.shape) == 3:
    x_train = x_train.reshape(x_train.shape + (1,))
    x_test = x_test.reshape(x_test.shape + (1,))

  x_train /= 256.0
  x_test /= 256.0

  x_mean = np.mean(x_train, axis=0)

  x_train -= x_mean
  x_test -= x_mean

  nb_classes = np.max(y_train) + 1
  y_train = to_categorical(y_train, nb_classes)
  y_test = to_categorical(y_test, nb_classes)

  print(x_train.shape[0], "train samples")
  print(x_test.shape[0], "test samples")

  if fast:
    i_train = np.arange(x_train.shape[0])
    np.random.shuffle(i_train)
    i_test = np.arange(x_test.shape[0])
    np.random.shuffle(i_test)

    s_x_train = x_train[i_train[0:fast]]
    s_y_train = y_train[i_train[0:fast]]
    s_x_test = x_test[i_test[0:fast]]
    s_y_test = y_test[i_test[0:fast]]
    return ((s_x_train, s_y_train), (x_train, y_train), (s_x_test, s_y_test),
            (x_test, y_test))
  else:
    return (x_train, y_train), (x_test, y_test) 
Example #29
Source File: dmnist.py    From qkeras with Apache License 2.0
def UseNetwork(weights_f, load_weights=False):
  """Use DenseModel.

  Args:
    weights_f: weight file location.
    load_weights: load weights when it is True.
  """
  model = QDenseModel(weights_f, load_weights)

  batch_size = BATCH_SIZE
  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  x_train = x_train.reshape(60000, 28*28)
  x_test = x_test.reshape(10000, 28*28)
  x_train = x_train.astype("float32")
  x_test = x_test.astype("float32")

  x_train /= 256.
  x_test /= 256.

  print(x_train.shape[0], "train samples")
  print(x_test.shape[0], "test samples")

  y_train = to_categorical(y_train, NB_CLASSES)
  y_test = to_categorical(y_test, NB_CLASSES)

  if not load_weights:
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=NB_EPOCH,
        verbose=VERBOSE,
        validation_split=VALIDATION_SPLIT)

    if weights_f:
      model.save_weights(weights_f)

  score = model.evaluate(x_test, y_test, verbose=False)
  print("Test score:", score[0])
  print("Test accuracy:", score[1])

  return model, x_train 
Example #30
Source File: cmnist.py    From qkeras with Apache License 2.0
def UseNetwork(weights_f, load_weights=False):
  """Use DenseModel.

  Args:
    weights_f: weight file location.
    load_weights: load weights when it is True.
  """
  model = QConv2DModel(weights_f, load_weights)

  batch_size = BATCH_SIZE
  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  x_train = x_train.reshape(60000, 28, 28, 1)
  x_test = x_test.reshape(10000, 28, 28, 1)
  x_train = x_train.astype("float32")
  x_test = x_test.astype("float32")

  x_train /= 256.
  x_test /= 256.

  print(x_train.shape[0], "train samples")
  print(x_test.shape[0], "test samples")

  y_train = to_categorical(y_train, NB_CLASSES)
  y_test = to_categorical(y_test, NB_CLASSES)

  if not load_weights:
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=NB_EPOCH,
        verbose=VERBOSE,
        validation_split=VALIDATION_SPLIT)

    if weights_f:
      model.save_weights(weights_f)

  score = model.evaluate(x_test, y_test, verbose=False)
  print("Test score:", score[0])
  print("Test accuracy:", score[1])

  return model, x_train, x_test