Python keras.callbacks.LambdaCallback() Examples

The following are 14 code examples of keras.callbacks.LambdaCallback(). The source file and project are noted above each example, so you can trace each snippet back to its original repository. You may also want to check out all available functions/classes of the module keras.callbacks, or try the search function.
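Before diving in, a quick orientation: LambdaCallback simply wraps plain functions as Keras callback hooks (on_epoch_begin/on_epoch_end, on_batch_begin/on_batch_end, on_train_begin/on_train_end). A minimal, self-contained sketch; the toy model and random data below are made up purely for illustration:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import LambdaCallback

model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='sgd', loss='mse')

# Print the running loss at the end of every epoch.
log_loss = LambdaCallback(
    on_epoch_end=lambda epoch, logs: print('epoch %d: loss %.4f' % (epoch, logs['loss'])))

x = np.random.rand(32, 4)
y = np.random.rand(32, 1)
model.fit(x, y, epochs=3, callbacks=[log_loss], verbose=0)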
Example #1
Source File: seq2seq_model.py    From supervised-oie with MIT License
def get_callbacks(self, X):
        """
        Returns callbacks listed below.
        X is the encoded dataset used to print a sample of the output.
        Callbacks created:
        1. Sample output each epoch
        2. Save the model at the end of each epoch (currently not best-only; see TODO below)
        """
        sample_output_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: pprint(self.sample_labels(self.model.predict(X))))
        checkpoint = ModelCheckpoint(os.path.join(self.model_dir,
                                                  "weights.hdf5"),
                                     verbose = 1,
                                     save_best_only = False)   # TODO: is there a way to save by best val_acc?
        return [sample_output_callback,
                checkpoint] 
Example #2
Source File: model.py    From supervised-oie with MIT License
def get_callbacks(self, X):
        """
        Returns the callbacks listed below.
        X is the encoded dataset used to print a sample of the output.
        Callbacks created:
        1. Sample output each epoch
        2. Save the model at the end of each epoch (currently not best-only; see TODO below)
        """

        sample_output_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: logging.debug(
                pformat(self.sample_labels(self.model.predict(X)))))
        checkpoint = ModelCheckpoint(os.path.join(self.model_dir,
                                                  "weights.hdf5"),
                                     verbose = 1,
                                     save_best_only = False)   # TODO: is there a way to save by best val_acc?

        return [sample_output_callback,
                checkpoint] 
Example #3
Source File: lr_finder.py    From keras_lr_finder with MIT License
def find_generator(self, generator, start_lr, end_lr, epochs=1, steps_per_epoch=None, **kw_fit):
        if steps_per_epoch is None:
            try:
                steps_per_epoch = len(generator)
            except (TypeError, NotImplementedError):
                # len() is undefined for plain generators; only
                # keras.utils.Sequence objects provide a length.
                raise ValueError('`steps_per_epoch=None` is only valid for a'
                                 ' generator based on the'
                                 ' `keras.utils.Sequence` class. Please'
                                 ' specify `steps_per_epoch` or use the'
                                 ' `keras.utils.Sequence` class.')
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(epochs * steps_per_epoch))

        # Remember the initial weights so they can be restored after the sweep
        initial_weights = self.model.get_weights()

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.lr)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.lr, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit_generator(generator=generator,
                                 epochs=epochs,
                                 steps_per_epoch=steps_per_epoch,
                                 callbacks=[callback],
                                 **kw_fit)

        # Restore the weights to the state before model fitting
        self.model.set_weights(initial_weights)

        # Restore the original learning rate
        K.set_value(self.model.optimizer.lr, original_lr) 
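The lr_mult factor computed above defines a geometric learning-rate sweep: after n batches the learning rate is start_lr * lr_mult**n, reaching end_lr on the last batch. The on_batch_end method wired in through the LambdaCallback is defined elsewhere in lr_finder.py; a plausible sketch of what it does (the lrs/losses attribute names are assumptions):

def on_batch_end(self, batch, logs):
    # Record the current learning rate and loss for later plotting.
    lr = K.get_value(self.model.optimizer.lr)
    self.lrs.append(lr)
    self.losses.append(logs['loss'])
    # Geometric step: scale the learning rate by the constant factor.
    K.set_value(self.model.optimizer.lr, lr * self.lr_mult)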
Example #4
Source File: enhancer.py    From ImageEnhancer with MIT License
def train_model(self):
        """ train the model """
        callbacks = []
        callbacks.append(TensorBoard(self.graph_path))
        callbacks.append(LearningRateScheduler(lambda e: self.learning_rate * 0.999 ** (e / 20)))
        callbacks.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.best.hdf5', save_best_only=True))
        if not self.best_cp:
            callbacks.append(ModelCheckpoint(self.checkpoint_path + 'checkpoint.{epoch:02d}-{val_loss:.2f}.hdf5'))
        callbacks.append(LambdaCallback(on_epoch_end=lambda epoch, logs: self.save_image('test.{e:02d}-{val_loss:.2f}'.format(e=epoch, **logs))))
        self.model.compile(Adam(lr=self.learning_rate), binary_crossentropy)
        self.model.fit(self.corrupted['train'], self.source['train'],
                       batch_size=self.batch_size,
                       epochs=self.epoch,
                       callbacks=callbacks,
                       validation_data=(self.corrupted['valid'], self.source['valid'])) 
Example #5
Source File: example_rock_paper_scissors.py    From keras-adversarial with MIT License
def experiment(opt, path):
    """Train two players to play rock, paper, scissors using a given optimizer"""
    x = Input((1,), name="x")
    player_a = Dense(3, activation='softmax', name="player_a", bias=False, W_regularizer=l2(1e-2))
    player_b = Dense(3, activation='softmax', name="player_b", bias=False, W_regularizer=l2(1e-2))

    action_a = player_a(x)
    action_b = player_b(x)

    def rps(z):
        u = z[0]
        v = z[1]
        return u[:, 0] * v[:, 2] + u[:, 1] * v[:, 0] + u[:, 2] * v[:, 1]

    model_a = Model(x, merge([action_a, action_b], mode=rps, output_shape=lambda z: (z[0][0], 1)))
    model_b = Model(x, merge([action_b, action_a], mode=rps, output_shape=lambda z: (z[0][0], 1)))

    adversarial_model = AdversarialModel(player_models=[model_a, model_b],
                                         player_params=[[player_a.W], [player_b.W]],
                                         player_names=["a", "b"])
    adversarial_model.adversarial_compile(opt,
                                          player_optimizers=[SGD(1), SGD(1)],
                                          loss="mean_absolute_error")
    param_model = Model(x, [action_a, action_b])

    def print_params(epoch, logs):
        params = param_model.predict(np.ones((1, 1)))
        a = params[0].ravel()
        b = params[1].ravel()
        print("Epoch: {}, A: {}, B: {}".format(epoch, a, b))
        imgpath = os.path.join(path, "epoch-{:03d}.png".format(epoch))
        rps_chart(imgpath, a, b)

    cb = LambdaCallback(on_epoch_begin=print_params)
    batch_count = 5
    adversarial_model.fit(np.ones((batch_count, 1)),
                          [np.ones((batch_count, 1)), np.ones((batch_count, 1))],
                          nb_epoch=120, callbacks=[cb], verbose=0, batch_size=1) 
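Note that this example targets the Keras 1.x API (merge, nb_epoch, and the bias/W_regularizer arguments to Dense). For reference, under Keras 2 the two player layers would be declared roughly as follows; this is a sketch of the renamed arguments only, since the functional merge call has no direct one-line equivalent:

from keras.layers import Dense
from keras.regularizers import l2

player_a = Dense(3, activation='softmax', name='player_a',
                 use_bias=False, kernel_regularizer=l2(1e-2))
player_b = Dense(3, activation='softmax', name='player_b',
                 use_bias=False, kernel_regularizer=l2(1e-2))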
Example #6
Source File: trainer.py    From deep_qa with Apache License 2.0
def _get_callbacks(self):
        """
         Returns a set of Callbacks which are used to perform various functions within Keras' .fit method.
         Here, we use an early stopping callback to add patience with respect to the validation metric and
         a Lambda callback which performs the model specific callbacks which you might want to build into
         a model, such as re-encoding some background knowledge.

         Additionally, there is also functionality to create Tensorboard log files. These can be visualised
         using 'tensorboard --logdir /path/to/log/files' after training.
        """
        early_stop = EarlyStopping(monitor=self.validation_metric, patience=self.patience)
        model_callbacks = LambdaCallback(on_epoch_begin=lambda epoch, logs: self._pre_epoch_hook(epoch),
                                         on_epoch_end=lambda epoch, logs: self._post_epoch_hook(epoch))
        callbacks = [early_stop, model_callbacks]

        if self.debug_params:
            debug_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:
                                            self.__debug(self.debug_params["layer_names"],
                                                         self.debug_params.get("masks", []), epoch))
            callbacks.append(debug_callback)
            # NB: returning here means the checkpointing callback below is never added in debug mode.
            return CallbackList(callbacks)

        # Note: we deliberately leave the `{epoch:d}` placeholder unformatted in the
        # checkpoint filename; Keras fills it in within the callback.
        if self.save_models:
            checkpointing = ModelCheckpoint(self.model_prefix + "_weights_epoch={epoch:d}.h5",
                                            save_best_only=True, save_weights_only=True,
                                            monitor=self.validation_metric)
            callbacks.append(checkpointing)

        return CallbackList(callbacks) 
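The docstring mentions TensorBoard log files, but this snippet does not show them being wired up. If it were done here, it would presumably look something like the sketch below (the log-directory naming is an assumption):

from keras.callbacks import TensorBoard

# Hypothetical addition inside _get_callbacks, before building the CallbackList:
tensorboard = TensorBoard(log_dir=self.model_prefix + "_logs")
callbacks.append(tensorboard)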
Example #7
Source File: image_saver.py    From keras_weight_animator with MIT License
def image_saver_callback(model, directory, epoch_interval=1, batch_interval=100, cmap='gray', render_videos=False):
    
    def save_image(weights, batch, layer_name, i):
        # current_epoch is module-level state, kept in sync by on_epoch_begin below
        global current_epoch
        weight = str(i + 1).zfill(2)
        epoch = str(current_epoch).zfill(3)
        fold = os.path.join(directory, 'epoch_{}-layer_{}-weights_{}'.format(epoch, layer_name, weight))
        if not os.path.isdir(fold):
            os.makedirs(fold)
        name = os.path.join('{}'.format(fold),
                            '{}_{}x{}.png'.format(str(batch).zfill(9), 
                                                  weights.shape[0], weights.shape[1]))
        plt.imsave(name, weights, cmap=cmap)
    
    def save_weight_images(batch, logs):
        global current_epoch
        if current_epoch % epoch_interval == 0 and batch % batch_interval == 0:
            for layer in model.layers:
                if len(layer.get_weights()) > 0:
                    for i, weights in enumerate(layer.get_weights()):
                        if len(weights.shape) < 2:
                            weights = np.expand_dims(weights, axis=0)
                        save_image(weights, batch, layer.name, i)
    
    def on_epoch_begin(epoch, logs):
        global current_epoch
        current_epoch = epoch

    def on_train_end(logs):
        src = os.path.dirname(os.path.abspath(__file__))
        cmd = os.path.join(src, '..', 'bin', 'create_image_sequence.sh')
        print(os.system('{} {}'.format(cmd, directory)))

    kwargs = dict()
    kwargs['on_batch_begin'] = save_weight_images
    kwargs['on_epoch_begin'] = on_epoch_begin
    if render_videos:
        kwargs['on_train_end'] = on_train_end

    return LambdaCallback(**kwargs) 
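Based on the signature above, wiring the callback into training might look like this (model, x_train, and y_train stand in for your own objects):

saver = image_saver_callback(model, 'weight_images',
                             epoch_interval=1, batch_interval=100,
                             cmap='gray', render_videos=False)
model.fit(x_train, y_train, epochs=10, callbacks=[saver])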
Example #8
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_LambdaCallback():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())

    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    p.join()
    assert not p.is_alive() 
Example #9
Source File: lr_finder.py    From keras_lr_finder with MIT License
def find(self, x_train, y_train, start_lr, end_lr, batch_size=64, epochs=1, **kw_fit):
        # If x_train contains data for multiple inputs, use length of the first input.
        # Assumption: the first element in the list is single input; NOT a list of inputs.
        N = x_train[0].shape[0] if isinstance(x_train, list) else x_train.shape[0]

        # Compute number of batches and LR multiplier
        num_batches = epochs * N / batch_size
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(num_batches))
        # Remember the initial weights so they can be restored after the sweep
        initial_weights = self.model.get_weights()

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.lr)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.lr, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit(x_train, y_train,
                       batch_size=batch_size, epochs=epochs,
                       callbacks=[callback],
                       **kw_fit)

        # Restore the weights to the state before model fitting
        self.model.set_weights(initial_weights)

        # Restore the original learning rate
        K.set_value(self.model.optimizer.lr, original_lr) 
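As a concrete check of the arithmetic: with N = 1000 samples, batch_size = 64 and epochs = 1 there are 15.625 batches, so sweeping from start_lr = 1e-5 to end_lr = 1.0 gives lr_mult = (1.0 / 1e-5) ** (1 / 15.625) ≈ 2.09, i.e. the learning rate roughly doubles on every batch:

N, batch_size, epochs = 1000, 64, 1
start_lr, end_lr = 1e-5, 1.0
num_batches = epochs * N / batch_size             # 15.625
lr_mult = (end_lr / start_lr) ** (1.0 / num_batches)
print(lr_mult)                                    # ~2.09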
Example #10
Source File: test_callbacks.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_CallbackValData():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_classes)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=[cbk], epochs=1)

    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size],
                       y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size],
                       y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)
    model.fit_generator(data_generator(True), len(X_train), epochs=1,
                        validation_data=(X_test, y_test),
                        callbacks=[cbk2])

    # callback validation data should always have x, y, and sample weights
    assert len(cbk.validation_data) == len(cbk2.validation_data) == 3
    assert cbk.validation_data[0] is cbk2.validation_data[0]
    assert cbk.validation_data[1] is cbk2.validation_data[1]
    assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape 
Example #11
Source File: model_training.py    From Real-Time-Facial-Expression-Recognition-with-DeepLearning with MIT License
def main():
    model = myVGG.VGG_16()

    if args.test is not None:
        print("Test mode")
        img = cv2.imread(args.test)
        img = fu.preprocessing(img)
        img = np.expand_dims(img, axis=0)
        y = np.expand_dims(np.asarray([0]), axis=0)
        batch_size = 1
        model.fit(img, y, nb_epoch=400,
                  batch_size=batch_size,
                  validation_split=0.2,
                  shuffle=True, verbose=0)
        return

    #input_path = args.data_path
    #print("training data path : " + input_path)
    #X_train, y_train = fu.extract_features(input_path)
    X_fname = '../data/X_train_train.npy'
    y_fname = '../data/y_train_train.npy'
    X_train = np.load(X_fname)
    y_train = np.load(y_fname)
    print(X_train.shape)
    print(y_train.shape)

    print("Training started")

    callbacks = []
    earlystop_callback = EarlyStopping(monitor='val_loss', patience=5, verbose=0)
    batch_print_callback = LambdaCallback(on_batch_begin=lambda batch, logs: print(batch))
    epoch_print_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: print("epoch:", epoch))
    callbacks.append(earlystop_callback)
    callbacks.append(batch_print_callback)
    callbacks.append(epoch_print_callback)

    batch_size = 512
    model.fit(X_train, y_train, nb_epoch=400,
              batch_size=batch_size,
              validation_split=0.2,
              shuffle=True, verbose=0,
              callbacks=callbacks)

    model.save_weights('my_model_weights.h5')
    scores = model.evaluate(X_train, y_train, verbose=0)
    print("Train loss : %.3f" % scores[0])
    print("Train accuracy : %.3f" % scores[1])
    print("Training finished")
Example #12
Source File: train.py    From dfc2019 with MIT License
def train_all(self, epochs=1000, lr=1e-3):
        checkdir = "checkpoint"
        try:
            os.mkdir(checkdir)
        except FileExistsError:
            print("Folder exists: ", checkdir)

        filename = self.settings.dataset
        filename += ".densemapnet.weights.{epoch:02d}.h5"
        filepath = os.path.join(checkdir, filename)
        checkpoint = ModelCheckpoint(filepath=filepath,
                                     save_weights_only=True,
                                     verbose=1,
                                     save_best_only=False)
        predict_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: self.predict_disparity())
        callbacks = [checkpoint, predict_callback]
        self.load_train_data(1)
        if self.network is None:
            self.network = DenseMapNet(settings=self.settings)
            self.model = self.network.build_model()

#        print("Using loss=mse_disparity on final conv layer and Adam")
        print("Using loss=mae_disparity on final conv layer and Adam")
        print(self.settings.lr)
#        self.model.compile(loss='mse',
#        self.model.compile(loss=_loss_mse_disparity,
        self.model.compile(loss=_loss_mae_disparity,
#        self.model.compile(loss=_loss_3pe_disparity,
#        optimizer=RMSprop(lr=lr, decay=1e-6))
        optimizer=Adam(lr=self.settings.lr))		
			
        if self.settings.model_weights:
            if self.settings.notrain:
                self.predict_disparity()
                return

        x = [self.train_lx, self.train_rx]
        self.model.fit(x,
                       self.train_dx,
                       epochs=epochs,
                       batch_size=self.settings.batch_size,
                       shuffle=True,
                       callbacks=callbacks) 
Example #13
Source File: train.py    From dfc2019 with MIT License
def train_model(self):

        # setup checkpoint folder
        checkdir = "checkpoint"
        try:
            os.mkdir(checkdir)
        except FileExistsError:
            print("Folder exists: ", checkdir)

        # loop on epochs
        lr = self.args.lr + self.args.decay
        for i in range(1, self.args.n_epochs + 1):

            # update the learning rate
            lr = lr - self.args.decay

            # randomize order of train files
            indexes = np.arange(1, self.args.n_trains + 1)
            np.random.shuffle(indexes)

            # loop on train files
            is_compiled = False
            for j in indexes:

                # load training file
                self.load_train_data(j)

                # setup callback for writing new model checkpoint files
                filename = "us3d.icnet.weights.%d-%d.h5" % (i, j)
                filepath = os.path.join(checkdir, filename)
                checkpoint = ModelCheckpoint(filepath=filepath, save_weights_only=True, verbose=1, save_best_only=False)
                predict_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: self.compute_accuracy())
                callbacks = [checkpoint, predict_callback]

                # build the model, compile, and fit
                height = self.train_images[0].shape[0]
                width = self.train_images[0].shape[1]
                bands = self.train_images[0].shape[2]
                myloss = tversky_loss
                if self.model is None:
                    self.model = build_icnet(height, width, bands, self.n_classes, weights_path=self.args.checkpoint,
                                             train=True)
                if not is_compiled:
                    self.model.compile(optimizer=Adam(lr=lr), loss=myloss, loss_weights=[1.0, 0.4, 0.16])
                    is_compiled = True
                self.model.fit(self.train_images, [self.Y1, self.Y2, self.Y3], epochs=1,
                               batch_size=self.args.batch_size, shuffle=True, callbacks=callbacks)

Example #14
Source File: predictor.py    From densemapnet with MIT License
def train_all(self, epochs=1000, lr=1e-3):
        checkdir = "checkpoint"
        try:
            os.mkdir(checkdir)
        except FileExistsError:
            print("Folder exists: ", checkdir)

        filename = self.settings.dataset
        filename += ".densemapnet.weights.{epoch:02d}.h5"
        filepath = os.path.join(checkdir, filename)
        checkpoint = ModelCheckpoint(filepath=filepath,
                                     save_weights_only=True,
                                     verbose=1,
                                     save_best_only=False)
        predict_callback = LambdaCallback(
            on_epoch_end=lambda epoch, logs: self.predict_disparity())
        callbacks = [checkpoint, predict_callback]
        self.load_train_data(1)
        if self.network is None:
            self.network = DenseMapNet(settings=self.settings)
            self.model = self.network.build_model(lr=lr)

        if self.settings.otanh:
            print("Using loss=mse on tanh output layer")
            self.model.compile(loss='mse',
                               optimizer=RMSprop(lr=lr, decay=1e-6))
        else:
            print("Using loss=crossent on sigmoid output layer")
            self.model.compile(loss='binary_crossentropy',
                               optimizer=RMSprop(lr=lr, decay=1e-6))

        if self.settings.model_weights:
            if self.settings.notrain:
                self.predict_disparity()
                return

        x = [self.train_lx, self.train_rx]
        self.model.fit(x,
                       self.train_dx,
                       epochs=epochs,
                       batch_size=4,
                       shuffle=True,
                       callbacks=callbacks)