Python keras.callbacks.History() Examples

The following are 9 code examples of keras.callbacks.History(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.callbacks, or try the search function.
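Before diving into the examples, here is a minimal, self-contained sketch of the core behavior: every model.fit call already returns a History object whose history attribute maps each recorded metric to a per-epoch list, so an explicit History() callback is only needed when the return value of fit is not directly accessible.

from keras.models import Sequential
from keras.layers import Dense
import numpy as np

# Toy data: 100 samples, 8 features, binary labels.
X = np.random.rand(100, 8)
y = np.random.randint(0, 2, size=(100, 1))

model = Sequential([Dense(4, activation='relu', input_shape=(8,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy')

# fit() returns a History; history.history holds per-epoch metrics.
history = model.fit(X, y, epochs=3, validation_split=0.1, verbose=0)
print(history.history['loss'])      # training loss per epoch
print(history.history['val_loss'])  # validation loss per epoch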
Example #1
Source File: image_classifier.py    From imageatm with Apache License 2.0
def fit_generator(self, **kwargs) -> History:
        """
        Trains the classifier's model on data generated batch-by-batch by a Python generator.

        Args:
            generator: Input samples from a data generator on which to train the model.
            validation_data: Input samples from a data generator on which to evaluate the model.
            epochs: Number of epochs to train the model.
            initial_epoch: Epoch at which to start training.
            verbose: Verbosity mode.
            use_multiprocessing: Use process-based threading.
            workers: Maximum number of processes.
            max_queue_size: Maximum size for the generator queue.
            callbacks: List of callbacks to apply during training.

        Returns:
            history: A `History` object.

        """
        return self.model.fit_generator(**kwargs) 
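The kwargs here are forwarded verbatim to keras.models.Model.fit_generator, so a call might look like the following sketch (the classifier instance and the two generators are assumptions, not part of the project above):

# Hypothetical usage; classifier, train_gen and val_gen are assumptions.
history = classifier.fit_generator(
    generator=train_gen,
    validation_data=val_gen,
    epochs=10,
    workers=4,
    use_multiprocessing=False,
    verbose=1,
)
print(history.history.keys())  # e.g. dict_keys(['loss', 'val_loss', ...])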
Example #2
Source File: network.py    From entity_embeddings_categorical with MIT License
def fit(self, X_train: np.ndarray, y_train: np.ndarray, X_val: np.ndarray, y_val: np.ndarray) -> History:
        """
        Fits the entity embeddings model on the given training data, validating on the given validation data.
        :param X_train: training features
        :param y_train: training targets
        :param X_val: validation features
        :param y_val: validation targets
        :return: a History object
        """

        self.max_log_y = max(np.max(np.log(y_train)), np.max(np.log(y_val)))

        history = self.model.fit(x=transpose_to_list(X_train),
                                 y=self._val_for_fit(y_train),
                                 validation_data=(transpose_to_list(X_val), self._val_for_fit(y_val)),
                                 epochs=self.config.epochs,
                                 batch_size=self.config.batch_size)
        return history 
Example #3
Source File: visualization_utils.py    From entity_embeddings_categorical with MIT License
def make_plot_from_history(history: History,
                           output_path: str = None,
                           extension: str = 'pdf') -> Figure:
    """
    Builds a Figure object containing the training-loss curve across epochs.
    :param history: the History returned by the model.fit method
    :param output_path: (optional) where the image will be saved
    :param extension: (optional) the extension of the file
    :return: a Figure object containing the plot
    """
    loss = history.history['loss']

    fig = plt.figure(figsize=(10, 10))
    plt.xlabel("Epochs")
    plt.ylabel("Loss")

    plt.plot(loss)

    if output_path:
        os.makedirs(output_path, exist_ok=True)
        plt.savefig(os.path.join(output_path, PLOT_LOSS_FORMAT % extension))

    return fig 
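A short usage sketch, assuming a History obtained from any model.fit call (the model and data below are placeholders, not part of the project above):

# Hypothetical usage; model, X and y are assumptions.
history = model.fit(X, y, epochs=20, verbose=0)
fig = make_plot_from_history(history, output_path='plots', extension='png')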
Example #4
Source File: Stock_Prediction_Model_Stateless_LSTM.py    From StockRecommendSystem with MIT License
def train_data(self, data_feature, window, LabelColumnName):
        # history = History()
        
        #X_train, y_train, X_test, y_test = self.prepare_train_test_data(data_feature, LabelColumnName)
        X_train, y_train, X_test, y_test = self.prepare_train_data(data_feature, LabelColumnName)
        model = self.build_model(window, X_train, y_train, X_test, y_test)

        # fit() returns a History object; capture it for plotting below
        history = model.fit(
            X_train,
            y_train,
            batch_size=self.paras.batch_size,
            epochs=self.paras.epoch,
            # validation_split=self.paras.validation_split,
            # validation_data = (X_known_lately, y_known_lately),
            # callbacks=[history],
            # shuffle=True,
            verbose=self.paras.verbose
        )
        # save model
        self.save_training_model(model, window)
        recall_train, tmp = self.predict(model, X_train, y_train)
        # print('train recall is', recall_train)
        # print(' ############## validation on test data ############## ')
        recall_test, tmp = self.predict(model, X_test, y_test)
        # print('test recall is',recall_test)

        # plot training loss/ validation loss
        if self.paras.plot:
            self.plot_training_curve(history)

        return model


    ###################################
    ###                             ###
    ###         Predicting          ###
    ###                             ###
    ################################### 
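For reference, a minimal sketch of what a plot_training_curve helper might do with the captured History; this is an assumption for illustration, not the project's actual implementation:

import matplotlib.pyplot as plt

def plot_training_curve(history):
    # 'loss' is always recorded; 'val_loss' only when validation is enabled.
    plt.plot(history.history['loss'], label='loss')
    if 'val_loss' in history.history:
        plt.plot(history.history['val_loss'], label='val_loss')
    plt.xlabel('Epoch')
    plt.legend()
    plt.show()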
Example #5
Source File: co_lstm_predict_day.py    From copper_price_forecast with GNU General Public License v3.0
def main():
    global_start_time = time.time()
    print('> Loading data... ')
    # mm_scaler, X_train, y_train, X_test, y_test = load_data()
    X_train, y_train, X_test, y_test = load_data()
    print('> Data Loaded. Compiling...')

    model = build_model()
    model.summary()

    # keras.callbacks.History records the loss and val_loss of each epoch
    hist = History()
    model.fit(X_train, y_train, batch_size=Conf.BATCH_SIZE, epochs=Conf.EPOCHS, shuffle=True,
              validation_split=0.05, callbacks=[hist])

    # print the loss and val_loss history to the console
    print(hist.history['loss'])
    print(hist.history['val_loss'])

    # visualize the loss and val_loss history
    plot_loss(hist.history['loss'], hist.history['val_loss'])
    # predicted = predict_by_days(model, X_test, 20)
    predicted = predict_by_day(model, X_test)

    print('Training duration (s) : ', time.time() - global_start_time)

    # predicted = inverse_trans(mm_scaler, predicted)
    # y_test = inverse_trans(mm_scaler, y_test)

    # model evaluation
    model_evaluation(pd.DataFrame(predicted), pd.DataFrame(y_test))

    # visualize the predictions
    model_visualization(y_test, predicted)
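Note that the explicit History() callback above is redundant: model.fit already returns an equivalent History object. A minimal equivalent, reusing the names from this example:

# Equivalent without the explicit callback; fit() returns the History.
hist = model.fit(X_train, y_train, batch_size=Conf.BATCH_SIZE,
                 epochs=Conf.EPOCHS, shuffle=True, validation_split=0.05)
print(hist.history['loss'])
print(hist.history['val_loss'])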
Example #6
Source File: co_lstm_predict_sequence.py    From copper_price_forecast with GNU General Public License v3.0
def main():
    global_start_time = time.time()
    print('> Loading data... ')
    # mm_scaler, X_train, y_train, X_test, y_test = load_data()
    X_train, y_train, X_test, y_test = load_data()
    print('> Data Loaded. Compiling...')

    model = build_model()
    model.summary()

    # keras.callbacks.History records the loss and val_loss of each epoch
    hist = History()
    model.fit(X_train, y_train, batch_size=Conf.BATCH_SIZE, epochs=Conf.EPOCHS, shuffle=True,
              validation_split=0.05, callbacks=[hist])

    # print the loss and val_loss history to the console
    print(hist.history['loss'])
    print(hist.history['val_loss'])

    # visualize the loss and val_loss history
    plot_loss(hist.history['loss'], hist.history['val_loss'])
    # predicted = predict_by_days(model, X_test, 20)
    predicted = predict_by_day(model, X_test)

    print('Training duration (s) : ', time.time() - global_start_time)

    # predicted = inverse_trans(mm_scaler, predicted)
    # y_test = inverse_trans(mm_scaler, y_test)

    # model evaluation
    model_evaluation_multi_step(pd.DataFrame(predicted), pd.DataFrame(y_test))

    # visualize the predictions
    model_visulaization_multi_step(y_test, predicted)
Example #7
Source File: image_classifier.py    From imageatm with Apache License 2.0
def predict_generator(self, data_generator: DataGenerator, **kwargs) -> np.ndarray:
        """
        Generates predictions for the input samples from a data generator.

        Args:
            data_generator: Input samples from a data generator.
            workers: Maximum number of processes.
            use_multiprocessing: Use process-based threading.
            verbose: Verbosity mode.

        Returns:
            predictions: Numpy array(s) of predictions.
        """
        return self.model.predict_generator(data_generator, **kwargs) 
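Again the kwargs pass straight through to keras.models.Model.predict_generator, so a call might look like this sketch (classifier and val_generator are assumptions):

# Hypothetical usage; classifier and val_generator are assumptions.
predictions = classifier.predict_generator(val_generator, workers=4, verbose=1)
print(predictions.shape)  # (num_samples, num_classes)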
Example #8
Source File: hyopt.py    From kopt with MIT License
def _train_and_eval_single(train, valid, model,
                           batch_size=32, epochs=300, use_weight=False,
                           callbacks=[], eval_best=False, add_eval_metrics={}, custom_objects=None):
    """Fit and evaluate a keras model

    eval_best: if True, load the checkpointed model for evaluation
    """
    def _format_keras_history(history):
        """nicely format keras history
        """
        return {"params": history.params,
                "loss": merge_dicts({"epoch": history.epoch}, history.history),
                }
    if use_weight:
        sample_weight = train[2]
    else:
        sample_weight = None
    # train the model
    logger.info("Fit...")
    history = History()
    model.fit(train[0], train[1],
              batch_size=batch_size,
              validation_data=valid[:2],
              epochs=epochs,
              sample_weight=sample_weight,
              verbose=2,
              callbacks=[history] + callbacks)

    # get history
    hist = _format_keras_history(history)
    # load and eval the best model
    if eval_best:
        mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)]
        assert len(mcp) == 1
        model = load_model(mcp[0].filepath, custom_objects=custom_objects)

    return eval_model(model, valid, add_eval_metrics), hist 
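The merge_dicts helper is imported from elsewhere in kopt; a plausible sketch of its behavior, together with the shape of the value _format_keras_history produces (both are assumptions, not the library's actual code):

def merge_dicts(*dicts):
    # Assumed behavior: plain dict union, later keys winning.
    out = {}
    for d in dicts:
        out.update(d)
    return out

# For a 2-epoch run, _format_keras_history would then return roughly:
# {"params": {"epochs": 2, "batch_size": 32, ...},
#  "loss": {"epoch": [0, 1],
#           "loss": [0.69, 0.52],
#           "val_loss": [0.70, 0.55]}}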
Example #9
Source File: models.py    From deep_qa with Apache License 2.0
def _prepare_callbacks(self,
                           callbacks: List[Callback],
                           val_ins: List[numpy.ndarray],
                           epochs: int,
                           batch_size: int,
                           num_train_samples: int,
                           callback_metrics: List[str],
                           do_validation: bool,
                           verbose: int):
        """
        Sets up Keras callbacks to perform various monitoring functions during training.
        """
        self.history = History()  # pylint: disable=attribute-defined-outside-init
        callbacks = [BaseLogger()] + (callbacks or []) + [self.history]
        if verbose:
            callbacks += [ProgbarLogger()]
        callbacks = CallbackList(callbacks)

        # it's possible to callback a different model than self
        # (used by Sequential models).
        if hasattr(self, 'callback_model') and self.callback_model:
            callback_model = self.callback_model
        else:
            callback_model = self  # pylint: disable=redefined-variable-type

        callbacks.set_model(callback_model)
        callbacks.set_params({
                'batch_size': batch_size,
                'epochs': epochs,
                'samples': num_train_samples,
                'verbose': verbose,
                'do_validation': do_validation,
                'metrics': callback_metrics or [],
        })
        callbacks.on_train_begin()
        callback_model.stop_training = False
        for cbk in callbacks:
            cbk.validation_data = val_ins

        return callbacks, callback_model
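For context, a sketch of the manual training loop such a prepared CallbackList typically drives; the batch loop is elided and epoch_logs is a placeholder (on_train_begin has already been fired inside _prepare_callbacks):

# Hypothetical driver loop; arguments to _prepare_callbacks elided.
callbacks, callback_model = self._prepare_callbacks(...)
for epoch in range(epochs):
    callbacks.on_epoch_begin(epoch)
    # ... run the batches, calling callbacks.on_batch_begin(i) and
    # callbacks.on_batch_end(i, batch_logs), accumulating epoch_logs ...
    callbacks.on_epoch_end(epoch, epoch_logs)
    if callback_model.stop_training:  # set by e.g. EarlyStopping
        break
callbacks.on_train_end()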