Python keras.callbacks Examples

The following are 25 code examples showing how to use the keras.callbacks module. They are extracted from open source projects; the source project, author, file, and license are noted above each example.
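A minimal sketch of the pattern most of these examples share, assuming an already compiled model and NumPy arrays x_train / y_train: callback objects are instantiated and passed as a list to fit.

from keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping(monitor='val_loss', patience=5),       # stop once val_loss stalls
    ModelCheckpoint('best.hdf5', save_best_only=True),   # keep only the best weights
]
model.fit(x_train, y_train, validation_split=0.1, epochs=100,
          callbacks=callbacks)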

Example 1
Project: AIX360   Author: IBM   File: resnet_keras_model.py    License: Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): the current epoch index (0-based)
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
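A schedule function like this only takes effect once it is attached to training through a callback. A sketch of the usual wiring from the upstream Keras cifar10_resnet.py example this code derives from (the model and data arrays are assumed to exist):

import numpy as np
from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau

lr_scheduler = LearningRateScheduler(lr_schedule)
# The upstream example pairs the fixed schedule with a plateau-based reducer.
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0,
                               patience=5, min_lr=0.5e-6)
model.fit(x_train, y_train, epochs=200,
          validation_data=(x_test, y_test),
          callbacks=[lr_scheduler, lr_reducer])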
Example 2
Project: robotreviewer   Author: ijmarshall   File: punchline_extractor.py    License: GNU General Public License v3.0
def train_simple_inference_net(n_epochs=30):
    inf_net = SimpleInferenceNet()
    tr_ids, val_ids, te_ids = train_document_ids(), validation_document_ids(), test_document_ids()
    tr_ids = list(train_document_ids())
    train_Xy, inference_vectorizer = get_train_Xy(tr_ids, sections_of_interest=None, vocabulary_file=None, include_sentence_span_splits=False, include_raw_texts=True)

    X_k, y_k = make_Xy_inference(train_Xy, inf_net.bc)
    print("train data for inference task loaded!")

    val_Xy = get_Xy(val_ids, inference_vectorizer,  include_raw_texts=True)
    X_kv, y_kv = make_Xy_inference(val_Xy, inf_net.bc)
    print("val data loaded!")

    filepath="inference.weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    with open("inference_model.json", "w") as outf:
        outf.write(inf_net.model.to_json())

    print("fitting inference model!")
    inf_net.model.fit(X_k, y_k, validation_data=(X_kv, y_kv), callbacks=callbacks_list, epochs=n_epochs) 
Example 3
Project: Adaptive-Diversity-Promoting   Author: P2333   File: train_mnist.py    License: Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 15 and 30 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): the current epoch index (0-based)
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 30:
        lr *= 1e-2
    elif epoch > 15:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 4
Project: Adaptive-Diversity-Promoting   Author: P2333   File: train_cifar.py    License: Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 100 and 150 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): the current epoch index (0-based)
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 150:
        lr *= 1e-2
    elif epoch > 100:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 5
Project: Adaptive-Diversity-Promoting   Author: P2333   File: advtrain_cifar10.py    License: Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120 and 160 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): the current epoch index (0-based)
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 6
Project: PJ_NLP   Author: moxiu2012   File: train.py    License: Apache License 2.0
def train():
    # load data
    train_dataset = Dataset(training=True)
    dev_dataset = Dataset(training=False)

    # model
    MODEL = name_model[model_name]
    model = MODEL(train_dataset.vocab_size, conf.n_classes, train_dataset.emb_mat)

    # callback
    my_callback = MyCallback()
    f1 = F1(dev_dataset.gen_batch_data(), dev_dataset.steps_per_epoch)
    checkpointer = ModelCheckpoint('data/{}.hdf5'.format(model_name), save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')

    # train
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy, metrics=['acc'])
    model.fit_generator(train_dataset.gen_batch_data(),
                        steps_per_epoch=train_dataset.steps_per_epoch,
                        verbose=0,
                        epochs=conf.epochs, callbacks=[my_callback, checkpointer, early_stop, f1])
    keras.models.save_model(model, conf.model_path.format(model_name)) 
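MyCallback and F1 above are custom callbacks defined elsewhere in PJ_NLP. As an illustration of the pattern only, a hypothetical F1 callback that scores a validation generator at the end of each epoch might look like this (the class body and argument names are assumptions, not the project's actual code):

import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import f1_score

class F1(Callback):
    # Hypothetical sketch: macro F1 over a validation generator, once per epoch.
    def __init__(self, val_gen, steps):
        super(F1, self).__init__()
        self.val_gen = val_gen
        self.steps = steps

    def on_epoch_end(self, epoch, logs=None):
        y_true, y_pred = [], []
        for _ in range(self.steps):
            x_batch, y_batch = next(self.val_gen)
            y_true.extend(np.argmax(y_batch, axis=1))
            y_pred.extend(np.argmax(self.model.predict(x_batch), axis=1))
        print('epoch %d val_f1: %.4f'
              % (epoch + 1, f1_score(y_true, y_pred, average='macro')))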
Example 7
Project: IdenProf   Author: OlafenwaMoses   File: idenprof.py    License: MIT License
def lr_schedule(epoch):
    """
    Learning Rate Schedule
    """
    # Learning rate is scheduled to be reduced after 80, 120, 160, 180  epochs. Called  automatically  every
    #  epoch as part  of  callbacks  during  training.



    lr = 1e-3
    if epoch > 180:
        lr *= 1e-4
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1

    print('Learning rate: ', lr)
    return lr 
Example 8
Project: keras-adabound   Author: titu1994   File: cifar10.py    License: MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced by 10x after 150 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): the current epoch index (0-based; shifted to 1-based below)
    # Returns
        lr (float32): learning rate
    """
    lr = 0.001
    epoch += 1

    # if epoch >= 90:
    #     lr *= 5e-2
    # elif epoch >= 60:
    #     lr *= 1e-1
    # elif epoch >= 30:
    #     lr *= 5e-1

    if epoch >= 150:
        lr *= 0.1
    print('Learning rate: ', lr)
    return lr 
Example 9
Project: ntm_keras   Author: flomlo   File: testing_utils.py    License: BSD 3-Clause "New" or "Revised" License
def train_model(model, epochs=10, min_size=5, max_size=20, callbacks=None, verboose=False):
    input_dim = model.input_dim
    output_dim = model.output_dim
    batch_size = model.batch_size

    sample_generator = get_sample(batch_size=batch_size, in_bits=input_dim, out_bits=output_dim,
                                  max_size=max_size, min_size=min_size)
    if verboose:
        for j in range(epochs):
            model.fit_generator(sample_generator, steps_per_epoch=10, epochs=j+1, callbacks=callbacks, initial_epoch=j)
            print("currently at epoch {0}".format(j+1))
            for i in [5,10,20,40]:
                test_model(model, sequence_length=i, verboose=True)
    else:
        model.fit_generator(sample_generator, steps_per_epoch=10, epochs=epochs, callbacks=callbacks)

    print("done training") 
Example 10
Project: ntm_keras   Author: flomlo   File: testing_utils.py    License: BSD 3-Clause "New" or "Revised" License
def lengthy_test(model, testrange=[5,10,20,40,80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name 
    tensorboard = TensorBoard(log_dir=log_path,
                                write_graph=False, #This eats a lot of space. Enable with caution!
                                #histogram_freq = 1,
                                write_images=True,
                                batch_size = model.batch_size,
                                write_grads=True)
    model_saver =  ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))
    return 
Example 11
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cifar10_resnet.py    License: MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): the current epoch index (0-based)

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 12
Project: poetry_generator_Keras   Author: ioiogoo   File: poetry_model.py    License: MIT License
def train(self):
        '''Train the model'''
        number_of_epoch = len(self.files_content) // self.config.batch_size

        if not self.model:
            self.build_model()

        self.model.summary()

        self.model.fit_generator(
            generator=self.data_generator(),
            verbose=True,
            steps_per_epoch=self.config.batch_size,
            epochs=number_of_epoch,
            callbacks=[
                keras.callbacks.ModelCheckpoint(self.config.weight_file, save_weights_only=False),
                LambdaCallback(on_epoch_end=self.generate_sample_result)
            ]
        ) 
Example 13
Project: face_landmark_dnn   Author: junhwanjang   File: train_mobilenets.py    License: MIT License
def main():
        # Load data and define X and y
        PATH = "./data/64_64_1/offset_1.3/"
        X = np.load(PATH + "basic_dataset_img.npz")
        y = np.load(PATH + "basic_dataset_pts.npz")
        X = X['arr_0']
        y = y['arr_0'].reshape(-1, 136)
        

        print("Define X and Y")
        print("=======================================")
        
        # Split train / test dataset
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        print("Success of getting train / test dataset")
        print("=======================================")
        print("X_train: ", X_train.shape)
        print("y_train: ", y_train.shape)
        print("X_test: ", X_test.shape)
        print("y_test: ", y_test.shape)
        print("=======================================")

        model.compile(loss=smoothL1, optimizer=keras.optimizers.Adam(lr=1e-3), metrics=['mape'])
        print(model.summary())
        # checkpoint
        filepath="./mobilenet_checkpoints/smooth_L1-{epoch:02d}-{val_mean_absolute_percentage_error:.5f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
        callbacks_list = [checkpoint]
        history = model.fit(X_train, y_train, batch_size=64, epochs=10000, shuffle=True,\
                            verbose=1, validation_data=(X_test, y_test), callbacks=callbacks_list)

        # Save model
        model.save("./model/face_landmark_dnn.h5")
        print("=======================================")
        print("Save Final Model")
        print("=======================================") 
Example 14
Project: face_landmark_dnn   Author: junhwanjang   File: train_basic_models.py    License: MIT License
def main():
        # Load data and define X and y
        PATH = "./data/64_64_1/offset_1.3/"
        X = np.load(PATH + "basic_dataset_img.npz")
        y = np.load(PATH + "basic_dataset_pts.npz")
        X = X['arr_0']
        y = y['arr_0'].reshape(-1, 136)
        

        print("Define X and Y")
        print("=======================================")
        
        # Split train / test dataset
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        print("Success of getting train / test dataset")
        print("=======================================")
        print("X_train: ", X_train.shape)
        print("y_train: ", y_train.shape)
        print("X_test: ", X_test.shape)
        print("y_test: ", y_test.shape)
        print("=======================================")

        model.compile(loss=smoothL1, optimizer=keras.optimizers.Adam(lr=1e-3), metrics=['mape'])
        print(model.summary())
        # checkpoint
        filepath="./basic_checkpoints/smooth_L1-{epoch:02d}-{val_mean_absolute_percentage_error:.5f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
        callbacks_list = [checkpoint]
        history = model.fit(X_train, y_train, batch_size=64, epochs=10000, shuffle=True,\
                            verbose=1, validation_data=(X_test, y_test), callbacks=callbacks_list)

        # Save model
        model.save("./model/face_landmark_dnn.h5")
        print("=======================================")
        print("Save Final Model")
        print("=======================================") 
Example 15
Project: imgclsmob   Author: osmr   File: train_ke.py    License: MIT License
def train_net(net,
              train_gen,
              val_gen,
              train_num_examples,
              val_num_examples,
              num_epochs,
              checkpoint_filepath,
              start_epoch1):
    checkpointer = ModelCheckpoint(
        filepath=checkpoint_filepath,
        verbose=1,
        save_best_only=True)

    tic = time.time()

    net.fit_generator(
        generator=train_gen,
        # samples_per_epoch is the legacy Keras 1 argument name; Keras 2's legacy
        # interface maps it onto steps_per_epoch with a deprecation warning
        samples_per_epoch=train_num_examples,
        epochs=num_epochs,
        verbose=True,
        callbacks=[checkpointer],
        validation_data=val_gen,
        validation_steps=val_num_examples,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        shuffle=True,
        initial_epoch=(start_epoch1 - 1))

    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic)) 
Example 16
Project: robotreviewer   Author: ijmarshall   File: punchline_extractor.py    License: GNU General Public License v3.0
def train():
    # train the model -- this assumes access to evidence_inference:
    # https://github.com/jayded/evidence-inference/tree/master/evidence_inference
    # which is not needed in general to load the trained model.
    #
    # if inference_true flag is on, then a model will also be fit that predicts the
    # outcome (sig. decrease, no diff, sig. increase) given punchline snippets.
    from evidence_inference.preprocess.preprocessor import get_Xy, train_document_ids, test_document_ids, validation_document_ids, get_train_Xy

    extractor_model = PunchlineExtractor()

    tr_ids, val_ids, te_ids = train_document_ids(), validation_document_ids(), test_document_ids()
    tr_ids = list(train_document_ids())
    train_Xy, inference_vectorizer = get_train_Xy(tr_ids, sections_of_interest=None, vocabulary_file=None, include_sentence_span_splits=False, include_raw_texts=True)
    # Create vectors and targets for extraction task
    X_k, y_k = make_Xy(train_Xy, extractor_model.bc)
    print("train data loaded!") 
 
    val_Xy = get_Xy(val_ids, inference_vectorizer,  include_raw_texts=True)    
    X_kv, y_kv = make_Xy(val_Xy, extractor_model.bc, neg_samples=1)
    print("val data loaded!") 

    # Fit the model!
    filepath="punchline.weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    with open("punchline_model.json", "w") as outf:
        outf.write(extractor_model.model.to_json())

    print("fitting punchline extractor!")
    extractor_model.model.fit(X_k, y_k, validation_data=(X_kv, y_kv), callbacks=callbacks_list, epochs=50) 
Example 17
Project: FaceNet   Author: foamliu   File: train.py    License: Apache License 2.0
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
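This __init__ is the top of a small custom callback: it keeps a reference to the template (single-GPU) model so that checkpoints save it rather than the multi_gpu_model wrapper it is trained through. A sketch of what the full class plausibly looks like (the class name and the on_epoch_end body are assumptions based on the standard multi-GPU checkpointing pattern, not the project's verbatim code):

import keras

class MyCbk(keras.callbacks.Callback):
    def __init__(self, model):
        keras.callbacks.Callback.__init__(self)
        self.model_to_save = model  # the template (single-GPU) model

    def on_epoch_end(self, epoch, logs=None):
        # Save the template model, not self.model (the parallel wrapper).
        self.model_to_save.save('model_at_epoch_%d.h5' % epoch)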
Example 18
Project: IdenProf   Author: OlafenwaMoses   File: idenprof.py    License: MIT License
def train_network():
    download_idenprof()

    print(os.listdir(os.path.join(execution_path, "idenprof")))

    optimizer = keras.optimizers.Adam(lr=0.01, decay=1e-4)
    batch_size = 32
    num_classes = 10
    epochs = 200

    model = ResNet50((224, 224, 3), num_classes=num_classes)
    model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
    model.summary()

    print("Using real time Data Augmentation")
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        horizontal_flip=True)

    test_datagen = ImageDataGenerator(
        rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(DATASET_TRAIN_DIR, target_size=(224, 224),
                                                        batch_size=batch_size, class_mode="categorical")
    test_generator = test_datagen.flow_from_directory(DATASET_TEST_DIR, target_size=(224, 224), batch_size=batch_size,
                                                      class_mode="categorical")

    # `checkpoint` and `lr_scheduler` are callbacks defined elsewhere in the
    # original idenprof.py (the lr_schedule they use is shown in Example 7)
    model.fit_generator(train_generator, steps_per_epoch=int(9000 / batch_size), epochs=epochs,
                        validation_data=test_generator,
                        validation_steps=int(2000 / batch_size), callbacks=[checkpoint, lr_scheduler])


# ----------------- The Section Responsible for Inference --------------------- 
Example 19
Project: mljar-supervised   Author: mljar   File: nn.py    License: MIT License
def fit(self, X, y, X_validation=None, y_validation=None, log_to_file=None):

        if self.model is None:
            self.create_model(input_dim=X.shape[1])
        
        batch_size = 1024
        if X.shape[0] < batch_size * 5:
            batch_size = 32
        
        self.model.fit(X, y, batch_size=batch_size, epochs=self.rounds, verbose=False)
        
        """
        # Experimental ...
        es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=50)
        mc = ModelCheckpoint(
            "best_model.h5",
            monitor="val_loss",
            mode="min",
            verbose=0,
            save_best_only=True,
        )
        self.model.fit(
            X,
            y,
            validation_data=(X_validation, y_validation),
            batch_size=4096,
            epochs=1000,
            verbose=False,
            callbacks=[es, mc],
        )
        self.model = load_model("best_model.h5")
        """ 
Example 20
Project: Luna2016-Lung-Nodule-Detection   Author: codedecde   File: LUNA_unet.py    License: MIT License
def train(use_existing):
    print ("Loading the options ....")
    options = get_options()
    print ("epochs: %d"%options.epochs)
    print ("batch_size: %d"%options.batch_size)
    print ("filter_width: %d"%options.filter_width)
    print ("stride: %d"%options.stride)
    print ("learning rate: %f"%options.lr)
    sys.stdout.flush()

    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train = np.load(options.out_dir+"trainImages.npy").astype(np.float32)
    imgs_mask_train = np.load(options.out_dir+"trainMasks.npy").astype(np.float32)

    # Renormalizing the masks
    imgs_mask_train[imgs_mask_train > 0.] = 1.0
    
    # Now the Test Data
    imgs_test = np.load(options.out_dir+"testImages.npy").astype(np.float32)
    imgs_mask_test_true = np.load(options.out_dir+"testMasks.npy").astype(np.float32)
    # Renormalizing the test masks
    imgs_mask_test_true[imgs_mask_test_true > 0] = 1.0    

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet_small(options)
    weight_save = WeightSave(options)
    accuracy = Accuracy(copy.deepcopy(imgs_test),copy.deepcopy(imgs_mask_test_true))
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    # nb_epoch is the Keras 1 spelling of what Keras 2 calls `epochs`
    model.fit(x=imgs_train, y=imgs_mask_train, batch_size=options.batch_size,
              nb_epoch=options.epochs, verbose=1, shuffle=True,
              callbacks=[weight_save, accuracy])
    return model 
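WeightSave and Accuracy are custom callbacks defined elsewhere in LUNA_unet.py. For illustration only, a hypothetical WeightSave in the same spirit (the options.out_dir attribute mirrors the options object used above; the real class may differ):

from keras.callbacks import Callback

class WeightSave(Callback):
    # Hypothetical sketch: persist model weights after every epoch.
    def __init__(self, options):
        super(WeightSave, self).__init__()
        self.options = options

    def on_epoch_end(self, epoch, logs=None):
        path = '%sweights_epoch_%03d.h5' % (self.options.out_dir, epoch)
        self.model.save_weights(path)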
Example 21
Project: Deep-Image-Matting   Author: foamliu   File: train_final.py    License: MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 22
Project: Deep-Image-Matting   Author: foamliu   File: train_encoder_decoder.py    License: MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 23
Project: Deep-Image-Matting   Author: foamliu   File: train.py    License: MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 24
Project: CNNArt   Author: thomaskuestner   File: motion_vgg_CNN2D.py    License: Apache License 2.0
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    # weight_name = sOutPath + '/' + model_name + '_weights.h5'
    # model_json = sOutPath + model_name + '_json'
    model_all = sOutPath + model_name + '_model.h5'

    # model = createModel(patchSize)
    # opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]
    #
    # model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    # model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    model = load_model(model_all)

    # assume the artifact-affected case is the one to be tested
    # y_test = np.ones((len(X_test),1))

    X_test = np.expand_dims(X_test, axis=1)
    y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 1)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = sOutPath + '/' + model_name + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example 25
Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: mnist_tfrecord.py    License: MIT License
def __init__(self, model, steps, metrics_prefix='val', verbose=1):
        # parameters of the callback, passed during initialization;
        # the evaluation model is passed in directly
        super(EvaluateInputTensor, self).__init__()
        self.val_model = model
        self.num_steps = steps
        self.verbose = verbose
        self.metrics_prefix = metrics_prefix
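Only the constructor is shown here. In the upstream mnist_tfrecord.py example, the companion on_epoch_end copies the training weights into the tensor-fed evaluation model, evaluates it for num_steps, and merges the prefixed metrics into the logs; roughly (a lightly abridged sketch of that example, not verbatim):

def on_epoch_end(self, epoch, logs=None):
        # Sync weights from the model being trained into the evaluation model,
        # which reads its inputs from tensors rather than fed arrays.
        self.val_model.set_weights(self.model.get_weights())
        results = self.val_model.evaluate(None, None, steps=int(self.num_steps),
                                          verbose=self.verbose)
        for result, name in zip(results, self.val_model.metrics_names):
            logs[self.metrics_prefix + '_' + name] = result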