Python keras.callbacks Examples

The following are 30 code examples of the keras.callbacks module, collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the keras module, or try the search function.
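As a quick orientation before the individual examples, here is a minimal, self-contained sketch of how objects from keras.callbacks are passed to model.fit; the toy model, random data, and file name are illustrative assumptions, not taken from any project below.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ModelCheckpoint, EarlyStopping

# toy model and random data, purely for illustration
model = Sequential([Dense(16, activation='relu', input_shape=(10,)),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
x = np.random.rand(256, 10)
y = np.random.randint(0, 2, size=(256, 1))

callbacks = [
    ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True),  # keep the best weights seen so far
    EarlyStopping(monitor='val_loss', patience=3),  # stop once val_loss stops improving
]
model.fit(x, y, validation_split=0.2, epochs=20, callbacks=callbacks)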
Example #1
Source File: cifar10_resnet.py    From DeepLearning_Wavelet-LSTM with MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
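In the original ResNet example this function is not called directly; it is handed to a LearningRateScheduler callback so Keras invokes it at every epoch boundary. The sketch below shows that wiring; the model and data variables are assumed to exist, and the ReduceLROnPlateau settings mirror the common CIFAR-10 ResNet recipe rather than anything quoted above.

import numpy as np
from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau

# wrap the schedule in a callback; Keras calls lr_schedule(epoch) before each epoch
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)

model.fit(x_train, y_train,
          batch_size=32,
          epochs=200,
          validation_data=(x_test, y_test),
          callbacks=[lr_scheduler, lr_reducer])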
Example #2
Source File: testing_utils.py    From ntm_keras with BSD 3-Clause "New" or "Revised" License
def train_model(model, epochs=10, min_size=5, max_size=20, callbacks=None, verboose=False):
    input_dim = model.input_dim
    output_dim = model.output_dim
    batch_size = model.batch_size

    sample_generator = get_sample(batch_size=batch_size, in_bits=input_dim, out_bits=output_dim,
                                                max_size=max_size, min_size=min_size)
    if verboose:
        for j in range(epochs):
            model.fit_generator(sample_generator, steps_per_epoch=10, epochs=j+1, callbacks=callbacks, initial_epoch=j)
            print("currently at epoch {0}".format(j+1))
            for i in [5,10,20,40]:
                test_model(model, sequence_length=i, verboose=True)
    else:
        model.fit_generator(sample_generator, steps_per_epoch=10, epochs=epochs, callbacks=callbacks)

    print("done training") 
Example #3
Source File: testing_utils.py    From ntm_keras with BSD 3-Clause "New" or "Revised" License
def lengthy_test(model, testrange=[5,10,20,40,80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name 
    tensorboard = TensorBoard(log_dir=log_path,
                                write_graph=False, #This eats a lot of space. Enable with caution!
                                #histogram_freq = 1,
                                write_images=True,
                                batch_size = model.batch_size,
                                write_grads=True)
    model_saver =  ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))
    return 
Example #4
Source File: cifar10.py    From keras-adabound with MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 0.001
    epoch += 1

    # if epoch >= 90:
    #     lr *= 5e-2
    # elif epoch >= 60:
    #     lr *= 1e-1
    # elif epoch >= 30:
    #     lr *= 5e-1

    if epoch >= 150:
        lr *= 0.1
    print('Learning rate: ', lr)
    return lr 
Example #5
Source File: cifar10_resnet.py    From DeepLearning_Wavelet-LSTM with MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #6
Source File: idenprof.py    From IdenProf with MIT License
def lr_schedule(epoch):
    """
    Learning Rate Schedule
    """
    # Learning rate is scheduled to be reduced after 80, 120, 160, 180  epochs. Called  automatically  every
    #  epoch as part  of  callbacks  during  training.



    lr = 1e-3
    if epoch > 180:
        lr *= 1e-4
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1

    print('Learning rate: ', lr)
    return lr 
Example #7
Source File: cifar10_resnet.py    From DeepLearning_Wavelet-LSTM with MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #8
Source File: train.py    From PJ_NLP with Apache License 2.0
def train():
    # load data
    train_dataset = Dataset(training=True)
    dev_dataset = Dataset(training=False)

    # model
    MODEL = name_model[model_name]
    model = MODEL(train_dataset.vocab_size, conf.n_classes, train_dataset.emb_mat)

    # callback
    my_callback = MyCallback()
    f1 = F1(dev_dataset.gen_batch_data(), dev_dataset.steps_per_epoch)
    checkpointer = ModelCheckpoint('data/{}.hdf5'.format(model_name), save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')

    # train
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy, metrics=['acc'])
    model.fit_generator(train_dataset.gen_batch_data(),
                        steps_per_epoch=train_dataset.steps_per_epoch,
                        verbose=0,
                        epochs=conf.epochs, callbacks=[my_callback, checkpointer, early_stop, f1])
    keras.models.save_model(model, conf.model_path.format(model_name)) 
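MyCallback and F1 are project-specific callbacks defined elsewhere in PJ_NLP and are not reproduced on this page. Purely as an illustration of what a metric callback such as F1 might look like, here is a hedged sketch; the generator protocol and label encoding are assumptions, not the project's actual code.

import numpy as np
import keras
from sklearn.metrics import f1_score

class F1(keras.callbacks.Callback):
    """Compute macro F1 on a held-out generator at the end of each epoch (illustrative sketch)."""

    def __init__(self, dev_generator, steps):
        super(F1, self).__init__()
        self.dev_generator = dev_generator
        self.steps = steps

    def on_epoch_end(self, epoch, logs=None):
        y_true, y_pred = [], []
        for _ in range(self.steps):
            x_batch, y_batch = next(self.dev_generator)
            preds = self.model.predict(x_batch)
            y_true.extend(np.argmax(y_batch, axis=-1))
            y_pred.extend(np.argmax(preds, axis=-1))
        print('epoch {}: dev macro-F1 = {:.4f}'.format(
            epoch + 1, f1_score(y_true, y_pred, average='macro')))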
Example #9
Source File: advtrain_cifar10.py    From Adaptive-Diversity-Promoting with Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #10
Source File: train_cifar.py    From Adaptive-Diversity-Promoting with Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 150:
        lr *= 1e-2
    elif epoch > 100:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #11
Source File: train_mnist.py    From Adaptive-Diversity-Promoting with Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 30:
        lr *= 1e-2
    elif epoch > 15:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #12
Source File: cifar10_resnet.py    From DeepLearning_Wavelet-LSTM with MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #13
Source File: punchline_extractor.py    From robotreviewer with GNU General Public License v3.0
def train_simple_inference_net(n_epochs=30):
    inf_net = SimpleInferenceNet()
    tr_ids, val_ids, te_ids = train_document_ids(), validation_document_ids(), test_document_ids()
    tr_ids = list(train_document_ids())
    train_Xy, inference_vectorizer = get_train_Xy(tr_ids, sections_of_interest=None, vocabulary_file=None, include_sentence_span_splits=False, include_raw_texts=True)

    X_k, y_k = make_Xy_inference(train_Xy, inf_net.bc)
    print("train data for inference task loaded!")

    val_Xy = get_Xy(val_ids, inference_vectorizer,  include_raw_texts=True)
    X_kv, y_kv = make_Xy_inference(val_Xy, inf_net.bc)
    print("val data loaded!")

    filepath="inference.weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    with open("inference_model.json", "w") as outf:
        outf.write(inf_net.model.to_json())

    print("fitting inference model!")
    inf_net.model.fit(X_k, y_k, validation_data=(X_kv, y_kv), callbacks=callbacks_list, epochs=n_epochs) 
Example #14
Source File: cifar10_resnet.py    From DeepLearning_Wavelet-LSTM with MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #15
Source File: cifar10_resnet.py    From DeepLearning_Wavelet-LSTM with MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example #16
Source File: poetry_model.py    From poetry_generator_Keras with MIT License
def train(self):
        '''Train the model.'''
        number_of_epoch = len(self.files_content) // self.config.batch_size

        if not self.model:
            self.build_model()

        self.model.summary()

        self.model.fit_generator(
            generator=self.data_generator(),
            verbose=True,
            steps_per_epoch=self.config.batch_size,
            epochs=number_of_epoch,
            callbacks=[
                keras.callbacks.ModelCheckpoint(self.config.weight_file, save_weights_only=False),
                LambdaCallback(on_epoch_end=self.generate_sample_result)
            ]
        ) 
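LambdaCallback simply forwards the standard Keras hooks, so generate_sample_result must accept the (epoch, logs) arguments that on_epoch_end receives. A minimal, assumed signature for that method of the same class would be:

def generate_sample_result(self, epoch, logs):
        # invoked by LambdaCallback after every epoch; the real method
        # samples a poem from the current model state
        print('epoch {} finished, loss = {}'.format(epoch + 1, logs.get('loss')))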
Example #17
Source File: licenseplate.py    From deep_learning with MIT License
def main():
    input_tensor = Input((72, 272, 3))
    x = input_tensor
    print("build model")
    x = Conv2D(32, kernel_size=(3, 3), activation='relu')(x)
    x = Conv2D(32, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPool2D(pool_size=(2, 2))(x)

    x = Conv2D(64, kernel_size=(3, 3), activation='relu')(x)
    x = Conv2D(64, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = Dropout(0.3)(x)

    x = Conv2D(128, kernel_size=(3, 3), activation='relu')(x)
    x = Conv2D(128, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPool2D(pool_size=(2, 2))(x)
    x = Dropout(0.3)(x)

    x = Flatten()(x)
    x = Dropout(0.5)(x)

    n_class = len(chars)
    x = [Dense(n_class, activation='softmax', name='c{0}'.format(i + 1))(x) for i in range(7)]
    model = Model(inputs=input_tensor, outputs=x)
    
    print("compile model")
    adam = Adam(lr=0.001)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

    # display 
    plot_model(model,to_file='./models/licenseplate_model.png')

    # training
    print("training model")
    best_model = ModelCheckpoint("./models/licenseplate.h5", monitor='val_loss', verbose=0, save_best_only=True)
    model.fit_generator(gen_plate(), steps_per_epoch=2000, epochs=8, validation_data=gen_plate(),
                     validation_steps=1280, callbacks=[best_model]) 
Example #18
Source File: stock_pred_hyperopt.py    From Stock-Price-Prediction with Apache License 2.0
def create_model_hypopt(params):
    print("Trying params:",params)
    batch_size = params["batch_size"]
    time_steps = params["time_steps"]
    x_train_ts, y_train_ts, x_test_ts, y_test_ts = data(batch_size, time_steps)
    lstm_model = Sequential()
    # (batch_size, timesteps, data_dim)
    lstm_model.add(LSTM(params["lstm1_nodes"], batch_input_shape=(batch_size, time_steps, x_train_ts.shape[2]), dropout=params["lstm1_dropouts"],
                        recurrent_dropout=params["lstm1_dropouts"], stateful=True, return_sequences=True,
                        kernel_initializer='random_uniform'))  
    # ,return_sequences=True #LSTM params => dropout=0.2, recurrent_dropout=0.2
    if params["lstm_layers"]["layers"] == "two":
        lstm_model.add(LSTM(params["lstm_layers"]["lstm2_nodes"], dropout=params["lstm_layers"]["lstm2_dropouts"]))
    else:
        lstm_model.add(Flatten())

    if params["dense_layers"]["layers"] == 'two':
        lstm_model.add(Dense(params["dense_layers"]["dense2_nodes"], activation='relu'))
    
    lstm_model.add(Dense(1, activation='sigmoid'))

    lr = params["lr"]
    epochs = params["epochs"]
    if params["optimizer"] == 'rms':
        optimizer = optimizers.RMSprop(lr=lr)
    else:
        optimizer = optimizers.SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)

    lstm_model.compile(loss='mean_squared_error', optimizer=optimizer)  # binary_crossentropy
    history = lstm_model.fit(x_train_ts, y_train_ts, epochs=epochs, verbose=2, batch_size=batch_size,
                             validation_data=[x_test_ts, y_test_ts],
                             callbacks=[LogMetrics(search_space, params, -1), csv_logger])
    # for key in history.history.keys():
    #     print(key, "--",history.history[key])
    # take the lowest validation loss across the training epochs
    val_error = np.amin(history.history['val_loss'])
    print('Best validation error of epoch:', val_error)
    return {'loss': val_error, 'status': STATUS_OK, 'model': lstm_model}  # if maximizing accuracy instead, return its negative, since hyperopt minimizes the loss
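A function returning a {'loss': ..., 'status': STATUS_OK, ...} dictionary like this is meant to be driven by hyperopt's fmin. A hedged driver sketch follows; search_space is assumed to be a hyperopt space dictionary defined elsewhere in the script, and max_evals is arbitrary.

from hyperopt import fmin, tpe, Trials

trials = Trials()
best = fmin(fn=create_model_hypopt,
            space=search_space,   # assumed: hp.choice / hp.uniform entries for the params used above
            algo=tpe.suggest,
            max_evals=50,
            trials=trials)
print('best hyperparameters found:', best)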
Example #19
Source File: hypertree_train.py    From costar_plan with Apache License 2.0
def __init__(self, filenames=None, example_generator=None, steps=None, metrics_prefix='val', verbose=1):
        # parameters of the callback passed during initialization
        # pass evaluation mode directly
        super(GraspJaccardEvaluateCallback, self).__init__()
        print('filenames: ' + str(filenames))
        print('generator: ' + str(example_generator))
        self.num_steps = steps
        self.verbose = verbose
        self.metrics_prefix = metrics_prefix
        self.filenames = filenames
        self.example_generator = example_generator 
Example #20
Source File: hypertree_train.py    From costar_plan with Apache License 2.0
def choose_optimizer(optimizer_name, learning_rate, callbacks, monitor_loss_name, train_steps, learning_rate_schedule=None):
    if learning_rate_schedule is None:
        learning_rate_schedule = FLAGS.learning_rate_schedule

    if optimizer_name == 'sgd':
        optimizer = keras.optimizers.SGD(learning_rate * 1.0)
        print('sgd initialized with learning rate: ' + str(learning_rate) + ' this might be overridden by callbacks later.')
    elif optimizer_name == 'adam':
        optimizer = keras.optimizers.Adam()
    elif optimizer_name == 'rmsprop':
        optimizer = keras.optimizers.RMSprop()
    else:
        raise ValueError('Unsupported optimizer ' + str(optimizer_name) +
                         ', try adam, sgd, or rmsprop.')

    if ((optimizer_name == 'sgd' or optimizer_name == 'rmsprop') and
            learning_rate_schedule is not None and learning_rate_schedule and learning_rate_schedule != 'none'):
        if learning_rate_schedule == 'reduce_lr_on_plateau':
            callbacks = callbacks + [
                # Reduce the learning rate if training plateaus.
                # patience of 13 is a good option for the cornell datasets and costar stack regression
                keras.callbacks.ReduceLROnPlateau(patience=13, verbose=1, factor=0.5, monitor=monitor_loss_name, min_delta=1e-6)
            ]
        else:
            callbacks = callbacks + [
                # In this case the max learning rate is double the specified one,
                # so that the average initial learning rate is as specified.
                keras_contrib.callbacks.CyclicLR(
                    step_size=train_steps * 8, base_lr=1e-5, max_lr=learning_rate * 2,
                    mode=learning_rate_schedule, gamma=0.99999)
            ]
    return callbacks, optimizer 
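The returned callback list and optimizer are meant to be passed straight into compile() and fit(). A hedged usage sketch, with the model, data, and train_steps value assumed:

callbacks, optimizer = choose_optimizer('sgd', 0.01, [], 'val_loss', train_steps=1000,
                                        learning_rate_schedule='reduce_lr_on_plateau')
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=100,
          callbacks=callbacks)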
Example #21
Source File: mnist_tfrecord.py    From DeepLearning_Wavelet-LSTM with MIT License
def __init__(self, model, steps, metrics_prefix='val', verbose=1):
        # parameters of the callback passed during initialization
        # pass evaluation mode directly
        super(EvaluateInputTensor, self).__init__()
        self.val_model = model
        self.num_steps = steps
        self.verbose = verbose
        self.metrics_prefix = metrics_prefix 
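Only the constructor is shown above. In the full mnist_tfrecord example the callback also copies the training weights into the separately compiled validation model and evaluates it every epoch; the method below is a reconstruction of that behaviour from the fields set in __init__, not a verbatim quote of the file.

def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # sync weights into the validation model, then evaluate it on its input tensors
        self.val_model.set_weights(self.model.get_weights())
        results = self.val_model.evaluate(None, None, steps=int(self.num_steps), verbose=self.verbose)
        for value, name in zip(results, self.val_model.metrics_names):
            logs[self.metrics_prefix + '_' + name] = value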
Example #22
Source File: stock_pred_talos.py    From Stock-Price-Prediction with Apache License 2.0
def create_model_talos(x_train_ts, y_train_ts, x_test_ts, y_test_ts, params):
    x_train_ts, y_train_ts, x_test_ts, y_test_ts = data(params)
    BATCH_SIZE = params["batch_size"]
    TIME_STEPS = params["time_steps"]
    lstm_model = Sequential()
    # (batch_size, timesteps, data_dim)
    lstm_model.add(LSTM(params["lstm1_nodes"], batch_input_shape=(BATCH_SIZE, TIME_STEPS, x_train_ts.shape[2]), dropout=0.2,
                        recurrent_dropout=0.2, stateful=True, return_sequences=True,
                        kernel_initializer='random_uniform'))  
    
    if params["lstm_layers"] == 2:
        lstm_model.add(LSTM(params["lstm2_nodes"], dropout=0.2))
    else:
        lstm_model.add(Flatten())

    if params["dense_layers"] == 2:
        lstm_model.add(Dense(params["dense2_nodes"], activation='relu'))

    lstm_model.add(Dense(1, activation='sigmoid'))
    if params["optimizer"] == 'rms':
        optimizer = optimizers.RMSprop(lr=params["lr"])
    else:
        optimizer = optimizers.SGD(lr=params["lr"], decay=1e-6, momentum=0.9, nesterov=True)
    lstm_model.compile(loss='mean_squared_error', optimizer=optimizer)  # binary_crossentropy
    history = lstm_model.fit(x_train_ts, y_train_ts, epochs=params["epochs"], verbose=2, batch_size=BATCH_SIZE,
                             validation_data=[x_test_ts, y_test_ts],
                             callbacks=[LogMetrics(search_params, params, -1), csv_logger])
    # for key in history.history.keys():
    #     print(key, "--",history.history[key])
    return history, lstm_model 
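Functions with this (x_train, y_train, x_test, y_test, params) signature are intended to be handed to talos, which calls them once per hyperparameter combination. A hedged driver sketch, assuming search_params is the talos parameter dictionary used elsewhere in the script and a talos release whose Scan entry point accepts an experiment_name:

import talos as ta

scan = ta.Scan(x=x_train_ts,
               y=y_train_ts,
               params=search_params,   # assumed: dict of lists, e.g. {"batch_size": [20, 30], ...}
               model=create_model_talos,
               experiment_name='stock_pred')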
Example #23
Source File: mnist-keras.py    From nni with MIT License
def train(args, params):
    '''
    Train model
    '''
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
        validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    LOG.debug('Final result is: %.3f', acc)
    nni.report_final_result(acc) 
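SendMetrics is NNI's custom per-epoch reporter and is defined elsewhere in the example script. As an illustration only, a callback in that spirit could look like the following; the exact metric-key handling is an assumption.

import keras
import nni

class SendMetrics(keras.callbacks.Callback):
    """Report per-epoch validation accuracy to the NNI tuner (illustrative sketch)."""

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # different Keras versions name the metric 'val_acc' or 'val_accuracy'
        acc = logs.get('val_acc', logs.get('val_accuracy', 0.0))
        nni.report_intermediate_result(acc)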
Example #24
Source File: mnist_tfrecord.py    From DeepLearning_Wavelet-LSTM with MIT License
def __init__(self, model, steps, metrics_prefix='val', verbose=1):
        # parameters of the callback passed during initialization
        # pass evaluation mode directly
        super(EvaluateInputTensor, self).__init__()
        self.val_model = model
        self.num_steps = steps
        self.verbose = verbose
        self.metrics_prefix = metrics_prefix 
Example #25
Source File: keras_bert_classify_bi_lstm.py    From nlp_xiaojiang with MIT License
def fit(self, x_train, y_train, x_dev, y_dev):
        self.model.fit(x_train, y_train, batch_size=args.batch_size,
                       epochs=args.epochs, validation_data=(x_dev, y_dev),
                       shuffle=True,
                       callbacks=self.callback())
        self.model.save(args.path_save_model) 
Example #26
Source File: FashionMNIST_keras.py    From nni with MIT License
def train_eval():
    """ train and eval the model
    """

    global trainloader
    global testloader
    global net

    (x_train, y_train) = trainloader
    (x_test, y_test) = testloader

    # train procedure
    net.fit(
        x=x_train,
        y=y_train,
        batch_size=args.batch_size,
        validation_data=(x_test, y_test),
        epochs=args.epochs,
        shuffle=True,
        callbacks=[
            SendMetrics(),
            EarlyStopping(min_delta=0.001, patience=10),
            TensorBoard(log_dir=TENSORBOARD_DIR),
        ],
    )

    # trial report final acc to tuner
    _, acc = net.evaluate(x_test, y_test)
    logger.debug("Final result is: %.3f", acc)
    nni.report_final_result(acc) 
Example #27
Source File: cifar10_keras.py    From nni with MIT License
def train_eval():
    """ train and eval the model
    """

    global trainloader
    global testloader
    global net

    (x_train, y_train) = trainloader
    (x_test, y_test) = testloader

    # train procedure
    net.fit(
        x=x_train,
        y=y_train,
        batch_size=args.batch_size,
        validation_data=(x_test, y_test),
        epochs=args.epochs,
        shuffle=True,
        callbacks=[
            SendMetrics(),
            EarlyStopping(min_delta=0.001, patience=10),
            TensorBoard(log_dir=TENSORBOARD_DIR),
        ],
    )

    # trial report final acc to tuner
    _, acc = net.evaluate(x_test, y_test)
    logger.debug("Final result is: %.3f", acc)
    nni.report_final_result(acc) 
Example #28
Source File: motion_vgg_CNN2D.py    From CNNArt with Apache License 2.0
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    # weight_name = sOutPath + '/' + model_name + '_weights.h5'
    # model_json = sOutPath + model_name + '_json'
    model_all = sOutPath + model_name + '_model.h5'

    # model = createModel(patchSize)
    # opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    # callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]
    #
    # model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    # model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    X_test = np.expand_dims(X_test, axis=1)
    y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 1)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = sOutPath + '/' + model_name + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test}) 
Example #29
Source File: mnist-keras.py    From nni with MIT License
def train(args, params):
    '''
    Train model
    '''
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    # nni
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
        validation_data=(x_test, y_test), callbacks=[SendMetrics(), TensorBoard(log_dir=TENSORBOARD_DIR)])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    LOG.debug('Final result is: %.3f', acc)
    nni.report_final_result(acc) 
Example #30
Source File: train_encoder_decoder.py    From Deep-Image-Matting with MIT License
def __init__(self, model):
        keras.callbacks.Callback.__init__(self)
        self.model_to_save = model
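Only the constructor of this callback survives in the snippet. The usual reason for stashing a second model reference like this is to checkpoint the template (single-GPU) model while a multi-GPU wrapper does the training; a plausible completion under that assumption, with an illustrative file-name pattern:

def on_epoch_end(self, epoch, logs=None):
        # save the stashed template model rather than the (possibly multi-GPU) training model
        self.model_to_save.save('model_at_epoch_%d.h5' % (epoch + 1))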