Python keras.callbacks Examples

The following code examples show how to use the keras.callbacks module. They are taken from open source Python projects.
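
Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them follow: construct callback objects, then pass them to fit() through the callbacks argument. The model, data, and hyperparameter values below are illustrative only.

import numpy as np
import keras
from keras.callbacks import ModelCheckpoint, EarlyStopping

# toy model and synthetic data, just to make the snippet runnable
model = keras.models.Sequential([keras.layers.Dense(10, activation='softmax', input_shape=(32,))])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
x = np.random.rand(256, 32)
y = keras.utils.to_categorical(np.random.randint(10, size=256), 10)

callbacks = [
    ModelCheckpoint('weights.best.hdf5', monitor='val_loss', save_best_only=True),
    EarlyStopping(monitor='val_loss', patience=5),
]
model.fit(x, y, validation_split=0.2, epochs=20, callbacks=callbacks)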

Example 1
Project: Efficient_Augmentation   Author: mkuchnik   File: norb_resnet.py    MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
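
In the original script, a schedule function like this is registered through keras.callbacks.LearningRateScheduler. A hedged sketch of that wiring, assuming model, x_train and y_train exist (the ReduceLROnPlateau companion follows the upstream Keras CIFAR-10 ResNet example):

import numpy as np
from keras.callbacks import LearningRateScheduler, ReduceLROnPlateau

lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
model.fit(x_train, y_train, epochs=200, callbacks=[lr_scheduler, lr_reducer])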
Example 2
Project: Efficient_Augmentation   Author: mkuchnik   File: cifar10_resnet.py    MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 3
Project: AIX360   Author: IBM   File: resnet_keras_model.py    Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 4
Project: FastNet   Author: johnolafenwa   File: cifar100.py    MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """

    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1

    print('Learning rate: ', lr)

    return lr 
Example 5
Project: Variational-AutoEncoder-For-Novelty-Detection   Author: LordAlucard90   File: helper.py    GNU General Public License v3.0
def _train_model(self, vae, hidden, reg_val=None, drp_val=None):
        keras.backend.clear_session()

        m_generator = ModelGenerator(vae=vae, hidden=hidden, reg_val=reg_val, drp_val=drp_val)
        name = m_generator.get_name()

        if not exists(join(self.models_dir, '{}_BestW.hdf5'.format(name))):
            checkpointer = ModelCheckpoint(
                filepath=join(self.bests_tmp_dir, name + '_Wep{epoch:03d}_loss{val_loss:.5f}.hdf5'),
                verbose=0,
                save_best_only=True)

            model, history = m_generator.train(epochs=self.epochs, train=self.Trn, validation=self.Val,
                                               callbacks=[checkpointer])

            save_name = join(self.models_dir, name)
            best = sorted(list(filter(lambda w: w.startswith(name) and w.endswith('.hdf5'),
                                      listdir(self.bests_tmp_dir))))[-1]
            move(join(self.bests_tmp_dir, best), '{}_BestW.hdf5'.format(save_name))
            np.save('{}.npy'.format(save_name), history)
            for file in filter(lambda w: w.startswith(name) and w.endswith('.hdf5'), listdir(self.bests_tmp_dir)):
                remove(join(self.bests_tmp_dir, file)) 
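
The {epoch:03d} and {val_loss:.5f} placeholders in the checkpoint filepath above are filled in by ModelCheckpoint from the epoch number and the logs dict at each save. A small sketch of the resulting file names (the model name and metric values are hypothetical):

template = 'vae_h32_Wep{epoch:03d}_loss{val_loss:.5f}.hdf5'
print(template.format(epoch=12, val_loss=0.03417))
# -> vae_h32_Wep012_loss0.03417.hdf5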
Example 6
Project: Adaptive-Diversity-Promoting   Author: P2333   File: train_mnist.py    Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 15 and 30 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 30:
        lr *= 1e-2
    elif epoch > 15:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 7
Project: Adaptive-Diversity-Promoting   Author: P2333   File: train_cifar.py    Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 100 and 150 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 150:
        lr *= 1e-2
    elif epoch > 100:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 8
Project: Adaptive-Diversity-Promoting   Author: P2333   File: advtrain_cifar10.py    Apache License 2.0
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 80, 120 and 160 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 9
Project: PJ_NLP   Author: moxiu2012   File: train.py    Apache License 2.0
def train():
    # load data
    train_dataset = Dataset(training=True)
    dev_dataset = Dataset(training=False)

    # model
    MODEL = name_model[model_name]
    model = MODEL(train_dataset.vocab_size, conf.n_classes, train_dataset.emb_mat)

    # callback
    my_callback = MyCallback()
    f1 = F1(dev_dataset.gen_batch_data(), dev_dataset.steps_per_epoch)
    checkpointer = ModelCheckpoint('data/{}.hdf5'.format(model_name), save_best_only=True)
    early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')

    # train
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy, metrics=['acc'])
    model.fit_generator(train_dataset.gen_batch_data(),
                        steps_per_epoch=train_dataset.steps_per_epoch,
                        verbose=0,
                        epochs=conf.epochs, callbacks=[my_callback, checkpointer, early_stop, f1])
    keras.models.save_model(model, conf.model_path.format(model_name)) 
Example 10
Project: IdenProf   Author: OlafenwaMoses   File: idenprof.py    MIT License
def lr_schedule(epoch):
    """
    Learning Rate Schedule
    """
    # Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    # Called automatically every epoch as part of callbacks during training.

    lr = 1e-3
    if epoch > 180:
        lr *= 1e-4
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1

    print('Learning rate: ', lr)
    return lr 
Example 11
Project: keras-adabound   Author: titu1994   File: cifar10.py    MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule
    Learning rate is scheduled to be reduced after 150 epochs.
    Called automatically every epoch as part of callbacks during training.
    # Arguments
        epoch (int): The number of epochs
    # Returns
        lr (float32): learning rate
    """
    lr = 0.001
    epoch += 1

    # if epoch >= 90:
    #     lr *= 5e-2
    # elif epoch >= 60:
    #     lr *= 1e-1
    # elif epoch >= 30:
    #     lr *= 5e-1

    if epoch >= 150:
        lr *= 0.1
    print('Learning rate: ', lr)
    return lr 
Example 12
Project: applications   Author: geomstats   File: cifar_10_resnet_hypersphere.py    MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 13
Project: applications   Author: geomstats   File: cifar10_resnet.py    MIT License
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr 
Example 14
Project: lowLightImageEnhancement   Author: kauziishere   File: preprocess_train.py    Apache License 2.0
def train():
	train_dict = get_file_from_pickle("train_dictionary.pkl")
	input_id_list = [x for x,_ in train_dict.items()]
	output_id_list = [x for _, x in train_dict.items()]
	input_id_list, output_id_list = decrease_train_input(input_id_list, output_id_list)
	train_generator = DataGenerator(input_id_list, output_id_list, train_dict)
	print("Number of input files are: {}".format(len(input_id_list)))
#	val_dict = get_file_from_pickle("val_dictionary.pkl")
#	input_id_list = [x for x,_ in val_dict.items()]
#	output_id_list = [x for _, x in val_dict.items()]
#	val_generator = DataGenerator(input_id_list, output_id_list, val_dict)
	net = model()
	net.load_weights('./result_dir/weights.020.hdf5')
	sgd = SGD(lr = 0.003, nesterov = True)
	net.compile(optimizer = sgd, loss = custom_loss, metrics = ['accuracy'])
	filepath = './result_dir/weights.{epoch:03d}.hdf5'  # assumed path; `filepath` is not defined in the original snippet
	checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=False, mode='max')
	callbacks_list = [checkpoint]
	net.fit_generator(generator = train_generator,
			epochs = 20,
			callbacks = callbacks_list,
			verbose = 1) 
Example 15
Project: AutoSpeech2019   Author: DeepWisdom   File: model.py    Apache License 2.0
def train_fit_first_by_generator(self):
        self.trn_gen = generator.DataGenerator(self.train_x, self.train_y, **self.params)
        self.first_r_train_x = self.train_x
        self.first_r_data_generator = self.trn_gen
        cur_epoch = self.decide_epoch_curround()
        early_stopping = TerminateOnBaseline(monitor="acc", baseline=0.999)
        self.model.fit_generator(
            self.first_r_data_generator,
            steps_per_epoch=int(len(self.first_r_train_x) // self.params["batch_size"] // 2),
            epochs=cur_epoch,
            max_queue_size=10,
            callbacks=self.callbacks + [early_stopping],
            use_multiprocessing=False,
            workers=1,
            verbose=ThinRes34Config.VERBOSE,
        )
        return 
Example 16
Project: AutoSpeech2019   Author: DeepWisdom   File: model.py    Apache License 2.0
def train_left_rounds(self, remaining_time_budget):
        self.try_to_update_spec_len()
        model = self.model
        callbacks = self.callbacks

        self.fullvalid_stage = self.decide_if_full_valid()
        cur_round_sample_id_list = self.autospeech_sampler.get_downsample_index_list_by_class(
            per_class_num=10, max_sample_num=200, min_sample_num=300
        )

        self.train_x = self.get_x_data_features_by_sampleidlist(cur_sampid_list=cur_round_sample_id_list)
        self.train_y = self.g_train_y[cur_round_sample_id_list]
        model, accept_cur = self.train_left_fit_by_generator()
        self.round_idx += 1
        if accept_cur:
            self.model = model
            return True
        else:
            return False 
Example 17
Project: ntm_keras   Author: flomlo   File: testing_utils.py    BSD 3-Clause "New" or "Revised" License
def train_model(model, epochs=10, min_size=5, max_size=20, callbacks=None, verboose=False):
    input_dim = model.input_dim
    output_dim = model.output_dim
    batch_size = model.batch_size

    sample_generator = get_sample(batch_size=batch_size, in_bits=input_dim, out_bits=output_dim,
                                                max_size=max_size, min_size=min_size)
    if verboose:
        for j in range(epochs):
            model.fit_generator(sample_generator, steps_per_epoch=10, epochs=j+1, callbacks=callbacks, initial_epoch=j)
            print("currently at epoch {0}".format(j+1))
            for i in [5,10,20,40]:
                test_model(model, sequence_length=i, verboose=True)
    else:
        model.fit_generator(sample_generator, steps_per_epoch=10, epochs=epochs, callbacks=callbacks)

    print("done training") 
Example 18
Project: ntm_keras   Author: flomlo   File: testing_utils.py    BSD 3-Clause "New" or "Revised" License
def lengthy_test(model, testrange=[5,10,20,40,80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name 
    tensorboard = TensorBoard(log_dir=log_path,
                                write_graph=False, #This eats a lot of space. Enable with caution!
                                #histogram_freq = 1,
                                write_images=True,
                                batch_size = model.batch_size,
                                write_grads=True)
    model_saver =  ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5", monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i,acc))
    return 
Example 19
Project: Scene-Understanding   Author: foamliu   File: train.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
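
Only the constructor of this custom callback is shown; in training scripts like this one it typically holds a reference to the single-GPU template model so that checkpoints save it rather than a multi-GPU wrapper. A hedged sketch of what the full subclass usually looks like (class name and save path are assumptions):

class SaveTemplateModel(keras.callbacks.Callback):
    def __init__(self, model):
        keras.callbacks.Callback.__init__(self)
        self.model_to_save = model

    def on_epoch_end(self, epoch, logs=None):
        # save the stored template model instead of self.model
        self.model_to_save.save('model.%02d.hdf5' % epoch)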
Example 20
Project: deep-nn-car   Author: scope-lab-vu   File: train.py    MIT License
def trainModel(model, X, A, Y):
    adam = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(loss='mse', optimizer=adam)
    # checkpoint
    filePath = "weights.best.hdf5"
    checkpoint = ModelCheckpoint(filePath, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint, history]
    model.fit([X,A], Y, epochs=200,batch_size=64, callbacks=callbacks_list, verbose=2) 
Example 21
Project: XLNet_embbeding   Author: zedom1   File: demo.py    MIT License
def train(filename):
	X_train, X_val, y_train, y_val = process_data(filename, mode="train")
	model = create_model()
	encoded_x_train, encoded_y_train = encode_data(X_train, y_train)
	encoded_x_val, encoded_y_val = encode_data(X_val, y_val)

	model.compile(
		optimizer = Adam(lr=model_hyper["lr"], beta_1=0.9, beta_2=0.999, decay=0.0),
		loss = 'categorical_crossentropy',
		metrics = ['accuracy']
	)

	model.fit(
		encoded_x_train, encoded_y_train, 
		batch_size = model_hyper["batch_size"],
		epochs = model_hyper["epochs"], 
		validation_data = (encoded_x_val, encoded_y_val),
		callbacks = [ 
			EarlyStopping(monitor='val_loss', mode='min', min_delta=1e-8, patience=model_hyper["patience"]),
			ModelCheckpoint(monitor='val_loss', mode='min', filepath=model_hyper["model_path"], verbose=1, save_best_only=True, save_weights_only=True),
			f1_callback(val_data=[encoded_x_val, encoded_y_val])
		]
	)
	# save the embedding
	if xlnet_hyper["trainable"]:
		embedding.model.save(xlnet_hyper["path_fineture"]) 
Example 22
Project: Colorful-Image-Colorization   Author: foamliu   File: train.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 23
Project: gumpy-deeplearning   Author: gumpy-bci   File: model.py    MIT License
def __init__(self, name):
        super(KerasModel, self).__init__(name)
        self.callbacks = None 
Example 24
Project: gumpy-deeplearning   Author: gumpy-bci   File: model.py    MIT License
def get_callbacks(self):
        """Returns callbacks to monitor the model.

        """

        # save weights in an HDF5 file
        model_file = self.name + '_monitoring' + '.h5'
        checkpoint = ModelCheckpoint(model_file, monitor = 'val_loss',
                                     verbose = 0, save_best_only = True, mode = 'min')
        log_file = self.name + '.csv'
        csv_logger = CSVLogger(log_file, append = True, separator = ';')
        callbacks_list = [csv_logger]  # note: `checkpoint` is created above but not included in this list in the original

        self.callbacks = callbacks_list
        return callbacks_list 
Example 25
Project: gumpy-deeplearning   Author: gumpy-bci   File: model.py    MIT License
def fit(self, x, y, monitor=True, **kwargs):
        # TODO: allow user to specify filename
        if monitor and (self.callbacks is None):
            self.get_callbacks()

        if self.callbacks is not None:
            self.model.fit(x, y, **kwargs, callbacks=self.callbacks)
        else:
            self.model.fit(x, y, **kwargs) 
Example 26
Project: kutils   Author: subpic   File: model_helper.py    MIT License
def _callbacks(self):
        """Setup callbacks"""
        p = self.params
        log_dir = os.path.join(self.params.logs_root, self.model_name())
        if p.histogram_freq:
            valid_gen = self.make_generator(self.ids[self.ids.set=='validation'], 
                                            deterministic=True, fixed_batches=True)
            tb_callback = TensorBoardWrapper(valid_gen, log_dir=log_dir, 
                                      write_images=p.write_images, 
                                      histogram_freq=p.histogram_freq, 
                                      write_graph=p.write_graph)
        else:
            tb_callback = TensorBoard(log_dir=log_dir,
                                      write_graph=p.write_graph, 
                                      histogram_freq=0, 
                                      write_images=p.write_images)
        
        tb_callback.set_model(self.model)
        best_model_path = os.path.join(self.params.models_root, 
                                       self.model_name() + '_best_weights.h5')
        make_dirs(best_model_path)
        checkpointer = ModelCheckpoint(filepath = best_model_path, verbose=0,
                                       monitor  = p.monitor_metric, 
                                       mode     = p.monitor_mode, 
                                       period   = p.checkpoint_period,
                                       save_best_only    = p.save_best_only,
                                       save_weights_only = True)
        earlystop = EarlyStopping(monitor=p.monitor_metric, 
                                  patience=p.early_stop_patience, 
                                  mode=p.monitor_mode)
        return [tb_callback, earlystop, checkpointer] 
Example 27
Project: Super-Resolution-Net   Author: foamliu   File: train.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 28
Project: face_landmark_dnn   Author: junhwanjang   File: train_mobilenets.py    MIT License
def main():
        # Define X and y
        # Load data
        PATH = "./data/64_64_1/offset_1.3/"
        X = np.load(PATH + "basic_dataset_img.npz")
        y = np.load(PATH + "basic_dataset_pts.npz")
        X = X['arr_0']
        y = y['arr_0'].reshape(-1, 136)
        

        print("Define X and Y")
        print("=======================================")
        
        # Split train / test dataset
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        print("Success of getting train / test dataset")
        print("=======================================")
        print("X_train: ", X_train.shape)
        print("y_train: ", y_train.shape)
        print("X_test: ", X_test.shape)
        print("y_test: ", y_test.shape)
        print("=======================================")

        model.compile(loss=smoothL1, optimizer=keras.optimizers.Adam(lr=1e-3), metrics=['mape'])
        print(model.summary())
        # checkpoint
        filepath="./mobilenet_checkpoints/smooth_L1-{epoch:02d}-{val_mean_absolute_percentage_error:.5f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
        callbacks_list = [checkpoint]
        history = model.fit(X_train, y_train, batch_size=64, epochs=10000, shuffle=True,\
                            verbose=1, validation_data=(X_test, y_test), callbacks=callbacks_list)

        # Save model
        model.save("./model/face_landmark_dnn.h5")
        print("=======================================")
        print("Save Final Model")
        print("=======================================") 
Example 29
Project: face_landmark_dnn   Author: junhwanjang   File: train_basic_models.py    MIT License
def main():
        # Define X and y
        # Load data
        PATH = "./data/64_64_1/offset_1.3/"
        X = np.load(PATH + "basic_dataset_img.npz")
        y = np.load(PATH + "basic_dataset_pts.npz")
        X = X['arr_0']
        y = y['arr_0'].reshape(-1, 136)
        

        print("Define X and Y")
        print("=======================================")
        
        # Split train / test dataset
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        print("Success of getting train / test dataset")
        print("=======================================")
        print("X_train: ", X_train.shape)
        print("y_train: ", y_train.shape)
        print("X_test: ", X_test.shape)
        print("y_test: ", y_test.shape)
        print("=======================================")

        model.compile(loss=smoothL1, optimizer=keras.optimizers.Adam(lr=1e-3), metrics=['mape'])
        print(model.summary())
        # checkpoint
        filepath="./basic_checkpoints/smooth_L1-{epoch:02d}-{val_mean_absolute_percentage_error:.5f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
        callbacks_list = [checkpoint]
        history = model.fit(X_train, y_train, batch_size=64, epochs=10000, shuffle=True,\
                            verbose=1, validation_data=(X_test, y_test), callbacks=callbacks_list)

        # Save model
        model.save("./model/face_landmark_dnn.h5")
        print("=======================================")
        print("Save Final Model")
        print("=======================================") 
Example 30
Project: kaggle_dsb   Author: syagev   File: classifier.py    Apache License 2.0
def make_lr_scheduler(base_lr, decay_rate, epoch_rate):

    def lr_schedule(epoch):
        if epoch + 1 < epoch_rate:
            lr = base_lr
        else:
            lr = base_lr / (decay_rate * np.floor((epoch + 1) / epoch_rate))

        return lr

    return keras.callbacks.LearningRateScheduler(lr_schedule) 
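
A hedged usage sketch of this factory (hyperparameter values are illustrative; model and training arrays are assumed to exist):

# keep base_lr for the first 30 epochs, then divide it by
# decay_rate * floor((epoch + 1) / epoch_rate)
lr_callback = make_lr_scheduler(base_lr=1e-3, decay_rate=2.0, epoch_rate=30)
model.fit(x_train, y_train, epochs=120, callbacks=[lr_callback])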
Example 31
Project: imgclsmob   Author: osmr   File: train_ke.py    MIT License
def train_net(net,
              train_gen,
              val_gen,
              train_num_examples,
              val_num_examples,
              num_epochs,
              checkpoint_filepath,
              start_epoch1):
    checkpointer = ModelCheckpoint(
        filepath=checkpoint_filepath,
        verbose=1,
        save_best_only=True)

    tic = time.time()

    net.fit_generator(
        generator=train_gen,
        samples_per_epoch=train_num_examples,  # legacy Keras 1 argument; Keras 2 uses steps_per_epoch (number of batches)
        epochs=num_epochs,
        verbose=True,
        callbacks=[checkpointer],
        validation_data=val_gen,
        validation_steps=val_num_examples,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        shuffle=True,
        initial_epoch=(start_epoch1 - 1))

    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic)) 
Example 32
Project: Crop-Disease-Detection   Author: foamliu   File: train.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 33
Project: FaceNet   Author: foamliu   File: train.py    Apache License 2.0
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 34
Project: Image-Segmentation   Author: ForeverPs   File: net.py    MIT License
def train_test(self):
		train, label, test = self.load_data()
		model = self.net_structure()
		print('\n\nTraining Neural Network')
		model_checkpoint = ModelCheckpoint('net.h5', monitor='loss', verbose=1, save_best_only=True)
		model.fit(train, label, batch_size=20, epochs=30, verbose=1, shuffle=True, callbacks=[model_checkpoint])
		test_result = model.predict(test, batch_size=1, verbose=1)
		np.save('test_result.npy', test_result) 
Example 35
Project: MCLNN   Author: fadymedhat   File: trainingcallbacks.py    MIT License
def prepare_callbacks(configuration, fold_weights_path, data_loader):
    callback_list = []

    # remote_callback = callbacks.RemoteMonitor(root='http://localhost:9000')
    # callback_list.append(remote_callback)

    # early stopping
    early_stopping_callback = callbacks.EarlyStopping(monitor=configuration.STOPPING_CRITERION,
                                                      patience=configuration.WAIT_COUNT,
                                                      verbose=0,
                                                      mode='auto')
    callback_list.append(early_stopping_callback)

    # save weights at the end of epoch
    weights_file_name_format = 'weights.epoch{epoch:02d}-val_loss{val_loss:.2f}-val_acc{val_acc:.4f}.hdf5'
    checkpoint_callback = ModelCheckpoint(os.path.join(fold_weights_path, weights_file_name_format),
                                          monitor='val_loss', verbose=0,
                                          save_best_only=False, mode='auto')
    callback_list.append(checkpoint_callback)

    # free space of stored weights of early epochs
    directory_house_keeping_callback = DirectoryHouseKeepingCallback(fold_weights_path)
    callback_list.append(directory_house_keeping_callback)

    # call for visualization if it is enabled
    if configuration.SAVE_SEGMENT_PREDICTION_IMAGE_PER_EPOCH == True:
        segment_plot_callback = SegmentPlotCallback(configuration=configuration,
                                                    data_loader=data_loader)
        callback_list.append(segment_plot_callback)

    return callback_list 
Example 36
Project: cloudFCN   Author: aliFrancis   File: callbacks.py    Apache License 2.0
def __init__(self, datagen, steps_per_epoch=10, frequency=1, class_labels=['Fill', 'Clear', 'Cloud'],unlabelled_mask=False):
        keras.callbacks.Callback.__init__(self)
        self.datagen = datagen
        self.steps_per_epoch = steps_per_epoch
        self.frequency = frequency
        self.class_labels = class_labels
        self.unlabelled_mask = unlabelled_mask
        return 
Example 37
Project: cloudFCN   Author: aliFrancis   File: callbacks.py    Apache License 2.0
def __init__(self, datagen, RGB_bands=[0, 1, 2],class_labels=None):
        keras.callbacks.Callback.__init__(self)
        self.datagen = datagen
        self.RGB_bands = RGB_bands
        self.class_labels = class_labels
        return 
Example 38
Project: cloudFCN   Author: aliFrancis   File: custom_callbacks.py    Apache License 2.0
def __init__(self, valid_datasets, valid_datagens, steps_per_epoch=float('inf'), frequency=1):
        keras.callbacks.Callback.__init__(self)
        self.datasets = valid_datasets
        self.datagens = valid_datagens
        self.steps_per_epoch = steps_per_epoch
        self.frequency = frequency

        return 
Example 39
Project: cloudFCN   Author: aliFrancis   File: custom_callbacks.py    Apache License 2.0
def __init__(self, valid_datasets, valid_datagens, steps_per_epoch=float('inf'), frequency=1):
        keras.callbacks.Callback.__init__(self)
        self.datasets = valid_datasets
        self.datagens = valid_datagens
        self.steps_per_epoch = steps_per_epoch
        self.frequency = frequency

        return 
Example 40
Project: IdenProf   Author: OlafenwaMoses   File: idenprof.py    MIT License
def train_network():
    download_idenprof()

    print(os.listdir(os.path.join(execution_path, "idenprof")))

    optimizer = keras.optimizers.Adam(lr=0.01, decay=1e-4)
    batch_size = 32
    num_classes = 10
    epochs = 200

    model = ResNet50((224, 224, 3), num_classes=num_classes)
    model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
    model.summary()

    print("Using real time Data Augmentation")
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        horizontal_flip=True)

    test_datagen = ImageDataGenerator(
        rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(DATASET_TRAIN_DIR, target_size=(224, 224),
                                                        batch_size=batch_size, class_mode="categorical")
    test_generator = test_datagen.flow_from_directory(DATASET_TEST_DIR, target_size=(224, 224), batch_size=batch_size,
                                                      class_mode="categorical")

    model.fit_generator(train_generator, steps_per_epoch=int(9000 / batch_size), epochs=epochs,
                        validation_data=test_generator,
                        validation_steps=int(2000 / batch_size), callbacks=[checkpoint, lr_scheduler])


# ----------------- The Section Responsible for Inference --------------------- 
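
The checkpoint and lr_scheduler objects passed to fit_generator above are created elsewhere in idenprof.py. A hedged sketch of plausible definitions (the filename is an assumption; lr_schedule is the function shown in Example 10):

from keras.callbacks import ModelCheckpoint, LearningRateScheduler

checkpoint = ModelCheckpoint('idenprof_weights.{epoch:02d}.h5',  # assumed filename
                             monitor='val_acc', save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)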
Example 41
Project: DeepPINK   Author: younglululu   File: run_withKnockoff_all.py    GNU General Public License v2.0
def train_DNN(model, X, y, myCallback):
    num_sequences = len(y);
    num_positives = np.sum(y);
    num_negatives = num_sequences - num_positives;

    model.fit(X, y, epochs=num_epochs, batch_size=batch_size, verbose=1, class_weight={True: num_sequences / num_positives, False: num_sequences / num_negatives}, callbacks=[myCallback]);
    return model; 
Example 42
Project: hanabi-rl   Author: whymauri   File: dqn_agent_untangled.py    MIT License
def update_models(self, data, epochs, verbose, history):
		for action in range(3):
			for (state,act,target) in data:
				if act == action:
					self.models[action].fit(state, target, epochs=epochs, verbose=verbose, callbacks=[history]) 
Example 43
Project: hanabi-rl   Author: whymauri   File: dqn_agent.py    MIT License
def train_agent(self, batch_size, epochs):
		def batch(iterable, n=1):
			l = len(iterable)
			for ndx in range(0, l, n):
				yield list(itertools.islice(iterable, ndx, min(ndx + n, l)))
				
		history = History()
		# self.get_memory()

		print ('memory length', len(self.memory))
		start = timer()
		count = 0
		for minibatch in batch(self.memory, batch_size):
			count += 1
			print ('running minibatch number', count) 

			for observation, state, action, reward, next_state, next_observation, done in minibatch:
				# print ('action in minibatch: ', action)
				# print (state.shape)
			
				current_action_values = self.model.predict(state)
				# print ('current action values', current_action_values)
				future_action_values = self.model.predict(next_state)
				# print ('future action values', future_action_values)

				if done:
					current_action_values[0][action] = reward
				else:
					future_action_index, future_action = \
						self._convert_action_for_environment(future_action_values, next_observation)
					current_action_values[0][action] = reward + \
								self.gamma * future_action_values[0][future_action_index]

				self.model.fit(state, current_action_values, epochs=epochs, verbose=0, callbacks=[history])
				# print(history.history)
				if self.epsilon > self.epsilon_min:
					self.epsilon *= self.epsilon_decay
		end = timer()
		print ('time for training', end - start)
		self.save_model()
		return None 
Example 44
Project: applications   Author: geomstats   File: mnist_tfrecord.py    MIT License
def __init__(self, model, steps, metrics_prefix='val', verbose=1):
        # parameter of callbacks passed during initialization
        # pass evaluation mode directly
        super(EvaluateInputTensor, self).__init__()
        self.val_model = model
        self.num_steps = steps
        self.verbose = verbose
        self.metrics_prefix = metrics_prefix 
Example 45
Project: MDSR   Author: foamliu   File: train.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 46
Project: POEM   Author: Rinoahu   File: deep_operon.py    GNU General Public License v3.0
def retrain_2d(self, X_train, y_train, X_test=None, y_test=None):

        Y_train = np_utils.to_categorical(y_train).astype('float32')
        if type(y_test) == type(None):
            Y_test = None
        else:
            Y_test = np_utils.to_categorical(y_test).astype('float32')

        if type(X_test) != type(None) and type(Y_test) != type(None):
        #if 0:
            self.model_2d.fit(X_train, Y_train, batch_size=self.batch_size, epochs=self.nb_epoch, verbose=1,
                      validation_data=(X_test, Y_test), shuffle=True, validation_split=1e-4, callbacks=self.checkpointer)
        else:
            self.model_2d.fit(X_train, Y_train, batch_size=self.batch_size,
                      epochs=self.nb_epoch, verbose=1, shuffle=True, validation_split=self.cross_val, callbacks=self.checkpointer) 
Example 47
Project: POEM   Author: Rinoahu   File: deep_operon.py    GNU General Public License v3.0
def fit_lstm(self, X_train, y_train, X_test=None, y_test=None):
        self.max_features = 2**12
        #print X_train.shape, y_train.shape
        N, D = X_train.shape
        model = Sequential()
        model.add(Embedding(self.max_features, D))
        #model.add(LSTM(D, dropout=0.2, recurrent_dropout=0.2))
        model.add(Bidirectional(CuDNNGRU(D, return_sequences=True)))

        #model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None))
        model.add(Dropout(0.2))

        model.add(Bidirectional(CuDNNGRU(D)))
        #model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None))
        model.add(Dropout(0.2))

        model.add(Dense(1, activation='sigmoid'))

        # try using different optimizers and different optimizer configs
        nb_classes = len(set(y_train))
        loss = nb_classes > 2 and 'categorical_crossentropy' or 'binary_crossentropy'
        #model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[self.metric])
        model.compile(loss=loss, optimizer='adam', metrics=[self.metric])
        print('Train..., loss is %s %s'%(loss, D))
        if type(X_test) != type(None) and type(y_test) != type(None):
            model.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, validation_data=(X_test, y_test), shuffle=True, callbacks=self.checkpointer)
        else:
            # X_test is None here, so validation_data is dropped in favor of validation_split
            model.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, verbose=1, shuffle=True, validation_split=self.cross_val, callbacks=self.checkpointer)
        score, acc = model.evaluate(X_test, y_test, batch_size=self.batch_size)
        print('Test score:', score)
        print('Test accuracy:', acc)
        self.model_2d = model 
Example 48
Project: POEM   Author: Rinoahu   File: deep_operon.py    GNU General Public License v3.0
def retrain_lstm(self, X_train, y_train, X_test=None, y_test=None):
        if type(X_test) != type(None) and type(y_test) != type(None):
            self.model_2d.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, validation_data=(X_test, y_test), shuffle=True, callbacks=self.checkpointer)
        else:
            # X_test is None here, so validation_data is dropped in favor of validation_split
            self.model_2d.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, verbose=1, shuffle=True, validation_split=self.cross_val, callbacks=self.checkpointer)
        score, acc = self.model_2d.evaluate(X_test, y_test, batch_size=self.batch_size) 
Example 49
Project: POEM   Author: Rinoahu   File: deep_operon.py    GNU General Public License v3.0
def retrain_2d(self, X_train, y_train, X_test=None, y_test=None):

        Y_train = np_utils.to_categorical(y_train).astype('float32')
        if type(y_test) == type(None):
            Y_test = None
        else:
            Y_test = np_utils.to_categorical(y_test).astype('float32')

        if type(X_test) != type(None) and type(Y_test) != type(None):
        #if 0:
            self.model_2d.fit(X_train, Y_train, batch_size=self.batch_size, epochs=self.nb_epoch, verbose=1,
                      validation_data=(X_test, Y_test), shuffle=True, validation_split=1e-4, callbacks=self.checkpointer)
        else:
            self.model_2d.fit(X_train, Y_train, batch_size=self.batch_size,
                      epochs=self.nb_epoch, verbose=1, shuffle=True, validation_split=self.cross_val, callbacks=self.checkpointer) 
Example 50
Project: POEM   Author: Rinoahu   File: deep_operon.py    GNU General Public License v3.0
def retrain_lstm(self, X_train, y_train, X_test=None, y_test=None):
        if type(X_test) != type(None) and type(y_test) != type(None):
            self.model_2d.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, validation_data=(X_test, y_test), shuffle=True, callbacks=self.checkpointer)
        else:
            # X_test is None here, so validation_data is dropped in favor of validation_split
            self.model_2d.fit(X_train, y_train, batch_size=self.batch_size, epochs=self.nb_epoch, verbose=1, shuffle=True, validation_split=self.cross_val, callbacks=self.checkpointer)
        score, acc = self.model_2d.evaluate(X_test, y_test, batch_size=self.batch_size) 
Example 51
Project: eye_gesture_app_wth   Author: nxphi47   File: base_model.py    MIT License
def fit(self, train_files, test_files=None, batch_size=32, epochs=10, validation_split=0.1, callbacks=None,
			**kwargs):
		pass 
Example 52
Project: eye_gesture_app_wth   Author: nxphi47   File: base_model.py    MIT License
def fit(self, train_files,
			test_files=None,
			batch_size=32, epochs=10, validation_split=0.1, callbacks=None,
			**kwargs):
		self.process_training_data(train_files, split=validation_split)

		if callbacks is None:
			callbacks = [
				keras.callbacks.ModelCheckpoint(
					filepath=self.checkpoint_path,
					monitor='val_loss',
					verbose=1,
					period=kwargs.get('checkpoint_epochs', 2),
					mode='max'),  # note: mode='max' with val_loss checkpoints on the highest loss; 'min' is likely intended
				TerminateOnNaN(),
				EarlyStopping(patience=10),
				ReduceLROnPlateau(patience=4),
				TensorBoard(log_dir=self.job_dir,
							histogram_freq=1,
							batch_size=batch_size,
							),
				utils.EvalCheckPoint(self.model,
									 self.job_dir,
									 self.X_val,
									 self.y_val,
									 self.label_set,
									 self.sequence_length,
									 eval_freq=kwargs.get('eval_freq', 1),
									 print_func=self.print_f,
									 epochs=epochs,
									 batch_norm=self.batch_norm
									 )

			]

		self.model.fit(self.X, self.y, batch_size=batch_size, epochs=epochs,
					   validation_data=[self.X_val, self.y_val], callbacks=callbacks)

		self.print_f('--Training Done--')
		self.test_on_trained(test_files=test_files) 
Example 53
Project: experiments   Author: Octavian-ai   File: train.py    MIT License
def __init__(self, monitor='val_acc', value=0.99, verbose=0, patience=3):
		super(keras.callbacks.Callback, self).__init__()  # note: passing the base class to super() skips Callback.__init__; the subclass is normally passed
		self.monitor = monitor
		self.value = value
		self.verbose = verbose
		self.stopped_epoch = 0
		self.patience = patience 
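
Only __init__ is shown; a hedged sketch of the on_epoch_end that usually accompanies this kind of threshold-based stopping callback (the logic is assumed, not taken from the project):

def on_epoch_end(self, epoch, logs=None):
    current = (logs or {}).get(self.monitor)
    if current is not None and current >= self.value:
        self.stopped_epoch = epoch
        self.model.stop_training = True
        if self.verbose > 0:
            print('Epoch %d: %s reached %s, stopping training' % (epoch, self.monitor, self.value))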
Example 54
Project: experiments   Author: Octavian-ai   File: train.py    MIT License
def __init__(self, experiment, dataset, model, verbose):
		self.experiment = experiment
		self.model = model
		self.dataset = dataset
		self.verbose = verbose
		super(keras.callbacks.Callback, self).__init__()  # note: same super() caveat as in Example 53
Example 55
Project: Fundus_Lesion2018   Author: foamliu   File: train.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 56
Project: Luna2016-Lung-Nodule-Detection   Author: codedecde   File: LUNA_unet.py    MIT License
def train(use_existing):
    print ("Loading the options ....")
    options = get_options()
    print ("epochs: %d"%options.epochs)
    print ("batch_size: %d"%options.batch_size)
    print ("filter_width: %d"%options.filter_width)
    print ("stride: %d"%options.stride)
    print ("learning rate: %f"%options.lr)
    sys.stdout.flush()

    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train = np.load(options.out_dir+"trainImages.npy").astype(np.float32)
    imgs_mask_train = np.load(options.out_dir+"trainMasks.npy").astype(np.float32)

    # Renormalizing the masks
    imgs_mask_train[imgs_mask_train > 0.] = 1.0
    
    # Now the Test Data
    imgs_test = np.load(options.out_dir+"testImages.npy").astype(np.float32)
    imgs_mask_test_true = np.load(options.out_dir+"testMasks.npy").astype(np.float32)
    # Renormalizing the test masks
    imgs_mask_test_true[imgs_mask_test_true > 0] = 1.0    

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet_small(options)
    weight_save = WeightSave(options)
    accuracy = Accuracy(copy.deepcopy(imgs_test),copy.deepcopy(imgs_mask_test_true))
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(x=imgs_train, y=imgs_mask_train, batch_size=options.batch_size, epochs=options.epochs, verbose=1, shuffle=True
            ,callbacks=[weight_save, accuracy])
              # callbacks = [accuracy])
              # callbacks=[weight_save,accuracy])
    return model 
Example 57
Project: Deep-Image-Matting   Author: foamliu   File: train_final.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 58
Project: Deep-Image-Matting   Author: foamliu   File: train_encoder_decoder.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 59
Project: Deep-Image-Matting   Author: foamliu   File: train.py    MIT License
def __init__(self, model):
            keras.callbacks.Callback.__init__(self)
            self.model_to_save = model 
Example 60
Project: keras-ctpn   Author: yizt   File: train.py    Apache License 2.0
def main(args):
    set_gpu_growth()
    # load annotations
    annotation_files = file_utils.get_sub_files(config.IMAGE_GT_DIR)
    image_annotations = [reader.load_annotation(file,
                                                config.IMAGE_DIR) for file in annotation_files]
    # filter out annotations whose image is missing; some ICDAR2017 images cannot be found
    image_annotations = [ann for ann in image_annotations if os.path.exists(ann['image_path'])]
    # load the model
    m = models.ctpn_net(config, 'train')
    models.compile(m, config, loss_names=['ctpn_regress_loss', 'ctpn_class_loss', 'side_regress_loss'])
    # add metrics
    output = models.get_layer(m, 'ctpn_target').output
    models.add_metrics(m, ['gt_num', 'pos_num', 'neg_num', 'gt_min_iou', 'gt_avg_iou'], output[-5:])
    if args.init_epochs > 0:
        m.load_weights(args.weight_path, by_name=True)
    else:
        m.load_weights(config.PRE_TRAINED_WEIGHT, by_name=True)
    m.summary()
    # data generators
    gen = generator(image_annotations[:-100],
                    config.IMAGES_PER_GPU,
                    config.IMAGE_SHAPE,
                    config.ANCHORS_WIDTH,
                    config.MAX_GT_INSTANCES,
                    horizontal_flip=False,
                    random_crop=False)
    val_gen = generator(image_annotations[-100:],
                        config.IMAGES_PER_GPU,
                        config.IMAGE_SHAPE,
                        config.ANCHORS_WIDTH,
                        config.MAX_GT_INSTANCES)

    # train
    m.fit_generator(gen,
                    steps_per_epoch=len(image_annotations) // config.IMAGES_PER_GPU * 2,
                    epochs=args.epochs,
                    initial_epoch=args.init_epochs,
                    validation_data=val_gen,
                    validation_steps=100 // config.IMAGES_PER_GPU,
                    verbose=True,
                    callbacks=get_call_back(),
                    workers=2,
                    use_multiprocessing=True)

    # save the model
    m.save(config.WEIGHT_PATH) 
Example 61
Project: kutils   Author: subpic   File: model_helper.py    MIT License
def train(self, train_gen=None, valid_gen=None, lr=1e-4, epochs=1):
        """
        Run training iterations on existing model.
        Initializes `train_gen` and `valid_gen` if not defined.

        :param train_gen: train generator
        :param valid_gen: validation generator
        :param lr:        learning rate
        :param epochs:    number of epochs
        :return:          training history from self.model.fit_generator()
        """
        ids = self.ids
        params = self.params
                   
        print('\nTraining model:', self.model_name())
        
        if train_gen is None:
            train_gen = self.make_generator(ids[ids.set == 'training'])
        if valid_gen is None:
            valid_gen = self.make_generator(ids[ids.set == 'validation'],
                                            deterministic=True)

        if lr: self.params.lr = lr
        self.params.optimizer = update_config(self.params.optimizer,
                                              lr=self.params.lr)
        
        self.model.compile(optimizer=self.params.optimizer, 
                           loss=params.loss, loss_weights=params.loss_weights, 
                           metrics=params.metrics)

        if self.verbose:
            print('\nGenerator parameters:')
            print('---------------------')
            pretty(self.gen_params)
            print('\nMain parameters:')
            print('----------------')
            pretty(self.params)
            print('\nLearning')

        history = self.model.fit_generator(train_gen, epochs = epochs,
                                           steps_per_epoch   = len(train_gen),
                                           validation_data   = valid_gen, 
                                           validation_steps  = len(valid_gen),
                                           workers           = params.workers, 
                                           callbacks         = self._callbacks(),
                                           max_queue_size    = params.max_queue_size,
                                           class_weight      = self.params.class_weights,
                                           use_multiprocessing = params.multiproc)
        return history 
Example 62
Project: AIX360   Author: IBM   File: profwt.py    Apache License 2.0
def fit(self,x_train,y_train,x_test,y_test,simple_model,hps,model_type='neural_keras',sample_weight=None):
        """
        Fits the training data by initializing a simple model with hyperparameters and returns the test accuracy on the test dataset.
        This can be trained with or without sample weights. The first 500 samples of the test dataset are used as validation data.

        Parameters:
            x_train (numpy array): Training dataset features for training the simple model. Dimensions (num of samples x feature dimensions)
            y_train (numpy array): Labels for the training dataset to train on. Dimensions (num of samples x num of classes)
            x_test (numpy array): Test dataset features. Dimensions (num of samples x feature dimensions)
            y_test (numpy array): Test dataset labels. Dimensions (num of samples x num of classes)
            hps (namedtuple): A namedtuple that is expected to have the following named tuple elements:

                * optimizer - specifies the optimizer in keras.
                * complexity_param - Used for ResNet based simple models to specify the number of residual units. Used by the simple model function
                  object to initialize a simple model of appropriate complexity.
                * num_classes - scalar specifying the number of classes used by the simple
                  model function.
                * checkpoint_path - specifies the path for saving a checkpoint of the trained model. This is expected.
                * lr_scheduler - a function object that takes in a scalar (epoch) and returns a learning rate (scalar). This is a learning rate scheduler. Expected.
                * lr_reducer - a function object that specifies how learning rates must be reduced if validation accuracy does not improve. Optional.

            simple_model (function object for a Keras model): A function object that constructs a keras model for the simple model and returns the model object. It is expected to take in input_shape, hps.complexity_param and num_classes.
                It is expected to implement a keras model fit function. It is also expected to implement a keras model evaluate function.

        Returns:
            tuple:

                * **model_d** (`Keras model object`) -- Returns the trained model that is initialized by simple_model functions.
                * **scores[1]** (`float`) -- Returns the test accuracy of the trained model on (x_test, y_test).

        """


        input_shape=x_train.shape[1:]
        if model_type=='neural_keras':
            assert None not in (hps.complexity_param,hps.num_classes), "Missing Hyper Parameters"
            model_d=simple_model(input_shape,hps.complexity_param,hps.num_classes)
            assert hps.optimizer is not None, "Missing Optimizer Specs"
            model_d.compile(loss='categorical_crossentropy',optimizer=hps.optimizer,metrics=['accuracy'])
            if (hasattr(model_d,'summary')):
                model_d.summary()
            callbacks=[]
            assert hps.checkpoint_path is not None, "Checkpoint to save Model not specified"
            checkpoint = ModelCheckpoint(filepath=hps.checkpoint_path,monitor='val_acc',verbose=1,save_best_only=True)
            assert hps.lr_scheduler is not None, "Learning Rate Scheduler not specified"
            callbacks=callbacks+[checkpoint,hps.lr_scheduler]
            if hasattr(hps,'lr_reducer'):
                callbacks=callbacks+[hps.lr_reducer]
            assert hasattr(model_d,'fit'), "Model supplied needs a fit function"
            if sample_weight is not None:
                model_d.fit(x_train, y_train,batch_size=hps.batch_size,epochs=hps.epochs,validation_data=(x_test[0:500,:], y_test[0:500,:]),shuffle=True,callbacks=callbacks,sample_weight=sample_weight)
            else:
                model_d.fit(x_train, y_train,batch_size=hps.batch_size,epochs=hps.epochs,validation_data=(x_test[0:500,:], y_test[0:500,:]),shuffle=True,callbacks=callbacks)
            assert hasattr(model_d,'evaluate'), "Model supplied needs an evaluate function"
            scores=model_d.evaluate(x_test, y_test, verbose=1)

        return (model_d,scores[1]) 
Example 63
Project: cactus-maml   Author: kylehkhsu   File: baselines.py    MIT License
def embedding_mlp(num_classes=FLAGS.way, num_shots=FLAGS.shot, num_tasks=FLAGS.num_tasks,
                  num_encoding_dims=FLAGS.num_encoding_dims, test_set=FLAGS.test_set, dataset=FLAGS.dataset,
                  units=FLAGS.units, dropout=FLAGS.dropout):
    import keras
    from keras.layers import Dense, Dropout
    from keras.losses import categorical_crossentropy
    from keras.callbacks import EarlyStopping
    from keras import backend as K

    if dataset != 'celeba':
        _, _, _, X_test, Y_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
        partition = task_generator.get_partition_from_labels(Y_test)
        partitions = [partition]
    else:
        _, _, _, X_test, attributes_test, Z_test = get_data(dataset, num_encoding_dims, test_set)
        task_generator = TaskGenerator(num_classes=num_classes, num_train_samples_per_class=num_shots, num_samples_per_class=num_shots+5)
        partitions = task_generator.get_celeba_task_pool(attributes_test)
    tasks = task_generator.get_tasks(num_tasks=num_tasks, partitions=partitions)

    train_accuracies, test_accuracies = [], []

    start = time.time()
    for i_task, task in enumerate(tqdm(tasks)):
        if (i_task + 1) % (num_tasks // 10) == 0:
            tqdm.write('test {}, accuracy {:.5}'.format(i_task + 1, np.mean(test_accuracies)))
        ind_train_few, Y_train_few, ind_test_few, Y_test_few = task
        Z_train_few, Z_test_few = Z_test[ind_train_few], Z_test[ind_test_few]
        Y_train_few, Y_test_few = keras.utils.to_categorical(Y_train_few, num_classes=num_classes), keras.utils.to_categorical(Y_test_few, num_classes=num_classes)

        model = keras.Sequential()
        model.add(Dense(units=units, activation='relu', input_dim=Z_train_few.shape[1]))
        model.add(Dropout(rate=dropout))
        model.add(Dense(units=num_classes, activation='softmax'))
        model.compile(loss=categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
        early_stopping = EarlyStopping(monitor='val_loss', patience=2)
        model.fit(Z_train_few, Y_train_few, batch_size=Z_train_few.shape[0], epochs=500, verbose=0, validation_data=(Z_test_few, Y_test_few), callbacks=[early_stopping])
        train_score = model.evaluate(Z_train_few, Y_train_few, verbose=0)
        train_accuracies.append(train_score[1])
        test_score = model.evaluate(Z_test_few, Y_test_few, verbose=0)
        test_accuracies.append(test_score[1])
        K.clear_session()

    print('units={}, dropout={}'.format(units, dropout))
    print('{}-way {}-shot embedding mlp: {:.5} with 95% CI {:.5} over {} tests'.format(num_classes, num_shots, np.mean(test_accuracies), 1.96*np.std(test_accuracies)/np.sqrt(num_tasks), num_tasks))
    print('Mean training accuracy: {:.5}; standard deviation: {:.5}'.format(np.mean(train_accuracies), np.std(train_accuracies)))
    print('{} few-shot classification tasks: {:.5} seconds.'.format(num_tasks, time.time() - start)) 
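
The callback doing the heavy lifting in Example 63 is EarlyStopping: with patience=2 on val_loss, the nominal 500 epochs end as soon as validation loss stops improving for two consecutive epochs. A minimal sketch of that behavior, assuming toy data and a toy model:

import numpy as np
import keras
from keras.callbacks import EarlyStopping

x = np.random.rand(128, 16)
y = keras.utils.to_categorical(np.random.randint(0, 5, 128), num_classes=5)
model = keras.models.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(16,)),
    keras.layers.Dense(5, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Stops once val_loss fails to improve for 2 consecutive epochs,
# so far fewer than 500 epochs actually run.
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
history = model.fit(x, y, validation_split=0.2, epochs=500, verbose=0,
                    callbacks=[early_stopping])
print('stopped after {} epochs'.format(len(history.history['val_loss'])))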
Example 64
Project: kaggle_dsb   Author: syagev   File: classifier.py    Apache License 2.0 4 votes vote down vote up
def train(trainset, valset, path_data, path_session, hyper_param):
    """Execute a single training task.


    Returns:
        model: /path/to/best_model as measured by validation's loss
        loss: the loss computed on the validation set
        acc: the accuracy computed on the validation set
    """

    session_id = os.path.basename(path_session)
    model_cp = keras.callbacks.ModelCheckpoint(
        os.path.join(path_session, "{}_model.hdf5".format(session_id)),
        monitor="val_loss",
        save_best_only=True)
    

    # train
    model = _get_model(hyper_param["optimizer"],
                       hyper_param["batch_norm"],
                       pool_type=hyper_param["pool_type"],
                       dropout_rate=hyper_param["dropout_rate"])
    history = model.fit_generator(
        _sample_generator(trainset, path_data, hyper_param["batch_sz"]),
        steps_per_epoch=int(len(trainset) / hyper_param["batch_sz"]),
        epochs=hyper_param["epochs"],
        validation_data=_sample_generator(valset, path_data, 2),
        validation_steps=int(len(valset) / 2),
        callbacks=[model_cp, hyper_param["lr_schedule"]],
        verbose=1,
        workers=4)

    # plot training curves
    def plot_history(metric):
        plt.ioff()
        str_metric = "accuracy" if metric == "acc" else "loss"
        plt.plot(history.history[metric])
        plt.plot(history.history["val_{}".format(metric)])
        plt.title("model {}".format(str_metric))
        plt.ylabel(str_metric)
        plt.xlabel("epoch")
        plt.legend(["train", "test"], loc="upper left")
        plt.savefig(os.path.join(path_session, 
                                 "{}_{}.png".format(session_id, str_metric)))
    
    plot_history("loss")
    plt.cla()
    plot_history("acc")   
    with open(os.path.join(path_session,
                           "{}_history.pkl".format(session_id)),
              'wb') as output:
        pickle.dump(history.history, output, pickle.HIGHEST_PROTOCOL)
    
    # output model and performance measures
    ind_min_loss = np.argmin(history.history["val_loss"])
    return (os.path.join(path_session, "{}_model.hdf5".format(session_id)),
            history.history["val_loss"][ind_min_loss],
            history.history["val_acc"][ind_min_loss]) 
Example 65
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_reg.py    MIT License 4 votes vote down vote up
def train_model(self,file_list,labels,n_fold=5,batch_size=16,epochs=40,dim=224,lr=1e-5,model='ResNet50'):
		model_save_dest = {}
		k = 0
		kf = KFold(n_splits=n_fold, random_state=0, shuffle=True)

		for train_index,test_index in kf.split(file_list):


			k += 1
			file_list = np.array(file_list)
			labels   = np.array(labels)
			train_files,train_labels  = file_list[train_index],labels[train_index]
			val_files,val_labels  = file_list[test_index],labels[test_index]
			
			if model == 'ResNet50':
				model_final = self.resnet_pseudo(dim=224,freeze_layers=10,full_freeze='N')
			
			if model == 'VGG16':
				model_final = self.VGG16_pseudo(dim=224,freeze_layers=10,full_freeze='N') 
			
			if model == 'InceptionV3':
				model_final = self.inception_pseudo(dim=224,freeze_layers=10,full_freeze='N')
				
			adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
			model_final.compile(optimizer=adam, loss=["mse"],metrics=['mse'])
			reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.50,patience=3, min_lr=0.000001)
			early = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
			logger = CSVLogger('keras-5fold-run-01-v1-epochs_ib.log', separator=',', append=False)
			checkpoint = ModelCheckpoint(
								'kera1-5fold-run-01-v1-fold-' + str('%02d' % (k + 1)) + '-run-' + str('%02d' % (1 + 1)) + '.check',
								monitor='val_loss', mode='min',
								save_best_only=True,
								verbose=1) 
			callbacks = [reduce_lr,early,checkpoint,logger]
			train_gen = DataGenerator(train_files, train_labels, batch_size=batch_size, n_classes=len(self.class_folders), dim=(self.dim, self.dim, 3), shuffle=True)
			val_gen = DataGenerator(val_files, val_labels, batch_size=batch_size, n_classes=len(self.class_folders), dim=(self.dim, self.dim, 3), shuffle=True)
			model_final.fit_generator(train_gen, epochs=epochs, verbose=1, validation_data=val_gen, callbacks=callbacks)
			model_name = 'kera1-5fold-run-01-v1-fold-' + str('%02d' % (k + 1)) + '-run-' + str('%02d' % (1 + 1)) + '.check'
			del model_final
			f = h5py.File(model_name, 'r+')
			del f['optimizer_weights']
			f.close()
			model_final = keras.models.load_model(model_name)
			model_name1 = self.outdir + str(model) + '___' + str(k) 
			model_final.save(model_name1)
			model_save_dest[k] = model_name1
				
		return model_save_dest

	# Hold out dataset validation function 
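
The four-callback stack in Example 65 (ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, CSVLogger) is a common training recipe. A minimal, self-contained sketch of the same stack follows; the regression model, data, and file names are illustrative assumptions:

import numpy as np
import keras
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, CSVLogger

x = np.random.rand(64, 12)
y = np.random.rand(64, 1)  # regression target, matching the mse loss above
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(12,))])
model.compile(optimizer=keras.optimizers.Adam(lr=1e-5), loss='mse')

callbacks = [
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=1e-6),
    EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1),
    ModelCheckpoint('fold.check', monitor='val_loss', mode='min', save_best_only=True, verbose=1),
    CSVLogger('training.log', separator=',', append=False),
]
model.fit(x, y, validation_split=0.25, epochs=5, callbacks=callbacks, verbose=0)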
Example 66
Project: Intelligent-Projects-Using-Python   Author: PacktPublishing   File: TransferLearning_ffd.py    MIT License 4 votes vote down vote up
def train_model(self,train_dir,val_dir,n_fold=5,batch_size=16,epochs=40,dim=224,lr=1e-5,model='ResNet50'):
        if model == 'ResNet50':
            model_final = self.resnet_pseudo(dim=224,freeze_layers=10,full_freeze='N')
        if model == 'VGG16':
            model_final = self.VGG16_pseudo(dim=224,freeze_layers=10,full_freeze='N') 
        if model == 'InceptionV3':
            model_final = self.inception_pseudo(dim=224,freeze_layers=10,full_freeze='N')
            
        train_file_names = glob.glob(f'{train_dir}/*/*')
        val_file_names = glob.glob(f'{val_dir}/*/*')
        train_steps_per_epoch = len(train_file_names)/float(batch_size)
        val_steps_per_epoch = len(val_file_names)/float(batch_size)
        train_datagen = ImageDataGenerator(horizontal_flip = True,vertical_flip = True,width_shift_range = 0.1,height_shift_range = 0.1,
                channel_shift_range=0,zoom_range = 0.2,rotation_range = 20,preprocessing_function=pre_process)
        val_datagen = ImageDataGenerator(preprocessing_function=pre_process)
        train_generator = train_datagen.flow_from_directory(train_dir,
                                                            target_size=(dim, dim),
                                                            batch_size=batch_size,
                                                            class_mode='categorical')
        val_generator = val_datagen.flow_from_directory(val_dir,
                                                        target_size=(dim, dim),
                                                        batch_size=batch_size,
                                                        class_mode='categorical')
        print(train_generator.class_indices)
        joblib.dump(train_generator.class_indices,f'{self.outdir}/class_indices.pkl')
        adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model_final.compile(optimizer=adam, loss=["categorical_crossentropy"],metrics=['accuracy'])
        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.50,patience=3, min_lr=0.000001)
        early = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
        logger = CSVLogger(f'{self.outdir}/keras-epochs_ib.log', separator=',', append=False)
        model_name = f'{self.outdir}/keras_transfer_learning-run.check'
        checkpoint = ModelCheckpoint(
                model_name,
                monitor='val_loss', mode='min',
                save_best_only=True,
                verbose=1) 
        callbacks = [reduce_lr,early,checkpoint,logger]
        model_final.fit_generator(train_generator, steps_per_epoch=train_steps_per_epoch, epochs=epochs, verbose=1,
                                  validation_data=val_generator, validation_steps=val_steps_per_epoch, callbacks=callbacks,
                                  class_weight={0: 0.012, 1: 0.12, 2: 0.058, 3: 0.36, 4: 0.43})
        #model_final.fit_generator(train_generator,steps_per_epoch=1,epochs=epochs,verbose=1,validation_data=(val_generator),validation_steps=1,callbacks=callbacks)
        
        del model_final
        f = h5py.File(model_name, 'r+')
        del f['optimizer_weights']
        f.close()
        model_final = keras.models.load_model(model_name)
        model_to_store_path = f'{self.outdir}/{model}' 
        model_final.save(model_to_store_path)
        return model_to_store_path,train_generator.class_indices

# Hold out dataset validation function 
Example 67
Project: vergeml   Author: vergeml   File: libraries.py    MIT License 4 votes vote down vote up
def callback(env, display_progress, stats):
        from keras.callbacks import Callback

        class KerasCallback(Callback):

            def __init__(self, env, display_progress, stats):
                self.env = env
                self.display_progress = display_progress
                self.stats = stats
                self.callback = None
                self.current_epoch = 0
                self.current_step = 0

            def on_train_begin(self, logs=None):
                logs = KerasCallback._xform_logs(logs)
                self.callback = self.env.progress_callback(self.params['epochs'], self.params['steps'],
                                                           self.display_progress, self.stats)

            def on_train_end(self, logs=None):
                logs = KerasCallback._xform_logs(logs)
                self.callback(self.current_epoch, self.current_step, **logs)

            def on_epoch_begin(self, epoch, logs=None):
                logs = KerasCallback._xform_logs(logs)
                self.callback(self.current_epoch, self.current_step, **logs)

            def on_epoch_end(self, epoch, logs=None):
                logs = KerasCallback._xform_logs(logs)
                self.current_epoch += 1
                self.callback(self.current_epoch, self.current_step, **logs)

            def on_batch_begin(self, batch, logs=None):
                logs = KerasCallback._xform_logs(logs)
                self.callback(self.current_epoch, self.current_step, **logs)

            def on_batch_end(self, batch, logs=None):
                logs = KerasCallback._xform_logs(logs)
                self.current_step += 1
                self.callback(self.current_epoch, self.current_step, **logs)

            @staticmethod
            def _xform_logs(logs):
                from copy import deepcopy
                logs = deepcopy(logs or {})
                for k in ('size', 'batch', 'epoch'):
                    if k in logs:
                        del logs[k]
                return {k.replace('_', '-'):v for k, v in logs.items()}

        return KerasCallback(env, display_progress, stats) 
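
Example 67 implements the full Callback interface to forward progress events into vergeml's own reporting. A much smaller custom callback only needs the hooks it cares about; this sketch (class and names are illustrative, not from the project) counts epochs and batches:

import numpy as np
import keras

class ProgressCounter(keras.callbacks.Callback):
    """Minimal custom callback: track epochs and batches seen."""

    def __init__(self):
        super(ProgressCounter, self).__init__()
        self.current_epoch = 0
        self.current_step = 0

    def on_epoch_end(self, epoch, logs=None):
        self.current_epoch += 1

    def on_batch_end(self, batch, logs=None):
        self.current_step += 1

x = np.random.rand(64, 4)
y = np.random.rand(64, 1)
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

counter = ProgressCounter()
model.fit(x, y, batch_size=16, epochs=2, callbacks=[counter], verbose=0)
print(counter.current_epoch, counter.current_step)  # 2 epochs, 8 batches total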
Example 68
Project: hacktoberfest2018   Author: ambujraj   File: DenseNet_CIFAR10.py    GNU General Public License v3.0 4 votes vote down vote up
def run_on_dataset(train_x, test_x, epochs, initial_epoch, load_weights=False, learning_rate=0.1, aug=True):  
  global model
  batch_size = 64
  if model is None:
    img_height, img_width, channel = train_x.shape[1],train_x.shape[2],train_x.shape[3]
    input = Input(shape=(img_height, img_width, channel,))
    First_Conv2D = Conv2D(num_filter*2, (3,3), use_bias=False ,padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)

    First_Block, out_filters = add_denseblock(First_Conv2D, num_filter, num_filter*2, dropout_rate)
    First_Transition, out_filters = add_transition(First_Block, out_filters, dropout_rate)
    Second_Block, out_filters = add_denseblock(First_Transition, num_filter, out_filters, dropout_rate)
    Second_Transition, out_filters = add_transition(Second_Block, out_filters, dropout_rate)
    Third_Block, out_filters = add_denseblock(Second_Transition, num_filter, out_filters, dropout_rate)
    Third_Transition, out_filters = add_transition(Third_Block, out_filters, dropout_rate)
    Last_Block, out_filters = add_denseblock(Third_Transition,  num_filter, out_filters, dropout_rate)
    output = output_layer(Last_Block)    
    model = Model(inputs=[input], outputs=[output])

    sgd = SGD(lr=learning_rate, decay=0.0001, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    generator.fit(train_x, augment=True)
  if load_weights:
    model.load_weights("DenseNet-40-12-CIFAR10.h5")
  model.summary()    
  if aug:
    steps_per_epoch = (len(train_x)*2)//batch_size    
    model.fit_generator(generator.flow(train_x, y_train, batch_size=batch_size),
              steps_per_epoch=steps_per_epoch,
              epochs=epochs,
              verbose=1,
              initial_epoch=initial_epoch,          
              validation_data=(test_x, y_test),
              callbacks=callbacks)      
  else:  
    model.fit(train_x, y_train, batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              initial_epoch=initial_epoch,          
              validation_data=(test_x, y_test),
              callbacks=callbacks) 
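
Example 68 branches between fit_generator on augmented batches and plain fit on the raw arrays, passing the same callbacks either way. A minimal sketch of that branch on toy image data; the model, shapes, and callback are illustrative assumptions:

import numpy as np
import keras
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator

x = np.random.rand(64, 8, 8, 3)
y = keras.utils.to_categorical(np.random.randint(0, 2, 64), num_classes=2)
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(8, 8, 3)),
    keras.layers.Dense(2, activation='softmax'),
])
model.compile(optimizer='sgd', loss='categorical_crossentropy')
callbacks = [EarlyStopping(monitor='loss', patience=2)]

aug = True
if aug:
    generator = ImageDataGenerator(horizontal_flip=True)
    model.fit_generator(generator.flow(x, y, batch_size=16),
                        steps_per_epoch=len(x) // 16, epochs=2, callbacks=callbacks)
else:
    model.fit(x, y, batch_size=16, epochs=2, callbacks=callbacks)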
Example 69
Project: densemapnet   Author: roatienza   File: predictor.py    MIT License 4 votes vote down vote up
def train_all(self, epochs=1000, lr=1e-3):
        checkdir = "checkpoint"
        try:
            os.mkdir(checkdir)
        except FileExistsError:
            print("Folder exists: ", checkdir)

        filename = self.settings.dataset
        filename += ".densemapnet.weights.{epoch:02d}.h5"
        filepath = os.path.join(checkdir, filename)
        checkpoint = ModelCheckpoint(filepath=filepath,
                                     save_weights_only=True,
                                     verbose=1,
                                     save_best_only=False)
        predict_callback = LambdaCallback(on_epoch_end=lambda epoch,
                                          logs: self.predict_disparity())
        callbacks = [checkpoint, predict_callback]
        self.load_train_data(1)
        if self.network is None:
            self.network = DenseMapNet(settings=self.settings)
            self.model = self.network.build_model(lr=lr)

        if self.settings.otanh:
            print("Using loss=mse on tanh output layer")
            self.model.compile(loss='mse',
                               optimizer=RMSprop(lr=lr, decay=1e-6))
        else:
            print("Using loss=crossent on sigmoid output layer")
            self.model.compile(loss='binary_crossentropy',
                               optimizer=RMSprop(lr=lr, decay=1e-6))

        if self.settings.model_weights:
            if self.settings.notrain:
                self.predict_disparity()
                return

        x = [self.train_lx, self.train_rx]
        self.model.fit(x,
                       self.train_dx,
                       epochs=epochs,
                       batch_size=4,
                       shuffle=True,
                       callbacks=callbacks) 
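
LambdaCallback, used in Example 69 to run predict_disparity() after every epoch, turns any function into a callback without subclassing. A minimal sketch on toy data (the model and the hook body are assumptions):

import numpy as np
import keras
from keras.callbacks import LambdaCallback

x = np.random.rand(32, 6)
y = np.random.rand(32, 1)
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(6,))])
model.compile(optimizer='rmsprop', loss='mse')

# Any callable accepting (epoch, logs) can be hooked in this way.
log_epoch = LambdaCallback(
    on_epoch_end=lambda epoch, logs: print('epoch', epoch, 'loss', logs['loss']))
model.fit(x, y, epochs=3, callbacks=[log_epoch], verbose=0)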
Example 70
Project: densemapnet   Author: roatienza   File: predictor.py    MIT License 4 votes vote down vote up
def train_batch(self, epochs=1, lr=1e-3, seq=1):
        count = self.settings.num_dataset + 1
        checkdir = "checkpoint"
        try:
            os.mkdir(checkdir)
        except FileExistsError:
            print("Folder exists: ", checkdir)

        is_model_compiled = False
            
        indexes = np.arange(1,count)
        np.random.shuffle(indexes)
        
        # for i in range(1, count, 1):
        for i in indexes:
            filename = self.settings.dataset
            # filename += ".densemapnet.weights.{epoch:02d}.h5"
            filename += ".densemapnet.weights.%d-%d.h5" % (seq, i)
            filepath = os.path.join(checkdir, filename)
            checkpoint = ModelCheckpoint(filepath=filepath,
                                         save_weights_only=True,
                                         verbose=1,
                                         save_best_only=False)
            callbacks = [checkpoint]

            self.load_train_data(i)

            if self.network is None:
                self.network = DenseMapNet(settings=self.settings)
                self.model = self.network.build_model(lr=lr)

            if not is_model_compiled:
                if self.settings.otanh:
                    sgd = SGD(lr=lr, momentum=0.5, nesterov=True)
                    print("Using loss=mse on tanh output layer")
                    self.model.compile(loss='mse', optimizer=sgd)
                else:
                    print("Using loss=crossent on sigmoid output layer")
                    self.model.compile(loss='binary_crossentropy',
                                       optimizer=RMSprop(lr=lr, decay=1e-6))
                is_model_compiled = True

            if self.settings.model_weights:
                if self.settings.notrain:
                    self.predict_disparity()
                    return

            x = [self.train_lx, self.train_rx]
            self.model.fit(x,
                           self.train_dx,
                           epochs=epochs,
                           batch_size=4,
                           shuffle=True,
                           callbacks=callbacks) 
Example 71
Project: StarTrader   Author: jiewwantan   File: compare.py    MIT License 4 votes vote down vote up
def train_model(model, train_X, train_y, model_type):
        """
        Try to load a pre-built model.
        Otherwise, fit a new model with the training data. Once training is done, save the model.
        """
        es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE)
        if model_type == "LSTM":
            batch_size = 4
            mc = ModelCheckpoint('./model/best_lstm_model.h5', monitor='val_loss', save_weights_only=False,
                                 mode='min', verbose=1, save_best_only=True)
            try:
                model = load_model('./model/best_lstm_model.h5')
                print("\n")
                print("Loading pre-saved model ...")
            except (IOError, OSError):
                print("\n")
                print("No pre-saved model, training new model.")
        elif model_type == "CNN":
            batch_size = 8
            mc = ModelCheckpoint('./model/best_cnn_model.h5', monitor='val_loss', save_weights_only=False,
                                 mode='min', verbose=1, save_best_only=True)
            try:
                model = load_model('./model/best_cnn_model.h5')
                print("\n")
                print("Loading pre-saved model ...")
            except (IOError, OSError):
                print("\n")
                print("No pre-saved model, training new model.")
        # fit network
        history = model.fit(
            train_X,
            train_y,
            epochs=500,
            batch_size=batch_size,
            validation_split=0.2,
            verbose=2,
            shuffle=True,
            # callbacks=[es, mc, tb, LearningRateTracker()])
            callbacks=[es, mc])

        if model_type == "LSTM":
            model.save('./model/best_lstm_model.h5')
        elif model_type == "CNN":
            model.save('./model/best_cnn_model.h5')

        return history, model 
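
Example 71 combines a try/except resume (load a checkpointed model if one exists) with EarlyStopping and ModelCheckpoint writing back to the same path. A minimal, self-contained sketch of that pattern; path, model, and data are illustrative assumptions:

import numpy as np
import keras
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model

x = np.random.rand(64, 5)
y = np.random.rand(64, 1)
path = 'best_model.h5'

try:
    model = load_model(path)  # resume from the last checkpoint if present
except (IOError, OSError):
    model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(5,))])
    model.compile(optimizer='adam', loss='mse')

es = EarlyStopping(monitor='val_loss', mode='min', patience=3, verbose=1)
mc = ModelCheckpoint(path, monitor='val_loss', mode='min', save_best_only=True, verbose=1)
model.fit(x, y, epochs=5, validation_split=0.2, verbose=0, callbacks=[es, mc])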
Example 72
Project: POEM   Author: Rinoahu   File: deep_operon.py    GNU General Public License v3.0 4 votes vote down vote up
def fit_2d(self, X_train, y_train, X_test=None, y_test=None):
        Y_train = np_utils.to_categorical(y_train)
        if type(y_test) == type(None):
            Y_test = None
        else:
            Y_test = np_utils.to_categorical(y_test)

        nb_classes = Y_train.shape[1]

        # set parameter for cnn
        loss = 'categorical_crossentropy' if nb_classes > 2 else 'binary_crossentropy'
        print('loss function is', loss)
        # number of convolutional filters to use
        nb_filters = self.nb_filter
        # size of pooling area for max pooling
        nb_pool = self.nb_pool
        # convolution kernel size
        nb_conv = self.nb_conv
        # traning iteration
        nb_epoch = self.nb_epoch
        batch_size = self.batch_size
        a, b, img_rows, img_cols = X_train.shape

        # set the conv model
        model = Sequential()
        model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', input_shape=(b, img_rows, img_cols), activation='relu', name='conv1_1'))
        #model.add(Conv2D(64, (2, 2), padding="same", activation="relu", name="conv1_1", input_shape=(1, 192, 4)))

        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))

        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes, activation='sigmoid'))
        opt = self.opt
        model.compile(loss=loss, optimizer='adam', metrics=[self.metric])

        # set the check pointer to save the best model
        if type(X_test) != type(None) and type(Y_test) != type(None):
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
                      validation_data=(X_test, Y_test), shuffle=True, callbacks=self.checkpointer)
        else:
            model.fit(X_train, Y_train, batch_size=batch_size,
                      nb_epoch=nb_epoch, verbose=1, shuffle=True, validation_split = self.cross_val, callbacks=self.checkpointer)

        self.model_2d = model 
Example 73
Project: AutoSpeech2019   Author: DeepWisdom   File: model.py    Apache License 2.0 4 votes vote down vote up
def tr34_model_init(self):
        def build_model(net):
            return vggvox_resnet2d_icassp(
                input_dim=self.params["dim"],
                num_class=self.params["n_classes"],
                mode="pretrain",
                config=self.config,
                net=net,
            )
        model_34 = build_model('resnet34s')
        model = model_34
        if self.config["resume_pretrained"]:
            pretrain_path = os.path.join(os.path.dirname(__file__), self.config["resume_pretrained"])
            if os.path.isfile(pretrain_path):
                model.load_weights(pretrain_path, by_name=True, skip_mismatch=True)
                if self.class_num >= self.tr34_mconfig.CLASS_NUM_THS:
                    frz_layer_num = self.tr34_mconfig.INIT_BRZ_L_NUM
                else:
                    frz_layer_num = self.tr34_mconfig.INIT_BRZ_L_NUM_WILD
                for layer in model.layers[: frz_layer_num]:
                    layer.trainable = False

            pretrain_output = model.output
            weight_decay = self.tr34_mconfig.TR34_INIT_WD
            y = keras.layers.Dense(
                self.params["n_classes"],
                activation="softmax",
                kernel_initializer="orthogonal",
                use_bias=False,
                trainable=True,
                kernel_regularizer=keras.regularizers.l2(weight_decay),
                bias_regularizer=keras.regularizers.l2(weight_decay),
                name="prediction",
            )(pretrain_output)
            model = keras.models.Model(model.input, y, name="vggvox_resnet2D_{}_{}_new".format("softmax", "gvlad"))
            opt = keras.optimizers.Adam(lr=1e-3)
            model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])

        model.summary()

        callbacks = list()
        if self.tr34_mconfig.ENABLE_CB_ES:
            early_stopping = EarlyStopping(monitor="val_loss", patience=15)
            callbacks.append(early_stopping)
        if self.tr34_mconfig.ENABLE_CB_LRS:
            normal_lr = LearningRateScheduler(self.step_decay)
            callbacks.append(normal_lr)
        return model, callbacks 
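
The LearningRateScheduler attached in Example 73 wraps the class's step_decay method; any function mapping an epoch index to a learning rate will do. A minimal sketch with an assumed halve-every-10-epochs schedule:

import numpy as np
import keras
from keras.callbacks import LearningRateScheduler

def step_decay(epoch):
    # Halve the learning rate every 10 epochs, starting from 1e-3.
    return 1e-3 * (0.5 ** (epoch // 10))

x = np.random.rand(64, 8)
y = keras.utils.to_categorical(np.random.randint(0, 3, 64), num_classes=3)
model = keras.models.Sequential([keras.layers.Dense(3, activation='softmax', input_shape=(8,))])
model.compile(optimizer=keras.optimizers.Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['acc'])

model.fit(x, y, epochs=25, verbose=0, callbacks=[LearningRateScheduler(step_decay)])
print(keras.backend.get_value(model.optimizer.lr))  # 0.00025 after 25 epochs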
Example 74
Project: AutoSpeech2019   Author: DeepWisdom   File: model.py    Apache License 2.0 4 votes vote down vote up
def train_left_fit_by_generator(self):
        self.trn_gen = generator.DataGenerator(self.train_x, self.train_y, **self.params)
        model = self.model
        accept_cur = False
        cur_epoch = self.decide_epoch_curround()
        if self.fullvalid_stage:
            m_history = model.fit_generator(
                self.trn_gen,
                steps_per_epoch=int(len(self.train_x) // self.params["batch_size"]),
                validation_data=self.g_valid_gen,
                epochs=cur_epoch,
                max_queue_size=10,
                callbacks=self.callbacks,
                use_multiprocessing=False,
                workers=1,
                verbose=ThinRes34Config.VERBOSE,
            )
            cur_valid_loss = round(m_history.history.get("val_loss")[-1], 6)
            cur_valid_acc = round(m_history.history.get("val_acc")[-1], 6)
            self.g_val_loss_list.append(cur_valid_loss)
            self.g_val_acc_list.append(cur_valid_acc)
        else:
            self.trn_gen = generator.DataGenerator(self.train_x, self.train_y, **self.params)
            m_history = model.fit_generator(
                self.trn_gen,
                steps_per_epoch=int(len(self.train_x) // self.params["batch_size"]),
                epochs=cur_epoch,
                max_queue_size=10,
                callbacks=self.callbacks,
                use_multiprocessing=False,
                workers=1,
                verbose=ThinRes34Config.VERBOSE,
            )
            cur_valid_loss = 100
            cur_valid_acc = -1
        cur_train_loss = round(m_history.history.get("loss")[-1], 6)
        cur_train_acc = round(m_history.history.get("acc")[-1], 6)
        cur_lr = m_history.history.get("lr")[-1]
        self.g_train_loss_list.append(cur_train_loss)
        self.g_train_acc_list.append(cur_train_acc)
        self.g_his_eval_dict[self.round_idx] = {
            "t_loss": cur_train_loss,
            "t_acc": cur_train_acc,
            "v_loss": cur_valid_loss,
            "v_acc": cur_valid_acc
        }
        accept_cur = self.train_bestmodel_decision()
        if self.fullvalid_stage:
            self.g_accept_cur_list.append(accept_cur)
        return model, accept_cur 
Example 75
Project: CNNArt   Author: thomaskuestner   File: motion_abd_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '.json'
    model_all = model_name[0] + '_model.h5'

    #    # load weights and model (OLD WAY)
    #    conten = sio.loadmat(model_name)
    #    weig = content['wei']
    #    nSize = weig.shape
    #    weigh = []
    #
    #    for i in drange(0,nSize[1],2):
    #    	w0 = weig[0,i]
    #    	w1 = weig[0,i+1]
    #    	w1=w1.T
    #    	w1 = np.concatenate(w1,axis=0)
    #
    #    	weigh= weigh.extend([w0, w1])
    #
    #    model = model_from_json(model_json)
    #    model.set_weights(weigh)

    # load weights and model (new way)
    # model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    # model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)

    prob_pre = model.predict(X_test, batchSize, 0)

    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
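
In fPredict() above, evaluate() can only be unpacked into a (loss, accuracy) pair because the model is compiled with an accuracy metric; the old Keras 0.x show_accuracy flag is gone in Keras 1 and 2. A minimal sketch of the evaluate/predict tail on toy data (model, shapes, and batch size are illustrative):

import numpy as np
import keras

X_test = np.random.rand(32, 8)
y_test = keras.utils.to_categorical(np.random.randint(0, 2, 32), num_classes=2)
model = keras.models.Sequential([keras.layers.Dense(2, activation='softmax', input_shape=(8,))])

# Without metrics=['accuracy'], evaluate() returns a single scalar loss
# and the two-value unpacking below would fail.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score_test, acc_test = model.evaluate(X_test, y_test, batch_size=8, verbose=0)
prob_pre = model.predict(X_test, batch_size=8, verbose=0)
print(score_test, acc_test, prob_pre.shape)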
Example 76
Project: CNNArt   Author: thomaskuestner   File: motion_all_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '.json'
    model_all = model_name[0] + '_model.h5'

    #    # load weights and model (OLD WAY)
    #    conten = sio.loadmat(model_name)
    #    weig = content['wei']
    #    nSize = weig.shape
    #    weigh = []
    #
    #    for i in drange(0,nSize[1],2):
    #    	w0 = weig[0,i]
    #    	w1 = weig[0,i+1]
    #    	w1=w1.T
    #    	w1 = np.concatenate(w1,axis=0)
    #
    #    	weigh= weigh.extend([w0, w1])
    #
    #    model = model_from_json(model_json)
    #    model.set_weights(weigh)

    # load weights and model (new way)
    # model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    # model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 0)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 77
Project: CNNArt   Author: thomaskuestner   File: motion_head_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test,y_test,model_name, sOutPath, patchSize, batchSize):

    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '.json'
    model_all = model_name[0] + '_model.h5'

#    # load weights and model (OLD WAY)
#    conten = sio.loadmat(model_name)
#    weig = content['wei']
#    nSize = weig.shape
#    weigh = []
#
#    for i in drange(0,nSize[1],2):
#    	w0 = weig[0,i]
#    	w1 = weig[0,i+1]
#    	w1=w1.T
#    	w1 = np.concatenate(w1,axis=0)
#
#    	weigh= weigh.extend([w0, w1])
#
#    model = model_from_json(model_json)
#    model.set_weights(weigh)

    # load weights and model (new way)
    #model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss',patience=10,verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    #model = load_model(model_all)

    # assume artifact affected shall be tested!
    #y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 0)

    #modelSave = model_name[:-5] + '_pred.mat'
    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre':prob_pre, 'score_test': score_test, 'acc_test':acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 78
Project: CNNArt   Author: thomaskuestner   File: multiclass_DenseNet-100.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '_json'
    model_all = model_name[0] + '_model.h5'

    #    # load weights and model (OLD WAY)
    #    conten = sio.loadmat(model_name)
    #    weig = content['wei']
    #    nSize = weig.shape
    #    weigh = []
    #
    #    for i in drange(0,nSize[1],2):
    #    	w0 = weig[0,i]
    #    	w1 = weig[0,i+1]
    #    	w1=w1.T
    #    	w1 = np.concatenate(w1,axis=0)
    #
    #    	weigh= weigh.extend([w0, w1])
    #
    #    model = model_from_json(model_json)
    #    model.set_weights(weigh)

    # load weights and model (new way)
    # model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]

    model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    # model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 0)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 79
Project: CNNArt   Author: thomaskuestner   File: multiclass_SE_DenseNet-BC-100.py    Apache License 2.0 4 votes vote down vote up
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
    weight_name = model_name[0] + '_weights.h5'
    model_json = model_name[0] + '.json'
    model_all = model_name[0] + '_model.h5'

    #    # load weights and model (OLD WAY)
    #    conten = sio.loadmat(model_name)
    #    weig = content['wei']
    #    nSize = weig.shape
    #    weigh = []
    #
    #    for i in drange(0,nSize[1],2):
    #    	w0 = weig[0,i]
    #    	w1 = weig[0,i+1]
    #    	w1=w1.T
    #    	w1 = np.concatenate(w1,axis=0)
    #
    #    	weigh= weigh.extend([w0, w1])
    #
    #    model = model_from_json(model_json)
    #    model.set_weights(weigh)

    # load weights and model (new way)
    # model = model_from_json(model_json)
    model = createModel(patchSize)
    opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = []
    callbacks.append(
        ModelCheckpoint(sOutPath + os.sep + 'checkpoints' + os.sep + 'checker.hdf5', monitor='val_acc', verbose=0,
                        period=1, save_best_only=True))  # overrides the last checkpoint; it's just for security
    callbacks.append(EarlyStopping(monitor='val_loss', patience=10, verbose=1))

    model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
    model.load_weights(weight_name)

    # load complete model (including weights); keras > 0.7
    # model = load_model(model_all)

    # assume artifact affected shall be tested!
    # y_test = np.ones((len(X_test),1))

    score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
    prob_pre = model.predict(X_test, batchSize, 0)

    # modelSave = model_name[:-5] + '_pred.mat'
    modelSave = model_name[0] + '_pred.mat'
    sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test})


###############################################################################
## OPTIMIZATIONS ##
############################################################################### 
Example 80
Project: CNNArt   Author: thomaskuestner   File: motion_vgg_CNN2D.py    Apache License 2.0 4 votes vote down vote up
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None):
    # parse inputs
    batchSize = 64 if batchSize is None else batchSize
    learningRate = 0.01 if learningRate is None else learningRate
    iEpochs = 300 if iEpochs is None else iEpochs

    print('Training 2D CNN')
    print('with lr = ' + str(learningRate) + ' , batchSize = ' + str(batchSize))

    # save names
    _, sPath = os.path.splitdrive(sOutPath)
    sPath, sFilename = os.path.split(sPath)
    sFilename, sExt = os.path.splitext(sFilename)
    model_name = sPath + '/' + sFilename + '/' + sFilename + '_lr_' + str(learningRate) + '_bs_' + str(batchSize)
    weight_name = model_name + '_weights.h5'
    model_json = model_name + '_json'
    model_all = model_name + '_model.h5'
    model_mat = model_name + '.mat'

    if os.path.isfile(model_mat):  # no training if output file exists
        return

    # create model
    cnn = createModel(patchSize)

    cnn.summary()

    # opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True);#Adag(lr=0.01, epsilon=1e-06)
    opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]
    callbacks.append(ModelCheckpoint(model_all, save_weights_only=False, monitor='val_acc',
                                     verbose=1, period=2, save_best_only=True))  # overrides the last checkpoint; it's just for security
    callbacks.append(ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, min_lr=1e-4, verbose=1))

    cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])

    cnn.fit(X_train,
            y_train,
            validation_data=(X_test, y_test),
            epochs=iEpochs,
            batch_size=batchSize,
            callbacks=callbacks,
            verbose=1)

    # save model
    # cnn.save_weights(weight_name, overwrite=True)
    cnn.save(model_all)  # keras > v0.7