Python train model

The following are 60 Python code examples related to "train model", collected from open-source projects. The original project and source file for each example are listed above it.
Example 1
Source File: tokenization.py    From BERT with Apache License 2.0    11 votes
def train_model(self, train_config=None):
		'''
		https://github.com/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb
		see this tutorial for SentencePiece training
		'''
		config = train_config if train_config else self.config
		param = ""
		param += "--input={} ".format(config["corpus"])
		param += "--model_prefix={} ".format(config["model_prefix"])
		param += "--vocab_size={} ".format(config["vocab_size"])
		param += "--model_type={} ".format(config.get("model_type", "unigram"))
		param += "--character_coverage={} ".format(config.get("character_coverage", 0.995))
		param += "--mining_sentence_size={} ".format(config.get("mining_sentence_size", 5000000))
		param += "--input_sentence_size={} ".format(config.get("input_sentence_size", 5000000))
		param += "--max_sentencepiece_length={} ".format(config.get("max_sentencepiece_length", 5))
		try:
			SentencePieceTrainer.Train(param)
			self.sp.Load(config["model_prefix"]+".model")
		except Exception as exc:
			raise ValueError("training word piece model failed") from exc
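Note: recent sentencepiece releases also accept keyword arguments, which avoids assembling the flag string by hand. A minimal sketch (hypothetical corpus path and hyperparameters):

import sentencepiece as spm

# Train with keyword arguments instead of a flag string.
spm.SentencePieceTrainer.Train(
    input='corpus.txt', model_prefix='sp', vocab_size=8000,
    model_type='unigram', character_coverage=0.995)

# Load the trained model and tokenize a sentence.
sp = spm.SentencePieceProcessor()
sp.Load('sp.model')
print(sp.EncodeAsPieces('machine learning is fun'))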
Example 2
Source File: 1_linear_basic.py    From deep-learning-note with MIT License    7 votes
def trainModel(trainData, features, labels):
    """
    利用训练数据,估计模型参数

    参数
    ----
    trainData : DataFrame,训练数据集,包含特征和标签

    features : 特征名列表

    labels : 标签名列表

    返回
    ----
    model : LinearRegression, 训练好的线性模型
    """
    # 创建一个线性回归模型
    model = linear_model.LinearRegression()
    # 训练模型,估计模型参数
    model.fit(trainData[features], trainData[labels])
    return model 
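For context, calling this helper on a toy dataset might look like the following sketch (hypothetical column names):

import pandas as pd
from sklearn import linear_model

# Toy training set: y = 2x + 1, no noise.
trainData = pd.DataFrame({'x': [0.0, 1.0, 2.0, 3.0], 'y': [1.0, 3.0, 5.0, 7.0]})
model = trainModel(trainData, ['x'], ['y'])
print(model.coef_, model.intercept_)              # approximately [[2.]] [1.]
print(model.predict(pd.DataFrame({'x': [4.0]})))  # approximately [[9.]]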
Example 3
Source File: embedding_trainer.py    From kaggle-HomeDepot with MIT License    7 votes
def train_doc2vec_model(df, columns):
    model_param = {
        "alpha": config.EMBEDDING_ALPHA,
        "learning_rate_decay": config.EMBEDDING_LEARNING_RATE_DECAY,
        "n_epoch": config.EMBEDDING_N_EPOCH,
        "sg": 1, # not use
        "dm": 1,
        "hs": 1,
        "min_count": config.EMBEDDING_MIN_COUNT,
        "size": config.EMBEDDING_DIM,
        "sample": 0.001,
        "window": config.EMBEDDING_WINDOW,
        "workers": config.EMBEDDING_WORKERS,
    }
    model_dir = config.DOC2VEC_MODEL_DIR
    model_name = "Homedepot-doc2vec-D%d-min_count%d.model"%(
                    model_param["size"], model_param["min_count"])

    doc2vec = DataFrameDoc2Vec(df, columns, model_param)
    doc2vec.train()
    doc2vec.save(model_dir, model_name)


#---------------------- Main ---------------------- 
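DataFrameDoc2Vec is a project-specific wrapper; the underlying gensim call is roughly the following sketch (gensim 4.x parameter names shown; older gensim releases use size instead of vector_size):

from gensim.models.doc2vec import Doc2Vec, TaggedDocument

docs = [TaggedDocument(words=text.split(), tags=[i])
        for i, text in enumerate(['deck screws', 'wood deck stain'])]
model = Doc2Vec(docs, vector_size=100, window=5, min_count=1,
                dm=1, hs=1, epochs=20)
model.save('Homedepot-doc2vec-demo.model')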
Example 4
Source File: cartpole_a2c.py    From 2019-OSS-Summer-RL with MIT License    6 votes
def train_model(self, state, action, reward, next_state, done):
        value = self.critic.predict(state)[0]
        next_value = self.critic.predict(next_state)[0]

        act = np.zeros([1, self.action_size])
        act[0][action] = 1

        # advantage and update target based on the Bellman expectation equation
        if done:
            advantage = reward - value
            target = [reward]
        else:
            advantage = (reward + self.discount_factor * next_value) - value
            target = reward + self.discount_factor * next_value

        self.actor_updater([state, act, advantage])
        self.critic_updater([state, target]) 
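As a quick sanity check of the non-terminal branch, here is the one-step target and advantage worked by hand (a sketch, not from the source; gamma is assumed to be 0.99):

reward, value, next_value, gamma = 1.0, 0.5, 0.6, 0.99
target = reward + gamma * next_value   # 1.594
advantage = target - value             # 1.094
print(target, advantage)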
Example 5
Source File: svm_classifier.py    From nlp-journey with Apache License 2.0    6 votes
def train_model(self, test_size=0.2):
        """
        训练模型,简单地将生成的TF-IDF数据,chi提取后的特征,以及svm算法模型写入到了磁盘中
        :return: 返回训练好的模型
        """
        data_set = pd.read_csv(self.train_path,
                               sep='##',
                               encoding='utf-8',
                               header=None,
                               engine='python')
        data_set = data_set.dropna()
        chi_features, tf_idf_model, chi_model = self.__select_features(data_set)
        x_train, x_test, y_train, y_test = train_test_split(chi_features,
                                                            data_set[1],
                                                            test_size=test_size,
                                                            random_state=42)
        # a linear kernel is used here; an RBF kernel would be much slower.
        clf_model = svm.SVC(kernel='linear', verbose=True)
        print(clf_model)
        clf_model.fit(x_train, y_train)
        score = clf_model.score(x_test, y_test)
        print('test accuracy:', score)
        return tf_idf_model, chi_model, clf_model 
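The private __select_features step is not shown; a common scikit-learn equivalent combines TfidfVectorizer with SelectKBest(chi2, ...). A minimal sketch under that assumption:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, chi2

texts = ['good movie', 'bad plot', 'great acting', 'terrible pacing']
labels = [1, 0, 1, 0]

tf_idf_model = TfidfVectorizer()
tfidf = tf_idf_model.fit_transform(texts)              # sparse TF-IDF matrix
chi_model = SelectKBest(chi2, k=2)
chi_features = chi_model.fit_transform(tfidf, labels)  # keep 2 best features
print(chi_features.shape)                              # (4, 2)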
Example 6
Source File: mobilenet_v1_train.py    From edafa with MIT License    6 votes
def train_model():
  """Trains mobilenet_v1."""
  g, train_tensor = build_model()
  with g.as_default():
    slim.learning.train(
        train_tensor,
        FLAGS.checkpoint_dir,
        is_chief=(FLAGS.task == 0),
        master=FLAGS.master,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        number_of_steps=FLAGS.number_of_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=get_checkpoint_init_fn(),
        global_step=tf.train.get_global_step()) 
Example 7
Source File: nlp_chinese.py    From simple_nlp_chinese with MIT License    6 votes
def train_model(file_input, file_output):
    file_intermediate = os.path.join(
        os.path.dirname(file_input),
        os.path.splitext(file_input)[0])
    process_corpus_extraction(
        file_input, file_intermediate + '.extracted')
    process_chinese_filtering(
        file_intermediate + '.extracted',
        file_intermediate + '.filtered')
    process_chinese_transformation(
        file_intermediate + '.filtered',
        file_intermediate + '.transformed')
    process_chinese_transformation(
        file_intermediate + '.transformed',
        file_intermediate + '.segmented')
    # we can train for either word2vec or doc2vec
    # process_word_training(
    #     file_intermediate + '.segmented', file_output)
    process_doc_training(
        file_intermediate + '.segmented', file_output) 
Example 8
Source File: embedding_trainer.py    From kaggle-HomeDepot with MIT License    6 votes
def train_word2vec_model(df, columns):
    model_param = {
        "alpha": config.EMBEDDING_ALPHA,
        "learning_rate_decay": config.EMBEDDING_LEARNING_RATE_DECAY,
        "n_epoch": config.EMBEDDING_N_EPOCH,
        "sg": 1,
        "hs": 1,
        "min_count": config.EMBEDDING_MIN_COUNT,
        "size": config.EMBEDDING_DIM,
        "sample": 0.001,
        "window": config.EMBEDDING_WINDOW,
        "workers": config.EMBEDDING_WORKERS,
    }
    model_dir = config.WORD2VEC_MODEL_DIR
    model_name = "Homedepot-word2vec-D%d-min_count%d.model"%(
                    model_param["size"], model_param["min_count"])

    word2vec = DataFrameWord2Vec(df, columns, model_param)
    word2vec.train()
    word2vec.save(model_dir, model_name)


#---------------------- Doc2Vec ---------------------- 
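As with the doc2vec variant, DataFrameWord2Vec wraps gensim; the core training call is roughly this sketch (gensim 4.x names shown; older releases use size instead of vector_size):

from gensim.models import Word2Vec

sentences = [['deck', 'screws'], ['wood', 'deck', 'stain']]
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1,
                 sg=1, hs=1, sample=0.001, workers=4)
print(model.wv.most_similar('deck', topn=2))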
Example 9
Source File: TrainInterface.py    From helen with MIT License    6 votes
def train_model(self):
        # train a model
        train(self.train_file,
              self.test_file,
              self.batch_size,
              self.epochs,
              self.gpu_mode,
              self.num_workers,
              self.retrain_model,
              self.retrain_model_path,
              self.gru_layers,
              self.hidden_size,
              self.learning_rate,
              self.weight_decay,
              self.model_dir,
              self.stats_dir,
              not_hyperband=True) 
Example 10
Source File: FFM_Multi_PyTorch.py    From Awesome-RecSystem-Models with MIT License    6 votes
def train_FFM_model_demo():

    # Step 1: load the data
    x_train, y_train, x_test, y_test, feature2field = load_dataset()
    x_train = preprocessing.scale(x_train, with_mean=True, with_std=True)
    x_test = preprocessing.scale(x_test, with_mean=True, with_std=True)
    class_num = len(set([y for y in y_train] + [y for y in y_test]))

    # the FFM model
    ffm = FFM_layer(field_map_dict=feature2field, fea_num=x_train.shape[1], reg_l1=0.01, reg_l2=0.01,
                    class_num=class_num, latent_factor_dim=10).to(DEVICE)

    # define the loss function and the optimizer
    optm = torch.optim.Adam(ffm.parameters())

    train_loader = get_batch_loader(x_train, y_train, BATCH_SIZE, shuffle=True)
    test_loader = get_batch_loader(x_test, y_test, BATCH_SIZE, shuffle=False)

    for epoch in range(1, EPOCHS + 1):
        train(ffm, DEVICE, train_loader, optm, epoch)
        test(ffm, DEVICE, test_loader) 
Example 11
Source File: FM_Multi_PyTorch.py    From Awesome-RecSystem-Models with MIT License    6 votes
def train_FM_model_demo():

    # Step 1: load the data
    x_train, y_train, x_test, y_test = load_dataset()
    x_train = preprocessing.scale(x_train, with_mean=True, with_std=True)
    x_test = preprocessing.scale(x_test, with_mean=True, with_std=True)
    class_num = len(set([y for y in y_train] + [y for y in y_test]))

    # the FM model
    fm = FM_layer(class_num=class_num, feature_num=x_train.shape[1], latent_factor_dim=40).to(DEVICE)

    # define the loss function and the optimizer
    optm = torch.optim.Adam(fm.parameters())

    train_loader = get_batch_loader(x_train, y_train, BATCH_SIZE, shuffle=True)
    test_loader = get_batch_loader(x_test, y_test, BATCH_SIZE, shuffle=False)

    for epoch in range(1, EPOCHS + 1):
        train(fm, DEVICE, train_loader, optm, epoch)
        test(fm, DEVICE, test_loader) 
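The train and test helpers live elsewhere in the repository; a typical PyTorch epoch function of that shape looks like this sketch (hypothetical, assuming the model returns class logits):

import torch.nn.functional as F

def train(model, device, loader, optimizer, epoch):
    model.train()
    for batch_idx, (x, y) in enumerate(loader):
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = F.cross_entropy(model(x), y)  # logits vs. integer class labels
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('epoch %d batch %d loss %.4f' % (epoch, batch_idx, loss.item()))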
Example 12
Source File: train_model_util_PyTorch.py    From Awesome-RecSystem-Models with MIT License    6 votes
def train_test_model_demo(model, device, train_data_path, test_data_path, feat_dict_):
    print("Start Training Model!")

    # Sort the Train files in order
    train_filelist = ["%s%s" % (train_data_path, x) for x in os.listdir(train_data_path)]
    train_file_id = [int(re.sub(r'^.*[\D]', '', x)) for x in train_filelist]
    train_filelist = [train_filelist[idx] for idx in np.argsort(train_file_id)]

    # Sort the Test files in order
    test_filelist = ["%s%s" % (test_data_path, x) for x in os.listdir(test_data_path)]
    test_file_id = [int(re.sub(r'^.*[\D]', '', x)) for x in test_filelist]
    test_filelist = [test_filelist[idx] for idx in np.argsort(test_file_id)]

    optimizer = torch.optim.Adam(model.parameters())

    for epoch in range(1, EPOCHS + 1):
        train_model(model, train_filelist, feat_dict_, device, optimizer, epoch)
        test_model(model, test_filelist, feat_dict_, device) 
Example 13
Source File: chatbot.py    From Intelligent-Projects-Using-Python with MIT License    6 votes
def train_model(self,model,X_train,X_test,y_train,y_test):
        input_y_train = self.include_start_token(y_train)
        print(input_y_train.shape)
        input_y_test = self.include_start_token(y_test)
        print(input_y_test.shape)
        early = EarlyStopping(monitor='val_loss',patience=10,mode='auto')

        checkpoint = ModelCheckpoint(self.outpath + 's2s_model_' + str(self.version) + '_.h5',monitor='val_loss',verbose=1,save_best_only=True,mode='auto')
        lr_reduce = ReduceLROnPlateau(monitor='val_loss',factor=0.5, patience=2, verbose=0, mode='auto')
        model.fit([X_train, input_y_train], y_train,
                  epochs=self.epochs,
                  batch_size=self.batch_size,
                  validation_data=([X_test, input_y_test], y_test),
                  callbacks=[early, checkpoint, lr_reduce],
                  shuffle=True)
        return model 
Example 14
Source File: train.py    From dpl with MIT License    6 votes
def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        while self.solver.iter < max_iters:
            # Make one SGD update
            timer.tic()
            
            self.solver.step(1)
            
            timer.toc()
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))

            if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self.solver.iter
                self.snapshot()

        if last_snapshot_iter != self.solver.iter:
            self.snapshot() 
Example 15
Source File: vaegan_cifar.py    From MachineLearning with Apache License 2.0    6 votes
def train_model(name, g_train, d_train, sampler):
    """
    Main training loop.
    modified from Keras fit_generator
    """
    epoch = 0
    cifar10 = cifar.Cifar()
    cifar10.ReadDataSets(one_hot=False)

    while epoch < EPOCH:
        z, x = fetch_next_batch(cifar10)
        d_train(x, z, epoch)
        z, x = fetch_next_batch(cifar10)
        g_train(x, z, epoch)
        if epoch % 2 == 0:
            print "epoch: ", epoch
        epoch += 1 
Example 16
Source File: time_train_test.py    From rasa_lookup_demo with Apache License 2.0    6 votes
def train_model():
    # trains a model and times it
    t = time()
    # training_data = load_data('demo_train.md')
    training_data = load_data("data/company_train_lookup.json")
    td_load_time = time() - t
    trainer = Trainer(config.load("config.yaml"))
    t = time()
    trainer.train(training_data)
    train_time = time() - t
    clear_model_dir()
    t = time()
    model_directory = trainer.persist(
        "./tmp/models"
    )  # Returns the directory the model is stored in
    persist_time = time() - t
    return td_load_time, train_time, persist_time 
Example 17
Source File: train.py    From Dog-Cat-Classifier with Apache License 2.0    6 votes
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Data augmentation: improves generalization, at the cost of longer training.
    # To train without augmentation, use this instead:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, vertical_flip=False)
    generated_data.fit(X)
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0]//8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model 
Example 18
Source File: process.py    From DeepRNN with MIT License    6 votes
def create_train_model(model_creator, hparams, data_dir):
    """Create train graph, model, and iterator."""
    train_data_path = []
    for root, _, name in os.walk(os.path.join(data_dir, 'train_data')):
        for x in name:
            if x.split('.')[-1] == 'mat':
                train_data_path.append(os.path.join(root, x))
    assert len(train_data_path) == 1
    train_data = scio.loadmat(*train_data_path)['data']
    assert hparams.src_len == hparams.tgt_len == train_data.shape[1]
    graph = tf.Graph()

    with graph.as_default(), tf.container("train"):
        # channels: [features, SBP, DBP, MBP]
        train_src_data = train_data[:, :, 0:hparams.src_feature_size]
        train_tgt_data = train_data[:, :, hparams.src_feature_size:hparams.src_feature_size + hparams.tgt_feature_size]
        src_dataset = tf.data.Dataset.from_tensor_slices(train_src_data)
        tgt_dataset = tf.data.Dataset.from_tensor_slices(train_tgt_data)
        iterator = get_iterator(src_dataset, tgt_dataset, batch_size=hparams.batch_size,
                                random_seed=hparams.random_seed, is_train=True)
        model = model_creator(hparams, iterator=iterator, mode=tf.contrib.learn.ModeKeys.TRAIN)
    return TrainModel(graph=graph, model=model, iterator=iterator) 
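get_iterator wraps the usual tf.data plumbing; in eager TensorFlow 2 the equivalent is roughly this sketch (toy shapes):

import numpy as np
import tensorflow as tf

src = np.random.rand(32, 10, 4).astype(np.float32)  # (samples, time, src features)
tgt = np.random.rand(32, 10, 3).astype(np.float32)  # (samples, time, tgt features)
dataset = (tf.data.Dataset.from_tensor_slices((src, tgt))
           .shuffle(buffer_size=32, seed=42)
           .batch(8))
for src_batch, tgt_batch in dataset:
    print(src_batch.shape, tgt_batch.shape)  # (8, 10, 4) (8, 10, 3)
    break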
Example 19
Source File: Deopen_classification.py    From Deopen with MIT License    6 votes
def model_train(X_train, y_train,learning_rate = 1e-4,epochs = 50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
                network,
                max_epochs=epochs,
                update=adam,
                update_learning_rate=lr,
                train_split=TrainSplit(eval_size=0.1),
                batch_iterator_train=BatchIterator(batch_size=32),
                batch_iterator_test=BatchIterator(batch_size=64),
                #on_training_started=[LoadBestParam(iteration=val_acc.argmax())],
                on_epoch_finished=[EarlyStopping(patience=5)],
                verbose=1)
    print('Loading pre-training weights...')
    net.load_params_from(params[val_acc.argmax()])
    print('Continue to train...')
    net.fit(X_train, y_train)
    print('Model training finished.')
    return net


#model testing 
Example 20
Source File: trainer_model_based.py    From training_results_v0.5 with Apache License 2.0    6 votes
def train_world_model(
    env, data_dir, output_dir, hparams, world_model_steps_num, epoch
):
  """Train the world model on problem_name."""
  world_model_steps_num += world_model_step_increment(
      hparams, is_initial_epoch=(epoch == 0)
  )
  model_hparams = trainer_lib.create_hparams(hparams.generative_model_params)
  model_hparams.learning_rate = model_hparams.learning_rate_constant
  if epoch > 0:
    model_hparams.learning_rate *= hparams.learning_rate_bump

  train_supervised(
      problem=env,
      model_name=hparams.generative_model,
      hparams=model_hparams,
      data_dir=data_dir,
      output_dir=output_dir,
      train_steps=world_model_steps_num,
      eval_steps=100,
      local_eval_frequency=2000
  )

  return world_model_steps_num 
Example 21
Source File: kdd99_model.py    From tcav with Apache License 2.0    6 votes
def train_and_save_model(model_path, labels_path):
  """ Trains simple feedforward model for the KDD99 dataset"""
  # Prepare dataset and split it
  data, labels = prepare_dataset(labels_path)
  train_data, test_data, train_labels, test_labels = train_test_split(
      data, labels, test_size=0.2)

  # Create categorical map for the embedding layer
  categorical_map = create_categorical_map(data)
  model = make_keras_model(categorical_map)

  print(model.summary())
  model.fit(
      train_data,
      train_labels,
      validation_data=(test_data, test_labels),
      epochs=4,
      batch_size=64)
  model.save(model_path)

  # Test on a small subset of predictions
  predictions = model.predict(test_data[:10])
  print(predictions) 
Example 22
Source File: train.py    From face-py-faster-rcnn with MIT License    6 votes
def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        model_paths = []
        while self.solver.iter < max_iters:
            # Make one SGD update
            timer.tic()
            self.solver.step(1)
            timer.toc()
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))

            if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self.solver.iter
                model_paths.append(self.snapshot())

        if last_snapshot_iter != self.solver.iter:
            model_paths.append(self.snapshot())
        return model_paths 
Example 23
Source File: train_model.py    From caption_generator with MIT License    6 votes
def train_model(weight=None, batch_size=32, epochs=10):

    cg = caption_generator.CaptionGenerator()
    model = cg.create_model()

    if weight is not None:
        model.load_weights(weight)

    counter = 0
    file_name = 'weights-improvement-{epoch:02d}.hdf5'
    checkpoint = ModelCheckpoint(file_name, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    model.fit_generator(cg.data_generator(batch_size=batch_size), steps_per_epoch=cg.total_samples // batch_size, epochs=epochs, verbose=2, callbacks=callbacks_list)
    try:
        model.save('Models/WholeModel.h5', overwrite=True)
        model.save_weights('Models/Weights.h5', overwrite=True)
    except Exception:
        print("Error in saving model.")
    print("Training complete...\n")
Example 24
Source File: trainer.py    From nima.pytorch with MIT License    6 votes
def train_model(self):
        best_loss = float("inf")
        best_state = None
        for e in range(1, self.num_epoch + 1):
            train_loss = self.train()
            val_loss = self.validate()
            self.scheduler.step(metrics=val_loss)

            self.writer.add_scalar("train/loss", train_loss, global_step=e)
            self.writer.add_scalar("val/loss", val_loss, global_step=e)

            if best_state is None or val_loss < best_loss:
                logger.info(f"updated loss from {best_loss} to {val_loss}")
                best_loss = val_loss
                best_state = {
                    "state_dict": self.model.state_dict(),
                    "model_type": self.model_type,
                    "epoch": e,
                    "best_loss": best_loss,
                }
                torch.save(best_state, self.experiment_dir / "best_state.pth") 
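Restoring the checkpoint saved above is symmetric; a sketch, assuming a model of the same model_type has already been constructed (the path is hypothetical):

import torch

state = torch.load('experiments/best_state.pth', map_location='cpu')
model.load_state_dict(state['state_dict'])  # model built beforehand
print('best val loss %.4f at epoch %d' % (state['best_loss'], state['epoch']))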
Example 25
Source File: cross_fold_trainer.py    From calamari with Apache License 2.0    6 votes
def train_individual_model(run_args):
    # Call the training script with the json file as args
    # The json file contains all training parameters, including the files for training and validation
    # Note: It is necessary to launch a new thread because the command might be prefixed (e.g. using slurm as the job
    #       scheduler to train all folds on different machines)
    args = run_args["args"]
    train_args_json = run_args["json"]
    for line in run(prefix_run_command([
        sys.executable, "-u",
        args["train_script"],
        "--files", train_args_json,

    ], args.get("run", None), {"threads": args.get('num_threads', -1)}), verbose=args.get("verbose", False)):
        # Print the output of the thread
        if args.get("verbose", False):
            print("FOLD {} | {}".format(args["id"], line), end="")

    return args 
Example 26
Source File: main.py    From u_net_liver with MIT License    6 votes
def train_model(model, criterion, optimizer, dataload, num_epochs=20):
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        dt_size = len(dataload.dataset)
        epoch_loss = 0
        step = 0
        for x, y in dataload:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print("%d/%d,train_loss:%0.3f" % (step, (dt_size - 1) // dataload.batch_size + 1, loss.item()))
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss/step))
    torch.save(model.state_dict(), 'weights_%d.pth' % epoch)
    return model

# train the model
Example 27
Source File: CorrMCNN_Arch2.py    From DeepLearn with MIT License    6 votes
def trainModel(model,data_left,data_right,loss_type,nb_epoch,batch_size):

    X_train_l = data_left
    X_train_r = data_right
    
    data_l = np.load('data_l.npy')
    data_r = np.load('data_r.npy')
    label = np.load('data_label.npy')
    X_train_l, X_test_l, X_train_r, X_test_r,y_train,y_test = split(data_l,data_r,label,ratio=0.01)
    print('data split')
    print('L_Type: l2+l3-L4   h_dim:', hdim, '   hdim_deep', hdim_deep, '  lamda:', lamda)
    model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_r,
              np.zeros((X_train_l.shape[0],h_loss)),
             np.zeros((X_train_l.shape[0],hdim_deep)),np.zeros((X_train_l.shape[0],hdim_deep2))],
              nb_epoch=nb_epoch,
              batch_size=batch_size,verbose=1) 
Example 28
Source File: end_model.py    From metal with Apache License 2.0    6 votes
def train_model(self, train_data, valid_data=None, log_writer=None, **kwargs):
        self.config = recursive_merge_dicts(self.config, kwargs)

        # If train_data is provided as a tuple (X, Y), we can make sure Y is in
        # the correct format
        # NOTE: Better handling for if train_data is Dataset or DataLoader...?
        if isinstance(train_data, (tuple, list)):
            X, Y = train_data
            Y = self._preprocess_Y(self._to_torch(Y, dtype=torch.FloatTensor), self.k)
            train_data = (X, Y)

        # Convert input data to data loaders
        train_loader = self._create_data_loader(train_data, shuffle=True)

        # Create loss function
        loss_fn = self._get_loss_fn()

        # Execute training procedure
        self._train_model(
            train_loader, loss_fn, valid_data=valid_data, log_writer=log_writer
        ) 
Example 29
Source File: breakout_a3c.py    From reinforcement-learning with MIT License    6 votes
def train_model(self, done):
        discounted_rewards = self.discount_rewards(self.rewards, done)

        states = np.zeros((len(self.states), 84, 84, 4))
        for i in range(len(self.states)):
            states[i] = self.states[i]

        states = np.float32(states / 255.)

        values = self.critic.predict(states)
        values = np.reshape(values, len(values))

        advantages = discounted_rewards - values

        self.optimizer[0]([states, self.actions, advantages])
        self.optimizer[1]([states, discounted_rewards])
        self.states, self.actions, self.rewards = [], [], [] 
Example 30
Source File: task.py    From cloudml-samples with Apache License 2.0    6 votes
def train_model(args):
    """Load the data, train the model, test the model, export / save the model
    """
    torch.manual_seed(args.seed)

    # Open our dataset
    train_loader, test_loader = data_utils.load_data(args.test_split,
                                                     args.batch_size)

    # Create the model
    net = model.SonarDNN().double()
    optimizer = optim.SGD(net.parameters(), lr=args.lr,
                          momentum=args.momentum, nesterov=False)

    # Train / Test the model
    for epoch in range(1, args.epochs + 1):
        train(net, train_loader, optimizer, epoch)
        test(net, test_loader)

    # Export the trained model
    torch.save(net.state_dict(), args.model_name)

    if args.model_dir:
        # Save the model to GCS
        data_utils.save_model(args.model_dir, args.model_name) 
Example 31
Source File: train.py    From tripletloss with MIT License    6 votes
def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        while self.solver.iter < max_iters:
            timer.tic()
            self.solver.step(1)
            print('fc9_1:', sorted(self.solver.net.params['fc9_1'][0].data[0])[-1])
            #print 'fc9:',sorted(self.solver.net.params['fc9'][0].data[0])[-1]
            #print 'fc7:',sorted(self.solver.net.params['fc7'][0].data[0])[-1]
            #print 'fc6:',sorted(self.solver.net.params['fc6'][0].data[0])[-1]
            #print 'fc9:',(self.solver.net.params['fc9'][0].data[0])[0]
            #print 'fc7:',(self.solver.net.params['fc7'][0].data[0])[0]
            #print 'fc6:',(self.solver.net.params['fc6'][0].data[0])[0]
            #print 'conv5_3:',self.solver.net.params['conv5_3'][0].data[0][0][0]
            #print 'conv5_2:',self.solver.net.params['conv5_2'][0].data[0][0][0]
            #print 'conv5_1:',self.solver.net.params['conv5_1'][0].data[0][0][0]
            #print 'conv4_3:',self.solver.net.params['conv4_3'][0].data[0][0][0]
            #print 'fc9:',self.solver.net.params['fc9'][0].data[0][0]
            timer.toc()
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))
Example 32
Source File: train_model.py    From pytorch-project-template with Apache License 2.0    6 votes
def train_model(hp, model, train_loader, writer, logger):
    model.net.train()
    for input_, target in train_loader:
        model.feed_data(input=input_, GT=target)
        model.optimize_parameters()
        loss = model.log.loss_v
        model.step += 1

        if logger is not None and (loss > 1e8 or math.isnan(loss)):
            logger.error("Loss exploded to %.02f at step %d!" % (loss, model.step))
            raise Exception("Loss exploded")

        if model.step % hp.log.summary_interval == 0:
            if writer is not None:
                writer.logging_with_step(loss, model.step, "train_loss")
            if logger is not None:
                logger.info("Train Loss %.04f at step %d" % (loss, model.step)) 
Example 33
Source File: lenet5.py    From iAI with MIT License    6 votes
def train_model():
    # Build and compile model
    model = build_model()
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Load data
    x_train, y_train, x_test, y_test = load_data()

    # Train the model on the data
    model.fit(
        x_train, y_train,
        epochs = 10,
        verbose = 1
    )

    # Evaluate the model on test data
    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("Test loss: {}\nTest accuracy: {}".format(test_loss, test_acc))

    return model 
Example 34
Source File: utils.py    From molencoder with MIT License    6 votes
def train_model(train_loader, encoder, decoder, optimizer, dtype,
                print_every=100):
    encoder.train()
    decoder.train()

    for t, (x, y) in enumerate(train_loader):
        x_var = Variable(x.type(dtype))

        y_var = encoder(x_var)
        z_var = decoder(y_var)

        loss = encoder.vae_loss(z_var, x_var)
        if (t + 1) % print_every == 0:
            print('t = %d, loss = %.4f' % (t + 1, loss.item()))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step() 
Example 35
Source File: ddpg.py    From tensorflow_RL with MIT License    6 votes
def train_model(self):
        batch = random.sample(self.memory,self.batch_size)
        states = np.asarray([e[0] for e in batch])
        actions = np.asarray([e[1] for e in batch])
        rewards = np.asarray([e[2] for e in batch])
        next_states = np.asarray([e[3] for e in batch])
        dones = np.asarray([e[4] for e in batch])
        target_action_input = self.sess.run(self.target_actor.actor,feed_dict={self.target_actor.state:next_states})
        target_q_value = self.sess.run(self.target_critic.critic,feed_dict={self.target_critic.state:next_states,
                                                                            self.target_critic.action:target_action_input})
        targets = np.asarray([r + self.gamma * (1-d) * tv for r,tv,d in zip(rewards,target_q_value,dones)])
        self.sess.run(self.ctrain_op,feed_dict=
        {
            self.critic.state:states,
            self.critic.action:actions,
            self.target_value:np.squeeze(targets)
        })
        action_for_train = self.sess.run(self.actor.actor,feed_dict={self.actor.state:states})
        self.sess.run(self.atrain_op,feed_dict=
        {
            self.actor.state:states,
            self.critic.state:states,
            self.critic.action:action_for_train
        })
        self.sess.run(self.update_target_soft) 
Example 36
Source File: NER.py    From Jtyoui with MIT License    6 votes
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length, weights=[embedding_matrix],
                  trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path) 
Example 37
Source File: actionCNN.py    From SupervisedChromeTrex with MIT License    6 votes
def trainModel(model):

    # Split X and y into training and testing sets
    X_train, X_test, Y_train, Y_test = initializers()

    # Now start the training of the loaded model
    hist = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
                 verbose=1, validation_split=0.2)

    

    ans = input("Do you want to save the trained weights - y/n ?")
    if ans == 'y':
        filename = input("Enter file name - ")
        fname = path + str(filename) + ".hdf5"
        model.save_weights(fname,overwrite=True)
    else:
        model.save_weights("newWeight.hdf5",overwrite=True)

    visualizeHis(hist)
    # Save model as well
    # model.save("newModel.hdf5")
#%% 
Example 38
Source File: pipelinecomponents.py    From sia-cog with MIT License    6 votes
def model_train(X, Y, pipeline, X_test=None, Y_test=None, more = False):
    try:
        result = None
        if model_type == "mlp":
            deepmodel = projectmgr.GetDeepModel(name, "ml", pipeline['options']['model_name'])
            if deepmodel is None:
                raise Exception(pipeline['options']['model_name'] + ": Model not found!")

            modeljson = json.loads(deepmodel.modeldata)
            modelObj = mxnetfactory.createModel(modeljson)
            #modelObj.compile(loss=pipeline['options']['loss'], optimizer=pipeline['options']['optimizer'],
            #              metrics=pipeline['options']['scoring'])
            epoches = pipeline["options"]["epoches"]
            batch_size = pipeline["options"]["batch_size"]
            mxnetfactory.init(mxnetfactory, name, jobid)
            result = mxnetfactory.Train(modelObj, X, Y, projectfolder, pipeline["options"], epoches, batch_size, X_test=None, Y_test=None, more=more)
            projectmgr.UpdateExecuteResult(jobid, json.dumps(result))
            picklefile = projectfolder + "/model.json"
            model_json = modelObj.to_json()
            with open(picklefile, "w") as json_file:
                json_file.write(model_json)

        return result
    except Exception as e:
        raise Exception("model_train: " + str(e)) 
Example 39
Source File: Config.py    From OpenNRE-PyTorch with MIT License    6 votes
def set_train_model(self, model):
		print("Initializing training model...")
		self.model = model
		self.trainModel = self.model(config = self)
		if self.pretrain_model is not None:
			self.trainModel.load_state_dict(torch.load(self.pretrain_model))
		self.trainModel.cuda()
		if self.optimizer is not None:
			pass
		elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
			self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr = self.learning_rate, lr_decay = self.lr_decay, weight_decay = self.weight_decay)
		elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
			self.optimizer = optim.Adadelta(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		elif self.opt_method == "Adam" or self.opt_method == "adam":
			self.optimizer = optim.Adam(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		else:
			self.optimizer = optim.SGD(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		print("Finish initializing") 
Example 40
Source File: sgcn.py    From SGCN with GNU General Public License v3.0    6 votes
def create_and_train_model(self):
        """
        Model training and scoring.
        """
        print("\nTraining started.\n")
        self.model = SignedGraphConvolutionalNetwork(self.device, self.args, self.X).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.args.learning_rate,
                                          weight_decay=self.args.weight_decay)
        self.model.train()
        self.epochs = trange(self.args.epochs, desc="Loss")
        for epoch in self.epochs:
            start_time = time.time()
            self.optimizer.zero_grad()
            loss, _ = self.model(self.positive_edges, self.negative_edges, self.y)
            loss.backward()
            self.epochs.set_description("SGCN (Loss=%g)" % round(loss.item(), 4))
            self.optimizer.step()
            self.logs["training_time"].append([epoch+1, time.time()-start_time])
            if self.args.test_size > 0:
                self.score_model(epoch) 
Example 41
Source File: main.py    From StageDP with MIT License    6 votes
def train_model(data_helper):
    # initialize the parser
    action_clf = ActionClassifier(data_helper.action_feat_template, data_helper.action_map)
    relation_clf = RelationClassifier(data_helper.relation_feat_template_level_0,
                                      data_helper.relation_feat_template_level_1,
                                      data_helper.relation_feat_template_level_2,
                                      data_helper.relation_map)
    rst_parser = RstParser(action_clf, relation_clf)
    # train action classifier
    action_fvs, action_labels = list(zip(*data_helper.gen_action_train_data()))
    rst_parser.action_clf.train(scipy.sparse.vstack(action_fvs), action_labels)
    # train relation classifier
    for level in [0, 1, 2]:
        relation_fvs, relation_labels = list(zip(*data_helper.gen_relation_train_data(level)))
        print('{} relation samples at level {}.'.format(len(relation_labels), level))
        rst_parser.relation_clf.train(scipy.sparse.vstack(relation_fvs), relation_labels, level)
    rst_parser.save(model_dir='../data/model') 
Example 42
Source File: breakout_a3c.py    From reinforcement-learning-kr with MIT License    6 votes
def train_model(self, done):
        discounted_prediction = self.discounted_prediction(self.rewards, done)

        states = np.zeros((len(self.states), 84, 84, 4))
        for i in range(len(self.states)):
            states[i] = self.states[i]

        states = np.float32(states / 255.)

        values = self.local_critic.predict(states)
        values = np.reshape(values, len(values))

        advantages = discounted_prediction - values

        self.optimizer[0]([states, self.actions, advantages])
        self.optimizer[1]([states, discounted_prediction])
        self.states, self.actions, self.rewards = [], [], []

    # function that creates the local network
Example 43
Source File: rock_gensim.py    From MusicTaster with MIT License    6 votes
def train_artistsong2vec_model(fout_path, input_datas=None, data_path=None,
                               min_count=5, sorted_vocab=1, window=10,
                               size=250,
                               iter_n=50):
    if not input_datas and data_path:
        input_datas = pickle.load(open(data_path, 'rb'))
    full_data = []
    for i in input_datas:
        tmp = []
        for j in i:
            tmp.append(j[0])
            tmp.append(j[1])
        full_data.append(tmp)
    data_process_logger.info('start training')
    wv_model = gensim.models.Word2Vec(full_data, min_count=min_count, sorted_vocab=sorted_vocab, window=window,
                                      size=size, iter=iter_n)
    with open(fout_path, 'wb') as fout:
        data_process_logger.info('start saving model')
        pickle.dump(wv_model, fout)
        print('model saved')
Example 44
Source File: CNN.py    From AI_Sudoku with Creative Commons Zero v1.0 Universal    6 votes
def train_and_evaluate_model(self):
        if not self.modelbuilt:
            raise Exception("Build and train the model first!")
        if self.modeltrained:
            return
        # MNIST object
        mnist = tf.keras.datasets.mnist
        # Loading the Train/Test data
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        # Reshape to form a 3D Vector
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
        # Normalize the train/test dataset
        x_train, x_test = x_train / 255.0, x_test / 255.0
        # Train the model
        self.model.fit(x=x_train, y=y_train, epochs=5)
        # Evaluate the model
        test_loss, test_acc = self.model.evaluate(x=x_test, y=y_test)
        # Print out the model accuracy
        print('\nTest accuracy:', test_acc)
        self.modeltrained = True 
Example 45
Source File: comet.py    From ludwig with Apache License 2.0    6 votes
def train_model(self, *args, **kwargs):
        logger.info("comet.train_model() called......")
        if self.cometml_experiment:
            model = args[0]
            model_definition = args[1]
            model_definition_path = args[2]
            if model:
                self.cometml_experiment.set_model_graph(
                    str(model._graph.as_graph_def()))
            if model_definition:
                if model_definition_path:
                    base_name = os.path.basename(model_definition_path)
                else:
                    base_name = "model_definition.yaml"
                if "." in base_name:
                    base_name = base_name.rsplit(".", 1)[0] + ".json"
                else:
                    base_name = base_name + ".json"
                self.cometml_experiment.log_asset_data(model_definition,
                                                       base_name) 
Example 46
Source File: __init__.py    From spleeter with MIT License    6 votes
def build_train_model(self, labels):
        """ Builder interface for creating model instance that aims to perform
        model training. The output of such estimator will be a dictionary
        with a key "<instrument>_spectrogram" per separated instrument,
        associated to the estimated separated instrument magnitude spectrogram.

        :param labels: Model labels.
        :returns: An estimator for performing model training.
        """
        loss, metrics = self._build_loss(labels)
        optimizer = self._build_optimizer()
        train_operation = optimizer.minimize(
                loss=loss,
                global_step=tf.compat.v1.train.get_global_step())
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN,
            loss=loss,
            train_op=train_operation,
            eval_metric_ops=metrics,
        ) 
Example 47
Source File: baseline.py    From MELD with GNU General Public License v3.0    6 votes
def train_model(self):

		checkpoint = ModelCheckpoint(self.PATH, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')

		if self.modality == "audio":
			model = self.get_audio_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "text":
			model = self.get_text_model()
			model.compile(optimizer='adadelta', loss='categorical_crossentropy', sample_weight_mode='temporal')
		elif self.modality == "bimodal":
			model = self.get_bimodal_model()
			model.compile(optimizer='adam', loss='categorical_crossentropy', sample_weight_mode='temporal')

		early_stopping = EarlyStopping(monitor='val_loss', patience=10)
		model.fit(self.train_x, self.train_y,
		                epochs=self.epochs,
		                batch_size=self.batch_size,
		                sample_weight=self.train_mask,
		                shuffle=True, 
		                callbacks=[early_stopping, checkpoint],
		                validation_data=(self.val_x, self.val_y, self.val_mask))

		self.test_model() 
Example 48
Source File: train.py    From style-token_tacotron2 with MIT License    6 votes
def model_train_mode(args, feeder, hparams, global_step):
	with tf.variable_scope('Tacotron_model', reuse=tf.AUTO_REUSE) as scope:
		model_name = None
		if args.model == 'Tacotron-2':
			model_name = 'Tacotron'
		model = create_model(model_name or args.model, hparams)
		if hparams.predict_linear:
			model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.token_targets, linear_targets=feeder.linear_targets,
				targets_lengths=feeder.targets_lengths, global_step=global_step,
				is_training=True, split_infos=feeder.split_infos)
		else:
			model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.token_targets,
				targets_lengths=feeder.targets_lengths, global_step=global_step,
				is_training=True, split_infos=feeder.split_infos)
		model.add_loss()
		model.add_optimizer(global_step)
		stats = add_train_stats(model, hparams)
		return model, stats 
Example 49
Source File: pathnet.py    From pathnet-pytorch with BSD 3-Clause "New" or "Revised" License    6 votes
def train_model(self, train_loader, path, num_batch):
        self.train()
        fitness = 0
        train_len = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            if self.args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            self.optimizer.zero_grad()
            output = self(data, path, -1)
            pred = output.data.max(1)[1] # get the index of the max log-probability
            fitness += pred.eq(target.data).cpu().sum()
            train_len += len(target.data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            self.optimizer.step()
            if batch_idx >= num_batch - 1:
                break
        fitness = fitness / train_len
        return fitness 
Example 50
Source File: train.py    From Vocalize-Sign-Language with Apache License 2.0    6 votes
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')

    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    '''
    # Creates live data:
    # For better yield. The duration of the training is extended.

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) # For include left hand data add: 'horizontal_flip = True'
    generated_data.fit(X)

    model.fit_generator(generated_data.flow(X, Y, batch_size=batch_size), steps_per_epoch=X.shape[0]/batch_size, epochs=epochs, validation_data=(X_test, Y_test), callbacks=checkpoints)
    '''
    
    model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    return model 
Example 51
Source File: train_snowboy.py    From GassistPi with GNU General Public License v3.0    6 votes
def train_snowboy_model():
    while not record_hotword_samples():
        time.sleep(.1)
    data = {
        "name": hotword_name,
        "language": language,
        "age_group": age_group,
        "gender": gender,
        "microphone": microphone,
        "token": token,
        "voice_samples": [
            {"wave": get_wave('/tmp/1.wav')},
            {"wave": get_wave('/tmp/2.wav')},
            {"wave": get_wave('/tmp/3.wav')}
        ]
    }
    response = requests.post(endpoint, json=data)
    if response.ok:
        with open(modelfilename, "wb") as outfile:  # response.content is bytes
            outfile.write(response.content)
        print("Saved model to '%s'." % modelfilename)
    else:
        print("Request failed.")
        print(response.text) 
Example 52
Source File: topic_extraction.py    From nlp-architect with Apache License 2.0    6 votes
def train_w2v_model(data):
    """
    Train a w2v (skipgram) model using fasttext package

    Args:
        data: A path to the training data (String)
    """
    with open(data, encoding="utf-8") as fp:
        texts = [line.split() for line in fp.readlines()]
    logger.info("Fasttext embeddings training...")
    try:
        model = FasttextEmbeddingsModel(size=100, min_count=1, skipgram=True)
        model.train(texts, epochs=100)
        model.save(str(path.join(data_dir, "W2V_Models/model.bin")))
    except Exception as e:
        logger.error("Error: %s", str(e)) 
Example 53
Source File: slac_agent.py    From slac with MIT License    6 votes
def train_model(self, experience, weights=None):
    if self._enable_functions and getattr(
        self, "_train_model_fn", None) is None:
      raise RuntimeError(
          "Cannot find _train_model_fn.  Did %s.__init__ call super?"
          % type(self).__name__)
    if not isinstance(experience, trajectory.Trajectory):
      raise ValueError(
          "experience must be type Trajectory, saw type: %s" % type(experience))

    if self._enable_functions:
      loss_info = self._train_model_fn(experience=experience, weights=weights)
    else:
      loss_info = self._train_model(experience=experience, weights=weights)

    if not isinstance(loss_info, tf_agent.LossInfo):
      raise TypeError(
          "loss_info is not a subclass of LossInfo: {}".format(loss_info))
    return loss_info 
Example 54
Source File: train.py    From TrafficFlowPrediction with MIT License    6 votes
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(
        X_train, y_train,
        batch_size=config["batch"],
        epochs=config["epochs"],
        validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False)
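A call of this helper might look like the following sketch (hypothetical model and toy data; train_model writes its outputs into an existing model/ directory):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

X_train = np.random.rand(1000, 12)  # toy lag features: (number, lags)
y_train = np.random.rand(1000)
model = Sequential([Dense(32, input_shape=(12,), activation='relu'), Dense(1)])
train_model(model, X_train, y_train, 'dense_demo', {'batch': 256, 'epochs': 2})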