Python Code Examples for train model

The following 60 Python code examples relate to "train model". They are extracted from open source projects; the project, author, source file, and license are listed above each example.
Example 1
Project: BERT   Author: yyht   File: tokenization.py    License: Apache License 2.0
def train_model(self, train_config=None):
		'''
		https://github.com/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb
		see this tutorial for SentencePiece training
		'''
		config = train_config if train_config else self.config
		param = ""
		param += "--input={} ".format(config["corpus"])
		param += "--model_prefix={} ".format(config["model_prefix"])
		param += "--vocab_size={} ".format(config["vocab_size"])
		param += "--model_type={} ".format(config.get("model_type", "unigram"))
		param += "--character_coverage={} ".format(config.get("character_coverage", 0.995))
		param += "--mining_sentence_size={} ".format(config.get("mining_sentence_size", 5000000))
		param += "--input_sentence_size={} ".format(config.get("input_sentence_size", 5000000))
		param += "--max_sentencepiece_length={} ".format(config.get("max_sentencepiece_length", 5))
		try:
			SentencePieceTrainer.Train(param)
			self.sp.Load(config["model_prefix"] + ".model")
		except Exception as e:
			raise ValueError("training sentencepiece model failed") from e
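For reference, a minimal standalone sketch of the same SentencePiece flow (assuming the sentencepiece package is installed and a small corpus.txt exists; the file names here are illustrative):

import sentencepiece as spm

# Train a tiny unigram model; the flags mirror the param string built above.
spm.SentencePieceTrainer.Train(
    "--input=corpus.txt --model_prefix=toy --vocab_size=100 "
    "--model_type=unigram --character_coverage=0.995"
)

# Load the resulting model and segment a sentence.
sp = spm.SentencePieceProcessor()
sp.Load("toy.model")
print(sp.EncodeAsPieces("hello world"))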
Example 2
Project: kaggle-HomeDepot   Author: ChenglongChen   File: embedding_trainer.py    License: MIT License
def train_doc2vec_model(df, columns):
    model_param = {
        "alpha": config.EMBEDDING_ALPHA,
        "learning_rate_decay": config.EMBEDDING_LEARNING_RATE_DECAY,
        "n_epoch": config.EMBEDDING_N_EPOCH,
        "sg": 1, # not use
        "dm": 1,
        "hs": 1,
        "min_count": config.EMBEDDING_MIN_COUNT,
        "size": config.EMBEDDING_DIM,
        "sample": 0.001,
        "window": config.EMBEDDING_WINDOW,
        "workers": config.EMBEDDING_WORKERS,
    }
    model_dir = config.DOC2VEC_MODEL_DIR
    model_name = "Homedepot-doc2vec-D%d-min_count%d.model"%(
                    model_param["size"], model_param["min_count"])

    doc2vec = DataFrameDoc2Vec(df, columns, model_param)
    doc2vec.train()
    doc2vec.save(model_dir, model_name)


#---------------------- Main ---------------------- 
Example 3
Project: atap   Author: foxbook   File: mp_train.py    License: Apache License 2.0
def train_model(path, model, saveto=None, cv=12):
    """
    Trains model from corpus at specified path; constructing cross-validation
    scores using the cv parameter, then fitting the model on the full data and
    writing it to disk at the saveto path if specified. Returns the scores.
    """
    # Load the corpus data and labels for classification
    corpus = PickledCorpusReader(path)
    X = documents(corpus)
    y = labels(corpus)

    # Compute cross validation scores
    scores = cross_val_score(model, X, y, cv=cv)

    # Fit the model on entire data set
    model.fit(X, y)

    # Write to disk if specified
    if saveto:
        joblib.dump(model, saveto)

    # Return scores as well as training time via decorator
    return scores 
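The same cross-validate-then-fit-then-persist pattern on synthetic data (a sketch only; make_classification and LogisticRegression stand in for the corpus loader and the caller's model):

import joblib
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X, y = make_classification(n_samples=200, random_state=0)
model = LogisticRegression(max_iter=1000)

scores = cross_val_score(model, X, y, cv=5)  # score by cross-validation first
model.fit(X, y)                              # then fit on the entire data set
joblib.dump(model, "model.pkl")              # persist, as saveto does above
print(scores.mean())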
Example 4
Project: ASR_WORD   Author: zw76859420   File: speech_model_01.py    License: GNU Affero General Public License v3.0
def train_model(self , datapath , epoch=4 , save_step=2000 , batch_size=8):
        data = DataSpeech(datapath , 'train')
        num_data = data.get_data_num()
        yielddatas = data.data_generator(batch_size , self.AUDIO_LENGTH)
        for epoch in range(epoch):
            print('[*running] train epoch %d .' % epoch)
            n_step = 0
            while True:
                try:
                    print('[*message] epoch %d , Having training data %d+' % (epoch , n_step * save_step))
                    self._model.fit_generator(yielddatas , save_step)
                    n_step += 1
                except StopIteration:
                    print('======================Error StopIteration==============================')
                    break
                self.save_model(comments='_e_' + str(epoch) + '_step_' + str(n_step * save_step))
                self.test_model(datapath=self.datapath , str_dataset='train' , data_count=4)
                self.test_model(datapath=self.datapath , str_dataset='dev' , data_count=16) 
Example 5
Project: CycleGAN-Keras   Author: simontomaskarlsson   File: model.py    License: GNU General Public License v3.0
def trainSimpleModel(self):
        real_A = self.A_test[0]
        real_B = self.B_test[0]
        real_A = real_A[np.newaxis, :, :, :]
        real_B = real_B[np.newaxis, :, :, :]
        epochs = 200
        for epoch in range(epochs):
            print('Epoch {} started'.format(epoch))
            self.G_A2B.fit(x=self.A_train, y=self.B_train, epochs=1, batch_size=1)
            self.G_B2A.fit(x=self.B_train, y=self.A_train, epochs=1, batch_size=1)
            #loss = self.G_A2B.train_on_batch(x=real_A, y=real_B)
            #print('loss: ', loss)
            synthetic_image_A = self.G_B2A.predict(real_B, batch_size=1)
            synthetic_image_B = self.G_A2B.predict(real_A, batch_size=1)
            self.save_tmp_images(real_A, real_B, synthetic_image_A, synthetic_image_B)

        self.saveModel(self.G_A2B, 200)
        self.saveModel(self.G_B2A, 200)

#===============================================================================
# Training 
Example 6
Project: mcf-tracker   Author: nwojke   File: min_cost_flow_pymot.py    License: GNU General Public License v3.0
def train_transition_cost_model(self, n_estimators=100):
        """Train transition cost model from given detections.

        Parameters
        ----------
        n_estimators : int
            Number of gradient boosting stages to perform. A larger number
            usually results in increased performance at higher computational
            cost.

        Returns
        -------
        min_cost_flow_tracker.TransitionCostModel
            Returns a transition cost model that has been trained on the
            given detections.

        """
        model = min_cost_flow_tracker.TransitionCostModel(n_estimators)
        model.train(self._positive_pairs, self._negative_pairs)
        return model 
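The tracker's TransitionCostModel implementation is not shown here, but the n_estimators trade-off the docstring describes can be illustrated with scikit-learn's gradient boosting (a hedged sketch on synthetic data):

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=500, random_state=0)

# More boosting stages usually improve the fit at a higher computational cost.
for n_estimators in (10, 100):
    clf = GradientBoostingClassifier(n_estimators=n_estimators).fit(X, y)
    print(n_estimators, clf.score(X, y))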
Example 7
Project: AI-for-Finance   Author: PacktPublishing   File: train.py    License: MIT License
def train_model(name, train_x, train_y, epochs, batches):
    """
    Get model if it exists, train if needed.
    """
    mparams=confs[name]
    model=mparams['model'](mparams['inputs'])
    # Loss is our loss or cost function - mean_squared_error
    # is a good choice assuming we don't have a lot of "outliers"
    # in our dataset.
    # Adam optimizer works great for most problems.
    #
    # Metrics are loss metrics that we want to have available for each epoch,
    # so we can review how we are doing at each training stage.
    # mse is mean_squared_error, mape is mean_absolute_percentage_error
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse','mape'])
    # Here we're starting our training
    history=model.fit(train_x, train_y, verbose=2, epochs=epochs, batch_size=batches)
    return model, name, mparams, history 
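A self-contained version of this compile/fit pattern (random arrays stand in for train_x/train_y, and the two-layer model is illustrative):

import numpy as np
from tensorflow import keras

train_x = np.random.rand(256, 8)
train_y = np.random.rand(256, 1)

model = keras.Sequential([
    keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    keras.layers.Dense(1),
])
# mean_squared_error loss with the Adam optimizer, tracking mse and mape.
model.compile(loss="mean_squared_error", optimizer="adam", metrics=["mse", "mape"])
history = model.fit(train_x, train_y, epochs=3, batch_size=32, verbose=2)
print(history.history.keys())  # per-epoch loss, mse and mape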
Example 8
Project: rasa_lookup_demo   Author: RasaHQ   File: time_train_test.py    License: Apache License 2.0
def train_model():
    # trains a model and times it
    t = time()
    # training_data = load_data('demo_train.md')
    training_data = load_data("data/company_train_lookup.json")
    td_load_time = time() - t
    trainer = Trainer(config.load("config.yaml"))
    t = time()
    trainer.train(training_data)
    train_time = time() - t
    clear_model_dir()
    t = time()
    model_directory = trainer.persist(
        "./tmp/models"
    )  # Returns the directory the model is stored in
    persist_time = time() - t
    return td_load_time, train_time, persist_time 
Example 9
Project: SGCN   Author: benedekrozemberczki   File: sgcn.py    License: GNU General Public License v3.0
def create_and_train_model(self):
        """
        Model training and scoring.
        """
        print("\nTraining started.\n")
        self.model = SignedGraphConvolutionalNetwork(self.device, self.args, self.X).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.args.learning_rate,
                                          weight_decay=self.args.weight_decay)
        self.model.train()
        self.epochs = trange(self.args.epochs, desc="Loss")
        for epoch in self.epochs:
            start_time = time.time()
            self.optimizer.zero_grad()
            loss, _ = self.model(self.positive_edges, self.negative_edges, self.y)
            loss.backward()
            self.epochs.set_description("SGCN (Loss=%g)" % round(loss.item(), 4))
            self.optimizer.step()
            self.logs["training_time"].append([epoch+1, time.time()-start_time])
            if self.args.test_size > 0:
                self.score_model(epoch) 
Example 10
Project: dlib-minified-models   Author: Luca96   File: training_script.py    License: Apache License 2.0
def train_model(name, xml):
  '''requires: the model name, and the path to the xml annotations.
  It trains and saves a new model according to the specified 
  training options and given annotations'''
  # get the training options
  options = dlib.shape_predictor_training_options()
  options.tree_depth = 3
  options.nu = 0.1
  options.cascade_depth = 10
  options.feature_pool_size = 150
  options.num_test_splits = 350 
  options.oversampling_amount = 5
  options.oversampling_translation_jitter = 0

  options.be_verbose = True  # tells what is happening during the training
  options.num_threads = 1    # number of the threads used to train the model
  
  # finally, train the model
  dlib.train_shape_predictor(xml, name, options) 
Example 11
Project: dpl   Author: ppengtang   File: train.py    License: MIT License
def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        while self.solver.iter < max_iters:
            # Make one SGD update
            timer.tic()
            
            self.solver.step(1)
            
            timer.toc()
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))

            if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self.solver.iter
                self.snapshot()

        if last_snapshot_iter != self.solver.iter:
            self.snapshot() 
Example 12
Project: safekit   Author: pnnl   File: svm.py    License: MIT License
def train_model(model, batcher, res_file):
    """
    :param model: A sklearn anomaly detection model. Needs to have a decision_function() method.
    :param batcher: A Batcher object that delivers batches of training data.
    :param res_file: (str) Path of the file where results are written.
    """

    resultsfile = open(res_file, 'w')
    resultsfile.write('day user red loss\n')
    
    mat = batcher.next_batch()
    batch_num = 0
    while mat is not None:
        datadict = {'features': mat[:, 3:], 'red': mat[:, 2], 'user': mat[:, 1], 'day': mat[:, 0]}
        model.fit(datadict['features'])
        anomaly_scores = model.decision_function(datadict['features'])
        for day, user, red, score in zip(datadict['day'], datadict['user'], datadict['red'], anomaly_scores):
            resultsfile.write(str(day) + ' ' + str(user) + ' ' + str(red) + ' ' + str(score[0]) + '\n')
        batch_num += 1
        print('finished batch num: ' + str(batch_num))
        mat = batcher.next_batch() 
Example 13
Project: Vocalize-Sign-Language   Author: ardamavi   File: train.py    License: Apache License 2.0
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')

    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=1, write_graph=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    '''
    # Generates augmented data on the fly: improves results,
    # but extends the duration of training.

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) # For include left hand data add: 'horizontal_flip = True'
    generated_data.fit(X)

    model.fit_generator(generated_data.flow(X, Y, batch_size=batch_size), steps_per_epoch=X.shape[0]/batch_size, epochs=epochs, validation_data=(X_test, Y_test), callbacks=checkpoints)
    '''
    
    model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    return model 
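The two callbacks do the heavy lifting here; a minimal sketch of wiring them up (paths are illustrative, and the commented fit call assumes a compiled model and data are in scope):

from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard

callbacks = [
    # Keep only the weights that improve validation loss.
    ModelCheckpoint("best_weights.h5", monitor="val_loss",
                    save_best_only=True, save_weights_only=True),
    # Write logs for the TensorBoard UI.
    TensorBoard(log_dir="logs"),
]
# model.fit(X, Y, validation_data=(X_test, Y_test), callbacks=callbacks)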
Example 14
Project: NMT_GAN   Author: ZhenYangIACAS   File: nmt_generator.py    License: Apache License 2.0
def build_train_model(self):
        reuse_var = self.reuse_var
        loss=tf.convert_to_tensor(0.)
        grads = []
        for i, gpu_device in enumerate(self.gpu_devices):
                if i > 0 :
                    reuse_var = True
                cost, grad = self.build_model(reuse_var=reuse_var, gpu_device=gpu_device)
                loss += cost
                grads.append(grad)
                
        loss = loss / self.gpu_num
        grads_and_vars = average_clip_gradient(grads, self.clip_c)
        optm = self.optimizer.apply_gradients(grads_and_vars)

        self.train_loss = loss
        self.train_grads_and_vars = grads_and_vars
        self.train_optm = optm 
Example 15
Project: Zeroshot-GAN   Author: LinkWoong   File: unet.py    License: MIT License
def check_train_model(self,batch_labels,batch_images,epoch,save_path_prefix):

        fake_imgs, real_imgs = self.generate_fake_samples(batch_images, batch_labels)
        diff_imgs = fake_imgs - real_imgs
        minV=np.min(diff_imgs)
        diff_imgs=diff_imgs-minV
        maxV=np.max(diff_imgs)
        diff_imgs=diff_imgs/maxV

        current_time = time.strftime('%Y-%m-%d@%H:%M:%S', time.localtime())
        sample_img_path = os.path.join(save_path_prefix, "check_train_%02d_%04d.png" % (epoch, self.counter))
        #print("Time:%s,CheckTrain@%s" % (current_time,sample_img_path))


        merged_fake_images = merge(scale_back(fake_imgs), [self.batch_size, 1])
        merged_real_images = merge(scale_back(real_imgs), [self.batch_size, 1])
        merged_diff_images = merge(scale_back_magnification(diff_imgs), [self.batch_size, 1])
        merged_pair = np.concatenate([merged_real_images, merged_fake_images, merged_diff_images], axis=1)
        #plt.imshow(merged_pair)

        misc.imsave(sample_img_path, merged_pair)

        return merged_pair 
Example 16
Project: TrafficFlowPrediction   Author: xiaochus   File: train.py    License: MIT License
def train_model(model, X_train, y_train, name, config):
    """train
    train a single model.

    # Arguments
        model: Model, NN model to train.
        X_train: ndarray(number, lags), Input data for train.
        y_train: ndarray(number, ), result data for train.
        name: String, name of model.
        config: Dict, parameter for train.
    """

    model.compile(loss="mse", optimizer="rmsprop", metrics=['mape'])
    # early = EarlyStopping(monitor='val_loss', patience=30, verbose=0, mode='auto')
    hist = model.fit(
        X_train, y_train,
        batch_size=config["batch"],
        epochs=config["epochs"],
        validation_split=0.05)

    model.save('model/' + name + '.h5')
    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv('model/' + name + ' loss.csv', encoding='utf-8', index=False) 
Example 17
Project: pytorch-unet-segmentation   Author: ugent-korea   File: modules.py    License: MIT License
def train_model(model, data_train, criterion, optimizer):
    """Train the model and report validation error with training error
    Args:
        model: the model to be trained
        criterion: loss function
        data_train (DataLoader): training dataset
    """
    model.train()
    for batch, (images, masks) in enumerate(data_train):
        images = Variable(images.cuda())
        masks = Variable(masks.cuda())
        outputs = model(images)
        # print(masks.shape, outputs.shape)
        loss = criterion(outputs, masks)
        optimizer.zero_grad()
        loss.backward()
        # Update weights
        optimizer.step()
    # total_loss = get_loss_train(model, data_train, criterion) 
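The same zero_grad/backward/step loop in a runnable form (random tensors replace the image/mask dataset; on current PyTorch no GPU or legacy Variable wrapper is needed):

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

data_train = DataLoader(
    TensorDataset(torch.randn(64, 10), torch.randn(64, 1)), batch_size=16)
model = nn.Linear(10, 1)
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

model.train()
for inputs, targets in data_train:
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    optimizer.zero_grad()  # clear gradients from the previous batch
    loss.backward()        # backpropagate
    optimizer.step()       # update weights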
Example 18
Project: naacl18-multitask_argument_mining   Author: UKPLab   File: BiLSTM.py    License: Apache License 2.0
def trainModel(self):
        if self.model == None:
            self.buildModel()        
            
        trainMatrix = self.dataset['trainMatrix'] 
        self.epoch += 1
        
        if self.params['optimizer'] in self.learning_rate_updates and self.epoch in self.learning_rate_updates[self.params['optimizer']]:
            K.set_value(self.model.optimizer.lr, self.learning_rate_updates[self.params['optimizer']][self.epoch])          
            logging.info("Update Learning Rate to %f" % (K.get_value(self.model.optimizer.lr)))
        
        iterator = self.online_iterate_dataset(trainMatrix, self.labelKey) if self.params['miniBatchSize'] == 1 else self.batch_iterate_dataset(trainMatrix, self.labelKey)
        
        for batch in iterator: 
            labels = batch[0]
            nnInput = batch[1:]                
            self.model.train_on_batch(nnInput, labels) 
Example 19
Project: deepwalk_keras_igraph   Author: napsternxg   File: skipgram_network.py    License: GNU General Public License v2.0
def train_on_model(model, g, vocab_size, max_len = 10, epochs = 100, print_every=10, window_size=4, negative_sampling=1.0, sampling_table=None):
  losses, valid_sequences = 0.0, 0
  for epoch in range(epochs):
    sequences = pad_sequences([g.random_walk(k,max_len) for k in range(vocab_size)])
    X_couples = []
    y_labels = []
    for seq in sequences:
      couples, labels = skipgrams(seq, vocab_size, window_size=window_size, negative_samples=negative_sampling, sampling_table=sampling_table)
      X_couples.extend(couples)
      y_labels.extend(labels)
      if len(couples) == 0:
        continue
      valid_sequences += 1
    loss = train_batch(model, X_couples, y_labels)
    losses += loss
    if epoch % print_every == 0:
      logging.info("Mean loss in Epoch [%s] with %s valid sequences = %s" % (epoch, valid_sequences, losses / valid_sequences))
      losses, valid_sequences = 0.0, 0 
Example 20
Project: bootcamp   Author: milvus-io   File: cli.py    License: Apache License 2.0
def train_model(working_dir, pre_training_phase):
    # PRE TRAINING

    # commit a5030dd7a1b53cd11d5ab7832fa2d43f2093a464
    # Merge: a11d13e b30e64e
    # Author: Philippe Remy <premy.enseirb@gmail.com>
    # Date:   Fri Apr 10 10:37:59 2020 +0900
    # LibriSpeech train-clean-data360 (600, 100). 0.985 on test set (enough for pre-training).

    # TRIPLET TRAINING
    # [...]
    # Epoch 175/1000
    # 2000/2000 [==============================] - 919s 459ms/step - loss: 0.0077 - val_loss: 0.0058
    # Epoch 176/1000
    # 2000/2000 [==============================] - 917s 458ms/step - loss: 0.0075 - val_loss: 0.0059
    # Epoch 177/1000
    # 2000/2000 [==============================] - 927s 464ms/step - loss: 0.0075 - val_loss: 0.0059
    # Epoch 178/1000
    # 2000/2000 [==============================] - 948s 474ms/step - loss: 0.0073 - val_loss: 0.0058
    start_training(working_dir, pre_training_phase) 
Example 21
Project: kaggle-dsb2018   Author: nicolefinnie   File: schwaebische_nuclei.py    License: Apache License 2.0
def train_model(self, train_df, val_df, cluster_ix, save_model_path, load_model_path=None, model_type='grey'):
        self.logger.info('### Build X_train/Y_train %s: images ####', model_type)
        (X_train, Y_train) = self.build_model_training_inputs(train_df, cluster_ix)

        if self.KFOLD > 0:
            if os.path.isdir(load_model_path):
                self.logger.info('Retrain the %s models under %s and save the new models under %s', model_type, load_model_path, save_model_path)
                models, _ = train_model_kfold(X_train, Y_train, self.KFOLD, self.EPOCH, self.GPU, self.EARLYSTOPPING, load_model_path, save_model_path, min_dropout=self.DROPOUT)
            else:
                self.logger.info("Train the new %s models from scratch and save the new models under %s", model_type, save_model_path)
                models, _ = train_model_kfold(X_train, Y_train, self.KFOLD, self.EPOCH, self.GPU, self.EARLYSTOPPING, save_model_path=save_model_path, min_dropout=self.DROPOUT)

        else:
            self.logger.info('### Build X_val/Y_val %s: images ####', model_type)
            (X_val, Y_val) = self.build_model_training_inputs(val_df, cluster_ix)
            if os.path.isdir(load_model_path):
                self.logger.info('Retrain the single %s model under %s and save the new model under %s', model_type, load_model_path, save_model_path)
                model, _ = train_model(X_train, Y_train, X_val, Y_val, self.EPOCH, self.GPU, self.EARLYSTOPPING, load_model_path, save_model_path, min_dropout=self.DROPOUT)
            else:
                self.logger.info("Train the new %s model from scratch and save the new model under %s", model_type, save_model_path)
                model, _ = train_model(X_train, Y_train, X_val, Y_val, self.EPOCH, self.GPU, self.EARLYSTOPPING, save_model_path=save_model_path, min_dropout=self.DROPOUT)

            models = [model]
           
        return models 
Example 22
Project: seq2seq.pytorch   Author: eladhoffer   File: tokenizer.py    License: MIT License
def train_sp_model(**kwargs):
        """possible arguments:
        --input: one-sentence-per-line raw corpus file. You can pass a comma-separated list of files.
        --model_prefix: output model name prefix. <model_name>.model and <model_name>.vocab are generated.
        --vocab_size: vocabulary size, e.g., 8000, 16000, or 32000
        --character_coverage: amount of characters covered by the model
        --model_type: model type. Choose from unigram (default), bpe, char, or word. The input sentence must be pretokenized when using word type.
        """
        kwargs.update({'unk_piece': UNK_TOKEN, 'bos_piece': BOS_TOKEN,
                       'eos_piece': EOS_TOKEN, 'pad_piece': PAD_TOKEN,
                       'unk_id': UNK, 'bos_id': BOS,
                       'eos_id': EOS, 'pad_id': PAD,
                       'unk_surface': UNK_TOKEN,
                       })
        for arg, val in kwargs.items():
            if isinstance(val, bool):
                kwargs[arg] = 'true' if val else 'false'
        config = ' '.join(['--{}={}'.format(name, value)
                           for name, value in kwargs.items() if value is not None])
        spm.SentencePieceTrainer.Train(config) 
Example 23
Project: tensorflow_RL   Author: RLOpensource   File: ddpg.py    License: MIT License
def train_model(self):
        batch = random.sample(self.memory,self.batch_size)
        states = np.asarray([e[0] for e in batch])
        actions = np.asarray([e[1] for e in batch])
        rewards = np.asarray([e[2] for e in batch])
        next_states = np.asarray([e[3] for e in batch])
        dones = np.asarray([e[4] for e in batch])
        target_action_input = self.sess.run(self.target_actor.actor,feed_dict={self.target_actor.state:next_states})
        target_q_value = self.sess.run(self.target_critic.critic,feed_dict={self.target_critic.state:next_states,
                                                                            self.target_critic.action:target_action_input})
        targets = np.asarray([r + self.gamma * (1-d) * tv for r,tv,d in zip(rewards,target_q_value,dones)])
        self.sess.run(self.ctrain_op,feed_dict=
        {
            self.critic.state:states,
            self.critic.action:actions,
            self.target_value:np.squeeze(targets)
        })
        action_for_train = self.sess.run(self.actor.actor,feed_dict={self.actor.state:states})
        self.sess.run(self.atrain_op,feed_dict=
        {
            self.actor.state:states,
            self.critic.state:states,
            self.critic.action:action_for_train
        })
        self.sess.run(self.update_target_soft) 
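The core of this update is the bootstrapped target r + gamma * (1 - done) * Q'(s', a'); here it is in isolation, with toy arrays (all values are illustrative):

import numpy as np

gamma = 0.99
rewards = np.array([1.0, 0.0, 0.5])
target_q = np.array([2.0, 1.5, 3.0])  # Q'(s', a') from the target networks
dones = np.array([0, 1, 0])           # a terminal step stops the bootstrap

# Same formula as the zip() above, vectorized.
targets = rewards + gamma * (1 - dones) * target_q
print(targets)  # [2.98 0.   3.47]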
Example 24
Project: CNNGestureRecognizer   Author: asingh33   File: gestureCNN.py    License: MIT License
def trainModel(model):

    # Split X and y into training and testing sets
    X_train, X_test, Y_train, Y_test = initializers()

    # Now start the training of the loaded model
    hist = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
                 verbose=1, validation_split=0.2)

    visualizeHis(hist)

    ans = input("Do you want to save the trained weights - y/n ?")
    if ans == 'y':
        filename = input("Enter file name - ")
        fname = path + str(filename) + ".hdf5"
        model.save_weights(fname,overwrite=True)
    else:
        model.save_weights("newWeight.hdf5",overwrite=True)

    # Save model as well
    # model.save("newModel.hdf5")
#%% 
Example 25
Project: nlp-journey   Author: msgi   File: svm_classifier.py    License: Apache License 2.0
def train_model(self, test_size=0.2):
        """
        Train the model. Simply writes the generated TF-IDF data, the
        chi-selected features, and the SVM model to disk.
        :return: the trained models
        """
        data_set = pd.read_csv(self.train_path,
                               sep='##',
                               encoding='utf-8',
                               header=None,
                               engine='python')
        data_set = data_set.dropna()
        chi_features, tf_idf_model, chi_model = self.__select_features(data_set)
        x_train, x_test, y_train, y_test = train_test_split(chi_features,
                                                            data_set[1],
                                                            test_size=test_size,
                                                            random_state=42)
        # A linear classification model is used here; an RBF (radial basis) kernel would be very slow.
        clf_model = svm.SVC(kernel='linear', verbose=True)
        print(clf_model)
        clf_model.fit(x_train, y_train)
        score = clf_model.score(x_test, y_test)
        print('Test accuracy:', score)
        return tf_idf_model, chi_model, clf_model 
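The three stages here (TF-IDF, chi-square selection, linear SVM) compose naturally as a scikit-learn pipeline; a hedged sketch on toy texts (the project's own __select_features is not shown above):

from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.pipeline import make_pipeline

texts = ["good movie", "bad film", "great movie", "awful film"]
labels = [1, 0, 1, 0]

clf = make_pipeline(
    TfidfVectorizer(),         # TF-IDF features
    SelectKBest(chi2, k=3),    # chi-square feature selection
    svm.SVC(kernel="linear"),  # linear kernel, as in the example
)
clf.fit(texts, labels)
print(clf.score(texts, labels))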
Example 26
Project: pytorch-project-template   Author: ryul99   File: train_model.py    License: Apache License 2.0
def train_model(hp, model, train_loader, writer, logger):
    model.net.train()
    for input_, target in train_loader:
        model.feed_data(input=input_, GT=target)
        model.optimize_parameters()
        loss = model.log.loss_v
        model.step += 1

        if logger is not None and (loss > 1e8 or math.isnan(loss)):
            logger.error("Loss exploded to %.02f at step %d!" % (loss, model.step))
            raise Exception("Loss exploded")

        if model.step % hp.log.summary_interval == 0:
            if writer is not None:
                writer.logging_with_step(loss, model.step, "train_loss")
            if logger is not None:
                logger.info("Train Loss %.04f at step %d" % (loss, model.step)) 
Example 27
Project: DeepCRF   Author: yjernite   File: model_use.py    License: MIT License
def train_model(train_data, dev_data, sequ_nn, config, params, graph):
    #~ train_data_32 = cut_and_pad(train_data, config)
    #~ dev_data_32 = cut_and_pad(dev_data, config)
    train_data_32 = cut_batches(train_data, config)
    dev_data_32 = cut_batches(dev_data, config)
    accuracies = []
    preds = {}
    for i in range(config.num_epochs):
        print(i)
        shuffle(train_data_32)
        sequ_nn.train_epoch(train_data_32, config, params)
        train_acc = sequ_nn.validate_accuracy(train_data_32, config)
        dev_acc = sequ_nn.validate_accuracy(dev_data_32, config)
        accuracies += [(train_acc, dev_acc)]
        if i % config.num_predict == config.num_predict - 1:
            preds[i+1] = tag_dataset(dev_data, config, params, graph)
    return (accuracies, preds) 
Example 28
Project: slac   Author: alexlee-gk   File: slac_agent.py    License: MIT License
def train_model(self, experience, weights=None):
    if self._enable_functions and getattr(
        self, "_train_model_fn", None) is None:
      raise RuntimeError(
          "Cannot find _train_model_fn.  Did %s.__init__ call super?"
          % type(self).__name__)
    if not isinstance(experience, trajectory.Trajectory):
      raise ValueError(
          "experience must be type Trajectory, saw type: %s" % type(experience))

    if self._enable_functions:
      loss_info = self._train_model_fn(experience=experience, weights=weights)
    else:
      loss_info = self._train_model(experience=experience, weights=weights)

    if not isinstance(loss_info, tf_agent.LossInfo):
      raise TypeError(
          "loss_info is not a subclass of LossInfo: {}".format(loss_info))
    return loss_info 
Example 29
Project: OpenNRE-PyTorch   Author: ShulinCao   File: Config.py    License: MIT License
def set_train_model(self, model):
		print("Initializing training model...")
		self.model = model
		self.trainModel = self.model(config = self)
		if self.pretrain_model != None:
			self.trainModel.load_state_dict(torch.load(self.pretrain_model))
		self.trainModel.cuda()
		if self.optimizer != None:
			pass
		elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
			self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr = self.learning_rate, lr_decay = self.lr_decay, weight_decay = self.weight_decay)
		elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
			self.optimizer = optim.Adadelta(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		elif self.opt_method == "Adam" or self.opt_method == "adam":
			self.optimizer = optim.Adam(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		else:
			self.optimizer = optim.SGD(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		print("Finish initializing") 
Example 30
Project: GoogLeNet-Inception   Author: conan7882   File: googlenet.py    License: MIT License
def create_train_model(self):
        self.set_is_training(is_training=True)
        self._create_train_input()
        if self._sub_imagenet_mean:
            net_input = module.sub_rgb2bgr_mean(self.image)
        else:
            net_input = self.image

        with tf.variable_scope('conv_layers', reuse=tf.AUTO_REUSE):
            self.layers['conv_out'] = self._conv_layers(net_input)
        with tf.variable_scope('inception_layers', reuse=tf.AUTO_REUSE):
            self.layers['inception_out'] = self._inception_layers(self.layers['conv_out'])
        with tf.variable_scope('fc_layers', reuse=tf.AUTO_REUSE):   
            self.layers['logits'] = self._fc_layers(self.layers['inception_out'])

        with tf.variable_scope('auxiliary_classifier_0'):
            self.layers['auxiliary_logits_0'] = self._auxiliary_classifier(
                self.layers['inception_4a'])
        with tf.variable_scope('auxiliary_classifier_1'):
            self.layers['auxiliary_logits_1'] = self._auxiliary_classifier(
                self.layers['inception_4d']) 
Example 31
Project: Gun-Detector   Author: itsamitgoel   File: mobilenet_v1_train.py    License: Apache License 2.0
def train_model():
  """Trains mobilenet_v1."""
  g, train_tensor = build_model()
  with g.as_default():
    slim.learning.train(
        train_tensor,
        FLAGS.checkpoint_dir,
        is_chief=(FLAGS.task == 0),
        master=FLAGS.master,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        number_of_steps=FLAGS.number_of_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=get_checkpoint_init_fn(),
        global_step=tf.train.get_global_step()) 
Example 32
Project: Machine-Learning-Study-Notes   Author: yhswjtuILMARE   File: RNNUnit.py    License: Apache License 2.0
def train_model():
    obj = corpus(path, 50, 50, 3000)
    with shelve.open("{0}corpus".format(save_path)) as fp:
        fp["obj"] = obj
    model = lstm_model(hidden_size=128, num_layer=2,
                     corpus=obj, keep_prob=1.0,
                     embedding_size=128, max_step=5000,
                     lr=0.005, save_path=save_path)
    result = []
    fig = plt.figure("cross-entropy")
    mpl.rcParams['xtick.labelsize'] = 8
    mpl.rcParams['ytick.labelsize'] = 8
    ax = fig.add_subplot(111)
    # ax.grid(True)
    for return_mat in model.train():
        result.extend(return_mat)
        # x = np.arange((len(return_mat)))
        # y = np.array(return_mat)
        # ax.plot(x, y, linewidth=0.8, color="b")
        # plt.pause(0.1)
    x = np.arange(len(result))
    y = np.array(result)
    ax.plot(x, y, linewidth=0.8, color="b")
    plt.show() 
Example 33
Project: ludwig   Author: uber   File: comet.py    License: Apache License 2.0
def train_model(self, *args, **kwargs):
        logger.info("comet.train_model() called......")
        if self.cometml_experiment:
            model = args[0]
            model_definition = args[1]
            model_definition_path = args[2]
            if model:
                self.cometml_experiment.set_model_graph(
                    str(model._graph.as_graph_def()))
            if model_definition:
                if model_definition_path:
                    base_name = os.path.basename(model_definition_path)
                else:
                    base_name = "model_definition.yaml"
                if "." in base_name:
                    base_name = base_name.rsplit(".", 1)[0] + ".json"
                else:
                    base_name = base_name + ".json"
                self.cometml_experiment.log_asset_data(model_definition,
                                                       base_name) 
Example 34
Project: Jtyoui   Author: jtyoui   File: NER.py    License: MIT License
def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True, input_length=length, weights=[embedding_matrix],
                  trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path) 
Example 35
Project: R-net   Author: matthew-z   File: main.py    License: MIT License
def train_model_from_args(args: argparse.Namespace):
    """
    Just converts from an ``argparse.Namespace`` object to string paths.
    """

    start_time = datetime.datetime.now().strftime('%b-%d_%H-%M')

    if args.serialization_dir:
        serialization_dir = args.serialization_dir
    else:
        path = Path(args.param_path.replace("configs/", "results/")).resolve()
        serialization_dir = path.with_name(path.stem) / start_time


    train_model_from_file(args.param_path,
                          serialization_dir,
                          args.overrides,
                          args.file_friendly_logging,
                          args.recover,
                          args.force,
                          args.ext_vars) 
Example 36
Project: deep-learning-note   Author: wdxtub   File: 1_linear_basic.py    License: MIT License
def trainModel(trainData, features, labels):
    """
    Estimate model parameters from the training data.

    Parameters
    ----------
    trainData : DataFrame, training set containing features and labels

    features : list of feature column names

    labels : list of label column names

    Returns
    -------
    model : LinearRegression, the fitted linear model
    """
    # Create a linear regression model
    model = linear_model.LinearRegression()
    # Fit the model to estimate its parameters
    model.fit(trainData[features], trainData[labels])
    return model 
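A usage sketch for the function above, on a toy DataFrame (runnable together with trainModel and the sklearn import it relies on; the column names are made up):

import pandas as pd
from sklearn import linear_model

trainData = pd.DataFrame({"x": [1, 2, 3, 4], "y": [2.1, 3.9, 6.2, 8.0]})
model = trainModel(trainData, features=["x"], labels=["y"])
print(model.coef_, model.intercept_)  # roughly slope 2, intercept 0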
Example 37
Project: caption_generator   Author: anuragmishracse   File: train_model.py    License: MIT License
def train_model(weight = None, batch_size=32, epochs = 10):

    cg = caption_generator.CaptionGenerator()
    model = cg.create_model()

    if weight != None:
        model.load_weights(weight)

    counter = 0
    file_name = 'weights-improvement-{epoch:02d}.hdf5'
    checkpoint = ModelCheckpoint(file_name, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    model.fit_generator(cg.data_generator(batch_size=batch_size), steps_per_epoch=cg.total_samples//batch_size, epochs=epochs, verbose=2, callbacks=callbacks_list)
    try:
        model.save('Models/WholeModel.h5', overwrite=True)
        model.save_weights('Models/Weights.h5',overwrite=True)
    except:
        print "Error in saving model."
    print "Training complete...\n" 
Example 38
Project: AI_Sudoku   Author: neeru1207   File: CNN.py    License: Creative Commons Zero v1.0 Universal
def train_and_evaluate_model(self):
        if not self.modelbuilt:
            raise Exception("Build and train the model first!")
        if self.modeltrained:
            return
        # MNIST object
        mnist = tf.keras.datasets.mnist
        # Loading the Train/Test data
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        # Reshape to form a 3D Vector
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
        # Normalize the train/test dataset
        x_train, x_test = x_train / 255.0, x_test / 255.0
        # Train the model
        self.model.fit(x=x_train, y=y_train, epochs=5)
        # Evaluate the model
        test_loss, test_acc = self.model.evaluate(x=x_test, y=y_test)
        # Print out the model accuracy
        print('\nTest accuracy:', test_acc)
        self.modeltrained = True 
Example 39
Project: doc2vec   Author: ibrahimsharaf   File: classifier_model.py    License: MIT License
def train_model(self, d2v, training_vectors, training_labels):
        logging.info("Classifier training")
        train_vectors = doc2VecModel.get_vectors(
            d2v, len(training_vectors), 300, 'Train')
        self.model.fit(train_vectors, np.array(training_labels))
        training_predictions = self.model.predict(train_vectors)
        logging.info(
            'Training predicted classes: {}'.format(np.unique(
                training_predictions)))
        logging.info(
            'Training accuracy: {}'.format(
                accuracy_score(training_labels, training_predictions)))
        logging.info(
            'Training F1 score: {}'.format(
                f1_score(
                    training_labels, training_predictions,
                    average='weighted'))) 
Example 40
Project: SentimentAnalysis   Author: ljw9609   File: svm.py    License: MIT License
def train_model(self, data):
        print("------ SVM Classifier is training ------")
        for d in data:
            label = d[0]
            doc = d[1]
            self.train_data.append(doc)
            self.train_label.append(label)

        self.train_data = np.array(self.train_data)
        self.train_label = np.array(self.train_label)

        train_vectors = self.words2vector(self.train_data)

        self.clf.fit(train_vectors, self.train_label)

        print("------ SVM Classifier training over ------") 
Example 41
Project: tcav   Author: tensorflow   File: kdd99_model.py    License: Apache License 2.0
def train_and_save_model(model_path, labels_path):
  """ Trains simple feedforward model for the KDD99 dataset"""
  # Prepare dataset and split it
  data, labels = prepare_dataset(labels_path)
  train_data, test_data, train_labels, test_labels = train_test_split(
      data, labels, test_size=0.2)

  # Create categorical map for the embedding layer
  categorical_map = create_categorical_map(data)
  model = make_keras_model(categorical_map)

  print(model.summary())
  model.fit(
      train_data,
      train_labels,
      validation_data=(test_data, test_labels),
      epochs=4,
      batch_size=64)
  model.save(model_path)

  # Test on a small subset of predictions
  predictions = model.predict(test_data[:10])
  print(predictions) 
Example 42
Project: Scale-Adaptive-Network   Author: speedinghzl   File: train-details.py    License: MIT License
def train_model(self):
        """Network training loop."""
        #self.solver.solve()
        while True:
            self.solver.step(1)
            blobs = self.solver.net.blobs 
            print('max ', np.max(blobs['fc2_part'].data[:]), np.max(blobs['fc2_part'].data[:]))
            print('min ', np.min(blobs['fc2_part'].data[:]), np.min(blobs['fc2_part'].data[:]))

            print('mean ', np.mean(blobs['fc2_part_fc2_part_0_split_0'].diff[:]), np.mean(blobs['fc2_part_fc2_part_0_split_1'].diff[:]), np.mean(blobs['res5c'].diff[:]))
            print('scale ', np.linalg.norm(blobs['fc2_part_fc2_part_0_split_0'].diff[:]), np.linalg.norm(blobs['fc2_part_fc2_part_0_split_1'].diff[:]), np.linalg.norm(blobs['res5c'].diff[:]))
            print('max ', np.max(blobs['fc2_part_fc2_part_0_split_0'].diff[:]), np.max(blobs['fc2_part_fc2_part_0_split_1'].diff[:]), np.max(blobs['res5c'].diff[:]))
            print('min ', np.min(blobs['fc2_part_fc2_part_0_split_0'].diff[:]), np.min(blobs['fc2_part_fc2_part_0_split_1'].diff[:]), np.min(blobs['res5c'].diff[:]))
            print('-------------')
            # print('mean ', np.mean(blobs['gate_3s'].data[:]), np.mean(blobs['gate_2s'].data[:]), np.mean(blobs['gate_1s'].data[:]))
            # print('scale ', np.linalg.norm(blobs['gate_3s'].data[:]), np.linalg.norm(blobs['gate_2s'].data[:]), np.linalg.norm(blobs['gate_1s'].data[:]))
            # print('max ', np.max(blobs['gate_3s'].data[:]), np.max(blobs['gate_2s'].data[:]), np.max(blobs['gate_1s'].data[:]))
            # print('min ', np.min(blobs['gate_3s'].data[:]), np.min(blobs['gate_2s'].data[:]), np.min(blobs['gate_1s'].data[:]))
            print('*************' * 10)
Example 43
Project: face-py-faster-rcnn   Author: playerkk   File: train.py    License: MIT License
def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        model_paths = []
        while self.solver.iter < max_iters:
            # Make one SGD update
            timer.tic()
            self.solver.step(1)
            timer.toc()
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))

            if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self.solver.iter
                model_paths.append(self.snapshot())

        if last_snapshot_iter != self.solver.iter:
            model_paths.append(self.snapshot())
        return model_paths 
Example 44
Project: multilabel-image-classification-tensorflow   Author: isobar-us   File: mobilenet_v1_train.py    License: MIT License
def train_model():
  """Trains mobilenet_v1."""
  g, train_tensor = build_model()
  with g.as_default():
    slim.learning.train(
        train_tensor,
        FLAGS.checkpoint_dir,
        is_chief=(FLAGS.task == 0),
        master=FLAGS.master,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        number_of_steps=FLAGS.number_of_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=get_checkpoint_init_fn(),
        global_step=tf.train.get_global_step()) 
Example 45
Project: metal   Author: HazyResearch   File: end_model.py    License: Apache License 2.0
def train_model(self, train_data, valid_data=None, log_writer=None, **kwargs):
        self.config = recursive_merge_dicts(self.config, kwargs)

        # If train_data is provided as a tuple (X, Y), we can make sure Y is in
        # the correct format
        # NOTE: Better handling for if train_data is Dataset or DataLoader...?
        if isinstance(train_data, (tuple, list)):
            X, Y = train_data
            Y = self._preprocess_Y(self._to_torch(Y, dtype=torch.FloatTensor), self.k)
            train_data = (X, Y)

        # Convert input data to data loaders
        train_loader = self._create_data_loader(train_data, shuffle=True)

        # Create loss function
        loss_fn = self._get_loss_fn()

        # Execute training procedure
        self._train_model(
            train_loader, loss_fn, valid_data=valid_data, log_writer=log_writer
        ) 
Example 46
Project: 2019-OSS-Summer-RL   Author: utilForever   File: cartpole_a2c.py    License: MIT License
def train_model(self, state, action, reward, next_state, done):
        value = self.critic.predict(state)[0]
        next_value = self.critic.predict(next_state)[0]

        act = np.zeros([1, self.action_size])
        act[0][action] = 1

        # Advantage and update target from the Bellman expectation equation
        if done:
            advantage = reward - value
            target = [reward]
        else:
            advantage = (reward + self.discount_factor * next_value) - value
            target = reward + self.discount_factor * next_value

        self.actor_updater([state, act, advantage])
        self.critic_updater([state, target]) 
Example 47
Project: ADLxMLDS2017   Author: thtang   File: cartpole_a2c.py    License: Apache License 2.0
def train_model(self, state, action, reward, next_state, done):
        target = np.zeros((1, self.value_size))
        advantages = np.zeros((1, self.action_size))

        value = self.critic.predict(state)[0]
        next_value = self.critic.predict(next_state)[0]

        if done:
            advantages[0][action] = reward - value
            target[0][0] = reward
        else:
            advantages[0][action] = reward + self.discount_factor * (next_value) - value
            target[0][0] = reward + self.discount_factor * next_value

        self.actor.fit(state, advantages, epochs=1, verbose=0)
        self.critic.fit(state, target, epochs=1, verbose=0) 
Example 48
Project: Dog-Cat-Classifier   Author: ardamavi   File: train.py    License: Apache License 2.0
def train_model(model, X, X_test, Y, Y_test):
    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None))

    # Generates augmented data on the fly: improves results,
    # but extends the duration of training.

    # If you don't want, use this:
    # model.fit(X, Y, batch_size=10, epochs=25, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)

    from keras.preprocessing.image import ImageDataGenerator
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=0,  width_shift_range=0.1, height_shift_range=0.1, horizontal_flip = True, vertical_flip = False)
    generated_data.fit(X)
    model.fit_generator(generated_data.flow(X, Y, batch_size=8), steps_per_epoch=X.shape[0]//8, epochs=25, validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model 
Example 49
def train_document_field_detector_model(cls,
                                            log: ProcessLogger,
                                            field: DocumentField,
                                            train_data_project_ids: Optional[List],
                                            use_only_confirmed_field_values: bool = False) -> Optional[ClassifierModel]:
        log.info(f'Training model for field {field.code} (#{field.pk})...')

        if train_data_project_ids and not use_only_confirmed_field_values:
            train_data_sets = cls.get_train_datasets_from_projects(field.pk, train_data_project_ids)
        else:
            train_data_sets = cls.get_train_data_sets(field, train_data_project_ids)

        if not train_data_sets:
            log.info('Not enough data to train model for document_type #{0} and field #{1}.'
                     .format(field.document_type.pk, field.pk))
            return None

        classifier_model = cls.train_model(field, train_data_sets)
        log.info(
            'Finished training model for document_type #{0} and field #{1}.'.format(field.document_type.pk, field.pk))

        return classifier_model 
Example 50
Project: MAX-Image-Segmenter   Author: IBM   File: mobilenet_v1_train.py    License: Apache License 2.0
def train_model():
  """Trains mobilenet_v1."""
  g, train_tensor = build_model()
  with g.as_default():
    slim.learning.train(
        train_tensor,
        FLAGS.checkpoint_dir,
        is_chief=(FLAGS.task == 0),
        master=FLAGS.master,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        number_of_steps=FLAGS.number_of_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=get_checkpoint_init_fn(),
        global_step=tf.train.get_global_step()) 
Example 51
Project: sia-cog   Author: tech-quantum   File: pipelinecomponents.py    License: MIT License
def model_train(X, Y, pipeline, X_test=None, Y_test=None, more = False):
    try:
        result = None
        if model_type == "mlp":
            deepmodel = projectmgr.GetDeepModel(name, "ml", pipeline['options']['model_name'])
            if deepmodel is None:
                raise Exception(pipeline['options']['model_name'] + ": Model not found!")

            modeljson = json.loads(deepmodel.modeldata)
            modelObj = mxnetfactory.createModel(modeljson)
            #modelObj.compile(loss=pipeline['options']['loss'], optimizer=pipeline['options']['optimizer'],
            #              metrics=pipeline['options']['scoring'])
            epoches = pipeline["options"]["epoches"]
            batch_size = pipeline["options"]["batch_size"]
            mxnetfactory.init(mxnetfactory, name, jobid)
            result = mxnetfactory.Train(modelObj, X, Y, projectfolder, pipeline["options"], epoches, batch_size, X_test=None, Y_test=None, more=more)
            projectmgr.UpdateExecuteResult(jobid, json.dumps(result))
            picklefile = projectfolder + "/model.json"
            model_json = modelObj.to_json()
            with open(picklefile, "w") as json_file:
                json_file.write(model_json)

        return result
    except Exception as e:
        raise Exception("model_train: " + str(e)) 
Example 52
Project: tensor2robot   Author: google-research   File: vrgripper_env_meta_models.py    License: Apache License 2.0
def model_train_fn(
      self,
      features,
      labels,
      inference_outputs,
      mode,
      config = None,
      params = None
  ):
    """Returns weighted sum of losses and individual losses. See base class."""
    bc_loss = self._action_decoder.loss(labels)
    bc_loss = tf.identity(bc_loss, name='bc_loss')
    embed_loss = tec.compute_embedding_contrastive_loss(
        inference_outputs['inference_embedding'],
        inference_outputs['condition_embedding'])
    end_loss = self._compute_end_loss(inference_outputs, labels)
    train_outputs = {'bc_loss': bc_loss, 'embed_loss': embed_loss,
                     'end_loss': end_loss}
    return (bc_loss + self._embed_loss_weight * embed_loss +
            self._predict_end_weight * end_loss, train_outputs)  # pytype: disable=bad-return-type 
Example 53
Project: models   Author: tensorflow   File: mobilenet_v1_train.py    License: Apache License 2.0
def train_model():
  """Trains mobilenet_v1."""
  g, train_tensor = build_model()
  with g.as_default():
    slim.learning.train(
        train_tensor,
        FLAGS.checkpoint_dir,
        is_chief=(FLAGS.task == 0),
        master=FLAGS.master,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        number_of_steps=FLAGS.number_of_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=get_checkpoint_init_fn(),
        global_step=tf.train.get_global_step()) 
Example 54
Project: SMIT   Author: BCV-Uniandes   File: train.py    License: MIT License
def train_model(self, generator=False, discriminator=False):
        if torch.cuda.device_count() > 1 and hvd.size() == 1:
            G = self.G.module
        else:
            G = self.G
        for p in G.generator.parameters():
            try:
                p.requires_grad_(generator)
            except AttributeError:
                p.requires_grad = generator
        for p in self.D.parameters():
            try:
                p.requires_grad_(discriminator)
            except AttributeError:
                p.requires_grad = discriminator

    # ============================================================#
    # ============================================================# 
Example 55
Project: edafa   Author: andrewekhalel   File: mobilenet_v1_train.py    License: MIT License
def train_model():
  """Trains mobilenet_v1."""
  g, train_tensor = build_model()
  with g.as_default():
    slim.learning.train(
        train_tensor,
        FLAGS.checkpoint_dir,
        is_chief=(FLAGS.task == 0),
        master=FLAGS.master,
        log_every_n_steps=FLAGS.log_every_n_steps,
        graph=g,
        number_of_steps=FLAGS.number_of_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        init_fn=get_checkpoint_init_fn(),
        global_step=tf.train.get_global_step()) 
Example 56
Project: cookiecutter-easydata   Author: hackalog   File: train.py    License: MIT License
def train_model(algorithm_params=None,
                run_number=0, *, dataset_name, algorithm_name, hash_type,
                **kwargs):
    """Train a model using the specified algorithm using the given dataset.

    """
    metadata = {}
    ds = Dataset.load(dataset_name)
    metadata['data_hash'] = joblib.hash(ds.data, hash_name=hash_type)
    metadata['target_hash'] = joblib.hash(ds.target, hash_name=hash_type)
    model = available_algorithms(keys_only=False)[algorithm_name]
    model.set_params(**algorithm_params)
    start_time = time.time()
    model.fit(ds.data, y=ds.target)
    end_time = record_time_interval('train_model', start_time)
    metadata['start_time'] = start_time
    metadata['duration'] = end_time - start_time
    return model, metadata 
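joblib.hash gives a stable fingerprint of in-memory arrays, which is what makes the data_hash/target_hash provenance metadata above reproducible; a small sketch:

import numpy as np
import joblib

data = np.arange(10)
print(joblib.hash(data, hash_name="md5"))      # identical data, identical hash
print(joblib.hash(data + 1, hash_name="md5"))  # any change yields a new hash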
Example 57
Project: helen   Author: kishwarshafin   File: TrainInterface.py    License: MIT License
def train_model(self):
        # train a model
        train(self.train_file,
              self.test_file,
              self.batch_size,
              self.epochs,
              self.gpu_mode,
              self.num_workers,
              self.retrain_model,
              self.retrain_model_path,
              self.gru_layers,
              self.hidden_size,
              self.learning_rate,
              self.weight_decay,
              self.model_dir,
              self.stats_dir,
              not_hyperband=True) 
Example 58
Project: pathnet-pytorch   Author: kimhc6028   File: pathnet.py    License: BSD 3-Clause "New" or "Revised" License
def train_model(self, train_loader, path, num_batch):
        self.train()
        fitness = 0
        train_len = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            if self.args.cuda:
                data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            self.optimizer.zero_grad()
            output = self(data, path, -1)
            pred = output.data.max(1)[1] # get the index of the max log-probability
            fitness += pred.eq(target.data).cpu().sum()
            train_len += len(target.data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            self.optimizer.step()
            if not batch_idx < num_batch -1:
                break
        fitness = fitness / train_len
        return fitness 
Example 59
Project: WPAL-network   Author: kyu-sz   File: train.py    License: GNU General Public License v3.0
def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        model_paths = []
        while self._solver.iter < max_iters:
            # Make one SGD update
            timer.tic()
            self._solver.step(1)
            timer.toc()
            if self._solver.iter % (10 * self._solver_param.display) == 0:
                print('speed: {:.3f}s / iter'.format(timer.average_time))
            if self._solver.iter % 10 == 0:
                print("Python: iter", self._solver.iter)
            if self._solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self._solver.iter
                model_paths.append(self.snapshot())

            if self._solver.iter % cfg.TRAIN.TEST_ITERS == 0:
                test_net(self._solver.test_net, self._db, self._output_dir)

        if last_snapshot_iter != self._solver.iter:
            model_paths.append(self.snapshot())
        return model_paths 
Example 60
Project: tacotron2-mandarin-griffin-lim   Author: Joee1995   File: train.py    License: MIT License
def model_train_mode(args, feeder, hparams, global_step):
	with tf.variable_scope('model', reuse=tf.AUTO_REUSE) as scope:
		model_name = None
		if args.model == 'Tacotron-2':
			model_name = 'Tacotron'
		model = create_model(model_name or args.model, hparams)
		if hparams.predict_linear:
			model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.token_targets, linear_targets=feeder.linear_targets,
				targets_lengths=feeder.targets_lengths, global_step=global_step,
				is_training=True)
		else:
			model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.token_targets,
				targets_lengths=feeder.targets_lengths, global_step=global_step,
				is_training=True)
		model.add_loss()
		model.add_optimizer(global_step)
		stats = add_train_stats(model, hparams)
		return model, stats