Python models.py Examples

The following are 6 code examples that reference models.py, drawn from open-source projects. The source file and project for each example are listed above it, along with the project's license.
Example #1
Source File: generate.py    From GroundedTranslation with BSD 3-Clause "New" or "Revised" License
def __init__(self, args):
    self.args = args
    self.vocab = dict()
    self.unkdict = dict()
    self.counter = 0
    self.maxSeqLen = 0

    # consistent with models.py
    self.use_sourcelang = args.source_vectors is not None
    self.use_image = not args.no_image
    self.model = None
    self.prepare_datagenerator()

    # this results in two file handles on the dataset (one opened here and
    # one in data_generator)
    if not self.args.dataset:
        logger.warning("No dataset given, using flickr8k")
        self.dataset = h5py.File("flickr8k/dataset.h5", "r")
    else:
        self.dataset = h5py.File("%s/dataset.h5" % self.args.dataset, "r")

    if self.args.debug:
        theano.config.optimizer = 'None'
        theano.config.exception_verbosity = 'high'
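For context, this constructor expects an argparse-style namespace. Below is a minimal sketch of how such an object might be built, purely as an illustration; the flag names beyond those read above, and the class name, are assumptions rather than details taken from generate.py.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default=None)         # directory containing dataset.h5
parser.add_argument("--source_vectors", default=None)  # enables use_sourcelang when set
parser.add_argument("--no_image", action="store_true")
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()

generator = VisualWordLSTM(args)  # hypothetical class name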
Example #2
Source File: initial_state_features.py    From GroundedTranslation with BSD 3-Clause "New" or "Revised" License
def __init__(self, args):
    self.args = args
    self.vocab = dict()
    self.unkdict = dict()
    self.counter = 0
    self.maxSeqLen = 0

    # consistent with models.py
    # maybe use_sourcelang isn't applicable here?
    self.use_sourcelang = args.source_vectors is not None
    self.use_image = not args.no_image

    if self.args.debug:
        theano.config.optimizer = 'None'
        theano.config.exception_verbosity = 'high'
Example #3
Source File: extract_hidden_features.py    From GroundedTranslation with BSD 3-Clause "New" or "Revised" License
def __init__(self, args):
    self.args = args
    self.args.generate_from_N_words = 0  # Default 0
    self.vocab = dict()
    self.unkdict = dict()
    self.counter = 0
    self.maxSeqLen = 0
    self.MAX_HT = self.args.generation_timesteps - 1

    # consistent with models.py
    # maybe use_sourcelang isn't applicable here?
    self.use_sourcelang = args.source_vectors is not None
    self.use_image = not args.no_image

    if self.args.debug:
        theano.config.optimizer = 'None'
        theano.config.exception_verbosity = 'high'

    self.source_type = "predicted" if self.args.use_predicted_tokens else "gold"
    self.source_encoder = "mt_enc" if self.args.no_image else "vis_enc"
    self.source_dim = self.args.hidden_size

    self.h5_dataset_str = "%s-hidden_feats-%s-%d" % (self.source_type,
                                                     self.source_encoder,
                                                     self.source_dim)
    logger.info("Serialising into %s" % self.h5_dataset_str)
Example #4
Source File: train_dnn.py    From x-vector-kaldi-tf with Apache License 2.0
def eval_trained_dnn(main_dir, _iter, egs_dir, run_opts):
    input_model_dir = "{dir}/model_{iter}".format(dir=main_dir, iter=_iter)

    # we assume that there is just one tar file for validation
    tar_file = "{0}/valid_egs.1.tar".format(egs_dir)

    _command = '{command} "{main_dir}/log/compute_prob_valid.{iter}.log" ' \
               'local/tf/eval_dnn.py ' \
               '--tar-file="{tar_file}" --use-gpu=no ' \
               '--log-file="{main_dir}/log/compute_prob_valid.{iter}.log" ' \
               '--input-dir="{input_model_dir}"'.format(command=run_opts.command,
                                                        main_dir=main_dir,
                                                        iter=_iter,
                                                        tar_file=tar_file,
                                                        input_model_dir=input_model_dir)

    utils.background_command(_command)

    # we assume that there is just one tar file for train diagnostics
    tar_file = "{0}/train_subset_egs.1.tar".format(egs_dir)

    _command = '{command} "{main_dir}/log/compute_prob_train_subset.{iter}.log" ' \
               'local/tf/eval_dnn.py ' \
               '--tar-file="{tar_file}" --use-gpu=no ' \
               '--log-file="{main_dir}/log/compute_prob_train_subset.{iter}.log" ' \
               '--input-dir="{input_model_dir}"'.format(command=run_opts.command,
                                                        main_dir=main_dir,
                                                        iter=_iter,
                                                        tar_file=tar_file,
                                                        input_model_dir=input_model_dir)

    utils.background_command(_command) 
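utils.background_command is a project helper, not a standard library call. Below is a minimal sketch of what such a helper could look like, assuming it only needs to launch the queued command without blocking; the actual x-vector-kaldi-tf implementation may also log output and track the child process.

import subprocess

def background_command(command):
    """Launch a shell command asynchronously and return its process handle."""
    # shell=True because the command string carries its own quoting and is
    # itself a cluster-wrapper invocation (e.g. run.pl or queue.pl).
    return subprocess.Popen(command, shell=True)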
Example #5
Source File: api.py    From EnergyPATHWAYS with MIT License
def delete(self, scenario_id=None):
    if scenario_id is None:
        return {'message': "Requests to delete a scenario must specify the id in the URI."}, 400

    scenario = fetch_owned_scenario(scenario_id)
    # We don't allow built-in scenarios to be deleted via the API (even by an admin) because it may be unsafe.
    # See comment on demand_case and supply_case relationships for Scenario in models.py for discussion.
    if scenario.is_built_in():
        return {'message': "Built-in scenarios cannot be deleted via this API."}, 400

    models.db.session.delete(scenario)
    models.db.session.commit()

    return {'message': 'Deleted'}, 200
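Assuming this method belongs to a Flask-RESTful resource routed at something like /scenarios/<scenario_id> (the route, host, and lack of authentication here are all assumptions), a client call might look like:

import requests

resp = requests.delete("http://localhost:5000/scenarios/42")  # hypothetical URL
print(resp.status_code, resp.json()["message"])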
Example #6
Source File: main.py    From transferlearning with MIT License
def finetune(model, dataloaders, optimizer, criterion, best_model_path, use_lr_schedule=False):
    N_EPOCH = args.epoch
    best_model_wts = copy.deepcopy(model.state_dict())
    since = time.time()
    best_acc = 0.0
    acc_hist = []

    for epoch in range(1, N_EPOCH + 1):
        if use_lr_schedule:
            lr_schedule(optimizer, epoch)
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            total_loss, correct = 0, 0
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                preds = torch.max(outputs, 1)[1]
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                total_loss += loss.item() * inputs.size(0)
                correct += torch.sum(preds == labels.data)
            epoch_loss = total_loss / len(dataloaders[phase].dataset)
            epoch_acc = correct.double() / len(dataloaders[phase].dataset)
            acc_hist.append([epoch_loss, epoch_acc])
            print('Epoch: [{:02d}/{:02d}]---{}, loss: {:.6f}, acc: {:.4f}'.format(epoch, N_EPOCH, phase, epoch_loss,
                                                                                  epoch_acc))
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                torch.save(model.state_dict(),
                           'save_model/best_{}_{}-{}.pth'.format(args.model_name, args.source, epoch))
    time_pass = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_pass // 60, time_pass % 60))
    print('------Best acc: {}'.format(best_acc))

    model.load_state_dict(best_model_wts)
    torch.save(model.state_dict(), best_model_path)
    print('Best model saved!')
    return model, best_acc, acc_hist
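A typical invocation might look like the following sketch; the model choice, hyperparameters, and the dataloaders dict are assumptions rather than details taken from main.py.

import torch
from torchvision import models

model = models.resnet50(pretrained=True).to(DEVICE)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
criterion = torch.nn.CrossEntropyLoss()
# dataloaders = {'train': ..., 'val': ...} as expected by finetune()
model, best_acc, acc_hist = finetune(model, dataloaders, optimizer, criterion,
                                     'save_model/best_model.pth')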


# Extract features from given intermediate layers.
# Currently this only works for ResNet, since AlexNet and VGGNet expose only
# 'features' and 'classifier' modules. For those, you will need to modify the
# forward function yourself so that it returns the intermediate features
# (for example, by returning features alongside the labels).
# Please follow digit_deep_network.py for reference.
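As a gentler alternative to editing the forward function, PyTorch forward hooks can capture intermediate ResNet activations. A minimal sketch follows; the layer names used are standard torchvision ResNet attributes.

import torch
from torchvision import models

features = {}

def save_activation(name):
    def hook(module, inputs, output):
        features[name] = output.detach()
    return hook

resnet = models.resnet50(pretrained=True).eval()
# Capture the last residual stage and the pooled representation.
resnet.layer4.register_forward_hook(save_activation("layer4"))
resnet.avgpool.register_forward_hook(save_activation("avgpool"))

with torch.no_grad():
    resnet(torch.randn(1, 3, 224, 224))
print({k: tuple(v.shape) for k, v in features.items()})
# {'layer4': (1, 2048, 7, 7), 'avgpool': (1, 2048, 1, 1)}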