Python tensorflow.get_logger() Examples

The following are 28 code examples of tensorflow.get_logger(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
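tf.get_logger() returns TensorFlow's module-level Python logging.Logger, so the standard logging API (setLevel, info, warning, handlers, and so on) applies to it. A minimal sketch of the typical pattern, independent of any particular project:

import logging
import tensorflow as tf

# tf.get_logger() returns the Python logger used by TensorFlow itself.
logger = tf.get_logger()

# Standard logging API: reduce Python-side TensorFlow output to warnings and above.
logger.setLevel(logging.WARNING)

# Messages use the usual printf-style formatting of the logging module.
logger.warning("Dataset has only %d examples", 42)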
Example #1
Source File: training.py    From OpenNMT-tf with MIT License
def __init__(self, variables, step, decay=0.9999):
    """Initializes the moving average object.

    Args:
      variables: The list of variables for which to maintain a moving average.
      step: The training step counter as a ``tf.Variable``.
      decay: The decay rate of the exponential moving average. Usually close to
        1, e.g. 0.9999, see the complete formula on
        https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.

    Raises:
      TypeError: if :obj:`step` is not a ``tf.Variable``.
    """
    if not isinstance(step, tf.Variable):
      raise TypeError("step should be a tf.Variable")
    if decay < 0.9 or decay > 1:
      tf.get_logger().warning("Moving average decay should be close to 1 (e.g. 0.9999) but you "
                              "passed %f, is it correct? See https://www.tensorflow.org/api_docs"
                              "/python/tf/train/ExponentialMovingAverage for details about the "
                              "formula and recommended decay values.")
    self._ema = tf.train.ExponentialMovingAverage(decay, num_updates=step)
    self._variables = variables
    self.update() 
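For reference, tf.train.ExponentialMovingAverage updates each shadow variable as shadow -= (1 - decay) * (shadow - value), i.e. shadow = decay * shadow + (1 - decay) * value, which is why decay values close to 1 are recommended. A small sketch of the arithmetic in plain Python (not part of OpenNMT-tf):

decay = 0.9999
shadow, value = 1.0, 0.5

# One EMA step: the shadow moves only a small fraction (1 - decay) toward the new value.
shadow -= (1 - decay) * (shadow - value)
print(shadow)  # 0.99995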
Example #2
Source File: hyperband_test.py    From keras-tuner with Apache License 2.0
def test_hyperband_integration(tmp_dir):
    tuner = hyperband_module.Hyperband(
        objective='val_loss',
        hypermodel=build_model,
        hyperband_iterations=2,
        max_epochs=6,
        factor=3,
        directory=tmp_dir)

    x, y = np.ones((2, 5)), np.ones((2, 1))
    tuner.search(x, y, validation_data=(x, y))

    # Make sure Oracle is registering new HPs.
    updated_hps = tuner.oracle.get_space().values
    assert 'units1' in updated_hps
    assert 'bias1' in updated_hps

    # Suppress warnings about optimizer state not being restored by tf.keras.
    tf.get_logger().setLevel(logging.ERROR)

    best_score = tuner.oracle.get_best_trials()[0].score
    best_model = tuner.get_best_models()[0]
    assert best_model.evaluate(x, y) == best_score 
Example #3
Source File: inference_with_ckpt.py    From CartoonGan-tensorflow with Apache License 2.0
def main(m_path, img_path, out_dir, light=False):
    logger = get_logger("inference")
    logger.info(f"generating image from {img_path}")
    try:
        g = Generator(light=light)
        g.load_weights(tf.train.latest_checkpoint(m_path))
    except ValueError as e:
        logger.error(e)
        logger.error("Failed to load specified weight.")
        logger.error("If you trained your model with --light, "
                     "consider adding --light when executing this script; otherwise, "
                     "do not add --light when executing this script.")
        exit(1)
    img = np.array(Image.open(img_path).convert("RGB"))
    img = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1
    out = ((g(img).numpy().squeeze() + 1) * 127.5).astype(np.uint8)
    if out_dir != "" and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if out_dir == "":
        out_dir = "."
    out_path = os.path.join(out_dir, os.path.split(img_path)[1])
    imwrite(out_path, out)
    logger.info(f"generated image saved to {out_path}") 
Example #4
Source File: inference_with_saved_model.py    From CartoonGan-tensorflow with Apache License 2.0
def main(m_path, img_path, out_dir):
    logger = get_logger("inference")
    logger.info(f"generating image from {img_path}")
    imported = tf.saved_model.load(m_path)
    f = imported.signatures["serving_default"]
    img = np.array(Image.open(img_path).convert("RGB"))
    img = np.expand_dims(img, 0).astype(np.float32) / 127.5 - 1
    out = f(tf.constant(img))['output_1']
    out = ((out.numpy().squeeze() + 1) * 127.5).astype(np.uint8)
    if out_dir != "" and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    if out_dir == "":
        out_dir = "."
    out_path = os.path.join(out_dir, os.path.split(img_path)[1])
    imwrite(out_path, out)
    logger.info(f"generated image saved to {out_path}") 
Example #5
Source File: beam_search_decoder.py    From addons with Apache License 2.0
def _check_static_batch_beam_maybe(shape, batch_size, beam_width):
    """Raises an exception if dimensions are known statically and can not be
    reshaped to [batch_size, beam_size, -1]."""
    reshaped_shape = tf.TensorShape([batch_size, beam_width, None])
    assert len(shape.dims) > 0
    if batch_size is None or shape[0] is None:
        return True  # not statically known => no check
    if shape[0] == batch_size * beam_width:
        return True  # flattened, matching
    has_second_dim = shape.ndims >= 2 and shape[1] is not None
    if has_second_dim and shape[0] == batch_size and shape[1] == beam_width:
        return True  # non-flattened, matching
    # Otherwise we could not find a match and warn:
    tf.get_logger().warn(
        "TensorArray reordering expects elements to be "
        "reshapable to %s which is incompatible with the "
        "current shape %s. Consider setting "
        "reorder_tensor_arrays to False to disable TensorArray "
        "reordering during the beam search." % (reshaped_shape, shape)
    )
    return False 
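A hedged usage sketch of the check above (the function is private to the module, so the calls below are for illustration only): leading dimensions that match either batch_size * beam_width or [batch_size, beam_width, ...] pass, anything else triggers the warning and returns False.

import tensorflow as tf

batch_size, beam_width = 4, 3

# Flattened leading dimension 4 * 3 = 12: passes.
print(_check_static_batch_beam_maybe(tf.TensorShape([12, 7]), batch_size, beam_width))    # True

# Explicit [batch_size, beam_width, depth]: passes.
print(_check_static_batch_beam_maybe(tf.TensorShape([4, 3, 7]), batch_size, beam_width))  # True

# Mismatched leading dimension: warns and returns False.
print(_check_static_batch_beam_maybe(tf.TensorShape([5, 7]), batch_size, beam_width))     # False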
Example #6
Source File: evaluation.py    From OpenNMT-tf with MIT License
def should_stop(self):
    """Returns ``True`` if early stopping conditions are met."""
    if self._early_stopping is None:
      return False
    target_metric = self._early_stopping.metric
    higher_is_better = self._is_higher_better_for_metric(target_metric)
    metrics = self._get_metric_history(target_metric)
    should_stop = early_stop(
        metrics,
        self._early_stopping.steps,
        min_improvement=self._early_stopping.min_improvement,
        higher_is_better=higher_is_better)
    if should_stop:
      tf.get_logger().warning(
          "Evaluation metric '%s' did not improve more than %f in the last %d evaluations",
          target_metric,
          self._early_stopping.min_improvement,
          self._early_stopping.steps)
    return should_stop 
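The early_stop helper itself is not shown in this example; the sketch below is a hypothetical illustration of the general idea (names and details are assumptions, not OpenNMT-tf's implementation): stop when the best value of the last N evaluations does not improve enough over the best value seen before them.

def simple_early_stop(metrics, steps, min_improvement=0.0, higher_is_better=False):
    # Not enough history yet to decide.
    if len(metrics) <= steps:
        return False
    values = list(metrics)
    if not higher_is_better:
        # Convert to a "higher is better" view by negating the values.
        values = [-v for v in values]
    best_before = max(values[:-steps])
    best_recent = max(values[-steps:])
    # Stop when the recent best did not improve enough over the earlier best.
    return best_recent - best_before < min_improvement

# Example: a validation loss (lower is better) that has stalled for the last 3 evaluations.
print(simple_early_stop([2.0, 1.5, 1.4, 1.41, 1.42, 1.40], steps=3, min_improvement=0.01))  # True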
Example #7
Source File: beam_search_decoder.py    From addons with Apache License 2.0
def _maybe_sort_array_beams(self, t, parent_ids, sequence_length):
        """Maybe sorts beams within a `TensorArray`.

        Args:
          t: A `TensorArray` of size `max_time` that contains `Tensor`s of
            shape `[batch_size, beam_width, s]` or
            `[batch_size * beam_width, s]` where `s` is the depth shape.
          parent_ids: The parent ids of shape
            `[max_time, batch_size, beam_width]`.
          sequence_length: The sequence length of shape
            `[batch_size, beam_width]`.

        Returns:
          A `TensorArray` where beams are sorted in each `Tensor` or `t` itself
            if it is not a `TensorArray` or does not meet shape requirements.
        """
        if not isinstance(t, tf.TensorArray):
            return t
        if t.element_shape.ndims is None or t.element_shape.ndims < 1:
            tf.get_logger().warn(
                "The TensorArray %s in the cell state is not amenable to "
                "sorting based on the beam search result. For a "
                "TensorArray to be sorted, its elements shape must be "
                "defined and have at least a rank of 1, but saw shape: %s"
                % (t.handle.name, t.element_shape)
            )
            return t
        if not _check_static_batch_beam_maybe(
            t.element_shape, tf.get_static_value(self._batch_size), self._beam_width
        ):
            return t
        t = t.stack()
        with tf.control_dependencies(
            [_check_batch_beam(t, self._batch_size, self._beam_width)]
        ):
            return gather_tree_from_array(t, parent_ids, sequence_length) 
Example #8
Source File: base_tuner.py    From keras-tuner with Apache License 2.0
def search(self, *fit_args, **fit_kwargs):
        """Performs a search for best hyperparameter configuations.

        # Arguments:
            *fit_args: Positional arguments that should be passed to
              `run_trial`, for example the training and validation data.
            **fit_kwargs: Keyword arguments that should be passed to
              `run_trial`, for example the training and validation data.
        """
        if 'verbose' in fit_kwargs:
            self._display.verbose = fit_kwargs.get('verbose')
        self.on_search_begin()
        while True:
            trial = self.oracle.create_trial(self.tuner_id)
            if trial.status == trial_module.TrialStatus.STOPPED:
                # Oracle triggered exit.
                tf.get_logger().info('Oracle triggered exit')
                break
            if trial.status == trial_module.TrialStatus.IDLE:
                # Oracle is calculating, resend request.
                continue

            self.on_trial_begin(trial)
            self.run_trial(trial, *fit_args, **fit_kwargs)
            self.on_trial_end(trial)
        self.on_search_end() 
Example #9
Source File: oracle.py    From keras-tuner with Apache License 2.0
def _set_project_dir(self, directory, project_name, overwrite=False):
        """Sets the project directory and reloads the Oracle."""
        self._directory = directory
        self._project_name = project_name
        if not overwrite and tf.io.gfile.exists(self._get_oracle_fname()):
            tf.get_logger().info('Reloading Oracle from existing project {}'.format(
                self._get_oracle_fname()))
            self.reload() 
Example #10
Source File: utils_tf.py    From pyslam with GNU General Public License v3.0
def set_tf_logging(logging_flag):
    print('setting tf logging:',logging_flag)
    if logging_flag:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
        tf.get_logger().setLevel("INFO")
    else:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
        tf.get_logger().setLevel("ERROR") 
Example #11
Source File: evaluation.py    From OpenNMT-tf with MIT License
def _record_results(self, step, results):
    # Clear history for steps that are greater than step.
    while self._metrics_history and self._metrics_history[-1][0] > step:
      self._metrics_history.pop()
    self._metrics_history.append((step, dict(results)))
    tf.get_logger().info(
        "Evaluation result for step %d: %s",
        step,
        " ; ".join("%s = %f" % (k, v) for k, v in results.items()))
    with self._summary_writer.as_default():
      for key, value in results.items():
        tf.summary.scalar("%s/%s" % (_SUMMARIES_SCOPE, key), value, step=step)
      self._summary_writer.flush() 
Example #12
Source File: export.py    From CartoonGan-tensorflow with Apache License 2.0
def main(m_path, out_dir, light):
    logger = get_logger("export")
    try:
        g = Generator(light=light)
        g.load_weights(tf.train.latest_checkpoint(m_path))
        t = tf.keras.Input(shape=[None, None, 3], batch_size=None)
        g(t, training=False)
        g.summary()
    except ValueError as e:
        logger.error(e)
        logger.error("Failed to load specified weight.")
        logger.error("If you trained your model with --light, "
                     "consider adding --light when executing this script; otherwise, "
                     "do not add --light when executing this script.")
        exit(1)
    m_num = 0
    smd = os.path.join(out_dir, "SavedModel")
    tfmd = os.path.join(out_dir, "tfjs_model")
    if light:
        smd += "Light"
        tfmd += "_light"
    saved_model_dir = f"{smd}_{m_num:04d}"
    tfjs_model_dir = f"{tfmd}_{m_num:04d}"
    while os.path.exists(saved_model_dir):
        m_num += 1
        saved_model_dir = f"{smd}_{m_num:04d}"
        tfjs_model_dir = f"{tfmd}_{m_num:04d}"
    tf.saved_model.save(g, saved_model_dir)
    cmd = ['tensorflowjs_converter', '--input_format', 'tf_saved_model',
           '--output_format', 'tfjs_graph_model', saved_model_dir, tfjs_model_dir]
    logger.info(" ".join(cmd))
    exit_code = Popen(cmd).wait()
    if exit_code == 0:
        logger.info(f"Model converted to {saved_model_dir} and {tfjs_model_dir} successfully")
    else:
        logger.error("tfjs model conversion failed") 
Example #13
Source File: evaluation.py    From OpenNMT-tf with MIT License
def _maybe_export(self, step, results):
    if self._export_on_best is None or not self.is_best(self._export_on_best):
      return
    export_dir = os.path.join(self._export_dir, str(step))
    tf.get_logger().info("Exporting model to %s (best %s so far: %f)",
                         export_dir, self._export_on_best, results[self._export_on_best])
    self._model.export(export_dir, exporter=self._exporter) 
Example #14
Source File: checkpoint.py    From OpenNMT-tf with MIT License
def save(self, step=None):
    """Saves a checkpoint.

    Args:
      step: The step to save for. If ``None``, get the value from ``optimizer.iterations``.

    Returns:
      The path to the saved checkpoint.
    """
    if step is None:
      step = self._optimizer.iterations
    path = self._checkpoint_manager.save(checkpoint_number=step)
    tf.get_logger().info("Saved checkpoint %s", path)
    return path 
Example #15
Source File: main.py    From OpenNMT-tf with MIT License
def _set_log_level(log_level):
  tf.get_logger().setLevel(log_level)
  os.environ["TF_CPP_MIN_LOG_LEVEL"] = str(_PYTHON_TO_TENSORFLOW_LOGGING_LEVEL[log_level]) 
Example #16
Source File: custom_transformer_training.py    From OpenNMT-tf with MIT License
def main():
  parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument("run", choices=["train", "translate"],
                      help="Run type.")
  parser.add_argument("--src", required=True,
                      help="Path to the source file.")
  parser.add_argument("--tgt",
                      help="Path to the target file.")
  parser.add_argument("--src_vocab", required=True,
                      help="Path to the source vocabulary.")
  parser.add_argument("--tgt_vocab", required=True,
                      help="Path to the target vocabulary.")
  parser.add_argument("--model_dir", default="checkpoint",
                      help="Directory where checkpoint are written.")
  args = parser.parse_args()

  data_config = {
      "source_vocabulary": args.src_vocab,
      "target_vocabulary": args.tgt_vocab
  }

  model.initialize(data_config)

  checkpoint_manager = tf.train.CheckpointManager(checkpoint, args.model_dir, max_to_keep=5)
  if checkpoint_manager.latest_checkpoint is not None:
    tf.get_logger().info("Restoring parameters from %s", checkpoint_manager.latest_checkpoint)
    checkpoint.restore(checkpoint_manager.latest_checkpoint)

  if args.run == "train":
    train(args.src, args.tgt, checkpoint_manager)
  elif args.run == "translate":
    translate(args.src) 
Example #17
Source File: training.py    From OpenNMT-tf with MIT License
def _report_training_status(step,
                            loss,
                            learning_rate,
                            words_counters,
                            last_report_step,
                            last_report_time):
  elapsed_time = time.time() - last_report_time

  steps_per_sec = (step - last_report_step) / elapsed_time
  tf.summary.scalar("steps_per_sec", steps_per_sec, description="Training steps per second")
  steps_per_sec_fmt = "steps/s = %0.2f" % steps_per_sec

  words_per_sec_fmt = []
  for name, counter in words_counters.items():
    avg = int(counter.numpy() / elapsed_time)
    tf.summary.scalar(
        "words_per_sec/%s" % name,
        avg,
        description="%s words per second" % name.capitalize())
    words_per_sec_fmt.append("%s words/s = %d" % (name, avg))

  if isinstance(learning_rate, tf.optimizers.schedules.LearningRateSchedule):
    learning_rate = learning_rate(step)
  elif isinstance(learning_rate, tf.Variable):
    learning_rate = learning_rate.value()

  tf.get_logger().info(
      "Step = %d ; %s ; Learning rate = %f ; Loss = %f",
      step,
      ", ".join([steps_per_sec_fmt] + list(sorted(words_per_sec_fmt))),
      learning_rate,
      loss)
  tf.summary.scalar("loss", loss, description="Training loss")
  tf.summary.scalar("optim/learning_rate", learning_rate, description="Learning rate") 
Example #18
Source File: training.py    From OpenNMT-tf with MIT License
def _run_model(self, source, target):
    """Computes the loss of the given source and target pair.

    Args:
      source: A nested structure of tensors.
      target: A nested structure of tensors.

    Returns:
      A tuple containing,

      - The loss to compute the gradients.
      - The loss to report.
    """
    first_call = not self._model.built
    outputs, _ = self._model(
        source,
        labels=target,
        training=True,
        step=self._optimizer.iterations)
    loss = self._model.compute_loss(outputs, target, training=True)
    if isinstance(loss, tuple):
      training_loss = loss[0] / loss[1]
      reported_loss = loss[0] / loss[2] if len(loss) > 2 else training_loss
    else:
      training_loss, reported_loss = loss, loss
    training_loss = self._model.regularize_loss(
        training_loss, variables=self._model.trainable_variables)
    self._update_words_counter("source", source)
    if not self._model.unsupervised:
      self._update_words_counter("target", target)
    if first_call and self._is_master:
      if self._checkpoint is not None:
        self._model.visualize(self._checkpoint.model_dir)
      tf.get_logger().info("Number of model parameters: %d", self._model.count_params())
      tf.get_logger().info(
          "Number of model weights: %d (trainable = %d, non trainable = %d)",
          len(self._model.weights),
          len(self._model.trainable_weights),
          len(self._model.non_trainable_weights))
    return training_loss, reported_loss 
Example #19
Source File: sequence_to_sequence.py    From OpenNMT-tf with MIT License
def compute_loss(self, outputs, labels, training=True):
    params = self.params
    if not isinstance(outputs, dict):
      outputs = dict(logits=outputs)
    logits = outputs["logits"]
    noisy_logits = outputs.get("noisy_logits")
    attention = outputs.get("attention")
    if noisy_logits is not None and params.get("contrastive_learning"):
      return losses.max_margin_loss(
          logits,
          labels["ids_out"],
          labels["length"],
          noisy_logits,
          labels["noisy_ids_out"],
          labels["noisy_length"],
          eta=params.get("max_margin_eta", 0.1))
    loss, loss_normalizer, loss_token_normalizer = losses.cross_entropy_sequence_loss(
        logits,
        labels["ids_out"],
        labels["length"],
        label_smoothing=params.get("label_smoothing", 0.0),
        average_in_time=params.get("average_loss_in_time", False),
        training=training)
    if training:
      gold_alignments = labels.get("alignment")
      guided_alignment_type = params.get("guided_alignment_type")
      if gold_alignments is not None and guided_alignment_type is not None:
        if attention is None:
          tf.get_logger().warning("This model did not return attention vectors; "
                                  "guided alignment will not be applied")
        else:
          loss += losses.guided_alignment_cost(
              attention[:, :-1],  # Do not constrain last timestep.
              gold_alignments,
              sequence_length=self.labels_inputter.get_length(labels, ignore_special_tokens=True),
              cost_type=guided_alignment_type,
              weight=params.get("guided_alignment_weight", 1))
    return loss, loss_normalizer, loss_token_normalizer 
Example #20
Source File: runner.py    From OpenNMT-tf with MIT License
def __init__(self,
               model,
               config,
               auto_config=False,
               mixed_precision=False,
               seed=None):
    """Initializes the runner parameters.

    Args:
      model: A :class:`opennmt.models.Model` instance to run or a callable that
        returns such instance.
      config: The run configuration.
      auto_config: If ``True``, use automatic configuration values defined by
        :obj:`model`.
      mixed_precision: Enable mixed precision.
      seed: The random seed to set.

    Raises:
      TypeError: if :obj:`model` is not a :class:`opennmt.models.Model` instance
        or a callable.
    """
    if isinstance(model, models.Model):
      self._model = model
      self._model_fn = lambda: misc.clone_layer(model)
    elif callable(model):
      self._model = model()
      self._model_fn = model
    else:
      raise TypeError("model should be a opennmt.models.Model instance or a callable")
    tf.get_logger().info("Using model:\n%s", self._model)
    self._optimizer = None
    self._config = copy.deepcopy(config)
    self._auto_config = auto_config
    self._mixed_precision = mixed_precision
    if mixed_precision:
      tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    if seed is not None:
      np.random.seed(seed)
      random.seed(seed)
      tf.random.set_seed(seed) 
Example #21
Source File: exporters.py    From OpenNMT-tf with MIT License
def export(self, model, export_dir):
    """Exports :obj:`model` to :obj:`export_dir`.

    Raises:
      ValueError: if :obj:`model` is not supported by this exporter.
    """
    self._export_model(model, export_dir)
    with tempfile.TemporaryDirectory() as tmp_dir:
      extra_assets = model.export_assets(tmp_dir)
      if extra_assets:
        assets_extra = os.path.join(export_dir, "assets.extra")
        tf.io.gfile.makedirs(assets_extra)
        for filename, path in extra_assets.items():
          tf.io.gfile.copy(path, os.path.join(assets_extra, filename), overwrite=True)
        tf.get_logger().info("Extra assets written to: %s", assets_extra) 
Example #22
Source File: checkpoint.py    From OpenNMT-tf with MIT License
def average_checkpoints_into_layer(checkpoints, layer, layer_prefix):
  """Updates the layer weights with their average value in the checkpoints.

  Args:
    checkpoints: A non empty list of checkpoint paths.
    layer: A ``tf.keras.layers.Layer`` instance.
    layer_prefix: The name/scope that prefixes the layer variables names in the
      checkpoints.

  Raises:
    ValueError: if :obj:`checkpoints` is empty.
    ValueError: if :obj:`layer` is not already built.

  See Also:
    :func:`opennmt.utils.average_checkpoints`
  """
  if not checkpoints:
    raise ValueError("There should be at least one checkpoint")
  if not layer.built:
    raise ValueError("The layer should be built before calling this function")

  # Reset the layer variables to 0.
  for variable in layer.variables:
    variable.assign(tf.zeros_like(variable))

  # Get a map from variable names in the checkpoint to variables in the layer.
  _, names_to_variables = misc.get_variables_name_mapping(layer, root_key=layer_prefix)

  num_checkpoints = len(checkpoints)
  tf.get_logger().info("Averaging %d checkpoints...", num_checkpoints)
  for checkpoint_path in checkpoints:
    tf.get_logger().info("Reading checkpoint %s...", checkpoint_path)
    reader = tf.train.load_checkpoint(checkpoint_path)
    for path in reader.get_variable_to_shape_map().keys():
      if not path.startswith(layer_prefix) or ".OPTIMIZER_SLOT" in path:
        continue
      variable = names_to_variables[path]
      value = reader.get_tensor(path)
      variable.assign_add(value / num_checkpoints) 
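The averaging is incremental: every variable is first zeroed, then accumulates value / num_checkpoints for each checkpoint, which yields the arithmetic mean without keeping all checkpoints in memory at once. A minimal numeric sketch of the same idea:

import numpy as np

checkpoint_values = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
num_checkpoints = len(checkpoint_values)

# Start from zero and accumulate each contribution, as the layer variables do above.
average = np.zeros_like(checkpoint_values[0])
for value in checkpoint_values:
    average += value / num_checkpoints

print(average)  # [3. 4.] -- equal to np.mean(checkpoint_values, axis=0)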
Example #23
Source File: train.py    From pycorrector with Apache License 2.0
def main(model_dir='',
         src_train_path='',
         tgt_train_path='',
         src_vocab_path='',
         tgt_vocab_path='',
         batch_size=32,
         maximum_length=100,
         train_steps=10000,
         save_every=1000,
         report_every=50):
    data_reader = CGEDReader(src_train_path)
    src_input_texts = data_reader.build_dataset(src_train_path)
    tgt_input_texts = data_reader.build_dataset(tgt_train_path)

    # load or save word dict
    if not os.path.exists(src_vocab_path):
        print('Training data...')
        print('input_texts:', src_input_texts[0])
        print('target_texts:', tgt_input_texts[0])
        max_input_texts_len = max([len(text) for text in src_input_texts])

        print('num of samples:', len(src_input_texts))
        print('max sequence length for inputs:', max_input_texts_len)

        src_vocab = data_reader.read_vocab(src_input_texts)
        id2char = {i: j for i, j in enumerate(src_vocab)}
        char2id = {j: i for i, j in id2char.items()}
        save_word_dict(char2id, src_vocab_path)

        tgt_vocab = data_reader.read_vocab(tgt_input_texts)
        id2char = {i: j for i, j in enumerate(tgt_vocab)}
        char2id = {j: i for i, j in id2char.items()}
        save_word_dict(char2id, tgt_vocab_path)

    data_config = {
        "source_vocabulary": src_vocab_path,
        "target_vocabulary": tgt_vocab_path
    }

    model.initialize(data_config)

    checkpoint_manager = tf.train.CheckpointManager(checkpoint, model_dir, max_to_keep=5)
    if checkpoint_manager.latest_checkpoint is not None:
        tf.get_logger().info("Restoring parameters from %s", checkpoint_manager.latest_checkpoint)
        checkpoint.restore(checkpoint_manager.latest_checkpoint)

    train(src_train_path, tgt_train_path, checkpoint_manager,
          batch_size=batch_size,
          maximum_length=maximum_length,
          train_steps=train_steps,
          save_every=save_every,
          report_every=report_every) 
Example #24
Source File: config.py    From OpenNMT-tf with MIT License
def load_model(model_dir,
               model_file=None,
               model_name=None,
               serialize_model=True,
               as_builder=False):
  """Loads the model from the catalog or a definition file.

  Args:
    model_dir: The model directory.
    model_file: An optional model configuration.
      Mutually exclusive with :obj:`model_name`.
    model_name: An optional model name from the catalog.
      Mutually exclusive with :obj:`model_file`.
    serialize_model: Serialize the model definition in the model directory to
      make it optional for future runs.
    as_builder: If ``True``, return a callable building the model on call.

  Returns:
    A :class:`opennmt.models.Model` instance or a callable returning such
    instance.

  Raises:
    ValueError: if both :obj:`model_file` and :obj:`model_name` are set.
  """
  if model_file and model_name:
    raise ValueError("only one of model_file and model_name should be set")
  model_name_or_path = model_file or model_name
  model_description_path = os.path.join(model_dir, "model_description.py")

  if model_name_or_path:
    if tf.train.latest_checkpoint(model_dir) is not None:
      tf.get_logger().warning(
          "You provided a model configuration but a checkpoint already exists. "
          "The model configuration must define the same model as the one used for "
          "the initial training. However, you can change non structural values like "
          "dropout.")

    if model_file:
      model = load_model_from_file(model_file, as_builder=as_builder)
      if serialize_model:
        tf.io.gfile.copy(model_file, model_description_path, overwrite=True)
    elif model_name:
      model = load_model_from_catalog(model_name, as_builder=as_builder)
      if serialize_model:
        with tf.io.gfile.GFile(model_description_path, mode="w") as model_description_file:
          model_description_file.write(
              "from opennmt import models\n"
              "model = lambda: models.get_model_from_catalog(\"%s\")\n" % model_name)
  elif tf.io.gfile.exists(model_description_path):
    tf.get_logger().info("Loading model description from %s", model_description_path)
    model = load_model_from_file(model_description_path, as_builder=as_builder)
  else:
    raise RuntimeError("A model configuration is required: you probably need to "
                       "set --model or --model_type on the command line.")

  return model 
Example #25
Source File: test.py    From arcface-tf2 with MIT License
def main(_argv):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu

    logger = tf.get_logger()
    logger.disabled = True
    logger.setLevel(logging.FATAL)
    set_memory_growth()

    cfg = load_yaml(FLAGS.cfg_path)

    model = ArcFaceModel(size=cfg['input_size'],
                         backbone_type=cfg['backbone_type'],
                         training=False)

    ckpt_path = tf.train.latest_checkpoint('./checkpoints/' + cfg['sub_name'])
    if ckpt_path is not None:
        print("[*] load ckpt from {}".format(ckpt_path))
        model.load_weights(ckpt_path)
    else:
        print("[*] Cannot find ckpt from {}.".format(ckpt_path))
        exit()

    if FLAGS.img_path:
        print("[*] Encode {} to ./output_embeds.npy".format(FLAGS.img_path))
        img = cv2.imread(FLAGS.img_path)
        img = cv2.resize(img, (cfg['input_size'], cfg['input_size']))
        img = img.astype(np.float32) / 255.
        if len(img.shape) == 3:
            img = np.expand_dims(img, 0)
        embeds = l2_norm(model(img))
        np.save('./output_embeds.npy', embeds)
    else:
        print("[*] Loading LFW, AgeDB30 and CFP-FP...")
        lfw, agedb_30, cfp_fp, lfw_issame, agedb_30_issame, cfp_fp_issame = \
            get_val_data(cfg['test_dataset'])

        print("[*] Perform Evaluation on LFW...")
        acc_lfw, best_th = perform_val(
            cfg['embd_shape'], cfg['batch_size'], model, lfw, lfw_issame,
            is_ccrop=cfg['is_ccrop'])
        print("    acc {:.4f}, th: {:.2f}".format(acc_lfw, best_th))

        print("[*] Perform Evaluation on AgeDB30...")
        acc_agedb30, best_th = perform_val(
            cfg['embd_shape'], cfg['batch_size'], model, agedb_30,
            agedb_30_issame, is_ccrop=cfg['is_ccrop'])
        print("    acc {:.4f}, th: {:.2f}".format(acc_agedb30, best_th))

        print("[*] Perform Evaluation on CFP-FP...")
        acc_cfp_fp, best_th = perform_val(
            cfg['embd_shape'], cfg['batch_size'], model, cfp_fp, cfp_fp_issame,
            is_ccrop=cfg['is_ccrop'])
        print("    acc {:.4f}, th: {:.2f}".format(acc_cfp_fp, best_th)) 
Example #26
Source File: base_tuner.py    From keras-tuner with Apache License 2.0
def __init__(self,
                 oracle,
                 hypermodel,
                 directory=None,
                 project_name=None,
                 logger=None,
                 overwrite=False):
        # Ops and metadata
        self.directory = directory or '.'
        self.project_name = project_name or 'untitled_project'
        if overwrite and tf.io.gfile.exists(self.project_dir):
            tf.io.gfile.rmtree(self.project_dir)

        if not isinstance(oracle, oracle_module.Oracle):
            raise ValueError('Expected oracle to be '
                             'an instance of Oracle, got: %s' % (oracle,))
        self.oracle = oracle
        self.oracle._set_project_dir(
            self.directory, self.project_name, overwrite=overwrite)

        # Run in distributed mode.
        if dist_utils.is_chief_oracle():
            # Blocks forever.
            oracle_chief.start_server(self.oracle)
        elif dist_utils.has_chief_oracle():
            # Proxies requests to the chief oracle.
            self.oracle = oracle_client.OracleClient(self.oracle)

        # To support tuning distribution.
        self.tuner_id = os.environ.get('KERASTUNER_TUNER_ID', 'tuner0')

        self.hypermodel = hm_module.get_hypermodel(hypermodel)

        # Logs etc
        self.logger = logger
        self._display = tuner_utils.Display(oracle=self.oracle)

        self._populate_initial_space()

        if not overwrite and tf.io.gfile.exists(self._get_tuner_fname()):
            tf.get_logger().info('Reloading Tuner from {}'.format(
                self._get_tuner_fname()))
            self.reload() 
Example #27
Source File: distribute_test.py    From keras-tuner with Apache License 2.0
def test_random_search(tmp_dir):
    # TensorFlow model building and execution is not thread-safe.
    num_workers = 1

    def _test_random_search():
        def build_model(hp):
            model = keras.Sequential()
            model.add(keras.layers.Dense(3, input_shape=(5,)))
            for i in range(hp.Int('num_layers', 1, 3)):
                model.add(keras.layers.Dense(
                    hp.Int('num_units_%i' % i, 1, 3),
                    activation='relu'))
            model.add(keras.layers.Dense(1, activation='sigmoid'))
            model.compile('sgd', 'binary_crossentropy')
            return model

        x = np.random.uniform(-1, 1, size=(2, 5))
        y = np.ones((2, 1))

        tuner = kt.tuners.RandomSearch(
            hypermodel=build_model,
            objective='val_loss',
            max_trials=10,
            directory=tmp_dir)

        # Only worker makes it to this point, server runs until thread stops.
        assert dist_utils.has_chief_oracle()
        assert not dist_utils.is_chief_oracle()
        assert isinstance(tuner.oracle, kt.distribute.oracle_client.OracleClient)

        tuner.search(x, y, validation_data=(x, y), epochs=1, batch_size=2)

        # Suppress warnings about optimizer state not being restored by tf.keras.
        tf.get_logger().setLevel(logging.ERROR)

        trials = tuner.oracle.get_best_trials(2)
        assert trials[0].score <= trials[1].score

        models = tuner.get_best_models(2)
        assert models[0].evaluate(x, y) <= models[1].evaluate(x, y)

    mock_distribute.mock_distribute(_test_random_search, num_workers) 
Example #28
Source File: checkpoint.py    From OpenNMT-tf with MIT License
def restore(self, checkpoint_path=None, weights_only=False):
    """Restores a checkpoint.

    Args:
      checkpoint_path: Path to a checkpoint to restore. If not set, the latest
        checkpoint from :obj:`model_dir` will be restored.
      weights_only: Only restore model weights.

    Returns:
      Path to the restored checkpoint.
    """
    if weights_only:
      checkpoint = tf.train.Checkpoint(model=self._model)
    else:
      checkpoint = self._checkpoint
    if checkpoint_path is not None:
      if tf.io.gfile.isdir(checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
    elif self._checkpoint_manager.latest_checkpoint is not None:
      checkpoint_path = self._checkpoint_manager.latest_checkpoint
    if checkpoint_path is None:
      tf.get_logger().warning("No checkpoint to restore in %s", self._model_dir)
      return None
    if is_v1_checkpoint(checkpoint_path):
      tf.get_logger().info("Upgrading V1 checkpoint...")
      # Work with copies of model and optimizer as the downstream task might
      # need to create the variable differently (e.g. under a distribution
      # strategy scope).
      tmp_model = misc.clone_layer(self._model)
      tmp_optimizer = copy.deepcopy(self._optimizer) if self._optimizer is not None else None
      tmp_model.create_variables(optimizer=tmp_optimizer)
      step = _restore_v1_checkpoint(
          checkpoint_path, tmp_model, optimizer=tmp_optimizer)
      # Save an updated checkpoint in the model directory and restore this one instead.
      tmp_checkpoint = Checkpoint(
          tmp_model, optimizer=tmp_optimizer, model_dir=self._model_dir)
      checkpoint_path = tmp_checkpoint.save(step)
      return self.restore(checkpoint_path=checkpoint_path, weights_only=weights_only)
    load_status = checkpoint.restore(checkpoint_path)
    if weights_only:
      load_status.expect_partial()
    tf.get_logger().info("Restored checkpoint %s", checkpoint_path)
    return checkpoint_path