Python tensorflow.compat.v1.global_variables() Examples

The following are 30 code examples of tensorflow.compat.v1.global_variables(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the module tensorflow.compat.v1.
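Taken together, the examples show two calling patterns: tf.compat.v1.global_variables() with no arguments returns every Variable in the graph's global-variables collection, while the optional scope argument, a regular expression matched against variable names with re.match, filters the result (see Examples #1, #9, #10, and #24 onward). A minimal sketch of both patterns, with illustrative variable names:

import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
  with tf.variable_scope("encoder"):
    tf.get_variable("w", shape=[3, 3])
  tf.get_variable("bias", shape=[3])

  # All variables in this graph's GLOBAL_VARIABLES collection.
  print([v.name for v in tf.global_variables()])   # ['encoder/w:0', 'bias:0']
  # Only variables whose names match the scope regex.
  print([v.name for v in tf.global_variables("encoder")])  # ['encoder/w:0']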
Example #1
Source File: player_utils.py    From tensor2tensor with Apache License 2.0
def __init__(self, hparams, action_space, observation_space, policy_dir):
    assert hparams.base_algo == "ppo"
    ppo_hparams = trainer_lib.create_hparams(hparams.base_algo_params)

    frame_stack_shape = (1, hparams.frame_stack_size) + observation_space.shape
    self._frame_stack = np.zeros(frame_stack_shape, dtype=np.uint8)

    with tf.Graph().as_default():
      self.obs_t = tf.placeholder(shape=frame_stack_shape, dtype=np.uint8)
      self.logits_t, self.value_function_t = get_policy(
          self.obs_t, ppo_hparams, action_space
      )
      model_saver = tf.train.Saver(
          tf.global_variables(scope=ppo_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
      )
      self.sess = tf.Session()
      self.sess.run(tf.global_variables_initializer())
      trainer_lib.restore_checkpoint(policy_dir, model_saver,
                                     self.sess) 
Example #2
Source File: flop_regularizer_test.py    From morph-net with Apache License 2.0
def BuildModel(self):
    # Our test model is:
    #
    #         -> conv1 --+     -> conv3 -->
    #        /           |    /
    #  image          [concat]
    #        \           |    \
    #         -> conv2 --+     -> conv4 -->
    #
    # (the model has two "outputs", conv3 and conv4).
    #

    # op.name: 'Const'
    image = tf.constant(0.0, shape=[1, 17, 19, NUM_CHANNELS])
    # op.name: 'conv1/Conv2D'
    self.conv1 = slim.layers.conv2d(
        image, 13, [7, 5], padding='SAME', scope='conv1')
    self.conv2 = slim.layers.conv2d(
        image, 23, [1, 1], padding='SAME', scope='conv2')
    self.concat = tf.concat([self.conv1, self.conv2], 3)
    self.conv3 = slim.layers.conv2d(
        self.concat, 29, [3, 3], stride=2, padding='SAME', scope='conv3')
    self.conv4 = slim.layers.conv2d(
        self.concat, 31, [1, 1], stride=1, padding='SAME', scope='conv4')
    self.name_to_var = {v.op.name: v for v in tf.global_variables()} 
Example #3
Source File: latency_regularizer_test.py    From morph-net with Apache License 2.0
def build_model(self):
    # Our test model is:
    #
    #         -> conv1 --+     -> conv3 -->
    #        /           |    /
    #  image          [concat]
    #        \           |    \
    #         -> conv2 --+     -> conv4 -->
    #
    # (the model has two "outputs", conv3 and conv4).
    #
    image = tf.constant(0.0, shape=[1, 17, 19, NUM_CHANNELS])
    conv1 = slim.layers.conv2d(image, 13, [7, 5], padding='SAME', scope='conv1')
    conv2 = slim.layers.conv2d(image, 23, [1, 1], padding='SAME', scope='conv2')
    concat = tf.concat([conv1, conv2], 3)
    self.conv3 = slim.layers.conv2d(
        concat, 29, [3, 3], stride=2, padding='SAME', scope='conv3')
    self.conv4 = slim.layers.conv2d(
        concat, 31, [1, 1], stride=1, padding='SAME', scope='conv4')
    self.name_to_var = {v.op.name: v for v in tf.global_variables()}

    self.regularizer = latency_regularizer.GammaLatencyRegularizer(
        [self.conv3.op, self.conv4.op],
        gamma_threshold=0.45, hardware=HARDWARE) 
Example #4
Source File: post_training_quantization.py    From models with Apache License 2.0
def restore_model(sess, checkpoint_path, enable_ema=True):
  """Restore variables from the checkpoint into the provided session.

  Args:
    sess: A tensorflow session where the checkpoint will be loaded.
    checkpoint_path: Path to the trained checkpoint.
    enable_ema: (optional) Whether to load the exponential moving average (ema)
      version of the tensorflow variables. Defaults to True.
  """
  if enable_ema:
    ema = tf.train.ExponentialMovingAverage(decay=0.0)
    ema_vars = tf.trainable_variables() + tf.get_collection("moving_vars")
    for v in tf.global_variables():
      if "moving_mean" in v.name or "moving_variance" in v.name:
        ema_vars.append(v)
    ema_vars = list(set(ema_vars))
    var_dict = ema.variables_to_restore(ema_vars)
  else:
    var_dict = None

  sess.run(tf.global_variables_initializer())
  saver = tf.train.Saver(var_dict, max_to_keep=1)
  saver.restore(sess, checkpoint_path) 
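A minimal calling sketch for restore_model, assuming the inference graph has already been built into the session's graph; the checkpoint path and the build_eval_graph helper below are illustrative, not part of the original source:

with tf.Graph().as_default():
  build_eval_graph()  # hypothetical helper that constructs the model graph
  with tf.Session() as sess:
    restore_model(sess, "/tmp/model.ckpt", enable_ema=True)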
Example #5
Source File: common_layers.py    From tensor2tensor with Apache License 2.0
def underlying_variable(t):
  """Find the underlying tf.Variable object.

  Args:
    t: a Tensor

  Returns:
    tf.Variable.
  """
  t = underlying_variable_ref(t)
  assert t is not None
  # make sure that the graph has a variable index and that it is up-to-date
  if not hasattr(tf.get_default_graph(), "var_index"):
    tf.get_default_graph().var_index = {}
  var_index = tf.get_default_graph().var_index
  for v in tf.global_variables()[len(var_index):]:
    var_index[v.name] = v
  return var_index[t.name] 
Example #6
Source File: transformer_test.py    From tensor2tensor with Apache License 2.0
def testVarNames(self):
    with tf.Graph().as_default():
      model, features = get_model(
          mode=tf.estimator.ModeKeys.PREDICT,
          model_cls=transformer.TransformerScorer)
      _ = model.infer(features)
      scorer_vars = [v.name for v in tf.global_variables()]

    with tf.Graph().as_default():
      model, features = get_model(
          mode=tf.estimator.ModeKeys.EVAL,
          model_cls=transformer.TransformerScorer)
      _ = model(features)
      scorer_eval_vars = [v.name for v in tf.global_variables()]

    with tf.Graph().as_default():
      model, features = get_model(
          mode=tf.estimator.ModeKeys.EVAL,
          model_cls=transformer.Transformer)
      _ = model(features)
      transformer_vars = [v.name for v in tf.global_variables()]

    self.assertEqual(sorted(scorer_vars), sorted(transformer_vars))
    self.assertEqual(sorted(scorer_eval_vars), sorted(transformer_vars)) 
Example #7
Source File: utils_tf.py    From pyslam with GNU General Public License v3.0
def recoverer(sess, model_path, meta_graph_path=None):
    """
    Recover parameters from a pretrained model.
    Args:
        sess: The tensorflow session instance.
        model_path: Checkpoint file path.
        meta_graph_path: (optional) Meta graph file path; if None, a Saver
            over tf.global_variables() is used.
    Returns:
        Nothing
    """
    if meta_graph_path is None:
        restore_var = tf.global_variables()
        restorer = tf.train.Saver(restore_var)
    else:
        restorer = tf.train.import_meta_graph(meta_graph_path)
    restorer.restore(sess, model_path)


# from https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
# 0 = all messages are logged (default behavior)
# 1 = INFO messages are not printed
# 2 = INFO and WARNING messages are not printed
# 3 = INFO, WARNING, and ERROR messages are not printed 
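Per the comments above, the verbosity level is selected through the TF_CPP_MIN_LOG_LEVEL environment variable, and it must be set before TensorFlow is imported for the C++ logging to pick it up; a minimal sketch:

import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # suppress INFO and WARNING messages
import tensorflow.compat.v1 as tf  # imported after setting the variable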
Example #8
Source File: model_size_regularizer_test.py    From morph-net with Apache License 2.0
def testLossCostDecorated(self):
    params = {'trainable': True, 'normalizer_fn': slim.batch_norm,
              'normalizer_params': {'scale': True}}

    with slim.arg_scope([slim.layers.conv2d], **params):
      image = tf.constant(0.0, shape=[1, 1, 1, NUM_CHANNELS])
      conv1 = slim.layers.conv2d(
          image, 2, [1, 1], padding='SAME', scope='conv1')
    with self.cached_session():
      tf.global_variables_initializer().run()
      name_to_var = {v.op.name: v for v in tf.global_variables()}
      gamma1 = name_to_var['conv1/BatchNorm/gamma']
      gamma1.assign([1] * 2).eval()

    self.gamma_flop_reg = model_size_regularizer.GammaModelSizeRegularizer(
        [conv1.op],
        gamma_threshold=0.1,
        regularizer_decorator=dummy_decorator.DummyDecorator,
        decorator_parameters={'scale': 0.5})

    conv = self.get_conv('conv1')
    self.assertEqual(_coeff(conv) * 3 * 1, self.loss([conv]))
    self.assertEqual(_coeff(conv) * 2 * NUM_CHANNELS, self.cost([conv])) 
Example #9
Source File: rl_utils.py    From tensor2tensor with Apache License 2.0
def __init__(
      self, batch_size, observation_space, action_space, policy_hparams,
      policy_dir, sampling_temp
  ):
    super(PolicyAgent, self).__init__(
        batch_size, observation_space, action_space
    )
    self._sampling_temp = sampling_temp
    with tf.Graph().as_default():
      self._observations_t = tf.placeholder(
          shape=((batch_size,) + self.observation_space.shape),
          dtype=self.observation_space.dtype
      )
      (logits, self._values_t) = rl.get_policy(
          self._observations_t, policy_hparams, self.action_space
      )
      actions = common_layers.sample_with_temperature(logits, sampling_temp)
      self._probs_t = tf.nn.softmax(logits / sampling_temp)
      self._actions_t = tf.cast(actions, tf.int32)
      model_saver = tf.train.Saver(
          tf.global_variables(policy_hparams.policy_network + "/.*")  # pylint: disable=unexpected-keyword-arg
      )
      self._sess = tf.Session()
      self._sess.run(tf.global_variables_initializer())
      trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess) 
Example #10
Source File: ppo_learner.py    From tensor2tensor with Apache License 2.0
def evaluate(self, env_fn, hparams, sampling_temp):
    with tf.Graph().as_default():
      with tf.name_scope("rl_eval"):
        eval_env = env_fn(in_graph=True)
        (collect_memory, _, collect_init) = _define_collect(
            eval_env,
            hparams,
            "ppo_eval",
            eval_phase=True,
            frame_stack_size=self.frame_stack_size,
            force_beginning_resets=False,
            sampling_temp=sampling_temp,
            distributional_size=self._distributional_size,
        )
        model_saver = tf.train.Saver(
            tf.global_variables(hparams.policy_network + "/.*")
            # tf.global_variables("clean_scope.*")  # Needed for sharing params.
        )

        with tf.Session() as sess:
          sess.run(tf.global_variables_initializer())
          collect_init(sess)
          trainer_lib.restore_checkpoint(self.agent_model_dir, model_saver,
                                         sess)
          sess.run(collect_memory) 
Example #11
Source File: variable_mgr.py    From benchmarks with Apache License 2.0
def savable_variables(self):
    """Returns a list/dict of savable variables to pass to tf.train.Saver."""
    params = {}
    for v in tf.global_variables():
      assert (v.name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0/')
              or v.name in ('global_step:0', 'loss_scale:0',
                            'loss_scale_normal_steps:0')), (
                                'Invalid global variable: %s' % v)
      # We store variables in the checkpoint with the shadow variable prefix
      # removed so we can evaluate checkpoints in non-distributed replicated
      # mode. The checkpoints can also be loaded for training in
      # distributed_replicated mode.
      name = self._strip_port(self._remove_shadow_var_prefix_if_present(v.name))
      params[name] = v
    for v in tf.local_variables():
      # Non-trainable variables, such as batch norm moving averages, do not have
      # corresponding global shadow variables, so we add them here. Trainable
      # local variables have corresponding global shadow variables, which were
      # added in the global variable loop above.
      if v.name.startswith('v0/') and v not in tf.trainable_variables():
        params[self._strip_port(v.name)] = v
    return params 
Example #12
Source File: variable_mgr.py    From benchmarks with Apache License 2.0
def get_post_init_ops(self):
    # Copy initialized variables for variables on the parameter server
    # to the local copy of the variable.

    local_vars = tf.local_variables()
    local_var_by_name = dict(
        [(self._strip_port(v.name), v) for v in local_vars])
    post_init_ops = []
    for v in tf.global_variables():
      if v.name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0/'):
        prefix = self._strip_port(
            v.name[len(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0'):])
        for i in range(self.benchmark_cnn.num_gpus):
          name = 'v%s%s' % (i, prefix)
          if name in local_var_by_name:
            copy_to = local_var_by_name[name]
            post_init_ops.append(copy_to.assign(v.read_value()))
    return post_init_ops 
Example #13
Source File: rnn_test.py    From magenta with Apache License 2.0
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    # REMARKS: factory(scope) is a function accepting a scope as an
    #          argument; the scope can be None, a string, or a
    #          VariableScope instance.
    with self.session(use_gpu=True, graph=tf.Graph()):
      if use_outer_scope:
        with tf.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # Check that all the variable names start with the proper scope.
      tf.global_variables_initializer()
      all_vars = tf.global_variables()
      prefix = prefix or "stack_bidirectional_rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf.logging.info("StackRNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf.logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars)) 
Example #14
Source File: gansynth_train.py    From magenta with Apache License 2.0
def run(config):
  """Entry point to run training."""
  init_data_normalizer(config)

  stage_ids = train_util.get_stage_ids(**config)
  if not config['train_progressive']:
    stage_ids = list(stage_ids)[-1:]

  # Train one stage at a time
  for stage_id in stage_ids:
    batch_size = train_util.get_batch_size(stage_id, **config)
    tf.reset_default_graph()
    with tf.device(tf.train.replica_device_setter(config['ps_tasks'])):
      model = lib_model.Model(stage_id, batch_size, config)
      model.add_summaries()
      print('Variables:')
      for v in tf.global_variables():
        print('\t', v.name, v.get_shape().as_list())
      logging.info('Calling train.train')
      train_util.train(model, **config) 
Example #15
Source File: variables_helper.py    From models with Apache License 2.0
def get_global_variables_safely():
  """If not executing eagerly, returns tf.global_variables().

  Raises a ValueError if eager execution is enabled,
  because the variables are not tracked when executing eagerly.

  If executing eagerly, use a Keras model's .variables property instead.

  Returns:
    The result of tf.global_variables()
  """
  with tf.init_scope():
    if tf.executing_eagerly():
      raise ValueError("Global variables collection is not tracked when "
                       "executing eagerly. Use a Keras model's `.variables` "
                       "attribute instead.")
  return tf.global_variables() 
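For eager code paths, the docstring's suggestion amounts to reading variables off a tracked Keras model rather than a global collection; a minimal sketch (the toy model below is illustrative):

model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
model.build(input_shape=(None, 8))
variables = model.variables  # tracked on the object, not via a graph collection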
Example #16
Source File: utils.py    From EfficientNet-PyTorch with Apache License 2.0
def get_ema_vars():
  """Get all exponential moving average (ema) variables."""
  ema_vars = tf.trainable_variables() + tf.get_collection('moving_vars')
  for v in tf.global_variables():
    # We maintain moving averages for batch norm mean and variance as well.
    if 'moving_mean' in v.name or 'moving_variance' in v.name:
      ema_vars.append(v)
  return list(set(ema_vars)) 
Example #17
Source File: rl_tuner_ops.py    From magenta with Apache License 2.0
def get_variable_names(graph, scope):
  """Finds all the variable names in a graph that begin with a given scope.

  Args:
    graph: A tensorflow graph.
    scope: A string scope.
  Returns:
    List of variable names that begin with the given scope.
  """
  with graph.as_default():
    return [v.name for v in tf.global_variables() if v.name.startswith(scope)] 
Example #18
Source File: util.py    From nni with MIT License
def initialize():
    """Initialize all the uninitialized variables in the global scope."""
    new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
    get_session().run(tf.variables_initializer(new_variables))

    ALREADY_INITIALIZED.update(new_variables) 
Example #19
Source File: utils.py    From Object_Detection_Tracking with Apache License 2.0
def get_ema_vars():
  """Get all exponential moving average (ema) variables."""
  ema_vars = tf.trainable_variables() + tf.get_collection('moving_vars')
  for v in tf.global_variables():
    # We maintain moving averages for batch norm mean and variance as well.
    if 'moving_mean' in v.name or 'moving_variance' in v.name:
      ema_vars.append(v)
  return list(set(ema_vars)) 
Example #20
Source File: utils.py    From rigl with Apache License 2.0
def initialize_parameters_from_ckpt(ckpt_path, model_dir, param_suffixes):
  """Load parameters from an existing checkpoint.

  Args:
    ckpt_path: str, loads the mask variables from this checkpoint.
    model_dir: str, if checkpoint exists in this folder no-op.
    param_suffixes: str or tuple of strs, suffix(es) of the parameters to load
      from the checkpoint (passed to str.endswith).
  """
  already_has_ckpt = model_dir and tf.train.latest_checkpoint(
      model_dir) is not None
  if already_has_ckpt:
    tf.logging.info(
        'Training already started on this model, not loading masks from '
        'previously trained model')
    return

  reader = tf.train.NewCheckpointReader(ckpt_path)
  param_names = reader.get_variable_to_shape_map().keys()
  param_names = [x for x in param_names if x.endswith(param_suffixes)]

  variable_map = {}
  for var in tf.global_variables():
    var_name = var.name.split(':')[0]
    if var_name in param_names:
      tf.logging.info('Loading parameter variable from checkpoint: %s',
                      var_name)
      variable_map[var_name] = var
    elif var_name.endswith(param_suffixes):
      tf.logging.info(
          'Cannot find parameter variable in checkpoint, skipping: %s',
          var_name)
  tf.train.init_from_checkpoint(ckpt_path, variable_map) 
Example #21
Source File: note_rnn_loader.py    From magenta with Apache License 2.0
def variables(self):
    """Gets names of all the variables in the graph belonging to this model.

    Returns:
      List of variable names.
    """
    with self.graph.as_default():
      return [v for v in tf.global_variables() if v.name.startswith(self.scope)] 
Example #22
Source File: export_checkpoints.py    From albert with Apache License 2.0
def main(_):
  sess = tf.Session()
  tf.train.get_or_create_global_step()
  sess = build_model(sess)
  my_vars = []
  for var in tf.global_variables():
    if "lamb_v" not in var.name and "lamb_m" not in var.name:
      my_vars.append(var)
  saver = tf.train.Saver(my_vars)
  saver.save(sess, FLAGS.export_path) 
Example #23
Source File: mobilenet_v3_test.py    From models with Apache License 2.0
def assertVariablesHaveNormalizerFn(self, use_groupnorm):
    global_variables = [v.name for v in tf.global_variables()]
    has_batch_norm = False
    has_group_norm = False
    for global_variable in global_variables:
      if 'BatchNorm' in global_variable:
        has_batch_norm = True
      if 'GroupNorm' in global_variable:
        has_group_norm = True
    if use_groupnorm:
      self.assertFalse(has_batch_norm)
      self.assertTrue(has_group_norm)
    else:
      self.assertTrue(has_batch_norm)
      self.assertFalse(has_group_norm) 
Example #24
Source File: inception_resnet_v2_test.py    From models with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
      inception.inception_resnet_v2(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) 
Example #25
Source File: inception_resnet_v2_test.py    From models with Apache License 2.0
def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(
        inception.inception_resnet_v2_arg_scope(batch_norm_scale=True)):
      inception.inception_resnet_v2(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name
        for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names) 
Example #26
Source File: inception_v4_test.py    From models with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_v4_arg_scope()):
      inception.inception_v4(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) 
Example #27
Source File: inception_v2_test.py    From models with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_v2_arg_scope()):
      inception.inception_v2(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) 
Example #28
Source File: inception_v2_test.py    From models with Apache License 2.0
def testBatchNormScale(self):
    height, width = 224, 224
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(
        inception.inception_v2_arg_scope(batch_norm_scale=True)):
      inception.inception_v2(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name
        for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names) 
Example #29
Source File: inception_v3_test.py    From models with Apache License 2.0
def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      inception.inception_v3(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) 
Example #30
Source File: inception_v3_test.py    From models with Apache License 2.0
def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(
        inception.inception_v3_arg_scope(batch_norm_scale=True)):
      inception.inception_v3(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name
        for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)