Python tensorflow.model_variables() Examples

The following are 30 code examples of tensorflow.model_variables(), collected from open-source projects. Each example notes its original project, source file, and license; you may also want to check out the other available functions and classes of the tensorflow module.
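Before the examples, a minimal primer sketch (assuming a TensorFlow 1.x environment with tf.contrib.slim): tf.model_variables() returns the contents of the tf.GraphKeys.MODEL_VARIABLES collection, which slim layers populate automatically.

import tensorflow as tf
import tensorflow.contrib.slim as slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')  # adds its weights/biases to MODEL_VARIABLES

for var in tf.model_variables():
    print(var.name)  # e.g. conv1/weights:0, conv1/biases:0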
Example #1
Source File: base_model.py    From hfnet with MIT License
def _checkpoint_var_search(self, checkpoint_path):
        reader = tf.train.NewCheckpointReader(checkpoint_path)
        saved_shapes = reader.get_variable_to_shape_map()
        model_names = tf.model_variables()  # Used by tf.slim layers
        if not len(tf.model_variables()):
            model_names = tf.global_variables()  # Fallback when slim is not used
        model_names = set([v.name.split(':')[0] for v in model_names])
        checkpoint_names = set(saved_shapes.keys())
        found_names = model_names & checkpoint_names
        missing_names = model_names - checkpoint_names
        shape_conflicts = set()
        restored = []
        with tf.variable_scope('', reuse=True):
            for name in found_names:
                var = tf.get_variable(name)
                var_shape = var.get_shape().as_list()
                if var_shape == saved_shapes[name]:
                    restored.append(var)
                else:
                    shape_conflicts.add(name)
        found_names -= shape_conflicts
        return (restored, sorted(found_names),
                sorted(missing_names), sorted(shape_conflicts)) 
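A hedged usage sketch for the helper above (the session wiring and variable names are illustrative, not from the hfnet source): the returned tuple can drive a partial restore that skips missing or shape-conflicting variables.

restored, found, missing, conflicts = self._checkpoint_var_search(checkpoint_path)
print('Restoring %d variables (%d missing, %d shape conflicts)'
      % (len(restored), len(missing), len(conflicts)))
saver = tf.train.Saver(restored)
saver.restore(sess, checkpoint_path)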
Example #2
Source File: model_utils.py    From eccv18_mtvae with MIT License
def get_init_fn(scopes, init_model):
  """Initialize assigment operator function used while training."""
  if not init_model:
    return None

  for var in tf.trainable_variables():
    if not (var in tf.model_variables()):
      tf.contrib.framework.add_model_variable(var)
   
  is_trainable = lambda x: x in tf.trainable_variables()
  var_list = []
  for scope in scopes:
    var_list.extend(
      filter(is_trainable, tf.contrib.framework.get_model_variables(scope)))
    
  init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
    init_model, var_list)
  
  def init_assign_function(sess):
    sess.run(init_assign_op, init_feed_dict)

  return init_assign_function 
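A hedged wiring sketch: the function returned by get_init_fn is typically passed as init_fn to slim's training loop (the scope names, checkpoint path, and logdir below are illustrative):

init_fn = get_init_fn(scopes=['encoder', 'decoder'], init_model='/path/to/init.ckpt')
slim.learning.train(train_op, logdir='/tmp/train_logs', init_fn=init_fn)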
Example #3
Source File: train_Solver_VCOCO_pose_pattern_inD_more_positive.py    From Transferable-Interactiveness-Network with MIT License
def from_previous_ckpt(self,sess):

        sess.run(tf.global_variables_initializer())
        for var in tf.trainable_variables(): # trainable weights, we need surgery
            print(var.name, var.eval().mean())

        print('Restoring model snapshots from {:s}'.format(self.pretrained_model))
        saver_t = [var for var in tf.model_variables()]

        self.saver_restore = tf.train.Saver(saver_t)
        self.saver_restore.restore(sess, self.pretrained_model)

        print("the variables is being trained now \n")
        for var in tf.trainable_variables():
           print(var.name, var.eval().mean()) 
Example #4
Source File: train_Solver_HICO_pose_pattern_inD_more_positive_coslr.py    From Transferable-Interactiveness-Network with MIT License
def from_previous_ckpt(self,sess):

        sess.run(tf.global_variables_initializer())
        for var in tf.trainable_variables(): # trainable weights, we need surgery
            print(var.name, var.eval().mean())

        print('Restoring model snapshots from {:s}'.format(self.pretrained_model))
        saver_t = [var for var in tf.model_variables() if 'fc_binary' not in var.name
                   and 'binary_classification' not in var.name
                   and 'conv1_pose_map' not in var.name
                   and 'pool1_pose_map' not in var.name
                   and 'conv2_pose_map' not in var.name
                   and 'pool2_pose_map' not in var.name]

        self.saver_restore = tf.train.Saver(saver_t)
        self.saver_restore.restore(sess, self.pretrained_model)

        print("the variables is being trained now \n")
        for var in tf.trainable_variables():
           print(var.name, var.eval().mean()) 
Example #5
Source File: base_model.py    From hierarchical_loc with BSD 3-Clause "New" or "Revised" License
def _checkpoint_var_search(self, checkpoint_path):
        reader = tf.train.NewCheckpointReader(checkpoint_path)
        saved_shapes = reader.get_variable_to_shape_map()
        model_names = tf.model_variables()  # Used by tf.slim layers
        if not len(tf.model_variables()):
            model_names = tf.global_variables()  # Fallback when slim is not used
        model_names = set([v.name.split(':')[0] for v in model_names])
        checkpoint_names = set(saved_shapes.keys())
        found_names = model_names & checkpoint_names
        missing_names = model_names - checkpoint_names
        shape_conflicts = set()
        restored = []
        with tf.variable_scope('', reuse=True):
            for name in found_names:
                var = tf.get_variable(name)
                var_shape = var.get_shape().as_list()
                if var_shape == saved_shapes[name]:
                    restored.append(var)
                else:
                    shape_conflicts.add(name)
        found_names -= shape_conflicts
        return (restored, sorted(found_names),
                sorted(missing_names), sorted(shape_conflicts)) 
Example #6
Source File: slam.py    From DeepV2D with BSD 3-Clause "New" or "Revised" License
def __init__(self, cfg, ckpt, n_keyframes=1, rate=2, use_fcrn=True, 
            viz=True, mode='global', image_dims=[None, 480, 640]):
        
        self.cfg = cfg
        self.ckpt = ckpt

        self.viz = viz
        self.mode = mode
        self.use_fcrn = use_fcrn
        self.image_dims = image_dims

        self.index = 0
        self.keyframe_inds = []

        self.images = []
        self.depths = []
        self.poses = []

        # tracking config parameters
        self.n_keyframes = n_keyframes # number of keyframes to use
        self.rate = rate # how often to sample new frames
        self.window = 3  # add edges if frames are within distance

        # build tensorflow graphs
        self.outputs = {}
        self._create_placeholders()
        self._build_motion_graph()
        self._build_depth_graph()
        self._build_reprojection_graph()
        self._build_visibility_graph()
        self._build_point_cloud_graph()

        if self.use_fcrn:
            self._build_fcrn_graph()

        self.saver = tf.train.Saver(tf.model_variables()) 
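A hedged sketch of how a saver built over tf.model_variables(), as above, is typically used (the session setup is illustrative, not from the DeepV2D source):

sess = tf.Session()
sess.run(tf.global_variables_initializer())  # initialize any non-model variables
self.saver.restore(sess, self.ckpt)          # then restore the model variables from the checkpoint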
Example #7
Source File: distributed.py    From tensorpack with Apache License 2.0
def _get_sync_model_vars_op(self):
        """
        Get the op to sync local model_variables to PS.
        """
        ops = []
        for (shadow_v, local_v) in self._shadow_model_vars:
            ops.append(shadow_v.assign(local_v.read_value()))
        assert len(ops)
        return tf.group(*ops, name='sync_{}_model_variables_to_ps'.format(len(ops))) 
Example #8
Source File: distributed.py    From tensorpack with Apache License 2.0
def _shadow_model_variables(shadow_vars):
        """
        Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.

        Returns:
            list of (shadow_model_var, local_model_var) used for syncing.
        """
        G = tf.get_default_graph()
        curr_shadow_vars = {v.name for v in shadow_vars}
        model_vars = tf.model_variables()
        shadow_model_vars = []
        for v in model_vars:
            assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!"
            stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
            if stripped_op_name in curr_shadow_vars:
                continue
            try:
                G.get_tensor_by_name(stripped_var_name)
                logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name))
                continue
            except KeyError:
                pass
            new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype,
                                    initializer=v.initial_value,
                                    trainable=False)

            curr_shadow_vars.add(stripped_op_name)  # avoid duplicated shadow_model_vars
            shadow_vars.append(new_v)
            shadow_model_vars.append((new_v, v))  # only need to sync model_var from one tower
        return shadow_model_vars 
Example #9
Source File: train_utils.py    From models with Apache License 2.0
def get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier):
  """Gets the gradient multipliers.

  The gradient multipliers will adjust the learning rates for model
  variables. For the task of semantic segmentation, the models are
  usually fine-tuned from the models trained on the task of image
  classification. To fine-tune the models, we usually set larger (e.g.,
  10 times larger) learning rate for the parameters of last layer.

  Args:
    last_layers: Scopes of last layers.
    last_layer_gradient_multiplier: The gradient multiplier for last layers.

  Returns:
    The gradient multiplier map with variables as key, and multipliers as value.
  """
  gradient_multipliers = {}

  for var in tf.model_variables():
    # Double the learning rate for biases.
    if 'biases' in var.op.name:
      gradient_multipliers[var.op.name] = 2.

    # Use larger learning rate for last layer variables.
    for layer in last_layers:
      if layer in var.op.name and 'biases' in var.op.name:
        gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier
        break
      elif layer in var.op.name:
        gradient_multipliers[var.op.name] = last_layer_gradient_multiplier
        break

  return gradient_multipliers 
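A hedged sketch of consuming the multiplier map (assuming tf.contrib.slim is available; slim.learning.multiply_gradients accepts a map keyed by variable op names, and loss/last_layers here are illustrative):

optimizer = tf.train.MomentumOptimizer(learning_rate=1e-3, momentum=0.9)
grads_and_vars = optimizer.compute_gradients(loss)
multipliers = get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier=10.0)
grads_and_vars = slim.learning.multiply_gradients(grads_and_vars, multipliers)
train_op = optimizer.apply_gradients(grads_and_vars)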
Example #10
Source File: osmn.py    From video_seg with Apache License 2.0
def extract_sp_params(model_params, checkpoint_file, result_path, config=None):
    
    if config is None:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        # config.log_device_placement = True
        config.allow_soft_placement = True
    tf.logging.set_verbosity(tf.logging.INFO)
    batch_size = 1
    guide_image = tf.placeholder(tf.float32, [batch_size, None, None, 3])
    gb_image = tf.placeholder(tf.float32, [batch_size, None, None, 1])
    input_image = tf.placeholder(tf.float32, [batch_size, None, None, 3])

    # Create the cnn
    net, end_points = osmn([guide_image, gb_image, input_image], model_params, is_training=False)
    saver = tf.train.Saver([v for v in tf.global_variables() if '-up' not in v.name])
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, checkpoint_file)
        with tf.variable_scope("osmn", reuse=True):
            for v in tf.model_variables():
                print(v.name)
            sp_variables = []
            for layer_id in range(3, 6):
                layer_name = 'modulator_sp/conv%d/weights' % (layer_id)
                v = tf.get_variable(layer_name)
                sp_variables.append(v)
            res = sess.run(sp_variables)
            for layer_id in range(3):
                np.save(os.path.join(result_path, 'sp_params_%d' % (layer_id+3)), 
                        res[layer_id]) 
Example #11
Source File: model_utils.py    From eccv18_mtvae with MIT License
def get_train_op_for_scope(loss, optimizer, scopes, clip_gradient_norm):
  """Train operation function for the given scope used for training."""
  for var in tf.trainable_variables():
    if not (var in tf.model_variables()):
      tf.contrib.framework.add_model_variable(var)

  is_trainable = lambda x: x in tf.trainable_variables()

  var_list = []
  update_ops = []
  
  for scope in scopes:
    var_list.extend(
      filter(is_trainable, tf.contrib.framework.get_model_variables(scope)))
    update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
    
    for var in tf.contrib.framework.get_model_variables(scope):
      print('%s\t%s' % (scope, var))

    #print('Trainable parameters %s' % tf.contrib.framework.get_model_variables(scope))
  return slim.learning.create_train_op(
    loss,
    optimizer,
    update_ops=update_ops,
    variables_to_train=var_list,
    clip_gradient_norm=clip_gradient_norm) 
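A hedged wiring sketch for the scope-restricted train op above (the optimizer, loss, and scope names are illustrative):

optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
train_op = get_train_op_for_scope(loss, optimizer, scopes=['decoder'], clip_gradient_norm=5.0)
slim.learning.train(train_op, logdir='/tmp/train_logs')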
Example #12
Source File: train_Solver_VCOCO_pose_pattern_inD_more_positive.py    From Transferable-Interactiveness-Network with MIT License
def from_best_trained_model(self, sess):

        sess.run(tf.global_variables_initializer())
        for var in tf.trainable_variables():
            print(var.name, var.eval().mean())

        print('Restoring model snapshots from {:s}'.format(self.pretrained_model))
        saver_t = [var for var in tf.model_variables() if 'fc_binary' not in var.name
                   and 'binary_classification' not in var.name
                   and 'conv1_pose_map' not in var.name
                   and 'pool1_pose_map' not in var.name
                   and 'conv2_pose_map' not in var.name
                   and 'pool2_pose_map' not in var.name]

        for var in tf.trainable_variables():
            print(var.name, var.eval().mean())

        self.saver_restore = tf.train.Saver(saver_t)
        self.saver_restore.restore(sess, self.pretrained_model)


        print("the variables is being trained now \n")
        for var in tf.trainable_variables():
           print(var.name, var.eval().mean()) 
Example #13
Source File: train_Solver_HICO_pose_pattern_inD_more_positive_coslr.py    From Transferable-Interactiveness-Network with MIT License
def from_best_trained_model(self, sess):

        sess.run(tf.global_variables_initializer())
        for var in tf.trainable_variables(): # trainable weights, we need surgery
            print(var.name, var.eval().mean())

        print('Restoring model snapshots from {:s}'.format(self.pretrained_model))
        saver_t = [var for var in tf.model_variables() if 'fc_binary' not in var.name
                   and 'binary_classification' not in var.name
                   and 'conv1_pose_map' not in var.name
                   and 'pool1_pose_map' not in var.name
                   and 'conv2_pose_map' not in var.name
                   and 'pool2_pose_map' not in var.name]

        for var in tf.trainable_variables():
            print(var.name, var.eval().mean())

        self.saver_restore = tf.train.Saver(saver_t)
        self.saver_restore.restore(sess, self.pretrained_model)


        print("the variables is being trained now \n")
        for var in tf.trainable_variables():
           print(var.name, var.eval().mean()) 
Example #14
Source File: slam_kitti.py    From DeepV2D with BSD 3-Clause "New" or "Revised" License
def __init__(self, cfg, ckpt, n_keyframes=2, rate=2, use_fcrn=True, 
            viz=True, mode='global', image_dims=[None, 192, 1088]):
        
        self.cfg = cfg
        self.ckpt = ckpt

        self.viz = viz
        self.mode = mode
        self.use_fcrn = use_fcrn
        self.image_dims = image_dims

        self.index = 0
        self.keyframe_inds = []

        self.images = []
        self.depths = []
        self.poses = []

        # tracking config parameters
        self.n_keyframes = n_keyframes # number of keyframes to use
        self.rate = rate # how often to sample new frames
        self.window = 3  # add edges if frames are within distance

        # build tensorflow graphs
        self.outputs = {}
        self._create_placeholders()
        self._build_motion_graph()
        self._build_depth_graph()
        self._build_reprojection_graph()
        self._build_point_cloud_graph()

        self.saver = tf.train.Saver(tf.model_variables()) 
Example #15
Source File: train_utils.py    From Master-R-CNN with Apache License 2.0
def get_var_list_to_restore():
  """Choose which vars to restore, ignore vars by setting --checkpoint_exclude_scopes """

  variables_to_restore = []
  if FLAGS.checkpoint_exclude_scopes is not None:
    exclusions = [scope.strip()
                  for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

    # build restore list
    for var in tf.model_variables():
      for exclusion in exclusions:
        if var.name.startswith(exclusion):
          break
      else:
        variables_to_restore.append(var)
  else:
    variables_to_restore = tf.model_variables()

  variables_to_restore_final = []
  if FLAGS.checkpoint_include_scopes is not None:
      includes = [
              scope.strip()
              for scope in FLAGS.checkpoint_include_scopes.split(',')
              ]
      for var in variables_to_restore:
          for include in includes:
              if var.name.startswith(include):
                  variables_to_restore_final.append(var)
                  break
  else:
      variables_to_restore_final = variables_to_restore

  return variables_to_restore_final 
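A hedged restore sketch using the list built above (FLAGS.pretrained_model is a hypothetical checkpoint-path flag, not one defined in this repo):

vars_to_restore = get_var_list_to_restore()
restorer = tf.train.Saver(vars_to_restore)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    restorer.restore(sess, FLAGS.pretrained_model)  # hypothetical checkpoint flag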
Example #16
Source File: train_utils.py    From FastFPN with Apache License 2.0
def get_var_list_to_restore():
  """Choose which vars to restore, ignore vars by setting --checkpoint_exclude_scopes """

  variables_to_restore = []
  if FLAGS.checkpoint_exclude_scopes is not None:
    exclusions = [scope.strip()
                  for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

    # build restore list
    for var in tf.model_variables():
      for exclusion in exclusions:
        if var.name.startswith(exclusion):
          break
      else:
        variables_to_restore.append(var)
  else:
    variables_to_restore = tf.model_variables()

  variables_to_restore_final = []
  if FLAGS.checkpoint_include_scopes is not None:
      includes = [
              scope.strip()
              for scope in FLAGS.checkpoint_include_scopes.split(',')
              ]
      for var in variables_to_restore:
          for include in includes:
              if var.name.startswith(include):
                  variables_to_restore_final.append(var)
                  break
  else:
      variables_to_restore_final = variables_to_restore

  return variables_to_restore_final 
Example #17
Source File: train_utils.py    From FastMaskRCNN with Apache License 2.0
def get_var_list_to_restore():
  """Choose which vars to restore, ignore vars by setting --checkpoint_exclude_scopes """

  variables_to_restore = []
  if FLAGS.checkpoint_exclude_scopes is not None:
    exclusions = [scope.strip()
                  for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

    # build restore list
    for var in tf.model_variables():
      for exclusion in exclusions:
        if var.name.startswith(exclusion):
          break
      else:
        variables_to_restore.append(var)
  else:
    variables_to_restore = tf.model_variables()

  variables_to_restore_final = []
  if FLAGS.checkpoint_include_scopes is not None:
      includes = [
              scope.strip()
              for scope in FLAGS.checkpoint_include_scopes.split(',')
              ]
      for var in variables_to_restore:
          for include in includes:
              if var.name.startswith(include):
                  variables_to_restore_final.append(var)
                  break
  else:
      variables_to_restore_final = variables_to_restore

  return variables_to_restore_final 
Example #18
Source File: train_utils.py    From FastMaskRCNN with Apache License 2.0
def get_var_list_to_restore():
  """Choosing which vars to restore, ignore vars by setting --checkpoint_exclude_scopes """

  variables_to_restore = []
  if FLAGS.checkpoint_exclude_scopes is not None:
    exclusions = [scope.strip()
                  for scope in FLAGS.checkpoint_exclude_scopes.split(',')]

    # build restore list
    for var in tf.model_variables():
      excluded = False
      for exclusion in exclusions:
        if var.name.startswith(exclusion):
          excluded = True
          break
      if not excluded:
        variables_to_restore.append(var)
  else:
    variables_to_restore = tf.model_variables()

  variables_to_restore_final = []
  if FLAGS.checkpoint_include_scopes is not None:
      includes = [
              scope.strip()
              for scope in FLAGS.checkpoint_include_scopes.split(',')
              ]
      for var in variables_to_restore:
          included = False
          for include in includes:
              if var.name.startswith(include):
                  included = True
                  break
          if included:
              variables_to_restore_final.append(var)
  else:
      variables_to_restore_final = variables_to_restore

  return variables_to_restore_final 
Example #19
Source File: deepv2d.py    From DeepV2D with BSD 3-Clause "New" or "Revised" License
def __init__(self, cfg, ckpt, 
                 is_calibrated=True, 
                 use_fcrn=False, 
                 use_regressor=True, 
                 image_dims=None,
                 mode='keyframe'):

        self.cfg = cfg
        self.ckpt = ckpt 
        self.mode = mode

        self.use_fcrn = use_fcrn
        self.use_regressor = use_regressor
        self.is_calibrated = is_calibrated

        if image_dims is not None:
            self.image_dims = image_dims
        else:
            if cfg.STRUCTURE.MODE == 'concat':
                self.image_dims = [cfg.INPUT.FRAMES, cfg.INPUT.HEIGHT, cfg.INPUT.WIDTH]
            else:
                self.image_dims = [None, cfg.INPUT.HEIGHT, cfg.INPUT.WIDTH]

        self.outputs = {}
        self._create_placeholders()
        self._build_motion_graph()
        self._build_depth_graph()
        self._build_reprojection_graph()
        self._build_visibility_graph()
        self._build_point_cloud_graph()

        self.depths = []
        self.poses = []

        if self.use_fcrn:
            self._build_fcrn_graph()

        self.saver = tf.train.Saver(tf.model_variables()) 
Example #20
Source File: train_utils.py    From MAX-Image-Segmenter with Apache License 2.0
def get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier):
  """Gets the gradient multipliers.

  The gradient multipliers will adjust the learning rates for model
  variables. For the task of semantic segmentation, the models are
  usually fine-tuned from the models trained on the task of image
  classification. To fine-tune the models, we usually set larger (e.g.,
  10 times larger) learning rate for the parameters of last layer.

  Args:
    last_layers: Scopes of last layers.
    last_layer_gradient_multiplier: The gradient multiplier for last layers.

  Returns:
    The gradient multiplier map with variables as key, and multipliers as value.
  """
  gradient_multipliers = {}

  for var in tf.model_variables():
    # Double the learning rate for biases.
    if 'biases' in var.op.name:
      gradient_multipliers[var.op.name] = 2.

    # Use larger learning rate for last layer variables.
    for layer in last_layers:
      if layer in var.op.name and 'biases' in var.op.name:
        gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier
        break
      elif layer in var.op.name:
        gradient_multipliers[var.op.name] = last_layer_gradient_multiplier
        break

  return gradient_multipliers 
Example #21
Source File: train.py    From MAX-Image-Segmenter with Apache License 2.0
def _log_summaries(input_image, label, num_of_classes, output):
  """Logs the summaries for the model.

  Args:
    input_image: Input image of the model. Its shape is [batch_size, height,
      width, channel].
    label: Label of the image. Its shape is [batch_size, height, width].
    num_of_classes: The number of classes of the dataset.
    output: Output of the model. Its shape is [batch_size, height, width].
  """
  # Add summaries for model variables.
  for model_var in tf.model_variables():
    tf.summary.histogram(model_var.op.name, model_var)

  # Add summaries for images, labels, semantic predictions.
  if FLAGS.save_summaries_images:
    tf.summary.image('samples/%s' % common.IMAGE, input_image)

    # Scale up summary image pixel values for better visualization.
    pixel_scaling = max(1, 255 // num_of_classes)
    summary_label = tf.cast(label * pixel_scaling, tf.uint8)
    tf.summary.image('samples/%s' % common.LABEL, summary_label)

    predictions = tf.expand_dims(tf.argmax(output, 3), -1)
    summary_predictions = tf.cast(predictions * pixel_scaling, tf.uint8)
    tf.summary.image('samples/%s' % common.OUTPUT_TYPE, summary_predictions) 
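A hedged, self-contained sketch of how histograms registered over tf.model_variables() reach TensorBoard (the variable and log directory are illustrative):

import tensorflow as tf

x = tf.get_variable('x', shape=[10], initializer=tf.zeros_initializer())
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, x)  # register x as a model variable
for model_var in tf.model_variables():
    tf.summary.histogram(model_var.op.name, model_var)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/tb_demo', sess.graph)
    writer.add_summary(sess.run(summary_op), global_step=0)
    writer.close()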
Example #22
Source File: distributed.py    From petridishnn with MIT License
def _shadow_model_variables(shadow_vars):
        """
        Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.

        Returns:
            list of (shadow_model_var, local_model_var) used for syncing.
        """
        G = tf.get_default_graph()
        curr_shadow_vars = set([v.name for v in shadow_vars])
        model_vars = tf.model_variables()
        shadow_model_vars = []
        for v in model_vars:
            assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!"
            stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
            if stripped_op_name in curr_shadow_vars:
                continue
            try:
                G.get_tensor_by_name(stripped_var_name)
                logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name))
                continue
            except KeyError:
                pass
            new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype,
                                    initializer=v.initial_value,
                                    trainable=False)

            curr_shadow_vars.add(stripped_op_name)  # avoid duplicated shadow_model_vars
            shadow_vars.append(new_v)
            shadow_model_vars.append((new_v, v))  # only need to sync model_var from one tower
        return shadow_model_vars 
Example #23
Source File: distributed.py    From petridishnn with MIT License
def _get_sync_model_vars_op(self):
        """
        Get the op to sync local model_variables to PS.
        """
        ops = []
        for (shadow_v, local_v) in self._shadow_model_vars:
            ops.append(shadow_v.assign(local_v.read_value()))
        assert len(ops)
        return tf.group(*ops, name='sync_{}_model_variables_to_ps'.format(len(ops))) 
Example #24
Source File: distributed.py    From ADL with MIT License
def _shadow_model_variables(shadow_vars):
        """
        Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.

        Returns:
            list of (shadow_model_var, local_model_var) used for syncing.
        """
        G = tf.get_default_graph()
        curr_shadow_vars = {v.name for v in shadow_vars}
        model_vars = tf.model_variables()
        shadow_model_vars = []
        for v in model_vars:
            assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!"
            stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
            if stripped_op_name in curr_shadow_vars:
                continue
            try:
                G.get_tensor_by_name(stripped_var_name)
                logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name))
                continue
            except KeyError:
                pass
            new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype,
                                    initializer=v.initial_value,
                                    trainable=False)

            curr_shadow_vars.add(stripped_op_name)  # avoid duplicated shadow_model_vars
            shadow_vars.append(new_v)
            shadow_model_vars.append((new_v, v))  # only need to sync model_var from one tower
        return shadow_model_vars 
Example #25
Source File: distributed.py    From ADL with MIT License
def _get_sync_model_vars_op(self):
        """
        Get the op to sync local model_variables to PS.
        """
        ops = []
        for (shadow_v, local_v) in self._shadow_model_vars:
            ops.append(shadow_v.assign(local_v.read_value()))
        assert len(ops)
        return tf.group(*ops, name='sync_{}_model_variables_to_ps'.format(len(ops))) 
Example #26
Source File: test_kitti_depth.py    From DF-Net with MIT License
def main(_):
    if FLAGS.split == 'val':
        txt_file = 'data/kitti/val_files_eigen.txt'
    elif FLAGS.split == 'test':
        txt_file = 'data/kitti/test_files_eigen.txt'
    else:
        assert False

    with open(txt_file, 'r') as f:
        test_files = f.readlines()
        test_files = [FLAGS.dataset_dir + t[:-1] for t in test_files]
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    basename = os.path.basename(FLAGS.ckpt_file)
    output_file = os.path.join(FLAGS.output_dir, basename)
    model = DFLearner()
    model.setup_inference(img_height=FLAGS.img_height,
                        img_width=FLAGS.img_width,
                        batch_size=FLAGS.batch_size,
                        mode='depth')
    saver = tf.train.Saver([var for var in tf.model_variables()]) 
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        saver.restore(sess, FLAGS.ckpt_file)
        pred_all = []
        for t in range(0, len(test_files), FLAGS.batch_size):
            if t % 100 == 0:
                print('processing %s: %d/%d' % (basename, t, len(test_files)))
            inputs = np.zeros(
                (FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3), 
                dtype=np.uint8)
            for b in range(FLAGS.batch_size):
                idx = t + b
                if idx >= len(test_files):
                    break
                raw_im = pil.open(test_files[idx])
                scaled_im = raw_im.resize((FLAGS.img_width, FLAGS.img_height), pil.ANTIALIAS)
                inputs[b] = np.array(scaled_im)
            pred = model.inference(inputs, sess, mode='depth')
            for b in range(FLAGS.batch_size):
                idx = t + b
                if idx >= len(test_files):
                    break
                pred_all.append(pred['depth'][b,:,:,0])
        np.save(output_file, pred_all) 
Example #27
Source File: export_model.py    From Gun-Detector with Apache License 2.0
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

  with tf.Graph().as_default():
    image, image_size, resized_image_size = _create_input_tensors()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
        crop_size=FLAGS.crop_size,
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.inference_scales) == (1.0,):
      tf.logging.info('Exported model performs single-scale inference.')
      predictions = model.predict_labels(
          image,
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Exported model performs multi-scale inference.')
      predictions = model.predict_labels_multi_scale(
          image,
          model_options=model_options,
          eval_scales=FLAGS.inference_scales,
          add_flipped_images=FLAGS.add_flipped_images)

    # Crop the valid regions from the predictions.
    semantic_predictions = tf.slice(
        predictions[common.OUTPUT_TYPE],
        [0, 0, 0],
        [1, resized_image_size[0], resized_image_size[1]])
    # Resize back the prediction to the original image size.
    def _resize_label(label, label_size):
      # Expand dimension of label to [1, height, width, 1] for resize operation.
      label = tf.expand_dims(label, 3)
      resized_label = tf.image.resize_images(
          label,
          label_size,
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=True)
      return tf.squeeze(resized_label, 3)
    semantic_predictions = _resize_label(semantic_predictions, image_size)
    semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)

    saver = tf.train.Saver(tf.model_variables())

    tf.gfile.MakeDirs(os.path.dirname(FLAGS.export_path))
    freeze_graph.freeze_graph_with_def_protos(
        tf.get_default_graph().as_graph_def(add_shapes=True),
        saver.as_saver_def(),
        FLAGS.checkpoint_path,
        _OUTPUT_NAME,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=FLAGS.export_path,
        clear_devices=True,
        initializer_nodes=None) 
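A hedged sketch of loading the frozen graph produced above (paths are illustrative; the output tensor is the _OUTPUT_NAME op defined in this file, with the usual ':0' suffix):

with tf.gfile.GFile(FLAGS.export_path, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default():
    tf.import_graph_def(graph_def, name='')
    with tf.Session() as sess:
        output = sess.graph.get_tensor_by_name(_OUTPUT_NAME + ':0')
        # feed the exported input placeholder with an encoded image to run inference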
Example #28
Source File: geonet_test_depth.py    From GeoNet with MIT License
def test_depth(opt):
    ##### load testing list #####
    with open('data/kitti/test_files_%s.txt' % opt.depth_test_split, 'r') as f:
        test_files = f.readlines()
        test_files = [opt.dataset_dir + t[:-1] for t in test_files]
    if not os.path.exists(opt.output_dir):
        os.makedirs(opt.output_dir)

    ##### init #####
    input_uint8 = tf.placeholder(tf.uint8, [opt.batch_size,
                opt.img_height, opt.img_width, 3], name='raw_input')

    model = GeoNetModel(opt, input_uint8, None, None)
    fetches = { "depth": model.pred_depth[0] }

    saver = tf.train.Saver([var for var in tf.model_variables()])
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    ##### Go #####
    with tf.Session(config=config) as sess:
        saver.restore(sess, opt.init_ckpt_file)
        pred_all = []
        for t in range(0, len(test_files), opt.batch_size):
            if t % 100 == 0:
                print('processing: %d/%d' % (t, len(test_files)))
            inputs = np.zeros(
                (opt.batch_size, opt.img_height, opt.img_width, 3),
                dtype=np.uint8)

            for b in range(opt.batch_size):
                idx = t + b
                if idx >= len(test_files):
                    break
                fh = open(test_files[idx], 'rb')  # binary mode: these are image files
                raw_im = pil.open(fh)
                scaled_im = raw_im.resize((opt.img_width, opt.img_height), pil.ANTIALIAS)
                inputs[b] = np.array(scaled_im)

            pred = sess.run(fetches, feed_dict={input_uint8: inputs})
            for b in range(opt.batch_size):
                idx = t + b
                if idx >= len(test_files):
                    break
                pred_all.append(pred['depth'][b,:,:,0])

        np.save(opt.output_dir + '/' + os.path.basename(opt.init_ckpt_file), pred_all) 
Example #29
Source File: test_kitti_depth.py    From DeepMatchVO with MIT License
def main(_):
    with open('data/kitti/test_files_eigen.txt', 'r') as f:
        test_files = f.readlines()
        test_files = [FLAGS.dataset_dir + t[:-1] for t in test_files]
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    basename = os.path.basename(FLAGS.ckpt_file)
    system = DeepSlam()
    system.setup_inference(img_height=FLAGS.img_height,
                           img_width=FLAGS.img_width,
                           batch_size=FLAGS.batch_size,
                           mode='depth')
    saver = tf.train.Saver([var for var in tf.model_variables()]) 

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        saver.restore(sess, FLAGS.ckpt_file)
        pred_all = []
        for t in range(0, len(test_files), FLAGS.batch_size):
            #if t % 100 == 0:
            #    print('processing %s: %d/%d' % (basename, t, len(test_files)))
            inputs = np.zeros(
                (FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3), 
                dtype=np.uint8)
            for b in range(FLAGS.batch_size):
                idx = t + b
                if idx >= len(test_files):
                    break
                fh = open(test_files[idx], 'rb')  # binary mode: these are image files
                raw_im = pil.open(fh)
                scaled_im = raw_im.resize((FLAGS.img_width, FLAGS.img_height), pil.ANTIALIAS)
                inputs[b] = np.array(scaled_im)
            pred = system.inference(sess, 'depth', inputs)
            for b in range(FLAGS.batch_size):
                idx = t + b
                if idx >= len(test_files):
                    break
                tmp_depth = pred['depth'][b,:,:,0]
                pred_all.append(tmp_depth)

                # obtain scaled image and depth image
                fh = open(test_files[idx], 'rb')
                raw_im = pil.open(fh)
                scaled_im = raw_im.resize((FLAGS.img_width, FLAGS.img_height), pil.ANTIALIAS)
                scaled_im = np.array(scaled_im)
                depth_img = np.squeeze(pred['depth'][b,:,:,0])

                # show the image side by side
                if FLAGS.show:
                    plt.figure()
                    plt.subplot(211)
                    plt.imshow(scaled_im)

                    plt.subplot(212)
                    plt.imshow(depth_img, cmap='gray')
                    plt.show()

        output_file = FLAGS.output_dir + '/' + basename
        np.save(output_file, pred_all)
        print('Save predicted depth map to', output_file) 
Example #30
Source File: export_model.py    From g-tensorflow-models with Apache License 2.0
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

  with tf.Graph().as_default():
    image, image_size, resized_image_size = _create_input_tensors()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
        crop_size=FLAGS.crop_size,
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.inference_scales) == (1.0,):
      tf.logging.info('Exported model performs single-scale inference.')
      predictions = model.predict_labels(
          image,
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Exported model performs multi-scale inference.')
      predictions = model.predict_labels_multi_scale(
          image,
          model_options=model_options,
          eval_scales=FLAGS.inference_scales,
          add_flipped_images=FLAGS.add_flipped_images)

    predictions = tf.cast(predictions[common.OUTPUT_TYPE], tf.float32)
    # Crop the valid regions from the predictions.
    semantic_predictions = tf.slice(
        predictions,
        [0, 0, 0],
        [1, resized_image_size[0], resized_image_size[1]])
    # Resize back the prediction to the original image size.
    def _resize_label(label, label_size):
      # Expand dimension of label to [1, height, width, 1] for resize operation.
      label = tf.expand_dims(label, 3)
      resized_label = tf.image.resize_images(
          label,
          label_size,
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=True)
      return tf.cast(tf.squeeze(resized_label, 3), tf.int32)
    semantic_predictions = _resize_label(semantic_predictions, image_size)
    semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)

    saver = tf.train.Saver(tf.model_variables())

    tf.gfile.MakeDirs(os.path.dirname(FLAGS.export_path))
    freeze_graph.freeze_graph_with_def_protos(
        tf.get_default_graph().as_graph_def(add_shapes=True),
        saver.as_saver_def(),
        FLAGS.checkpoint_path,
        _OUTPUT_NAME,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=FLAGS.export_path,
        clear_devices=True,
        initializer_nodes=None)