Python tensorflow.RunMetadata() Examples

The following are 29 code examples of tensorflow.RunMetadata(), taken from open-source projects. The project, source file, and license for each example are noted above it.
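Most of the examples below follow the same TF 1.x pattern: create a tf.RunMetadata() protocol buffer, pass it to Session.run together with tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE), and then hand the collected step statistics to the timeline module, a summary writer, or tf.profiler. As a quick orientation, here is a minimal self-contained sketch of the timeline variant; the toy graph and the output filename are illustrative and not taken from any project below.

import tensorflow as tf
from tensorflow.python.client import timeline

# A tiny graph, purely for illustration.
a = tf.random_normal([1000, 1000])
b = tf.random_normal([1000, 1000])
c = tf.matmul(a, b)

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()

with tf.Session() as sess:
    # run_metadata is filled with per-op timing and memory stats for this call.
    sess.run(c, options=run_options, run_metadata=run_metadata)

# Convert the step stats to a Chrome trace; open chrome://tracing to view it.
tl = timeline.Timeline(run_metadata.step_stats)
with open('timeline.json', 'w') as f:
    f.write(tl.generate_chrome_trace_format())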
Example #1
Source File: trainer.py    From dvae with Apache License 2.0
def optimize(self, data, with_metrics=False, with_trace=False):
        """ Optimize a single batch """
        run_metadata = tf.RunMetadata() if with_trace else None
        trace = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) if with_trace else None

        _, metrics = self.run(
            self.training_operation, data,
            run_options=trace, run_metadata=run_metadata)

        if with_metrics:
            self.timer_update()
            steps, elapsed = self.elapsed()
            num_devices = len(self.towers)
            examples = steps * self.batch_size * num_devices
            print('Step {}, examples/sec {:.3f}, ms/batch {:.1f}'.format(
                self.global_step, examples / elapsed, 1000 * elapsed / num_devices))

            self.output_metrics(data, metrics)
            self.write_summaries(data)

        if with_trace:
            step = '{}/step{}'.format(self.name, self.global_step)
            self.summary_writer.add_run_metadata(run_metadata, step, global_step=self.global_step) 
Example #2
Source File: train.py    From GraphSAINT with MIT License
def evaluate_full_batch(sess,model,minibatch_iter,many_runs_timeline,mode):
    """
    Full batch evaluation
    NOTE: HERE GCN RUNS THROUGH THE FULL GRAPH. HOWEVER, WE CALCULATE F1 SCORE
        FOR VALIDATION / TEST NODES ONLY. 
    """
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    t1 = time.time()
    num_cls = minibatch_iter.class_arr.shape[-1]
    feed_dict, labels = minibatch_iter.feed_dict(mode)
    if args_global.timeline:
        preds,loss = sess.run([model.preds, model.loss], feed_dict=feed_dict, options=options, run_metadata=run_metadata)
        fetched_timeline = timeline.Timeline(run_metadata.step_stats)
        chrome_trace = fetched_timeline.generate_chrome_trace_format()
        many_runs_timeline.append(chrome_trace)
    else:
        preds,loss = sess.run([model.preds, model.loss], feed_dict=feed_dict)
    node_val_test = minibatch_iter.node_val if mode=='val' else minibatch_iter.node_test
    t2 = time.time()
    f1_scores = calc_f1(labels[node_val_test],preds[node_val_test],model.sigmoid_loss)
    return loss, f1_scores[0], f1_scores[1], (t2-t1) 
Example #3
Source File: DCSCN.py    From dcscn-super-resolution with MIT License
def log_model_analysis(self):
        run_metadata = tf.RunMetadata()
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)

        _, loss = self.sess.run([self.optimizer, self.loss], feed_dict={self.x: self.batch_input,
                                                                        self.x2: self.batch_input_bicubic,
                                                                        self.y: self.batch_true,
                                                                        self.lr_input: self.lr,
                                                                        self.dropout: self.dropout_rate},
                                options=run_options, run_metadata=run_metadata)

        # tf.contrib.tfprof.model_analyzer.print_model_analysis(
        #   tf.get_default_graph(),
        #   run_meta=run_metadata,
        #   tfprof_options=tf.contrib.tfprof.model_analyzer.PRINT_ALL_TIMING_MEMORY)
        self.first_training = False 
Example #4
Source File: util.py    From am3 with Apache License 2.0
def profiled_run(sess, ops, feed_dict, is_profiling=False, log_dir=None):
    if not is_profiling:
        return sess.run(ops, feed_dict=feed_dict)
    else:
        if log_dir is None:
            raise ValueError("You need to provide a log_dir for profiling.")
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        outputs = sess.run(ops, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)

        # Create the Timeline object, and write it to a json
        tl = timeline.Timeline(run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        with open(os.path.join(log_dir, 'timeline.json'), 'w') as f:
            f.write(ctf)

        return outputs 
Example #5
Source File: tf_distributions.py    From MJHMC with GNU General Public License v2.0
def dEdX_val(self, X):
        with self.graph.as_default(), tf.device(self.grad_device):
            if self.prof_run:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                grad = self.sess.run(self.grad_op, feed_dict={self.state_pl: X},
                                       options=run_options, run_metadata=run_metadata)

                tf_tl  = timeline.Timeline(run_metadata.step_stats)
                ctf = tf_tl.generate_chrome_trace_format()
                log_path = expanduser('~/tmp/logs/tf_{}_grad_timeline_{}.json'.format(self.name, time.time()))
                with open(log_path, 'w') as log_file:
                    log_file.write(ctf)
            else:
                grad = self.sess.run(self.grad_op, feed_dict={self.state_pl: X})
            return grad 
Example #6
Source File: tf_distributions.py    From MJHMC with GNU General Public License v2.0
def E_val(self, X):
        with self.graph.as_default(), tf.device(self.energy_device):
            if self.prof_run:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                energy = self.sess.run(self.energy_op, feed_dict={self.state_pl: X},
                                       options=run_options, run_metadata=run_metadata)
                tf_tl  = timeline.Timeline(run_metadata.step_stats)
                ctf = tf_tl.generate_chrome_trace_format()
                log_path = expanduser('~/tmp/logs/tf_{}_energy_timeline_{}.json'.format(self.name, time.time()))
                with open(log_path, 'w') as log_file:
                    log_file.write(ctf)
            else:
                energy = self.sess.run(self.energy_op, feed_dict={self.state_pl: X})
            return energy 
Example #7
Source File: test_util.py    From in-silico-labeling with Apache License 2.0
def profile(self,
              tensors: List[Union[tf.Tensor, tf.Operation, lt.LabeledTensor]]):
    tensors = [
        t.tensor if isinstance(t, lt.LabeledTensor) else t for t in tensors
    ]

    run_metadata = tf.RunMetadata()
    sv = tf.train.Supervisor(graph=tensors[0].graph)
    sess = sv.PrepareSession()
    sv.StartQueueRunners(sess)

    results = sess.run(
        tensors,
        options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
        run_metadata=run_metadata)

    options = tf.contrib.tfprof.model_analyzer.PRINT_ALL_TIMING_MEMORY
    options['viz'] = True
    tf.contrib.tfprof.model_analyzer.print_model_analysis(
        tf.get_default_graph(), run_meta=run_metadata, tfprof_options=options)

    sv.Stop()

    return results 
Example #8
Source File: test_single_threaded_logging.py    From delira with GNU Affero General Public License v3.0
def test_graph_tf(self):

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        with tf.Session() as sess:
            outputs = self._model_tf(
                np.zeros(
                    shape=(
                        1,
                        28,
                        28,
                        1),
                    dtype=np.float32))
            sess.run(tf.initializers.global_variables())
            sess.run(outputs, options=run_options, run_metadata=run_metadata)

        self._logger.log({"graph_tf": {
            "graph": self._model_tf._graph.as_graph_def(add_shapes=True),
            "run_metadata": run_metadata
        }}) 
Example #9
Source File: profile.py    From lang2program with Apache License 2.0
def run(self, fetches, feed_dict=None):
        """like Session.run, but return a Timeline object in Chrome trace format (JSON).

        Save the json to a file, go to chrome://tracing, and open the file.

        Args:
            fetches
            feed_dict

        Returns:
            dict: a JSON dict
        """
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        super(ProfiledSession, self).run(fetches, feed_dict, options=options, run_metadata=run_metadata)

        # Create the Timeline object, and write it to a json
        tl = timeline.Timeline(run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        return json.loads(ctf) 
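A hypothetical usage sketch for the class above; the constructor is assumed to accept the same optional arguments as tf.Session, and the fetched tensor and output path are illustrative only.

import json
import tensorflow as tf

some_op = tf.matmul(tf.random_normal([256, 256]), tf.random_normal([256, 256]))
profiled_sess = ProfiledSession()           # assumed tf.Session-style constructor
trace_dict = profiled_sess.run(some_op)     # the Chrome trace as a parsed JSON dict
with open('profile_trace.json', 'w') as f:
    json.dump(trace_dict, f)
# Open chrome://tracing in Chrome and load profile_trace.json to inspect the run.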
Example #10
Source File: cli.py    From mayo with MIT License
def cli_profile_timeline(self):
        """Performs training profiling to produce timeline.json.  """
        # TODO integrate this into Profile.
        from tensorflow.python.client import timeline
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        session = self._get_session('train')
        # run 100 iterations to warm up
        max_iterations = 100
        for i in range(max_iterations):
            log.info(
                'Running {}/{} iterations to warm up...'
                .format(i, max_iterations), update=True)
            session.run(session._train_op)
        log.info('Running the final iteration to generate timeline...')
        session.run(
            session._train_op, options=options, run_metadata=run_metadata)
        fetched_timeline = timeline.Timeline(run_metadata.step_stats)
        chrome_trace = fetched_timeline.generate_chrome_trace_format()
        with open('timeline.json', 'w') as f:
            f.write(chrome_trace) 
Example #11
Source File: avsr.py    From avsr-tf1 with GNU General Public License v3.0
def _create_sessions(self):
        config = tf.ConfigProto(allow_soft_placement=True)
        if 'train' in self._required_graphs:
            self._train_session = tf.Session(graph=self._train_graph, config=config)
        if 'eval' in self._required_graphs:
            self._evaluate_session = tf.Session(graph=self._evaluate_graph, config=config)
        # self._predict_session = tf.Session(graph=self._predict_graph, config=config)

        if self._hparams.profiling is True:
            from tensorflow.profiler import Profiler
            self.profiler = Profiler(self._train_session.graph)
            self.run_meta = tf.RunMetadata()
            makedirs('/tmp/timelines/', exist_ok=True)
            self.sess_opts = {
                'options': tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                'run_metadata': self.run_meta
            }
        else:
            self.sess_opts = {} 
Example #12
Source File: tfprof_logger_test.py    From deep_image_model with Apache License 2.0
def testFillMissingShape(self):
    a, b, y = self._BuildSmallPlaceholderlModel()
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess = tf.Session()
    sess.run(y,
             options=run_options,
             run_metadata=run_metadata,
             feed_dict={a: [[1, 2], [2, 3]],
                        b: [[1, 2], [2, 3]]})

    graph2 = tf.Graph()
    # Use copy_op_to_graph to remove shape information.
    y2 = tf.contrib.copy_graph.copy_op_to_graph(y, graph2, [])
    self.assertEquals('<unknown>', str(y2.get_shape()))

    tf.contrib.tfprof.tfprof_logger._fill_missing_graph_shape(graph2,
                                                              run_metadata)
    self.assertEquals('(2, 2)', str(y2.get_shape())) 
Example #13
Source File: stt.py    From rnn-speech with MIT License
def configure_tf_session(xla, timeline):
    # Configure tensorflow's session
    config = tf.ConfigProto()
    jit_level = 0
    if xla:
        # Turns on XLA JIT compilation.
        jit_level = tf.OptimizerOptions.ON_1
    config.graph_options.optimizer_options.global_jit_level = jit_level
    run_metadata = tf.RunMetadata()

    # Add timeline data generation options if needed
    if timeline is True:
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    else:
        run_options = None
    return config, run_metadata, run_options 
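A short sketch of how the three returned objects would typically be wired into a session; the fetched op is illustrative and not part of the snippet.

config, run_metadata, run_options = configure_tf_session(xla=False, timeline=True)
some_op = tf.reduce_mean(tf.random_normal([4, 4]))
with tf.Session(config=config) as sess:
    sess.run(some_op, options=run_options, run_metadata=run_metadata)
# run_metadata.step_stats can now be passed to timeline.Timeline, as in the other examples.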
Example #14
Source File: seg_model.py    From RGCNN with Apache License 2.0
def build_graph(self, M_0):
        """Build the computational graph of the model."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Inputs.
            with tf.name_scope('inputs'):
                # self.pj_graph = tf.placeholder(tf.float32, (self.batch_size, M_0, M_0), 'lapacian')
                self.ph_data = tf.placeholder(tf.float32, (self.batch_size, M_0, 6), 'data')
                self.ph_labels = tf.placeholder(tf.int32, (self.batch_size, M_0), 'labels')
                self.ph_cat = tf.placeholder(tf.int32, (self.batch_size), 'labels')
                self.ph_dropout = tf.placeholder(tf.float32, (), 'dropout')

            # Model.
            op_logits = self.inference(self.ph_data, self.ph_cat, self.ph_dropout)

            run_meta = tf.RunMetadata()
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            flops = tf.profiler.profile(self.graph, run_meta=run_meta, cmd='op', options=opts)
            print('Total flops: ' + str(flops.total_float_ops))

            self.op_logits = op_logits
            self.op_loss, self.op_loss_average = self.loss(op_logits, self.ph_labels, self.regularization)
            self.op_train = self.training(self.op_loss, self.learning_rate,
                                          self.decay_steps, self.decay_rate, self.momentum)
            self.op_prediction = self.prediction(op_logits)

            # Initialize variables, i.e. weights and biases.
            self.op_init = tf.global_variables_initializer()

            # Summaries for TensorBoard and Save for model parameters.
            self.op_summary = tf.summary.merge_all()
            self.op_saver = tf.train.Saver(max_to_keep=5)

        self.graph.finalize() 
Example #15
Source File: base.py    From PoseFix_RELEASE with MIT License
def _make_graph(self):
        self.logger.info("Generating testing graph on {} GPUs ...".format(self.cfg.num_gpus))

        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(self.cfg.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('tower_%d' % i) as name_scope:
                        with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
                            self.net.make_network(is_train=False)
                            self._input_list.append(self.net.get_inputs())
                            self._output_list.append(self.net.get_outputs())

                        tf.get_variable_scope().reuse_variables()

        self._outputs = aggregate_batch(self._output_list)

        # run_meta = tf.RunMetadata()
        # opts = tf.profiler.ProfileOptionBuilder.float_operation()
        # flops = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
        #
        # opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
        # params = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)

        # print("{:,} --- {:,}".format(flops.total_float_ops, params.total_parameters))
        # from IPython import embed; embed()

        return self._outputs 
Example #16
Source File: mnist_hpo_demo.py    From auptimizer with GNU General Public License v3.0
def get_flop():
    run_meta = tf.RunMetadata()
    with tf.Session(graph=tf.Graph()) as sess:
        x = tf.zeros([1,784])
        out = conv_net({'images':x}, 10, False, False)
        
        opts = tf.profiler.ProfileOptionBuilder.float_operation()    
        flops = tf.profiler.profile(sess.graph, run_meta=run_meta, cmd='op', options=opts)
    print("Model FLOPs is %d"%flops.total_float_ops)
    return flops.total_float_ops 
Example #17
Source File: trainer.py    From ternarynet with Apache License 2.0
def run_step(self):
        """ Simply run self.train_op"""
        self.sess.run(self.train_op)
        #run_metadata = tf.RunMetadata()
        #self.sess.run([self.train_op],
                #options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                #run_metadata=run_metadata
                #)
        #from tensorflow.python.client import timeline
        #trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        #trace_file = open('timeline.ctf.json', 'w')
        #trace_file.write(trace.generate_chrome_trace_format())
        #import sys; sys.exit() 
Example #18
Source File: sourcesep.py    From multisensory with Apache License 2.0
def make_model(self):
    with tf.device(self.default_gpu):
      pr = self.pr

      if self.is_training:
        self.make_train_ops()
      else:
        self.make_test_ops(reuse=False)

      self.coord = tf.train.Coordinator()
      self.saver_fast = tf.train.Saver()
      self.saver_slow = tf.train.Saver(max_to_keep = 1000)

      self.init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
      self.sess.run(self.init_op)
      tf.train.start_queue_runners(sess = self.sess, coord = self.coord)
      print('Initializing')

      self.merged_summary = tf.summary.merge_all()
      print('Tensorboard command:')
      summary_dir = ut.mkdir(pj(pr.summary_dir, ut.simple_timestamp()))
      print('tensorboard --logdir=%s' % summary_dir)
      self.sum_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)

      if self.profile:
        #self.run_meta = tf.RunMetadata()
        self.profiler = tf.profiler.Profiler(self.sess.graph) 
Example #19
Source File: graph_repl.py    From tensorlang with Apache License 2.0
def _run(self, summary_writer, run_id, src):
    self._compiler.clear()
    self._compiler.put_source("main%s" % self._suffix, src)
    self._compiler.set_default_device("/cpu:0")

    above = None
    pkg = self._compiler.resolve_import_path("main", reimport=True)
    above = pkg.ctx().get_above()

    # Write graph once we've generated it.
    summary_writer.add_graph(self._session.graph, run_id)
    summary_writer.flush()

    vars = frozenset(self._vars())
    self._init_new_vars(vars - self._previous_vars)
    self._previous_vars = vars

    queue_runners = frozenset(self._queue_runners())
    self._init_new_queue_runners(queue_runners - self._previous_queue_runners)
    self._previous_queue_runners = queue_runners

    if isinstance(above, RetvalBag):
      above = above.get(None)

    if isinstance(above, (tf.Tensor, tf.Variable, tf.Operation)):
      run_metadata = tf.RunMetadata()
      above = self._session.run(above, run_metadata=run_metadata)
      summary_writer.add_run_metadata(run_metadata, "repl-%04d" % run_id, run_id)

    return above 
Example #20
Source File: prof.py    From petridishnn with MIT License
def __init__(self, dump_metadata=False, dump_tracing=True, dump_event=False):
        """
        Args:
            dump_metadata(bool): Dump :class:`tf.RunMetadata` to be used with tfprof.
            dump_tracing(bool): Dump chrome tracing files.
            dump_event(bool): Dump to an event processed by FileWriter and
                will be shown in TensorBoard.
        """
        self._dir = logger.get_logger_dir()
        self._dump_meta = bool(dump_metadata)
        self._dump_tracing = bool(dump_tracing)
        self._dump_event = bool(dump_event)
        assert os.path.isdir(self._dir), self._dir 
Example #21
Source File: base.py    From petridishnn with MIT License
def _do_call(self, dp):
        assert len(dp) == len(self.input_tensors), \
            "{} != {}".format(len(dp), len(self.input_tensors))
        if self.sess is None:
            self.sess = tf.get_default_session()
            assert self.sess is not None, "Predictor isn't called under a default session!"

        if self._callable is None:
            self._callable = self.sess.make_callable(
                fetches=self.output_tensors,
                feed_list=self.input_tensors,
                accept_options=self.ACCEPT_OPTIONS)
        # run_metadata = tf.RunMetadata()
        # options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        return self._callable(*dp) 
Example #22
Source File: base.py    From cdvae-vc with MIT License
def __init__(self, model, train_data, arch, args, dirs, ckpt):
        self.model = model
        self.loss = self.model.loss(train_data)
        self.arch = arch
        self.args = args
        self.dirs = dirs
        self.ckpt = ckpt
        
        # get optimization ops
        self.opt = self._optimize()

        # get metadata, and session configs for GPU
        self.sess_config = self._sess_config()
        self.run_metadata = tf.RunMetadata()
       
        # define saver
        self.saver = tf.train.Saver(max_to_keep=None)
        
        # define hooks
        hooks = self.get_hooks(self.saver)

        # Initialize TensorFlow monitored training session
        self.sess =  tf.train.MonitoredTrainingSession(
                        hooks = hooks,
                        config = self.sess_config,
                        )

        # init windows for time and each losses
        self.reset_time_windows() 
Example #23
Source File: model_statistics.py    From keras-YOLOv3-model-set with MIT License
def get_flops(model):
    run_meta = tf.RunMetadata()
    graph = tf.get_default_graph()

    # We use the Keras session graph in the call to the profiler.
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    flops = tf.profiler.profile(graph=graph, run_meta=run_meta, cmd='op', options=opts)

    opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
    params = tf.profiler.profile(graph=graph, run_meta=run_meta, cmd='op', options=opts)

    print('Total FLOPs: {}m float_ops'.format(flops.total_float_ops/1e6))
    print('Total PARAMs: {}m'.format(params.total_parameters/1e6)) 
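A hedged usage sketch: under TF 1.x graph mode, building a tf.keras model registers its ops in the default graph, so get_flops can be called right after the model is constructed. The model below is illustrative; the original project passes its own YOLOv3 variants.

import tensorflow as tf

# Illustrative model; any tf.keras model built in graph mode works the same way.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dense(10),
])
get_flops(model)  # profiles the default graph and prints FLOPs and parameter counts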
Example #24
Source File: models.py    From DeepTL-Lane-Change-Classification with MIT License
def get_flops(self):

        run_meta_data = tf.RunMetadata()
        flop_opts = tf.profiler.ProfileOptionBuilder.float_operation()

        conv_flops = tf.profiler.profile(graph=K.get_session().graph, run_meta=run_meta_data, cmd='op', options=flop_opts)
        self.flops = conv_flops.total_float_ops
        print(self.flops) 
Example #25
Source File: trainer.py    From VDAIC2017 with MIT License
def run_step(self):
        """ Simply run self.train_op"""
        self.sess.run(self.train_op)
        #run_metadata = tf.RunMetadata()
        #self.sess.run([self.train_op],
                #options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                #run_metadata=run_metadata
                #)
        #from tensorflow.python.client import timeline
        #trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        #trace_file = open('timeline.ctf.json', 'w')
        #trace_file.write(trace.generate_chrome_trace_format())
        #import sys; sys.exit() 
Example #26
Source File: mnist_correctness_test.py    From gradient-checkpointing with MIT License
def sessrun(*args, **kwargs):
  global sess, run_metadata
  
  if not GLOBAL_PROFILE:
    return sess.run(*args, **kwargs)
  
  run_metadata = tf.RunMetadata()

  kwargs['options'] = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  kwargs['run_metadata'] = run_metadata
  result = sess.run(*args, **kwargs)
  first_entry = args[0]
  if isinstance(first_entry, list):
    if len(first_entry) == 0 and len(args) == 1:
      return None
    first_entry = first_entry[0]

  if DUMP_TIMELINES:
    name = first_entry.name
    name = name.replace('/', '-')

    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timelines/%s.json'%(name,), 'w') as f:
      f.write(ctf)
    with open('timelines/%s.pbtxt'%(name,), 'w') as f:
      f.write(str(run_metadata))

  return result 
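The wrapper above depends on module-level globals and an existing timelines/ directory. A hedged sketch of the setup it assumes, with names copied from the snippet and values chosen purely for illustration:

import os
import tensorflow as tf
from tensorflow.python.client import timeline

GLOBAL_PROFILE = True    # set False to fall back to a plain sess.run
DUMP_TIMELINES = True    # write timelines/<fetch-name>.json and .pbtxt
os.makedirs('timelines', exist_ok=True)

loss = tf.reduce_sum(tf.random_normal([10, 10]), name='loss')  # illustrative fetch
sess = tf.Session()
sessrun([loss])          # profiled run; the trace is saved under timelines/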
Example #27
Source File: deep_resnet_benchmark.py    From gradient-checkpointing with MIT License
def sessrun(*args, **kwargs):
  global sess, run_metadata
  
  if not GLOBAL_PROFILE:
    return sess.run(*args, **kwargs)
  
  run_metadata = tf.RunMetadata()

  kwargs['options'] = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  kwargs['run_metadata'] = run_metadata
  result = sess.run(*args, **kwargs)
  first_entry = args[0]
  if isinstance(first_entry, list):
    if len(first_entry) == 0 and len(args) == 1:
      return None
    first_entry = first_entry[0]

  if DUMP_TIMELINES:
    name = first_entry.name
    name = name.replace('/', '-')

    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timelines/%s.json'%(name,), 'w') as f:
      f.write(ctf)
    with open('timelines/%s.pbtxt'%(name,), 'w') as f:
      f.write(str(run_metadata))

  return result 
Example #28
Source File: deep_imagenet_benchmark.py    From gradient-checkpointing with MIT License
def sessrun(*args, **kwargs):
  global sess, run_metadata
  
  if not GLOBAL_PROFILE:
    return sess.run(*args, **kwargs)
  
  run_metadata = tf.RunMetadata()

  kwargs['options'] = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  kwargs['run_metadata'] = run_metadata
  result = sess.run(*args, **kwargs)
  first_entry = args[0]
  if isinstance(first_entry, list):
    if len(first_entry) == 0 and len(args) == 1:
      return None
    first_entry = first_entry[0]

  if DUMP_TIMELINES:
    name = first_entry.name
    name = name.replace('/', '-')

    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timelines/%s.json'%(name,), 'w') as f:
      f.write(ctf)
    with open('timelines/%s.pbtxt'%(name,), 'w') as f:
      f.write(str(run_metadata))

  return result 
Example #29
Source File: base.py    From lighttrack with MIT License
def _make_graph(self):
        self.logger.info("Generating testing graph on {} GPUs ...".format(self.cfg.nr_gpus))

        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(self.cfg.nr_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('tower_%d' % i) as name_scope:
                        with slim.arg_scope([slim.model_variable, slim.variable], device='/device:CPU:0'):
                            self.net.make_network(is_train=False)
                            self._input_list.append(self.net.get_inputs())
                            self._output_list.append(self.net.get_outputs())

                        tf.get_variable_scope().reuse_variables()

        self._outputs = aggregate_batch(self._output_list)

        # run_meta = tf.RunMetadata()
        # opts = tf.profiler.ProfileOptionBuilder.float_operation()
        # flops = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)
        #
        # opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter()
        # params = tf.profiler.profile(self.sess.graph, run_meta=run_meta, cmd='op', options=opts)

        # print("{:,} --- {:,}".format(flops.total_float_ops, params.total_parameters))
        # from IPython import embed; embed()

        return self._outputs