Python tensorflow.RunOptions() Examples

The following are 29 code examples of tensorflow.RunOptions(), collected from open-source projects; the source file, project, and license are listed above each example. tf.RunOptions is the protocol buffer accepted by the options argument of Session.run in the TensorFlow 1.x API (tf.compat.v1.RunOptions in TensorFlow 2.x); it is most often used to request a full execution trace of a step or to set a per-run timeout.
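Almost all of the examples follow the same pattern: build a tf.RunOptions proto (usually with trace_level=tf.RunOptions.FULL_TRACE), pass it to Session.run together with a tf.RunMetadata container, and then convert the collected step stats into a Chrome trace. A minimal sketch of that pattern (TF 1.x API; the matmul is only a stand-in for whatever fetches you actually want to run):

import tensorflow as tf
from tensorflow.python.client import timeline

x = tf.random_normal([1000, 1000])
y = tf.matmul(x, x)  # stand-in op; any fetches work

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()

with tf.Session() as sess:
    sess.run(y, options=run_options, run_metadata=run_metadata)

# Turn the collected step stats into a Chrome trace and open it at chrome://tracing.
trace = timeline.Timeline(run_metadata.step_stats)
with open('timeline.json', 'w') as f:
    f.write(trace.generate_chrome_trace_format())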
Example #1
Source File: trainer.py    From dvae with Apache License 2.0
def optimize(self, data, with_metrics=False, with_trace=False):
        """ Optimize a single batch """
        run_metadata = tf.RunMetadata() if with_trace else None
        trace = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) if with_trace else None

        _, metrics = self.run(
            self.training_operation, data,
            run_options=trace, run_metadata=run_metadata)

        if with_metrics:
            self.timer_update()
            steps, elapsed = self.elapsed()
            num_devices = len(self.towers)
            examples = steps * self.batch_size * num_devices
            print('Step {}, examples/sec {:.3f}, ms/batch {:.1f}'.format(
                self.global_step, examples / elapsed, 1000 * elapsed / num_devices))

            self.output_metrics(data, metrics)
            self.write_summaries(data)

        if with_trace:
            step = '{}/step{}'.format(self.name, self.global_step)
            self.summary_writer.add_run_metadata(run_metadata, step, global_step=self.global_step) 
Example #2
Source File: stt.py    From rnn-speech with MIT License
def configure_tf_session(xla, timeline):
    # Configure tensorflow's session
    config = tf.ConfigProto()
    jit_level = 0
    if xla:
        # Turns on XLA JIT compilation.
        jit_level = tf.OptimizerOptions.ON_1
    config.graph_options.optimizer_options.global_jit_level = jit_level
    run_metadata = tf.RunMetadata()

    # Add timeline data generation options if needed
    if timeline is True:
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    else:
        run_options = None
    return config, run_metadata, run_options 
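A hypothetical call site (train_op is an assumption, not part of the original file) showing how the three returned objects fit together:

config, run_metadata, run_options = configure_tf_session(xla=False, timeline=True)
with tf.Session(config=config) as sess:
    sess.run(train_op, options=run_options, run_metadata=run_metadata)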
Example #3
Source File: DCSCN.py    From dcscn-super-resolution with MIT License
def log_model_analysis(self):
        run_metadata = tf.RunMetadata()
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)

        _, loss = self.sess.run([self.optimizer, self.loss], feed_dict={self.x: self.batch_input,
                                                                        self.x2: self.batch_input_bicubic,
                                                                        self.y: self.batch_true,
                                                                        self.lr_input: self.lr,
                                                                        self.dropout: self.dropout_rate},
                                options=run_options, run_metadata=run_metadata)

        # tf.contrib.tfprof.model_analyzer.print_model_analysis(
        #   tf.get_default_graph(),
        #   run_meta=run_metadata,
        #   tfprof_options=tf.contrib.tfprof.model_analyzer.PRINT_ALL_TIMING_MEMORY)
        self.first_training = False 
Example #4
Source File: tf_distributions.py    From MJHMC with GNU General Public License v2.0
def E_val(self, X):
        with self.graph.as_default(), tf.device(self.energy_device):
            if self.prof_run:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                energy = self.sess.run(self.energy_op, feed_dict={self.state_pl: X},
                                       options=run_options, run_metadata=run_metadata)
                tf_tl  = timeline.Timeline(run_metadata.step_stats)
                ctf = tf_tl.generate_chrome_trace_format()
                log_path = expanduser('~/tmp/logs/tf_{}_energy_timeline_{}.json'.format(self.name, time.time()))
                with open(log_path, 'w') as log_file:
                    log_file.write(ctf)
            else:
                energy = self.sess.run(self.energy_op, feed_dict={self.state_pl: X})
            return energy 
Example #5
Source File: tf_distributions.py    From MJHMC with GNU General Public License v2.0
def dEdX_val(self, X):
        with self.graph.as_default(), tf.device(self.grad_device):
            if self.prof_run:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()

                grad = self.sess.run(self.grad_op, feed_dict={self.state_pl: X},
                                       options=run_options, run_metadata=run_metadata)

                tf_tl  = timeline.Timeline(run_metadata.step_stats)
                ctf = tf_tl.generate_chrome_trace_format()
                log_path = expanduser('~/tmp/logs/tf_{}_grad_timeline_{}.json'.format(self.name, time.time()))
                with open(log_path, 'w') as log_file:
                    log_file.write(ctf)
            else:
                grad = self.sess.run(self.grad_op, feed_dict={self.state_pl: X})
            return grad 
Example #6
Source File: test_util.py    From in-silico-labeling with Apache License 2.0
def profile(self,
              tensors: List[Union[tf.Tensor, tf.Operation, lt.LabeledTensor]]):
    tensors = [
        t.tensor if isinstance(t, lt.LabeledTensor) else t for t in tensors
    ]

    run_metadata = tf.RunMetadata()
    sv = tf.train.Supervisor(graph=tensors[0].graph)
    sess = sv.PrepareSession()
    sv.StartQueueRunners(sess)

    results = sess.run(
        tensors,
        options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
        run_metadata=run_metadata)

    options = tf.contrib.tfprof.model_analyzer.PRINT_ALL_TIMING_MEMORY
    options['viz'] = True
    tf.contrib.tfprof.model_analyzer.print_model_analysis(
        tf.get_default_graph(), run_meta=run_metadata, tfprof_options=options)

    sv.Stop()

    return results 
Example #7
Source File: test_single_threaded_logging.py    From delira with GNU Affero General Public License v3.0
def test_graph_tf(self):

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        with tf.Session() as sess:
            outputs = self._model_tf(
                np.zeros(
                    shape=(
                        1,
                        28,
                        28,
                        1),
                    dtype=np.float32))
            sess.run(tf.initializers.global_variables())
            sess.run(outputs, options=run_options, run_metadata=run_metadata)

        self._logger.log({"graph_tf": {
            "graph": self._model_tf._graph.as_graph_def(add_shapes=True),
            "run_metadata": run_metadata
        }}) 
Example #8
Source File: profile.py    From lang2program with Apache License 2.0
def run(self, fetches, feed_dict=None):
        """like Session.run, but return a Timeline object in Chrome trace format (JSON).

        Save the json to a file, go to chrome://tracing, and open the file.

        Args:
            fetches
            feed_dict

        Returns:
            dict: a JSON dict
        """
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        super(ProfiledSession, self).run(fetches, feed_dict, options=options, run_metadata=run_metadata)

        # Create the Timeline object, and write it to a json
        tl = timeline.Timeline(run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        return json.loads(ctf) 
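The returned dict is already in Chrome trace format, so it can be dumped straight to disk and opened at chrome://tracing. A hedged usage sketch (the session construction and the fetched loss_op are assumptions, not shown in the original file):

import json

profiled_sess = ProfiledSession()        # assumed to be constructed like a tf.Session
trace_dict = profiled_sess.run(loss_op)  # runs the fetches, returns the Chrome trace dict
with open('profile_trace.json', 'w') as f:
    json.dump(trace_dict, f)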
Example #9
Source File: fifo_queue_test.py    From deep_image_model with Apache License 2.0
def testReusableAfterTimeout(self):
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32)
      dequeued_t = q.dequeue()
      enqueue_op = q.enqueue(37)

      with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                   "Timed out waiting for notification"):
        sess.run(dequeued_t, options=tf.RunOptions(timeout_in_ms=10))

      with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                   "Timed out waiting for notification"):
        sess.run(dequeued_t, options=tf.RunOptions(timeout_in_ms=10))

      sess.run(enqueue_op)
      self.assertEqual(37, sess.run(dequeued_t)) 
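timeout_in_ms puts a deadline on the whole Session.run call and surfaces as tf.errors.DeadlineExceededError, which makes it handy for polling a queue without blocking forever. A small sketch under the same setup as the test above (q and sess assumed):

dequeued = q.dequeue()  # build the op once, outside any polling loop
try:
    value = sess.run(dequeued, options=tf.RunOptions(timeout_in_ms=100))
except tf.errors.DeadlineExceededError:
    value = None  # nothing arrived within 100 ms; retry later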
Example #10
Source File: cli.py    From mayo with MIT License
def cli_profile_timeline(self):
        """Performs training profiling to produce timeline.json.  """
        # TODO integrate this into Profile.
        from tensorflow.python.client import timeline
        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        session = self._get_session('train')
        # run 100 iterations to warm up
        max_iterations = 100
        for i in range(max_iterations):
            log.info(
                'Running {}/{} iterations to warm up...'
                .format(i, max_iterations), update=True)
            session.run(session._train_op)
        log.info('Running the final iteration to generate timeline...')
        session.run(
            session._train_op, options=options, run_metadata=run_metadata)
        fetched_timeline = timeline.Timeline(run_metadata.step_stats)
        chrome_trace = fetched_timeline.generate_chrome_trace_format()
        with open('timeline.json', 'w') as f:
            f.write(chrome_trace) 
Example #11
Source File: avsr.py    From avsr-tf1 with GNU General Public License v3.0
def _create_sessions(self):
        config = tf.ConfigProto(allow_soft_placement=True)
        if 'train' in self._required_graphs:
            self._train_session = tf.Session(graph=self._train_graph, config=config)
        if 'eval' in self._required_graphs:
            self._evaluate_session = tf.Session(graph=self._evaluate_graph, config=config)
        # self._predict_session = tf.Session(graph=self._predict_graph, config=config)

        if self._hparams.profiling is True:
            from tensorflow.profiler import Profiler
            self.profiler = Profiler(self._train_session.graph)
            self.run_meta = tf.RunMetadata()
            makedirs('/tmp/timelines/', exist_ok=True)
            self.sess_opts = {
                'options': tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                'run_metadata': self.run_meta
            }
        else:
            self.sess_opts = {} 
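The 'options' and 'run_metadata' keys mirror the keyword arguments of Session.run, so the dict is presumably unpacked at the call sites; a hypothetical call (train_op and feed_dict are assumptions, not part of the original file):

loss = self._train_session.run(train_op, feed_dict=feed_dict, **self.sess_opts)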
Example #12
Source File: train.py    From GraphSAINT with MIT License
def evaluate_full_batch(sess,model,minibatch_iter,many_runs_timeline,mode):
    """
    Full batch evaluation
    NOTE: HERE GCN RUNS THROUGH THE FULL GRAPH. HOWEVER, WE CALCULATE F1 SCORE
        FOR VALIDATION / TEST NODES ONLY. 
    """
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    t1 = time.time()
    num_cls = minibatch_iter.class_arr.shape[-1]
    feed_dict, labels = minibatch_iter.feed_dict(mode)
    if args_global.timeline:
        preds,loss = sess.run([model.preds, model.loss], feed_dict=feed_dict, options=options, run_metadata=run_metadata)
        fetched_timeline = timeline.Timeline(run_metadata.step_stats)
        chrome_trace = fetched_timeline.generate_chrome_trace_format()
        many_runs_timeline.append(chrome_trace)
    else:
        preds,loss = sess.run([model.preds, model.loss], feed_dict=feed_dict)
    node_val_test = minibatch_iter.node_val if mode=='val' else minibatch_iter.node_test
    t2 = time.time()
    f1_scores = calc_f1(labels[node_val_test],preds[node_val_test],model.sigmoid_loss)
    return loss, f1_scores[0], f1_scores[1], (t2-t1) 
Example #13
Source File: model.py    From realtime_object_detection with MIT License
def __init__(self,config):
        self.config = config
        self.detection_graph = tf.Graph()
        self.category_index = None
        self.masks = None
        self._tf_config = tf.ConfigProto(allow_soft_placement=True)
        self._tf_config.gpu_options.allow_growth=True
        #self._tf_config.gpu_options.force_gpu_compatible=True
        #self._tf_config.gpu_options.per_process_gpu_memory_fraction = 0.01
        self._run_options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
        self._run_metadata = False
        self._wait_thread = False
        self._is_imageD = False
        self._is_videoD = False
        self._is_rosD = False
        print ('> Model: {}'.format(self.config.MODEL_PATH)) 
Example #14
Source File: tfprof_logger_test.py    From deep_image_model with Apache License 2.0
def testFillMissingShape(self):
    a, b, y = self._BuildSmallPlaceholderlModel()
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    sess = tf.Session()
    sess.run(y,
             options=run_options,
             run_metadata=run_metadata,
             feed_dict={a: [[1, 2], [2, 3]],
                        b: [[1, 2], [2, 3]]})

    graph2 = tf.Graph()
    # Use copy_op_to_graph to remove shape information.
    y2 = tf.contrib.copy_graph.copy_op_to_graph(y, graph2, [])
    self.assertEquals('<unknown>', str(y2.get_shape()))

    tf.contrib.tfprof.tfprof_logger._fill_missing_graph_shape(graph2,
                                                              run_metadata)
    self.assertEquals('(2, 2)', str(y2.get_shape())) 
Example #15
Source File: model_seq2seq.py    From tensorflow-chatbot-chinese with MIT License
def train(self, sess, batch, print_pred, summary_writer, add_global, prob):

        feed_dict = {self.encoder_inputs: batch.encoder_inputs,
                      self.encoder_inputs_length: batch.encoder_inputs_length,
                      self.decoder_targets: batch.decoder_targets,
                      self.decoder_targets_length: batch.decoder_targets_length,
                      self.batch_size: len(batch.encoder_inputs),
                      self.sampling_prob: prob}

        if print_pred:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
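            # Note: no tf.RunMetadata() is passed to sess.run below, so the full
            # trace requested here has nowhere to be returned to the caller.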
            _, loss, pred, summary, current_step, print_lr = sess.run([self.train_op, self.train_loss, 
                self.decoder_predict_train, self.train_summary, add_global, self.lr], 
                feed_dict=feed_dict, options=run_options)

            i = np.random.randint(0, len(batch.encoder_inputs))
            util.decoder_print(self.idx2word, batch.encoder_inputs[i], batch.encoder_inputs_length[i],
                batch.decoder_targets[i], batch.decoder_targets_length[i], pred[i], 'yellow')
            summary_writer.add_summary(summary, global_step=current_step)
        else:
            _, loss, current_step, print_lr = sess.run([self.train_op, self.train_loss, 
                add_global, self.lr], feed_dict=feed_dict)
        return loss, calc_perplexity(loss), current_step, print_lr 
Example #16
Source File: util.py    From am3 with Apache License 2.0
def profiled_run(sess, ops, feed_dict, is_profiling=False, log_dir=None):
    if not is_profiling:
        return sess.run(ops, feed_dict=feed_dict)
    else:
        if log_dir is None:
            raise ValueError("You need to provide a log_dir for profiling.")
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        outputs = sess.run(ops, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)

        # Create the Timeline object, and write it to a json
        tl = timeline.Timeline(run_metadata.step_stats)
        ctf = tl.generate_chrome_trace_format()
        with open(os.path.join(log_dir, 'timeline.json'), 'w') as f:
            f.write(ctf)

        return outputs 
Example #17
Source File: prof.py    From ADL with MIT License
def _before_run(self, _):
        opt = tf.RunOptions()
        opt.trace_level = tf.RunOptions.FULL_TRACE
        return tf.train.SessionRunArgs(fetches=None, options=opt) 
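tf.RunOptions is a protocol buffer message, so mutating the field after construction (as above) and passing it as a constructor keyword produce identical protos:

opt_a = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
opt_b = tf.RunOptions()
opt_b.trace_level = tf.RunOptions.FULL_TRACE
assert opt_a == opt_b  # protobuf messages compare field by field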
Example #18
Source File: tucker.py    From tensorD with MIT License
def train(self, steps=None):
        """

        Parameters
        ----------
        steps : Ignore

        Returns
        -------

        """
        self._is_train_finish = False
        sess = self._env.sess

        sum_op = tf.summary.merge_all()
        sum_writer = tf.summary.FileWriter(self._env.summary_path, sess.graph)

        sess.run(self._init_op, feed_dict=self._feed_dict)
        print('HOSVD model initial finish')

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()


        loss_v, self._full_tensor, self._factors, self._core, sum_msg = sess.run(
            [self._loss_op, self._full_op, self._factor_update_op, self._core_op, sum_op], feed_dict=self._feed_dict, options=run_options, run_metadata=run_metadata)
        sum_writer.add_run_metadata(run_metadata, 'step1')
        sum_writer.add_summary(sum_msg, 1)
        print('HOSVD model train finish, with RMSE = %f' % loss_v)
        self._is_train_finish = True 
Example #19
Source File: hooks.py    From natural-language-summary-generation-from-structured-data with MIT License
def before_run(self, _run_context):
    if not self.is_chief or self._done:
      return
    if not self._active:
      return tf.train.SessionRunArgs(self._global_step)
    else:
      tf.logging.info("Performing full trace on next step.")
      run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) #pylint: disable=E1101
      return tf.train.SessionRunArgs(self._global_step, options=run_options) 
Example #20
Source File: profile_session_run_hooks.py    From aster with MIT License
def before_run(self, run_context):  # pylint: disable=unused-argument
    if self._do_profile:
      options = tf.RunOptions(trace_level=self._trace_level)
    else:
      options = None
    return tf.train.SessionRunArgs(self._global_step_tensor, options=options) 
Example #21
Source File: base.py    From petridishnn with MIT License
def _do_call(self, dp):
        assert len(dp) == len(self.input_tensors), \
            "{} != {}".format(len(dp), len(self.input_tensors))
        if self.sess is None:
            self.sess = tf.get_default_session()
            assert self.sess is not None, "Predictor isn't called under a default session!"

        if self._callable is None:
            self._callable = self.sess.make_callable(
                fetches=self.output_tensors,
                feed_list=self.input_tensors,
                accept_options=self.ACCEPT_OPTIONS)
        # run_metadata = tf.RunMetadata()
        # options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        return self._callable(*dp) 
Example #22
Source File: hooks.py    From ctc-asr with MIT License
def __init__(self, file_writer, log_frequency, trace_level=tf.RunOptions.FULL_TRACE):
        self._trace = log_frequency == 1
        self.writer = file_writer
        self.trace_level = trace_level
        self.log_frequency = log_frequency
        self._global_step_tensor = None 
Example #23
Source File: hooks.py    From ctc-asr with MIT License
def before_run(self, run_context):
        if self._trace:
            options = tf.RunOptions(trace_level=self.trace_level)
        else:
            options = None

        return tf.train.SessionRunArgs(fetches=self._global_step_tensor, options=options) 
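Only __init__ and before_run appear in these excerpts; a plausible after_run companion (a sketch, not from the ctc-asr source) would write the collected metadata with the FileWriter from the previous example and rearm tracing according to log_frequency:

def after_run(self, run_context, run_values):
    # Sketch only: run_values.results holds the fetched global step and
    # run_values.run_metadata holds the trace when tracing was requested.
    if self._trace and run_values.run_metadata is not None:
        step = run_values.results
        self.writer.add_run_metadata(run_values.run_metadata, 'step_{}'.format(step))
    # Trace again on steps that are multiples of log_frequency (assumption).
    self._trace = (run_values.results + 1) % self.log_frequency == 0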
Example #24
Source File: staffline_patches_dofn.py    From moonlight with Apache License 2.0
def start_bundle(self):
    self.extractor = staffline_extractor.StafflinePatchExtractor(
        patch_height=self.patch_height,
        patch_width=self.patch_width,
        run_options=tf.RunOptions(timeout_in_ms=self.timeout_ms))
    self.session = tf.Session(graph=self.extractor.graph) 
Example #25
Source File: train.py    From imgcomp-cvpr with GNU General Public License v3.0
def run_and_fetch_metadata(fetches, sess):
    print('*** Adding metadata...')
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    return sess.run(fetches, options=run_options, run_metadata=run_metadata), run_metadata 
Example #26
Source File: trainer.py    From VDAIC2017 with MIT License
def run_step(self):
        """ Simply run self.train_op"""
        self.sess.run(self.train_op)
        #run_metadata = tf.RunMetadata()
        #self.sess.run([self.train_op],
                #options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                #run_metadata=run_metadata
                #)
        #from tensorflow.python.client import timeline
        #trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        #trace_file = open('timeline.ctf.json', 'w')
        #trace_file.write(trace.generate_chrome_trace_format())
        #import sys; sys.exit() 
Example #27
Source File: mnist_correctness_test.py    From gradient-checkpointing with MIT License
def sessrun(*args, **kwargs):
  global sess, run_metadata
  
  if not GLOBAL_PROFILE:
    return sess.run(*args, **kwargs)
  
  run_metadata = tf.RunMetadata()

  kwargs['options'] = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  kwargs['run_metadata'] = run_metadata
  result = sess.run(*args, **kwargs)
  first_entry = args[0]
  if isinstance(first_entry, list):
    if len(first_entry) == 0 and len(args) == 1:
      return None
    first_entry = first_entry[0]

  if DUMP_TIMELINES:
    name = first_entry.name
    name = name.replace('/', '-')

    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timelines/%s.json'%(name,), 'w') as f:
      f.write(ctf)
    with open('timelines/%s.pbtxt'%(name,), 'w') as f:
      f.write(str(run_metadata))

  return result 
Example #28
Source File: deep_resnet_benchmark.py    From gradient-checkpointing with MIT License
def sessrun(*args, **kwargs):
  global sess, run_metadata
  
  if not GLOBAL_PROFILE:
    return sess.run(*args, **kwargs)
  
  run_metadata = tf.RunMetadata()

  kwargs['options'] = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  kwargs['run_metadata'] = run_metadata
  result = sess.run(*args, **kwargs)
  first_entry = args[0]
  if isinstance(first_entry, list):
    if len(first_entry) == 0 and len(args) == 1:
      return None
    first_entry = first_entry[0]

  if DUMP_TIMELINES:
    name = first_entry.name
    name = name.replace('/', '-')

    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timelines/%s.json'%(name,), 'w') as f:
      f.write(ctf)
    with open('timelines/%s.pbtxt'%(name,), 'w') as f:
      f.write(str(run_metadata))

  return result 
Example #29
Source File: deep_imagenet_benchmark.py    From gradient-checkpointing with MIT License
def sessrun(*args, **kwargs):
  global sess, run_metadata
  
  if not GLOBAL_PROFILE:
    return sess.run(*args, **kwargs)
  
  run_metadata = tf.RunMetadata()

  kwargs['options'] = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  kwargs['run_metadata'] = run_metadata
  result = sess.run(*args, **kwargs)
  first_entry = args[0]
  if isinstance(first_entry, list):
    if len(first_entry) == 0 and len(args) == 1:
      return None
    first_entry = first_entry[0]

  if DUMP_TIMELINES:
    name = first_entry.name
    name = name.replace('/', '-')

    tl = timeline.Timeline(run_metadata.step_stats)
    ctf = tl.generate_chrome_trace_format()
    with open('timelines/%s.json'%(name,), 'w') as f:
      f.write(ctf)
    with open('timelines/%s.pbtxt'%(name,), 'w') as f:
      f.write(str(run_metadata))

  return result