Python tensorflow.print() Examples

The following are 29 code examples of tensorflow.print(), collected from open-source projects. The source file, project, and license for each example are listed above it. tf.print prints tensor contents when the op executes, which makes it the graph-compatible replacement for both Python's built-in print and the deprecated tf.Print. You may also want to check out all available functions and classes of the tensorflow module.
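Before the project examples, here is a minimal self-contained sketch of tf.print itself, covering both eager and graph execution (the tensor x and the function double are illustrative names, not taken from any project below):

import tensorflow as tf

# Eager mode: tf.print evaluates immediately; output goes to stderr by default.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
tf.print("x =", x, summarize=-1)  # summarize=-1 prints every element

# Graph mode: inside a tf.function, tf.print becomes a graph op that runs on
# every call, whereas Python's print would fire only once, during tracing.
@tf.function
def double(t):
    tf.print("input:", t)
    return t * 2

double(x)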
Example #1
Source File: custom_optimizer.py    From dreamer with Apache License 2.0
def maybe_minimize(self, condition, loss):
    with tf.name_scope('optimizer_{}'.format(self._name)):
      # loss = tf.cond(condition, lambda: loss, float)
      update_op, grad_norm = tf.cond(
          condition,
          lambda: self.minimize(loss),
          lambda: (tf.no_op(), 0.0))
      with tf.control_dependencies([update_op]):
        summary = tf.cond(
            tf.logical_and(condition, self._log),
            lambda: self.summarize(grad_norm), str)
      if self._debug:
        # print_op = tf.print('{}_grad_norm='.format(self._name), grad_norm)
        message = 'Zero gradient norm in {} optimizer.'.format(self._name)
        assertion = lambda: tf.assert_greater(grad_norm, 0.0, message=message)
        assert_op = tf.cond(condition, assertion, tf.no_op)
        with tf.control_dependencies([assert_op]):
          summary = tf.identity(summary)
      return summary, grad_norm 
Example #2
Source File: bfgs_optimizer.py    From tfdiffeq with MIT License
def minimize(self, loss_func, model):
        optim_func = self._function_wrapper(loss_func, model)

        # convert initial model parameters to a 1D tf.Tensor
        init_params = tf.dynamic_stitch(optim_func.idx, model.trainable_variables)

        # train the model with L-BFGS solver
        results = tfp.optimizer.lbfgs_minimize(
            value_and_gradients_function=optim_func, initial_position=init_params,
            max_iterations=self.max_iterations,
            tolerance=self.tolerance,
            x_tolerance=self.tolerance,
            f_relative_tolerance=self.tolerance,
            **self.lbfgs_kwargs)

        # After training, the optimized parameters live in results.position,
        # so we have to assign them back to the model manually.
        optim_func.assign_new_model_parameters(results.position)

        print("L-BFGS complete, and parameters updated !")
        return model 
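The _function_wrapper above is project-specific, but the flatten/restore round trip it performs around tfp.optimizer.lbfgs_minimize can be sketched with plain TensorFlow ops; the variable shapes below are illustrative and not taken from tfdiffeq:

import tensorflow as tf

# Stand-ins for model.trainable_variables.
variables = [tf.Variable(tf.zeros([2, 3])), tf.Variable(tf.zeros([4]))]

# Build per-variable index tensors into the flat parameter vector.
idx, offset = [], 0
for v in variables:
    size = int(tf.size(v))
    idx.append(tf.range(offset, offset + size))
    offset += size

# Flatten: this mirrors the init_params step above.
flat = tf.dynamic_stitch(idx, [tf.reshape(v, [-1]) for v in variables])

# Restore: what assign_new_model_parameters must do with results.position.
for v, chunk in zip(variables, idx):
    v.assign(tf.reshape(tf.gather(flat, chunk), v.shape))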
Example #3
Source File: utils.py    From kfac with Apache License 2.0
def multiline_print(lists):
  """Prints multiple lines of output using tf.print."""

  combined_list = []
  combined_list += lists[0]

  # We prepend newline characters to strings at the start of lines to avoid
  # the ugly space indentation that tf.print's behavior of separating
  # everything with a space would otherwise cause.
  for item in lists[1:]:
    if isinstance(item[0], str):
      combined_list += (("\n" + item[0],) + item[1:])
    else:
      combined_list += (("\n",) + item)

  return tf.print(*combined_list) 
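A hypothetical call, for illustration only (the tensors are made up): the first entry opens the output, and each later tuple becomes its own line, so this prints "Stats at 100" followed by "loss: 0.25" on the next line:

step = tf.constant(100)
loss = tf.constant(0.25)
print_op = multiline_print([("Stats at", step), ("loss:", loss)])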
Example #4
Source File: basic_model.py    From TIES-2.0 with MIT License
def run_training_iteration(self, sess, summary_writer, iteration_number):
        feeds = sess.run(self.training_feeds)
        feed_dict = {
            self._placeholder_vertex_features : feeds[0],
            self._placeholder_image : feeds[1],
            self._placeholder_global_features : feeds[2],
            self._placeholder_cell_adj_matrix : feeds[3],
            self._placeholder_row_adj_matrix : feeds[4],
            self._placeholder_col_adj_matrix : feeds[5],
        }
        print("Training Iteration %d:" % iteration_number)
        ops_to_run = self.graph_predicted_sampled_adj_matrices + self.graph_gt_sampled_adj_matrices + \
            self.graph_sampled_indices + [self.graph_optimizer, self.graph_prints, self.graph_summaries_training]
        ops_result = sess.run(ops_to_run, feed_dict = feed_dict)

        summary_writer.add_summary(ops_result[-1], iteration_number) 
Example #5
Source File: utility.py    From planet with Apache License 2.0
def collect_initial_episodes(config):
  items = config.random_collects.items()
  items = sorted(items, key=lambda x: x[0])
  existing = {}
  for name, params in items:
    outdir = params.save_episode_dir
    tf.gfile.MakeDirs(outdir)
    if outdir not in existing:
      existing[outdir] = len(tf.gfile.Glob(os.path.join(outdir, '*.npz')))
    if params.num_episodes <= existing[outdir]:
      existing[outdir] -= params.num_episodes
    else:
      remaining = params.num_episodes - existing[outdir]
      existing[outdir] = 0
      env_ctor = params.task.env_ctor
      print('Collecting {} initial episodes ({}).'.format(remaining, name))
      control.random_episodes(env_ctor, remaining, outdir) 
Example #6
Source File: custom_optimizer.py    From planet with Apache License 2.0
def maybe_minimize(self, condition, loss):
    # loss = tf.cond(condition, lambda: loss, float)
    update_op, grad_norm = tf.cond(
        condition,
        lambda: self.minimize(loss),
        lambda: (tf.no_op(), 0.0))
    with tf.control_dependencies([update_op]):
      summary = tf.cond(
          tf.logical_and(condition, self._log),
          lambda: self.summarize(grad_norm), str)
    if self._debug:
      # print_op = tf.print('{}_grad_norm='.format(self._name), grad_norm)
      message = 'Zero gradient norm in {} optimizer.'.format(self._name)
      assertion = lambda: tf.assert_greater(grad_norm, 0.0, message=message)
      assert_op = tf.cond(condition, assertion, tf.no_op)
      with tf.control_dependencies([assert_op]):
        summary = tf.identity(summary)
    return summary, grad_norm 
Example #7
Source File: utility.py    From planet with Apache License 2.0
def print_metrics(metrics, step, every, name='metrics'):
  means, updates = [], []
  for key, value in metrics.items():
    key = 'metrics_{}_{}'.format(name, key)
    mean = tools.StreamingMean((), tf.float32, key)
    means.append(mean)
    updates.append(mean.submit(value))
  with tf.control_dependencies(updates):
    # message = 'step/' + '/'.join(metrics.keys()) + ' = '
    message = '{}: step/{} ='.format(name, '/'.join(metrics.keys()))
    gs = tf.train.get_or_create_global_step()
    print_metrics = tf.cond(
        tf.equal(step % every, 0),
        lambda: tf.print(message, [gs] + [mean.clear() for mean in means]),
        tf.no_op)
  return print_metrics 
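The same gated printing is shorter in TF2, where AutoGraph rewrites a tensor-dependent if into the tf.cond used above; this sketch is not part of the planet codebase:

@tf.function
def maybe_print(step, value, every=100):
    if step % every == 0:  # AutoGraph converts this branch into a tf.cond
        tf.print("step", step, "value:", value)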
Example #8
Source File: transformer_memory_test.py    From BERT with Apache License 2.0
def testLoss(self):
    batch_size = 2
    key_depth = 5
    val_depth = 5
    memory_size = 4
    window_size = 3
    x_depth = 5
    memory = transformer_memory.TransformerMemory(
        batch_size, key_depth, val_depth, memory_size)
    x = tf.random_uniform([batch_size, window_size, x_depth], minval=.0)
    memory_results, _, _, _ = (
        memory.pre_attention(
            tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.int32),
            x, None, None))
    x = memory.post_attention(memory_results, x)
    with tf.control_dependencies([tf.print("x", x)]):
      is_nan = tf.reduce_any(tf.math.is_nan(x))
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      for _ in range(100):
        is_nan_value, _ = session.run([is_nan, x])
    self.assertEqual(is_nan_value, False) 
Example #9
Source File: common.py    From tf-encrypted with Apache License 2.0
def evaluate(self, sess, x, y, data_owner):
        """Return the accuracy"""

        def print_accuracy(y_hat, y) -> tf.Operation:
            with tf.name_scope("print-accuracy"):
                correct_prediction = tf.equal(tf.round(y_hat), y)
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                print_op = tf.print(
                    "Accuracy on {}:".format(data_owner.player_name), accuracy
                )
                return print_op

        with tf.name_scope("evaluate"):
            y_hat = self.forward(x)
            print_accuracy_op = tfe.define_output(
                data_owner.player_name, [y_hat, y], print_accuracy
            )

        sess.run(print_accuracy_op, tag="evaluate") 
Example #10
Source File: network_c.py    From tf-encrypted with Apache License 2.0
def cond(
        self,
        i: tf.Tensor,
        max_iter: tf.Tensor,
        nb_epochs: tf.Tensor,
        avg_loss: tf.Tensor,
    ):
        """Check if training termination condition has been met."""
        is_end_epoch = tf.equal(i % max_iter, 0)
        to_continue = tf.cast(i < max_iter * nb_epochs, tf.bool)

        def true_fn() -> tf.Tensor:
            # tf.print returns an op rather than the boolean the loop
            # condition needs, so run it as a control dependency and pass
            # the continue flag through.
            print_op = tf.print("avg_loss: ", avg_loss)
            with tf.control_dependencies([print_op]):
                return tf.identity(to_continue)

        def false_fn() -> tf.Tensor:
            return to_continue

        return tf.cond(is_end_epoch, true_fn, false_fn) 
Example #11
Source File: network_b.py    From tf-encrypted with Apache License 2.0
def cond(
        self,
        i: tf.Tensor,
        max_iter: tf.Tensor,
        nb_epochs: tf.Tensor,
        avg_loss: tf.Tensor,
    ):
        """Check if training termination condition has been met."""
        is_end_epoch = tf.equal(i % max_iter, 0)
        to_continue = tf.cast(i < max_iter * nb_epochs, tf.bool)

        def true_fn() -> tf.Tensor:
            # tf.print returns an op rather than the boolean the loop
            # condition needs, so run it as a control dependency and pass
            # the continue flag through.
            print_op = tf.print("avg_loss: ", avg_loss)
            with tf.control_dependencies([print_op]):
                return tf.identity(to_continue)

        def false_fn() -> tf.Tensor:
            return to_continue

        return tf.cond(is_end_epoch, true_fn, false_fn) 
Example #12
Source File: pond.py    From tf-encrypted with Apache License 2.0
def debug(x: PondTensor, summarize=None, message=""):
    """Print contents of a PondTensor for debugging purposes."""
    # Note: tf.print has no `message` keyword (that belonged to the
    # deprecated tf.Print), so the message is passed as the first
    # printed argument instead.
    if isinstance(x, PondPublicTensor):
        tf.print(message, x.value_on_0.value, summarize=summarize)

    elif isinstance(x, PondPrivateTensor):
        tf.print(message, x.reveal().value_on_0.value, summarize=summarize)

    else:
        raise TypeError("Don't know how to debug {}".format(type(x)))


Example #13
Source File: logistic_regression.py    From tf-encrypted with Apache License 2.0
def evaluate(self, sess, x, y, data_owner):
        """Return the accuracy"""

        def print_accuracy(y_hat, y) -> tf.Operation:
            with tf.name_scope("print-accuracy"):
                correct_prediction = tf.equal(tf.round(y_hat), y)
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                print_op = tf.print(
                    "Accuracy on {}:".format(data_owner.player_name), accuracy
                )
                return print_op

        with tf.name_scope("evaluate"):
            y_hat = self.forward(x)
            print_accuracy_op = tfe.define_output(
                data_owner.player_name, [y_hat, y], print_accuracy
            )

        sess.run(print_accuracy_op, tag="evaluate") 
Example #14
Source File: CVAE.py    From deep-generative-models with MIT License
def train(self, train_dataset):
        """ main training call for CVAE """
        num_samples = int(train_dataset.shape[0]/self.batch_size)
        train_dataset = tf.data.Dataset.from_tensor_slices(train_dataset).shuffle(train_dataset.shape[0]).batch(self.batch_size)
        for i in range(self.epochs):
            j = 1
            norm = 0
            Loss = 0
            print("Epoch: %s" % str(i+1))
            for train_x in train_dataset:
                gradients, loss = self.compute_gradients(train_x)
                Loss += loss
                norm += tf.reduce_mean([tf.norm(g) for g in gradients])
                self.apply_gradients(gradients)
                if j != 1 and j % 20 == 0:
                    # good to print out the Euclidean norm of the gradients
                    tf.print("Epoch: %s, Batch: %s/%s" % (i+1,j,num_samples))
                    tf.print("Mean-Loss: ", Loss/j, ", Mean gradient-norm: ", norm/j)
                j += 1 
Example #15
Source File: train.py    From mobilenetv2-yolov3 with MIT License
def _distributed_epoch(self, dataset, step):
        total_loss = 0.0
        num_batches = 0.0
        for batch in dataset:
            if self.writer is not None:
                with self.writer.as_default():
                    tf.summary.image(
                        "Training data",
                        tf.cast(batch[0] * 255, tf.uint8),
                        max_outputs=8)
            per_replica_loss = self._distribution_strategy.experimental_run_v2(
                self._train_step if step else self._val_step, args=(batch,))
            total_loss += self._distribution_strategy.reduce(
                tf.distribute.ReduceOp.SUM, per_replica_loss,
                axis=None)
            num_batches += 1.0
            tf.print(num_batches, ':', total_loss / num_batches, sep='')
        total_loss = total_loss / num_batches
        return total_loss 
Example #16
Source File: semsegfull.py    From Hands-On-Neural-Networks-with-TensorFlow-2.0 with MIT License
def _info(self):
        parent_info = tfds.object_detection.voc.Voc().info
        print(parent_info)
        return tfds.core.DatasetInfo(
            builder=self,
            description=parent_info.description,
            features=tfds.features.FeaturesDict(
                {
                    "image": tfds.features.Image(shape=(None, None, 3)),
                    "image/filename": tfds.features.Text(),
                    "label": tfds.features.Image(shape=(None, None, 1)),
                }
            ),
            homepage=parent_info.homepage,
            citation=parent_info.citation,
        ) 
Example #17
Source File: utility.py    From dreamer with Apache License 2.0
def save_config(config, logdir=None):
  if logdir:
    with config.unlocked:
      config.logdir = logdir
    message = 'Start a new run and write summaries and checkpoints to {}.'
    print(message.format(config.logdir))
    tf.gfile.MakeDirs(config.logdir)
    config_path = os.path.join(config.logdir, 'config.yaml')
    with tf.gfile.GFile(config_path, 'w') as file_:
      yaml.dump(
          config, file_, yaml.Dumper,
          allow_unicode=True,
          default_flow_style=False)
  else:
    message = (
        'Start a new run without storing summaries and checkpoints since no '
        'logging directory was specified.')
    print(message)
  return config 
Example #18
Source File: utility.py    From dreamer with Apache License 2.0
def print_metrics(metrics, step, every, decimals=2, name='metrics'):
  factor = 10 ** decimals
  means, updates = [], []
  for key, value in metrics.items():
    key = 'metrics_{}_{}'.format(name, key)
    mean = tools.StreamingMean((), tf.float32, key)
    means.append(mean)
    updates.append(mean.submit(value))
  with tf.control_dependencies(updates):
    message = '{}: step/{} ='.format(name, '/'.join(metrics.keys()))
    print_metrics = tf.cond(
        tf.equal(step % every, 0),
        lambda: tf.print(message, [step] + [
            tf.round(mean.clear() * factor) / factor for mean in means]),
        tf.no_op)
  return print_metrics 
Example #19
Source File: bfgs_optimizer.py    From tfdiffeq with MIT License
def minimize(self, loss_func, model):
        optim_func = self._function_wrapper(loss_func, model)

        # convert initial model parameters to a 1D tf.Tensor
        init_params = tf.dynamic_stitch(optim_func.idx, model.trainable_variables)

        # train the model with BFGS solver
        results = tfp.optimizer.bfgs_minimize(
            value_and_gradients_function=optim_func, initial_position=init_params,
            max_iterations=self.max_iterations,
            tolerance=self.tolerance,
            x_tolerance=self.tolerance,
            f_relative_tolerance=self.tolerance,
            **self.bfgs_kwargs)

        # After training, the optimized parameters live in results.position,
        # so we have to assign them back to the model manually.
        optim_func.assign_new_model_parameters(results.position)

        print("BFGS complete, and parameters updated !")
        return model 
Example #20
Source File: network_d.py    From tf-encrypted with Apache License 2.0
def cond(
        self,
        i: tf.Tensor,
        max_iter: tf.Tensor,
        nb_epochs: tf.Tensor,
        avg_loss: tf.Tensor,
    ):
        """Check if training termination condition has been met."""
        is_end_epoch = tf.equal(i % max_iter, 0)
        to_continue = tf.cast(i < max_iter * nb_epochs, tf.bool)

        def true_fn() -> tf.Tensor:
            # tf.print returns an op rather than the boolean the loop
            # condition needs, so run it as a control dependency and pass
            # the continue flag through.
            print_op = tf.print("avg_loss: ", avg_loss)
            with tf.control_dependencies([print_op]):
                return tf.identity(to_continue)

        def false_fn() -> tf.Tensor:
            return to_continue

        return tf.cond(is_end_epoch, true_fn, false_fn) 
Example #21
Source File: data.py    From tf-encrypted with Apache License 2.0
def gen_training_input(total_size, nb_feats, batch_size):
    """Generate random data for training."""
    x_np = np.random.uniform(-0.5, 0.5, size=[total_size, nb_feats])
    y_np = np.array(x_np.mean(axis=1) > 0, np.float32)
    train_set = (
        tf.data.Dataset.from_tensor_slices((x_np, y_np))
        .map(norm)
        .shuffle(buffer_size=100)
        .repeat()
        .batch(batch_size)
    )
    train_set_iterator = train_set.make_one_shot_iterator()
    x, y = train_set_iterator.get_next()
    x = tf.reshape(x, [batch_size, nb_feats])
    y = tf.reshape(y, [batch_size, 1])

    # tf.print("x: ", x, summarize=6)
    return x, y 
Example #22
Source File: basic_model.py    From TIES-2.0 with MIT License
def run_testing_iteration(self, sess, summary_writer, iteration_number):
        feeds = sess.run(self.testing_feeds)
        feed_dict = {
            self._placeholder_vertex_features : feeds[0],
            self._placeholder_image : feeds[1],
            self._placeholder_global_features : feeds[2],
            self._placeholder_cell_adj_matrix : feeds[3],
            self._placeholder_row_adj_matrix : feeds[4],
            self._placeholder_col_adj_matrix : feeds[5],
        }


        print("Testing Iteration %d:" % iteration_number)
        ops_to_run = self.graph_predicted_sampled_adj_matrices + self.graph_gt_sampled_adj_matrices + \
            self.graph_sampled_indices + [self.graph_prints, self.test_x, self.graph_summaries_training]
        ops_result = sess.run(ops_to_run, feed_dict = feed_dict)

        vv = ops_result[-2]

        summary_writer.add_summary(ops_result[-1], iteration_number)

        result = {
            'image': feeds[1][0],
            'sampled_ground_truths': [ops_result[3][0], ops_result[4][0], ops_result[5][0]],
            'sampled_predictions': [ops_result[0][0], ops_result[1][0], ops_result[2][0]],
            'sampled_indices': [ops_result[6][0], ops_result[7][0], ops_result[8][0]],
            'global_features': feeds[2][0],
            'vertex_features': feeds[0][0],
        }

        # if vv==1:
        self.inference_output_streamer.add(result) 
Example #23
Source File: network_b.py    From tf-encrypted with Apache License 2.0
def receive_output(self, likelihoods: tf.Tensor, y_true: tf.Tensor):
        with tf.name_scope("post-processing"):
            prediction = tf.argmax(likelihoods, axis=1)
            eq_values = tf.equal(prediction, tf.cast(y_true, tf.int64))
            acc = tf.reduce_mean(tf.cast(eq_values, tf.float32))
            op = tf.print(
                "Expected:", y_true, "\nActual:", prediction, "\nAccuracy:", acc
            )

            return op 
Example #24
Source File: tf_mlperf_log.py    From models with Apache License 2.0
def _example():
  for kwargs in [dict(first_n=1), dict(), dict(every_n=2),
                 dict(first_n=2, every_n=2)]:
    op = tf.compat.v1.assign_add(tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1), 1)
    op = log_deferred(op, str(uuid.uuid4()), **kwargs)
    init = [tf.compat.v1.local_variables_initializer(), tf.compat.v1.global_variables_initializer()]
    print("-" * 5)
    with tf.compat.v1.Session().as_default() as sess:
      sess.run(init)
      for _ in range(6):
        sess.run(op) 
Example #25
Source File: logistic_regression.py    From tf-encrypted with Apache License 2.0
def loss(self, sess, x, y, player_name):
        def print_loss(y_hat, y):
            with tf.name_scope("print-loss"):
                loss = -y * tf.log(y_hat) - (1 - y) * tf.log(1 - y_hat)
                print_op = tf.print("Loss on {}:".format(player_name), loss)
                return print_op

        with tf.name_scope("loss"):
            y_hat = self.forward(x)
            print_loss_op = tfe.define_output(player_name, [y_hat, y], print_loss)
        sess.run(print_loss_op, tag="loss") 
Example #26
Source File: logistic_regression.py    From tf-encrypted with Apache License 2.0
def receive_weights(self, *weights):
        return tf.print("Weights on {}:\n".format(self.player_name), weights) 
Example #27
Source File: logistic_regression.py    From tf-encrypted with Apache License 2.0
def loss(self, model, x, y):
        def print_loss(y_hat, y):
            with tf.name_scope("print-loss"):
                loss = -y * tf.log(y_hat) - (1 - y) * tf.log(1 - y_hat)
                loss = tf.reduce_mean(loss)
                print_op = tf.print("Loss on {}:".format(self.player_name), loss)
                return print_op

        with tf.name_scope("loss"):
            y_hat = model.forward(x)
            print_loss_op = tfe.define_output(self.player_name, [y_hat, y], print_loss)
        return print_loss_op 
Example #28
Source File: CGAN.py    From deep-generative-models with MIT License
def train(self, train_dataset):
        """ main training call for CGAN """
        num_samples = int(train_dataset.shape[0]/self.batch_size)
        train_dataset = tf.data.Dataset.from_tensor_slices(train_dataset).shuffle(train_dataset.shape[0]).batch(self.batch_size)
        for i in range(self.epochs):
            j = 1
            norm_d = 0
            Loss_d = 0
            norm_g = 0
            Loss_g = 0
            print("Epoch: %s" % str(i+1))
            for train_x in train_dataset:
                # discriminator step
                gradients, t_v, loss = self.compute_gradients(train_x, sub = "discriminator")
                Loss_d += loss
                norm_d += tf.reduce_mean([tf.norm(g) for g in gradients])
                self.apply_gradients(gradients, t_v, sub = "discriminator")
                # generator step
                gradients, t_v, loss = self.compute_gradients(train_x, sub = "generator")
                Loss_g += loss
                norm_g += tf.reduce_mean([tf.norm(g) for g in gradients])
                self.apply_gradients(gradients, t_v, sub = "generator")
                if j != 1 and j % 20 == 0:
                    # good to print out the Euclidean norm of the gradients
                    tf.print("Epoch: %s, Batch: %s/%s" % (i+1,j,num_samples))
                    tf.print("Mean discriminator loss: ", Loss_d/j, ", Mean discriminator gradient norm: ", norm_d/j)
                    tf.print("Mean generator loss: ", Loss_g/j, ", Mean generator gradient norm: ", norm_g/j)
                j += 1 
Example #29
Source File: basic_model.py    From TIES-2.0 with MIT License
def run_validation_iteration(self, sess, summary_writer, iteration_number):
        feeds = sess.run(self.validation_feeds)
        feed_dict = {
            self._placeholder_vertex_features : feeds[0],
            self._placeholder_image : feeds[1],
            self._placeholder_global_features : feeds[2],
            self._placeholder_cell_adj_matrix : feeds[3],
            self._placeholder_row_adj_matrix : feeds[4],
            self._placeholder_col_adj_matrix : feeds[5],
        }


        print("---------------------------------------------------")
        print("Validation Iteration %d:" % iteration_number)
        ops_to_run = self.graph_predicted_sampled_adj_matrices + self.graph_gt_sampled_adj_matrices + \
            self.graph_sampled_indices + [self.graph_prints, self.graph_summaries_training]
        ops_result = sess.run(ops_to_run, feed_dict = feed_dict)
        print("---------------------------------------------------")

        summary_writer.add_summary(ops_result[-1], iteration_number)

        data = {
            'image' : feeds[1][0],
            'sampled_ground_truths' : [ops_result[3][0], ops_result[4][0], ops_result[5][0]],
            'sampled_predictions' : [ops_result[0][0], ops_result[1][0], ops_result[2][0]],
            'sampled_indices' : [ops_result[6][0], ops_result[7][0], ops_result[8][0]],
            'global_features' : feeds[2][0],
            'vertex_features' : feeds[0][0],
        }

        if iteration_number % self.visualize_validation_results_after == 0:
            print("Visualizing")
            self.visual_feedback_generator.add(iteration_number,  data)