Python tensorflow.logical_not() Examples

The following are 30 code examples of tensorflow.logical_not(), drawn from open-source projects. The source file, project, and license for each example are listed above it. You may also want to check out the other available functions and classes of the tensorflow module.
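
For reference, tf.logical_not computes the element-wise Boolean negation of a bool tensor; a minimal sketch, assuming TF 2.x eager execution:

import tensorflow as tf

# True -> False, False -> True, element-wise.
mask = tf.constant([True, False, True])
print(tf.logical_not(mask).numpy())  # [False  True False]
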
Example #1
Source File: metrics.py    From blueoil with Apache License 2.0
def tp_tn_fp_fn(output, labels, threshold=0.5):
    """Calculate True Positive, True Negative, False Positive, False Negative.

    Args:
        output: network output sigmoided tensor. shape is [batch_size, num_class]
        labels: multi label encoded bool tensor. shape is [batch_size, num_class]
        threshold: python float

    """
    predicted = tf.greater_equal(output, threshold)

    gt_positive = tf.reduce_sum(tf.cast(labels, tf.int32))
    gt_negative = tf.reduce_sum(tf.cast(tf.logical_not(labels), tf.int32))

    true_positive = tf.math.logical_and(predicted, labels)
    true_positive = tf.reduce_sum(tf.cast(true_positive, tf.int32))

    true_negative = tf.math.logical_and(tf.logical_not(predicted), tf.math.logical_not(labels))
    true_negative = tf.reduce_sum(tf.cast(true_negative, tf.int32))

    false_negative = gt_positive - true_positive

    false_positive = gt_negative - true_negative

    return true_positive, true_negative, false_positive, false_negative 
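
A quick sanity check of the function above, assuming TF 2.x eager execution and that tp_tn_fp_fn is in scope:

import tensorflow as tf

output = tf.constant([[0.9, 0.2], [0.4, 0.7]])       # sigmoid outputs
labels = tf.constant([[True, False], [True, True]])  # multi-hot bool labels
tp, tn, fp, fn = tp_tn_fp_fn(output, labels)
print(tp.numpy(), tn.numpy(), fp.numpy(), fn.numpy())  # 2 1 0 1
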
Example #2
Source File: ops.py    From DOTA_models with Apache License 2.0
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
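
The NaN-row filter in isolation, rewritten with TF 2.x op names (tf.to_int32, tf.is_nan, and reduction_indices above are TF 1.x-era APIs); a minimal sketch with made-up boxes:

import tensorflow as tf

boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],
                     [float("nan"), 0.2, 0.6, 0.6]])
has_nan_row = tf.reduce_any(tf.math.is_nan(boxes), axis=1)  # [False, True]
valid_indices = tf.where(tf.logical_not(has_nan_row))       # indices of rows to keep
print(valid_indices.numpy())  # [[0]]
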
Example #3
Source File: ops.py    From vehicle_counting_tensorflow with MIT License
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict 
Example #4
Source File: ops.py    From object_detector_app with MIT License
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #5
Source File: imagenet_utils.py    From ghostnet with Apache License 2.0
def compute_loss_and_error(logits, label, label_smoothing):
        loss = sparse_softmax_cross_entropy(
                logits=logits, labels=label,
                label_smoothing = label_smoothing,
                weights=1.0)
        loss = tf.reduce_mean(loss, name='xentropy-loss')

        def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
            with tf.name_scope('prediction_incorrect'):
                x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
            return tf.cast(x, tf.float32, name=name)
        
        if label.shape.ndims > 1:
            label = tf.cast(tf.argmax(label, axis=1), tf.int32)
        wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))

        wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
        return loss 
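
The in_top_k/logical_not pattern on its own, with toy logits (keyword arguments are used here to sidestep the TF 1.x/2.x positional-argument difference; TF 2.x eager assumed):

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1], [0.1, 0.5, 3.0]])
labels = tf.constant([0, 0])
# "wrong" is True where the true class is not in the top-1 predictions.
wrong = tf.logical_not(tf.nn.in_top_k(targets=labels, predictions=logits, k=1))
print(tf.cast(wrong, tf.float32).numpy())  # [0. 1.]
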
Example #6
Source File: postU.py    From decompose with MIT License
def updateK(self, k, prepVars, U):
        f = self.__f
        UfShape = U[f].get_shape()

        lhUfk = self.__likelihood.lhUfk(U[f], prepVars, f, k)
        postfk = lhUfk*self.prior[k].cond()
        Ufk = postfk.draw()
        Ufk = tf.expand_dims(Ufk, 0)

        normUfk = tf.norm(Ufk)
        notNanNorm = tf.logical_not(tf.is_nan(normUfk))
        finiteNorm = tf.is_finite(normUfk)
        positiveNorm = normUfk > 0.
        isValid = tf.logical_and(notNanNorm,
                                 tf.logical_and(finiteNorm,
                                                positiveNorm))
        Uf = tf.cond(isValid, lambda: self.updateUf(U[f], Ufk, k),
                     lambda: U[f])

        # TODO: if valid -> self.__likelihood.lhU()[f].updateUfk(U[f][k], k)
        Uf.set_shape(UfShape)
        U[f] = Uf
        return(U) 
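
The validity gate used above, reduced to a standalone sketch with hypothetical values (tf.is_nan and tf.is_finite are tf.math.is_nan and tf.math.is_finite in TF 2.x):

import tensorflow as tf

Ufk = tf.constant([0.0, float("nan")])
norm = tf.norm(Ufk)  # nan
is_valid = tf.logical_and(tf.logical_not(tf.math.is_nan(norm)),
                          tf.logical_and(tf.math.is_finite(norm), norm > 0.))
print(is_valid.numpy())  # False: the update would be rejected
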
Example #7
Source File: cvNormalNdLikelihood.py    From decompose with MIT License
def init(self, data: Tensor) -> None:
        tau = self.__tauInit
        dtype = self.__dtype
        properties = self.__properties
        noiseDistribution = CenNormal(tau=tf.constant([tau], dtype=dtype),
                                      properties=properties)
        self.__noiseDistribution = noiseDistribution
        observedMask = tf.logical_not(tf.is_nan(data))
        trainMask = tf.logical_not(self.cv.mask(X=data))
        trainMask = tf.get_variable("trainMask",
                                    dtype=trainMask.dtype,
                                    initializer=trainMask)
        trainMask = tf.logical_and(trainMask, observedMask)
        testMask = tf.logical_and(observedMask,
                                  tf.logical_not(trainMask))
        self.__observedMask = observedMask
        self.__trainMask = trainMask
        self.__testMask = testMask 
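
The mask bookkeeping as a toy, eager-mode example; the cross-validation mask here is just a hand-written stand-in for self.cv.mask:

import tensorflow as tf

data = tf.constant([1.0, float("nan"), 3.0, 4.0])
cv_holdout = tf.constant([False, False, True, False])         # stand-in CV mask
observed = tf.logical_not(tf.math.is_nan(data))                # [T, F, T, T]
train = tf.logical_and(tf.logical_not(cv_holdout), observed)   # [T, F, F, T]
test = tf.logical_and(observed, tf.logical_not(train))         # [F, F, T, F]
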
Example #8
Source File: ops.py    From vehicle_counting_tensorflow with MIT License
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #9
Source File: cvNormal2dLikelihood.py    From decompose with MIT License
def init(self, data: Tensor) -> None:
        tau = self.__tauInit
        dtype = self.__dtype
        properties = self.__properties
        noiseDistribution = CenNormal(tau=tf.constant([tau], dtype=dtype),
                                      properties=properties)
        self.__noiseDistribution = noiseDistribution
        observedMask = tf.logical_not(tf.is_nan(data))
        trainMask = tf.logical_not(self.cv.mask(X=data))
        trainMask = tf.get_variable("trainMask",
                                    dtype=trainMask.dtype,
                                    initializer=trainMask)
        trainMask = tf.logical_and(trainMask, observedMask)
        testMask = tf.logical_and(observedMask,
                                  tf.logical_not(trainMask))
        self.__observedMask = observedMask
        self.__trainMask = trainMask
        self.__testMask = testMask 
Example #10
Source File: imagenet_utils.py    From benchmarks with The Unlicense
def compute_loss_and_error(logits, label, label_smoothing=0.):
        if label_smoothing != 0.:
            nclass = logits.shape[-1]
            label = tf.one_hot(label, nclass) if label.shape.ndims == 1 else label

        if label.shape.ndims == 1:
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        else:
            loss = tf.losses.softmax_cross_entropy(
                label, logits, label_smoothing=label_smoothing,
                reduction=tf.losses.Reduction.NONE)
        loss = tf.reduce_mean(loss, name='xentropy-loss')

        def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
            with tf.name_scope('prediction_incorrect'):
                x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
            return tf.cast(x, tf.float32, name=name)

        wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))

        wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
        return loss 
Example #11
Source File: ops.py    From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict 
Example #12
Source File: ops.py    From BMW-TensorFlow-Inference-API-CPU with Apache License 2.0
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #13
Source File: ops.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict 
Example #14
Source File: triplet_loss_utils.py    From BERT with Apache License 2.0
def _get_anchor_positive_triplet_mask(labels):
	"""Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
	Args:
		labels: tf.int32 `Tensor` with shape [batch_size]
	Returns:
		mask: tf.bool `Tensor` with shape [batch_size, batch_size]
	"""
	# Check that i and j are distinct
	indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
	indices_not_equal = tf.logical_not(indices_equal)

	# Check if labels[i] == labels[j]
	# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
	labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))

	# Combine the two masks
	mask = tf.logical_and(indices_not_equal, labels_equal)

	return mask 
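
A tiny sanity check of the mask, assuming the helper above is in scope and TF 2.x eager execution:

import tensorflow as tf

labels = tf.constant([0, 0, 1])
mask = _get_anchor_positive_triplet_mask(labels)
print(mask.numpy())
# Only the distinct pairs (0, 1) and (1, 0) share a label:
# [[False  True False]
#  [ True False False]
#  [False False False]]
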
Example #15
Source File: area_attention.py    From BERT with Apache License 2.0
def lengths_to_area_mask(feature_length, length, max_area_size):
  """Generates a non-padding mask for areas based on lengths.

  Args:
    feature_length: a tensor of [batch_size]
    length: the length of the batch
    max_area_size: the maximum area size considered
  Returns:
    mask: a tensor in shape of [batch_size, num_areas]
  """

  paddings = tf.cast(tf.expand_dims(
      tf.logical_not(
          tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32)
  _, _, area_sum, _, _ = compute_area_features(paddings,
                                               max_area_width=max_area_size)
  mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2])
  return mask 
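
The padding-indicator step of this helper in isolation (compute_area_features is project-specific and not reproduced here); TF 2.x eager assumed:

import tensorflow as tf

feature_length = tf.constant([2, 4])
# True marks padded positions beyond each sequence's length.
padding = tf.logical_not(tf.sequence_mask(feature_length, maxlen=4))
print(padding.numpy())
# [[False False  True  True]
#  [False False False False]]
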
Example #16
Source File: metrics.py    From blueoil with Apache License 2.0
def tp_tn_fp_fn_for_each(output, labels, threshold=0.5):
    """Calculate True Positive, True Negative, False Positive, False Negative.

    Args:
        output: network output sigmoided tensor. shape is [batch_size, num_class]
        labels: multi label encoded bool tensor. shape is [batch_size, num_class]
        threshold: python float

    Returns:
        shape is [4(tp, tn, fp, fn), num_class]

    """
    predicted = tf.greater_equal(output, threshold)
    gt_positive = tf.reduce_sum(tf.cast(labels, tf.int32), axis=0, keepdims=True)
    gt_negative = tf.reduce_sum(tf.cast(tf.logical_not(labels), tf.int32), axis=0, keepdims=True)
    true_positive = tf.math.logical_and(predicted, labels)
    true_positive = tf.reduce_sum(tf.cast(true_positive, tf.int32), axis=0, keepdims=True)

    true_negative = tf.math.logical_and(tf.logical_not(predicted), tf.math.logical_not(labels))
    true_negative = tf.reduce_sum(tf.cast(true_negative, tf.int32), axis=0, keepdims=True)
    false_negative = gt_positive - true_positive
    false_positive = gt_negative - true_negative

    return tf.concat(axis=0, values=[true_positive, true_negative, false_positive, false_negative]) 
Example #17
Source File: ops.py    From yolo_v2 with Apache License 2.0
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #18
Source File: ops.py    From yolo_v2 with Apache License 2.0
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict 
Example #19
Source File: ops.py    From HereIsWally with MIT License
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #20
Source File: ops.py    From garbage-object-detection-tensorflow with MIT License
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #21
Source File: imagenet_utils.py    From webvision-2.0-benchmarks with Apache License 2.0
def compute_loss_and_error(logits, label):
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        loss = tf.reduce_mean(loss, name='xentropy-loss')

        def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
            with tf.name_scope('prediction_incorrect'):
                x = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
            return tf.cast(x, tf.float32, name=name)

        res_scores, res_top5 = tf.nn.top_k(logits, k=5)
        res_scores = tf.identity(logits, name="logits")
        res_top = tf.identity(res_top5, name="res-top5")
        wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))

        wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
        return loss 
Example #22
Source File: ops.py    From cartoonify with MIT License
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #23
Source File: ops.py    From cartoonify with MIT License
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict 
Example #24
Source File: ops.py    From Person-Detection-and-Tracking with MIT License
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #25
Source File: ops.py    From Person-Detection-and-Tracking with MIT License
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    is_crowd = tensor_dict[fields.InputDataFields.groundtruth_is_crowd]
    is_not_crowd = tf.logical_not(is_crowd)
    is_not_crowd_indices = tf.where(is_not_crowd)
    tensor_dict = retain_groundtruth(tensor_dict, is_not_crowd_indices)
  return tensor_dict 
Example #26
Source File: ops.py    From ros_people_object_detection_tensorflow with Apache License 2.0
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
      tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
  valid_indicator_vector = tf.logical_not(nan_indicator_vector)
  valid_indices = tf.where(valid_indicator_vector)

  return retain_groundtruth(tensor_dict, valid_indices) 
Example #27
Source File: beam_search_sampler.py    From nematus with BSD 3-Clause "New" or "Revised" License
def _generate_while_loop_cond_func(max_translation_len):

    def continue_decoding(curr_time_step, alive_sequences, alive_scores,
                          finished_sequences, finished_scores,
                          finished_eos_flags, alive_memories):
        """Determines whether decoding should continue or terminate."""

        # Check maximum prediction length has not been reached.
        length_criterion = tf.less(curr_time_step, max_translation_len)

        # Otherwise, check if the most likely alive hypothesis is less likely
        # than the least probable completed sequence.

        # Calculate the best possible score of the most probable sequence
        # currently alive.
        highest_alive_score = alive_scores[:, 0]

        # Calculate the score of the least likely sequence currently finished.
        lowest_finished_score = tf.reduce_min(
            input_tensor=finished_scores * tf.cast(finished_eos_flags, FLOAT_DTYPE), axis=1)

        # Account for the case in which none of the sequences in 'finished'
        # have terminated so far; In that case, each of the unfinished
        # sequences is assigned a high negative probability, so that the
        # termination condition is not met.
        tmp = tf.reduce_any(input_tensor=finished_eos_flags, axis=1)
        mask_unfinished = (1. - tf.cast(tmp, dtype=tf.float32)) * (-1. * 1e7)
        lowest_finished_score += mask_unfinished

        # Check whether the current highest alive score is lower than the
        # current lowest finished score.
        likelihood_criterion = \
            tf.logical_not(
                tf.reduce_all(
                  input_tensor=tf.greater(lowest_finished_score, highest_alive_score)))

        # Decide whether to continue the decoding process.
        return tf.logical_and(length_criterion, likelihood_criterion)

    return continue_decoding 
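
The likelihood part of the stopping test, checked on toy scores (hypothetical values, float32 assumed):

import tensorflow as tf

highest_alive_score = tf.constant([-1.0, -5.0])
lowest_finished_score = tf.constant([-0.5, -6.0])
keep_decoding = tf.logical_not(
    tf.reduce_all(tf.greater(lowest_finished_score, highest_alive_score)))
print(keep_decoding.numpy())  # True: the second item's alive beam can still win
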
Example #28
Source File: Dataset.py    From MOTSFusion with MIT License
def create_input_tensors_dict(self, batch_size):
    self._load_inputfile_lists()
    tfdata = tf.data.Dataset.from_tensor_slices(self.inputfile_lists)
    if self.subset == "train":
      tfdata = tfdata.shuffle(buffer_size=self.shuffle_buffer_size)

    def _load_example(*input_filenames):
      example = self.load_example(input_filenames)
      # this has different sizes and therefore cannot be batched
      if batch_size > 1:
        if DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE in example:
          del example[DataKeys.SEGMENTATION_LABELS_ORIGINAL_SIZE]
        if DataKeys.RAW_IMAGES in example:
          del example[DataKeys.RAW_IMAGES]
      return example

    def _filter_example(tensors):
      if DataKeys.SKIP_EXAMPLE in tensors:
        return tf.logical_not(tensors[DataKeys.SKIP_EXAMPLE])
      else:
        return tf.constant(True)

    tfdata = tfdata.map(_load_example, num_parallel_calls=self._num_parallel_calls)
    tfdata = tfdata.filter(_filter_example)
    tfdata = tfdata.repeat()
    if batch_size > 1:
      tfdata = tfdata.batch(batch_size)
    elif batch_size == 1:
      # like this we are able to retain the batch size in the shape information
      tfdata = tfdata.map(lambda x: {k: tf.expand_dims(v, axis=0) for k, v in x.items()})
    else:
      assert False, ("invalid batch size", batch_size)
    tfdata = tfdata.prefetch(buffer_size=self._prefetch_buffer_size)
    res = tfdata.make_one_shot_iterator().get_next()

    if self.use_summaries:
      self.create_summaries(res)
    return res

  # Override to add extraction keys that will be used by trainer. 
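
A minimal tf.data analogue of _filter_example, with a hypothetical "skip" flag per element (TF 2.x eager assumed):

import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices(
    {"x": [1, 2, 3], "skip": [False, True, False]})
# Keep only the elements whose skip flag is False.
ds = ds.filter(lambda ex: tf.logical_not(ex["skip"]))
print([int(ex["x"]) for ex in ds])  # [1, 3]
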
Example #29
Source File: dataset.py    From spleeter with MIT License
def filter_infinity(self, sample):
        """ Filter infinity sample. """
        return tf.logical_not(
            tf.math.is_inf(
                sample[self._min_spectrogram_key])) 
Example #30
Source File: base.py    From BERT with Apache License 2.0
def get_scheduled_sample_inputs(self,
                                  done_warm_start,
                                  groundtruth_items,
                                  generated_items,
                                  scheduled_sampling_func):
    """Scheduled sampling.

    Args:
      done_warm_start: whether we are done with warm start or not.
      groundtruth_items: list of ground truth items.
      generated_items: list of generated items.
      scheduled_sampling_func: scheduled sampling function to choose between
        groundtruth items and generated items.

    Returns:
      A mix list of ground truth and generated items.
    """
    def sample():
      """Calculate the scheduled sampling params based on iteration number."""
      with tf.variable_scope("scheduled_sampling", reuse=tf.AUTO_REUSE):
        return [
            scheduled_sampling_func(item_gt, item_gen)
            for item_gt, item_gen in zip(groundtruth_items, generated_items)]

    cases = [
        (tf.logical_not(done_warm_start), lambda: groundtruth_items),
        (tf.logical_not(self.is_training), lambda: generated_items),
    ]
    output_items = tf.case(cases, default=sample, strict=True)

    return output_items
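
A toy mirror of the tf.case selection logic above, with hypothetical flags standing in for done_warm_start and self.is_training:

import tensorflow as tf

done_warm_start = tf.constant(False)
is_training = tf.constant(True)
chosen = tf.case(
    [(tf.logical_not(done_warm_start), lambda: tf.constant("groundtruth")),
     (tf.logical_not(is_training), lambda: tf.constant("generated"))],
    default=lambda: tf.constant("sampled"), strict=True)
print(chosen.numpy())  # b'groundtruth': still in warm start, so ground truth wins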