Python tensorflow.reduce_any() Examples

The following are 30 code examples of tensorflow.reduce_any(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
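As a quick refresher before the examples: tf.reduce_any computes a logical OR over the elements of a boolean tensor, either over all elements or along the axes you pass. A minimal sketch (TensorFlow 2.x eager execution assumed; values invented):

import tensorflow as tf

x = tf.constant([[True, False, False],
                 [False, False, False]])

print(tf.reduce_any(x))          # True                  -- OR over every element
print(tf.reduce_any(x, axis=1))  # [ True False]         -- OR over each row
print(tf.reduce_any(x, axis=0))  # [ True False False]   -- OR over each column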
Example #1
Source File: models.py    From tf2-yolo3 with Apache License 2.0
def yolo_nms(outputs, anchors, masks, num_classes, iou_threshold=0.6, score_threshold=0.15):
    boxes, confs, classes = [], [], []

    for o in outputs:
        boxes.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        confs.append(tf.reshape(o[1], (tf.shape(o[0])[0], -1, tf.shape(o[1])[-1])))
        classes.append(tf.reshape(o[2], (tf.shape(o[0])[0], -1, tf.shape(o[2])[-1])))
    boxes = tf.concat(boxes, axis=1)
    confs = tf.concat(confs, axis=1)
    class_probs = tf.concat(classes, axis=1)
    box_scores = confs * class_probs
    mask = box_scores >= score_threshold
    mask = tf.reduce_any(mask, axis=-1)

    class_boxes = tf.boolean_mask(boxes, mask)
    class_boxes = tf.reshape(class_boxes, (tf.shape(boxes)[0], -1, 4))
    class_box_scores = tf.boolean_mask(box_scores, mask)
    class_box_scores = tf.reshape(class_box_scores, (tf.shape(boxes)[0], -1, num_classes))

    class_boxes, class_box_scores = tf.py_function(func=batched_nms,
                                                   inp=[class_boxes, class_box_scores, num_classes, iou_threshold],
                                                   Tout=[tf.float32, tf.float32])
    classes = tf.argmax(class_box_scores, axis=-1)

    return class_boxes, class_box_scores, classes 
Example #2
Source File: diin_utils.py    From BERT with Apache License 2.0
def self_attention(config, is_train, p, p_mask=None, scope=None): #[N, L, 2d]
    with tf.variable_scope(scope or "self_attention"):
        PL = p.get_shape()[1]
        dim = p.get_shape()[-1]
        # HL = tf.shape(h)[1]
        p_aug_1 = tf.tile(tf.expand_dims(p, 2), [1,1,config.max_seq_len_word,1])
        p_aug_2 = tf.tile(tf.expand_dims(p, 1), [1,config.max_seq_len_word,1,1]) #[N, PL, HL, 2d]

        if p_mask is None:
            self_mask = None
        else:
            p_mask_aug_1 = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, config.max_seq_len_word, 1]), tf.bool), axis=3)
            p_mask_aug_2 = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 1), [1, config.max_seq_len_word, 1, 1]), tf.bool), axis=3)
            self_mask = p_mask_aug_1 & p_mask_aug_2


        h_logits = get_logits([p_aug_1, p_aug_2], None, True, wd=config.wd, mask=self_mask,
                              is_train=is_train, func=config.self_att_logit_func, scope='h_logits')  # [N, PL, HL]
        self_att = softsel(p_aug_2, h_logits) 

        return self_att 
Example #3
Source File: diin_utils.py    From BERT with Apache License 2.0
def bi_attention_mx(config, is_train, p, h, p_mask=None, h_mask=None, scope=None): #[N, L, 2d]
    with tf.variable_scope(scope or "dense_logit_bi_attention"):
        PL = p.get_shape()[1]
        HL = h.get_shape()[1]
        p_aug = tf.tile(tf.expand_dims(p, 2), [1,1,config.max_seq_len_word,1])
        h_aug = tf.tile(tf.expand_dims(h, 1), [1,config.max_seq_len_word,1,1]) #[N, PL, HL, 2d]

        if p_mask is None:
            ph_mask = None
        else:
            p_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, config.max_seq_len_word, 1]), tf.bool), axis=3)
            h_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(h_mask, 1), [1, config.max_seq_len_word, 1, 1]), tf.bool), axis=3)
            ph_mask = p_mask_aug & h_mask_aug
        ph_mask = None  # the mask computed above is overridden here and not used below

        
        h_logits = p_aug * h_aug
        
        return h_logits 
Example #4
Source File: transformer_memory_test.py    From BERT with Apache License 2.0
def testLoss(self):
    batch_size = 2
    key_depth = 5
    val_depth = 5
    memory_size = 4
    window_size = 3
    x_depth = 5
    memory = transformer_memory.TransformerMemory(
        batch_size, key_depth, val_depth, memory_size)
    x = tf.random_uniform([batch_size, window_size, x_depth], minval=.0)
    memory_results, _, _, _ = (
        memory.pre_attention(
            tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.int32),
            x, None, None))
    x = memory.post_attention(memory_results, x)
    with tf.control_dependencies([tf.print("x", x)]):
      is_nan = tf.reduce_any(tf.math.is_nan(x))
    with self.test_session() as session:
      session.run(tf.global_variables_initializer())
      for _ in range(100):
        is_nan_value, _ = session.run([is_nan, x])
    self.assertEqual(is_nan_value, False) 
Example #5
Source File: beam_search.py    From models with Apache License 2.0
def search(self, initial_ids, initial_cache):
    """Beam search for sequences with highest scores."""
    state, state_shapes = self._create_initial_state(initial_ids, initial_cache)

    finished_state = tf.while_loop(
        cond=self._continue_search, body=self._search_step, loop_vars=[state],
        shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
    finished_state = finished_state[0]

    alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
    alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
    finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
    finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
    finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]

    # Account for corner case where there are no finished sequences for a
    # particular batch item. In that case, return alive sequences for that batch
    # item.
    finished_seq = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_seq, alive_seq)
    finished_scores = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_scores, alive_log_probs)
    return finished_seq, finished_scores 
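The corner-case handling above relies on a per-batch-item flag: tf.reduce_any(finished_flags, axis=1) is True only for items where at least one beam finished, and tf.where then falls back to the alive sequences elsewhere. A toy sketch of that selection pattern (shapes and values invented for illustration):

import tensorflow as tf

finished_flags = tf.constant([[True, False],    # batch item 0: one beam finished
                              [False, False]])  # batch item 1: nothing finished
finished_scores = tf.constant([1.0, 2.0])
alive_log_probs = tf.constant([-3.0, -4.0])

has_finished = tf.reduce_any(finished_flags, axis=1)               # [ True False]
result = tf.where(has_finished, finished_scores, alive_log_probs)  # fall back where needed
print(result)  # [ 1. -4.]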
Example #6
Source File: beam_search.py    From models with Apache License 2.0
def search(self, initial_ids, initial_cache):
    """Beam search for sequences with highest scores."""
    state, state_shapes = self._create_initial_state(initial_ids, initial_cache)

    finished_state = tf.while_loop(
        cond=self._continue_search, body=self._search_step, loop_vars=[state],
        shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
    finished_state = finished_state[0]

    alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
    alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
    finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
    finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
    finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]

    # Account for corner case where there are no finished sequences for a
    # particular batch item. In that case, return alive sequences for that batch
    # item.
    finished_seq = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_seq, alive_seq)
    finished_scores = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_scores, alive_log_probs)
    return finished_seq, finished_scores 
Example #7
Source File: helpers.py    From vae_tacotron2 with MIT License
def next_inputs(self, time, outputs, state, sample_ids, stop_token_prediction, name=None):
		'''Stop on EOS. Otherwise, pass the last output as the next input and pass through state.'''
		with tf.name_scope('TacoTestHelper'):
			#A sequence is finished when the output probability is > 0.5
			finished = tf.cast(tf.round(stop_token_prediction), tf.bool)

			#Since we are predicting r frames at each step, two modes are 
			#then possible:
			#	Stop when the model outputs a p > 0.5 for any frame between r frames (Recommended)
			#	Stop when the model outputs a p > 0.5 for all r frames (Safer)
			#Note:
			#	With enough training steps, the model should be able to predict when to stop correctly
			#	and the use of stop_at_any = True would be recommended. If however the model didn't
			#	learn to stop correctly yet, (stops too soon) one could choose to use the safer option 
			#	to get a correct synthesis
			if hparams.stop_at_any:
				finished = tf.reduce_any(finished) #Recommended
			else:
				finished = tf.reduce_all(finished) #Safer option
			
			# Feed last output frame as next input. outputs is [N, output_dim * r]
			next_inputs = outputs[:, -self._output_dim:]
			next_state = state
			return (finished, next_inputs, next_state) 
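The long comment above comes down to a single op. A toy illustration of the two stopping modes (stop-token values invented):

import tensorflow as tf

# Rounded stop-token predictions for r = 3 output frames.
stop_pred = tf.constant([0., 1., 0.])
finished = tf.cast(tf.round(stop_pred), tf.bool)   # [False  True False]

print(tf.reduce_any(finished))  # True  -- stop if ANY frame predicts stop (stop_at_any)
print(tf.reduce_all(finished))  # False -- stop only if ALL r frames predict stop (safer)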
Example #8
Source File: ops.py    From ICML2019-TREX with MIT License
def __call__(self,input_var,name=None,**kwargs) :
        def _init():
            v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,2])
            t = tf.nn.conv2d(input_var,v_norm,self.strides,self.padding,data_format='NHWC')
            mu,var = tf.nn.moments(t,axes=[0,1,2])
            std = tf.sqrt(var+self.epsilon)
            return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]

        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])

        with tf.control_dependencies(init_ops):
            w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2])
            return tf.nn.bias_add(
                        tf.nn.conv2d(input_var, w,data_format='NHWC',
                                    strides=self.strides, padding=self.padding),
                        self.b,data_format='NHWC',name=name) 
Example #9
Source File: ops.py    From ICML2019-TREX with MIT License
def __call__(self,input_var,name=None,**kwargs) :
        if( input_var.shape.ndims > 2 ) :
            dims = tf.reduce_prod(tf.shape(input_var)[1:])
            input_var = tf.reshape(input_var,[-1,dims])

        def _init():
            v_norm = tf.nn.l2_normalize(self.v,axis=0)
            t = tf.matmul(input_var,v_norm)
            mu,var = tf.nn.moments(t,axes=[0])
            std = tf.sqrt(var+self.epsilon)
            return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]

        require_init = tf.reduce_any(tf.is_nan(self.g))
        init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])

        with tf.control_dependencies(init_ops):
            w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
            return tf.matmul(input_var,w)+self.b 
Example #10
Source File: structured_graph_builder.py    From Gun-Detector with Apache License 2.0
def _BuildSequence(self,
                     batch_size,
                     max_steps,
                     features,
                     state,
                     use_average=False):
    """Adds a sequence of beam parsing steps."""
    def Advance(state, step, scores_array, alive, alive_steps, *features):
      scores = self._BuildNetwork(features,
                                  return_average=use_average)['logits']
      scores_array = scores_array.write(step, scores)
      features, state, alive = (
          gen_parser_ops.beam_parser(state, scores, self._feature_size))
      return [state, step + 1, scores_array, alive, alive_steps + tf.cast(
          alive, tf.int32)] + list(features)

    # args: (state, step, scores_array, alive, alive_steps, *features)
    def KeepGoing(*args):
      return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

    step = tf.constant(0, tf.int32, [])
    scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
                                                size=0,
                                                dynamic_size=True)
    alive = tf.constant(True, tf.bool, [batch_size])
    alive_steps = tf.constant(0, tf.int32, [batch_size])
    t = tf.while_loop(
        KeepGoing,
        Advance,
        [state, step, scores_array, alive, alive_steps] + list(features),
        shape_invariants=[tf.TensorShape(None)] * (len(features) + 5),
        parallel_iterations=100)

    # Link to the final nodes/values of ops that have passed through While:
    return {'state': t[0],
            'concat_scores': t[2].concat(),
            'alive': t[3],
            'alive_steps': t[4]} 
Example #11
Source File: tensorflow_backend.py    From GraphicDesignPatternByPython with MIT License
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).

    # Arguments
        x: Tensor or variable.
        axis: An integer or list of integers in [-rank(x), rank(x)),
            the axes to compute the logical or. If `None` (default), computes
            the logical or over all dimensions.
        keepdims: whether to drop or broadcast the reduction axes.

    # Returns
        A uint8 tensor (0s and 1s).
    """
    x = tf.cast(x, tf.bool)
    return tf.reduce_any(x, axis, keepdims) 
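A short sketch of what this wrapper does, written directly in TensorFlow (the cast to bool means any nonzero value counts as True; values are illustrative):

import tensorflow as tf

x = tf.constant([[1., 0., 0.],
                 [0., 0., 0.]])
x_bool = tf.cast(x, tf.bool)           # nonzero -> True
print(tf.reduce_any(x_bool, axis=1))   # [ True False]
print(tf.reduce_any(x_bool))           # True (reduced over all axes)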
Example #12
Source File: box_list_ops.py    From tensorflow with BSD 2-Clause "Simplified" License
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
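The key step is tf.reduce_any(coordinate_violations, 1): a box is pruned only if at least one of the four "completely outside" conditions holds. A toy sketch with two boxes and a unit window (values invented for illustration):

import tensorflow as tf

# [ymin, xmin, ymax, xmax] per box; the window is [0, 0, 1, 1].
boxes = tf.constant([[0.2, 0.2, 0.6, 0.6],    # inside the window
                     [1.1, 0.0, 1.5, 0.4]])   # entirely outside (ymin >= window ymax)
win_y_min, win_x_min, win_y_max, win_x_max = 0.0, 0.0, 1.0, 1.0

y_min, x_min, y_max, x_max = tf.split(boxes, num_or_size_splits=4, axis=1)
violations = tf.concat([
    tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
    tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
], 1)
keep = tf.logical_not(tf.reduce_any(violations, 1))
print(keep)  # [ True False]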
Example #13
Source File: box_list_ops.py    From Gun-Detector with Apache License 2.0
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #14
Source File: box_list_ops.py    From Gun-Detector with Apache License 2.0
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
      the window.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #15
Source File: voc_ds_test.py    From efficientdet-tf with GNU General Public License v3.0
def test_compute_gt(self):
        ds = voc.build_dataset('test/data/VOC2007', 
                               im_input_size=(512, 512), 
                               shuffle=False)
        ds = ds.skip(1).batch(1)

        wrapped_ds = utils.training.wrap_detection_dataset(ds, (512, 512), 20)
        anchors = self.generate_anchors(config.AnchorsConfig(), 512)

        im, (regressors, l) = next(iter(wrapped_ds.take(1)))

        im = unnormalize_image(im[0])
        near_mask = regressors[0, :, -1] == 1
        nearest_regressors = tf.expand_dims(
            tf.boolean_mask(regressors[0], near_mask)[:, :-1], 0)
        nearest_anchors = tf.expand_dims(anchors[near_mask], 0)

        # apply regression to boxes
        regressed_boxes = utils.bndbox.regress_bndboxes(nearest_anchors, 
                                                        nearest_regressors)

        im = utils.visualizer.draw_boxes(
            im, nearest_anchors[0], colors=[(255, 255, 0)])
        im = utils.visualizer.draw_boxes(
            im, regressed_boxes[0], colors=[(0, 255, 255)])
        
        plt.imshow(im)
        plt.axis('off')
        plt.show(block=True)

        print('GT shapes:', l.shape, regressors.shape)
        print('Found any overlapping anchor?', 
              tf.reduce_any(tf.equal(l[:, :, -1], 1.))) 
Example #16
Source File: box_list_ops.py    From tpu_models with Apache License 2.0
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #17
Source File: data.py    From tpu_models with Apache License 2.0
def metric_fn(answers, prediction, start, end, yp1, yp2, num_answers):
  """Compute span accuracies and token F1/EM scores."""

  yp1 = tf.expand_dims(yp1, -1)
  yp2 = tf.expand_dims(yp2, -1)
  answer_mask = tf.sequence_mask(num_answers)

  start = tf.to_int64(start)
  end = tf.to_int64(end)
  start_correct = tf.reduce_any(tf.equal(start, yp1) & answer_mask, 1)
  end_correct = tf.reduce_any(tf.equal(end, yp2) & answer_mask, 1)
  correct = start_correct & end_correct

  em = tf.py_func(
      enum_fn(_exact_match_score, dtype='float32'),
      [prediction, answers, answer_mask], 'float32')
  f1 = tf.py_func(
      enum_fn(_f1_score, dtype='float32'), [prediction, answers, answer_mask],
      'float32')

  eval_metric_ops = {
      # TODO(ddohan): Add other useful metrics
      'acc_start':
          tf.metrics.mean(tf.cast(start_correct, 'float')),
      'acc_end':
          tf.metrics.mean(tf.cast(end_correct, 'float')),
      'acc_span':
          tf.metrics.mean(tf.cast(correct, 'float')),
      'em':
          tf.metrics.mean(em),
      'f1':
          tf.metrics.mean(f1),
      # Number of questions processed
      'num_question':
          tf.metrics.true_positives(
              tf.ones([tf.shape(prediction)[0]]),
              tf.ones([tf.shape(prediction)[0]]))
  }
  return eval_metric_ops 
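The span accuracies above use tf.reduce_any along the answer dimension: a predicted start (or end) counts as correct if it matches any of the masked gold answers for that question. A toy sketch (two questions, up to two gold answers each; values invented):

import tensorflow as tf

start = tf.constant([[3, 7],                   # gold answer starts, question 0
                     [5, 0]], dtype=tf.int64)  # question 1 (second slot is padding)
yp1 = tf.constant([[3],
                   [9]], dtype=tf.int64)       # predicted starts
answer_mask = tf.sequence_mask([2, 1], maxlen=2)  # which answer slots are real

start_correct = tf.reduce_any(tf.equal(start, yp1) & answer_mask, 1)
print(start_correct)  # [ True False]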
Example #18
Source File: box_list_ops.py    From Traffic-Rule-Violation-Detection-System with MIT License
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
      the window.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #19
Source File: variable_mgr_util.py    From training_results_v0.5 with Apache License 2.0
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
  """Calculate the average gradient for each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over towers. The inner list is over individual gradients.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. The has_nan_or_inf indicates the grads has nan or inf.
  """
  agg_grads = []
  has_nan_or_inf_list = []

  for single_grads in zip(*tower_grads):
    grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
        single_grads, use_mean, check_inf_nan)
    agg_grads.append(grad_and_var)
    has_nan_or_inf_list.append(has_nan_or_inf)

  if check_inf_nan:
    return agg_grads, tf.reduce_any(has_nan_or_inf_list)
  else:
    return agg_grads, None 
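With check_inf_nan set, each per-variable aggregation yields one scalar boolean, and tf.reduce_any collapses the list into a single "some gradient was non-finite" flag. A minimal sketch of that final step (the per-variable flags below are stand-ins for illustration):

import tensorflow as tf

# One scalar bool per aggregated gradient, e.g. from checking
# tf.logical_not(tf.reduce_all(tf.math.is_finite(grad))) per variable.
has_nan_or_inf_list = [tf.constant(False), tf.constant(True), tf.constant(False)]

any_bad = tf.reduce_any(has_nan_or_inf_list)  # list is stacked into a 1-D bool tensor
print(any_bad)  # True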
Example #20
Source File: variable_mgr_util.py    From training_results_v0.5 with Apache License 2.0
def aggregate_gradients_using_copy_with_variable_colocation(
    tower_grads, use_mean, check_inf_nan):
  """Aggregate gradients, colocating computation with the gradient's variable.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over towers. The inner list is over individual gradients. All variables
      of the same gradient across towers must be the same (that is,
      tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: If true, check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. The has_nan_or_inf indicates the grads has nan or inf.
  """
  agg_grads = []
  has_nan_or_inf_list = []
  for single_grads in zip(*tower_grads):
    # Note that each single_grads looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    var = single_grads[0][1]

    for _, v in single_grads:
      assert v == var

    with tf.device(var.device):
      grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
          single_grads, use_mean, check_inf_nan)
      agg_grads.append(grad_and_var)
      has_nan_or_inf_list.append(has_nan_or_inf)

  if check_inf_nan:
    return agg_grads, tf.reduce_any(has_nan_or_inf_list)
  else:
    return agg_grads, None 
Example #21
Source File: variable_mgr_util.py    From training_results_v0.5 with Apache License 2.0
def aggregate_gradients_using_copy_with_device_selection(
    benchmark_cnn, tower_grads, use_mean, check_inf_nan):
  """Aggregate gradients, controlling device for the aggregation.

  Args:
    benchmark_cnn: benchmark_cnn class.
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over towers. The inner list is over individual gradients.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: If true, check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen from
      the first tower. The has_nan_or_inf indicates the grads has nan or inf.
  """
  if benchmark_cnn.local_parameter_device_flag == 'gpu':
    avail_devices = benchmark_cnn.raw_devices
  else:
    avail_devices = [benchmark_cnn.param_server_device]
  agg_grads = []
  has_nan_or_inf_list = []
  for i, single_grads in enumerate(zip(*tower_grads)):
    with tf.device(avail_devices[i % len(avail_devices)]):
      grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
          single_grads, use_mean, check_inf_nan)
      agg_grads.append(grad_and_var)
      has_nan_or_inf_list.append(has_nan_or_inf)
  if check_inf_nan:
    return agg_grads, tf.reduce_any(has_nan_or_inf_list)
  else:
    return agg_grads, None 
Example #22
Source File: mask_rcnn_architecture.py    From training_results_v0.5 with Apache License 2.0
def _self_suppression(iou, _, iou_sum):
  batch_size = tf.shape(iou)[0]
  can_suppress_others = tf.cast(
      tf.reshape(tf.reduce_max(iou, 1) <= 0.5, [batch_size, -1, 1]), iou.dtype)
  iou_suppressed = tf.reshape(
      tf.cast(tf.reduce_max(can_suppress_others * iou, 1) <= 0.5, iou.dtype),
      [batch_size, -1, 1]) * iou
  iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2])
  return [
      iou_suppressed,
      tf.reduce_any(iou_sum - iou_sum_new > 0.5), iou_sum_new
  ] 
Example #23
Source File: box_list_ops.py    From Traffic-Rule-Violation-Detection-System with MIT License
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #24
Source File: box_list_ops.py    From yolo_v2 with Apache License 2.0
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #25
Source File: box_list_ops.py    From yolo_v2 with Apache License 2.0
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #26
Source File: structured_graph_builder.py    From yolo_v2 with Apache License 2.0
def _BuildSequence(self,
                     batch_size,
                     max_steps,
                     features,
                     state,
                     use_average=False):
    """Adds a sequence of beam parsing steps."""
    def Advance(state, step, scores_array, alive, alive_steps, *features):
      scores = self._BuildNetwork(features,
                                  return_average=use_average)['logits']
      scores_array = scores_array.write(step, scores)
      features, state, alive = (
          gen_parser_ops.beam_parser(state, scores, self._feature_size))
      return [state, step + 1, scores_array, alive, alive_steps + tf.cast(
          alive, tf.int32)] + list(features)

    # args: (state, step, scores_array, alive, alive_steps, *features)
    def KeepGoing(*args):
      return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

    step = tf.constant(0, tf.int32, [])
    scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
                                                size=0,
                                                dynamic_size=True)
    alive = tf.constant(True, tf.bool, [batch_size])
    alive_steps = tf.constant(0, tf.int32, [batch_size])
    t = tf.while_loop(
        KeepGoing,
        Advance,
        [state, step, scores_array, alive, alive_steps] + list(features),
        shape_invariants=[tf.TensorShape(None)] * (len(features) + 5),
        parallel_iterations=100)

    # Link to the final nodes/values of ops that have passed through While:
    return {'state': t[0],
            'concat_scores': t[2].concat(),
            'alive': t[3],
            'alive_steps': t[4]} 
Example #27
Source File: box_list_ops.py    From HereIsWally with MIT License
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #28
Source File: box_list_ops.py    From HereIsWally with MIT License
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #29
Source File: box_list_ops.py    From garbage-object-detection-tensorflow with MIT License
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices 
Example #30
Source File: box_list_ops.py    From garbage-object-detection-tensorflow with MIT License
def prune_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the given
  window. See also clip_to_window which only prunes bounding boxes that fall
  completely outside the window, and clips any bounding boxes that partially
  overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
        tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices