Python tensorflow.python.ops.data_flow_ops.PaddingFIFOQueue() Examples

The following are 7 code examples of tensorflow.python.ops.data_flow_ops.PaddingFIFOQueue(). You can go to the original project or source file by following the links above each example, or check out all available functions/classes of the module tensorflow.python.ops.data_flow_ops.
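Before the project examples, here is a minimal standalone sketch of the class itself: a PaddingFIFOQueue accepts elements with partially defined shapes and zero-pads each component up to the largest shape in the batch on dequeue_many. This sketch is not taken from any of the projects below; it assumes the TF 1.x graph-mode API, using the same import path the examples use:

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import data_flow_ops

tf.disable_eager_execution()  # queues are a graph-mode (TF 1.x) construct

# shapes=[[None]] declares one int32 component of variable length.
q = data_flow_ops.PaddingFIFOQueue(
    capacity=10, dtypes=[tf.int32], shapes=[[None]])

with tf.Session() as sess:
  sess.run(q.enqueue([[1, 2, 3]]))
  sess.run(q.enqueue([[4]]))
  # dequeue_many zero-pads both elements to the longest length seen (3).
  print(sess.run(q.dequeue_many(2)))  # [[1 2 3] [4 0 0]]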
Example #1
Source File: input.py    From lambda-packs with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
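This one-liner is how the batching helpers in tf.train-style input pipelines pick a queue class: with dynamic_pad=True they get a PaddingFIFOQueue (partially defined shapes allowed, zero-padded on dequeue), otherwise a plain FIFOQueue (fully defined shapes required). A hedged usage sketch; the constructor call below uses the standard queue signature and is not part of the original snippet:

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import data_flow_ops

def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue)

# PaddingFIFOQueue tolerates the None dimension below; constructing a
# FIFOQueue with a partially defined shape would raise a ValueError.
queue = _which_queue(dynamic_pad=True)(
    capacity=32, dtypes=[dtypes.int32], shapes=[[None, 2]])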
Example #2
Source File: input.py    From auto-alt-text-lambda-api with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
Example #3
Source File: prefetch_queue.py    From tf-slim with Apache License 2.0
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue
          if dynamic_pad else data_flow_ops.FIFOQueue) 
Example #4
Source File: input.py    From deep_image_model with Apache License 2.0
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue)


Example #5
Source File: input.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
Example #6
Source File: input.py    From keras-lambda with MIT License
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue) 
Example #7
Source File: prefetch_queue_test.py    From tf-slim with Apache License 2.0
# Imports as in tf-slim's prefetch_queue_test.py (the snippet is a method
# of a tf.test.TestCase subclass, so it is not runnable on its own):
import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
from tf_slim.data import prefetch_queue  # path may vary across tf-slim versions

def testDynamicPad(self):
    with self.cached_session() as sess:
      # Create 3 tensors of variable but compatible shapes.
      var_shape = [None, 2]
      p1 = constant_op.constant([[1, 2], [3, 4]])
      p1.set_shape(var_shape)
      p2 = constant_op.constant([[5, 6], [7, 8], [9, 10]])
      p2.set_shape(var_shape)
      p3 = constant_op.constant([[11, 12]])
      p3.set_shape(var_shape)
      batch = [p1, p2, p3]
      batch_size = len(batch)

      zero64 = constant_op.constant(0, dtype=dtypes.int64)
      examples = variables.Variable(zero64)
      counter = examples.count_up_to(batch_size)

      # Create a PaddingFIFOQueue to enqueue these tensors.
      q = data_flow_ops.PaddingFIFOQueue(
          capacity=10, dtypes=[dtypes.int32], shapes=[var_shape])
      for tensor in [p1, p2, p3]:
        q.enqueue([tensor]).run()

      # Dequeue from the queue and batch them using batch().
      batches = input_lib.batch([q.dequeue(), counter], batch_size=batch_size,
                                num_threads=1, dynamic_pad=True)
      self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())

      # Finally, assemble them into prefetch_queue with dynamic_pad.
      batcher = prefetch_queue.prefetch_queue(batches, dynamic_pad=True)
      batches = batcher.dequeue()
      self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())

      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()

      values, _ = sess.run(batches)
      # We enqueued 3 tensors of shape [None, 2]; with dynamic_pad each
      # is zero-padded to the longest first dimension (3), so the batch
      # comes out with the fixed shape [3, 3, 2].
      self.assertTrue(np.array_equal(
          np.array([[[1, 2], [3, 4], [0, 0]],
                    [[5, 6], [7, 8], [9, 10]],
                    [[11, 12], [0, 0], [0, 0]]]),
          values))

      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)
      for thread in threads:
        thread.join()
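The padding in this test is performed by the PaddingFIFOQueue that batch(..., dynamic_pad=True) creates internally via the _which_queue helper shown in the earlier examples. The same [3, 3, 2] result can be reproduced directly with dequeue_many; a minimal standalone sketch, again assuming the TF 1.x graph-mode API:

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import data_flow_ops

tf.disable_eager_execution()

q = data_flow_ops.PaddingFIFOQueue(
    capacity=10, dtypes=[tf.int32], shapes=[[None, 2]])

with tf.Session() as sess:
  # Enqueue one element per run call so FIFO order is deterministic.
  for value in ([[1, 2], [3, 4]],
                [[5, 6], [7, 8], [9, 10]],
                [[11, 12]]):
    sess.run(q.enqueue([value]))
  # dequeue_many pads every element to the longest first dimension (3).
  out = sess.run(q.dequeue_many(3))
  assert out.shape == (3, 3, 2)
  assert np.array_equal(out[2], [[11, 12], [0, 0], [0, 0]])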