Python tensorflow.pack() Examples

The following are 30 code examples of tensorflow.pack(), collected from open-source projects; the source file, project, and license are noted above each example. Note that tf.pack() was renamed to tf.stack() in TensorFlow 1.0, so these examples target pre-1.0 releases (a few exercise both names side by side).
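Before diving in, here is a minimal sketch of the op itself (tensor values are illustrative): tf.pack() stacks a list of rank-R tensors into a single rank-(R+1) tensor, and tf.stack() is the drop-in replacement from TensorFlow 1.0 onward.

import tensorflow as tf

x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
packed = tf.pack([x, y, z])            # shape (3, 2); pre-1.0 name
packed_t = tf.pack([x, y, z], axis=1)  # shape (2, 3)
stacked = tf.stack([x, y, z])          # same op under the 1.0+ name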
Example #1
Source File: dataset.py    From PSPNet-Keras-tensorflow with MIT License
def process_image(img, scale, isotropic, crop, mean):
    '''Crops, scales, and normalizes the given image.
    scale : The image will first be scaled to this size.
            If isotropic is true, the smaller side is rescaled to this,
            preserving the aspect ratio.
    crop  : After scaling, a central crop of this size is taken.
    mean  : Subtracted from the image.
    '''
    # Rescale
    if isotropic:
        img_shape = tf.to_float(tf.shape(img)[:2])
        min_length = tf.minimum(img_shape[0], img_shape[1])
        new_shape = tf.to_int32((scale / min_length) * img_shape)
    else:
        new_shape = tf.pack([scale, scale])
    img = tf.image.resize_images(img, new_shape[0], new_shape[1])
    # Center crop
    # Use the slice workaround until crop_to_bounding_box supports deferred tensor shapes
    # See: https://github.com/tensorflow/tensorflow/issues/521
    offset = (new_shape - crop) / 2
    img = tf.slice(img, begin=tf.pack([offset[0], offset[1], 0]), size=tf.pack([crop, crop, -1]))
    # Mean subtraction
    return tf.to_float(img) - mean 
Example #2
Source File: ssim.py    From Multiview2Novelview with MIT License
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
    weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
    mssim = []
    mcs = []
    for l in range(level):
        ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
        mssim.append(tf.reduce_mean(ssim_map))
        mcs.append(tf.reduce_mean(cs_map))
        filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        img1 = filtered_im1
        img2 = filtered_im2

    # list to tensor of dim D+1
    mssim = tf.pack(mssim, axis=0)
    mcs = tf.pack(mcs, axis=0)

    value = (tf.reduce_prod(
        mcs[0:level-1]**weight[0:level-1]) * (mssim[level-1]**weight[level-1]))

    if mean_metric:
        value = tf.reduce_mean(value)
    return value 
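The closing computation is the standard multi-scale SSIM combination: the contrast-structure terms of the finer scales are multiplied with the full SSIM of the coarsest scale, each raised to its weight:

value = ( prod over l = 0 .. level-2 of mcs[l]^weight[l] ) * mssim[level-1]^weight[level-1]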
Example #3
Source File: pack_op_test.py    From deep_image_model with Apache License 2.0
def testGradientsAxis1(self):
    np.random.seed(7)
    for shape in (2, 3), (3, 2), (4, 3, 2):
      data = np.random.randn(*shape)
      shapes = [shape[1:]] * shape[0]
      out_shape = list(shape[1:])
      out_shape.insert(1, shape[0])
      with self.test_session(use_gpu=True):
        # TODO(irving): Remove list() once we handle maps correctly
        xs = list(map(tf.constant, data))
        c = tf.pack(xs, axis=1)
        err = tf.test.compute_gradient_error(xs, shapes, c, out_shape)
        self.assertLess(err, 1e-6)

        c = tf.stack(xs, axis=1)
        err = tf.test.compute_gradient_error(xs, shapes, c, out_shape)
        self.assertLess(err, 1e-6) 
Example #4
Source File: pack_op_test.py    From deep_image_model with Apache License 2.0
def testAgainstNumpy(self):
    # For 1 to 5 dimensions.
    for i in range(1, 6):
      expected = np.random.random(np.random.permutation(i) + 1)

      # For all the possible axis to split it, including negative indices.
      for j in range(-i, i):
        test_arrays = np_split_squeeze(expected, j)

        with self.test_session(use_gpu=True):
          actual_pack = tf.pack(test_arrays, axis=j)
          self.assertEqual(expected.shape, actual_pack.get_shape())
          actual_pack = actual_pack.eval()

          actual_stack = tf.stack(test_arrays, axis=j)
          self.assertEqual(expected.shape, actual_stack.get_shape())
          actual_stack = actual_stack.eval()

        self.assertNDArrayNear(expected, actual_pack, 1e-6)
        self.assertNDArrayNear(expected, actual_stack, 1e-6) 
Example #5
Source File: sdc_export_graph.py    From tensorflow-litterbox with Apache License 2.0
def _merge_outputs(outputs, weights):
    assert outputs

    merged = defaultdict(list)
    weights_tensor = tf.pack(weights)
    print('weights ', weights_tensor.get_shape())

    # recombine multiple model outputs by dict key or list position under output name based dict
    if isinstance(outputs[0], dict):
        for o in outputs:
            for name, tensor in o.items():
                merged['output_%s' % name].append(tensor)
    elif isinstance(outputs[0], list):
        for o in outputs:
            for index, tensor in enumerate(o):
                merged['output_%d' % index].append(tensor)
    else:
        merged['output'] = outputs

    reduced = {name: _weighted_mean(value_list, weights_tensor) for name, value_list in merged.items()}
    for k, v in reduced.items():
        print(k, v, v.get_shape())

    return reduced 
Example #6
Source File: ops.py    From inception_v3 with Apache License 2.0
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for op_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.op_scope([labels], scope, 'OneHotEncoding'):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels 
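On TensorFlow releases that provide tf.one_hot, the body above collapses to a single call; a hedged equivalent, not taken from the source project:

onehot_labels = tf.one_hot(labels, depth=num_classes, on_value=1.0, off_value=0.0)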
Example #7
Source File: model.py    From tensorflow-deeplab-lfov with MIT License
def loss(self, img_batch, label_batch):
        """Create the network, run inference on the input batch and compute loss.
        
        Args:
          img_batch: batch of pre-processed images.
          label_batch: batch of ground-truth segmentation labels.
          
        Returns:
          Pixel-wise softmax loss.
        """
        raw_output = self._create_network(tf.cast(img_batch, tf.float32), keep_prob=tf.constant(0.5))
        prediction = tf.reshape(raw_output, [-1, n_classes])
        
        # Need to resize labels and convert using one-hot encoding.
        label_batch = self.prepare_label(label_batch, tf.pack(raw_output.get_shape()[1:3]))
        gt = tf.reshape(label_batch, [-1, n_classes])
        
        # Pixel-wise softmax loss.
        loss = tf.nn.softmax_cross_entropy_with_logits(prediction, gt)
        reduced_loss = tf.reduce_mean(loss)
        
        return reduced_loss 
Example #8
Source File: odes_test.py    From deep_image_model with Apache License 2.0
def test_5th_order_polynomial(self):
    # this should be an exact fit
    f = lambda x: x ** 4 + x ** 3 - 2 * x ** 2 + 4 * x + 5
    f_prime = lambda x: 4 * x ** 3 + 3 * x ** 2 - 4 * x + 4
    coeffs = odes._interp_fit(
        f(0.0), f(10.0), f(5.0), f_prime(0.0), f_prime(10.0), 10.0)
    times = np.linspace(0, 10, dtype=np.float32)
    y_fit = tf.pack([odes._interp_evaluate(coeffs, 0.0, 10.0, t)
                     for t in times])
    y_expected = f(times)
    with self.test_session() as sess:
      y_actual = sess.run(y_fit)
      self.assertAllClose(y_expected, y_actual)

    # attempt interpolation outside bounds
    y_invalid = odes._interp_evaluate(coeffs, 0.0, 10.0, 100.0)
    with self.test_session() as sess:
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(y_invalid) 
Example #9
Source File: model.py    From sonic_contest with MIT License 6 votes vote down vote up
def deconv2d(x, out_shape, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None, prevNumFeat=None):
    with tf.variable_scope(name):
        num_filters = out_shape[-1]
        prevNumFeat = int(x.get_shape()[3]) if prevNumFeat is None else prevNumFeat
        stride_shape = [1, stride[0], stride[1], 1]
        # transpose_filter : [height, width, out_channels, in_channels]
        filter_shape = [filter_size[0], filter_size[1], num_filters, prevNumFeat]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:2]) * prevNumFeat
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width"
        fan_out = np.prod(filter_shape[:3])
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        deconv2d = tf.nn.conv2d_transpose(x, w, tf.pack(out_shape), stride_shape, pad)
        # deconv2d = tf.reshape(tf.nn.bias_add(deconv2d, b), deconv2d.get_shape())
        return deconv2d 
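The bound w_bound above is the Glorot/Xavier uniform limit sqrt(6 / (fan_in + fan_out)). A worked example with illustrative sizes, 3x3 filters, prevNumFeat=64 and num_filters=32:

fan_in  = 3 * 3 * 64 = 576
fan_out = 3 * 3 * 32 = 288
w_bound = sqrt(6 / (576 + 288)) = sqrt(6 / 864) ≈ 0.0833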
Example #10
Source File: modellib.py    From rec-attend-public with MIT License
def build_skip_conn_attn(cnn_channels, h_cnn_time, x_time, timespan):
  """Build skip connection for attention based model."""
  skip = [None]
  skip_ch = [0]
  nlayers = len(h_cnn_time[0])
  timespan = len(h_cnn_time)
  for jj in range(nlayers):
    lidx = nlayers - jj - 2
    if lidx >= 0:
      ll = [h_cnn_time[tt][lidx] for tt in range(timespan)]
    else:
      ll = x_time
    layer = tf.concat(1, [tf.expand_dims(l, 1) for l in ll])
    ss = tf.shape(layer)
    layer = tf.reshape(layer, tf.pack([-1, ss[2], ss[3], ss[4]]))
    skip.append(layer)
    ch_idx = lidx + 1
    skip_ch.append(cnn_channels[ch_idx])
  return skip, skip_ch 
Example #11
Source File: custom_ops.py    From StackGAN with MIT License
def __call__(self, input_layer, output_size, scope=None, in_dim=None, stddev=0.02, bias_start=0.0):
        shape = input_layer.shape
        input_ = input_layer.tensor
        try:
            if len(shape) == 4:
                input_ = tf.reshape(input_, tf.pack([tf.shape(input_)[0], np.prod(shape[1:])]))
                input_.set_shape([None, np.prod(shape[1:])])
                shape = input_.get_shape().as_list()

            with tf.variable_scope(scope or "Linear"):
                matrix = self.variable("Matrix", [in_dim or shape[1], output_size], dt=tf.float32,
                                       init=tf.random_normal_initializer(stddev=stddev))
                bias = self.variable("bias", [output_size], init=tf.constant_initializer(bias_start))
                return input_layer.with_tensor(tf.matmul(input_, matrix) + bias, parameters=self.vars)
        except Exception:
            import ipdb; ipdb.set_trace() 
Example #12
Source File: custom_ops.py    From StackGAN with MIT License
def __call__(self, input_layer, output_shape,
                 k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
                 name="deconv2d"):
        output_shape[0] = input_layer.shape[0]
        ts_output_shape = tf.pack(output_shape)
        with tf.variable_scope(name):
            # filter : [height, width, output_channels, in_channels]
            w = self.variable('w', [k_h, k_w, output_shape[-1], input_layer.shape[-1]],
                              init=tf.random_normal_initializer(stddev=stddev))

            try:
                deconv = tf.nn.conv2d_transpose(input_layer, w,
                                                output_shape=ts_output_shape,
                                                strides=[1, d_h, d_w, 1])

            # Support for versions of TensorFlow before 0.7.0
            except AttributeError:
                deconv = tf.nn.deconv2d(input_layer, w, output_shape=ts_output_shape,
                                        strides=[1, d_h, d_w, 1])

            # biases = self.variable('biases', [output_shape[-1]], init=tf.constant_initializer(0.0))
            # deconv = tf.reshape(tf.nn.bias_add(deconv, biases), [-1] + output_shape[1:])
            deconv = tf.reshape(deconv, [-1] + output_shape[1:])

            return deconv 
Example #13
Source File: build_resnet_sdc.py    From tensorflow-litterbox with Apache License 2.0
def _build_global_context(
        net,
        is_training=False,
        bayesian=False,
        dropout_keep_prob=0.8):

    with tf.variable_scope('GlobalContext'):
        # Reduce feature dimension before LSTM to reduce param count
        net = slim.conv2d(net, 1024, 1, padding='VALID', scope='conv_reduce_1x1')

        #net = slim.dropout(net, dropout_keep_prob, is_training=bayesian or is_training, scope='Dropout')

        rows = tf.unpack(net, axis=1)
        net = tf.pack(
            [lstm.bidir_lstm(r, 512, scope='row%d' % i) for i, r in enumerate(rows)],
            axis=1)
        print('Horizontal LSTM', net.get_shape())

        cols = tf.unpack(net, axis=2)
        net = tf.pack(
            [lstm.bidir_lstm(r, 512, scope='col%d' % i) for i, r in enumerate(cols)],
            axis=2)
        print('Vertical LSTM', net.get_shape())

    return net 
Example #14
Source File: ChainCRF.py    From naacl18-multitask_argument_mining with Apache License 2.0
def batch_gather(reference, indices):
        '''Batchwise gathering of row indices.
    
        The numpy equivalent is reference[np.arange(batch_size), indices].
    
        # Arguments
            reference: tensor with ndim >= 2 of shape
              (batch_size, dim1, dim2, ..., dimN)
            indices: 1d integer tensor of shape (batch_size) satisfying
              0 <= i < dim2 for each element i.
    
        # Returns
            A tensor with shape (batch_size, dim2, ..., dimN)
            equal to reference[np.arange(batch_size), indices]
        '''
        batch_size = K.shape(reference)[0]
        indices = tf.pack([tf.range(batch_size), indices], axis=1)
        return tf.gather_nd(reference, indices) 
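A short usage sketch (values illustrative), selecting one column per batch row from a (batch_size, num_classes) matrix:

scores = tf.constant([[0.1, 0.9],
                      [0.8, 0.2]])      # shape (2, 2)
best = tf.constant([1, 0])              # one column index per row
picked = batch_gather(scores, best)     # -> [0.9, 0.8]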
Example #15
Source File: ops.py    From deeplearning-benchmark with Apache License 2.0
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for op_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.op_scope([labels], scope, 'OneHotEncoding'):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels 
Example #16
Source File: ops.py    From DM-GAN with MIT License
def one_hot_encoding(labels, num_classes, scope=None):
  """Transform numeric labels into onehot_labels.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    scope: Optional scope for name_scope.
  Returns:
    one hot encoding of the labels.
  """
  with tf.name_scope(scope, 'OneHotEncoding', [labels]):
    batch_size = labels.get_shape()[0]
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
    onehot_labels.set_shape([batch_size, num_classes])
    return onehot_labels 
Example #17
Source File: loss_functions.py    From Adversarial_Video_Generation with MIT License
def adv_loss(preds, labels):
    """
    Calculates the sum of BCE losses between the predicted classifications and true labels.

    @param preds: The predicted classifications at each scale.
    @param labels: The true labels. (Same for every scale).

    @return: The adversarial loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(preds)):
        loss = bce_loss(preds[i], labels)
        scale_losses.append(loss)

    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses)) 
Example #18
Source File: loss_functions.py    From Adversarial_Video_Generation with MIT License
def lp_loss(gen_frames, gt_frames, l_num):
    """
    Calculates the sum of lp losses between the predicted and ground truth frames.

    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale.
    @param l_num: 1 or 2 for l1 and l2 loss, respectively.

    @return: The lp loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(gen_frames)):
        scale_losses.append(tf.reduce_sum(tf.abs(gen_frames[i] - gt_frames[i])**l_num))

    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses)) 
Example #19
Source File: symbolic_functions.py    From VDAIC2017 with MIT License
def batch_flatten(x):
    """
    Flatten the tensor except the first dimension.
    """
    shape = x.get_shape().as_list()[1:]
    if None not in shape:
        return tf.reshape(x, [-1, np.prod(shape)])
    return tf.reshape(x, tf.pack([tf.shape(x)[0], -1])) 
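Usage sketch with illustrative shapes: for an input of static shape [None, 4, 4, 3] the static branch applies, reshaping to [-1, 48]; the tf.pack fallback fires only when a non-batch dimension is also unknown at graph time.

x = tf.placeholder(tf.float32, [None, 4, 4, 3])
y = batch_flatten(x)    # shape (?, 48), since 4 * 4 * 3 = 48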
Example #20
Source File: inception_v3.py    From RetinaNet_Tensorflow_Rotation with MIT License
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                      tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
Example #21
Source File: pool.py    From VDAIC2017 with MIT License
def FixedUnPooling(x, shape, unpool_mat=None):
    """
    Unpool the input with a fixed mat to perform kronecker product with.

    :param x: NHWC tensor
    :param shape: int or [h, w]
    :param unpool_mat: a tf/np matrix with size=shape. If None, will use a mat
        with 1 at top-left corner.
    :returns: NHWC tensor
    """
    shape = shape2d(shape)

    # a faster implementation for this special case
    if shape[0] == 2 and shape[1] == 2 and unpool_mat is None:
        return UnPooling2x2ZeroFilled(x)

    input_shape = tf.shape(x)
    if unpool_mat is None:
        mat = np.zeros(shape, dtype='float32')
        mat[0][0] = 1
        unpool_mat = tf.constant(mat, name='unpool_mat')
    elif isinstance(unpool_mat, np.ndarray):
        unpool_mat = tf.constant(unpool_mat, name='unpool_mat')
    assert unpool_mat.get_shape().as_list() == list(shape)

    # perform a tensor-matrix kronecker product
    fx = flatten(tf.transpose(x, [0, 3, 1, 2]))
    fx = tf.expand_dims(fx, -1)                     # (b*c*h*w) x 1
    mat = tf.expand_dims(flatten(unpool_mat), 0)    # 1 x (sh*sw)
    prod = tf.matmul(fx, mat)                       # (b*c*h*w) x (sh*sw)
    prod = tf.reshape(prod, tf.pack(
        [-1, input_shape[3], input_shape[1], input_shape[2], shape[0], shape[1]]))
    prod = tf.transpose(prod, [0, 2, 4, 3, 5, 1])
    prod = tf.reshape(prod, tf.pack(
        [-1, input_shape[1] * shape[0], input_shape[2] * shape[1], input_shape[3]]))
    return prod 
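For intuition: with the default mat (a 1 at the top-left), the Kronecker product copies each input value to the top-left corner of its shape[0] x shape[1] output block and zero-fills the rest. An illustrative 2x2 single-channel input:

# input x, shape (1, 2, 2, 1):   FixedUnPooling(x, 2), shape (1, 4, 4, 1):
# [[1, 2],                       [[1, 0, 2, 0],
#  [3, 4]]                        [0, 0, 0, 0],
#                                 [3, 0, 4, 0],
#                                 [0, 0, 0, 0]]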
Example #22
Source File: inception_v2.py    From RetinaNet_Tensorflow_Rotation with MIT License
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are is large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                      tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
Example #23
Source File: pack_op_test.py    From deep_image_model with Apache License 2.0
def testZeroSize(self):
    # Verify that pack doesn't crash for zero size inputs
    with self.test_session(use_gpu=True):
      for shape in (0,), (3, 0), (0, 3):
        x = np.zeros((2,) + shape)
        p = tf.pack(list(x)).eval()
        self.assertAllEqual(p, x)

        p = tf.stack(list(x)).eval()
        self.assertAllEqual(p, x) 
Example #24
Source File: predictron.py    From predictron with MIT License
def build_preturns(self):
    ''' Eqn (2) '''

    g_preturns = []
    # for k = 0, g_0 = v[0], still fits.
    for k in range(self.max_depth, -1, -1):
      g_k = self.values[:, k, :]
      for kk in range(k, 0, -1):
        g_k = self.rewards[:, kk, :] + self.gammas[:, kk, :] * g_k
      g_preturns.append(g_k)
    # reverse to make 0...K from K...0
    g_preturns = g_preturns[::-1]
    self.g_preturns = tf.pack(g_preturns, axis=1, name='preturns')
    self.g_preturns = tf.reshape(self.g_preturns, [-1, self.max_depth + 1, self.maze_size]) 
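Unrolling the two loops, the k-step preturn assembled above is

g^k = r_1 + gamma_1 * (r_2 + gamma_2 * ( ... (r_k + gamma_k * v_k) ... )),  with g^0 = v_0,

i.e. rewards and discounts are applied from step 1 inward, bottoming out at the depth-k value estimate, which is Eqn (2) of the Predictron paper.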
Example #25
Source File: text_corrector_models.py    From deep-text-corrector with Apache License 2.0
def build_input_bias(self, encoder_inputs, batch_corrective_tokens_mask):
        packed_one_hot_inputs = tf.one_hot(indices=tf.pack(
            encoder_inputs, axis=1), depth=self.target_vocab_size)
        return tf.maximum(batch_corrective_tokens_mask,
                          tf.reduce_max(packed_one_hot_inputs,
                                        reduction_indices=1)) 
Example #26
Source File: inception_v3.py    From tensorflow-litterbox with Apache License 2.0
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                      tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
Example #27
Source File: inception_v2.py    From tensorflow-litterbox with Apache License 2.0
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.pack([tf.minimum(shape[1], kernel_size[0]),
                      tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out 
Example #28
Source File: processor_sdc.py    From tensorflow-litterbox with Apache License 2.0
def process_example(self, tensors, mode='eval', thread_id=0):
        train = (mode == 'train')
        image, image_timestamp, camera_id = tensors[:3]

        #FIXME push single/multi image handling into image_process_sdc if we want to share random augmentations
        if self.num_input_images > 1:
            assert(len(image.get_shape()) > 0)
            print('Multi image', image.get_shape())
            split_image = tf.unpack(image)
            split_processed = []
            for i, x in enumerate(split_image):
                suffix = '%d' % i
                xp, _ = image_preprocess_sdc(
                    x, camera_id,
                    height=self.height, width=self.width, image_fmt=self.image_fmt,
                    normalize=self.standardize_input, train=train, summary_suffix=suffix, thread_id=thread_id)
                split_processed.append(xp)
            processed_image = tf.pack(split_processed)
            #FIXME need to sort out flip across multi-image inputs
            flip_coeff = tf.constant(1.0, dtype=tf.float32)
        else:
            print('Single image')
            processed_image, flip_coeff = image_preprocess_sdc(
                image, camera_id,
                height=self.height, width=self.width, image_fmt=self.image_fmt,
                normalize=self.standardize_input, train=train, thread_id=thread_id)

        if mode != 'pred':
            steering_angle, gps_coord = tensors[-2:]
            if steering_angle is not None:
                steering_angle = tf.mul(steering_angle, flip_coeff)
                if self.standardize_labels:
                    steering_angle /= STEERING_STD
                elif self.mu_law_steering:
                    print("Encode mu-law angles")
                    steering_angle = mu_law_steering_enc(steering_angle)
            if gps_coord is not None and self.standardize_labels:
                gps_coord = (gps_coord - GPS_MEAN) / GPS_STD
            return processed_image, image_timestamp, steering_angle, gps_coord
        else:
            return processed_image, image_timestamp, tf.zeros((1,)), tf.zeros((2,)) 
Example #29
Source File: image_ops.py    From rec-attend-public with MIT License
def random_flip_up_down(image, seed=None):
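  # Pre-1.0 tf.reverse takes a boolean vector selecting which dims to reverse;
  # only index 1 (the height dim, assuming a 4-D NHWC-style image) is randomized,
  # so the flip fires with probability 0.5.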
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(tf.pack([1.0, uniform_random, 1.0, 1.0]), 0.5)
  return tf.reverse(image, mirror) 
Example #30
Source File: image_ops.py    From rec-attend-public with MIT License
def random_flip_left_right(image, seed=None):
  uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
  mirror = math_ops.less(tf.pack([1.0, 1.0, uniform_random, 1.0]), 0.5)
  return tf.reverse(image, mirror)