Python tensorflow.space_to_depth() Examples

The following are 30 code examples of tensorflow.space_to_depth(), drawn from open-source projects. The source file, project, and license are listed above each example.
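
Before diving in, a quick orientation: tf.space_to_depth rearranges non-overlapping block_size x block_size spatial patches into the channel dimension, turning an NHWC tensor of shape [b, h, w, c] into [b, h/block_size, w/block_size, c * block_size**2]. Below is a minimal sketch of the semantics, assuming a TF 1.x runtime like most of the examples on this page (under TF 2.x, use tf.nn.space_to_depth with eager execution):

import numpy as np
import tensorflow as tf

# One 4x4 single-channel image with distinct values so the regrouping is visible.
x = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
y = tf.space_to_depth(x, block_size=2)

with tf.Session() as sess:
    out = sess.run(y)

print(out.shape)     # (1, 2, 2, 4)
print(out[0, 0, 0])  # [0. 1. 4. 5.] -- the top-left 2x2 patch became channels
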
Example #1
Source File: keras_yolo.py    From PiCamNN with MIT License
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv13 = darknet.get_layer('batchnormalization_13').output
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, 3, 3),
        DarknetConv2D_BN_Leaky(1024, 3, 3))(darknet.output)

    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv13_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv13)

    # Concat conv13 with conv20 (Keras 1 merge API; Keras 2 uses concatenate).
    x = merge([conv13_reshaped, conv20], mode='concat')
    x = DarknetConv2D_BN_Leaky(1024, 3, 3)(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), 1, 1)(x)
    return Model(inputs, x) 
Example #2
Source File: keras_yolo.py    From YOLO-Pi with Apache License 2.0
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x) 
Example #3
Source File: spacetodepth_op_test.py    From deep_image_model with Apache License 2.0
def _checkGrad(self, x, block_size):
    assert 4 == x.ndim
    with self.test_session(use_gpu=True):
      tf_x = tf.convert_to_tensor(x)
      tf_y = tf.space_to_depth(tf_x, block_size)
      epsilon = 1e-2
      x_jacob_t, x_jacob_n = tf.test.compute_gradient(
          tf_x,
          x.shape,
          tf_y,
          tf_y.get_shape().as_list(),
          x_init_value=x,
          delta=epsilon)

    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)

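Because space_to_depth is a pure permutation of tensor elements, its Jacobian is a permutation matrix and the theoretical and numeric gradients should agree to within finite-difference error. A sketch of the same check against the TF 2.x API, where tf.test.compute_gradient takes a function instead of graph tensors:

import numpy as np
import tensorflow as tf

x = tf.random.normal([1, 4, 4, 3], dtype=tf.float64)
theoretical, numeric = tf.test.compute_gradient(
    lambda t: tf.nn.space_to_depth(t, block_size=2), [x])

# One Jacobian per input tensor; both should match almost exactly.
assert np.allclose(theoretical[0], numeric[0], atol=1e-2)
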
Example #4
Source File: keras_yolo.py    From object-detection with MIT License
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x) 
Example #5
Source File: hf_net.py    From hfnet with MIT License
def detector_loss(inp, out, config):
    if 'keypoint_map' in inp:  # hard labels
        labels = tf.to_float(inp['keypoint_map'][..., tf.newaxis])  # for GPU
        labels = tf.space_to_depth(labels, config['local']['detector_grid'])
        shape = tf.concat([tf.shape(labels)[:3], [1]], axis=0)
        labels = tf.argmax(tf.concat([2*labels, tf.ones(shape)], 3), axis=3)
        with tf.device('/cpu:0'):
            d = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=labels, logits=out['logits'])
        mask = None
    elif 'dense_scores' in inp:  # soft labels
        d = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=inp['dense_scores'], logits=out['logits'], dim=-1)
        mask = inp.get('dense_scores_valid_mask', None)
    else:
        raise ValueError('inp must contain either keypoint_map or dense_scores')

    if mask is not None:
        mask = tf.to_float(mask)
        d = (tf.reduce_sum(d * mask, axis=[1, 2])
             / tf.reduce_sum(mask, axis=[1, 2]))
    else:
        d = tf.reduce_mean(d, axis=[1, 2])
    return d 
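
The space_to_depth call above turns a dense [b, h, w, 1] keypoint map into [b, h/N, w/N, N**2], so each grid cell becomes a classification target over its N**2 pixel slots plus one background channel. A toy illustration of that label construction, with a hypothetical 4x4 map and grid size 2:

import numpy as np
import tensorflow as tf

# A 4x4 keypoint map with a single keypoint at row 1, col 2.
kp = np.zeros((1, 4, 4, 1), dtype=np.float32)
kp[0, 1, 2, 0] = 1.0

cells = tf.space_to_depth(kp, 2)  # (1, 2, 2, 4): one slot per pixel of each 2x2 cell
with tf.Session() as sess:
    print(sess.run(tf.argmax(cells, axis=-1))[0])
# [[0 2]
#  [0 0]]  -- cell (0, 1) holds its keypoint in slot 2 (row 1, col 0 of the block)

Empty cells default to slot 0 here, which is why the real loss appends an explicit background channel before taking the argmax.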
Example #6
Source File: det_tools.py    From hfnet with MIT License
def compute_repeatable_loss(pred_d_heatmaps, trans_heatmaps, block_size, name='REPEAT-LOSS'):
    '''
    Args:
        pred_d_heatmaps: [batch, height/N, width/N, N**2+1]
        trans_heatmaps: [batch, height, width, 1]
    '''
    with tf.name_scope(name):
        trans_d_heatmaps = tf.space_to_depth(trans_heatmaps, block_size)
        kp_bg_map = tf.reduce_sum(trans_d_heatmaps, axis=-1, keep_dims=True)
        kp_bg_map = tf.cast(tf.less(kp_bg_map, 1.0), tf.float32)
        kp_fg_map = 1.0 - tf.squeeze(kp_bg_map, axis=-1)

        trans_d_heatmaps = tf.concat([trans_d_heatmaps, kp_bg_map], axis=3) # add BG channels

        xentropy = tf.nn.softmax_cross_entropy_with_logits(
            labels=trans_d_heatmaps,
            logits=pred_d_heatmaps)  # shape = [batch, height/N, width/N]
        kp_count = tf.maximum(1.0, tf.reduce_sum(kp_fg_map))
        repeat_loss = tf.div(tf.reduce_sum(kp_fg_map * xentropy), kp_count)

        return repeat_loss 
Example #7
Source File: FFDNet.py    From VideoSuperResolution with MIT License
def build_graph(self):
    super(FFDNet, self).build_graph()  # build inputs placeholder
    with tf.variable_scope(self.name):
      # build layers
      inputs = self.inputs_preproc[-1] / 255
      if self.training:
        sigma = tf.random_uniform((), maxval=self.sigma / 255)
        inputs += tf.random_normal(tf.shape(inputs)) * sigma
      else:
        sigma = self.sigma / 255
      inputs = tf.space_to_depth(inputs, block_size=self.space_down)
      noise_map = tf.ones_like(inputs)[..., 0:1] * sigma
      x = tf.concat([inputs, noise_map], axis=-1)
      x = self.relu_conv2d(x, 64, 3)
      for i in range(1, self.layers - 1):
        x = self.bn_relu_conv2d(x, 64, 3, use_bias=False)
      # the last layer w/o BN and ReLU
      x = self.conv2d(x, self.channel * self.space_down ** 2, 3)
      denoised = tf.depth_to_space(x, block_size=self.space_down)
      self.outputs.append(denoised * 255) 
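
FFDNet uses space_to_depth as a lossless downsampler and depth_to_space at the output to undo it; with matching block sizes the two ops are exact inverses. A quick round-trip check (TF 1.x style again):

import numpy as np
import tensorflow as tf

x = np.random.rand(1, 8, 8, 3).astype(np.float32)
down = tf.space_to_depth(x, block_size=2)     # (1, 4, 4, 12)
back = tf.depth_to_space(down, block_size=2)  # (1, 8, 8, 3)

with tf.Session() as sess:
    assert np.array_equal(sess.run(back), x)  # bit-exact round trip
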
Example #8
Source File: flows.py    From flowpp with MIT License
def forward(self, x, **kwargs):
        return tf.space_to_depth(x, self.block_size), None 
Example #9
Source File: gqn_vae.py    From tf-gqn with Apache License 2.0
def vae_simple_encoder(x, scope="VAESimpleEncoder"):
  with tf.variable_scope(scope):
    endpoints = {}

    net = x  # shape (b, 64, 64, 3)
    net = tf.layers.conv2d(
      net, kernel_size=3, filters=512, activation=tf.nn.relu,
      padding="SAME")  # shape out: (b, 64, 64, 512)
    net = tf.layers.conv2d(
      net, kernel_size=3, filters=64, strides=2, activation=tf.nn.relu,
      padding="SAME")  # shape out: (b, 32, 32, 64)
    net = tf.layers.conv2d(
      net, kernel_size=3, filters=128, strides=2, activation=tf.nn.relu,
      padding="SAME")  # shape out: (b, 16, 16, 128)
    net = tf.layers.conv2d(
      net, kernel_size=5, filters=512, strides=2, activation=tf.nn.relu,
      padding="SAME")  # shape out: (b, 8, 8, 512)

    # to go from (8, 8, 512) to (1, 1, 256), we do a regular strided convolution
    # and move the extra spatial information to channels
    net = tf.layers.conv2d(
      net, kernel_size=5, filters=16, strides=2, activation=tf.nn.relu,
      padding="SAME")  # shape out: (b, 4, 4, 16)
    net = tf.space_to_depth(net, block_size=4)  # shape out: (b, 1, 1, 256)

    return net, endpoints 
Example #10
Source File: frvsr.py    From PFNL with MIT License
def upscale_warp(self, uv, est):
        n, h, w, c = est.get_shape().as_list()
        upuv = tf.image.resize_images(uv, [h, w], method=0)  # method 0 = bilinear
        warp_est = imwarp_forward(upuv, est, [h, w])
        warp_est = tf.space_to_depth(warp_est, self.scale, name='est')

        return warp_est 
Example #11
Source File: layers.py    From mayo with MIT License
def instantiate_space_to_depth(self, node, tensors, params):
        return tf.space_to_depth(tensors, **use_name_not_scope(params)) 
Example #12
Source File: yolo_v2.py    From YOLO2TensorFlow with Apache License 2.0
def yolo_v2(inputs, num_classes, is_training, num_anchors=5, scope='yolo_v2'):
    with tf.variable_scope(scope, 'yolo_v2', [inputs]) as sc:
        end_points_collection = sc.name + '_end_points'
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                            outputs_collections=end_points_collection):
            net = slim.conv2d(inputs, 32, scope='layer_0')
            net = slim.max_pool2d(net, scope='layer_1')
            net = slim.conv2d(net, 64, scope='layer_2')
            net = slim.max_pool2d(net, scope='layer_3')
            net = slim.conv2d(net, 128, scope='layer_4')
            net = slim.conv2d(net, 64, kernel_size=[1, 1], scope='layer_5')
            net = slim.conv2d(net, 128, scope='layer_6')
            net = slim.max_pool2d(net, scope='layer_7')
            net = slim.conv2d(net, 256, scope='layer_8')
            net = slim.conv2d(net, 128, kernel_size=[1, 1], scope='layer_9')
            net = slim.conv2d(net, 256, scope='layer_10')
            net = slim.max_pool2d(net, scope='layer_11')
            net = slim.conv2d(net, 512, scope='layer_12')
            net = slim.conv2d(net, 256, kernel_size=[1, 1], scope='layer_13')
            net = slim.conv2d(net, 512, scope='layer_14')
            net = slim.conv2d(net, 256, kernel_size=[1, 1], scope='layer_15')
            net = slim.conv2d(net, 512, scope='layer_16')
            path_1 = tf.space_to_depth(net, block_size=2, name='path_1')
            net = slim.max_pool2d(net, scope='layer_17')
            net = slim.conv2d(net, 1024, scope='layer_18')
            net = slim.conv2d(net, 512, kernel_size=[1, 1], scope='layer_19')
            net = slim.conv2d(net, 1024, scope='layer_20')
            net = slim.conv2d(net, 512, kernel_size=[1, 1], scope='layer_21')
            net = slim.conv2d(net, 1024, scope='layer_22')
            net = slim.conv2d(net, 1024, scope='layer_23')
            net = slim.conv2d(net, 1024, scope='layer_24')
            path_2 = net
            net = tf.concat([path_1, path_2], 3, name='concat2path')
            net = slim.conv2d(net, 1024, scope='layer_25')
            net = slim.conv2d(net, (num_classes + 5) * num_anchors, kernel_size=[1, 1], scope='layer_26')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            return net, end_points 
Example #13
Source File: initializers.py    From faceswap with GNU General Public License v3.0
def _space_to_depth(self, input_tensor):
        """ Space to depth implementation.

        PlaidML does not have a space-to-depth operation, so the result is
        computed manually with reshapes when the backend is amd; otherwise the
        :func:`tensorflow.space_to_depth` operation is returned.

        Parameters
        ----------
        input_tensor: tensor
            The tensor to be manipulated

        Returns
        -------
        tensor
            The manipulated input tensor
        """
        if get_backend() == "amd":
            batch, height, width, depth = input_tensor.shape.dims
            new_height = height // self.scale
            new_width = width // self.scale
            reshaped = K.reshape(input_tensor,
                                 (batch, new_height, self.scale, new_width, self.scale, depth))
            retval = K.reshape(K.permute_dimensions(reshaped, [0, 1, 3, 2, 4, 5]),
                               (batch, new_height, new_width, -1))
        else:
            retval = tf.space_to_depth(input_tensor, block_size=self.scale, data_format="NHWC")
        logger.debug("Input Tensor: %s, Output Tensor: %s", input_tensor, retval)
        return retval 
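
The amd branch above rebuilds space_to_depth from reshape and permute; for NHWC tensors the two paths agree element for element. A standalone sketch of that equivalence in plain TensorFlow (static shapes assumed for the reshapes):

import numpy as np
import tensorflow as tf

def space_to_depth_manual(x, scale):
    """Reshape/transpose equivalent of tf.space_to_depth for NHWC tensors."""
    b, h, w, d = [int(s) for s in x.shape]
    x = tf.reshape(x, (b, h // scale, scale, w // scale, scale, d))
    x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
    return tf.reshape(x, (b, h // scale, w // scale, -1))

x = tf.constant(np.random.rand(2, 8, 8, 3).astype(np.float32))
with tf.Session() as sess:
    manual, native = sess.run([space_to_depth_manual(x, 2),
                               tf.space_to_depth(x, block_size=2)])
assert np.array_equal(manual, native)
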
Example #14
Source File: test_tf_converter.py    From tf-coreml with Apache License 2.0
def test_space_to_depth(self):
    self._test_reorganize_data(tf.space_to_depth, [1, 2, 2, 1]) 
Example #15
Source File: nn_extra_nvp.py    From bruno with MIT License
def forward_and_jacobian(self, x, z, sum_log_det_jacobian):
        xs = int_shape(x)
        assert xs[1] % 2 == 0 and xs[2] % 2 == 0
        y = tf.space_to_depth(x, 2)
        if z is not None:
            z = tf.space_to_depth(z, 2)

        return y, z, sum_log_det_jacobian 
Example #16
Source File: nn_extra_nvp_conditional.py    From bruno with MIT License
def forward_and_jacobian(self, x, sum_log_det_jacobians, z, y_label=None):
        xs = int_shape(x)
        assert xs[1] % 2 == 0 and xs[2] % 2 == 0
        y = tf.space_to_depth(x, 2)
        if z is not None:
            z = tf.space_to_depth(z, 2)

        return y, sum_log_det_jacobians, z 
Example #17
Source File: celeba64_5bit_official.py    From flowpp with MIT License
def forward(self, x, **kwargs):
        return tf.space_to_depth(x, self.block_size), None 
Example #18
Source File: celeba128_5bit_official.py    From flowpp with MIT License
def forward(self, x, **kwargs):
        return tf.space_to_depth(x, self.block_size), None 
Example #19
Source File: celeba64_3bit_official.py    From flowpp with MIT License
def forward(self, x, **kwargs):
        return tf.space_to_depth(x, self.block_size), None 
Example #20
Source File: keras2_emitter.py    From MMdnn with MIT License
def emit_SpaceToDepth(self, IR_node, in_scope=False):
        self.used_layers.add(IR_node.type)
        assert IR_node.get_attr('blocksize') == 2
        # TODO: arguments won't be saved in keras export model

        blocksize = "arguments={'blocksize': %d}" % 2
        code = "{:<15} = layers.Lambda(space_to_depth, {}, name='{}')({})".format(
            IR_node.variable_name,
            blocksize,
            IR_node.name,
            self.parent_variable_name(IR_node))
        return code 
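
For concreteness, with a hypothetical IR node whose variable name is x5, name is conv5, and parent variable is x4, the emitted Keras line would look like:

x5              = layers.Lambda(space_to_depth, arguments={'blocksize': 2}, name='conv5')(x4)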
Example #21
Source File: imagenet32_official.py    From flowpp with MIT License
def forward(self, x, **kwargs):
        return tf.space_to_depth(x, self.block_size), None 
Example #22
Source File: imagenet64_official.py    From flowpp with MIT License
def forward(self, x, **kwargs):
        return tf.space_to_depth(x, self.block_size), None 
Example #23
Source File: det_tools.py    From hfnet with MIT License
def compute_fg_mask_from_gradients(gradients, block_size, grad_thresh, reduce_op=tf.reduce_mean, name='FGMASK', keep_dims=False):

    with tf.name_scope(name):
        d_grads = tf.space_to_depth(gradients, block_size)
        d_grads = reduce_op(d_grads, axis=3, keep_dims=keep_dims)
        d_fgmask = tf.cast(tf.greater(d_grads, grad_thresh), tf.float32)

        # restore fgmask to original resolution
        # d_fgmask2 = tf.tile(tf.expand_dims(d_fgmask, -1), [1,1,1,block_size**2])
        # fgmask = tf.depth_to_space(d_fgmask2, block_size)

        return d_fgmask 
Example #24
Source File: keras_yolo.py    From YOLO-Pi with Apache License 2.0
def space_to_depth_x2(x):
    """Thin wrapper for Tensorflow space_to_depth with block_size=2."""
    # Import currently required to make Lambda work.
    # See: https://github.com/fchollet/keras/issues/5088#issuecomment-273851273
    import tensorflow as tf
    return tf.space_to_depth(x, block_size=2) 
Example #25
Source File: keras_yolo.py    From YOLO-Pi with Apache License 2.0
def space_to_depth_x2_output_shape(input_shape):
    """Determine space_to_depth output shape for block_size=2.

    Note: For Lambda with TensorFlow backend, output shape may not be needed.
    """
    if input_shape[1]:
        return (input_shape[0], input_shape[1] // 2,
                input_shape[2] // 2, 4 * input_shape[3])
    return (input_shape[0], None, None, 4 * input_shape[3]) 
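
A quick sanity check that this static-shape formula matches what TensorFlow itself infers (26x26 mirrors the YOLOv2 passthrough input; the channel count here is illustrative):

import tensorflow as tf

x = tf.zeros([1, 26, 26, 64])
y = tf.space_to_depth(x, block_size=2)

print(y.get_shape().as_list())                          # [1, 13, 13, 256]
print(space_to_depth_x2_output_shape((1, 26, 26, 64)))  # (1, 13, 13, 256)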
Example #26
Source File: nn.py    From flow-gan with MIT License
def forward_and_jacobian(self, x, sum_log_det_jacobians, z, reuse=False, train=False):
    xs = int_shape(x)
    assert xs[1] % 2 == 0 and xs[2] % 2 == 0
    y = tf.space_to_depth(x, 2)
    if z is not None:
      z = tf.space_to_depth(z, 2)      

    return y, sum_log_det_jacobians, z 
Example #27
Source File: feature_map_generators.py    From mtl-ssl with Apache License 2.0
def concatenate_feature_maps(image_features, layer_names=None):
  """Generates single concatenated feature map from input image features.

  Args:
    image_features: dictionary that contains feature maps.
    layer_names: List of layer names. The output feature map is concatenated
      in this order.
  """
  if layer_names is None:
    # list() so .pop() and the reversed slice below work under Python 3.
    layer_names = list(image_features.keys())
  base_feature_map = image_features[layer_names.pop()]
  _, base_h, base_w, _ = base_feature_map.get_shape().as_list()

  concat_feature_map = base_feature_map
  for layer in layer_names[::-1]:
    feature_map = image_features[layer]
    _, h, w, _ = feature_map.get_shape().as_list()
    assert h/base_h == w/base_w, "Ratios for height and width are different."

    if h == base_h:
      stretch_feature_map = feature_map
    else:
      stretch_feature_map = tf.space_to_depth(feature_map, block_size=h // base_h)
    concat_feature_map = tf.concat(axis=3, values=[stretch_feature_map, concat_feature_map])

  return concat_feature_map 
Example #28
Source File: depthtospace_op_test.py    From deep_image_model with Apache License 2.0
def testBlockSizeLargerThanInput(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]],
             [[3], [4]]]]
    block_size = 10
    with self.assertRaises(ValueError):
      out_tf = tf.space_to_depth(x_np, block_size)
      out_tf.eval() 
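
The same constraint holds in general: both spatial dimensions must be evenly divisible by block_size, and with fully static shapes the check fires as a ValueError at graph-construction time. A sketch:

import tensorflow as tf

x = tf.zeros([1, 6, 6, 1])
tf.space_to_depth(x, 2)      # fine: 6 % 2 == 0

try:
    tf.space_to_depth(x, 4)  # 6 % 4 != 0 -> shape error at graph build
except ValueError as e:
    print(e)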
Example #29
Source File: keras_yolo.py    From PiCamNN with MIT License
def space_to_depth_x2_output_shape(input_shape):
    """Determine space_to_depth output shape for block_size=2.

    Note: For Lambda with TensorFlow backend, output shape may not be needed.
    """
    if input_shape[1]:
        return (input_shape[0], input_shape[1] // 2,
                input_shape[2] // 2, 4 * input_shape[3])
    return (input_shape[0], None, None, 4 * input_shape[3]) 
Example #30
Source File: ops.py    From Tensorflow-Cookbook with MIT License
def conv_pixel_shuffle_down(x, scale_factor=2, use_bias=True, sn=False, scope='pixel_shuffle'):
    channel = x.get_shape()[-1] // (scale_factor ** 2)
    x = conv(x, channel, kernel=1, stride=1, use_bias=use_bias, sn=sn, scope=scope)
    x = tf.space_to_depth(x, block_size=scale_factor)

    return x
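
As a usage note, the 1x1 convolution in this helper first divides the channel count by scale_factor**2 so that the following space_to_depth halves the resolution while restoring the original depth. A shape trace of the same idea, using tf.layers.conv2d as a stand-in for the repo's conv helper:

import tensorflow as tf

x = tf.zeros([1, 16, 16, 64])
reduced = tf.layers.conv2d(x, filters=64 // 4, kernel_size=1)  # (1, 16, 16, 16)
down = tf.space_to_depth(reduced, block_size=2)                # (1, 8, 8, 64)

print(down.get_shape().as_list())  # [1, 8, 8, 64]: half resolution, same depth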