Python tensorflow.atan2() Examples

The following are 22 code examples of tensorflow.atan2(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
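As a quick reference before the examples: tf.atan2(y, x) returns the element-wise angle of the point (x, y) in radians, in the range [-pi, pi], using the signs of both arguments to pick the correct quadrant. A minimal sketch, assuming a TF 1.x session-based setup like the examples below:

import tensorflow as tf

y = tf.constant([0.0, 1.0, 1.0, -1.0])
x = tf.constant([1.0, 1.0, -1.0, -1.0])
angle = tf.atan2(y, x)  # quadrant-aware arctangent, in radians

with tf.Session() as sess:
    print(sess.run(angle))  # approx [0.  0.785  2.356  -2.356]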
Example #1
Source File: flowlib.py    From DDFlow with MIT License
def flow_to_color(flow, mask=None, max_flow=None):
    """Converts flow to 3-channel color image.

    Args:
        flow: tensor of shape [num_batch, height, width, 2].
        mask: flow validity mask of shape [num_batch, height, width, 1].
    """
    n = 8
    num_batch, height, width, _ = tf.unstack(tf.shape(flow))
    mask = tf.ones([num_batch, height, width, 1]) if mask is None else mask
    flow_u, flow_v = tf.unstack(flow, axis=3)
    if max_flow is not None:
        max_flow = tf.maximum(tf.to_float(max_flow), 1.)
    else:
        max_flow = tf.reduce_max(tf.abs(flow * mask))
    mag = tf.sqrt(tf.reduce_sum(tf.square(flow), 3))
    angle = tf.atan2(flow_v, flow_u)

    im_h = tf.mod(angle / (2 * np.pi) + 1.0, 1.0)
    im_s = tf.clip_by_value(mag * n / max_flow, 0, 1)
    im_v = tf.clip_by_value(n - im_s, 0, 1)
    im_hsv = tf.stack([im_h, im_s, im_v], 3)
    im = tf.image.hsv_to_rgb(im_hsv)
    return im * mask 
Example #2
Source File: geo_utils.py    From DeepMatchVO with MIT License
def mat2euler(rot):
    """Converts a batch of rotation matrices of shape [B, 3, 3] to (z, y, x) Euler angles."""
    r00 = tf.slice(rot, [0, 0, 0], [-1, 1, 1])
    r01 = tf.slice(rot, [0, 0, 1], [-1, 1, 1])
    r02 = tf.slice(rot, [0, 0, 2], [-1, 1, 1])
    r10 = tf.slice(rot, [0, 1, 0], [-1, 1, 1])
    r11 = tf.slice(rot, [0, 1, 1], [-1, 1, 1])
    r12 = tf.slice(rot, [0, 1, 2], [-1, 1, 1])
    r22 = tf.slice(rot, [0, 2, 2], [-1, 1, 1])
    cy = tf.sqrt(r22*r22 + r12 * r12)

    def f1():
        z = tf.atan2(-r01, r00)    
        y = tf.atan2(r02, cy)
        x = tf.atan2(-r12, r22)
        return tf.concat([z,y,x], axis=1)

    def f2():
        z = tf.atan2(r10, r11)
        y = tf.atan2(r02, cy)
        x = tf.zeros_like(y)
        return tf.concat([z,y,x], axis=1)
    
    # Both branches are computed; tf.where then selects f2 per batch element near gimbal lock (cy ~ 0).
    x1 = f1()
    x2 = f2()
    return tf.where(tf.squeeze(tf.less(cy, 1e-6), axis=[1, 2]), x2, x1)
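A quick sanity check for mat2euler, as a hedged sketch (the batched [B, 3, 3] input shape is assumed from the tf.slice patterns above): identity rotations should map to all-zero angles.

import numpy as np
import tensorflow as tf

rot = tf.constant(np.stack([np.eye(3, dtype=np.float32)] * 2))  # [2, 3, 3] identity rotations
euler = mat2euler(rot)  # [2, 3, 1] tensor of (z, y, x) angles

with tf.Session() as sess:
    print(sess.run(euler))  # approx all zeros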
Example #3
Source File: tf_utils.py    From video_prediction with MIT License
def flow_to_rgb(flows):
    """The last axis should have dimension 2, for x and y values."""

    def cartesian_to_polar(x, y):
        magnitude = tf.sqrt(tf.square(x) + tf.square(y))
        angle = tf.atan2(y, x)
        return magnitude, angle

    mag, ang = cartesian_to_polar(*tf.unstack(flows, axis=-1))
    ang_normalized = (ang + np.pi) / (2 * np.pi)
    mag_min = tf.reduce_min(mag)
    mag_max = tf.reduce_max(mag)
    mag_normalized = (mag - mag_min) / (mag_max - mag_min)
    hsv = tf.stack([ang_normalized, tf.ones_like(ang), mag_normalized], axis=-1)
    rgb = tf.image.hsv_to_rgb(hsv)
    return rgb 
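flow_to_rgb needs only TensorFlow and NumPy, so it is easy to try on synthetic data; a minimal sketch (the random flow field is made up for illustration):

import numpy as np
import tensorflow as tf

flows = tf.constant(np.random.randn(8, 8, 2).astype(np.float32))  # last axis is (x, y)
rgb = flow_to_rgb(flows)  # hue encodes direction, value encodes normalized magnitude

with tf.Session() as sess:
    print(sess.run(rgb).shape)  # (8, 8, 3)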
Example #4
Source File: flowlib.py    From SelFlow with MIT License
def flow_to_color(flow, mask=None, max_flow=None):
    """Converts flow to 3-channel color image.

    Args:
        flow: tensor of shape [num_batch, height, width, 2].
        mask: flow validity mask of shape [num_batch, height, width, 1].
    """
    n = 8
    num_batch, height, width, _ = tf.unstack(tf.shape(flow))
    mask = tf.ones([num_batch, height, width, 1]) if mask is None else mask
    flow_u, flow_v = tf.unstack(flow, axis=3)
    if max_flow is not None:
        max_flow = tf.maximum(tf.to_float(max_flow), 1.)
    else:
        max_flow = tf.reduce_max(tf.abs(flow * mask))
    mag = tf.sqrt(tf.reduce_sum(tf.square(flow), 3))
    angle = tf.atan2(flow_v, flow_u)

    im_h = tf.mod(angle / (2 * np.pi) + 1.0, 1.0)
    im_s = tf.clip_by_value(mag * n / max_flow, 0, 1)
    im_v = tf.clip_by_value(n - im_s, 0, 1)
    im_hsv = tf.stack([im_h, im_s, im_v], 3)
    im = tf.image.hsv_to_rgb(im_hsv)
    return im * mask 
Example #5
Source File: Motion.py    From VideoSuperResolution with MIT License
def viz_flow(flow):
  """Visualize optical flow in TF"""
  from VSR.Util.VisualizeOpticalFlow import _color_wheel
  with tf.name_scope('VizFlow'):
    color_wheel = _color_wheel().astype('float32')
    n_cols = color_wheel.shape[0]
    u, v = flow[..., 0], flow[..., 1]
    rot = tf.atan2(-v, -u) / np.pi
    fk = (rot + 1) / 2 * (n_cols - 1)  # [-1, 1] mapped to [0, n_cols - 1]
    k0 = tf.to_int32(fk)  # 0, 1, ..., n_cols - 1
    k1 = tf.mod(k0 + 1, n_cols)
    f = fk - tf.to_float(k0)
    f = tf.expand_dims(f, -1)
    col0 = tf.gather_nd(color_wheel, tf.expand_dims(k0, -1))
    col1 = tf.gather_nd(color_wheel, tf.expand_dims(k1, -1))
    col = (1 - f) * col0 + f * col1
  return col 
Example #6
Source File: math_helpers.py    From graphics with Apache License 2.0
def cartesian_to_spherical_coordinates(point_cartesian, eps=None, name=None):
  """Function to transform Cartesian coordinates to spherical coordinates.

  This function assumes a right handed coordinate system with `z` pointing up.
  When `x` and `y` are both `0`, the function outputs `0` for `phi`. Note that
  the function is not smooth when `x = y = 0`.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    point_cartesian: A tensor of shape `[A1, ..., An, 3]`. In the last
      dimension, the data follows the `x`, `y`, `z` order.
    eps: A small `float`, to be added to the denominator. If left as `None`,
      its value is automatically selected using `point_cartesian.dtype`.
    name: A name for this op. Defaults to `cartesian_to_spherical_coordinates`.

  Returns:
    A tensor of shape `[A1, ..., An, 3]`. The last dimensions contains
    (`r`,`theta`,`phi`), where `r` is the sphere radius, `theta` is the polar
    angle and `phi` is the azimuthal angle.
  """
  with tf.compat.v1.name_scope(name, "cartesian_to_spherical_coordinates",
                               [point_cartesian]):
    point_cartesian = tf.convert_to_tensor(value=point_cartesian)

    shape.check_static(
        tensor=point_cartesian,
        tensor_name="point_cartesian",
        has_dim_equals=(-1, 3))

    x, y, z = tf.unstack(point_cartesian, axis=-1)
    radius = tf.norm(tensor=point_cartesian, axis=-1)
    theta = tf.acos(safe_ops.safe_unsigned_div(z, radius, eps))
    phi = tf.atan2(y, x)
    return tf.stack((radius, theta, phi), axis=-1) 
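For reference, this function ships with tensorflow_graphics; a hedged usage sketch (the module path is assumed from the source file above, and the snippet runs eagerly under TF 2.x):

import tensorflow as tf
from tensorflow_graphics.math import math_helpers

point = tf.constant([[0.0, 0.0, 1.0]])  # a point on the +z axis
r_theta_phi = math_helpers.cartesian_to_spherical_coordinates(point)
print(r_theta_phi)  # approx [[1. 0. 0.]]: radius 1, polar angle 0, azimuth 0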
Example #7
Source File: test_forward.py    From incubator-tvm with Apache License 2.0
def test_forward_atan2():
    """test operator tan """
    tf.disable_eager_execution()
    np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
    in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
    tf.atan2(in_data_1, in_data_2, name="atan2")
    compare_tf_with_tvm([np_data_1, np_data_2], ['in_data_1:0', 'in_data_2:0'], 'atan2:0') 
Example #8
Source File: det_tools.py    From hfnet with MIT License
def get_degree_maps(ori_maps):
    # ori_maps: [B,H,W,2], consisting of the cos and sin responses
    cos_maps = tf.slice(ori_maps, [0,0,0,0], [-1,-1,-1,1])
    sin_maps = tf.slice(ori_maps, [0,0,0,1], [-1,-1,-1,1])
    atan_maps = tf.atan2(sin_maps, cos_maps)
    angle2rgb = tf.constant(get_angle_colorbar())
    degree_maps = tf.cast(tf.clip_by_value(atan_maps*180/np.pi+180, 0, 360), tf.int32)
    degree_maps = tf.gather(angle2rgb, degree_maps[...,0])
    return degree_maps, atan_maps

Example #9
Source File: orientation_encoder.py    From monopsr with MIT License
def tf_angle_vector_to_orientation(angle_vectors_tensor):
    """ Converts angle unit vectors into orientation angle representation.
        e.g. [0.717, 0.717] -> pi/4 (45 deg), [0, 1] -> pi/2 (90 deg)

    Args:
        angle_vectors_tensor: a tensor of shape (N, 2) of angle unit vectors
            in the format [x, y]

    Returns:
        A tensor of shape (N,) of orientation angles in radians
    """
    x = angle_vectors_tensor[:, 0]
    y = angle_vectors_tensor[:, 1]

    return tf.atan2(y, x) 
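The (cos, sin) encoding and tf.atan2 form an exact round trip over (-pi, pi], which is why this representation avoids the angle-wrapping discontinuity; a minimal sketch:

import numpy as np
import tensorflow as tf

theta = tf.constant([0.0, np.pi / 4, np.pi / 2])
vectors = tf.stack([tf.cos(theta), tf.sin(theta)], axis=1)  # (N, 2) in [x, y] order
recovered = tf_angle_vector_to_orientation(vectors)

with tf.Session() as sess:
    print(sess.run(recovered))  # approx [0.  0.785  1.571]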
Example #10
Source File: tfutil.py    From multisensory with Apache License 2.0
def angle(z):
  # from https://github.com/tensorflow/tensorflow/issues/483
  """
  Returns the elementwise arctan of z, choosing the quadrant correctly.

  Quadrant I: arctan(y/x)
  Quadrant II: \pi + arctan(y/x) (phase of x<0, y=0 is \pi)
  Quadrant III: -\pi + arctan(y/x)
  Quadrant IV: arctan(y/x)

  Inputs:
      z: tf.complex64 or tf.complex128 tensor
  Returns:
      Angle of z
  """
  return tf.atan2(tf.imag(z), tf.real(z))
  # if z.dtype == tf.complex128:
  #     dtype = tf.float64
  # else:
  #     dtype = tf.float32
  # x = tf.real(z)
  # y = tf.imag(z)
  # xneg = tf.cast(x < 0.0, dtype)
  # yneg = tf.cast(y < 0.0, dtype)
  # ypos = tf.cast(y >= 0.0, dtype)

  # offset = xneg * (ypos - yneg) * np.pi

  # return tf.atan(y / x) + offset 
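A usage sketch for angle(): tf.complex builds the inputs, and the result matches np.angle (the sample values are made up for illustration):

import tensorflow as tf

z = tf.complex([1.0, -1.0, 0.0], [1.0, 1.0, -2.0])  # 1+1j, -1+1j, -2j
with tf.Session() as sess:
    print(sess.run(angle(z)))  # approx [0.785  2.356  -1.571], same as np.angle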
Example #11
Source File: orientation_encoder.py    From avod-ssd with MIT License
def tf_angle_vector_to_orientation(angle_vectors_tensor):
    """ Converts angle unit vectors into orientation angle representation.
        e.g. [0.717, 0.717] -> pi/4 (45 deg), [0, 1] -> pi/2 (90 deg)

    Args:
        angle_vectors_tensor: a tensor of shape (N, 2) of angle unit vectors
            in the format [x, y]

    Returns:
        A tensor of shape (N,) of orientation angles in radians
    """
    x = angle_vectors_tensor[:, 0]
    y = angle_vectors_tensor[:, 1]

    return tf.atan2(y, x) 
Example #12
Source File: timedomain.py    From time-domain-neural-audio-style-transfer with Apache License 2.0
def compute_inputs(x, freqs, n_fft, n_frames, input_features, norm=False):
    if norm:
        norm_fn = instance_norm
    else:
        def norm_fn(x):
            return x
    freqs_tf = tf.constant(freqs, name="freqs", dtype='float32')
    inputs = {}
    with tf.variable_scope('real'):
        inputs['real'] = norm_fn(tf.reshape(
            tf.matmul(x, tf.cos(freqs_tf)), [1, 1, n_frames, n_fft // 2]))
    with tf.variable_scope('imag'):
        inputs['imag'] = norm_fn(tf.reshape(
            tf.matmul(x, tf.sin(freqs_tf)), [1, 1, n_frames, n_fft // 2]))
    with tf.variable_scope('mags'):
        inputs['mags'] = norm_fn(tf.reshape(
            tf.sqrt(
                tf.maximum(1e-15, inputs['real'] * inputs['real'] + inputs[
                    'imag'] * inputs['imag'])), [1, 1, n_frames, n_fft // 2]))
    with tf.variable_scope('phase'):
        inputs['phase'] = norm_fn(tf.atan2(inputs['imag'], inputs['real']))
    with tf.variable_scope('unwrapped'):
        inputs['unwrapped'] = tf.py_func(
            unwrap, [inputs['phase']], tf.float32)
    with tf.variable_scope('unwrapped_difference'):
        inputs['unwrapped_difference'] = (tf.slice(
                inputs['unwrapped'],
                [0, 0, 0, 1], [-1, -1, -1, n_fft // 2 - 1]) -
            tf.slice(
                inputs['unwrapped'],
                [0, 0, 0, 0], [-1, -1, -1, n_fft // 2 - 1]))
    if 'unwrapped_difference' in input_features:
        # Trim the remaining features so all inputs share the n_fft // 2 - 1 width.
        for k, v in inputs.items():
            if k != 'unwrapped_difference':
                inputs[k] = tf.slice(
                        v, [0, 0, 0, 0], [-1, -1, -1, n_fft // 2 - 1])
    net = tf.concat([inputs[i] for i in input_features], 1)
    return inputs, net 
Example #13
Source File: inference.py    From pyslam with GNU General Public License v3.0
def visualize_degree_map(ori_maps, name='degree_maps'):
    # ori_maps [B,H,W,2] tf.float32, cos,sin
    with tf.name_scope(name):
        cos_maps = tf.slice(ori_maps, [0,0,0,0], [-1,-1,-1,1])
        sin_maps = tf.slice(ori_maps, [0,0,0,1], [-1,-1,-1,1])
        atan_maps = tf.atan2(sin_maps, cos_maps)
        angle2rgb = tf.constant(get_angle_colorbar())
        degree_maps = tf.cast(tf.clip_by_value(atan_maps*180/np.pi+180, 0, 360), tf.int32) 
        degree_maps = tf.gather(angle2rgb, degree_maps[...,0])
        return degree_maps 
Example #14
Source File: orientation_encoder.py    From TLNet with Apache License 2.0
def tf_angle_vector_to_orientation(angle_vectors_tensor):
    """ Converts angle unit vectors into orientation angle representation.
        e.g. [0.717, 0.717] -> pi/4 (45 deg), [0, 1] -> pi/2 (90 deg)

    Args:
        angle_vectors_tensor: a tensor of shape (N, 2) of angle unit vectors
            in the format [x, y]

    Returns:
        A tensor of shape (N,) of orientation angles in radians
    """
    x = angle_vectors_tensor[:, 0]
    y = angle_vectors_tensor[:, 1]

    return tf.atan2(y, x) 
Example #15
Source File: orientation_encoder.py    From avod with MIT License
def tf_angle_vector_to_orientation(angle_vectors_tensor):
    """ Converts angle unit vectors into orientation angle representation.
        e.g. [0.717, 0.717] -> pi/4 (45 deg), [0, 1] -> pi/2 (90 deg)

    Args:
        angle_vectors_tensor: a tensor of shape (N, 2) of angle unit vectors
            in the format [x, y]

    Returns:
        A tensor of shape (N,) of orientation angles in radians
    """
    x = angle_vectors_tensor[:, 0]
    y = angle_vectors_tensor[:, 1]

    return tf.atan2(y, x) 
Example #16
Source File: axis_angle.py    From graphics with Apache License 2.0
def from_quaternion(quaternion, name=None):
  """Converts a quaternion to an axis-angle representation.

  Note:
    In the following, A1 to An are optional batch dimensions.

  Args:
    quaternion: A tensor of shape `[A1, ..., An, 4]`, where the last dimension
      represents a normalized quaternion.
    name: A name for this op that defaults to "axis_angle_from_quaternion".

  Returns:
    Tuple of two tensors of shape `[A1, ..., An, 3]` and `[A1, ..., An, 1]`,
    where the first tensor represents the axis, and the second represents the
    angle. The resulting axis is a normalized vector.

  Raises:
    ValueError: If the shape of `quaternion` is not supported.
  """
  with tf.compat.v1.name_scope(name, "axis_angle_from_quaternion",
                               [quaternion]):
    quaternion = tf.convert_to_tensor(value=quaternion)

    shape.check_static(
        tensor=quaternion, tensor_name="quaternion", has_dim_equals=(-1, 4))
    quaternion = asserts.assert_normalized(quaternion)

    # This prevents zero norm xyz and zero w, and is differentiable.
    quaternion += asserts.select_eps_for_addition(quaternion.dtype)
    xyz, w = tf.split(quaternion, (3, 1), axis=-1)
    norm = tf.norm(tensor=xyz, axis=-1, keepdims=True)
    angle = 2.0 * tf.atan2(norm, tf.abs(w))
    axis = safe_ops.safe_unsigned_div(safe_ops.nonzero_sign(w) * xyz, norm)
    return axis, angle 
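This is the tensorflow_graphics axis-angle conversion; a hedged usage sketch (the module path is assumed from the source file above, eager TF 2.x): the identity quaternion should give an angle of zero, with the axis direction arbitrary for a zero rotation.

import tensorflow as tf
from tensorflow_graphics.geometry.transformation import axis_angle

q = tf.constant([[0.0, 0.0, 0.0, 1.0]])  # identity quaternion (x, y, z, w)
axis, angle = axis_angle.from_quaternion(q)
print(angle)  # approx [[0.]]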
Example #17
Source File: geom_ops.py    From rgn with MIT License
def reduce_mean_angle(weights, angles, use_complex=False, name=None):
    """ Computes the weighted mean of angles. Accepts option to compute use complex exponentials or real numbers.

        Complex number-based version is giving wrong gradients for some reason, but forward calculation is fine.

        See https://en.wikipedia.org/wiki/Mean_of_circular_quantities

    Args:
        weights: [BATCH_SIZE, NUM_ANGLES]
        angles:  [NUM_ANGLES, NUM_DIHEDRALS]

    Returns:
                 [BATCH_SIZE, NUM_DIHEDRALS]

    """

    with tf.name_scope(name, 'reduce_mean_angle', [weights, angles]) as scope:
        weights = tf.convert_to_tensor(weights, name='weights')
        angles  = tf.convert_to_tensor(angles,  name='angles')

        if use_complex:
            # use complexed-valued exponentials for calculation
            cwts =        tf.complex(weights, 0.) # cast to complex numbers
            exps = tf.exp(tf.complex(0., angles)) # convert to point on complex plane

            unit_coords = tf.matmul(cwts, exps) # take the weighted mixture of the unit circle coordinates

            return tf.angle(unit_coords, name=scope) # return angle of averaged coordinate

        else:
            # use real-numbered pairs of values
            sins = tf.sin(angles)
            coss = tf.cos(angles)

            y_coords = tf.matmul(weights, sins)
            x_coords = tf.matmul(weights, coss)

            return tf.atan2(y_coords, x_coords, name=scope) 
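Why atan2 is the right reduction here: angles that straddle the pi/-pi wrap average to pi under the circular mean, whereas a naive arithmetic mean would give 0. A minimal sketch with made-up weights and angles:

import numpy as np
import tensorflow as tf

angles = tf.constant([[np.pi - 0.1], [-np.pi + 0.1]], dtype=tf.float32)  # [NUM_ANGLES=2, NUM_DIHEDRALS=1]
weights = tf.constant([[0.5, 0.5]])                                      # [BATCH_SIZE=1, NUM_ANGLES=2]

mean_angle = reduce_mean_angle(weights, angles)

with tf.Session() as sess:
    print(sess.run(mean_angle))  # approx [[3.1416]] or [[-3.1416]] (i.e. +/-pi), not [[0.]]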
Example #18
Source File: test_lfnet.py    From pyslam with GNU General Public License v3.0
def build_networks(config, photo, is_training):

    DET = importlib.import_module(config.detector)
    detector = DET.Model(config, is_training)

    if config.input_inst_norm:
        print('Apply instance norm on input photos')
        photo = instance_normalization(photo)

    heatmaps, det_endpoints = build_detector_helper(config, detector, photo)

    # extract patches
    kpts = det_endpoints['kpts']
    batch_inds = det_endpoints['batch_inds']

    kp_patches = build_patch_extraction(config, det_endpoints, photo)

    # Descriptor
    DESC = importlib.import_module(config.descriptor)
    descriptor = DESC.Model(config, is_training)
    desc_feats, desc_endpoints = descriptor.build_model(kp_patches, reuse=False) # [B*K,D]

    # scale and orientation (extra)
    scale_maps = det_endpoints['scale_maps']
    ori_maps = det_endpoints['ori_maps'] # cos/sin
    degree_maps, _ = get_degree_maps(ori_maps) # degree (rgb pseudo color code)
    kpts_scale = det_endpoints['kpts_scale']
    kpts_ori = det_endpoints['kpts_ori']
    kpts_ori = tf.atan2(kpts_ori[:,1], kpts_ori[:,0]) # radian

    ops = {
        'photo': photo,
        'is_training': is_training,
        'kpts': kpts,
        'feats': desc_feats,
        # EXTRA
        'scale_maps': scale_maps,
        'kpts_scale': kpts_scale,
        'degree_maps': degree_maps,
        'kpts_ori': kpts_ori,
    }

    return ops 
Example #19
Source File: feature_lfnet.py    From pyslam with GNU General Public License v3.0
def build_networks(lfnet_config, photo, is_training):
    # Detector 
    DET = importlib.import_module(lfnet_config.detector)
    detector = DET.Model(lfnet_config, is_training)

    if lfnet_config.input_inst_norm:
        print('Apply instance norm on input photos')
        photo = instance_normalization(photo)

    heatmaps, det_endpoints = build_detector_helper(lfnet_config, detector, photo)

    # extract patches
    kpts = det_endpoints['kpts']
    batch_inds = det_endpoints['batch_inds']

    kp_patches = build_patch_extraction(lfnet_config, det_endpoints, photo)

    # Descriptor
    DESC = importlib.import_module(lfnet_config.descriptor)
    descriptor = DESC.Model(lfnet_config, is_training)
    desc_feats, desc_endpoints = descriptor.build_model(kp_patches, reuse=False) # [B*K,D]

    # scale and orientation (extra)
    scale_maps = det_endpoints['scale_maps']
    ori_maps = det_endpoints['ori_maps'] # cos/sin
    degree_maps, _ = get_degree_maps(ori_maps) # degree (rgb pseudo color code)
    kpts_scale = det_endpoints['kpts_scale'] # scale factor 
    kpts_ori = det_endpoints['kpts_ori']
    kpts_ori = tf.atan2(kpts_ori[:,1], kpts_ori[:,0]) # radian

    ops = {
        'photo': photo,
        'is_training': is_training,
        'kpts': kpts,
        'feats': desc_feats,
        # EXTRA
        'scale_maps': scale_maps,
        'kpts_scale': kpts_scale,
        'degree_maps': degree_maps,
        'kpts_ori': kpts_ori,
        'heatmaps': heatmaps, 
    }
    return ops 
Example #20
Source File: lf_net.py    From hfnet with MIT License
def build_networks(config, photo, is_training):
    detector = Detector(config, is_training)

    if config.input_inst_norm:
        print('Apply instance norm on input photos')
        photo = instance_normalization(photo)

    if config.use_nms3d:
        heatmaps, det_endpoints = build_multi_scale_deep_detector_3DNMS(
            config, detector, photo, reuse=False)
    else:
        heatmaps, det_endpoints = build_multi_scale_deep_detector(
            config, detector, photo, reuse=False)

    # extract patches
    kpts = det_endpoints['kpts']
    batch_inds = det_endpoints['batch_inds']

    kp_patches = build_patch_extraction(config, det_endpoints, photo)

    # Descriptor
    descriptor = Descriptor(config, is_training)
    desc_feats, desc_endpoints = descriptor.build_model(
        kp_patches, reuse=False) # [B*K,D]

    # scale and orientation (extra)
    scale_maps = det_endpoints['scale_maps']
    ori_maps = det_endpoints['ori_maps'] # cos/sin
    degree_maps, _ = get_degree_maps(ori_maps) # degree (rgb pseudo color code)
    kpts_scale = det_endpoints['kpts_scale']
    kpts_ori = det_endpoints['kpts_ori']
    kpts_ori = tf.atan2(kpts_ori[:,1], kpts_ori[:,0]) # radian
    kpts_scores = det_endpoints['kpts_scores']

    ops = {
        'photo': photo,
        'is_training': is_training,
        'kpts': kpts,
        'scores': kpts_scores,
        'feats': desc_feats,
        # EXTRA
        'scale_maps': scale_maps,
        'kpts_scale': kpts_scale,
        'degree_maps': degree_maps,
        'kpts_ori': kpts_ori,
    }
    return ops 
Example #21
Source File: euler.py    From differentiable-point-clouds with MIT License
def quaternion2euler_full_tf(q, rotseq="yzy"):
    def twoaxisrot_tf(r11, r12, r21, r31, r32):
        a0 = tf.atan2(r11, r12)
        a1 = tf.acos(r21)
        a2 = tf.atan2(r31, r32)
        return tf.stack([a0, a1, a2], axis=-1)

    def threeaxisrot_tf(r11, r12, r21, r31, r32):
        a0 = tf.atan2(r31, r32)
        a1 = tf.asin(tf.clip_by_value(r21, -1.0, 1.0))
        a2 = tf.atan2(r11, r12)
        return tf.stack([a0, a1, a2], axis=-1)

    q_norm = tf.expand_dims(tf.norm(q, axis=-1), axis=-1)
    q /= q_norm

    if rotseq == "yzy":
        angles = twoaxisrot_tf(2 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1]),
                               -2 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3]),
                               q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3],
                               2 * (q[:, 2] * q[:, 3] - q[:, 0] * q[:, 1]),
                               2 * (q[:, 1] * q[:, 2] + q[:, 0] * q[:, 3]))
        yaw = angles[:, 2]
        pitch = angles[:, 1]
    elif rotseq == "xzy":
        angles = threeaxisrot_tf(2 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1]),
                                 q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3],
                                 -2 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3]),
                                 2 * (q[:, 1] * q[:, 3] + q[:, 0] * q[:, 2]),
                                 q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] - q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3])
        yaw = angles[:, 0]
        pitch = angles[:, 1]
    elif rotseq == "zxy":
        angles = threeaxisrot_tf(-2 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3]),
                                 q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3],
                                 2 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1]),
                                 -2 * (q[:, 1] * q[:, 3] - q[:, 0] * q[:, 2]),
                                 q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] - q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3])
        yaw = angles[:, 0]
        pitch = angles[:, 2]
    else:
        raise ValueError('Unsupported rotation sequence: {}'.format(rotseq))

    return yaw, pitch
Example #22
Source File: feat3dnet.py    From 3DFeatNet with MIT License
def feature_detection_module(xyz, points, num_clusters, radius, is_training, mlp, mlp2, num_samples=64, use_bn=True):
    """ Detect features in point cloud

    Args:
        xyz (tf.Tensor): Input point cloud of size (batch_size, ndataset, 3)
        points (tf.Tensor): Point features. Unused in 3DFeat-Net
        num_clusters (int): Number of clusters to extract. Set to -1 to use all points
        radius (float): Radius to consider for feature detection
        is_training (tf.placeholder): Set to True if training, False during evaluation
        mlp: list of int32 -- output size for MLP on each point
        mlp2: list of int32 -- output size for MLP on each region. Set to None or [] to ignore
        num_samples: Maximum number of points to consider per cluster
        use_bn: bool -- Whether to perform batch normalization

    Returns:
        new_xyz: Cluster centers
        idx: Indices of points sampled for the clusters
        attention: Output attention weights
        orientation: Output orientation (radians)
        end_points: Unused

    """
    end_points = {}
    new_xyz = sample_points(xyz, num_clusters)  # Sample point centers
    new_points, idx = query_and_group_points(xyz, points, new_xyz, num_samples, radius, knn=False, use_xyz=True,
                                             normalize_radius=True, orientations=None)  # Extract clusters

    # Pre pooling MLP
    for i, num_out_channel in enumerate(mlp):
        new_points = conv2d(new_points, num_out_channel, kernel_size=[1, 1], stride=[1, 1], padding='VALID',
                            bn=use_bn, is_training=is_training,
                            scope='conv%d' % (i), reuse=False, )

    # Max Pool
    new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)

    # Max pooling MLP
    if mlp2 is None: mlp2 = []
    for i, num_out_channel in enumerate(mlp2):
        new_points = conv2d(new_points, num_out_channel, [1, 1],
                            padding='VALID', stride=[1, 1],
                            bn=use_bn, is_training=is_training,
                            scope='conv_post_%d' % (i))

    # Attention and orientation regression
    attention = conv2d(new_points, 1, [1, 1], stride=[1, 1], padding='VALID',
                       activation=tf.nn.softplus, bn=False, scope='attention', reuse=False)
    attention = tf.squeeze(attention, axis=[2, 3])

    orientation_xy = conv2d(new_points, 2, [1, 1], stride=[1, 1], padding='VALID',
                            activation=None, bn=False, scope='orientation', reuse=False)
    orientation_xy = tf.squeeze(orientation_xy, axis=2)
    orientation_xy = tf.nn.l2_normalize(orientation_xy, dim=2, epsilon=1e-8)
    orientation = tf.atan2(orientation_xy[:, :, 1], orientation_xy[:, :, 0])

    return new_xyz, idx, attention, orientation, end_points