Python cv2.Rodrigues() Examples

The following are 28 code examples of cv2.Rodrigues(), collected from open-source projects. You can go to the original project or source file by following the link above each example, or check out all available functions/classes of the module cv2.
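As a quick refresher before the examples: cv2.Rodrigues() converts a rotation vector (axis-angle form, where the vector's direction gives the rotation axis and its norm gives the angle in radians) to a 3x3 rotation matrix, and vice versa, returning the converted value along with the Jacobian of the conversion. A minimal round-trip sketch (the rotation value is chosen purely for illustration):

import cv2
import numpy as np

# A rotation of 90 degrees about the z-axis, as an axis-angle vector.
rvec = np.array([0.0, 0.0, np.pi / 2])

# Vector -> matrix; the second return value is the Jacobian of the conversion.
R, jacobian = cv2.Rodrigues(rvec)

# Matrix -> vector recovers the original rotation (OpenCV returns a 3x1 column).
rvec_back, _ = cv2.Rodrigues(R)
assert np.allclose(rvec.reshape(3, 1), rvec_back)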
Example #1
Source File: head_pose_solver.py    From talking-head-anime-demo with MIT License
def solve_head_pose(self, face_landmarks):
        indices = [17, 21, 22, 26, 36, 39, 42, 45, 31, 35]
        image_pts = np.zeros((len(indices), 2))
        for i in range(len(indices)):
            part = face_landmarks.part(indices[i])
            image_pts[i, 0] = part.x
            image_pts[i, 1] = part.y

        _, rotation_vec, translation_vec = cv2.solvePnP(self.face_model_points,
                                                        image_pts,
                                                        self.camera_matrix,
                                                        self.distortion_coeffs)
        projected_head_pose_box_points, _ = cv2.projectPoints(self.head_pose_box_points,
                                                              rotation_vec,
                                                              translation_vec,
                                                              self.camera_matrix,
                                                              self.distortion_coeffs)
        projected_head_pose_box_points = tuple(map(tuple, projected_head_pose_box_points.reshape(8, 2)))

        # Calculate euler angle
        rotation_mat, _ = cv2.Rodrigues(rotation_vec)
        pose_mat = cv2.hconcat((rotation_mat, translation_vec))
        _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
        return projected_head_pose_box_points, euler_angles 
Example #2
Source File: models.py    From DAMDNet with Apache License 2.0
def fun(self, x, params):
        # scale
        s = params[0]
        # rotation (axis-angle vector)
        r = params[1:4]
        # translation
        t = params[4:6]
        # blendshape weights
        w = params[6:]

        mean3DShape = x[0]
        blendshapes = x[1]

        # rotation matrix from the rotation vector (Rodrigues' formula)
        R = cv2.Rodrigues(r)[0]
        P = R[:2]
        shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)

        projected = s * np.dot(P, shape3D) + t[:, np.newaxis]

        return projected 
Example #3
Source File: conversions.py    From spl with GNU General Public License v3.0
def rotmat2aa(rotmats):
    """
    Convert rotation matrices to angle-axis using OpenCV's Rodrigues formula.
    Args:
        rotmats: A np array of shape (..., 3, 3)

    Returns:
        A np array of shape (..., 3)
    """
    assert rotmats.shape[-1] == 3 and rotmats.shape[-2] == 3 and len(rotmats.shape) >= 3, 'invalid input dimension'
    orig_shape = rotmats.shape[:-2]
    rots = np.reshape(rotmats, [-1, 3, 3])
    aas = np.zeros([rots.shape[0], 3])
    for i in range(rots.shape[0]):
        aas[i] = np.squeeze(cv2.Rodrigues(rots[i])[0])
    return np.reshape(aas, orig_shape + (3,)) 
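A quick sanity check for the batched converter above (a sketch, assuming rotmat2aa is in scope): a batch of identity matrices maps to all-zero angle-axis vectors.

import numpy as np

batch = np.tile(np.eye(3), (2, 5, 1, 1))   # shape (2, 5, 3, 3)
aa = rotmat2aa(batch)                      # shape (2, 5, 3)
assert np.allclose(aa, 0.0)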
Example #4
Source File: utils_.py    From DAMDNet with Apache License 2.0
def getShape3D(mean3DShape, blendshapes, params):
    # scale
    s = params[0]
    # rotation (axis-angle vector)
    r = params[1:4]
    # translation
    t = params[4:6]
    # blendshape weights
    w = params[6:]

    # rotation matrix from the rotation vector (Rodrigues' formula)
    R = cv2.Rodrigues(r)[0]
    shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)

    shape3D = s * np.dot(R, shape3D)
    shape3D[:2, :] = shape3D[:2, :] + t[:, np.newaxis]

    return shape3D 
Example #5
Source File: utils.py    From FaceSwap with MIT License
def getShape3D(mean3DShape, blendshapes, params):
    # scale
    s = params[0]
    # rotation (axis-angle vector)
    r = params[1:4]
    # translation
    t = params[4:6]
    # blendshape weights
    w = params[6:]

    # rotation matrix from the rotation vector (Rodrigues' formula)
    R = cv2.Rodrigues(r)[0]
    shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)

    shape3D = s * np.dot(R, shape3D)
    shape3D[:2, :] = shape3D[:2, :] + t[:, np.newaxis]

    return shape3D 
Example #6
Source File: models.py    From FaceSwap with MIT License
def fun(self, x, params):
        # scale
        s = params[0]
        # rotation (axis-angle vector)
        r = params[1:4]
        # translation
        t = params[4:6]
        # blendshape weights
        w = params[6:]

        mean3DShape = x[0]
        blendshapes = x[1]

        # rotation matrix from the rotation vector (Rodrigues' formula)
        R = cv2.Rodrigues(r)[0]
        P = R[:2]
        shape3D = mean3DShape + np.sum(w[:, np.newaxis, np.newaxis] * blendshapes, axis=0)

        projected = s * np.dot(P, shape3D) + t[:, np.newaxis]

        return projected 
Example #7
Source File: preprocessing.py    From HairNet with MIT License
def gen_RT_matrix(path):
    with open(path, 'r') as f:
        lines = f.readlines()
        lines = lines[0].split(' ')
        R_vec = np.array([float(lines[3]),float(lines[5]), float(lines[4])]).reshape(3, 1)
        T_vec = np.array([float(lines[0]),float(lines[1]), float(lines[2])]).reshape(3, 1)
        R_mat = cv2.Rodrigues(R_vec)[0].reshape(3,3)
        RT_mat = np.hstack((R_mat, T_vec)).reshape(3,4)
        RT_mat = np.vstack((RT_mat, [0,0,0,1])).reshape(4,4)
        return inv(RT_mat)


# read strandsXXXXX_YYYYY_AAAAA_mBB.convdata
# Dimension: 100*4*32*32  
# v[i,0:3,n,m] is the x,y,z position of the ith point on the [n,m]th strand.
# v[i,3,n,m] is a value related to the curvature of that point. 
# if v[:,:,n,m] is all zeros, the strand is empty.
# x: v[i,3,n,m][0] 
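Going by the comment block above, each .convdata file holds an array of shape (100, 4, 32, 32). Below is a hedged sketch of loading it and filtering out empty strands; the file name, dtype, and np.fromfile call are illustrative assumptions, not part of HairNet:

import numpy as np

# Hypothetical loading step: assume 32-bit floats stored in the documented layout.
v = np.fromfile('strands00001_00001_00001_m00.convdata', dtype=np.float32)
v = v.reshape(100, 4, 32, 32)

# A strand (n, m) is empty when v[:, :, n, m] is all zeros.
non_empty = ~np.all(v == 0, axis=(0, 1))   # boolean mask of shape (32, 32)
positions = v[:, 0:3, :, :]                # x, y, z of every point on every strand
curvature = v[:, 3, :, :]                  # curvature-related value per point
print('non-empty strands:', int(non_empty.sum()))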
Example #8
Source File: utils.py    From models with MIT License
def pnp(points_3D, points_2D, cameraMatrix):
    try:
        distCoeffs = pnp.distCoeffs
    except AttributeError:
        distCoeffs = np.zeros((8, 1), dtype='float32')

    assert points_3D.shape[0] == points_2D.shape[0], 'points 3D and points 2D must have same number of vertices'

    _, R_exp, t = cv2.solvePnP(points_3D,
                              # points_2D,
                              np.ascontiguousarray(points_2D[:,:2]).reshape((-1,1,2)),
                              cameraMatrix,
                              distCoeffs)
                              # , None, None, False, cv2.SOLVEPNP_UPNP)

    R, _ = cv2.Rodrigues(R_exp)
    return R, t 
Example #9
Source File: preprocess_dip.py    From spl with GNU General Public License v3.0
def rotmat2aa(rotmats):
    """
    Convert rotation matrices to angle-axis format.
    Args:
        rotmats: np array of shape (seq_length, n_joints*9).

    Returns: np array of shape (seq_length, n_joints*3)
    """
    seq_length = rotmats.shape[0]
    assert rotmats.shape[1] % 9 == 0
    n_joints = rotmats.shape[1] // 9
    ori = np.reshape(rotmats, [seq_length*n_joints, 3, 3])
    aas = np.zeros([seq_length*n_joints, 3])
    for i in range(ori.shape[0]):
        aas[i] = np.squeeze(cv2.Rodrigues(ori[i])[0])
    return np.reshape(aas, [seq_length, n_joints*3]) 
Example #10
Source File: local_descriptors.py    From hfnet with MIT License
def compute_pose_error(kpts1, kpts2_3d_2, matches, vis1, vis2, T_2to1, K1,
                       reproj_thresh):
    valid = vis1[matches[:, 0]] & vis2[matches[:, 1]]
    matches = matches[valid]
    failure = (None, None)

    if len(matches) < 4:
        return failure

    kpts1 = kpts1[matches[:, 0]].astype(np.float32).reshape((-1, 1, 2))
    kpts2_3d_2 = kpts2_3d_2[matches[:, 1]].reshape((-1, 1, 3))
    success, R_vec, t, inliers = cv2.solvePnPRansac(
        kpts2_3d_2, kpts1, K1, np.zeros(4), flags=cv2.SOLVEPNP_P3P,
        iterationsCount=1000, reprojectionError=reproj_thresh)
    if not success:
        return failure

    R, _ = cv2.Rodrigues(R_vec)
    t = t[:, 0]

    error_t = np.linalg.norm(t - T_2to1[:3, 3])
    error_R = angle_error(R, T_2to1[:3, :3])
    return error_t, error_R 
Example #11
Source File: utils.py    From dip18 with GNU General Public License v3.0
def joint_angle_error(predicted_pose_params, target_pose_params):
    """
    Computes the distance in joint angles between predicted and target joints for every given frame. Currently,
    this function can only handle input pose parameters represented as rotation matrices.

    :param predicted_pose_params: An np array of shape `(seq_length, dof)` where `dof` is 216, i.e. a 3-by-3 rotation
      matrix for each of the 24 joints.
    :param target_pose_params: An np array of the same shape as `predicted_pose_params` representing the target poses.
    :return: An np array of shape `(seq_length, 24)` containing the joint angle error in radians for each joint.
    """
    seq_length, dof = predicted_pose_params.shape[0], predicted_pose_params.shape[1]
    assert dof == 216, 'unexpected number of degrees of freedom'
    assert target_pose_params.shape[0] == seq_length and target_pose_params.shape[1] == dof, 'target_pose_params must match predicted_pose_params'

    # reshape to have rotation matrices explicit
    n_joints = dof // 9
    p1 = np.reshape(predicted_pose_params, [-1, n_joints, 3, 3])
    p2 = np.reshape(target_pose_params, [-1, n_joints, 3, 3])

    # compute R1 * R2.T, if prediction and target match, this will be the identity matrix
    r1 = np.reshape(p1, [-1, 3, 3])
    r2 = np.reshape(p2, [-1, 3, 3])
    r2t = np.transpose(r2, [0, 2, 1])
    r = np.matmul(r1, r2t)

    # convert `r` to angle-axis representation and extract the angle, which is our measure of difference between
    # the predicted and target orientations
    angles = []
    for i in range(r1.shape[0]):
        aa, _ = cv2.Rodrigues(r[i])
        angles.append(np.linalg.norm(aa))

    return np.reshape(np.array(angles), [seq_length, n_joints]) 
Example #12
Source File: headpose.py    From PINTO_model_zoo with MIT License
def get_head_pose(shape,img):
    h,w,_=img.shape
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)



    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Example #13
Source File: imutils.py    From GraphCMR with BSD 3-Clause "New" or "Revised" License
def rot_aa(aa, rot):
    """Rotate axis angle parameters."""
    # pose parameters
    R = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0],
                  [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0],
                  [0, 0, 1]])
    # find the rotation of the body in camera frame
    per_rdg, _ = cv2.Rodrigues(aa)
    # apply the global rotation to the global orientation
    resrot, _ = cv2.Rodrigues(np.dot(R,per_rdg))
    aa = (resrot.T)[0]
    return aa 
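A hedged usage sketch for rot_aa (assuming the function above is in scope): applying a 90-degree in-plane rotation to a zero pose yields a pure rotation about the z-axis.

import numpy as np

aa = rot_aa(np.zeros(3), 90.0)
print(aa)   # approximately [0, 0, -pi/2]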
Example #14
Source File: util_func.py    From sanet_relocal_demo with GNU General Public License v3.0
def compute_pose_lm_pnp(gt_Tcws, query_X_w, rand_R, scene_center, query_K, pnp_x_2d, repro_thres):
    N, _, H, W = query_X_w.shape

    # recover original scene coordinates (undo the random rotation and re-centering)
    query_X_3d_w = recover_original_scene_coordinates(query_X_w, rand_R, scene_center)
    query_X_3d_w = query_X_3d_w.view(N, H, W, 3).squeeze(0).detach().cpu().numpy()

    # run Ransac PnP
    lm_pnp_pose_vec, inlier_map = lm_pnp.compute_lm_pnp(pnp_x_2d, query_X_3d_w, query_K, repro_thres, 128, 100)
    R_res, _ = cv2.Rodrigues(lm_pnp_pose_vec[:3])
    lm_pnp_pose = np.eye(4, dtype=np.float32)
    lm_pnp_pose[:3, :3] = R_res
    lm_pnp_pose[:3, 3] = lm_pnp_pose_vec[3:].ravel()

    # measure accuracy
    gt_pose = gt_Tcws.squeeze(0).detach().cpu().numpy()

    R_acc = rel_rot_angle(lm_pnp_pose, gt_pose)
    t_acc = rel_distance(lm_pnp_pose, gt_pose)

    #     ransc_inlier = None
    return R_acc, t_acc, lm_pnp_pose, inlier_map 
Example #15
Source File: ransc_pnp.py    From sanet_relocal_demo with GNU General Public License v3.0
def solve_pnp(K: torch.Tensor, x_2d: torch.Tensor, X_3d_w: torch.Tensor, reproj_thres=2.0):
    """
    Solve PnP problem with OpenCV lib
    :param K: camera intrinsic matrix, dim: (N, 3x3) or (3, 3)
    :param x_2d: 2D pixel coordinates, dim: (N, H, W, 2) or (H, W, 2)
    :param X_3d_w: 3D world coordinates, dim: (N, H, W, 3) or (H, W, 3)
    :return:
    """

    keep_dim_n = False
    if K.dim() == 2:
        keep_dim_n = True
        K = K.unsqueeze(0)
        x_2d = x_2d.unsqueeze(0)
        X_3d_w = X_3d_w.unsqueeze(0)

    N, H, W = x_2d.shape[:3]
    K = K.detach().cpu().numpy()
    x_2d = x_2d.detach().cpu().numpy()
    X_3d_w = X_3d_w.view(N, -1, 3).detach().cpu().numpy()

    poses = []
    x_2d = x_2d[0].reshape(1, H*W, 2)
    dist = np.zeros(4)
    for n in range(N):
        k = K[n]
        X_3d = X_3d_w[n].reshape(1, H*W, 3)
        _, R_res, t_res, _ = cv2.solvePnPRansac(X_3d, x_2d, k, dist, reprojectionError=reproj_thres)
        R_res, _ = cv2.Rodrigues(R_res)

        pnp_pose = np.eye(4, dtype=np.float32)
        pnp_pose[:3, :3] = R_res
        pnp_pose[:3, 3] = t_res.ravel()
        poses.append(pnp_pose)

    poses = torch.stack([torch.from_numpy(pose) for pose in poses])

    if keep_dim_n:
        poses = poses.squeeze(0)

    return poses 
Example #16
Source File: camera_calibration.py    From EmotionClassifier with GNU General Public License v3.0
def calib_camera(model3D, fidu_XY):
    # compute pose using reference 3D points + query 2D points
    ret, rvecs, tvec = cv2.solvePnP(model3D.model_TD, fidu_XY, model3D.out_A, None, None, None, False)
    rmat, jacobian = cv2.Rodrigues(rvecs, None)

    inside = calc_inside(model3D.out_A, rmat, tvec, model3D.size_U[0], model3D.size_U[1], model3D.model_TD)
    if(inside == 0):
        tvec = -tvec
        t = np.pi
        RRz180 = np.asmatrix([np.cos(t), -np.sin(t), 0, np.sin(t), np.cos(t), 0, 0, 0, 1]).reshape((3, 3))
        rmat = RRz180*rmat
    return rmat, tvec 
Example #17
Source File: bird_vis.py    From cmr with MIT License
def __init__(self, img_size, faces, t_size=3):
        self.renderer = NeuralRenderer(img_size)
        self.faces = Variable(
            torch.IntTensor(faces).cuda(), requires_grad=False)
        if self.faces.dim() == 2:
            self.faces = torch.unsqueeze(self.faces, 0)
        default_tex = np.ones((1, self.faces.shape[1], t_size, t_size, t_size,
                               3))
        blue = np.array([156, 199, 234.]) / 255.
        default_tex = default_tex * blue
        # Could make each triangle different color
        self.default_tex = Variable(
            torch.FloatTensor(default_tex).cuda(), requires_grad=False)
        # rot = transformations.quaternion_about_axis(np.pi/8, [1, 0, 0])
        # This is median quaternion from sfm_pose
        # rot = np.array([ 0.66553962,  0.31033762, -0.02249813,  0.01267084])
        # This is the side view:
        import cv2
        R0 = cv2.Rodrigues(np.array([np.pi / 3, 0, 0]))[0]
        R1 = cv2.Rodrigues(np.array([0, np.pi / 2, 0]))[0]
        R = R1.dot(R0)
        R = np.vstack((np.hstack((R, np.zeros((3, 1)))), np.array([0, 0, 0,
                                                                   1])))
        rot = transformations.quaternion_from_matrix(R, isprecise=True)
        cam = np.hstack([0.75, 0, 0, rot])
        self.default_cam = Variable(
            torch.FloatTensor(cam).cuda(), requires_grad=False)
        self.default_cam = torch.unsqueeze(self.default_cam, 0) 
Example #18
Source File: bird_vis.py    From cmr with MIT License
def rotated(self, vert, deg, axis=[0, 1, 0], cam=None, texture=None):
        """
        vert is N x 3, torch FloatTensor (or Variable)
        """
        import cv2
        new_rot = cv2.Rodrigues(np.deg2rad(deg) * np.array(axis))[0]
        new_rot = convert_as(torch.FloatTensor(new_rot), vert)

        center = vert.mean(0)
        new_vert = torch.t(torch.matmul(new_rot,
                                        torch.t(vert - center))) + center
        # new_vert = torch.matmul(vert - center, new_rot) + center

        return self.__call__(new_vert, cams=cam, texture=texture) 
Example #19
Source File: marker.py    From BAR4Py with MIT License
def cvt2ModelView(self, Rx=np.array([[1,0,0],[0,-1,0],[0,0,-1]])):
        R, _ = cv2.Rodrigues(self.rvec)
        Rt = np.hstack((R, self.tvec))
        M = np.eye(4)
        M[:3,:] = np.dot(Rx, Rt)
        return M 
Example #20
Source File: renderer.py    From motion_reconstruction with BSD 3-Clause "New" or "Revised" License
def rotated(self,
                verts,
                deg,
                cam=None,
                axis='y',
                img=None,
                do_alpha=True,
                far=None,
                near=None,
                color_id=0,
                img_size=None):
        import math
        if axis == 'y':
            around = cv2.Rodrigues(np.array([0, math.radians(deg), 0]))[0]
        elif axis == 'x':
            around = cv2.Rodrigues(np.array([math.radians(deg), 0, 0]))[0]
        else:
            around = cv2.Rodrigues(np.array([0, 0, math.radians(deg)]))[0]
        center = verts.mean(axis=0)
        new_v = np.dot((verts - center), around) + center

        return self.__call__(
            new_v,
            cam,
            img=img,
            do_alpha=do_alpha,
            far=far,
            near=near,
            img_size=img_size,
            color_id=color_id) 
Example #21
Source File: detect_aruco.py    From flock with BSD 3-Clause "New" or "Revised" License
def rvec_and_tvec_to_matrix(rvec, tvec):
    """Rodrigues rotation and translation vector to 4x4 matrix"""
    t_matrix = tft.translation_matrix(tvec)
    R, _ = cv2.Rodrigues(rvec)
    r_matrix = tft.identity_matrix()
    r_matrix[:3, :3] = R
    return np.dot(t_matrix, r_matrix) 
Example #22
Source File: eval_util.py    From human_dynamics with BSD 2-Clause "Simplified" License
def rot_mat_to_axis_angle(rot_matrices):
    """
    Args:
        rot_matrices (24x3x3).

    Returns:
        poses_aa (72).
    """
    pose = []
    for rot_mat in rot_matrices:
        pose.append(cv2.Rodrigues(rot_mat)[0])
    return np.array(pose).reshape(72) 
Example #23
Source File: eval_util.py    From human_dynamics with BSD 2-Clause "Simplified" License
def axis_angle_to_rot_mat(poses_aa):
    """
    Args:
        poses_aa (72).

    Returns:
        rot_matrices (24x3x3).
    """
    rot_matrices = []
    for pose in poses_aa.reshape(-1, 3):
        rot_matrices.append(cv2.Rodrigues(pose)[0])
    return np.array(rot_matrices) 
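Since the two helpers above are inverses of each other, here is a quick round-trip check (a sketch, assuming both functions are in scope; angles are kept below pi so that the angle-axis representation is unique):

import numpy as np

axes = np.random.randn(24, 3)
axes /= np.linalg.norm(axes, axis=1, keepdims=True)
angles = np.random.uniform(0.0, np.pi, size=(24, 1))
poses_aa = (axes * angles).reshape(72)

recovered = rot_mat_to_axis_angle(axis_angle_to_rot_mat(poses_aa))
assert np.allclose(poses_aa, recovered, atol=1e-6)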
Example #24
Source File: camera_calibration.py    From camera_calibration_API with Apache License 2.0
def _draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal,
                       extrinsics, board_width, board_height, square_size,
                       patternCentric):
    # util function
    min_values = np.inf
    max_values = -np.inf

    if patternCentric:
        X_moving = _create_camera_model(camera_matrix, cam_width, cam_height, scale_focal)
        X_static = _create_board_model(extrinsics, board_width, board_height, square_size)
    else:
        X_static = _create_camera_model(camera_matrix, cam_width, cam_height, scale_focal, True)
        X_moving = _create_board_model(extrinsics, board_width, board_height, square_size)

    cm_subsection = linspace(0.0, 1.0, extrinsics.shape[0])
    colors = [ cm.jet(x) for x in cm_subsection ]

    for i in range(len(X_static)):
        X = np.zeros(X_static[i].shape)
        for j in range(X_static[i].shape[1]):
            X[:,j] = _transform_to_matplotlib_frame(np.eye(4), X_static[i][:,j])
        ax.plot3D(X[0,:], X[1,:], X[2,:], color='r')
        min_values = np.minimum(min_values, X[0:3,:].min(1))
        max_values = np.maximum(max_values, X[0:3,:].max(1))

    for idx in range(extrinsics.shape[0]):
        R, _ = cv2.Rodrigues(extrinsics[idx,0:3])
        cMo = np.eye(4,4)
        cMo[0:3,0:3] = R
        cMo[0:3,3] = extrinsics[idx,3:6]
        for i in range(len(X_moving)):
            X = np.zeros(X_moving[i].shape)
            for j in range(X_moving[i].shape[1]):
                X[0:4,j] = _transform_to_matplotlib_frame(cMo, X_moving[i][0:4,j], patternCentric)
            ax.plot3D(X[0,:], X[1,:], X[2,:], color=colors[idx])
            min_values = np.minimum(min_values, X[0:3,:].min(1))
            max_values = np.maximum(max_values, X[0:3,:].max(1))

    return min_values, max_values 
Example #25
Source File: kitti_loader.py    From self-supervised-depth-completion with MIT License
def __getitem__(self, index):
        rgb, sparse, target, rgb_near = self.__getraw__(index)
        rgb, sparse, target, rgb_near = self.transform(rgb, sparse, target,
                                                       rgb_near, self.args)
        r_mat, t_vec = None, None
        if self.split == 'train' and self.args.use_pose:
            success, r_vec, t_vec = get_pose_pnp(rgb, rgb_near, sparse, self.K)
            # discard if translation is too small
            success = success and LA.norm(t_vec) > self.threshold_translation
            if success:
                r_mat, _ = cv2.Rodrigues(r_vec)
            else:
                # return the same image and no motion when PnP fails
                rgb_near = rgb
                t_vec = np.zeros((3, 1))
                r_mat = np.eye(3)

        rgb, gray = handle_gray(rgb, self.args)
        candidates = {"rgb":rgb, "d":sparse, "gt":target, \
            "g":gray, "r_mat":r_mat, "t_vec":t_vec, "rgb_near":rgb_near}
        items = {
            key: to_float_tensor(val)
            for key, val in candidates.items() if val is not None
        }

        return items 
Example #26
Source File: pose.py    From Peppa_Pig_Face_Engine with Apache License 2.0
def get_head_pose(shape,img):
    h,w,_=img.shape
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
    dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)



    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #                         shape[39], shape[42], shape[45], shape[31], shape[35],
    #                         shape[48], shape[54], shape[57], shape[8]])
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35]])
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)

    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,
                                        dist_coeffs)

    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec)
    pose_mat = cv2.hconcat((rotation_mat, translation_vec))
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)

    return reprojectdst, euler_angle 
Example #27
Source File: util.py    From ngransac with BSD 3-Clause "New" or "Revised" License
def pose_error(R, gt_R, t, gt_t):
	"""Compute the angular error between two rotation matrices and two translation vectors.

	Keyword arguments:
	R -- 2D numpy array containing an estimated rotation
	gt_R -- 2D numpy array containing the corresponding ground truth rotation
	t -- 2D numpy array containing an estimated translation as column
	gt_t -- 2D numpy array containing the corresponding ground truth translation
	"""

	# calculate angle between provided rotations
	dR = np.matmul(R, np.transpose(gt_R))
	dR = cv2.Rodrigues(dR)[0]
	dR = np.linalg.norm(dR) * 180 / math.pi
	
	# calculate angle between provided translations
	dT = float(np.dot(gt_t.T, t))
	dT /= float(np.linalg.norm(gt_t))

	if dT > 1 or dT < -1:
		print("Domain warning! dT:", dT)
		dT = max(-1, min(1, dT))
	dT = math.acos(dT) * 180 / math.pi

	return dR, dT 
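A hedged usage sketch for pose_error (assuming the function above is in scope): comparing a pose against itself yields zero rotational and translational error.

import cv2
import numpy as np

R = cv2.Rodrigues(np.array([0.0, 0.3, 0.0]))[0]   # an arbitrary rotation matrix
t = np.array([[0.0], [0.0], [1.0]])               # a translation as a column vector
dR, dT = pose_error(R, R, t, t)
print(dR, dT)   # identical inputs -> both errors are 0 (up to floating point)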
Example #28
Source File: lbs.py    From RingNet with MIT License
def global_rigid_transformation(pose, J, kintree_table, xp):
    results = {}
    pose = pose.reshape((-1,3))
    id_to_col = {kintree_table[1,i] : i for i in range(kintree_table.shape[1])}
    parent = {i : id_to_col[kintree_table[0,i]] for i in range(1, kintree_table.shape[1])}

    if xp == chumpy:
        from posemapper import Rodrigues
        rodrigues = lambda x : Rodrigues(x)
    else:
        import cv2
        rodrigues = lambda x : cv2.Rodrigues(x)[0]

    with_zeros = lambda x : xp.vstack((x, xp.array([[0.0, 0.0, 0.0, 1.0]])))
    results[0] = with_zeros(xp.hstack((rodrigues(pose[0,:]), J[0,:].reshape((3,1)))))        
        
    for i in range(1, kintree_table.shape[1]):
        results[i] = results[parent[i]].dot(with_zeros(xp.hstack((
            rodrigues(pose[i,:]),
            ((J[i,:] - J[parent[i],:]).reshape((3,1)))
            ))))

    pack = lambda x : xp.hstack([np.zeros((4, 3)), x.reshape((4,1))])
    
    results = [results[i] for i in sorted(results.keys())]
    results_global = results

    # subtract the transformed rest-pose joint locations so that vertices deform
    # relative to the rest pose
    results2 = [results[i] - pack(results[i].dot(xp.concatenate((J[i,:], [0]))))
                for i in range(len(results))]
    results = results2
    result = xp.dstack(results)
    return result, results_global