Python cv2.triangulatePoints() Examples

The following are 8 code examples of cv2.triangulatePoints(), drawn from open-source projects. Each example lists its original project and source file. You may also want to check out all available functions and classes of the cv2 module.
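Before the project examples, a minimal sketch of the underlying call may help: cv2.triangulatePoints() takes two 3x4 projection matrices and two 2xN arrays of corresponding image points, and returns a 4xN array of homogeneous 3D points that must still be divided by its fourth row. The matrices and point coordinates below are invented purely for illustration.

import numpy as np
import cv2

# Two synthetic 3x4 projection matrices: camera 1 at the world origin,
# camera 2 translated one unit along x (identity intrinsics for simplicity).
P1 = np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])

# Corresponding image points in each view, shaped 2xN.
pts1 = np.array([[0.10, 0.20], [0.05, 0.15]])
pts2 = np.array([[-0.10, 0.00], [0.05, 0.15]])

X_hom = cv2.triangulatePoints(P1, P2, pts1, pts2)  # 4xN homogeneous points
X = X_hom[:3] / X_hom[3]                           # 3xN Euclidean points
print(X.T)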
Example #1
Source File: ops_3d.py    From DF-VO with MIT License
def triangulation(kp1, kp2, T_1w, T_2w):
    """Triangulation to get 3D points
    Args:
        kp1 (Nx2): keypoints in view 1 (normalized)
        kp2 (Nx2): keypoints in view 2 (normalized)
        T_1w (4x4): pose of view 1 w.r.t world, i.e. T_1w (from w to 1)
        T_2w (4x4): pose of view 2 w.r.t world, i.e. T_2w (from w to 2)
    Returns:
        X (3xN): 3D coordinates of the keypoints w.r.t world coordinate
        X1 (3xN): 3D coordinates of the keypoints w.r.t view1 coordinate
        X2 (3xN): 3D coordinates of the keypoints w.r.t view2 coordinate
    """
    kp1_3D = np.ones((3, kp1.shape[0]))
    kp2_3D = np.ones((3, kp2.shape[0]))
    kp1_3D[0], kp1_3D[1] = kp1[:, 0].copy(), kp1[:, 1].copy()
    kp2_3D[0], kp2_3D[1] = kp2[:, 0].copy(), kp2[:, 1].copy()
    X = cv2.triangulatePoints(T_1w[:3], T_2w[:3], kp1_3D[:2], kp2_3D[:2])
    X /= X[3]
    X1 = T_1w[:3] @ X
    X2 = T_2w[:3] @ X
    return X[:3], X1, X2 
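The helper above expects normalized keypoints (pixel coordinates premultiplied by the inverse intrinsics) and 4x4 camera-from-world poses. A hedged usage sketch follows; the poses and keypoints are invented and simply illustrate the expected shapes.

import numpy as np

T_1w = np.eye(4)                                  # view 1 at the world origin
T_2w = np.eye(4)
T_2w[0, 3] = -0.5                                 # view 2 shifted along x
kp1 = np.array([[0.10, 0.05], [0.20, 0.15]])      # Nx2 normalized coords, view 1
kp2 = np.array([[0.00, 0.05], [0.10, 0.15]])      # Nx2 normalized coords, view 2

X, X1, X2 = triangulation(kp1, kp2, T_1w, T_2w)
print(X.shape, X1.shape, X2.shape)                # (3, N) each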
Example #2
Source File: utils_geom.py    From pyslam with GNU General Public License v3.0
def triangulate_normalized_points(pose_1w, pose_2w, kpn_1, kpn_2):
    # P1w = np.dot(K1,  M1w) # K1*[R1w, t1w]
    # P2w = np.dot(K2,  M2w) # K2*[R2w, t2w]
    # since we are working with normalized coordinates x_hat = Kinv*x, the poses themselves serve as the projection matrices:
    P1w = pose_1w[:3,:] # [R1w, t1w]
    P2w = pose_2w[:3,:] # [R2w, t2w]

    point_4d_hom = cv2.triangulatePoints(P1w, P2w, kpn_1.T, kpn_2.T)
    good_pts_mask = np.where(point_4d_hom[3] != 0)[0]
    point_4d = point_4d_hom / point_4d_hom[3] 
    
    if __debug__:
        if False: 
            point_reproj = P1w @ point_4d
            point_reproj = point_reproj / point_reproj[2] - add_ones(kpn_1).T
            err = np.sum(point_reproj**2)
            print('reproj err: ', err)     

    #return point_4d.T
    points_3d = point_4d[:3, :].T
    return points_3d, good_pts_mask  


# compute the fundamental mat F12 and the infinite homography H21 [Hartley Zisserman pag 339] 
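triangulate_normalized_points() above behaves much like the previous example but additionally returns a mask of points whose homogeneous coordinate is non-zero. A minimal, hypothetical call (the poses and Nx2 normalized keypoint arrays are invented):

import numpy as np

pose_1w = np.eye(4)                               # 4x4 camera-from-world pose, view 1
pose_2w = np.eye(4)
pose_2w[0, 3] = -0.5                              # view 2 shifted along x
kpn_1 = np.array([[0.10, 0.05], [0.20, 0.15]])
kpn_2 = np.array([[0.00, 0.05], [0.10, 0.15]])

points_3d, good_pts_mask = triangulate_normalized_points(pose_1w, pose_2w, kpn_1, kpn_2)
print(points_3d.shape)                            # (N, 3)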
Example #3
Source File: components.py    From stereo_ptam with GNU General Public License v3.0
def triangulate_points(self, kps_left, desps_left, kps_right, desps_right):
        matches = self.feature.row_match(
            kps_left, desps_left, kps_right, desps_right)
        assert len(matches) > 0

        px_left = np.array([kps_left[m.queryIdx].pt for m in matches])
        px_right = np.array([kps_right[m.trainIdx].pt for m in matches])

        points = cv2.triangulatePoints(
            self.left.projection_matrix, 
            self.right.projection_matrix, 
            px_left.transpose(), 
            px_right.transpose() 
            ).transpose()  # shape: (N, 4)

        points = points[:, :3] / points[:, 3:]

        can_view = np.logical_and(
            self.left.can_view(points), 
            self.right.can_view(points))

        mappoints = []
        matchs = []
        for i, point in enumerate(points):
            if not can_view[i]:
                continue
            normal = point - self.position
            normal = normal / np.linalg.norm(normal)

            color = self.left.get_color(px_left[i])

            mappoint = MapPoint(
                point, normal, desps_left[matches[i].queryIdx], color)
            mappoints.append(mappoint)
            matchs.append((matches[i].queryIdx, matches[i].trainIdx))

        return mappoints, matchs 
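The method above relies on 3x4 pixel-space projection matrices stored on the left and right cameras. For a calibrated, rectified stereo pair with intrinsics K and baseline b these are typically K[I | 0] and K[I | -b*e1]; the values below are illustrative assumptions, not taken from stereo_ptam.

import numpy as np

K = np.array([[700.0,   0.0, 320.0],
              [  0.0, 700.0, 240.0],
              [  0.0,   0.0,   1.0]])
baseline = 0.12  # metres, invented

P_left  = K @ np.hstack([np.eye(3), np.zeros((3, 1))])
P_right = K @ np.hstack([np.eye(3), np.array([[-baseline], [0.0], [0.0]])])
# These 3x4 matrices play the role of self.left.projection_matrix and
# self.right.projection_matrix used by triangulate_points() above.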
Example #4
Source File: test_triangulatepoints.py    From pyslam with GNU General Public License v3.0
def triangulatePoints( P1, P2, x1, x2 ):
    X = cv2.triangulatePoints( P1[:3], P2[:3], x1[:2], x2[:2] )
    return X / X[3]  # divide by the 4th row so the homogeneous coordinates are normalized (w = 1)
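After the division the fourth row is all ones, so X[:3] are Euclidean coordinates. A common follow-up step is a cheirality check that keeps only points lying in front of both cameras; the helper below is a hypothetical sketch built on the wrapper above, assuming P1 and P2 contain [R|t], optionally premultiplied by a standard intrinsic matrix K whose third row is [0, 0, 1].

def triangulate_in_front(P1, P2, x1, x2):
    # Triangulate, then keep only points with positive depth in both views.
    X = triangulatePoints(P1, P2, x1, x2)   # 4xN, last row equals 1
    depth1 = (P1[:3] @ X)[2]                # z-coordinate in camera 1
    depth2 = (P2[:3] @ X)[2]                # z-coordinate in camera 2
    in_front = (depth1 > 0) & (depth2 > 0)
    return X[:3, in_front]                  # 3xM Euclidean points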
Example #5
Source File: auxiliaryfunctions_3d.py    From DeepLabCut with GNU Lesser General Public License v3.0
def compute_triangulation_calibration_images(
    stereo_matrix, projectedPoints1, projectedPoints2, path_undistort, cfg_3d, plot=True
):
    """
    Performs triangulation of the calibration images.
    """
    from mpl_toolkits.mplot3d import Axes3D

    triangulate = []
    P1 = stereo_matrix["P1"]
    P2 = stereo_matrix["P2"]
    cmap = cfg_3d["colormap"]
    colormap = plt.get_cmap(cmap)
    markerSize = cfg_3d["dotsize"]
    markerType = cfg_3d["markerType"]

    for i in range(projectedPoints1.shape[0]):
        X_l = triangulatePoints(P1, P2, projectedPoints1[i], projectedPoints2[i])
        triangulate.append(X_l)
    triangulate = np.asanyarray(triangulate)

    # Plotting
    if plot == True:
        col = colormap(np.linspace(0, 1, triangulate.shape[0]))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection="3d")

        for i in range(triangulate.shape[0]):
            xs = triangulate[i, 0, :]
            ys = triangulate[i, 1, :]
            zs = triangulate[i, 2, :]
            ax.scatter(xs, ys, zs, c=col[i], marker=markerType, s=markerSize)
            ax.set_xlabel("X")
            ax.set_ylabel("Y")
            ax.set_zlabel("Z")
        plt.savefig(os.path.join(str(path_undistort), "checkerboard_3d.png"))
    return triangulate 
Example #6
Source File: auxiliaryfunctions_3d.py    From DeepLabCut with GNU Lesser General Public License v3.0
def triangulatePoints(P1, P2, x1, x2):
    X = cv2.triangulatePoints(P1[:3], P2[:3], x1, x2)
    return X / X[3] 
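In Example #5 the projection matrices P1 and P2 are read from a stereo_matrix dictionary produced by the stereo-calibration step. A generic, hedged sketch of obtaining such matrices with OpenCV is shown below; the calibration values are synthetic and this is not necessarily how DeepLabCut builds its own dictionary.

import numpy as np
import cv2

# Synthetic calibration: identical pinhole cameras, zero distortion,
# second camera offset 12 cm along x.
K1 = K2 = np.array([[700.0,   0.0, 320.0],
                    [  0.0, 700.0, 240.0],
                    [  0.0,   0.0,   1.0]])
dist1 = dist2 = np.zeros(5)
R = np.eye(3)
T = np.array([[-0.12], [0.0], [0.0]])
image_size = (640, 480)

R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
    K1, dist1, K2, dist2, image_size, R, T)
stereo_matrix = {"P1": P1, "P2": P2}   # same keys as used in Example #5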
Example #7
Source File: smart.py    From ImageAnalysis with MIT License
def triangulate_features(i1, i2):
    # quick sanity checks
    if i1 == i2:
        return None
    if i2.name not in i1.match_list:
        return None
    if len(i1.match_list[i2.name]) == 0:
        return None

    if not i1.kp_list or not len(i1.kp_list):
        i1.load_features()
    if not i2.kp_list or not len(i2.kp_list):
        i2.load_features()

    # camera calibration
    K = camera.get_K()
    IK = np.linalg.inv(K)

    # get poses
    rvec1, tvec1 = i1.get_proj()
    rvec2, tvec2 = i2.get_proj()
    R1, jac = cv2.Rodrigues(rvec1)
    PROJ1 = np.concatenate((R1, tvec1), axis=1)
    R2, jac = cv2.Rodrigues(rvec2)
    PROJ2 = np.concatenate((R2, tvec2), axis=1)

    # setup data structures for cv2 call
    uv1 = []; uv2 = []; indices = []
    for pair in i1.match_list[i2.name]:
        p1 = i1.kp_list[ pair[0] ].pt
        p2 = i2.kp_list[ pair[1] ].pt
        uv1.append( [p1[0], p1[1], 1.0] )
        uv2.append( [p2[0], p2[1], 1.0] )
    pts1 = IK.dot(np.array(uv1).T)
    pts2 = IK.dot(np.array(uv2).T)
    points = cv2.triangulatePoints(PROJ1, PROJ2, pts1[:2], pts2[:2])
    points /= points[3]
    return points

# find (forward) affine transformation between feature pairs 
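triangulate_features() above normalizes the pixel coordinates with the inverse intrinsics and triangulates with bare [R|t] matrices. An equivalent route is to keep raw pixel coordinates and fold K into the projection matrices instead; the helper below is a hypothetical sketch (not part of ImageAnalysis) that assumes the same numpy/cv2 imports and uv lists built as in the function above.

def triangulate_pixels(K, PROJ1, PROJ2, uv1, uv2):
    # Fold the intrinsics into each 3x4 projection matrix and triangulate
    # directly from pixel coordinates.
    pts1 = np.asarray(uv1, dtype=np.float64).T[:2]   # 2xN pixel coordinates
    pts2 = np.asarray(uv2, dtype=np.float64).T[:2]
    points = cv2.triangulatePoints(K @ PROJ1, K @ PROJ2, pts1, pts2)
    return points / points[3]                        # normalized homogeneous 4xN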
Example #8
Source File: image.py    From ImageAnalysis with MIT License
def get_body2ned(self, opt=False):
        ned, ypr, quat = self.get_camera_pose(opt)
        return transformations.quaternion_matrix(np.array(quat))[:3,:3]

    # compute rvec and tvec (used to build the camera projection
    # matrix for things like cv2.triangulatePoints) from camera pose