Python cv2.warpPerspective() Examples

The following are 30 code examples of cv2.warpPerspective(). Each is taken from an open-source project; the source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the cv2 module.
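For orientation before the examples: cv2.warpPerspective() takes a source image, a 3x3 transform matrix (typically from cv2.getPerspectiveTransform() or cv2.findHomography()), and the output size as a (width, height) tuple. A minimal, self-contained sketch (the corner coordinates and file names are made up for illustration):

import cv2
import numpy as np

img = cv2.imread('input.jpg')  # hypothetical input image

# Map four source corners onto a 300x300 square.
src = np.float32([[56, 65], [368, 52], [28, 387], [389, 398]])
dst = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])

M = cv2.getPerspectiveTransform(src, dst)         # 3x3 transform matrix
warped = cv2.warpPerspective(img, M, (300, 300))  # dsize is (width, height)
cv2.imwrite('warped.jpg', warped)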
Example #1
Source File: augmentation.py    From face_landmark with Apache License 2.0
def Perspective_aug(src, strength, label=None):
    image = src
    pts_base = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    pts1 = np.random.rand(4, 2) * random.uniform(-strength, strength) + pts_base
    pts1 = pts1.astype(np.float32)
    # pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 398]])
    M = cv2.getPerspectiveTransform(pts1, pts_base)
    trans_img = cv2.warpPerspective(image, M, (src.shape[1], src.shape[0]))

    label_rotated = None
    if label is not None:
        label = label.T
        full_label = np.vstack((label, np.ones(shape=(1, label.shape[1]))))
        label_rotated = np.dot(M, full_label)
        # divide by the homogeneous coordinate before rounding to pixel indices
        label_rotated = label_rotated[:2] / label_rotated[2:3]
        label_rotated = label_rotated.astype(np.int32)
        label_rotated = label_rotated.T
    return trans_img, label_rotated
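Note the divide by the homogeneous coordinate above: applying the image-warp matrix M to raw points only yields correct coordinates after dividing by the third row. OpenCV also offers this directly via cv2.perspectiveTransform(), which performs the divide internally; a small sketch reusing the M from this example:

# pts shaped (N, 1, 2); the homogeneous divide is handled internally
pts = np.float32([[150, 150], [10, 20]]).reshape(-1, 1, 2)
projected = cv2.perspectiveTransform(pts, M)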
Example #2
Source File: view_perspective.py    From Advanced_Lane_Lines with MIT License
def test():
	# load the perspective matrices; the with-block also closes the file
	with open("trans_pickle.p", "rb") as pickle_file:
		trans_pickle = pickle.load(pickle_file)
	M = trans_pickle["M"]
	Minv = trans_pickle["Minv"]

	img_size = (1280, 720)

	image_files = glob.glob("../output_images/undistort/*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = "../output_images/perspect_trans/"+file_name
		print(out_image)
		# convert to opencv BGR format
		warped = cv2.cvtColor(warped, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, warped) 
Example #3
Source File: cBuckley.py    From facial_expressions with Apache License 2.0
def per_trans(img,cols,rows,img_path_mod):
    # 3 perspective transformations
    #transformation 1
    pts7 = np.float32([[2,3],[93,4],[5,90],[92,91]])
    pts8 = np.float32([[0,0],[96,0],[0,96],[96,96]])
    M8 = cv2.getPerspectiveTransform(pts7,pts8)
    dst8 = cv2.warpPerspective(img,M8,(96,96))
    cv2.imwrite(img_path_mod + '_pt1.jpg',dst8)
    #transformation 2
    pts9 = np.float32([[6,7],[89,8],[9,87],[85,88]])
    pts10 = np.float32([[0,0],[96,0],[0,96],[96,96]])
    M9 = cv2.getPerspectiveTransform(pts9,pts10)
    dst9 = cv2.warpPerspective(img,M9,(96,96))
    cv2.imwrite(img_path_mod + '_pt2.jpg',dst9)
    #transformation 3
    pts11 = np.float32([[10,11],[93,12],[13,82],[83,84]])
    pts12 = np.float32([[0,0],[96,0],[0,96],[96,96]])
    M10 = cv2.getPerspectiveTransform(pts11,pts12)
    dst10 = cv2.warpPerspective(img,M10,(96,96))
    cv2.imwrite(img_path_mod + '_pt3.jpg',dst10) 
Example #4
Source File: main.py    From specularity-removal with GNU General Public License v3.0
def _solve(img1, img2):
    h, w, d = img1.shape

    # step 1: Find homography of 2 images
    homo = homography(img2, img1)

    # step 2: warp image2 to image1 frame
    img2_w = cv.warpPerspective(img2, homo, (w, h))

    # step 3: resolve highlights by picking the best pixels out of two images
    im1 = _resolve_spec(img1, img2_w)

    # step 4: repeat the same process for Image2 using warped Image1
    im_w = cv.warpPerspective(im1, np.linalg.inv(homo), (w, h))
    im2 = _resolve_spec(img2, im_w)

    return im1, im2 
Example #5
Source File: SudokuExtractor.py    From SolveSudoku with MIT License
def crop_and_warp(img, crop_rect):
	"""Crops and warps a rectangular section from an image into a square of similar size."""

	# Rectangle described by top left, top right, bottom right and bottom left points
	top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]

	# Explicitly set the data type to float32 or `getPerspectiveTransform` will throw an error
	src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')

	# Get the longest side in the rectangle
	side = max([
		distance_between(bottom_right, top_right),
		distance_between(top_left, bottom_left),
		distance_between(bottom_right, bottom_left),
		distance_between(top_left, top_right)
	])

	# Describe a square with side of the calculated length, this is the new perspective we want to warp to
	dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')

	# Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points
	m = cv2.getPerspectiveTransform(src, dst)

	# Performs the transformation on the original image
	return cv2.warpPerspective(img, m, (int(side), int(side))) 
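crop_and_warp() calls a distance_between() helper that is not part of this excerpt; presumably it returns the Euclidean distance between two points, along these lines:

def distance_between(p1, p2):
    """Euclidean distance between two 2D points (assumed helper, not from the source file)."""
    return np.linalg.norm(np.array(p1) - np.array(p2))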
Example #6
Source File: handdetector.py    From deep-prior-pp with GNU General Public License v3.0
def recropHand(self, crop, M, Mnew, target_size, background_value=0., nv_val=0., thresh_z=True, com=None,
                   size=(250, 250, 250)):

        if self.resizeMethod == self.RESIZE_CV2_NN:
            flags = cv2.INTER_NEAREST
        elif self.resizeMethod == self.RESIZE_CV2_LINEAR:
            flags = cv2.INTER_LINEAR
        else:
            raise NotImplementedError
        warped = cv2.warpPerspective(crop, numpy.dot(M, Mnew), target_size, flags=flags,
                                     borderMode=cv2.BORDER_CONSTANT, borderValue=float(background_value))
        warped[numpy.isclose(warped, nv_val)] = background_value

        if thresh_z is True:
            assert com is not None
            _, _, _, _, zstart, zend = self.comToBounds(com, size)
            msk1 = numpy.logical_and(warped < zstart, warped != 0)
            msk2 = numpy.logical_and(warped > zend, warped != 0)
            warped[msk1] = zstart
            warped[msk2] = 0.  # backface is at 0, it is set later

        return warped 
Example #7
Source File: reader.py    From Baidu_Lane_Segmentation with MIT License
def get_img(self):
        while True:
            img_name = self.image_files[self.index]
            label_name = img_name.replace('.jpg', '.png')
            img = cv2.imread(img_name)
            if img is None:
                print("load img failed:", img_name)
                self.next_img()
            else:
                break
        if self.birdeye:
            warped_img = cv2.warpPerspective(img, self.M, (4000, 4000), flags=cv2.INTER_CUBIC)
            img = cv2.resize(warped_img, (self.cols, self.rows), interpolation=cv2.INTER_CUBIC)
        else:
            img = cv2.resize(img, (self.cols, self.rows), interpolation=cv2.INTER_CUBIC)
        img = img.transpose((2, 0, 1))  # HWC -> CHW
        return img, label_name 
Example #8
Source File: CopyTexture.py    From PlaneNet with MIT License
def copyTextureTest(options):
    testdir = 'texture_test/'
    for index in range(1):
        planes = np.load(testdir + '/planes_' + str(index) + '.npy')
        image = cv2.imread(testdir + '/image_' + str(index) + '.png')
        segmentations = np.load(testdir + '/segmentations_' + str(index) + '.npy')
        segmentation = np.argmax(segmentations, axis=2)
        plane_depths = calcPlaneDepths(planes, WIDTH, HEIGHT)
        
        textureImage = cv2.imread('../textures/texture_0.jpg')
        textureImage = cv2.resize(textureImage, (WIDTH, HEIGHT), interpolation=cv2.INTER_LINEAR)
        floorPlaneIndex = findFloorPlane(planes, segmentation)
        if floorPlaneIndex == -1:
            continue
        mask = segmentation == floorPlaneIndex
        uv = findCornerPoints(planes[floorPlaneIndex], plane_depths[:, :, floorPlaneIndex], mask)
        source_uv = np.array([[0, 0], [0, HEIGHT], [WIDTH, 0], [WIDTH, HEIGHT]])

        h, status = cv2.findHomography(source_uv, uv)
        textureImageWarped = cv2.warpPerspective(textureImage, h, (WIDTH, HEIGHT))
        image[mask] = textureImageWarped[mask]
        cv2.imwrite(testdir + '/' + str(index) + '_texture.png', textureImageWarped)
        cv2.imwrite(testdir + '/' + str(index) + '_result.png', image)
    return
Example #9
Source File: align.py    From EasyPR-python with Apache License 2.0
def align(image, points):
    """
    :param image:
    :param points:
    :return: aligned image
    """
    # alignment
    origin_point = np.require(np.array(points).reshape((4, 2)), dtype=np.single)
    height = int(max(np.linalg.norm(origin_point[0] - origin_point[1]), np.linalg.norm(origin_point[2] - origin_point[3])))
    width = int(max(np.linalg.norm(origin_point[0] - origin_point[3]), np.linalg.norm(origin_point[1] - origin_point[2])))

    target_point = np.float32([[0, 0], [0, height], [width, height], [width, 0]])
    map_matrix = cv2.getPerspectiveTransform(origin_point, target_point)
    cols = width + 1
    rows = height + 1
    color = cv2.warpPerspective(image, map_matrix, (cols, rows))
    return color 
Example #10
Source File: unet_transforms.py    From pytorch-saltnet with MIT License
def do_horizontal_shear(image, mask, scale=0):
    height, width = image.shape[:2]
    dx = int(scale * width)

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = np.array([[+dx, 0], [width + dx, 0], [width - dx, height], [-dx, height], ], np.float32)

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Example #11
Source File: .demo.py    From dual-fisheye-video-stitching with MIT License
def pivot_stitch(img, wd):
    # Stitch the area in between
    D = stitch(img[:, 1280 - wd:1280], img[:, 1280:1280 + wd], sigma=15.0)

    # Warp backwards
    pt1 = np.dot(D['H'], [wd, 400, 1])
    pt3 = np.dot(D['H'], [wd, 800, 1])
    pt1 = pt1 / pt1[2]
    pt3 = pt3 / pt3[2]
    src = np.zeros((4, 2), np.float32)
    dst = np.zeros((4, 2), np.float32)
    src[0] = [0, 0]
    src[1] = pt1[:2]
    src[2] = [0, 1280]
    src[3] = pt3[:2]
    dst = np.array(src)
    dst[1] = [2 * wd - 1, 400]
    dst[3] = [2 * wd - 1, 800]

    result = np.copy(img)
    M = cv2.getPerspectiveTransform(src, dst)
    result[:, 1280 - wd:1280 + wd] = cv2.warpPerspective(D['res'], M, (2 * wd, 1280))
    # note: the line below overwrites the warped strip with the raw stitched result
    result[:, 1280 - wd:1280 + wd] = D['res']
    return result 
Example #12
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def draw_lane_fit(undist, warped, Minv, left_fitx, right_fitx, ploty):
	# Drawing
	# Create an image to draw the lines on
	warp_zero = np.zeros_like(warped).astype(np.uint8)
	color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

	# Recast the x and y points into usable format for cv2.fillPoly()
	pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
	pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
	pts = np.hstack((pts_left, pts_right))

	# Draw the lane onto the warped blank image
	cv2.fillPoly(color_warp, np.int_([pts]), (0,255,0))

	# Warp the blank back to original image space using the inverse perspective matrix (Minv)
	newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0]))
	# Combine the result with the original image
	result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)

	return result 
Example #13
Source File: helpers.py    From Advanced_Lane_Lines with MIT License
def wrap_images(src, dst):
	"""
	apply the wrap to images
	"""
	# load M, Minv
	img_size = (1280, 720)
	pickle_file = open("../helper/trans_pickle.p", "rb")
	trans_pickle = pickle.load(pickle_file)
	M = trans_pickle["M"]
	Minv = trans_pickle["Minv"]
	# loop the file folder
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_wraped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# mpimg reads RGB, so convert to BGR before writing with cv2.imwrite
		image_wraped = cv2.cvtColor(image_wraped, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, image_wraped) 
Example #14
Source File: head_pose_normalizer.py    From pytorch_mpiigaze with MIT License
def _normalize_image(self, image: np.ndarray,
                         eye_or_face: FaceParts) -> None:
        camera_matrix_inv = np.linalg.inv(self.camera.camera_matrix)
        normalized_camera_matrix = self.normalized_camera.camera_matrix

        scale = self._get_scale_matrix(eye_or_face.distance)
        conversion_matrix = scale @ eye_or_face.normalizing_rot.as_matrix()

        projection_matrix = normalized_camera_matrix @ conversion_matrix @ camera_matrix_inv

        normalized_image = cv2.warpPerspective(
            image, projection_matrix,
            (self.normalized_camera.width, self.normalized_camera.height))

        if eye_or_face.name in {FacePartsName.REYE, FacePartsName.LEYE}:
            normalized_image = cv2.cvtColor(normalized_image,
                                            cv2.COLOR_BGR2GRAY)
            normalized_image = cv2.equalizeHist(normalized_image)
        eye_or_face.normalized_image = normalized_image 
Example #15
Source File: unet_transforms.py    From pytorch-saltnet with MIT License
def do_rotation_transform(image, mask, angle=0):
    height, width = image.shape[:2]
    cc = np.cos(angle / 180 * np.pi)
    ss = np.sin(angle / 180 * np.pi)
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = box0 - np.array([width / 2, height / 2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2, height / 2])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101,
                                borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101,
                               borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Example #16
Source File: cpm_utils.py    From convolutional-pose-machines-tensorflow with Apache License 2.0
def warpImage(src, theta, phi, gamma, scale, fovy):
    halfFovy = fovy * 0.5
    d = math.hypot(src.shape[1], src.shape[0])
    sideLength = scale * d / math.cos(deg2Rad(halfFovy))
    sideLength = np.int32(sideLength)

    M = warpMatrix(src.shape[1], src.shape[0], theta, phi, gamma, scale, fovy)
    dst = cv2.warpPerspective(src, M, (sideLength, sideLength))
    mid_x = mid_y = dst.shape[0] // 2
    target_x = target_y = src.shape[0] // 2
    offset = (target_x % 2)

    if len(dst.shape) == 3:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset,
              :]
    else:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset]

    return dst 
Example #17
Source File: correspondence_database.py    From GIFT with Apache License 2.0
def scale_transform_img(img, min_ratio=0.15, max_ratio=0.25, base_ratio=2, flip=True):
    h, w = img.shape[0], img.shape[1]
    pts0 = np.asarray([[0, 0], [w, 0], [w, h], [0, h]], np.float32)

    scale_ratio = base_ratio ** (-np.random.uniform(min_ratio, max_ratio))
    if np.random.random() < 0.5 and flip:
        scale_ratio = 1.0 / scale_ratio
    center = np.mean(pts0, 0, keepdims=True)
    pts1 = (pts0 - center) * scale_ratio + center
    if scale_ratio > 1:
        min_pt = np.min(pts1, 0)  # <0
        max_pt = np.max(pts1, 0)  # >w,h
        min_w, min_h = -(max_pt - np.asarray([w, h]))
        max_w, max_h = -min_pt
    else:
        min_pt = np.min(pts1, 0)  # >0
        max_pt = np.max(pts1, 0)  # <w,h
        min_w, min_h = -min_pt
        max_w, max_h = np.asarray([w, h]) - max_pt

    offset_h = np.random.uniform(min_h, max_h)
    offset_w = np.random.uniform(min_w, max_w)
    pts1 += np.asarray([[offset_w, offset_h]], np.float32)

    th, tw = h, w  # output canvas keeps the original size
    H = cv2.getPerspectiveTransform(pts0.astype(np.float32), pts1.astype(np.float32))

    img1 = cv2.warpPerspective(img, H, (tw, th), flags=cv2.INTER_LINEAR)
    return img1, H 
Example #18
Source File: warp.py    From ProxImaL with MIT License
def adjoint(self, inputs, outputs):
        """The adjoint operator.

        Reads from inputs and writes to outputs.
        """

        if self.implementation == Impl['halide']:

            # Halide implementation
            if len(self.H.shape) == 2:
                tmpin = np.asfortranarray(inputs[0][..., np.newaxis].astype(np.float32))
            else:
                tmpin = np.asfortranarray(inputs[0].astype(np.float32))

            Halide('At_warp.cpp').At_warp(tmpin, self.Hf, self.tmpadj)  # Call
            np.copyto(outputs[0], self.tmpadj)

        else:

            # CV2 version
            inimg = inputs[0]
            if len(self.H.shape) == 2:
                # + cv2.WARP_INVERSE_MAP
                warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.Hinv.T,
                                                  inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                                  borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
                np.copyto(outputs[0], warpedInput)

            else:
                outputs[0][:] = 0.0
                for j in range(self.H.shape[2]):
                    warpedInput = cv2.warpPerspective(np.asfortranarray(inimg[:, :, :, j]),
                                                      self.Hinv[:, :, j].T, inimg.shape[1::-1],
                                                      flags=cv2.INTER_LINEAR,
                                                      borderMode=cv2.BORDER_CONSTANT,
                                                      borderValue=0.)
                    # Necessary due to array layout in opencv
                    outputs[0] += warpedInput

    # TODO what is the spectral norm of a warp? 
Example #19
Source File: correspondence_database.py    From GIFT with Apache License 2.0
def perspective_transform_img(img, perspective_type='lr', min_ratio=0.05, max_ratio=0.1):
    h, w = img.shape[0], img.shape[1]
    pts0 = np.asarray([[0, 0], [w, 0], [w, h], [0, h]], np.float32)
    pts1 = np.asarray([[0, 0], [w, 0], [w, h], [0, h]], np.float32)

    # left right
    if perspective_type == 'lr':
        val = h * np.random.uniform(min_ratio, max_ratio)
        if np.random.random() < 0.5: val *= -1
        pts1[0, 1] -= val
        pts1[1, 1] += val
        pts1[2, 1] -= val
        pts1[3, 1] += val

        val = h * np.random.uniform(min_ratio, max_ratio)
        pts1[0, 0] += val
        pts1[1, 0] -= val
        pts1[2, 0] -= val
        pts1[3, 0] += val
    else:  # 'ud'
        val = w * np.random.uniform(min_ratio, max_ratio)
        if np.random.random() < 0.5: val *= -1
        pts1[0, 0] += val
        pts1[1, 0] -= val
        pts1[2, 0] += val
        pts1[3, 0] -= val

        val = h * np.random.uniform(min_ratio, max_ratio)
        pts1[0, 1] += val
        pts1[1, 1] += val
        pts1[2, 1] -= val
        pts1[3, 1] -= val

    pts1 = pts1 - np.min(pts1, 0, keepdims=True)
    tw, th = np.max(pts1, 0).astype(np.int32)  # dsize must be an integer (width, height)
    H = cv2.getPerspectiveTransform(pts0.astype(np.float32), pts1.astype(np.float32))
    img1 = cv2.warpPerspective(img, H, (tw, th), flags=cv2.INTER_LINEAR)
    return img1, H 
Example #20
Source File: correspondence_database.py    From GIFT with Apache License 2.0
def rotate_transform_img(img, min_angle=0, max_angle=360, random_flip=False):
    h, w = img.shape[0], img.shape[1]

    pts0 = np.asarray([[0, 0], [w, 0], [w, h], [0, h]], np.float32)
    center = np.mean(pts0, 0, keepdims=True)
    theta = np.random.uniform(min_angle / 180 * np.pi, max_angle / 180 * np.pi)
    if random_flip and np.random.random() < 0.5: theta = -theta
    R = np.asarray([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], np.float32)
    pts1 = (pts0 - center) @ R + center
    H = cv2.getPerspectiveTransform(pts0.astype(np.float32), pts1.astype(np.float32))

    img1 = cv2.warpPerspective(img, H, (w, h))
    return img1, H 
Example #21
Source File: correspondence_database.py    From GIFT with Apache License 2.0
def add_homography_background(img, H):
        img_dir = os.path.join('data', 'SUN2012Images', 'JPEGImages')
        background_pths = [os.path.join(img_dir, fn) for fn in os.listdir(img_dir)]
        bpth = background_pths[np.random.randint(0, len(background_pths))]

        h, w, _ = img.shape
        bimg = cv2.resize(imread(bpth), (w, h))
        if len(bimg.shape) == 2: bimg = np.repeat(bimg[:, :, None], 3, axis=2)
        if bimg.shape[2] > 3: bimg = bimg[:, :, :3]
        msk_tgt = cv2.warpPerspective(np.ones([h, w], np.uint8), H, (w, h), flags=cv2.INTER_NEAREST).astype(bool)
        img[np.logical_not(msk_tgt)] = bimg[np.logical_not(msk_tgt)]
        return img 
Example #22
Source File: transforms.py    From kaggle_carvana_segmentation with MIT License
def __call__(self, img, mask=None):
        if random.random() < self.prob:
            height, width, channel = img.shape

            angle = random.uniform(-self.rotate_limit, self.rotate_limit)
            scale = random.uniform(1-self.scale_limit, 1+self.scale_limit)
            # shift by a random fraction of the image size
            dx = round(random.uniform(-self.shift_limit, self.shift_limit) * width)
            dy = round(random.uniform(-self.shift_limit, self.shift_limit) * height)

            cc = math.cos(angle/180*math.pi) * scale
            ss = math.sin(angle/180*math.pi) * scale
            rotate_matrix = np.array([[cc, -ss], [ss, cc]])

            box0 = np.array([[0, 0], [width, 0],  [width, height], [0, height], ])
            box1 = box0 - np.array([width/2, height/2])
            box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx, height/2+dy])

            box0 = box0.astype(np.float32)
            box1 = box1.astype(np.float32)
            mat = cv2.getPerspectiveTransform(box0, box1)
            img = cv2.warpPerspective(img, mat, (width, height),
                                      flags=cv2.INTER_LINEAR,
                                      borderMode=cv2.BORDER_REFLECT_101)
            if mask is not None:
                mask = cv2.warpPerspective(mask, mat, (width, height),
                                           flags=cv2.INTER_LINEAR,
                                           borderMode=cv2.BORDER_REFLECT_101)

        return img, mask 
Example #23
Source File: test_lin_ops.py    From ProxImaL with MIT License
def test_warp_halide(self):
        """Test warp lin op in halide.
        """
        if halide_installed():
            # Load image
            testimg_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                            'data', 'angela.jpg')
            img = Image.open(testimg_filename)
            np_img = np.asfortranarray(im2nparray(img))

            # Convert to gray
            np_img = np.mean(np_img, axis=2)

            # Generate problem
            theta_rad = 5.0 * np.pi / 180.0
            H = np.array([[np.cos(theta_rad), -np.sin(theta_rad), 0.0001],
                          [np.sin(theta_rad), np.cos(theta_rad), 0.0003],
                          [0., 0., 1.]], dtype=np.float32, order='F')

            # Reference
            output_ref = cv2.warpPerspective(np_img, H.T, np_img.shape[1::-1], flags=cv2.INTER_LINEAR,
                                             borderMode=cv2.BORDER_CONSTANT, borderValue=0.)

            # Halide
            output = np.zeros_like(np_img)
            Hc = np.asfortranarray(np.linalg.pinv(H)[..., np.newaxis])  # Third axis for halide
            Halide('A_warp.cpp').A_warp(np_img, Hc, output)  # Call

            # Transpose
            output_trans = np.zeros_like(np_img)
            Hinvc = np.asfortranarray(H[..., np.newaxis])  # Third axis for halide
            Halide('At_warp.cpp').At_warp(output, Hinvc, output_trans)  # Call

            # Compute reference
            output_ref_trans = cv2.warpPerspective(output_ref, H.T, np_img.shape[1::-1],
                                                   flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP,
                                                   borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            # Opencv does inverse warp
            self.assertItemsAlmostEqual(output, output_ref, places=1)
            # Opencv does inverse warp
            self.assertItemsAlmostEqual(output_trans, output_ref_trans, places=1) 
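Several examples on this page pass cv2.WARP_INVERSE_MAP (here and in Examples #18, #24, and #26). With that flag, warpPerspective treats the given matrix as the destination-to-source mapping instead of inverting it internally, so the two calls below agree up to interpolation error. A small sketch with an assumed rotation homography:

import cv2
import numpy as np

img = cv2.imread('input.jpg')  # hypothetical input image
h, w = img.shape[:2]
theta = np.deg2rad(5.0)
H = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0, 0.0, 1.0]], dtype=np.float32)

# Forward call: the matrix is inverted internally before sampling.
a = cv2.warpPerspective(img, np.linalg.inv(H), (w, h), flags=cv2.INTER_LINEAR)
# Inverse-map call: H is used as-is as the dst -> src mapping.
b = cv2.warpPerspective(img, H, (w, h), flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)
# a and b match up to interpolation error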
Example #24
Source File: main.py    From FaceSwap with MIT License
def warp_image(img, tM, shape):
    out = np.zeros(shape, dtype=img.dtype)
    # cv2.warpAffine(img,
    #                tM[:2],
    #                (shape[1], shape[0]),
    #                dst=out,
    #                borderMode=cv2.BORDER_TRANSPARENT,
    #                flags=cv2.WARP_INVERSE_MAP)
    cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
                        borderMode=cv2.BORDER_TRANSPARENT,
                        flags=cv2.WARP_INVERSE_MAP)
    return out

# TODO: Modify this method to get a better face contour mask 
Example #25
Source File: helpers.py    From songoku with MIT License
def perspective_transform(img, transformation_matrix, original_shape=None):
    warped = img

    if original_shape is not None:
        if original_shape[0] > 0 and original_shape[1] > 0:
            warped = cv2.resize(warped, (original_shape[1], original_shape[0]), interpolation=cv2.INTER_CUBIC)

    # note: white_image is created but never used below
    white_image = np.zeros((640, 480, 3), np.uint8)
    white_image[:, :, :] = 255

    # warped = cv2.warpPerspective(warped, transformation_matrix, (640, 480), borderMode=cv2.BORDER_TRANSPARENT)
    warped = cv2.warpPerspective(warped, transformation_matrix, (640, 480))

    return warped 
Example #26
Source File: calibration_utils.py    From depthai with MIT License
def rectify_dataset(self, dataset_dir, calibration_file):
        images_left = glob.glob(dataset_dir + '/left/*.png')
        print(images_left)
        images_right = glob.glob(dataset_dir + '/right/*.png')
        left_result_dir = os.path.join(dataset_dir + "Rectified", "left")
        right_result_dir = os.path.join(dataset_dir + "Rectified", "right")
        images_left.sort()
        images_right.sort()

        assert len(images_left) != 0, "ERROR: Images not read correctly"
        assert len(images_right) != 0, "ERROR: Images not read correctly"

        mkdir_overwrite(left_result_dir)
        mkdir_overwrite(right_result_dir)

        H = np.fromfile(calibration_file, dtype=np.float32).reshape((3, 3))

        print("Using Homography from file, with values: ")
        print(H)

        H = np.linalg.inv(H)
        for image_left, image_right in zip(images_left, images_right):
            # read images
            img_l = cv2.imread(image_left, 0)
            img_r = cv2.imread(image_right, 0)

            # warp right image
            img_r = cv2.warpPerspective(img_r, H, img_r.shape[::-1],
                                        flags=cv2.INTER_LINEAR +
                                        cv2.WARP_FILL_OUTLIERS +
                                        cv2.WARP_INVERSE_MAP)

            # save images
            cv2.imwrite(os.path.join(left_result_dir,
                                     os.path.basename(image_left)), img_l)
            cv2.imwrite(os.path.join(right_result_dir,
                                     os.path.basename(image_right)), img_r) 
Example #27
Source File: panorama.py    From Panoramic-Image-Stitching-using-invariant-features with MIT License
def getwarp_perspective(self,imageA,imageB,Homography):
        val = imageA.shape[1] + imageB.shape[1]
        result_image = cv2.warpPerspective(imageA, Homography, (val, imageA.shape[0]))

        return result_image 
Example #28
Source File: markers_detection.py    From niryo_one_ros with GNU General Public License v3.0
def extract_sub_img(img, list_corners, ratio_w_h=1.0):
    """
    Extract a small image from a big one using a perspective warp
    :param img: Big image from which the small one will be extracted
    :param list_corners: corners list of the small image
    :param ratio_w_h: Width over Height ratio of the area. It helps to not stretch the working area image
    :return: extracted and warped image
    """
    if list_corners is None or len(list_corners) != 4:
        return None

    if ratio_w_h >= 1.0:
        target_w_area = int(round(ratio_w_h * 200))
        target_h_area = 200
    else:
        ratio_w_h = 1.0 / ratio_w_h
        target_h_area = int(round(ratio_w_h * 200))
        target_w_area = 200

    points_grid = []

    for marker in list_corners:
        points_grid.append(marker.get_center())
    points_grid = np.array(points_grid, dtype=np.float32)
    final_pts = np.array(
        [[0, 0], [target_w_area - 1, 0],
         [target_w_area - 1, target_h_area - 1], [0, target_h_area - 1]],
        dtype=np.float32)
    transfo_matrix = cv2.getPerspectiveTransform(points_grid, final_pts)
    # print transfo_matrix
    # print np.linalg.det(transfo_matrix)
    area_im = cv2.warpPerspective(img, transfo_matrix, (target_w_area, target_h_area))
    return area_im 
Example #29
Source File: warp.py    From IkaLog with Apache License 2.0
def execute(self, frame):
        if not (self.enabled and self.pre_execute(frame)):
            return frame

        return cv2.warpPerspective(frame, self.M, (1280, 720)) 
Example #30
Source File: augment.py    From imips_open with GNU General Public License v3.0
def testHpatchPair(pair):
    plt.figure()
    plt.imshow(pair.im[1])
    plt.figure()
    plt.imshow(cv2.warpPerspective(
        pair.im[0], pair.H,
        dsize=(pair.im[1].shape[1], pair.im[1].shape[0])))