Python cv2.remap() Examples
The following are 30 code examples of cv2.remap().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module cv2, or try the search function.
Example #1
Source File: data_augment.py From ssds.pytorch with MIT License | 7 votes |
def _elastic(image, p, alpha=None, sigma=None, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    From: https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation

    Args:
        image: input image array, at least 2-D (H, W[, C]).
        p: probability of applying the deformation; otherwise `image` is
            returned unchanged.
        alpha: displacement magnitude; defaults to a random multiple
            (0.5..2) of the image height.
        sigma: Gaussian kernel size; defaults to a random fraction
            (0.5..1) of the image height.
        random_state: optional np.random.RandomState for reproducible fields.

    Returns:
        The elastically deformed image (same shape as input).
    """
    if random.random() > p:
        return image
    # Fix: compare against None with `is`, not `==` (PEP 8). `== None`
    # also misbehaves for array-like arguments that overload equality.
    if alpha is None:
        alpha = image.shape[0] * random.uniform(0.5, 2)
    if sigma is None:
        sigma = int(image.shape[0] * random.uniform(0.5, 1))
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape[:2]
    # `sigma | 1` forces an odd kernel size, as GaussianBlur requires.
    dx, dy = [cv2.GaussianBlur((random_state.rand(*shape) * 2 - 1) * alpha,
                               (sigma | 1, sigma | 1), 0) for _ in range(2)]
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    # Clamp displaced coordinates to the image bounds before remapping.
    x = np.clip(x + dx, 0, shape[1] - 1).astype(np.float32)
    y = np.clip(y + dy, 0, shape[0] - 1).astype(np.float32)
    return cv2.remap(image, x, y, interpolation=cv2.INTER_LINEAR,
                     borderValue=0, borderMode=cv2.BORDER_REFLECT)
Example #2
Source File: test-mvs.py From dfc2019 with MIT License | 6 votes |
def rectify_images_float(img1, x1, img2, x2, K, d, F, shearing=False):
    """Rectify a floating-point image pair via uncalibrated epipolar geometry.

    Computes rectifying homographies from the correspondences (x1, x2) and
    fundamental matrix F, then remaps both images with nearest-neighbour
    sampling. Returns the rectified (img1, img2) pair.
    """
    imsize = (img1.shape[1], img1.shape[0])
    H1, H2, rms, max_error = epipolar.rectify_uncalibrated(x1, x2, F, imsize)
    if shearing:
        # Optionally remove the shearing component from the left homography.
        S = epipolar.rectify_shearing(H1, H2, imsize)
        H1 = S.dot(H1)

    # Express the homographies in normalized (camera) coordinates.
    K_inv = la.inv(K)
    rH = K_inv.dot(H1).dot(K)
    lH = K_inv.dot(H2).dot(K)

    map1x, map1y = cv2.initUndistortRectifyMap(K, d, rH, K, imsize, cv.CV_16SC2)
    map2x, map2y = cv2.initUndistortRectifyMap(K, d, lH, K, imsize, cv.CV_16SC2)
    rimg1 = cv2.remap(img1, map1x, map1y,
                      interpolation=cv.INTER_NEAREST,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0, 0, 0, 0))
    rimg2 = cv2.remap(img2, map2x, map2y,
                      interpolation=cv.INTER_NEAREST,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0, 0, 0, 0))
    return rimg1, rimg2

# get NITF metadata that we embedded in the GeoTIFF header
Example #3
Source File: lucidDream.py From pyLucid with MIT License | 6 votes |
def spline_transform_multi(img, mask):
    """Apply a random thin-plate-spline warp to an image and its mask.

    Control points are sampled on the (dilated) boundary of the binary mask.
    When the boundary has too few pixels to fit a spline, the inputs are
    returned unchanged.

    Args:
        img: image array to warp.
        mask: label mask; pixels > 0 are treated as foreground.

    Returns:
        Tuple (new_img, new_msk) of warped image and mask.
    """
    bimask = mask > 0
    M, N = np.where(bimask)
    w = np.ptp(N) + 1
    h = np.ptp(M) + 1
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # Boundary = dilated mask minus the mask itself.
    bound = cv2.dilate(bimask.astype('uint8'), kernel) - bimask
    y, x = np.where(bound > 0)
    if x.size > 4:
        newxy = thin_plate_transform(x, y, w, h, mask.shape[:2], num_points=5)
        new_img = cv2.remap(img, newxy, None, cv2.INTER_LINEAR)
        new_msk = cv2.remap(mask, newxy, None, cv2.INTER_NEAREST)
    else:
        # Fix: the original `elif x.size > 0` left new_img/new_msk unassigned
        # for an empty boundary, raising UnboundLocalError at the return.
        new_img = img
        new_msk = mask
    return new_img, new_msk
Example #4
Source File: functional.py From albumentations with MIT License | 6 votes |
def optical_distortion(
    img, k=0, dx=0, dy=0, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101, value=None
):
    """Barrel / pincushion distortion. Unconventional augment.

    Reference:
        |  https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion
        |  https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
        |  https://stackoverflow.com/questions/2477774/correcting-fisheye-distortion-programmatically
        |  http://www.coldvision.io/2017/03/02/advanced-lane-finding-using-opencv/
    """
    height, width = img.shape[:2]

    # Synthesize a pinhole camera whose principal point is the (shifted)
    # image midpoint; focal lengths equal the image dimensions.
    center_x = width * 0.5 + dx
    center_y = height * 0.5 + dy
    camera_matrix = np.array(
        [[width, 0, center_x],
         [0, height, center_y],
         [0, 0, 1]],
        dtype=np.float32,
    )
    # k drives both radial terms; tangential distortion is zero.
    distortion = np.array([k, k, 0, 0, 0], dtype=np.float32)
    map1, map2 = cv2.initUndistortRectifyMap(
        camera_matrix, distortion, None, None, (width, height), cv2.CV_32FC1
    )
    return cv2.remap(
        img, map1, map2, interpolation=interpolation, borderMode=border_mode, borderValue=value
    )
Example #5
Source File: nclt.py From hierarchical_loc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, fin, scale=1.0, fmask=None): self.fin = fin # read in distort with open(fin, 'r') as f: header = f.readline().rstrip() chunks = re.sub(r'[^0-9,]', '', header).split(',') self.mapu = np.zeros((int(chunks[1]), int(chunks[0])), dtype=np.float32) self.mapv = np.zeros((int(chunks[1]), int(chunks[0])), dtype=np.float32) for line in f.readlines(): chunks = line.rstrip().split(' ') self.mapu[int(chunks[0]), int(chunks[1])] = float(chunks[3]) self.mapv[int(chunks[0]), int(chunks[1])] = float(chunks[2]) # generate a mask self.mask = np.ones(self.mapu.shape, dtype=np.uint8) self.mask = cv2.remap(self.mask, self.mapu, self.mapv, cv2.INTER_LINEAR) kernel = np.ones((30, 30), np.uint8) self.mask = cv2.erode(self.mask, kernel, iterations=1) # crop black regions out h, w = self.mask.shape self.x_lim = [f(np.where(self.mask[int(h/2), :])[0]) for f in [np.min, np.max]] self.y_lim = [f(np.where(self.mask[:, int(w/2)])[0]) for f in [np.min, np.max]]
Example #6
Source File: nclt.py From hfnet with MIT License | 6 votes |
def __init__(self, fin, scale=1.0, fmask=None): self.fin = fin # read in distort with open(fin, 'r') as f: header = f.readline().rstrip() chunks = re.sub(r'[^0-9,]', '', header).split(',') self.mapu = np.zeros((int(chunks[1]), int(chunks[0])), dtype=np.float32) self.mapv = np.zeros((int(chunks[1]), int(chunks[0])), dtype=np.float32) for line in f.readlines(): chunks = line.rstrip().split(' ') self.mapu[int(chunks[0]), int(chunks[1])] = float(chunks[3]) self.mapv[int(chunks[0]), int(chunks[1])] = float(chunks[2]) # generate a mask self.mask = np.ones(self.mapu.shape, dtype=np.uint8) self.mask = cv2.remap(self.mask, self.mapu, self.mapv, cv2.INTER_LINEAR) kernel = np.ones((30, 30), np.uint8) self.mask = cv2.erode(self.mask, kernel, iterations=1) # crop black regions out h, w = self.mask.shape self.x_lim = [f(np.where(self.mask[int(h/2), :])[0]) for f in [np.min, np.max]] self.y_lim = [f(np.where(self.mask[:, int(w/2)])[0]) for f in [np.min, np.max]]
Example #7
Source File: image_augmentation.py From youtube-video-face-swap with MIT License | 6 votes |
def random_warp(image):
    """Randomly warp a 256x256x3 image for training.

    Builds a jittered 5x5 control grid, upsamples it to a dense map and
    remaps the image; also produces the affine-aligned target via umeyama.

    Returns:
        (warped_image, target_image) — the warped 64x64 crop and the
        64x64 affine-warped target.
    """
    assert image.shape == (256, 256, 3)
    grid_coords = numpy.linspace(128 - 80, 128 + 80, 5)
    mapx = numpy.broadcast_to(grid_coords, (5, 5))
    mapy = mapx.T
    # Jitter each control point independently.
    mapx = mapx + numpy.random.normal(size=(5, 5), scale=5)
    mapy = mapy + numpy.random.normal(size=(5, 5), scale=5)
    # Densify to 80x80, then crop the central 64x64 region.
    interp_mapx = cv2.resize(mapx, (80, 80))[8:72, 8:72].astype('float32')
    interp_mapy = cv2.resize(mapy, (80, 80))[8:72, 8:72].astype('float32')
    warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)

    # Fit a similarity transform from the jittered grid to a regular grid.
    src_points = numpy.stack([mapx.ravel(), mapy.ravel()], axis=-1)
    dst_points = numpy.mgrid[0:65:16, 0:65:16].T.reshape(-1, 2)
    mat = umeyama(src_points, dst_points, True)[0:2]
    target_image = cv2.warpAffine(image, mat, (64, 64))
    return warped_image, target_image
Example #8
Source File: warp.py From DeepFaceLab with GNU General Public License v3.0 | 6 votes |
def warp_by_params(params, img, can_warp, can_transform, can_flip, border_replicate, cv2_inter=cv2.INTER_CUBIC):
    """Apply the warp / affine transform / flip described by `params` to `img`.

    The individual operations are gated by the can_* flags; warping and
    transforming are performed at a fixed 64x64 working resolution and the
    result is resized back to params['rw'].
    """
    rw = params['rw']
    needs_resize = (can_warp or can_transform) and rw is not None
    if needs_resize:
        img = cv2.resize(img, (64, 64), interpolation=cv2_inter)
    if can_warp:
        img = cv2.remap(img, params['mapx'], params['mapy'], cv2_inter)
    if can_transform:
        border = cv2.BORDER_REPLICATE if border_replicate else cv2.BORDER_CONSTANT
        img = cv2.warpAffine(img, params['rmat'], (params['w'], params['w']),
                             borderMode=border, flags=cv2_inter)
    if needs_resize:
        img = cv2.resize(img, (rw, rw), interpolation=cv2_inter)
    if len(img.shape) == 2:
        # Restore a trailing channel axis for grayscale results.
        img = img[..., None]
    if can_flip and params['flip']:
        img = img[:, ::-1, ...]
    return img
Example #9
Source File: visualize_mesh.py From Structured3D with MIT License | 6 votes |
def E2P(image, corner_i, corner_j, wall_height, camera, resolution=512, is_wall=True):
    """Convert a panorama to a perspective image.

    Samples a planar patch spanned by the two (camera-relative) corners:
    a vertical wall slab when `is_wall`, otherwise a horizontal plane at
    `wall_height` above the camera.
    """
    corner_i = corner_i - camera
    corner_j = corner_j - camera

    if is_wall:
        # Vertical plane: x/y vary along columns, z along rows.
        xs = np.linspace(corner_i[0], corner_j[0], resolution)[None].repeat(resolution, 0)
        ys = np.linspace(corner_i[1], corner_j[1], resolution)[None].repeat(resolution, 0)
        zs = np.linspace(-camera[-1], wall_height - camera[-1],
                         resolution)[:, None].repeat(resolution, 1)
    else:
        # Horizontal plane at a fixed height relative to the camera.
        xs = np.linspace(corner_i[0], corner_j[0], resolution)[None].repeat(resolution, 0)
        ys = np.linspace(corner_i[1], corner_j[1], resolution)[:, None].repeat(resolution, 1)
        zs = np.zeros_like(xs) + wall_height - camera[-1]

    coorx, coory = xyz_2_coorxy(xs, ys, zs)
    return cv2.remap(image,
                     coorx.astype(np.float32),
                     coory.astype(np.float32),
                     cv2.INTER_CUBIC,
                     borderMode=cv2.BORDER_WRAP)
Example #10
Source File: reconstruction_utils.py From MOTSFusion with MIT License | 6 votes |
def get_points_from_masks(mask_t0, mask_t1, point_img_t0, point_img_t1, flow_t1_t0, img_t0, img_t1, calibration_params):
    """Collect corresponding 3D points and colors inside the overlap of two masks.

    Warps the t0 mask and point image into frame t1 using the backward flow,
    intersects the warped t0 mask with the t1 mask, and gathers point/color
    pairs over the overlap region.

    Returns:
        (object_points, colors): arrays of shape (n, 2, 3) pairing t0/t1
        points and t0/t1 colors at overlapping pixels.
    """
    #point_img_t0[np.logical_not(mask_t0)] = [0, 0, 0]
    h, w = flow_t1_t0.shape[:2]
    # Turn the backward flow into an absolute sampling map for cv2.remap.
    flow = -flow_t1_t0
    flow[:, :, 0] += np.arange(w)
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]

    point_img_t0 = cv2.remap(point_img_t0, flow, None, cv2.INTER_NEAREST)
    mask_t0_warped = cv2.remap(mask_t0, flow, None, cv2.INTER_NEAREST)
    mask_t0_warped = np.equal(mask_t0_warped, 1).astype(np.uint8)

    # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the documented replacement.
    mask_overlap = np.logical_and(mask_t0_warped.astype(bool),
                                  mask_t1.astype(bool))

    object_points = np.concatenate((np.expand_dims(point_img_t0[mask_overlap], axis=1),
                                    np.expand_dims(point_img_t1[mask_overlap], axis=1)), axis=1)
    colors = np.concatenate((np.expand_dims(img_t0[mask_overlap], axis=1),
                             np.expand_dims(img_t1[mask_overlap], axis=1)), axis=1)
    return object_points, colors
Example #11
Source File: training_data.py From faceswap with GNU General Public License v3.0 | 6 votes |
def _random_warp(self, batch):
    """ Randomly warp the input batch """
    logger.trace("Randomly warping batch")
    consts = self._constants
    mapx = consts["warp_mapx"]
    mapy = consts["warp_mapy"]
    pad = consts["warp_pad"]
    slices = consts["warp_slices"]

    # Per-sample Gaussian jitter on the 5x5 control grids (x and y).
    noise = np.random.normal(size=(self._batchsize, 2, 5, 5),
                             scale=self._scale).astype("float32")
    batch_maps = np.stack((mapx, mapy), axis=1) + noise
    # Upsample each control grid to a dense map and crop the padding.
    batch_interp = np.array([[cv2.resize(map_, (pad, pad))[slices, slices]
                              for map_ in maps]
                             for maps in batch_maps])
    warped_batch = np.array([cv2.remap(image, interp[0], interp[1], cv2.INTER_LINEAR)
                             for image, interp in zip(batch, batch_interp)])
    logger.trace("Warped image shape: %s", warped_batch.shape)
    return warped_batch
Example #12
Source File: base.py From pychubby with MIT License | 6 votes |
def warp(self, img, order=1):
    """Warp image into new coordinate system.

    Parameters
    ----------
    img : np.ndarray
        Image to be warped. Any number of channels; dtype uint8 or float32.
    order : int
        Interpolation order passed to cv2.remap:

        * 0 - nearest neighbours
        * 1 - linear
        * 2 - cubic

    Returns
    -------
    warped_img : np.ndarray
        Warped image with the same number of channels and dtype as `img`.
    """
    map_x, map_y = self.transformation
    return cv2.remap(img, map_x, map_y, order)
Example #13
Source File: api.py From PRNet-Depth-Generation with MIT License | 5 votes |
def get_texture(self, image, pos):
    ''' extract uv texture from image. opencv is needed here.

    Args:
        image: input image.
        pos: the 3D position map. shape = (256, 256, 3).
    Returns:
        texture: the corresponding colors of vertices. shape = (num of points, 3). n is 45128 here.
    '''
    # Sample the image at the (x, y) coordinates stored in the position map.
    uv_coords = pos[:, :, :2].astype(np.float32)
    texture = cv2.remap(image, uv_coords, None,
                        interpolation=cv2.INTER_NEAREST,
                        borderMode=cv2.BORDER_CONSTANT,
                        borderValue=(0))
    return texture
Example #14
Source File: tools.py From FALdetector with Apache License 2.0 | 5 votes |
def warp(im, flow, alpha=1, interp=cv2.INTER_CUBIC):
    """Warp `im` backwards along `alpha * flow` using cv2.remap.

    Border pixels are replicated so the output has no black fringe.
    """
    height, width, _ = flow.shape
    # Absolute pixel grid plus the scaled flow gives the sampling map.
    grid = np.dstack(np.meshgrid(np.arange(width), np.arange(height)))
    pixel_map = (grid + alpha * flow).astype(np.float32)
    return cv2.remap(
        im, pixel_map[:, :, 0], pixel_map[:, :, 1], interp,
        borderMode=cv2.BORDER_REPLICATE)
Example #15
Source File: img_dim.py From sanet_relocal_demo with GNU General Public License v3.0 | 5 votes |
def wrap(img, coordinate_2d, interp='bilinear'):
    """
    Wrap the image with respect to the coordinate mapping
    :param img: the source image
    :param coordinate_2d: the mapping coordinates (refer the in source image), a 2D array with dimension of (num_points, 2)
    :param interp: interp method, 'bilinear' for bilinar interp, or 'nearest' for nearest interp.
    :return: wrapped image
    """
    h, w = img.shape[:2]
    maps = coordinate_2d.reshape((h, w, 2)).astype(np.float32)
    mode = cv2.INTER_LINEAR if interp == 'bilinear' else cv2.INTER_NEAREST
    return cv2.remap(img, maps[:, :, 0], maps[:, :, 1], interpolation=mode)
Example #16
Source File: merge_functions.py From PReMVOS with MIT License | 5 votes |
def warp_flow(img, flow, binarize=True):
    """Backward-warp `img` by the optical flow; optionally binarize to uint8."""
    h, w = flow.shape[:2]
    # Negated flow plus the pixel grid forms the absolute sampling map.
    sampling_map = -flow
    sampling_map[:, :, 0] += np.arange(w)
    sampling_map[:, :, 1] += np.arange(h)[:, np.newaxis]
    res = cv2.remap(img, sampling_map, None, cv2.INTER_LINEAR)
    if binarize:
        res = np.equal(res, 1).astype(np.uint8)
    return res
Example #17
Source File: utils.py From PartiallyReversibleUnet with BSD 3-Clause "New" or "Revised" License | 5 votes |
def dense_image_warp(im, dx, dy, interp=cv2.INTER_LINEAR):
    """Warp `im` by the dense deformation field (dx, dy)."""
    map_x, map_y = deformation_to_transformation(dx, dy)

    # The following converts the maps to a compact fixed-point representation;
    # this gives a ~20% speed increase but could lose a little accuracy.
    # Only applied for linear interpolation.
    if interp == cv2.INTER_LINEAR:
        map_x, map_y = cv2.convertMaps(map_x, map_y, dstmap1type=cv2.CV_16SC2)

    warped = cv2.remap(im, map_x, map_y,
                       interpolation=interp,
                       borderMode=cv2.BORDER_REFLECT)  # borderValue=float(np.min(im))
    # Restore a trailing axis dropped by cv2.remap for single-channel input.
    if im.ndim > warped.ndim:
        warped = np.expand_dims(warped, im.ndim)
    return warped
Example #18
Source File: Util_tracking.py From TrackR-CNN with MIT License | 5 votes |
def _warp(img, flow):
    """Warp `img` by `flow` and binarize the result to a uint8 mask."""
    # for some reason the result is all zeros with INTER_LINEAR...
    # res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    warped = remap(img, flow, None, INTER_NEAREST)
    return np.equal(warped, 1).astype(np.uint8)
Example #19
Source File: calibration.py From StereoVision with GNU General Public License v3.0 | 5 votes |
def rectify(self, frames):
    """
    Rectify frames passed as (left, right) pair of OpenCV Mats.

    Remapping is done with nearest neighbor for speed.
    """
    return [cv2.remap(frame,
                      self.undistortion_map[side],
                      self.rectification_map[side],
                      cv2.INTER_NEAREST)
            for frame, side in zip(frames, ("left", "right"))]
Example #20
Source File: opt_flow.py From PyCV-time with MIT License | 5 votes |
def warp_flow(img, flow):
    """Backward-warp `img` along the optical flow field."""
    h, w = flow.shape[:2]
    # Negated flow plus the pixel grid gives absolute sample positions.
    mapping = -flow
    mapping[:, :, 0] += np.arange(w)
    mapping[:, :, 1] += np.arange(h)[:, np.newaxis]
    return cv2.remap(img, mapping, None, cv2.INTER_LINEAR)
Example #21
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def elastic_transform_fast(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(1234)

    shape = image.shape
    shape_size = shape[:2]
    alpha = float(alpha)
    sigma = float(sigma)
    alpha_affine = float(alpha_affine)

    # Random affine jitter around the image centre.
    center = np.float32(shape_size) // 2
    half = min(shape_size) // 3
    pts1 = np.float32([center + half,
                       [center[0] + half, center[1] - half],
                       center - half])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine,
                                       size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1],
                           borderMode=cv2.BORDER_REFLECT_101)

    # Smoothed random displacement field scaled by alpha.
    dx = np.float32(gaussian_filter(random_state.rand(*shape_size) * 2 - 1, sigma) * alpha)
    dy = np.float32(gaussian_filter(random_state.rand(*shape_size) * 2 - 1, sigma) * alpha)
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    return cv2.remap(image, np.float32(x + dx), np.float32(y + dy),
                     interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT_101)
Example #22
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def distort1(img, k=0, dx=0, dy=0):
    """Barrel / pincushion distortion (unconventional augmentation).

    References:
        https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion
        https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
        https://stackoverflow.com/questions/2477774/correcting-fisheye-distortion-programmatically
        http://www.coldvision.io/2017/03/02/advanced-lane-finding-using-opencv/
    """
    height, width = img.shape[:2]
    # map_x, map_y =
    # cv2.initUndistortRectifyMap(intrinsics, dist_coeffs, None, None, (width,height),cv2.CV_32FC1)
    k = k * 0.00001
    dx = dx * width
    dy = dy * height

    # Shift the pixel grid so the distortion is centred at (w/2+dx, h/2+dy).
    x, y = np.mgrid[0:width:1, 0:height:1]
    x = x.astype(np.float32) - width/2 - dx
    y = y.astype(np.float32) - height/2 - dy

    # Radial distortion in polar form: r = d * (1 + k * d^2).
    theta = np.arctan2(y, x)
    d = (x*x + y*y)**0.5
    r = d*(1+k*d*d)
    map_x = r*np.cos(theta) + width/2 + dx
    map_y = r*np.sin(theta) + height/2 + dy

    return cv2.remap(img, map_x, map_y,
                     interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT_101)
Example #23
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def distort1(img, k=0, dx=0, dy=0):
    """Apply barrel / pincushion distortion to an image.

    See:
        https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion
        https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
        https://stackoverflow.com/questions/2477774/correcting-fisheye-distortion-programmatically
        http://www.coldvision.io/2017/03/02/advanced-lane-finding-using-opencv/
    """
    height, width = img.shape[:2]
    # Scale the user parameters into pixel units.
    k = k * 0.00001
    dx = dx * width
    dy = dy * height

    # Pixel grid centred on the (shifted) distortion centre.
    x, y = np.mgrid[0:width:1, 0:height:1]
    x = x.astype(np.float32) - width/2 - dx
    y = y.astype(np.float32) - height/2 - dy

    # Radial model r = d * (1 + k * d^2) applied in polar coordinates.
    theta = np.arctan2(y, x)
    d = (x*x + y*y)**0.5
    r = d*(1+k*d*d)
    map_x = r*np.cos(theta) + width/2 + dx
    map_y = r*np.sin(theta) + height/2 + dy

    return cv2.remap(img, map_x, map_y,
                     interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT_101)
Example #24
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def elastic_transform_fast(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(1234)

    shape = image.shape
    shape_size = shape[:2]
    alpha = float(alpha)
    sigma = float(sigma)
    alpha_affine = float(alpha_affine)

    # Random affine perturbation of three reference points near the centre.
    center = np.float32(shape_size) // 2
    third = min(shape_size) // 3
    pts1 = np.float32([center + third,
                       [center[0] + third, center[1] - third],
                       center - third])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine,
                                       size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1],
                           borderMode=cv2.BORDER_REFLECT_101)

    # Smoothed random displacements, scaled by alpha.
    dx = np.float32(gaussian_filter(random_state.rand(*shape_size) * 2 - 1, sigma) * alpha)
    dy = np.float32(gaussian_filter(random_state.rand(*shape_size) * 2 - 1, sigma) * alpha)
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    return cv2.remap(image, np.float32(x + dx), np.float32(y + dy),
                     interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT_101)
Example #25
Source File: functional.py From dsb2018_topcoders with MIT License | 5 votes |
def distort1(img, k=0, dx=0, dy=0):
    """Barrel / pincushion lens distortion augmentation.

    References:
        https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion
        https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
        https://stackoverflow.com/questions/2477774/correcting-fisheye-distortion-programmatically
        http://www.coldvision.io/2017/03/02/advanced-lane-finding-using-opencv/
    """
    height, width = img.shape[:2]
    k = k * 0.00001
    dx = dx * width
    dy = dy * height

    # Build a pixel grid centred at (w/2 + dx, h/2 + dy).
    x, y = np.mgrid[0:width:1, 0:height:1]
    x = x.astype(np.float32) - width/2 - dx
    y = y.astype(np.float32) - height/2 - dy

    # Polar-coordinate radial distortion: r = d * (1 + k * d^2).
    theta = np.arctan2(y, x)
    d = (x*x + y*y)**0.5
    r = d*(1+k*d*d)
    map_x = r*np.cos(theta) + width/2 + dx
    map_y = r*np.sin(theta) + height/2 + dy

    return cv2.remap(img, map_x, map_y,
                     interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT_101)
Example #26
Source File: utils.py From PHiSeg-code with Apache License 2.0 | 5 votes |
def dense_image_warp(im, dx, dy, interp=cv2.INTER_LINEAR, do_optimisation=True):
    """Warp `im` with the dense deformation field (dx, dy).

    When `do_optimisation` is set, the maps are converted to OpenCV's compact
    fixed-point representation — roughly 20% faster, possibly at a small
    accuracy cost.
    """
    map_x, map_y = deformation_to_transformation(dx, dy)
    if do_optimisation:
        map_x, map_y = cv2.convertMaps(map_x, map_y, dstmap1type=cv2.CV_16SC2)
    return cv2.remap(im, map_x, map_y,
                     interpolation=interp,
                     borderMode=cv2.BORDER_REFLECT)  # borderValue=float(np.min(im))
Example #27
Source File: test-mvs.py From dfc2019 with MIT License | 5 votes |
def rectify_images_rgb(img1, x1, img2, x2, K, d, F, shearing=False):
    """Rectify an RGB image pair via uncalibrated epipolar rectification.

    Returns the remapped images together with the rectification residuals
    (rms, max_error) reported by the epipolar solver.
    """
    imsize = (img1.shape[1], img1.shape[0])
    H1, H2, rms, max_error = epipolar.rectify_uncalibrated(x1, x2, F, imsize)
    if shearing:
        # Remove the shearing component from the left homography.
        S = epipolar.rectify_shearing(H1, H2, imsize)
        H1 = S.dot(H1)

    # TODO: lRect or rRect for img1/img2 ??
    K_inv = la.inv(K)
    rH = K_inv.dot(H1).dot(K)
    lH = K_inv.dot(H2).dot(K)

    map1x, map1y = cv2.initUndistortRectifyMap(K, d, rH, K, imsize, cv.CV_16SC2)
    map2x, map2y = cv2.initUndistortRectifyMap(K, d, lH, K, imsize, cv.CV_16SC2)
    rimg1 = cv2.remap(img1, map1x, map1y,
                      interpolation=cv.INTER_CUBIC,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0, 0, 0))
    rimg2 = cv2.remap(img2, map2x, map2y,
                      interpolation=cv.INTER_CUBIC,
                      borderMode=cv2.BORDER_CONSTANT,
                      borderValue=(0, 0, 0))
    return rimg1, rimg2, rms, max_error

# rectify a floating point image pair based on the Fundamental matrix
# use this for XYZ images
Example #28
Source File: blob_detect.py From pc-drone with MIT License | 5 votes |
def undistort_crop(orig_img):
    """Undistort a frame using the module-level map1/map2 and crop to roi."""
    # cv2.remap(src, map1, map2, interpolation[, dst[, borderMode[, borderValue]]]) -> dst
    undistorted = cv2.remap(orig_img, map1, map2, cv2.INTER_LINEAR)
    x, y, w, h = roi
    return undistorted[y:y+h, x:x+w]
Example #29
Source File: warp.py From pytorch_connectomics with MIT License | 5 votes |
def __call__(self, data, random_state=np.random):
    """Apply a shared elastic deformation to every z-slice of a 3D/4D volume.

    Parameters
    ----------
    data : dict
        Must contain 'image' (shape (z, y, x) or (c, z, y, x)); may contain
        'label' with matching (z, y, x) layout.
    random_state : np.random module or RandomState
        Source of randomness for the displacement field.

    Returns
    -------
    dict with 'image' and 'label' keys holding the warped arrays.
    """
    if 'label' in data and data['label'] is not None:
        image, label = data['image'], data['label']
    else:
        image = data['image']
    height, width = image.shape[-2:] # (c, z, y, x)
    # One displacement field shared by all slices and channels so the
    # volume deforms coherently along z.
    dx = np.float32(gaussian_filter((random_state.rand(height, width) * 2 - 1), self.sigma) * self.alpha)
    dy = np.float32(gaussian_filter((random_state.rand(height, width) * 2 - 1), self.sigma) * self.alpha)
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    mapx, mapy = np.float32(x + dx), np.float32(y + dy)
    output = {}
    transformed_image = []
    transformed_label = []
    for i in range(image.shape[-3]):
        if image.ndim == 3:
            transformed_image.append(cv2.remap(image[i], mapx, mapy,
                                               self.image_interpolation, borderMode=self.border_mode))
        else:
            # 4-D input: remap each channel of slice i separately.
            temp = [cv2.remap(image[channel, i], mapx, mapy,
                              self.image_interpolation, borderMode=self.border_mode)
                    for channel in range(image.shape[0])]
            transformed_image.append(np.stack(temp, 0))
        if 'label' in data and data['label'] is not None:
            transformed_label.append(cv2.remap(label[i], mapx, mapy,
                                               self.label_interpolation, borderMode=self.border_mode))
    if image.ndim == 3: # (z,y,x)
        transformed_image = np.stack(transformed_image, 0)
    else: # (c,z,y,x)
        transformed_image = np.stack(transformed_image, 1)
    # NOTE(review): this stack runs even when no label was provided, in which
    # case np.stack([]) raises — presumably callers always pass a label here;
    # confirm against the calling pipeline.
    transformed_label = np.stack(transformed_label, 0)
    output['image'] = transformed_image
    output['label'] = transformed_label
    return output
Example #30
Source File: main.py From fisheye with Apache License 2.0 | 5 votes |
def undistort(img_path, K, D, DIM, scale=0.6, imshow=False):
    """Undistort a fisheye image.

    Parameters: `K`/`D` are the fisheye calibration matrices, `DIM` the
    calibration image size, `scale` shrinks the focal lengths to widen the
    field of view, and `imshow` displays the result.
    """
    img = cv2.imread(img_path)
    dim1 = img.shape[:2][::-1]  # dim1 is the dimension of input image to un-distort
    assert dim1[0]/dim1[1] == DIM[0]/DIM[1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
    if dim1[0] != DIM[0]:
        img = cv2.resize(img, DIM, interpolation=cv2.INTER_AREA)
    Knew = K.copy()
    if scale:  # change fov
        Knew[(0, 1), (0, 1)] = scale * Knew[(0, 1), (0, 1)]
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
        K, D, np.eye(3), Knew, DIM, cv2.CV_16SC2)
    undistorted_img = cv2.remap(img, map1, map2,
                                interpolation=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_CONSTANT)
    if imshow:
        cv2.imshow("undistorted", undistorted_img)
    return undistorted_img