Python cv2.BORDER_REFLECT_101 Examples

The following are 30 code examples of cv2.BORDER_REFLECT_101, a border mode that pads an image by mirroring pixels across the edge without repeating the edge pixel itself. Each example lists its source file and the project it comes from. You may also want to check out all available functions/classes of the module cv2.
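Both reflection border modes pad an image by mirroring pixels across its edge, but cv2.BORDER_REFLECT_101 (the default border mode for most OpenCV geometric transforms) does not repeat the edge pixel, while cv2.BORDER_REFLECT does. A minimal sketch of the difference, not taken from any of the projects below:

import cv2
import numpy as np

row = np.array([[1, 2, 3, 4, 5]], dtype=np.uint8)

# BORDER_REFLECT_101 mirrors around the edge pixel without repeating it:
# [[3 2 1 2 3 4 5 4 3]]
print(cv2.copyMakeBorder(row, 0, 0, 2, 2, cv2.BORDER_REFLECT_101))

# BORDER_REFLECT repeats the edge pixel:
# [[2 1 1 2 3 4 5 5 4]]
print(cv2.copyMakeBorder(row, 0, 0, 2, 2, cv2.BORDER_REFLECT))
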
Example #1
Source File: train.py    From ss-ood with MIT License
def __getitem__(self, index):
        x, _ = self.dataset[index // num_perts]
        pert = pert_configs[index % num_perts]

        x = np.asarray(resize_and_crop(x))

        if np.random.uniform() < 0.5:
            x = x[:, ::-1]
        x = cv2f.affine(np.asarray(x), 0, (pert[0], pert[1]), 1, 0,
                        interpolation=cv2.INTER_LINEAR, mode=cv2.BORDER_REFLECT_101)

        label = [expanded_params[i].index(pert[i]) for i in range(len(expanded_params))]
        label = np.vstack((label + [0], label + [1], label + [2], label + [3]))

        x = trnF.to_tensor(x.copy()).unsqueeze(0).numpy()
        x = np.concatenate((x, np.rot90(x, 1, axes=(2, 3)),
                            np.rot90(x, 2, axes=(2, 3)), np.rot90(x, 3, axes=(2, 3))), 0)

        return torch.FloatTensor(x), label 
Example #2
Source File: chineselib.py    From ctw-baseline with MIT License
def cv_preprocess_image(img, output_height, output_width, is_training):
        assert output_height == output_width
        img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        img[:, :, 0] = np.uint8((np.int32(img[:, :, 0]) + (180 + random.randrange(-9, 10))) % 180)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
        rows, cols, ch = img.shape
        output_size = output_width

        def r():
            return (random.random() - 0.5) * 0.1 * output_size
        pts1 = np.float32([[0, 0], [cols, rows], [0, rows]])
        pts2 = np.float32([[r(), r()], [output_size + r(), output_size + r()], [r(), output_size + r()]])
        M = cv2.getAffineTransform(pts1, pts2)
        noize = np.random.normal(0, random.random() * (0.05 * 255), size=img.shape)
        img = np.array(img, dtype=np.float32) + noize
        img = cv2.warpAffine(img, M, (output_size, output_size), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
        return img 
Example #3
Source File: functional.py    From dsb2018_topcoders with MIT License
def shift_scale_rotate(img, angle, scale, dx, dy):
    height, width = img.shape[:2]

    cc = math.cos(angle/180*math.pi) * scale
    ss = math.sin(angle/180*math.pi) * scale
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0],  [width, height], [0, height], ])
    box1 = box0 - np.array([width/2, height/2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)
    img = cv2.warpPerspective(img, mat, (width, height),
                              flags=cv2.INTER_LINEAR,
                              borderMode=cv2.BORDER_REFLECT_101)

    return img 
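
A short usage sketch for the helper above; the input image path and the parameter values are illustrative assumptions, not part of the original project:

# Illustrative call of shift_scale_rotate as defined above; 'example.jpg' and
# the angle/scale/dx/dy values are assumptions for this sketch.
img = cv2.imread('example.jpg')
aug = shift_scale_rotate(img, angle=15, scale=1.1, dx=0.05, dy=-0.05)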
Example #4
Source File: image_processing_common.py    From tensorflow-litterbox with Apache License 2.0
def distort_affine_cv2(image, alpha_affine=10, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)

    M = cv2.getAffineTransform(pts1, pts2)
    distorted_image = cv2.warpAffine(
        image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE) #cv2.BORDER_REFLECT_101)

    return distorted_image 
Example #5
Source File: unet_transforms.py    From pytorch-saltnet with MIT License
def do_rotation_transform(image, mask, angle=0):
    height, width = image.shape[:2]
    cc = np.cos(angle / 180 * np.pi)
    ss = np.sin(angle / 180 * np.pi)
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = box0 - np.array([width / 2, height / 2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2, height / 2])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101,
                                borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101,
                               borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Example #6
Source File: AverageFace.py    From Machine-Learning-Study-Notes with Apache License 2.0
def warpTriangle(img1, img2, t1, t2):
        def applyAffineTransform(src, srcTri, dstTri, size) :
            warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
            dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, 
                                 flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
            return dst
        r1 = cv2.boundingRect(np.float32([t1]))
        r2 = cv2.boundingRect(np.float32([t2]))
        t1Rect = [] 
        t2Rect = []
        t2RectInt = []
        for i in range(0, 3):
            t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
            t2Rect.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
            t2RectInt.append(((t2[i][0] - r2[0]),(t2[i][1] - r2[1])))
        mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)
        cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1, 1, 1));
        img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
        size = (r2[2], r2[3])
        img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
        img2Rect = img2Rect * mask
        img2[r2[1]: r2[1] + r2[3], r2[0]: r2[0] + r2[2]] = img2[r2[1]: r2[1] + r2[3], 
                                                                r2[0]: r2[0] + r2[2]] * ((1.0, 1.0, 1.0) - mask)
        img2[r2[1]: r2[1] + r2[3], r2[0]: r2[0] + r2[2]] = img2[r2[1]: r2[1] + r2[3],
                                                                 r2[0]: r2[0] + r2[2]] + img2Rect 
Example #7
Source File: transforms.py    From kaggle_carvana_segmentation with MIT License
def __call__(self, img, mask=None):
        if random.random() < self.prob:
            limit = self.limit
            dx = round(random.uniform(-limit, limit))
            dy = round(random.uniform(-limit, limit))

            height, width, channel = img.shape
            y1 = limit+1+dy
            y2 = y1 + height
            x1 = limit+1+dx
            x2 = x1 + width

            img1 = cv2.copyMakeBorder(img, limit+1, limit+1, limit+1, limit+1, borderType=cv2.BORDER_REFLECT_101)
            img = img1[y1:y2, x1:x2, :]
            if mask is not None:
                msk1 = cv2.copyMakeBorder(mask, limit+1, limit+1, limit+1, limit+1, borderType=cv2.BORDER_REFLECT_101)
                mask = msk1[y1:y2, x1:x2, :]

        return img, mask 
Example #8
Source File: augmentations.py    From segmentation-networks-benchmark with MIT License
def __call__(self, img, mask=None):
        if random.random() < self.prob:
            limit = self.limit
            dx = round(random.uniform(-limit, limit))
            dy = round(random.uniform(-limit, limit))

            height, width, channel = img.shape
            y1 = limit + 1 + dy
            y2 = y1 + height
            x1 = limit + 1 + dx
            x2 = x1 + width

            img1 = cv2.copyMakeBorder(img, limit + 1, limit + 1, limit + 1, limit + 1, borderType=cv2.BORDER_REFLECT_101)
            img = img1[y1:y2, x1:x2, :].copy()
            if mask is not None:
                msk1 = cv2.copyMakeBorder(mask, limit + 1, limit + 1, limit + 1, limit + 1, borderType=cv2.BORDER_REFLECT_101)
                mask = msk1[y1:y2, x1:x2, :].copy()

        return img, mask 
Example #9
Source File: unet_transforms.py    From pytorch-saltnet with MIT License
def do_horizontal_shear(image, mask, scale=0):
    height, width = image.shape[:2]
    dx = int(scale * width)

    box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ], np.float32)
    box1 = np.array([[+dx, 0], [width + dx, 0], [width - dx, height], [-dx, height], ], np.float32)

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)

    image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR,
                                borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_NEAREST,
                               borderMode=cv2.BORDER_REFLECT_101, borderValue=(0, 0, 0,))
    # mask = (mask > 0.5).astype(np.float32)
    return image, mask 
Example #10
Source File: dilated.py    From adversarial-attacks with MIT License
def PreprocessImage(name, args):
    """Preprocess according to the original author's code."""

    image = cv2.imread(name, 1).astype(np.float32) - args.mean

    if args.resize_dims is not None:
        image = cv2.resize(image, dsize = (args.resize_dims[0], args.resize_dims[1]))

    im_height = image.shape[0]
    im_width = image.shape[1]

    label_margin = 186
    input_image = cv2.copyMakeBorder(image, label_margin, label_margin,
                                     label_margin, label_margin, cv2.BORDER_REFLECT_101)
    input_size = [args.pad_size[1], args.pad_size[0]] # Order is H x W
    margin = [0, input_size[0] - input_image.shape[0],
              0, input_size[1] - input_image.shape[1]]

    input_image = cv2.copyMakeBorder(input_image, margin[0], margin[1], margin[2],
                                     margin[3], cv2.BORDER_REFLECT_101)

    input_image = input_image.transpose([2,0,1]) # To make it C x H x W
    return input_image, im_height, im_width, image 
Example #11
Source File: functional.py    From dsb2018_topcoders with MIT License
def shift_scale_rotate(img, angle, scale, dx, dy):
    height, width = img.shape[:2]

    cc = math.cos(angle/180*math.pi) * scale
    ss = math.sin(angle/180*math.pi) * scale
    rotate_matrix = np.array([[cc, -ss], [ss, cc]])

    box0 = np.array([[0, 0], [width, 0],  [width, height], [0, height], ])
    box1 = box0 - np.array([width/2, height/2])
    box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])

    box0 = box0.astype(np.float32)
    box1 = box1.astype(np.float32)
    mat = cv2.getPerspectiveTransform(box0, box1)
    img = cv2.warpPerspective(img, mat, (width, height),
                              flags=cv2.INTER_LINEAR,
                              borderMode=cv2.BORDER_REFLECT_101)

    return img 
Example #12
Source File: functional.py    From dsb2018_topcoders with MIT License
def rotate(img, angle):
    height, width = img.shape[0:2]
    mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 1.0)
    img = cv2.warpAffine(img, mat, (width, height),
                         flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)
    return img 
Example #13
Source File: functional.py    From dsb2018_topcoders with MIT License
def elastic_transform_fast(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(1234)

    shape = image.shape
    shape_size = shape[:2]


    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    alpha = float(alpha)
    sigma = float(sigma)
    alpha_affine = float(alpha_affine)

    pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)

    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = np.float32(gaussian_filter((random_state.rand(*shape_size) * 2 - 1), sigma) * alpha)
    dy = np.float32(gaussian_filter((random_state.rand(*shape_size) * 2 - 1), sigma) * alpha)

    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))

    mapx = np.float32(x + dx)
    mapy = np.float32(y + dy)

    return cv2.remap(image, mapx, mapy, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101) 
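
A short usage sketch for elastic_transform_fast as defined above; the input image and the alpha/sigma/alpha_affine values are illustrative assumptions:

# Illustrative call of elastic_transform_fast; 'sample.png' and the parameter
# values are assumptions. The helper above also requires
# scipy.ndimage.gaussian_filter to be imported.
img = cv2.imread('sample.png')
warped = elastic_transform_fast(img, alpha=100, sigma=10, alpha_affine=10)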
Example #14
Source File: opencv_functional.py    From ss-ood with MIT License
def rotate(img, angle, resample=False, expand=False, center=None):
    """Rotate the image by angle.
    Args:
        img (numpy ndarray): numpy ndarray to be rotated.
        angle (float or int): Rotation angle in degrees, counter-clockwise.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    rows,cols = img.shape[0:2]
    if center is None:
        center = (cols/2-0.5, rows/2-0.5)
    M = cv2.getRotationMatrix2D(center,angle,1)
    # if img.shape[2]==1:
    #     return cv2.warpAffine(img,M,(cols,rows), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)[:,:,np.newaxis]
    # else:
    #     return cv2.warpAffine(img,M,(cols,rows), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    if img.shape[2]==1:
        return cv2.warpAffine(img,M,(cols,rows), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)[:,:,np.newaxis]
    else:
        return cv2.warpAffine(img,M,(cols,rows), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101) 
Example #15
Source File: opencv_functional.py    From ss-ood with MIT License
def affine(img, angle, translate, scale, shear, interpolation=cv2.INTER_CUBIC, mode=cv2.BORDER_CONSTANT, fillcolor=0):
    """Apply affine transformation on the image keeping image center invariant
    Args:
        img (numpy ndarray): numpy ndarray to be transformed.
        angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float): shear angle value in degrees between -180 and 180, clockwise direction.
        interpolation (``cv2.INTER_NEAREST`` or ``cv2.INTER_LINEAR`` or ``cv2.INTER_AREA`` or ``cv2.INTER_CUBIC``):
            An optional resampling filter.
            See `filters`_ for more information.
            If omitted, it is set to ``cv2.INTER_CUBIC``, for bicubic interpolation.
        mode (``cv2.BORDER_CONSTANT`` or ``cv2.BORDER_REPLICATE`` or ``cv2.BORDER_REFLECT`` or ``cv2.BORDER_REFLECT_101``):
            Method for filling in border regions.
            Defaults to ``cv2.BORDER_CONSTANT``, meaning areas outside the image are filled with a value (fillcolor, default 0).
        fillcolor (int): Optional fill color for the area outside the transform in the output image. Default: 0.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))

    assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
        "Argument translate should be a list or tuple of length 2"

    assert scale > 0.0, "Argument scale should be positive"

    output_size = img.shape[0:2]
    center = (img.shape[1] * 0.5 + 0.5, img.shape[0] * 0.5 + 0.5)
    matrix = _get_affine_matrix(center, angle, translate, scale, shear)
    
    if img.shape[2]==1:
        return cv2.warpAffine(img, matrix, output_size[::-1], flags=interpolation, borderMode=mode, borderValue=fillcolor)[:,:,np.newaxis]
    else:
        return cv2.warpAffine(img, matrix, output_size[::-1], flags=interpolation, borderMode=mode, borderValue=fillcolor) 
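
This affine helper is what Examples #1, #18, and #19 call through cv2f.affine; passing mode=cv2.BORDER_REFLECT_101 fills the border revealed by the translation with mirrored pixels instead of a constant value. A small illustrative call (the random 32x32 image is an assumption):

# Illustrative call of the affine helper above; the random image is an
# assumption used only for this sketch.
img = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
shifted = affine(img, angle=0, translate=(8, -8), scale=1, shear=0,
                 interpolation=cv2.INTER_LINEAR, mode=cv2.BORDER_REFLECT_101)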
Example #16
Source File: morph.py    From DeepFaceLab with GNU General Public License v3.0
def applyAffineTransform(src, srcTri, dstTri, size) :
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )
    return cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 ) 
Example #17
Source File: custum_aug.py    From pytorch-segmentation with MIT License
def __init__(self, limit=90, interpolation=cv2.INTER_LINEAR,
                 border_mode=cv2.BORDER_REFLECT_101, border_value=255, always_apply=False, p=.5):
        super().__init__(always_apply, p)
        self.limit = to_tuple(limit)
        self.interpolation = interpolation
        self.border_mode = border_mode
        self.border_value = border_value 
Example #18
Source File: PerturbDataset.py    From ss-ood with MIT License
def __getitem__(self, index):
        x_orig, classifier_target = self.dataset[index]

        if self.train_mode == True and np.random.uniform() < 0.5:
            x_orig = np.copy(x_orig)[:, ::-1]
        else:
            x_orig =  np.copy(x_orig)

        if self.train_mode == True:
            x_orig = Image.fromarray(x_orig)
            x_orig = randomly_crop(x_orig)
            x_orig = np.asarray(x_orig)

        x_tf_0 = np.copy(x_orig)
        x_tf_90 = np.rot90(x_orig.copy(), k=1).copy()
        x_tf_180 = np.rot90(x_orig.copy(), k=2).copy()
        x_tf_270 = np.rot90(x_orig.copy(), k=3).copy()

        possible_translations = list(itertools.product([0, 8, -8], [0, 8, -8]))
        num_possible_translations = len(possible_translations)
        tx, ty = possible_translations[random.randint(0, num_possible_translations - 1)]
        tx_target = {0: 0, 8: 1, -8: 2}[tx]
        ty_target = {0: 0, 8: 1, -8: 2}[ty]
        x_tf_trans = cv2f.affine(np.asarray(x_orig).copy(), 0, (tx, ty), 1, 0, interpolation=cv2.INTER_CUBIC, mode=cv2.BORDER_REFLECT_101)

        return \
            normalize(trnF.to_tensor(x_tf_0)), \
            normalize(trnF.to_tensor(x_tf_90)), \
            normalize(trnF.to_tensor(x_tf_180)), \
            normalize(trnF.to_tensor(x_tf_270)), \
            normalize(trnF.to_tensor(x_tf_trans)), \
            torch.tensor(tx_target), \
            torch.tensor(ty_target), \
            torch.tensor(classifier_target) 
Example #19
Source File: PerturbDataset.py    From ss-ood with MIT License
def __getitem__(self, index):
        x_orig, classifier_target = self.dataset[index]
        x_orig = x_orig.numpy()
        classifier_target = classifier_target.numpy()

        x_tf_0 = np.copy(x_orig)
        x_tf_90 = np.rot90(x_orig.copy(), k=1).copy()
        x_tf_180 = np.rot90(x_orig.copy(), k=2).copy()
        x_tf_270 = np.rot90(x_orig.copy(), k=3).copy()

        possible_translations = list(itertools.product([0, 8, -8], [0, 8, -8]))
        num_possible_translations = len(possible_translations)
        tx, ty = possible_translations[random.randint(0, num_possible_translations - 1)]
        tx_target = {0: 0, 8: 1, -8: 2}[tx]
        ty_target = {0: 0, 8: 1, -8: 2}[ty]
        x_tf_trans = cv2f.affine(np.asarray(x_orig).copy(), 0, (tx, ty), 1, 0, interpolation=cv2.INTER_CUBIC, mode=cv2.BORDER_REFLECT_101)

        return \
            normalize(trnF.to_tensor(x_tf_0)), \
            normalize(trnF.to_tensor(x_tf_90)), \
            normalize(trnF.to_tensor(x_tf_180)), \
            normalize(trnF.to_tensor(x_tf_270)), \
            normalize(trnF.to_tensor(x_tf_trans)), \
            torch.tensor(tx_target), \
            torch.tensor(ty_target), \
            torch.tensor(classifier_target) 
Example #20
Source File: image.py    From keras-retinanet with Apache License 2.0
def cvBorderMode(self):
        if self.fill_mode == 'constant':
            return cv2.BORDER_CONSTANT
        if self.fill_mode == 'nearest':
            return cv2.BORDER_REPLICATE
        if self.fill_mode == 'reflect':
            return cv2.BORDER_REFLECT_101
        if self.fill_mode == 'wrap':
            return cv2.BORDER_WRAP 
Example #21
Source File: functional.py    From dsb2018_topcoders with MIT License
def distort1(img, k=0, dx=0, dy=0):
    """"
    ## unconverntional augmnet ################################################################################3
    ## https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion

    ## https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
    ## https://stackoverflow.com/questions/2477774/correcting-fisheye-distortion-programmatically
    ## http://www.coldvision.io/2017/03/02/advanced-lane-finding-using-opencv/

    ## barrel/pincushion distortion
    """
    height, width = img.shape[:2]
    #  map_x, map_y =
    # cv2.initUndistortRectifyMap(intrinsics, dist_coeffs, None, None, (width,height),cv2.CV_32FC1)
    # https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion
    # https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
    k = k * 0.00001
    dx = dx * width
    dy = dy * height
    x, y = np.mgrid[0:width:1, 0:height:1]
    x = x.astype(np.float32) - width/2 - dx
    y = y.astype(np.float32) - height/2 - dy
    theta = np.arctan2(y, x)
    d = (x*x + y*y)**0.5
    r = d*(1+k*d*d)
    map_x = r*np.cos(theta) + width/2 + dx
    map_y = r*np.sin(theta) + height/2 + dy

    img = cv2.remap(img, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return img 
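
A short usage sketch for distort1 above: positive k produces barrel distortion, negative k produces pincushion distortion, and dx/dy shift the distortion centre as fractions of width and height. The input image and the values below are assumptions:

# Illustrative call of distort1; 'sample.png' and the parameter values are
# assumptions. k is scaled by 1e-5 inside the function.
img = cv2.imread('sample.png')
barrel = distort1(img, k=5, dx=0.1, dy=-0.1)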
Example #22
Source File: functional.py    From dsb2018_topcoders with MIT License
def elastic_transform_fast(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(1234)

    shape = image.shape
    shape_size = shape[:2]


    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    alpha = float(alpha)
    sigma = float(sigma)
    alpha_affine = float(alpha_affine)

    pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)

    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = np.float32(gaussian_filter((random_state.rand(*shape_size) * 2 - 1), sigma) * alpha)
    dy = np.float32(gaussian_filter((random_state.rand(*shape_size) * 2 - 1), sigma) * alpha)

    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))

    mapx = np.float32(x + dx)
    mapy = np.float32(y + dy)

    return cv2.remap(image, mapx, mapy, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101) 
Example #23
Source File: functional.py    From dsb2018_topcoders with MIT License
def distort1(img, k=0, dx=0, dy=0):
    """"
    ## unconverntional augmnet ################################################################################3
    ## https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion

    ## https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
    ## https://stackoverflow.com/questions/2477774/correcting-fisheye-distortion-programmatically
    ## http://www.coldvision.io/2017/03/02/advanced-lane-finding-using-opencv/

    ## barrel/pincushion distortion
    """
    height, width = img.shape[:2]
    #  map_x, map_y =
    # cv2.initUndistortRectifyMap(intrinsics, dist_coeffs, None, None, (width,height),cv2.CV_32FC1)
    # https://stackoverflow.com/questions/6199636/formulas-for-barrel-pincushion-distortion
    # https://stackoverflow.com/questions/10364201/image-transformation-in-opencv
    k = k * 0.00001
    dx = dx * width
    dy = dy * height
    x, y = np.mgrid[0:width:1, 0:height:1]
    x = x.astype(np.float32) - width/2 - dx
    y = y.astype(np.float32) - height/2 - dy
    theta = np.arctan2(y, x)
    d = (x*x + y*y)**0.5
    r = d*(1+k*d*d)
    map_x = r*np.cos(theta) + width/2 + dx
    map_y = r*np.sin(theta) + height/2 + dy

    img = cv2.remap(img, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return img 
Example #24
Source File: functional.py    From dsb2018_topcoders with MIT License
def rotate(img, angle):
    height, width = img.shape[0:2]
    mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 1.0)
    img = cv2.warpAffine(img, mat, (width, height),
                         flags=cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT_101)
    return img 
Example #25
Source File: image.py    From CameraRadarFusionNet with Apache License 2.0
def cvBorderMode(self):
        if self.fill_mode == 'constant':
            return cv2.BORDER_CONSTANT
        if self.fill_mode == 'nearest':
            return cv2.BORDER_REPLICATE
        if self.fill_mode == 'reflect':
            return cv2.BORDER_REFLECT_101
        if self.fill_mode == 'wrap':
            return cv2.BORDER_WRAP 
Example #26
Source File: train92_9ch_fold.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def rotate_image(image, angle, scale, rot_pnt):
    rot_mat = cv2.getRotationMatrix2D(rot_pnt, angle, scale)
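    # note: cv2.warpAffine expects dsize as (width, height); image.shape[:2] is
    # (height, width), so this call assumes square input tiles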
    result = cv2.warpAffine(image, rot_mat, image.shape[:2], flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return result 
Example #27
Source File: train92_9ch_fold.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def shift_image(img, shift_pnt):
    M = np.float32([[1, 0, shift_pnt[0]], [0, 1, shift_pnt[1]]])
    res = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), borderMode=cv2.BORDER_REFLECT_101)
    return res 
Example #28
Source File: train101_9ch_fold.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def shift_image(img, shift_pnt):
    M = np.float32([[1, 0, shift_pnt[0]], [0, 1, shift_pnt[1]]])
    res = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), borderMode=cv2.BORDER_REFLECT_101)
    return res 
Example #29
Source File: train50_9ch_fold.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def rotate_image(image, angle, scale, rot_pnt):
    rot_mat = cv2.getRotationMatrix2D(rot_pnt, angle, scale)
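    # note: cv2.warpAffine expects dsize as (width, height); image.shape[:2] is
    # (height, width), so this call assumes square input tiles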
    result = cv2.warpAffine(image, rot_mat, image.shape[:2], flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
    return result 
Example #30
Source File: train50_9ch_fold.py    From SpaceNet_Off_Nadir_Solutions with Apache License 2.0
def shift_image(img, shift_pnt):
    M = np.float32([[1, 0, shift_pnt[0]], [0, 1, shift_pnt[1]]])
    res = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]), borderMode=cv2.BORDER_REFLECT_101)
    return res
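
A short usage sketch for shift_image above; the input image and the shift point are assumptions. With BORDER_REFLECT_101 the strip exposed by the translation is filled with mirrored pixels instead of black:

# Illustrative call of shift_image; 'tile.png' and the shift point are assumptions.
img = cv2.imread('tile.png')
shifted = shift_image(img, shift_pnt=(12, -7))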