Python cv2.filter2D() Examples

The following are 30 code examples of cv2.filter2D(), drawn from open-source projects. The source file and project for each example are listed above it. You may also want to check out the other available functions and classes of the cv2 module.
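Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. The image path and the 3x3 sharpening kernel are illustrative choices, not taken from any of the projects below.

import cv2
import numpy as np

# Load any test image (replace the path with one that exists on your machine).
img = cv2.imread("input.jpg")

# A simple 3x3 sharpening kernel (illustrative choice).
kernel = np.array([[ 0, -1,  0],
                   [-1,  5, -1],
                   [ 0, -1,  0]], dtype=np.float32)

# ddepth=-1 keeps the output depth the same as the input depth.
sharpened = cv2.filter2D(img, -1, kernel)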
Example #1
Source File: calculate_PSNR_SSIM.py    From EDVR with Apache License 2.0
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
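A possible way to call the helper above (the file names and the grayscale read are assumptions for illustration; the function expects two single-channel images of the same size):

import cv2

gt = cv2.imread("gt.png", cv2.IMREAD_GRAYSCALE)   # reference image, assumed path
sr = cv2.imread("sr.png", cv2.IMREAD_GRAYSCALE)   # reconstructed image, assumed path
print("SSIM: %.4f" % ssim(gt, sr))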
Example #2
Source File: util.py    From real-world-sr with MIT License
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #3
Source File: calculate_PSNR_SSIM.py    From real-world-sr with MIT License
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #4
Source File: trainer.py    From visual-pushing-grasping with BSD 2-Clause "Simplified" License
def push_heuristic(self, depth_heightmap):

        num_rotations = 16

        for rotate_idx in range(num_rotations):
            rotated_heightmap = ndimage.rotate(depth_heightmap, rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            valid_areas = np.zeros(rotated_heightmap.shape)
            valid_areas[ndimage.interpolation.shift(rotated_heightmap, [0,-25], order=0) - rotated_heightmap > 0.02] = 1
            # valid_areas = np.multiply(valid_areas, rotated_heightmap)
            blur_kernel = np.ones((25,25),np.float32)/9
            valid_areas = cv2.filter2D(valid_areas, -1, blur_kernel)
            tmp_push_predictions = ndimage.rotate(valid_areas, -rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            tmp_push_predictions.shape = (1, rotated_heightmap.shape[0], rotated_heightmap.shape[1])

            if rotate_idx == 0:
                push_predictions = tmp_push_predictions
            else:
                push_predictions = np.concatenate((push_predictions, tmp_push_predictions), axis=0)

        best_pix_ind = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
        return best_pix_ind 
Example #5
Source File: trainer.py    From visual-pushing-grasping with BSD 2-Clause "Simplified" License
def grasp_heuristic(self, depth_heightmap):

        num_rotations = 16

        for rotate_idx in range(num_rotations):
            rotated_heightmap = ndimage.rotate(depth_heightmap, rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            valid_areas = np.zeros(rotated_heightmap.shape)
            valid_areas[np.logical_and(rotated_heightmap - ndimage.interpolation.shift(rotated_heightmap, [0,-25], order=0) > 0.02, rotated_heightmap - ndimage.interpolation.shift(rotated_heightmap, [0,25], order=0) > 0.02)] = 1
            # valid_areas = np.multiply(valid_areas, rotated_heightmap)
            blur_kernel = np.ones((25,25),np.float32)/9
            valid_areas = cv2.filter2D(valid_areas, -1, blur_kernel)
            tmp_grasp_predictions = ndimage.rotate(valid_areas, -rotate_idx*(360.0/num_rotations), reshape=False, order=0)
            tmp_grasp_predictions.shape = (1, rotated_heightmap.shape[0], rotated_heightmap.shape[1])

            if rotate_idx == 0:
                grasp_predictions = tmp_grasp_predictions
            else:
                grasp_predictions = np.concatenate((grasp_predictions, tmp_grasp_predictions), axis=0)

        best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
        return best_pix_ind 
Example #6
Source File: helpers.py    From simple_convnet with MIT License
def batch_filter3D(input_arr, filters):
    """
    3D filtering (i.e. convolution but without mirroring the filter).

    Parameters
    ----------
    input_arr : numpy array, NxHxWxC size, where N is the number of images to be filtered
    filters : numpy array, H1xW1xCxF size, where F is the number of filters

    Returns
    -------
    result : numpy array, NxHxWxF size
    
    """
    assert input_arr.shape[3] == filters.shape[2]
    num_input = input_arr.shape[0]
    output = np.zeros(input_arr.shape[:3] + (filters.shape[-1],))
    for n in range(num_input):
        input1 = input_arr[n]
        for f in range(filters.shape[-1]):
            for c in range(filters.shape[-2]):
                output[n,:,:,f] += filter2D(input1[...,c].copy(), filters[...,c,f].copy())
    return output 
Example #7
Source File: helpers.py    From simple_convnet with MIT License
def filter2D(input_arr, filter):
    """
    2D filtering (i.e. convolution but without mirroring the filter).  Mostly a convenience wrapper
    around OpenCV.

    Parameters
    ----------
    input_arr : numpy array, HxW size
    filter : numpy array, H1xW1 size
    
    Returns
    -------
    result : numpy array, HxW size

    """
    return cv2.filter2D(input_arr, 
                        -1, 
                        filter,
                        borderType=cv2.BORDER_CONSTANT) 
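A quick sanity check of the two helpers above on random data (shapes chosen arbitrarily for illustration):

import numpy as np

images  = np.random.rand(2, 8, 8, 3).astype(np.float32)   # N x H x W x C
filters = np.random.rand(3, 3, 3, 4).astype(np.float32)   # H1 x W1 x C x F

out = batch_filter3D(images, filters)
print(out.shape)   # (2, 8, 8, 4)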
Example #8
Source File: util.py    From BasicSR with Apache License 2.0
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #9
Source File: calculate_PSNR_SSIM.py    From BasicSR with Apache License 2.0
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #10
Source File: calculate_PSNR_SSIM.py    From IKC with Apache License 2.0
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #11
Source File: util.py    From IKC with Apache License 2.0
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #12
Source File: util.py    From SRFBN_CVPR19 with MIT License
def ssim(img1, img2):

    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #13
Source File: motion_blur.py    From pytorch_connectomics with MIT License
def motion_blur(self, data, random_state):
        images = data['image'].copy()
        labels = data['label'].copy()

        # generating the kernel
        kernel_motion_blur = np.zeros((self.size, self.size))
        if random.random() > 0.5: # horizontal kernel
            kernel_motion_blur[int((self.size-1)/2), :] = np.ones(self.size)
        else: # vertical kernel
            kernel_motion_blur[:, int((self.size-1)/2)] = np.ones(self.size)
        kernel_motion_blur = kernel_motion_blur / self.size

        k = min(self.sections, images.shape[0])
        selected_idx = np.random.choice(images.shape[0], k, replace=True)

        for idx in selected_idx:
            # applying the kernel to the input image
            images[idx] = cv2.filter2D(images[idx], -1, kernel_motion_blur)
    
        return images, labels 
Example #14
Source File: spfunctions.py    From spfeas with MIT License
def get_mag_avg(img):

    img = np.sqrt(img)

    kernels = get_kernels()

    mag = np.zeros(img.shape, dtype='float32')

    for kernel_filter in kernels:

        gx = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[1], borderType=cv2.BORDER_REFLECT)
        gy = cv2.filter2D(np.float32(img), cv2.CV_32F, kernel_filter[0], borderType=cv2.BORDER_REFLECT)

        mag += cv2.magnitude(gx, gy)

    mag /= len(kernels)

    return np.uint8(mag) 
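get_kernels() is defined elsewhere in spfeas and returns pairs of directional derivative kernels. For a self-contained run, a hypothetical stand-in returning a single (ky, kx) Sobel-like pair could look like this (an assumption for illustration, not the project's actual kernel set):

import numpy as np

def get_kernels():
    # Hypothetical stand-in: one (ky, kx) pair of Sobel-like kernels.
    kx = np.array([[-1, 0, 1],
                   [-2, 0, 2],
                   [-1, 0, 1]], dtype=np.float32)
    ky = kx.T.copy()
    return [(ky, kx)]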
Example #15
Source File: transforms.py    From kaggle_carvana_segmentation with MIT License
def __call__(self, img):
        if random.random() < self.prob:
            alpha = self.limit * random.uniform(0, 1)
            kernel = np.ones((3, 3), np.float32)/9 * 0.2

            colored = img[..., :3]
            colored = alpha * cv2.filter2D(colored, -1, kernel) + (1-alpha) * colored
            maxval = np.max(img[..., :3])
            dtype = img.dtype
            img[..., :3] = clip(colored, dtype, maxval)

        return img


# https://github.com/pytorch/vision/pull/27/commits/659c854c6971ecc5b94dca3f4459ef2b7e42fb70
# color augmentation

# brightness, contrast, saturation-------------
# from mxnet code, see: https://github.com/dmlc/mxnet/blob/master/python/mxnet/image.py 
Example #16
Source File: psnr.py    From pytorch-tools with MIT License
def _ssim(img1, img2):
        C1 = (0.01 * 255) ** 2
        C2 = (0.03 * 255) ** 2

        img1 = img1.astype(np.float64)
        img2 = img2.astype(np.float64)
        kernel = cv2.getGaussianKernel(11, 1.5)
        window = np.outer(kernel, kernel.transpose())

        mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
        mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
        mu1_sq = mu1 ** 2
        mu2_sq = mu2 ** 2
        mu1_mu2 = mu1 * mu2
        sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
        sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
        sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

        ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
            (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
        )
        return ssim_map.mean() 
Example #17
Source File: utils_image.py    From KAIR with MIT License
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #18
Source File: util.py    From EDVR with Apache License 2.0
def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean() 
Example #19
Source File: autoRIFT.py    From autoRIFT with Apache License 2.0
def preprocess_filt_sob(self):
        '''
        Do the pre-processing using a Sobel filter (4.5/5.8 min).
        '''
        import cv2
        import numpy as np
        
        
        
        if self.zeroMask is not None:
            self.zeroMask = (self.I1 == 0)
        
        sobelx = cv2.getDerivKernels(1,0,self.WallisFilterWidth)
        
        kernelx = np.outer(sobelx[0],sobelx[1])
        
        sobely = cv2.getDerivKernels(0,1,self.WallisFilterWidth)
        
        kernely = np.outer(sobely[0],sobely[1])
        
        kernel = kernelx + kernely
        
        self.I1 = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)
        
        self.I2 = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT) 
Example #20
Source File: linemod_dataset.py    From PVN3D with MIT License
def linear_motion_blur(self, img, angle, length):
        """:param angle: in degree"""
        rad = np.deg2rad(angle)
        dx = np.cos(rad)
        dy = np.sin(rad)
        a = int(max(list(map(abs, (dx, dy)))) * length * 2)
        if a <= 0:
            return img
        kern = np.zeros((a, a))
        cx, cy = a // 2, a // 2
        dx, dy = list(map(int, (dx * length + cx, dy * length + cy)))
        cv2.line(kern, (cx, cy), (dx, dy), 1.0)
        s = kern.sum()
        if s == 0:
            kern[cx, cy] = 1.0
        else:
            kern /= s
        return cv2.filter2D(img, -1, kern) 
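If the method above is lifted out of the class as a free function (dropping the self parameter), it can be exercised like this; the image path, angle and length are arbitrary illustrative values:

import cv2

img = cv2.imread("sample.png")                            # assumed test image
blurred = linear_motion_blur(img, angle=30.0, length=9)   # 30-degree streak of ~9 px
cv2.imwrite("sample_blurred.png", blurred)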
Example #21
Source File: autoRIFT.py    From autoRIFT with Apache License 2.0
def preprocess_filt_hps(self):
        '''
        Do the pre-processing using (original - low-pass filter) = high-pass filter (3.9/5.3 min).
        '''
        import cv2
        import numpy as np
        

        if self.zeroMask is not None:
            self.zeroMask = (self.I1 == 0)

        kernel = -np.ones((self.WallisFilterWidth,self.WallisFilterWidth), dtype=np.float32)

        kernel[int((self.WallisFilterWidth-1)/2),int((self.WallisFilterWidth-1)/2)] = kernel.size - 1

        kernel = kernel / kernel.size

#        pdb.set_trace()

        self.I1 = cv2.filter2D(self.I1,-1,kernel,borderType=cv2.BORDER_CONSTANT)

        self.I2 = cv2.filter2D(self.I2,-1,kernel,borderType=cv2.BORDER_CONSTANT) 
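The kernel built above is a mean-subtraction (high-pass) filter: every tap is -1/N except the centre, which is (N-1)/N, so the taps sum to zero. A standalone sketch, with a hypothetical width of 5 standing in for self.WallisFilterWidth and a dummy image:

import cv2
import numpy as np

width = 5                                          # stand-in for self.WallisFilterWidth
kernel = -np.ones((width, width), dtype=np.float32)
kernel[(width - 1) // 2, (width - 1) // 2] = kernel.size - 1
kernel = kernel / kernel.size                      # taps now sum to zero

img = np.random.rand(64, 64).astype(np.float32)    # dummy image
hp = cv2.filter2D(img, -1, kernel, borderType=cv2.BORDER_CONSTANT)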
Example #22
Source File: ycb_dataset.py    From PVN3D with MIT License
def linear_motion_blur(self, img, angle, length):
        """:param angle: in degree"""
        rad = np.deg2rad(angle)
        dx = np.cos(rad)
        dy = np.sin(rad)
        a = int(max(list(map(abs, (dx, dy)))) * length * 2)
        if a <= 0:
            return img
        kern = np.zeros((a, a))
        cx, cy = a // 2, a // 2
        dx, dy = list(map(int, (dx * length + cx, dy * length + cy)))
        cv2.line(kern, (cx, cy), (dx, dy), 1.0)
        s = kern.sum()
        if s == 0:
            kern[cx, cy] = 1.0
        else:
            kern /= s
        return cv2.filter2D(img, -1, kern) 
Example #23
Source File: siam_mask_dataset.py    From SiamMask with MIT License
def blur_image(self, image):
        def rand_kernel():
            size = np.random.randn(1)
            size = int(np.round(size)) * 2 + 1
            if size < 0: return None
            if random.random() < 0.5: return None
            size = min(size, 45)
            kernel = np.zeros((size, size))
            c = int(size/2)
            wx = random.random()
            kernel[:, c] += 1. / size * wx
            kernel[c, :] += 1. / size * (1-wx)
            return kernel

        kernel = rand_kernel()

        if kernel is not None:
            image = cv2.filter2D(image, -1, kernel)
        return image 
Example #24
Source File: siam_rpn_dataset.py    From SiamMask with MIT License
def blur_image(self, image):
        def rand_kernel():
            size = np.random.randn(1)
            size = int(np.round(size)) * 2 + 1
            if size < 0: return None
            if random.random() < 0.5: return None
            size = min(size, 45)
            kernel = np.zeros((size, size))
            c = int(size/2)
            wx = random.random()
            kernel[:, c] += 1. / size * wx
            kernel[c, :] += 1. / size * (1-wx)
            return kernel

        kernel = rand_kernel()

        if kernel is not None:
            image = cv2.filter2D(image, -1, kernel)
        return image 
Example #25
Source File: cvutils.py    From 1ZLAB_PyEspCar with GNU General Public License v3.0
def backprojection(target, roihist):
    '''Image pre-processing.'''
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
    # Now convolute with circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
    cv2.filter2D(dst,-1,disc,dst)
    # threshold and binary AND
    ret,binary = cv2.threshold(dst,80,255,0)
    # create the kernel
    kernel = np.ones((5,5), np.uint8)
    iter_time = 1
    # morphological closing
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel,iterations=iter_time)

    thresh = cv2.merge((binary,binary,binary))
    target_filter = cv2.bitwise_and(target,thresh)
    
    return binary, target_filter 
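The roihist argument is a 2D hue/saturation histogram of the object to track. A sketch of how it might be built before calling the function above (the image path and ROI coordinates are hypothetical):

import cv2
import numpy as np

target = cv2.imread("scene.png")                  # assumed input frame
roi = target[100:200, 150:250]                    # hypothetical region containing the object

hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
roihist = cv2.calcHist([hsv_roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)

binary, target_filter = backprojection(target, roihist)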
Example #26
Source File: dwdii.py    From facial_expressions with Apache License 2.0
def cvBlurMotion2(img):
    """https://www.packtpub.com/mapt/book/application-development/9781785283932/2/ch02lvl1sec23/Embossing"""
    size = 30
    kernel_motion_blur = np.zeros((size, size))
    kernel_motion_blur[int((size - 1) / 2), :] = np.ones(size)
    kernel_motion_blur = kernel_motion_blur / size

    img2 = cv2.filter2D(img, -1, kernel_motion_blur)
    return img2 
Example #27
Source File: dwdii.py    From facial_expressions with Apache License 2.0
def cvEdgeEnhancement(img):
    """https://www.packtpub.com/mapt/book/application-development/9781785283932/2/ch02lvl1sec22/Sharpening"""
    kernel_sharpen_3 = np.array([[-1, -1, -1, -1, -1],
                                 [-1, 2, 2, 2, -1],
                                 [-1, 2, 8, 2, -1],
                                 [-1, 2, 2, 2, -1],
                                 [-1, -1, -1, -1, -1]]) / 8.0

    img2 = cv2.filter2D(img, -1, kernel_sharpen_3)
    return img2 
Example #28
Source File: pySaliencyMap.py    From pliers with BSD 3-Clause "New" or "Revised" License
def OFMGetFM(self, src):
        # creating a Gaussian pyramid
        GaussianI = self.FMCreateGaussianPyr(src)
        # convolving a Gabor filter with an intensity image to extract
        # orientation features
        # dummy data: any kinds of np.array()s are OK
        GaborOutput0 = [np.empty((1, 1)), np.empty((1, 1))]
        GaborOutput45 = [np.empty((1, 1)), np.empty((1, 1))]
        GaborOutput90 = [np.empty((1, 1)), np.empty((1, 1))]
        GaborOutput135 = [np.empty((1, 1)), np.empty((1, 1))]
        for j in range(2, 9):
            GaborOutput0.append(
                cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel0))
            GaborOutput45.append(
                cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel45))
            GaborOutput90.append(
                cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel90))
            GaborOutput135.append(
                cv2.filter2D(GaussianI[j], cv2.CV_32F, self.GaborKernel135))
        # calculating center-surround differences for every orientation
        CSD0 = self.FMCenterSurroundDiff(GaborOutput0)
        CSD45 = self.FMCenterSurroundDiff(GaborOutput45)
        CSD90 = self.FMCenterSurroundDiff(GaborOutput90)
        CSD135 = self.FMCenterSurroundDiff(GaborOutput135)
        # concatenate
        dst = list(CSD0)
        dst.extend(CSD45)
        dst.extend(CSD90)
        dst.extend(CSD135)
        # return
        return dst
    # motion feature maps 
Example #29
Source File: dwdii.py    From facial_expressions with Apache License 2.0
def cvExcessiveSharpening(img):
    """https://www.packtpub.com/mapt/book/application-development/9781785283932/2/ch02lvl1sec22/Sharpening"""
    kernel_sharpen_1 = np.array([[1, 1, 1], [1, -7, 1], [1, 1, 1]])
    img2 = cv2.filter2D(img, -1, kernel_sharpen_1)
    return img2 
Example #30
Source File: KernalFiltering.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    image = cv2.imread("../data/7.1.01.tiff", 1)

    '''
    # Kernel or convolution matrix for the identity filter

    kernal = np.array(([0, 0, 0],
                       [0, 1, 0],
                       [0, 0, 0]), np.float32)

    # Kernel or convolution matrix for edge detection

    kernal = np.array(([-1, -1, -1],
                       [-1, 8, -1],
                       [-1, -1, -1]), np.float32)

    '''

    # Kernel or convolution matrix for a box blur filter

    kernal = np.ones((5, 5), np.uint8) / 25
    output = cv2.filter2D(image, -1, kernal)

    # Low pass filters implementation
    box_blur = cv2.boxFilter(image, -1, (31, 31))
    simple_blur = cv2.blur(image, (21, 21))
    gaussian_blur = cv2.GaussianBlur(image, (51, 51), 0)

    cv2.imshow("Orignal Image", image)
    cv2.imshow("Filtered Image", output)

    cv2.imshow("Box Blur", box_blur)
    cv2.imshow("Simple Blur", simple_blur)
    cv2.imshow("Gaussian Blur", gaussian_blur)

    cv2.waitKey(0)
    cv2.destroyAllWindows()