Python cv2.pyrDown() Examples

The following are 28 code examples of cv2.pyrDown(), collected from open-source projects. The original project and source file are noted above each example. You may also want to check out the other available functions and classes of the cv2 module.
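cv2.pyrDown() smooths an image with a 5x5 Gaussian kernel and then discards every other row and column, so each call halves the width and height (rounding up) and produces the next level of a Gaussian image pyramid. As a quick, self-contained illustration (the file name is a placeholder):

import cv2

img = cv2.imread('input.png')          # any readable image
smaller = cv2.pyrDown(img)             # blur with a 5x5 Gaussian, then drop every other row/column
print(img.shape, '->', smaller.shape)  # (H, W, 3) -> ((H+1)//2, (W+1)//2, 3)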
Example #1
Source File: keyframes.py    From pyslam with MIT License
def compute_disparity_pyramid(self):
        self.disparity = []
        stereo = cv2.StereoBM_create()
        # stereo = cv2.StereoSGBM_create(minDisparity=0,
        #                                numDisparities=64,
        #                                blockSize=11)

        # Compute disparity at full resolution and downsample
        disp = stereo.compute(self.im_left, self.im_right).astype(float) / 16.

        for pyrlevel in range(self.pyrlevels):
            if pyrlevel == 0:
                self.disparity = [disp]
            else:
                pyr_factor = 2**-pyrlevel
                # disp = cv2.pyrDown(disp) # Applies a large Gaussian blur
                # kernel!
                disp = disp[0::2, 0::2]
                self.disparity.append(disp * pyr_factor) 
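The commented-out pyrDown call above hints at why this code subsamples with disp[0::2, 0::2] instead: cv2.pyrDown blurs with a 5x5 Gaussian before decimating, which would smear disparity values across depth edges. A small standalone comparison on synthetic data (not part of pyslam):

import cv2
import numpy as np

# synthetic disparity map with one sharp depth edge
disp = np.zeros((64, 64), dtype=np.float32)
disp[:, 32:] = 10.0

blurred = cv2.pyrDown(disp)   # Gaussian blur + decimate: values near the edge are mixed
strided = disp[0::2, 0::2]    # plain decimation: values stay exactly 0 or 10
print(blurred[16, 14:18])
print(strided[16, 14:18])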
Example #2
Source File: util.py    From DoNotSnap with GNU General Public License v3.0
def pyramid(image, minSize):
    yield image

    if image.shape[0] < minSize[0] and image.shape[1] < minSize[1]:
        # image too small - upscaling until we hit window level
        image = cv2.pyrUp(image)

        while (image.shape[0] <= minSize[0] or image.shape[1] <= minSize[1]):
            yield image
            image = cv2.pyrUp(image)
    else:
        # image too big - downscaling until we hit window level
        image = cv2.pyrDown(image)

        while (image.shape[0] >= minSize[0] or image.shape[1] >= minSize[1]):
            yield image
            image = cv2.pyrDown(image)


# Malisiewicz et al. 
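A sketch of how a generator like this might be consumed, for example by a sliding-window detector (the image path and window size are placeholders):

import cv2

min_size = (64, 64)                 # hypothetical detection window
image = cv2.imread('scene.jpg')     # placeholder path
for level in pyramid(image, min_size):
    print(level.shape)              # one line per pyramid level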
Example #3
Source File: 04_feature_match.py    From Practical-Computer-Vision with MIT License
def compute_orb_keypoints(filename):
    """
    Takes in filename to read and computes ORB keypoints
    Returns image, keypoints and descriptors 
    """

    img = cv2.imread(filename)
    img = cv2.pyrDown(img)
    img = cv2.pyrDown(img)
    # img = cv2.pyrDown(img)
    # img = cv2.pyrDown(img)
    # create orb object
    orb = cv2.ORB_create()
    
    # set parameters 
    orb.setScoreType(cv2.FAST_FEATURE_DETECTOR_TYPE_9_16)
    orb.setWTA_K(3)
    
    kp = orb.detect(img,None)

    kp, des = orb.compute(img, kp)
    return img,kp,  des 
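A sketch of how this helper might feed a matcher; note that because WTA_K is set to 3, the descriptors should be compared with cv2.NORM_HAMMING2 rather than cv2.NORM_HAMMING (the file names are placeholders):

import cv2

img1, kp1, des1 = compute_orb_keypoints('left.jpg')
img2, kp2, des2 = compute_orb_keypoints('right.jpg')

matcher = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)
print(len(matches), 'matches')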
Example #4
Source File: Cartoonlization.py    From rabbitVE with GNU General Public License v3.0
def cartoonise(self, img_rgb, num_down, num_bilateral, medianBlur, D, sigmaColor, sigmaSpace):
        # downsample with a Gaussian pyramid
        img_color = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
        for _ in range(num_down):
            img_color = cv2.pyrDown(img_color)
        # apply several small bilateral filters instead of one large filter
        for _ in range(num_bilateral):
            img_color = cv2.bilateralFilter(img_color, d=D, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)
        # upsample the image back to (approximately) the original size
        for _ in range(num_down):
            img_color = cv2.pyrUp(img_color)
        if not self.Save_Edge:
            img_cartoon = img_color
        else:
            img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
            img_blur = cv2.medianBlur(img_gray, medianBlur)
            img_edge = cv2.adaptiveThreshold(img_blur, 255,
                                             cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.THRESH_BINARY,
                                             blockSize=self.Adaptive_Threshold_Block_Size,
                                             C=self.C)
            img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB)
            img_edge = cv2.resize(img_edge, img_color.shape[:2][::-1])
            img_cartoon = cv2.bitwise_and(img_color, img_edge)
        return cv2.cvtColor(img_cartoon, cv2.COLOR_RGB2BGR) 
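The cv2.resize on the edge mask is needed because a pyrDown/pyrUp round trip does not always restore the original size once a dimension is odd, as this small check shows:

import cv2
import numpy as np

img = np.zeros((101, 75, 3), np.uint8)   # odd height and width
down = cv2.pyrDown(img)                  # (51, 38, 3)
up = cv2.pyrUp(down)                     # (102, 76, 3), not (101, 75, 3)
print(img.shape, down.shape, up.shape)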
Example #5
Source File: util.py    From SPTM with MIT License
def downsample(input, factor):
  for _ in xrange(factor):
    input = cv2.pyrDown(input)
  return input 
Example #6
Source File: amplify_color.py    From Heart-rate-measurement-using-camera with Apache License 2.0
def build_gaussian_pyramid(self,src,level=3):
        s=src.copy()
        pyramid=[s]
        for i in range(level):
            s=cv2.pyrDown(s)
            pyramid.append(s)
        return pyramid 
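Since self is never used inside the body, the method can be tried standalone (e.g. copied out of its class) for a quick sanity check with a synthetic frame:

import cv2
import numpy as np

frame = np.random.randint(0, 255, (240, 320, 3), np.uint8)
pyramid = build_gaussian_pyramid(None, frame, level=3)
print([p.shape for p in pyramid])   # (240, 320, 3), (120, 160, 3), (60, 80, 3), (30, 40, 3)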
Example #7
Source File: facedetect.py    From FindFaceInVideo with BSD 2-Clause "Simplified" License
def ProcImage(imagePath, modifyPath):
    print 'ProcImage IN:'
    
    img = cv2.imread(imagePath)
    print imagePath
    sp = img.shape
    while(sp[0] > 768 + 512 or sp[1] > 1024 + 1024):    #sp[0]: height, sp[1]: width, sp[2]: channels
        img = cv2.pyrDown(img)
        sp = img.shape
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # t = clock().utc()
    rects = detect(gray, cascade)
    vis = img.copy()
    print rects
    		#print rects.shape#, rects.dtype, rects
    new_rects = procOverlap(rects)
    print new_rects.shape
    
    if DEBUG_FLAG == 1:
    	draw_rects(vis, new_rects, (0, 255, 0))
    
    
    if(modifyPath[-1] != '/'):
        modifyPath = modifyPath + '/'
    cv2.imwrite(modifyPath + imagePath.split('/')[-1].split('.')[0] + '_modify.' + imagePath.split('/')[-1].split('.')[-1], vis) 
Example #8
Source File: pySaliencyMap.py    From aim with MIT License
def FMCreateGaussianPyr(self, src):
        dst = list()
        dst.append(src)
        for i in range(1, 9):
            nowdst = cv2.pyrDown(dst[i - 1])
            dst.append(nowdst)

        return dst


    # Taking center-surround differences 
Example #9
Source File: buildpyr.py    From PyCV-time with MIT License
def buildpyr(img_in):

    h = img_in.shape[0]
    w = img_in.shape[1]
    d = img_in.shape[2]
    
    img_out = np.full((h + h/2, w, d), 0, np.uint8)
    img_pyr = img_in
    x, y = 0, 0
    dx, dy = w, h
    
    
    for i in range(10):
        # place image at x, y
        img_out = mix_image(img_out, img_pyr, (x, y))
        
        if i % 2 == 0:
            y = y + dy
        else:
            x = x + dx
        
        dx, dy = dx/2, dy/2
        
        img_pyr = cv2.pyrDown(img_pyr)
    
    
    return img_out 
Example #10
Source File: lappyr.py    From PyCV-time with MIT License
def build_lappyr(img, leveln=6, dtype=np.int16):
    img = dtype(img)
    levels = []
    for i in xrange(leveln-1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img-img1)
        img = next_img
    levels.append(img)
    return levels 
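For context, a Laplacian pyramid built this way (note the Python 2 xrange call and the external getsize helper) can be collapsed back to the original image by upsampling from the coarsest level and adding back the stored residuals. A hedged sketch of that inverse step, assuming the same level ordering:

import cv2

def collapse_lappyr(levels):
    # levels: finest residual first, coarsest low-pass image last
    img = levels[-1]
    for residual in reversed(levels[:-1]):
        h, w = residual.shape[:2]
        img = cv2.pyrUp(img, dstsize=(w, h)) + residual
    return img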
Example #11
Source File: lappyr.py    From PyCV-time with MIT License
def build_lappyr(img, leveln=6, dtype=np.int16):
    img = dtype(img)
    levels = []
    for i in xrange(leveln-1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img-img1)
        img = next_img
    levels.append(img)
    return levels 
Example #12
Source File: 03_pyramid_down_smaple.py    From Practical-Computer-Vision with MIT License
def main():
    # read an image 
    img = cv2.imread('../figures/flower.png')
    print(img.shape)

    lower_resolution1 = cv2.pyrDown(img)
    print(lower_resolution1.shape)

    lower_resolution2 = cv2.pyrDown(lower_resolution1)
    print(lower_resolution2.shape)

    lower_resolution3 = cv2.pyrDown(lower_resolution2)
    print(lower_resolution3.shape)

    higher_resolution3 = cv2.pyrUp(lower_resolution3)
    print(higher_resolution3.shape)

    higher_resolution2 = cv2.pyrUp(higher_resolution3)
    print(higher_resolution2.shape)

    higher_resolution1 = cv2.pyrUp(higher_resolution2)
    print(higher_resolution1.shape)

    # Do plot
    plot_lr_img(img, lower_resolution1, lower_resolution2, lower_resolution3)
    plot_hy_img(lower_resolution3, higher_resolution3, higher_resolution2, higher_resolution1) 
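For a concrete sense of the printed shapes: pyrDown rounds each dimension up when halving, and pyrUp doubles it, so odd sizes do not round-trip exactly. The same behaviour can be checked without the figure file (synthetic image below):

import cv2
import numpy as np

img = np.zeros((600, 800, 3), np.uint8)
shapes = [img.shape]
for _ in range(3):
    img = cv2.pyrDown(img)
    shapes.append(img.shape)
print(shapes)   # [(600, 800, 3), (300, 400, 3), (150, 200, 3), (75, 100, 3)]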
Example #13
Source File: ImageFusion.py    From ImageStitch with MIT License
def GaussianPyramid(self, R, level):
        G = R.copy().astype(np.float64)
        gp = [G]
        for i in range(level):
            G = cv2.pyrDown(G)
            gp.append(G)
        return gp

    # normalize the weight matrix
Example #14
Source File: CVAnalysis.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International
def get_image_diff (img1, img2):
	"""
		Function: get_image_diff
		------------------------
		given two images, this finds the eroded/dilated difference 
		between them on a coarse grain.
		NOTE: assumes both are full-size, color
	"""
	#=====[ Step 1: convert to gray	]=====
	img1_gray = cv2.cvtColor (img1, cv2.COLOR_BGR2GRAY)
	img2_gray = cv2.cvtColor (img2, cv2.COLOR_BGR2GRAY)	

	#=====[ Step 2: downsample 	]=====
	img1_small = cv2.pyrDown(cv2.pyrDown(img1_gray))
	img2_small = cv2.pyrDown(cv2.pyrDown(img2_gray))	

	#=====[ Step 3: find difference	]=====
	difference = img2_small - img1_small

	#=====[ Step 4: erode -> dilate	]=====
	kernel = np.ones ((4, 4), np.uint8)
	difference_ed = cv2.dilate(cv2.erode (difference, kernel), kernel)

	#=====[ Step 5: blow back up	]=====
	return cv2.pyrUp (cv2.pyrUp (difference_ed))






####################################################################################################
##############################[ --- CORNER DETECTION/DESCRIPTION--- ]###############################
#################################################################################################### 
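One detail worth knowing when adapting get_image_diff: subtracting two uint8 NumPy arrays wraps modulo 256 instead of going negative, which may or may not be intended; cv2.absdiff is the usual alternative when only the magnitude of the change matters. A two-line illustration:

import cv2
import numpy as np

a = np.array([[10]], np.uint8)
b = np.array([[20]], np.uint8)
print(a - b)               # [[246]]  (wraps around)
print(cv2.absdiff(a, b))   # [[10]]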
Example #15
Source File: CVAnalysis_old.py    From DE3-ROB1-CHESS with Creative Commons Attribution 4.0 International
def get_image_diff (img1, img2):
	"""
		Function: get_image_diff
		------------------------
		given two images, this finds the eroded/dilated difference 
		between them on a coarse grain.
		NOTE: assumes both are full-size, color
	"""
	#=====[ Step 1: convert to gray	]=====
	img1_gray = cv2.cvtColor (img1, cv2.COLOR_BGR2GRAY)
	img2_gray = cv2.cvtColor (img2, cv2.COLOR_BGR2GRAY)	

	#=====[ Step 2: downsample 	]=====
	img1_small = cv2.pyrDown(cv2.pyrDown(img1_gray))
	img2_small = cv2.pyrDown(cv2.pyrDown(img2_gray))	

	#=====[ Step 3: find difference	]=====
	difference = img2_small - img1_small

	#=====[ Step 4: erode -> dilate	]=====
	kernel = np.ones ((4, 4), np.uint8)
	difference_ed = cv2.dilate(cv2.erode (difference, kernel), kernel)

	#=====[ Step 5: blow back up	]=====
	return cv2.pyrUp (cv2.pyrUp (difference_ed))






####################################################################################################
##############################[ --- CORNER DETECTION/DESCRIPTION--- ]###############################
#################################################################################################### 
Example #16
Source File: util.py    From SPTM with MIT License
def double_downsampling(input):
  return cv2.pyrDown(cv2.pyrDown(input)) 
Example #17
Source File: lappyr.py    From OpenCV-Python-Tutorial with MIT License
def build_lappyr(img, leveln=6, dtype=np.int16):
    img = dtype(img)
    levels = []
    for i in xrange(leveln-1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img-img1)
        img = next_img
    levels.append(img)
    return levels 
Example #18
Source File: keyframes.py    From pyslam with MIT License
def compute_image_pyramid(self, pyrimage):
        """Compute an image pyramid."""

        for pyrlevel in range(self.pyrlevels):
            if pyrlevel == 0:
                im_pyr = [pyrimage]
            else:
                im_pyr.append(cv2.pyrDown(im_pyr[-1]))

        self.im_pyr = [im.astype(float) / 255. for im in im_pyr] 
Example #19
Source File: Pyramids.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    image = cv2.imread("../data/4.2.03.tiff", 1)

    first_layer_down = cv2.pyrDown(image)
    first_layer_up = cv2.pyrUp(first_layer_down)

    laplacian = cv2.subtract(image, first_layer_up)

    cv2.imshow("Original Image", image)
    cv2.imshow("Laplacian Image", laplacian)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
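This subtraction works when the input image has even dimensions; for odd-sized inputs, pyrUp(pyrDown(image)) comes back one pixel larger on each odd side and cv2.subtract would fail on the shape mismatch. A safer variant (a sketch, not part of the original file) passes dstsize explicitly:

import cv2

def laplacian_layer(image):
    down = cv2.pyrDown(image)
    up = cv2.pyrUp(down, dstsize=(image.shape[1], image.shape[0]))
    return cv2.subtract(image, up)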
Example #20
Source File: normalized.py    From virtual-dressing-room with Apache License 2.0
def getRGB(self,rgb):
        self.rgb=rgb
        #self.down=cv2.pyrDown(rgb)
        self.down[:,:,:]=self.rgb[:,:,:]
        
        #print  self.down.shape 
        #self.down.shape=(150,200) 
Example #21
Source File: pySaliencyMap.py    From pliers with BSD 3-Clause "New" or "Revised" License
def FMCreateGaussianPyr(self, src):
        dst = list()
        dst.append(src)
        for i in range(1, 9):
            nowdst = cv2.pyrDown(dst[i-1])
            dst.append(nowdst)
        return dst
    # taking center-surround differences 
Example #22
Source File: multi_band_blending.py    From dual-fisheye-video-stitching with MIT License
def LaplacianPyramid(img, leveln):
    LP = []
    for i in range(leveln - 1):
        next_img = cv2.pyrDown(img)
        LP.append(img - cv2.pyrUp(next_img, img.shape[1::-1]))
        img = next_img
    LP.append(img)
    return LP 
Example #23
Source File: multi_band_blending.py    From dual-fisheye-video-stitching with MIT License
def GaussianPyramid(img, leveln):
    GP = [img]
    for i in range(leveln - 1):
        GP.append(cv2.pyrDown(GP[i]))
    return GP 
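In the blending pipeline these two helpers come from, a Gaussian pyramid of a blend mask typically weights the Laplacian pyramids of the two source images level by level before the result is collapsed. A compressed sketch of that combining step (the two Laplacian pyramids and the float mask pyramid are assumed to be built already, with matching shapes per level):

def blend_pyramids(LPA, LPB, MP):
    # LPA, LPB: Laplacian pyramids of the two images
    # MP: Gaussian pyramid of a float mask in [0, 1], same shape as each image level
    return [la * m + lb * (1.0 - m) for la, lb, m in zip(LPA, LPB, MP)]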
Example #24
Source File: blending.py    From dual-fisheye-video-stitching with MIT License
def LaplacianPyramid(img, leveln):
    LP = []
    for i in range(leveln - 1):
        next_img = cv2.pyrDown(img)
        LP.append(img - cv2.pyrUp(next_img, img.shape[1::-1]))
        img = next_img
    LP.append(img)
    return LP 
Example #25
Source File: blending.py    From dual-fisheye-video-stitching with MIT License
def GaussianPyramid(img, leveln):
    GP = [img]
    for i in range(leveln - 1):
        GP.append(cv2.pyrDown(GP[i]))
    return GP 
Example #26
Source File: TemplateMatchers.py    From lackey with MIT License
def _build_pyramid(self, image, levels):
        """ Returns a list of reduced-size images, from smallest to original size """
        pyramid = [image]
        for l in range(levels-1):
            if any(x < 20 for x in pyramid[-1].shape[:2]):
                break
            pyramid.append(cv2.pyrDown(pyramid[-1]))
        return list(reversed(pyramid)) 
Example #27
Source File: FaceBlurring.py    From ImageProcessingProjects with MIT License
def camshift_face_track():
    face_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_frontalface_default.xml')
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    ALPHA = 0.5

    camera = cv2.VideoCapture(0)
    face_box = None

    #wait till first face box is available
    print "Waiting to get first face frame..."
    while face_box is None:
        grabbed, frame = camera.read()
        if not grabbed:
            raise EnvironmentError("Camera read failed!")
        image_prev = cv2.pyrDown(frame)
        face_box = utils.detect_face(face_cascade, image_prev)

    print "Face found!"
    prev_frames = image_prev.astype(np.float32)
    while (True):
        _, frame = camera.read()
        image_curr = cv2.pyrDown(frame)
        cv2.accumulateWeighted(image_curr, prev_frames, ALPHA)
        image_curr = cv2.convertScaleAbs(prev_frames)
        if face_box is not None:
            face_box = camshift_track(image_curr, face_box, termination)
            cv2.rectangle(image_curr, (face_box[0], face_box[1]), (face_box[0]+face_box[2], face_box[1] + face_box[3]),
                          (255, 0,0), 2)
            # cv2.rectangle(image_curr, (box[0], box[1]), (box[0]+box[2], box[1] + box[3]),
            #               (0, 0,255), 2)

        else:
            face_box = utils.detect_face(face_cascade, image_curr)

        cv2.imshow("Output", image_curr)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break

        elif key & 0xFF == ord('r'):
            print "Reseting face detection!"
            face_box = None 
Example #28
Source File: LiveFourierTransform.py    From image-processing-from-scratch with MIT License
def work_func(vCrop,hCrop,vc,imMin,figid,contrast):
    # read image
    rval = vc.grab()
    rval, im = vc.retrieve()
    im = np.array(im, dtype=float)

    # crop image
    im = im[vCrop[0]: vCrop[1], hCrop[0]: hCrop[1], :]

    # pyramid downscaling
    # im = cv2.pyrDown(im)

    # reduce dimensionality
    im = np.mean(im, axis=2, dtype=float)
    # make sure we have no zeros
    im = (im - im.min()) / (im.max() - im.min())
    im = np.maximum(im, imMin)
    Intensity = np.abs(np.fft.fftshift(np.fft.fft2(im))) ** 2

    Intensity += imMin

    # kill the center lines for higher dynamic range
    # by copying the next row/column
    # h, w = np.shape(Intensity)
    # Intensity[(h / 2 - 1):(h / 2 + 1), :] = Intensity[(h / 2 + 1):(h / 2 + 3), :]
    # Intensity[:, (w / 2 - 1):(w / 2 + 1)] = Intensity[:, (w / 2 + 1):(w / 2 + 3)]

    # running average of contrast
    ##circshift contrast matrix up
    contrast = contrast[np.arange(1, np.size(contrast, 0) + 1) % np.size(contrast, 0), :]
    ##replace bottom values with new values for minimum and maximum
    contrast[-1, :] = [np.min(Intensity), np.max(Intensity)]

    maxContrast = 1
    minContrast = 7   # to be modified
    # openCV draw
    vmin = np.log(contrast[:, 0].mean()) + minContrast
    vmax = np.log(contrast[:, 1].mean()) - maxContrast
    Intensity = (np.log(Intensity + imMin) - vmin) / (vmax - vmin)
    Intensity = Intensity.clip(0., 1.)
    # Intensity = (Intensity - Intensity.min()) / (Intensity.max() - Intensity.min())

    time.sleep(.01)
    cv2.imshow(figid, np.concatenate((im, Intensity), axis=1))

    cv2.waitKey(1)

    return contrast