Python cv2.convexHull() Examples

The following are code examples of cv2.convexHull() drawn from open-source projects. Each example lists its original project and source file above the code. You may also want to check out the other available functions and classes of the cv2 module.
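Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the test image and variable names are purely illustrative) showing the two ways cv2.convexHull() is usually called: with the default returnPoints=True to get the hull vertex coordinates, and with returnPoints=False to get indices into the input contour, which is the form cv2.convexityDefects() expects.

import cv2
import numpy as np

# Synthetic binary image containing one blob with a concave notch.
img = np.zeros((200, 200), dtype=np.uint8)
pts = np.array([[20, 20], [180, 20], [180, 180], [100, 100], [20, 180]], dtype=np.int32)
cv2.fillPoly(img, [pts], 255)

# findContours returns (contours, hierarchy) in OpenCV 4.x and
# (image, contours, hierarchy) in 3.x; [-2] picks the contour list either way.
contours = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = max(contours, key=cv2.contourArea)

# Default form: the hull as an (N, 1, 2) array of vertex coordinates.
hull_points = cv2.convexHull(cnt)

# Index form: the hull as row indices into cnt, required by convexityDefects.
hull_indices = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull_indices)  # may be None for a fully convex contour

print(hull_points.shape, None if defects is None else defects.shape)

Each row of the defects array packs (start_index, end_index, farthest_point_index, fixpt_depth), which is exactly how the finger-counting and arrow-detection examples below unpack s, e, f, d.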
Example #1
Source File: chapter2.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def FindHullDefects(self, segment):
        _,contours,hierarchy = cv2.findContours(segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        # find largest area contour
        max_area = -1
        for i in range(len(contours)):
            area = cv2.contourArea(contours[i])
            if area>max_area:
                cnt = contours[i]
                max_area = area

        cnt = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
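        # returnPoints=False returns indices into cnt, the form cv2.convexityDefects() expects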
        hull = cv2.convexHull(cnt, returnPoints=False)
        defects = cv2.convexityDefects(cnt, hull)

        return [cnt,defects] 
Example #2
Source File: new.py    From Fingers-Detection-using-OpenCV-and-Python with MIT License
def calculateFingers(res,drawing):  # -> finished bool, cnt: finger count
    #  convexity defect
    hull = cv2.convexHull(res, returnPoints=False)
    if len(hull) > 3:
        defects = cv2.convexityDefects(res, hull)
        if defects is not None:  # convexityDefects can return None; guard against it

            cnt = 0
            for i in range(defects.shape[0]):  # calculate the angle
                s, e, f, d = defects[i][0]
                start = tuple(res[s][0])
                end = tuple(res[e][0])
                far = tuple(res[f][0])
                a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
                b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
                c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
                angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))  # cosine theorem
                if angle <= math.pi / 2:  # angle less than 90 degrees, treat as fingers
                    cnt += 1
                    cv2.circle(drawing, far, 8, [211, 84, 0], -1)
            return True, cnt
    return False, 0


# Camera 
Example #3
Source File: Ring_Detector.py    From Precland with GNU General Public License v3.0
def fit_circle(self, contour, eccentricity, area_ratio, min_radius=0, max_radius=500000):
		#convert to convex hull
		hull = cv2.convexHull(contour)
		min_area = math.pi * min_radius * min_radius
		max_area = math.pi * max_radius * max_radius
		c_area = cv2.contourArea(hull)

		#check for a shape of a certain size and corner resolution
		if len(hull) > 4:

			#fit an ellipse
			ellipse = cv2.fitEllipse(hull)
			radius = int((ellipse[1][0] + ellipse[1][1]) /4.0)
			#check for a circular ellipse
			if ellipse[1][0] * 1.0/ ellipse[1][1] > eccentricity and max_radius > radius > min_radius:
				#compare area of raw hull vs area of ellipse to eliminate objects with corners
				e_area = (ellipse[1][0]/2.0) * (ellipse[1][1]/2.0) * math.pi
				if (c_area / e_area) > area_ratio:
					center = Point(int(ellipse[0][0]), int(ellipse[0][1]))
					radius = int((ellipse[1][0] + ellipse[1][1]) /4.0) #average the two axes and halve the diameter -> radius
					return Circle(center,radius,contour,ellipse)
		return None 
Example #4
Source File: FingerDetection.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def manage_image_opr(frame, hand_hist):
    hist_mask_image = hist_masking(frame, hand_hist)

    hist_mask_image = cv2.erode(hist_mask_image, None, iterations=2)
    hist_mask_image = cv2.dilate(hist_mask_image, None, iterations=2)

    contour_list = contours(hist_mask_image)
    max_cont = max(contour_list, key=cv2.contourArea)

    if max_cont is not None:
        cnt_centroid = centroid(max_cont)
        cv2.circle(frame, cnt_centroid, 5, [255, 0, 255], -1)

        hull = cv2.convexHull(max_cont, returnPoints=False)
        defects = cv2.convexityDefects(max_cont, hull)
        far_point = farthest_point(defects, max_cont, cnt_centroid)
        print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point))
        cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
        if len(traverse_point) < 20:
            traverse_point.append(far_point)
        else:
            traverse_point.pop(0)
            traverse_point.append(far_point)

        draw_circles(frame, traverse_point) 
Example #5
Source File: read_human36m.py    From human_dynamics with BSD 2-Clause "Simplified" License
def crop_image(silhs):
    res = np.asarray(silhs).any(axis=0)
    cnts, hier = cv2.findContours(
        np.uint8(res) * 255, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    """
    checks = []
    for cnt in cnts:
        kk = np.zeros((1000, 1000, 3), dtype=np.uint8)
        hull = cv2.convexHull(cnt)
        cv2.drawContours(kk, [cnt], 0, (255,255,255), -1)
        checks.append(kk)
    """

    max_id = 0
    max_length = len(cnts[0])
    for i in range(1, len(cnts)):
        if len(cnts[i]) > max_length:
            max_id = i
            max_length = len(cnts[i])

    (x, y, w, h) = cv2.boundingRect(cnts[max_id])
    return (x, y, w, h) 
Example #6
Source File: mesh.py    From plumo with BSD 3-Clause "New" or "Revised" License
def fill_convex (image):
    H, W = image.shape
    padded = np.zeros((H+20, W+20), dtype=np.uint8)
    padded[10:(10+H),10:(10+W)] = image

    contours = measure.find_contours(padded, 0.5)
    if len(contours) == 0:
        return image
    if len(contours) == 1:
        contour = contours[0]
    else:
        contour = np.vstack(contours)
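    # measure.find_contours gives float (row, col) coordinates; swap to integer (x, y) points for OpenCV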
    cc = np.zeros_like(contour, dtype=np.int32)
    cc[:,0] = contour[:, 1]
    cc[:,1] = contour[:, 0]
    hull = cv2.convexHull(cc)
    contour = hull.reshape((1, -1, 2)) 
    cv2.fillPoly(padded, contour, 1)
    return padded[10:(10+H),10:(10+W)] 
Example #7
Source File: ImageProcessing.py    From FaceSwap with MIT License
def blendImages(src, dst, mask, featherAmount=0.2):
    #indices of the mask's non-black pixels
    maskIndices = np.where(mask != 0)
    #the same indices, but stacked into a single matrix where each row is one pixel (x, y)
    maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis]))
    faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0)
    featherAmount = featherAmount * np.max(faceSize)

    hull = cv2.convexHull(maskPts)
    dists = np.zeros(maskPts.shape[0])
    for i in range(maskPts.shape[0]):
        dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True)

    weights = np.clip(dists / featherAmount, 0, 1)

    composedImg = np.copy(dst)
    composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]]

    return composedImg

#note: here src is the image the colour will be taken from 
Example #9
Source File: generate_sub_final_ensemble.py    From kaggle_carvana_segmentation with MIT License
def check_if_top_is_unreliable(mean_pred, albu_pred):
    unreliable = np.zeros_like(albu_pred)
    rows, cols = unreliable.shape
    unreliable[(albu_pred > 30) & (albu_pred < 210)] = 255
    unreliable = cv2.erode(unreliable, (55, 55), iterations=10)
    unreliable = unreliable[0:rows // 2, ...]
    biggest = biggest_contour(unreliable)
    if biggest is None:
        return None
    if cv2.contourArea(biggest) > 40000:
        x, y, w, h = cv2.boundingRect(biggest)
        x, y, w, h = max(x - 50, 0), y - 50, w + 100, h + 100
        mask = (albu_pred > 55).astype(np.uint8) * 255
        c = biggest_contour(mask[y:y + h, x:x + w])
        c = cv2.convexHull(c)
        mask[y:y + h, x:x + w] = cv2.drawContours(mask[y:y + h, x:x + w], [c], -1, 255, -1)
        result = (mean_pred > 127).astype(np.uint8) * 255
        result[y:y + h, x:x + w] = mask[y:y + h, x:x + w]
        return result
    return None 
Example #10
Source File: ImageProcessing.py    From DAMDNet with Apache License 2.0
def blendImages(src, dst, mask, featherAmount=0.2):
    #indices of the mask's non-black pixels
    maskIndices = np.where(mask != 0)
    #the same indices, but stacked into a single matrix where each row is one pixel (x, y)
    maskPts = np.hstack((maskIndices[1][:, np.newaxis], maskIndices[0][:, np.newaxis]))
    faceSize = np.max(maskPts, axis=0) - np.min(maskPts, axis=0)
    featherAmount = featherAmount * np.max(faceSize)

    hull = cv2.convexHull(maskPts)
    dists = np.zeros(maskPts.shape[0])
    for i in range(maskPts.shape[0]):
        dists[i] = cv2.pointPolygonTest(hull, (maskPts[i, 0], maskPts[i, 1]), True)

    weights = np.clip(dists / featherAmount, 0, 1)

    composedImg = np.copy(dst)
    composedImg[maskIndices[0], maskIndices[1]] = weights[:, np.newaxis] * src[maskIndices[0], maskIndices[1]] + (1 - weights[:, np.newaxis]) * dst[maskIndices[0], maskIndices[1]]

    return composedImg

#note: here src is the image the colour will be taken from 
Example #11
Source File: LandmarksProcessor.py    From DeepFaceLab with GNU General Public License v3.0
def get_image_hull_mask (image_shape, image_landmarks, eyebrows_expand_mod=1.0 ):
    hull_mask = np.zeros(image_shape[0:2]+(1,),dtype=np.float32)

    lmrks = expand_eyebrows(image_landmarks, eyebrows_expand_mod)

    r_jaw = (lmrks[0:9], lmrks[17:18])
    l_jaw = (lmrks[8:17], lmrks[26:27])
    r_cheek = (lmrks[17:20], lmrks[8:9])
    l_cheek = (lmrks[24:27], lmrks[8:9])
    nose_ridge = (lmrks[19:25], lmrks[8:9],)
    r_eye = (lmrks[17:22], lmrks[27:28], lmrks[31:36], lmrks[8:9])
    l_eye = (lmrks[22:27], lmrks[27:28], lmrks[31:36], lmrks[8:9])
    nose = (lmrks[27:31], lmrks[31:36])
    parts = [r_jaw, l_jaw, r_cheek, l_cheek, nose_ridge, r_eye, l_eye, nose]

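    # each landmark group is filled with its own convex hull; the union of the parts keeps the non-convex face outline a single whole-face hull would lose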
    for item in parts:
        merged = np.concatenate(item)
        cv2.fillConvexPoly(hull_mask, cv2.convexHull(merged), (1,) )

    return hull_mask 
Example #12
Source File: LandmarksProcessor.py    From DeepFaceLab with GNU General Public License v3.0
def get_image_eye_mask (image_shape, image_landmarks):
    if len(image_landmarks) != 68:
        raise Exception('get_image_eye_mask works only with 68 landmarks')

    h,w,c = image_shape

    hull_mask = np.zeros( (h,w,1),dtype=np.float32)

    image_landmarks = image_landmarks.astype(np.int32)  # np.int is deprecated/removed in newer NumPy

    cv2.fillConvexPoly( hull_mask, cv2.convexHull( image_landmarks[36:42]), (1,) )
    cv2.fillConvexPoly( hull_mask, cv2.convexHull( image_landmarks[42:48]), (1,) )

    dilate = h // 32
    hull_mask = cv2.dilate(hull_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(dilate,dilate)), iterations = 1 )

    blur = h // 16
    blur = blur + (1-blur % 2)
    hull_mask = cv2.GaussianBlur(hull_mask, (blur, blur) , 0)
    hull_mask = hull_mask[...,None]

    return hull_mask 
Example #13
Source File: AIMakeup.py    From AIMakeup with Apache License 2.0
def get_forehead_landmark(self,im_bgr,face_landmark,mask_organs,mask_nose):
        '''
        Compute the forehead landmark coordinates.
        '''
        #draw an ellipse
        radius=(np.linalg.norm(face_landmark[0]-face_landmark[16])/2).astype('int32')
        center_abs=tuple(((face_landmark[0]+face_landmark[16])/2).astype('int32'))
        
        angle=np.degrees(np.arctan((lambda l:l[1]/l[0])(face_landmark[16]-face_landmark[0]))).astype('int32')
        mask=np.zeros(mask_organs.shape[:2], dtype=np.float64)
        cv2.ellipse(mask,center_abs,(radius,radius),angle,180,360,1,-1)
        #remove the parts overlapping the facial features
        mask[mask_organs[:,:,0]>0]=0
        #use the nose skin tone to determine the true forehead area
        index_bool=[]
        for ch in range(3):
            mean,std=np.mean(im_bgr[:,:,ch][mask_nose[:,:,ch]>0]),np.std(im_bgr[:,:,ch][mask_nose[:,:,ch]>0])
            up,down=mean+0.5*std,mean-0.5*std
            index_bool.append((im_bgr[:,:,ch]<down)|(im_bgr[:,:,ch]>up))
        index_zero=((mask>0)&index_bool[0]&index_bool[1]&index_bool[2])
        mask[index_zero]=0
        index_abs=np.array(np.where(mask>0)[::-1]).transpose()
        landmark=cv2.convexHull(index_abs).squeeze()
        return landmark 
Example #14
Source File: blur_face.py    From snapchat-filters-opencv with MIT License
def blur(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 0)

    mask = np.zeros(image.shape[:2], np.uint8)
    blurred_image = image.copy()
    for face in faces:  # if there are faces
        (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
        blurred_image[y : y + h, x : x + w, :] = anonymize_face_pixelate(
            blurred_image[y : y + h, x : x + w, :], blocks=10
        )
        # *** Facial Landmarks detection
        shape = predictor(gray, face)
        shape = face_utils.shape_to_np(shape)
        # Get mask with only face shape
        shape = cv2.convexHull(shape)
        cv2.drawContours(mask, [shape], -1, 255, -1)

        # Replace blurred image only in mask
        mask = mask / 255.0
        mask = np.expand_dims(mask, axis=-1)
        image = (1.0 - mask) * image + mask * blurred_image
        image = image.astype(np.uint8)

    return image 
Example #15
Source File: find_arrows.py    From PyCV-time with MIT License
def isArrow(heptagon):
    hull = cv2.convexHull(heptagon, returnPoints = False)

    if len(hull) > 2:
        defects = cv2.convexityDefects(heptagon, hull)
        if defects is None or len(defects) != 2: 
            return False
      
        farpoints = [d[0][2] for d in defects]    
        if not np.abs(farpoints[0] - farpoints[1]) in [3, 4]:
            return False

        for defect in defects:
            s, e, f, d = defect[0]
            #    print defects
            #    s, e, f, d = defect[0]
            ps = heptagon[s, 0]
            pe = heptagon[e, 0]
            pd = heptagon[f, 0]
            if angle(ps, pd, pe) < 120:
                return True    

        return False 
Example #16
Source File: data_preprocessing_autoencoder.py    From AVSR-Deep-Speech with GNU General Public License v2.0
def visualize(frame, coordinates_list, alpha = 0.80, color=[255, 255, 255]):
	"""
	Args:
		1. frame:				OpenCV's image which has to be visualized.
		2. coordinates_list:	List of coordinates which will be visualized in the given `frame`
		3. alpha, color:		Some parameters which help in visualizing properly. 
								A convex hull will be shown for each element in the `coordinates_list` 
	"""
	layer = frame.copy()
	output = frame.copy()

	for coordinates in coordinates_list:
		c_hull = cv2.convexHull(coordinates)
		cv2.drawContours(layer, [c_hull], -1, color, -1)

	cv2.addWeighted(layer, alpha, output, 1 - alpha, 0, output)
	cv2.imshow("Output", output) 
Example #17
Source File: losses_win.py    From R3Det_Tensorflow with MIT License
def iou_rotate_calculate2(boxes1, boxes2):
    ious = []
    if boxes1.shape[0] != 0:
        area1 = boxes1[:, 2] * boxes1[:, 3]
        area2 = boxes2[:, 2] * boxes2[:, 3]

        for i in range(boxes1.shape[0]):
            temp_ious = []
            r1 = ((boxes1[i][0], boxes1[i][1]), (boxes1[i][2], boxes1[i][3]), boxes1[i][4])
            r2 = ((boxes2[i][0], boxes2[i][1]), (boxes2[i][2], boxes2[i][3]), boxes2[i][4])

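            # the intersection polygon's vertices are not ordered; convexHull orders them so contourArea gives the true overlap area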
            int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
            if int_pts is not None:
                order_pts = cv2.convexHull(int_pts, returnPoints=True)

                int_area = cv2.contourArea(order_pts)

                inter = int_area * 1.0 / (area1[i] + area2[i] - int_area)
                temp_ious.append(inter)
            else:
                temp_ious.append(0.0)
            ious.append(temp_ious)

    return np.array(ious, dtype=np.float32) 
Example #18
Source File: whale176-rectangle.py    From PyCV-time with MIT License
def isArrow(heptagon):
    hull = cv2.convexHull(heptagon, returnPoints=False)

    if len(hull) > 2:
        defects = cv2.convexityDefects(heptagon, hull)
        if defects is None or len(defects) != 2:
            return False

        farpoints = [d[0][2] for d in defects]
        if not np.abs(farpoints[0] - farpoints[1]) in [3, 4]:
            return False

        for defect in defects:
            s, e, f, d = defect[0]
            #    print defects
            #    s, e, f, d = defect[0]
            ps = heptagon[s, 0]
            pe = heptagon[e, 0]
            pd = heptagon[f, 0]
            if angle(ps, pd, pe) < 120:
                return True

        return False 
Example #19
Source File: morpher.py    From face_merge_master with Apache License 2.0
def merge_img(src_img, dst_img, dst_matrix, dst_points, k_size=None, mat_multiple=None):
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)

    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))

    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))

    if k_size:
        face_mask = cv2.blur(face_mask, k_size, center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE) 
Example #20
Source File: whale176-5angle.py    From PyCV-time with MIT License
def isArrow(heptagon):
    hull = cv2.convexHull(heptagon, returnPoints=False)

    if len(hull) > 2:
        defects = cv2.convexityDefects(heptagon, hull)
        if defects is None or len(defects) != 2:
            return False

        farpoints = [d[0][2] for d in defects]
        if not np.abs(farpoints[0] - farpoints[1]) in [3, 4]:
            return False

        for defect in defects:
            s, e, f, d = defect[0]
            #    print defects
            #    s, e, f, d = defect[0]
            ps = heptagon[s, 0]
            pe = heptagon[e, 0]
            pd = heptagon[f, 0]
            if angle(ps, pd, pe) < 120:
                return True

        return False 
Example #21
Source File: whale176-circle.py    From PyCV-time with MIT License
def isArrow(heptagon):
    hull = cv2.convexHull(heptagon, returnPoints=False)

    if len(hull) > 2:
        defects = cv2.convexityDefects(heptagon, hull)
        if defects is None or len(defects) != 2:
            return False

        farpoints = [d[0][2] for d in defects]
        if not np.abs(farpoints[0] - farpoints[1]) in [3, 4]:
            return False

        for defect in defects:
            s, e, f, d = defect[0]
            #    print defects
            #    s, e, f, d = defect[0]
            ps = heptagon[s, 0]
            pe = heptagon[e, 0]
            pd = heptagon[f, 0]
            if angle(ps, pd, pe) < 120:
                return True

        return False 
Example #22
Source File: whale176-triangle.py    From PyCV-time with MIT License
def isArrow(heptagon):
    hull = cv2.convexHull(heptagon, returnPoints=False)

    if len(hull) > 2:
        defects = cv2.convexityDefects(heptagon, hull)
        if defects is None or len(defects) != 2:
            return False

        farpoints = [d[0][2] for d in defects]
        if not np.abs(farpoints[0] - farpoints[1]) in [3, 4]:
            return False

        for defect in defects:
            s, e, f, d = defect[0]
            #    print defects
            #    s, e, f, d = defect[0]
            ps = heptagon[s, 0]
            pe = heptagon[e, 0]
            pd = heptagon[f, 0]
            if angle(ps, pd, pe) < 120:
                return True

        return False 
Example #23
Source File: morpher.py    From yry with Apache License 2.0
def merge_img(src_img, dst_img, dst_matrix, dst_points, blur_detail_x=None, blur_detail_y=None, mat_multiple=None):
    face_mask = np.zeros(src_img.shape, dtype=src_img.dtype)

    for group in core.OVERLAY_POINTS:
        cv2.fillConvexPoly(face_mask, cv2.convexHull(dst_matrix[group]), (255, 255, 255))

    r = cv2.boundingRect(np.float32([dst_points[:core.FACE_END]]))

    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))

    if mat_multiple:
        mat = cv2.getRotationMatrix2D(center, 0, mat_multiple)
        face_mask = cv2.warpAffine(face_mask, mat, (face_mask.shape[1], face_mask.shape[0]))

    if blur_detail_x and blur_detail_y:
        face_mask = cv2.blur(face_mask, (blur_detail_x, blur_detail_y), center)

    return cv2.seamlessClone(np.uint8(dst_img), src_img, face_mask, center, cv2.NORMAL_CLONE) 
Example #24
Source File: aligner.py    From faceswap with GNU General Public License v3.0
def get_feature_mask(aligned_landmarks_68, size, padding=0, dilation=30):
        """ Return the face feature mask """
        logger.trace("aligned_landmarks_68: %s, size: %s, padding: %s, dilation: %s",
                     aligned_landmarks_68, size, padding, dilation)
        scale = size - 2 * padding
        translation = padding
        pad_mat = np.matrix([[scale, 0.0, translation], [0.0, scale, translation]])
        aligned_landmarks_68 = np.expand_dims(aligned_landmarks_68, axis=1)
        aligned_landmarks_68 = cv2.transform(aligned_landmarks_68,
                                             pad_mat,
                                             aligned_landmarks_68.shape)
        aligned_landmarks_68 = np.squeeze(aligned_landmarks_68)
        l_eye_points = aligned_landmarks_68[42:48].tolist()
        l_brow_points = aligned_landmarks_68[22:27].tolist()
        r_eye_points = aligned_landmarks_68[36:42].tolist()
        r_brow_points = aligned_landmarks_68[17:22].tolist()
        nose_points = aligned_landmarks_68[27:36].tolist()
        chin_points = aligned_landmarks_68[8:11].tolist()
        mouth_points = aligned_landmarks_68[48:68].tolist()
        # TODO remove excessive reshapes and flattens

        l_eye = np.array(l_eye_points + l_brow_points).reshape((-1, 2)).astype('int32').flatten()
        r_eye = np.array(r_eye_points + r_brow_points).reshape((-1, 2)).astype('int32').flatten()
        mouth = np.array(mouth_points + nose_points + chin_points)
        mouth = mouth.reshape((-1, 2)).astype('int32').flatten()
        l_eye_hull = cv2.convexHull(l_eye.reshape((-1, 2)))
        r_eye_hull = cv2.convexHull(r_eye.reshape((-1, 2)))
        mouth_hull = cv2.convexHull(mouth.reshape((-1, 2)))

        mask = np.zeros((size, size, 3), dtype=float)
        cv2.fillConvexPoly(mask, l_eye_hull, (1, 1, 1))
        cv2.fillConvexPoly(mask, r_eye_hull, (1, 1, 1))
        cv2.fillConvexPoly(mask, mouth_hull, (1, 1, 1))

        if dilation > 0:
            kernel = np.ones((dilation, dilation), np.uint8)
            mask = cv2.dilate(mask, kernel, iterations=1)

        logger.trace("Returning: %s", mask)
        return mask 
Example #25
Source File: extended.py    From faceswap with GNU General Public License v3.0
def predict(self, batch):
        """ Run model to get predictions """
        for mask, face in zip(batch["feed"], batch["detected_faces"]):
            parts = self.parse_parts(np.array(face.feed_landmarks))
            for item in parts:
                item = np.rint(np.concatenate(item)).astype("int32")
                hull = cv2.convexHull(item)
                cv2.fillConvexPoly(mask, hull, 1.0, lineType=cv2.LINE_AA)
        batch["prediction"] = batch["feed"]
        return batch 
Example #26
Source File: faceswapper.py    From FaceSwapper with Apache License 2.0
def draw_convex_hull(self, im, points, color):
        '''
        Fill the convex hull of the given points as a polygon.
        '''
        points = cv2.convexHull(points)
        cv2.fillConvexPoly(im, points, color=color) 
Example #27
Source File: setup.py    From multimodal-vae-public with MIT License
def visualize_facial_landmarks(image, shape, colors=None):
    # create two copies of the input image -- one for the
    # overlay and one for the final output image
    overlay = np.ones_like(image) * 255

    # loop over the facial landmark regions individually
    for (i, name) in enumerate(FACIAL_LANDMARKS_IDXS.keys()):
        # grab the (x, y)-coordinates associated with the
        # face landmark
        (j, k) = FACIAL_LANDMARKS_IDXS[name]
        pts = shape[j:k]
 
        # check if we are supposed to draw the jawline
        if name == "jaw":
            # since the jawline is a non-enclosed facial region,
            # just draw lines between the (x, y)-coordinates
            for l in range(1, len(pts)):
                ptA = tuple(pts[l - 1])
                ptB = tuple(pts[l])
                cv2.line(overlay, ptA, ptB, (0, 0, 0), 2)
 
        # otherwise, compute the convex hull of the facial
        # landmark coordinates points and display it
        else:
            hull = cv2.convexHull(pts)
            cv2.drawContours(overlay, [hull], -1, (0, 0, 0), -1)

    return overlay 
Example #28
Source File: components.py    From faceswap with GNU General Public License v3.0
def predict(self, batch):
        """ Run model to get predictions """
        for mask, face in zip(batch["feed"], batch["detected_faces"]):
            parts = self.parse_parts(np.array(face.feed_landmarks))
            for item in parts:
                item = np.rint(np.concatenate(item)).astype("int32")
                hull = cv2.convexHull(item)
                cv2.fillConvexPoly(mask, hull, 1.0, lineType=cv2.LINE_AA)
        batch["prediction"] = batch["feed"]
        return batch 
Example #29
Source File: monkey.py    From ATX with Apache License 2.0
def do_touch(self):
        width, height = 1080, 1920

        screen = self.device.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w // 2, h // 2))  # integer division so dsize stays integral
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        edges = cv2.Canny(gray, 80, 200)
        _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
        contours, _ = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        contours.sort(key=lambda cnt: len(cnt), reverse=True)

        rects = []
        for cnt in contours:
            hull = cv2.convexHull(cnt)
            hull_area = cv2.contourArea(hull)
            x,y,w,h = cv2.boundingRect(cnt)
            rect_area = float(w*h)
            if w<20 or h<20 or rect_area<100:
                continue
            if hull_area/rect_area < 0.50:
                continue
            rects.append((x, y, x+w, y+h))
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)

        if not rects:
            x, y = randint(1, width), randint(1, height)
        else:
            x1, y1, x2, y2 = choice(rects)
            x, y = randint(x1, x2), randint(y1, y2)
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

        x, y = self.device.screen2touch(x*2, y*2)
        self.device.touch(x, y)
        cv2.imshow('img', img)
        cv2.waitKey(1) 
Example #30
Source File: landmarks.py    From photo-a-day-aligner with MIT License
def draw_convex_hull(im, points, color):
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)