Python imutils.face_utils.shape_to_np() Examples

The following are 10 code examples of imutils.face_utils.shape_to_np(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module imutils.face_utils, or try the search function.
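Before the examples, a minimal usage sketch (not taken from any of the projects below): detect a face with dlib, predict 68 landmarks, and convert the result to a NumPy array. The predictor path is an assumption; the model file can be downloaded from dlib.net.

import cv2
import dlib
from imutils import face_utils

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

image = cv2.imread("face.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
for rect in detector(gray, 1):
    shape = predictor(gray, rect)            # dlib full_object_detection
    coords = face_utils.shape_to_np(shape)   # (68, 2) array of (x, y) ints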
Example #1
Source File: input_static_pic_to_gif2_for_class.py    From face-detection-induction-course with MIT License
def orientation(self):
        """
        人脸定位
        :return:
        """
        faces = []
        for rect in self.rects:
            face = {}
            face_shades_width = rect.right() - rect.left()
            predictor_shape = self.predictor(self.img_gray, rect)
            face_shape = face_utils.shape_to_np(predictor_shape)

            face['cigarette'] = self.get_cigarette_info(face_shape, face_shades_width)
            face['glasses'] = self.get_glasses_info(face_shape, face_shades_width)

            faces.append(face)

        return faces 
Example #2
Source File: input_video_stream_paste_mask.py    From face-detection-induction-course with MIT License
def orientation(self, rects, img_gray):
        """
        人脸定位
        :return:
        """
        faces = []
        for rect in rects:
            face = {}
            face_shades_width = rect.right() - rect.left()
            predictor_shape = self.predictor(img_gray, rect)
            face_shape = face_utils.shape_to_np(predictor_shape)
            face['cigarette'] = self.get_cigarette_info(face_shape, face_shades_width)
            face['glasses'] = self.get_glasses_info(face_shape, face_shades_width)

            faces.append(face)

        return faces 
Example #3
Source File: blur_face.py    From snapchat-filters-opencv with MIT License
def blur(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 0)

    blurred_image = image.copy()
    for face in faces:  # for each detected face
        # build a fresh single-channel mask per face; reusing one mask across
        # iterations would fail once it is converted to float below
        mask = np.zeros(image.shape[:2], np.uint8)
        (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
        blurred_image[y : y + h, x : x + w, :] = anonymize_face_pixelate(
            blurred_image[y : y + h, x : x + w, :], blocks=10
        )
        # Facial landmark detection
        shape = predictor(gray, face)
        shape = face_utils.shape_to_np(shape)
        # Get mask with only face shape
        shape = cv2.convexHull(shape)
        cv2.drawContours(mask, [shape], -1, 255, -1)

        # Replace blurred image only in mask
        mask = mask / 255.0
        mask = np.expand_dims(mask, axis=-1)
        image = (1.0 - mask) * image + mask * blurred_image
        image = image.astype(np.uint8)

    return image 
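Example #3 relies on anonymize_face_pixelate(), defined elsewhere in the repository. A plausible sketch of such a helper, assuming the common approach of splitting the region into a blocks x blocks grid and filling each cell with its mean color:

import cv2
import numpy as np

def anonymize_face_pixelate(image, blocks=3):
    # divide the input region into an N x N grid
    (h, w) = image.shape[:2]
    x_steps = np.linspace(0, w, blocks + 1, dtype="int")
    y_steps = np.linspace(0, h, blocks + 1, dtype="int")
    for i in range(1, len(y_steps)):
        for j in range(1, len(x_steps)):
            # fill each block with its mean BGR color
            start_x, start_y = int(x_steps[j - 1]), int(y_steps[i - 1])
            end_x, end_y = int(x_steps[j]), int(y_steps[i])
            roi = image[start_y:end_y, start_x:end_x]
            (b, g, r) = [int(c) for c in cv2.mean(roi)[:3]]
            cv2.rectangle(image, (start_x, start_y), (end_x, end_y), (b, g, r), -1)
    return image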
Example #4
Source File: FaceRecognizer.py    From FaceRecognition with GNU General Public License v3.0
def shape_to_np(shape, dtype="int"):
    # initialize the array of (x, y)-coordinates with zeros
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)

    # loop over all of the facial landmarks and convert each
    # to a 2-tuple of (x, y)-coordinates
    for i in range(0, shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)

    return coords

# construct the argument parser

# if you want to pass arguments when running the script,
# follow the format shown below
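The argument-parsing code itself is not shown on this page. A hypothetical block in the usual style (the flag names are assumptions, not necessarily the project's actual interface):

import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to dlib's facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
                help="path to the input image")
args = vars(ap.parse_args())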
Example #5
Source File: easy_facial_recognition.py    From easy_facial_recognition with MIT License
def encode_face(image):
    face_locations = face_detector(image, 1)
    face_encodings_list = []
    landmarks_list = []
    for face_location in face_locations:
        # DETECT FACES
        shape = pose_predictor_68_point(image, face_location)
        face_encodings_list.append(np.array(face_encoder.compute_face_descriptor(image, shape, num_jitters=1)))
        # GET LANDMARKS
        shape = face_utils.shape_to_np(shape)
        landmarks_list.append(shape)
    face_locations = transform(image, face_locations)
    return face_encodings_list, face_locations, landmarks_list 
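The globals used in Example #5 (face_detector, pose_predictor_68_point, face_encoder) are set up elsewhere in the script. A presumed setup, with assumed model paths (both model files are distributed by dlib):

import dlib

face_detector = dlib.get_frontal_face_detector()
pose_predictor_68_point = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
face_encoder = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")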
Example #6
Source File: rec-feat.py    From Facial-Recognition-using-Facenet with MIT License
def recognize():
    database = initialize()
    cap = cv2.VideoCapture(0)
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
    while True:
        ret, img = cap.read()
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        subjects = detector(gray, 0)
        for subject in subjects:
            shape = predictor(gray, subject)
            shape = face_utils.shape_to_np(shape)  # converting to NumPy Array
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(img, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(img, [rightEyeHull], -1, (0, 255, 0), 1)
            extract_face_info(img, img_rgb, database, ear)
        cv2.imshow('Recognizing faces', img)
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows() 
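eye_aspect_ratio() in Example #6 is defined elsewhere in the project. The conventional definition (Soukupová and Čech, 2016) compares the two vertical eye-landmark distances to the horizontal one; a sketch assuming the 6-point eye arrays sliced above:

from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical distances between the upper and lower eyelid landmarks
    a = dist.euclidean(eye[1], eye[5])
    b = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    c = dist.euclidean(eye[0], eye[3])
    return (a + b) / (2.0 * c)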
Example #7
Source File: setup.py    From multimodal-vae-public with MIT License
def build_mask_dataset(in_dir, out_dir, model_path):
    """Generate a dataset of segmentation masks from images.

    @param in_dir: string
                   input directory of images.
    @param out_dir: string
                    output directory of images.
    @param model_path: string
                       path to HOG model for facial features.
    """
    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(model_path)

    image_paths = os.listdir(in_dir)
    n_images = len(image_paths)
    for i, image_path in enumerate(image_paths):
        print('Building face-mask dataset: [%d/%d] images.' % (i + 1, n_images))
        image_full_path = os.path.join(in_dir, image_path)

        image = cv2.imread(image_full_path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale image
        rects = detector(gray, 1)
        try:
            rect = rects[0]  # we are only going to use the first one

            # determine the facial landmarks for the face region, then
            # convert the landmark (x, y)-coordinates to a NumPy array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            output = visualize_facial_landmarks(image, shape)
            cv2.imwrite(os.path.join(out_dir, image_path), output)
        except IndexError:
            # no face was detected; write out a blank (white) image instead
            output = np.ones_like(image) * 255
            cv2.imwrite(os.path.join(out_dir, image_path), output) 
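visualize_facial_landmarks() here presumably refers to the helper of the same name in imutils.face_utils, which overlays colored landmark regions on the image. If so, a direct call would look like:

from imutils import face_utils

# alpha controls the transparency of the colored overlay
output = face_utils.visualize_facial_landmarks(image, shape, alpha=0.75)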
Example #8
Source File: detect.py    From Animoji-Animate with MIT License
def get_marks(gray, face_coord):
    """
    Arguments:
        gray: grayscale image array
        face_coord: rectangle coordinates for one face
    Returns:
        array of coordinates of facial landmarks scaled to range [-0.5, 0.5]
    """
    shape = face_marks(gray, face_coord)
    shape = face_utils.shape_to_np(shape).astype(float)
    shape[:, 0] = (shape[:, 0] - face_coord.left()) / face_coord.width() - 0.5
    shape[:, 1] = (shape[:, 1] - face_coord.top()) / face_coord.height() - 0.5
    return shape 
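Hypothetical usage of get_marks(), assuming `detector` is a dlib frontal face detector and `face_marks` is a dlib 68-point shape predictor (both names and the model path are assumptions):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
face_marks = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

frame = cv2.imread("face.jpg")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for face_coord in detector(gray, 0):
    marks = get_marks(gray, face_coord)  # (68, 2) floats in [-0.5, 0.5]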
Example #9
Source File: face_utilities.py    From Heart-rate-measurement-using-camera with Apache License 2.0
def get_landmarks(self, frame, type):
        '''
        Get all facial landmarks in a face.

        Args:
            frame (cv2 image): the original frame, in BGR format (as read by OpenCV).
            type (str): "5" or "68", selecting which facial landmark model to load.

        Outputs:
            shape (array): facial landmark coordinates as (x, y) pairs
            rects: the face rectangles found by the detector
        '''
        if self.predictor is None:
            print("[INFO] load " + type + " facial landmarks model ...")
            self.predictor = dlib.shape_predictor("../shape_predictor_" + type + "_face_landmarks.dat")
            print("[INFO] Load model - DONE!")
        
        if frame is None:
            return None, None
        # every face could be resized to a fixed size, e.g. width = 200
        # face = imutils.resize(face, width=200)
        # the landmark predictor expects a grayscale image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = self.face_detection(frame)
        
        if len(rects) == 0:
            return None, None
            
        shape = self.predictor(gray, rects[0])
        shape = face_utils.shape_to_np(shape)
        
        # shape holds 68 (x, y) pairs, one per landmark; to draw them:
        # for (x, y) in shape: cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
        
        return shape, rects 
Example #10
Source File: FeatureExtractor.py    From adviser with GNU General Public License v3.0
def extract_fl_features(self, video_input, user_acts):
        """TODO

        Returns:
            dict: TODO
        """
        def _distance(a, b):
            return np.linalg.norm(a-b)
        print(f'VIDEO FEATURE ENTER, len(video_input): {len(video_input)}')
        features = []
        aggregated_feats = None
        for frame in video_input[::2]:  # process every second frame
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            frame = self.CLAHE.apply(frame)
            faces = self.DETECTOR(frame, 1)
            if len(faces) > 0:  # at least one face detected
                landmarks = self.PREDICTOR(frame, faces[0])
                landmarks = face_utils.shape_to_np(landmarks)
                # normalization distances (68-point indexing): inner eyebrow
                # corner to inner eye corner for each eye, and nose to upper lip
                norm_left_eye = _distance(landmarks[21], landmarks[39])
                norm_right_eye = _distance(landmarks[22], landmarks[42])
                norm_lips = _distance(landmarks[33], landmarks[52])
                eyebrow_left = sum(
                    [(_distance(landmarks[39], landmarks[i]) / norm_left_eye)
                        for i in [18, 19, 20, 21]]
                )
                eyebrow_right = sum(
                    [(_distance(landmarks[42], landmarks[i]) / norm_right_eye)
                        for i in [22, 23, 24, 25]]
                )
                lip_left = sum(
                    [(_distance(landmarks[33], landmarks[i]) / norm_lips)
                        for i in [48, 49, 50]]
                )
                lip_right = sum(
                    [(_distance(landmarks[33], landmarks[i]) / norm_lips)
                        for i in [52, 53, 54]]
                )
                mouth_width = _distance(landmarks[48], landmarks[54])
                mouth_height = _distance(landmarks[51], landmarks[57])
                features.append(np.array([
                    eyebrow_left,
                    eyebrow_right,
                    lip_left,
                    lip_right,
                    mouth_width,
                    mouth_height
                ]))

        # aggregate features across frames
        if len(features) > 0:
            mean = np.mean(features, axis=0)
            mini = np.amin(features, axis=0)
            maxi = np.amax(features, axis=0)
            std = np.std(features, axis=0)
            perc25 = np.percentile(features, q=25, axis=0)
            perc75 = np.percentile(features, q=75, axis=0)

            # 6 statistics x 6 features = 36 values
            aggregated_feats = np.array([mean, mini, maxi, std, perc25, perc75]).reshape(1, 36)

        print("VIDEO FEAT PUB")
        return {'fl_features': aggregated_feats}
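The hard-coded indices in Example #10 follow the 68-point iBUG annotation scheme that dlib's predictor uses. imutils also exposes named index ranges, which can make such code easier to read; a small sketch using the array returned by shape_to_np():

from imutils import face_utils

# named (start, end) slices into the 68-point landmark array
(m_start, m_end) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
mouth = landmarks[m_start:m_end]  # assumes `landmarks` from shape_to_np()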