Python cv2.CASCADE_SCALE_IMAGE Examples

The following are 24 code examples of cv2.CASCADE_SCALE_IMAGE, a flag passed to cv2.CascadeClassifier.detectMultiScale(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module cv2, or try the search function.
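For orientation, cv2.CASCADE_SCALE_IMAGE is a constant supplied through the flags argument of cv2.CascadeClassifier.detectMultiScale(), which is exactly how every example below uses it. The minimal sketch that follows shows the typical pattern; the image path is a placeholder, and it assumes the Haar cascade files bundled with the opencv-python package (cv2.data.haarcascades).

import cv2

# Load a bundled Haar cascade (cv2.data.haarcascades ships with opencv-python;
# older installs may need an explicit path to the XML file instead).
cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

image = cv2.imread("input.jpg")  # placeholder path
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# CASCADE_SCALE_IMAGE historically told the detector to scale the image
# pyramid rather than the features; recent OpenCV versions still accept it
# for compatibility even though new-style cascades ignore it.
faces = cascade.detectMultiScale(
    gray,
    scaleFactor=1.1,
    minNeighbors=5,
    minSize=(30, 30),
    flags=cv2.CASCADE_SCALE_IMAGE,
)
print("Detected {} face(s)".format(len(faces)))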
Example #1
Source File: face_detection.py    From Neural-Network-Projects-with-Python with MIT License
def detect_faces(img, draw_box=True):
    # convert image to grayscale
    grayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # detect faces
    faces = face_cascade.detectMultiScale(
        grayscale_img,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )

    face_box, face_coords = None, []

    for (x, y, w, h) in faces:
        if draw_box:
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 5)
        face_box = img[y:y+h, x:x+w]
        face_coords = [x, y, w, h]

    return img, face_box, face_coords
Example #2
Source File: camera.py    From live-stream-face-detection with MIT License
def camera_stream():
    # Capture frame-by-frame
    ret, frame = video_capture.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Display the resulting frame in browser
    return cv2.imencode('.jpg', frame)[1].tobytes() 
Example #3
Source File: dog_filter.py    From OpenCV-Snapchat-DogFilter with BSD 3-Clause "New" or "Revised" License
def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]
    return rects 
Example #4
Source File: Train Classifier and Test Video Feed.py    From Emotion-Recognition-Using-SVMs with MIT License
def detectFaces(frame):
    cascPath = "../data/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detected_faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=6,
            minSize=(50, 50),
            flags=cv2.CASCADE_SCALE_IMAGE)
    return gray, detected_faces 
Example #5
Source File: facedetect.py    From PyCV-time with MIT License
def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects 
Example #6
Source File: face.py    From MMM-Facial-Recognition-OCV3 with MIT License
def detect_eyes(self, image):
        eyes = self.haar_eyes.detectMultiScale(image, 
                                          scaleFactor=self.haar_scale_factor,
                                          minNeighbors=self.haar_min_neighbors_eyes, 
                                          minSize=self.haar_min_size_eyes, 
                                          flags=cv2.CASCADE_SCALE_IMAGE)
        return eyes 
Example #7
Source File: face.py    From MMM-Facial-Recognition-OCV3 with MIT License
def detect_faces(self, image):
        """Return bounds (x, y, width, height) of detected face in grayscale image.
        return all faces found in the image
        """
        faces = self.haar_faces.detectMultiScale(image, 
                                            scaleFactor=self.haar_scale_factor, 
                                            minNeighbors=self.haar_min_neighbors_face, 
                                            minSize=self.haar_min_size_face, 
                                            flags=cv2.CASCADE_SCALE_IMAGE)
        return faces 
Example #8
Source File: face.py    From MMM-Facial-Recognition-OCV3 with MIT License
def detect_single(self, image):
        """Return bounds (x, y, width, height) of detected face in grayscale image.
        If no face or more than one face are detected, None is returned.
        """
        faces = self.haar_faces.detectMultiScale(image, 
                                            scaleFactor=self.haar_scale_factor, 
                                            minNeighbors=self.haar_min_neighbors_face, 
                                            minSize=self.haar_min_size_face, 
                                            flags=cv2.CASCADE_SCALE_IMAGE)
        if len(faces) != 1:
            return None
        return faces[0] 
Example #9
Source File: facial_features.py    From snapchat-filters-opencv with MIT License
def apply_Haar_filter(img, haar_cascade,scaleFact = 1.1, minNeigh = 5, minSizeW = 30):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    features = haar_cascade.detectMultiScale(
        gray,
        scaleFactor=scaleFact,
        minNeighbors=minNeigh,
        minSize=(minSizeW, minSizeW),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    return features 
Example #10
Source File: main.py    From snapchat-filters-opencv with MIT License
def apply_Haar_filter(img, haar_cascade, scaleFact=1.1, minNeigh=5, minSizeW=30):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    features = haar_cascade.detectMultiScale(
        gray,
        scaleFactor=scaleFact,
        minNeighbors=minNeigh,
        minSize=(minSizeW, minSizeW),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )
    return features


# Adjust the given sprite to the head's width and position.
# If the sprite does not fit the screen at the top, it should be trimmed.
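The helper that this comment documents is not part of the excerpt above. Purely as a hypothetical sketch of what such an adjustment could look like (adjust_sprite_to_head, head_width and head_y are illustrative names, not the project's actual API):

import cv2

def adjust_sprite_to_head(sprite, head_width, head_y):
    # Hypothetical illustration only: resize the sprite so its width matches
    # the detected head width, keeping the aspect ratio.
    factor = head_width / sprite.shape[1]
    sprite = cv2.resize(sprite, (0, 0), fx=factor, fy=factor)

    # Place the sprite so its bottom edge sits at the top of the head.
    y_origin = head_y - sprite.shape[0]

    # If the sprite would extend above the frame, trim the rows that do not
    # fit and start drawing at row 0 instead.
    if y_origin < 0:
        sprite = sprite[-y_origin:]
        y_origin = 0
    return sprite, y_origin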
Example #11
Source File: WebCam.py    From FakeBlock with MIT License
def format_image(image_to_format):
    if len(image_to_format.shape) > 2 and image_to_format.shape[2] == 3:
        image_to_format = cv2.cvtColor(image_to_format, cv2.COLOR_BGR2GRAY)
    else:
        image_to_format = cv2.imdecode(image_to_format, cv2.IMREAD_GRAYSCALE)

    detected_faces = face_cascade.detectMultiScale(
        image_to_format,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(48, 48),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # If we don't find a face, return None
    if len(detected_faces) == 0:
        return None
    max_face = detected_faces[0]
    for face in detected_faces:
        if face[2] * face[3] > max_face[2] * max_face[3]:
            max_face = face

    # Chop image to face
    face = max_face
    image_to_format = image_to_format[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]

    # Resize image to fit network specs
    try:
        image_to_format = cv2.resize(image_to_format, (Constants.FACE_SIZE, Constants.FACE_SIZE),
                                     interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        print("Image resize exception. Check input resolution inconsistency.")
        return None
    return image_to_format 
Example #12
Source File: CSVToNumpyConverter.py    From FakeBlock with MIT License
def format_image(image_to_format):
    image_to_format = cv2.cvtColor(image_to_format, cv2.COLOR_BGR2GRAY)

    image_border = np.zeros((150, 150), np.uint8)
    image_border[:, :] = 200
    image_border[
        int((150 / 2) - (Constants.FACE_SIZE / 2)): int((150 / 2) + (Constants.FACE_SIZE / 2)),
        int((150 / 2) - (Constants.FACE_SIZE / 2)): int((150 / 2) + (Constants.FACE_SIZE / 2))
    ] = image_to_format

    image_to_format = image_border
    detected_faces = cascade_classifier.detectMultiScale(
        image_to_format,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(48, 48),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # If no faces are found, return None
    if len(detected_faces) == 0:
        return None

    max_face = detected_faces[0]
    for face in detected_faces:
        if face[2] * face[3] > max_face[2] * max_face[3]:
            max_face = face

    # Chop image to face
    face = max_face
    image_to_format = image_to_format[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]

    # Resize image to fit network specs
    try:
        image_to_format = cv2.resize(image_to_format, (Constants.FACE_SIZE, Constants.FACE_SIZE),
                                     interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        # This happened once and now I'm scared to remove it.
        print("Image resize exception. Check input resolution inconsistency.")
        return None
    return image_to_format 
Example #13
Source File: app.py    From robovision with GNU General Public License v3.0
def detect_face_in_image_data(self, image_data):
        """
        function detects faces in image data,
        draws rectangle for faces in image data,
        and returns this updated image data with highlighted face/s
        """
        self._red = (0, 0, 255)
        self._width = 2
        self._min_size = (30, 30)

        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        # path to Haar face classifier's xml file
        face_cascade_xml = ('./cascades/haarcascades_cuda/'
                            'haarcascade_frontalface_default.xml')
        self.classifier = cv2.CascadeClassifier(face_cascade_xml)
        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        for (x, y, w, h) in faces:
            cv2.rectangle(image_data,
                          (x, y),
                          (x+w, y+h),
                          self._red,
                          self._width)

        return image_data 
Example #14
Source File: dashboard.py    From robovision with GNU General Public License v3.0
def detect_faces(self, image: np.ndarray):
        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        return faces 
Example #15
Source File: face_detector.py    From image_utility with MIT License
def get_lbp_facebox(image):
    """
    Get the bounding box fo faces in image by LBP feature.
    """
    rects = CASCADES.detectMultiScale(image, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                      flags=cv.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    for rect in rects:
        rect[2] += rect[0]
        rect[3] += rect[1]
    return rects 
Example #16
Source File: landmark.py    From deep-landmark with BSD 3-Clause "New" or "Revised" License
def detectFace(self, img):
        rects = self.cc.detectMultiScale(img, scaleFactor=1.2, minNeighbors=2,
                                         minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
        for rect in rects:
            rect[2:] += rect[:2]
            yield BBox([rect[0], rect[2], rect[1], rect[3]]) 
Example #17
Source File: facedetect.py    From OpenCV-Python-Tutorial with MIT License
def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects 
Example #18
Source File: facedetector.py    From PracticalPythonAndOpenCV_CaseStudies with GNU General Public License v3.0
def detect(self, image, scale_factor=1.1, min_neighbors=5):
        # Detect faces in the image
        boxes = self.face_cascade.detectMultiScale(image, scale_factor, min_neighbors, flags=cv2.CASCADE_SCALE_IMAGE)

        # Return the bounding boxes
        return boxes 
Example #19
Source File: facedetector.py    From PracticalPythonAndOpenCV_CaseStudies with GNU General Public License v3.0
def detect(self, image, scale_factor=1.1, min_neighbors=5):
        # Detect faces in the image
        boxes = self.face_cascade.detectMultiScale(image, scale_factor, min_neighbors, flags=cv2.CASCADE_SCALE_IMAGE)

        # Return the bounding boxes
        return boxes 
Example #20
Source File: cascade.py    From object-detection with MIT License
def prediction(self, image):
        objects = self.model.detectMultiScale(
                image,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
                )
        return objects 
Example #21
Source File: main.py    From slouchy with GNU General Public License v3.0
def detect_face(MaybeImage):
  """
  Take an image and return positional information for the largest face in it.

  Args:
      MaybeImage: An image grabbed from the local camera.

  Returns:
      Maybe tuple((bool, [int]) or (bool, str)): True and list of positional
      coordinates of the largest face found. False and an error string if no
      faces are found.
  """

  if MaybeImage.success:
    image = MaybeImage.result
  else:
    return MaybeImage

  faceCascade = cv2.CascadeClassifier(config.face_cascade_path) # Load face classifier

  major_ver = (cv2.__version__).split('.')[0]

  if int(major_ver) < 3:
    flag_for_detect = cv2.cv.CV_HAAR_SCALE_IMAGE
  else:
    flag_for_detect = cv2.CASCADE_SCALE_IMAGE

  # Detect faces in the image
  # faces will be an iterable object
  faces = faceCascade.detectMultiScale(
      image=image,
      scaleFactor=1.1,
      minNeighbors=5,
      minSize=(40, 40),
      flags = flag_for_detect
  )

  try:                                     # Assume largest face is the subject
    face = faces[0]                        # [0] index is largest face.
    return Maybe(True, face)
  except IndexError:
    return Maybe(False, "No faces detected. This may be due to low or uneven \
lighting.") 
Example #22
Source File: detector.py    From detection with GNU General Public License v2.0
def detect(self, img, tree, equalizeHist=True, debugTable=None, autoNeighbors=None,
               autoNeighborsParam=0):

        def detectTree(tree, parentRoi, parentName, parentHash, roiTree):
            """Recursive function to detect objects in the tree.
            """
            x, y, w, h = parentRoi
            cropped = img[y:y+h, x:x+w]
            for node, children in tree.iteritems():
                selected, param = node.data
                incNeighbors = True
                while incNeighbors:
                    if debugTable and not autoNeighbors and selected:
                        col1 = '{} ({})'.format(param.classifier, param.name)
                        col2 = 'detecting in {}x{} ({})...'.format(w, h, parentName)
                        debugTable([(col1, 200), (col2, 300), ('', 200)])
                        start = time.time()
                    rects = self.detectObject(
                        cropped,
                        param.classifier,
                        param.scaleFactor,
                        param.minNeighbors,
                        param.minSize,
                        cv2.CASCADE_SCALE_IMAGE)
                    if isinstance(rects, np.ndarray):
                        rects = rects.tolist()

                    self.globalizeCoords(rects, parentRoi)

                    hashs = None
                    tracking = None
                    if not autoNeighbors:
                        res = self.stabilize(param, parentHash, rects)
                        if res:
                            rects, hashs = zip(*res[-1]) if res[-1] else ([], [])
                            tracking = res[:-1]

                    if debugTable and not autoNeighbors and selected:
                        end = time.time()
                        col = '{} found in {:.2f} s'.format(
                            len(rects), end - start)
                        debugTable([(col, 0)], append=True)

                    if autoNeighbors and node == autoNeighbors and len(rects) > autoNeighborsParam:
                        param.minNeighbors += 1
                    else:
                        incNeighbors = False

                for i, roi in enumerate(rects):
                    hash = hashs[i] if hashs else None
                    roiNode = Node(param.classifier, (roi, param, tracking))
                    roiTree[roiNode]
                    name = parentName + ' > ' + param.name
                    detectTree(children, roi, name, hash, roiTree[roiNode])

        img = self.preprocess(img, equalizeHist)
        self.preprocessed = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        h, w = img.shape[:2]
        roiTree = Tree()
        detectTree(tree, (0, 0, w, h), 'Root', None, roiTree)
        return roiTree 
Example #23
Source File: detectors.py    From Smart-Surveillance-System-using-Raspberry-Pi with GNU General Public License v3.0
def detect(self, image, biggest_only=True):
        """ Detect face in an image.

        Find the biggest face in an image and return its position and
        dimensions (top, left, width and height).

        :param image: the image in which to detect faces
        :type image: numpy array
        :return: top, left, width and height of the rectangle around the face
        :rtype: tuple of length 4
        """
        is_color = len(image.shape) == 3
        if is_color:
            image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            image_gray = image
        # The algorithm checks for faces of different sizes;
        # scale_factor controls how much the search window is scaled
        # between successive sizes. A bigger scale_factor means bigger
        # jumps, so detection is faster but less accurate; 1.2 or 1.3
        # is recommended.
        scale_factor = 1.2

        # Threshold to detect a face: a candidate rectangle needs at
        # least min_neighbors neighboring detections to be kept
        min_neighbors = 5

        # Sets the min_size of the face we want to detect. Default is 20x20
        min_size = (30, 30)

        # With biggest_only=True, only the single biggest face is searched for
        flags = cv2.CASCADE_FIND_BIGGEST_OBJECT | \
            cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else \
            cv2.CASCADE_SCALE_IMAGE

        face_coord = self.classifier.detectMultiScale(
            image_gray,
            scaleFactor=scale_factor,
            minNeighbors=min_neighbors,
            minSize=min_size,
            flags=flags
        )

        return face_coord 
Example #24
Source File: process_images.py    From image-processing-pipeline with MIT License
def main(args):
    os.makedirs(args.output, exist_ok=True)

    # load the face detector
    detector = cv2.CascadeClassifier(args.classifier)

    # list images from input directory
    input_image_files = list_images(args.input, (".jpg", ".png"))

    # Storage for JSON summary
    summary = {}

    # Loop over the image paths
    for image_file in input_image_files:
        # Load the image and convert it to grayscale
        image = cv2.imread(image_file)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Detect faces
        face_rects = detector.detectMultiScale(gray, scaleFactor=1.05, minNeighbors=5,
                                               minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
        summary[image_file] = {}
        # Loop over all detected faces
        for i, (x, y, w, h) in enumerate(face_rects):
            face = image[y:y+h, x:x+w]

            # Prepare output directory for faces
            output = os.path.join(*(image_file.split(os.path.sep)[1:]))
            output = os.path.join(args.output, output)
            os.makedirs(output, exist_ok=True)

            # Save faces
            face_file = os.path.join(output, f"{i:05d}.jpg")
            cv2.imwrite(face_file, face)

            # Store summary data
            summary[image_file][face_file] = np.array([x, y, w, h], dtype=int).tolist()

        # Display summary
        print(f"[INFO] {image_file}: face detections {len(face_rects)}")

    # Save summary data
    if args.out_summary:
        summary_file = os.path.join(args.output, args.out_summary)
        print(f"[INFO] Saving summary to {summary_file}...")
        with open(summary_file, 'w') as json_file:
            json_file.write(json.dumps(summary))