Python cv2.rectangle() Examples

The following are 50 code examples showing how to use cv2.rectangle(). They are extracted from open-source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.

Example 1
Project: Gender   Author: rabeter   File: find.py    (license) View Source Project 17 votes vote down vote up
def draw_rects(img, rects):
    """Draw a rectangle around each detected face and label it with the
    gender predicted by the ``Gender`` model.

    :param img: BGR image (modified in place).
    :param rects: iterable of (x, y, w, h) face bounding boxes.
    :return: None -- annotations are drawn directly onto ``img``.
    """
    for x, y, w, h in rects:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
        # BUG FIX: the original classified the *whole frame* (``face = img``);
        # crop the detected face region so the model sees only the face.
        face = img[y:y + h, x:x + w]
        face = cv2.resize(face, (224, 224))
        text = "Male" if Gender.predict(face) == 1 else "Female"
        cv2.putText(img, text, (x, h), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                    (255, 255, 255), lineType=cv2.LINE_AA)
Example 2
Project: Speedy-TSLSR   Author: talhaHavadar   File: tslsr.py    (license) View Source Project 14 votes vote down vote up
def __bound_contours(roi):
    """
        Non-destructively locates contours in *roi* and returns their
        bounding rectangles.
        @roi region of interest to find contours
        @return (roi, rects)
    """

    annotated = roi.copy()
    hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)

    # Keep only dark pixels (low HSV value), close small holes, then run
    # edge detection twice with a blur in between to thin the edges.
    mask = cv2.inRange(hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask = cv2.Canny(mask, 100, 300)
    mask = cv2.GaussianBlur(mask, (1, 1), 0)
    mask = cv2.Canny(mask, 100, 300)

    # Find contours for the detected portion of the image and keep only
    # the five largest by area.
    _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]

    rects = []
    for contour in contours:
        perimeter = cv2.arcLength(contour, True)
        polygon = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
        x, y, w, h = cv2.boundingRect(polygon)
        if h >= 15:
            # Tall enough to be a candidate region.
            rects.append((x, y, w, h))
            cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 1)

    return (annotated, rects)
Example 3
Project: facial_emotion_recognition   Author: adamaulia   File: image_test.py    (license) View Source Project 11 votes vote down vote up
def test_image(addr):
    """Detect faces in the image at *addr*, classify each face's emotion
    with the global ``model``, annotate, display and save the result.

    :param addr: path of the image file to process.
    """
    target = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)

    for (x, y, w, h) in faces:
        # BUG FIX: the original passed a stray ``5`` as lineType, which is
        # not a valid OpenCV line type (valid: LINE_4=4, LINE_8=8, LINE_AA=16).
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
        face_crop = im[y:y + h, x:x + w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        # Normalise to [0, 1] and reshape to (batch, channel, height, width).
        face_crop = face_crop.astype('float32') / 255
        face_crop = np.asarray(face_crop)
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg', im)
    cv2.waitKey(0)
Example 4
Project: pybot   Author: spillai   File: base_klt.py    (license) View Source Project 7 votes vote down vote up
def visualize(self, vis, colored=True):
        """Draw the tracked bounding boxes onto *vis* and return it.

        :param vis: image to draw on (modified in place).
        :param colored: forwarded to the parent class' ``viz``.
        :return: the annotated image.
        """
        # FIX: the original used a bare ``except:``, which also swallows
        # SystemExit/KeyboardInterrupt; keep the same fallback behaviour
        # but only catch ordinary exceptions.
        try:
            tids = set(self.ids)
        except Exception:
            return vis

        # One green 1-px rectangle per tracked bounding box.
        for hid, hbox in izip(self.ids, self.bboxes):
            cv2.rectangle(vis, (hbox[0], hbox[1]), (hbox[2], hbox[3]), (0, 255, 0), 1)

        vis = super(BoundingBoxKLT, self).viz(vis, colored=colored)
        return vis
Example 5
Project: pybot   Author: spillai   File: draw_utils.py    (license) View Source Project 6 votes vote down vote up
def draw_bboxes(vis, bboxes, texts=None, ellipse=False, colored=True):
    """Draw one box (or inscribed ellipse) per bounding box on *vis*,
    optionally annotated with a title, and return the image."""
    if not len(bboxes):
        return vis

    # One colour per box: a cycling colour wheel, or a fixed light grey.
    if colored:
        N = 20
        cwheel = colormap(np.linspace(0, 1, N))
        cols = np.vstack([cwheel[idx % N] for idx, _ in enumerate(bboxes)])
    else:
        cols = np.tile([240, 240, 240], [len(bboxes), 1])

    if texts is None:
        texts = [None] * len(bboxes)

    for col, box, title in zip(cols, bboxes, texts):
        if ellipse:
            center = ((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)
            axes = ((box[2] - box[0]) / 2, (box[3] - box[1]) / 2)
            cv2.ellipse(vis, center, axes, 0, 0, 360,
                        color=tuple(col), thickness=1)
        else:
            cv2.rectangle(vis, (box[0], box[1]), (box[2], box[3]), tuple(col), 2)
        if title:
            annotate_bbox(vis, box, title=title)
    return vis
Example 6
Project: pedestrianSys   Author: PhilipChicco   File: detector.py    (license) View Source Project 6 votes vote down vote up
def display_detected(self, frame, face_locs, people, confidence):
        """
        - Display ROI's of detected faces with labels
        :param frame: image annotated in place
        :param face_locs: (top, right, bottom, left) box per face
        :param people : people in image classified
        :param confidence : recognition confidence
        :return:
        """
        # FIX: removed four no-op bare statements (``top``, ``right``,
        # ``bottom``, ``left``) left over from deleted 4x re-scaling code --
        # they evaluated the names and discarded the result.
        if not len(face_locs) == 0:  # at least one face was detected
            for (top, right, bottom, left), name, conf in zip(face_locs, people, confidence):
                # Label text, e.g. "alice 97.3%".
                conf_4f = "%.3f" % conf
                peop_conf = "{} {}%".format(name, float(conf_4f) * 100)

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                # Filled strip just below the top edge for the label text
                cv2.rectangle(frame, (left, top + 20), (right, top), (0, 0, 255), cv2.FILLED)

                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, peop_conf, (left, top + 15), font, 0.5, (255, 255, 255), 1)
Example 7
Project: face_detection   Author: chintak   File: plotting.py    (license) View Source Project 6 votes vote down vote up
def plot_face_bb(p, bb, scale=True, path=True, plot=True):
    """Draw a face bounding box on an image and show or return it.

    :param p: image path (when ``path`` is True) or an RGB image array.
    :param bb: bounding box (x1, y1, x2, y2); fractional when ``scale``.
    :param scale: if True, ``bb`` holds normalised coordinates that are
        multiplied up to pixel units.
    :param path: if True treat ``p`` as a file path, else as an RGB array.
    :param plot: if True display via matplotlib, else return the RGB image.
    """
    if path:
        im = cv2.imread(p)
    else:
        im = cv2.cvtColor(p, cv2.COLOR_RGB2BGR)
    if scale:
        h, w, _ = im.shape
        # NOTE(review): x-coordinates are scaled by the image *height* and
        # y-coordinates by the *width*; for bb = (x1, y1, x2, y2) the usual
        # factors would be (w, h, w, h) -- confirm the bb layout upstream.
        cv2.rectangle(im, (int(bb[0] * h), int(bb[1] * w)),
                      (int(bb[2] * h), int(bb[3] * w)),
                      (255, 255, 0), thickness=4)
        # print bb * np.asarray([h, w, h, w])
    else:
        cv2.rectangle(im, (int(bb[0]), int(bb[1])), (int(bb[2]), int(bb[3])),
                      (255, 255, 0), thickness=4)
        print "no"
    if plot:
        plt.figure()
        # ::-1 flips BGR back to RGB for matplotlib display.
        plt.imshow(im[:, :, ::-1])
    else:
        return im[:, :, ::-1]
Example 8
Project: py-faster-rcnn-tk1   Author: joeking11829   File: demo_opencv.py    (license) View Source Project 6 votes vote down vote up
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    # Indices of detections whose score clears the threshold.
    keep = np.where(dets[:, -1] >= thresh)[0]
    if len(keep) == 0:
        return

    for idx in keep:
        bbox = dets[idx, :4]
        score = dets[idx, -1]

        # Green box around the detection...
        cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 3)
        # ...and a red "<class> <score>" label at its top-left corner.
        cv2.putText(im, class_name + ' ' + str(score), (bbox[0], bbox[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv2.LINE_AA)
Example 9
Project: piwall-cvtools   Author: infinnovation   File: piwall.py    (license) View Source Project 6 votes vote down vote up
def contour_to_monitor_coords(screenCnt):
    '''Apply pyimagesearch algorithm to identify tl,tr,br,bl points from a contour'''
    # Flatten the 4-point contour into (x, y) rows and prepare the output
    # in top-left, top-right, bottom-right, bottom-left order.
    pts = screenCnt.reshape(4, 2)
    ordered = np.zeros((4, 2), dtype="float32")

    # The top-left corner has the smallest x+y sum; the bottom-right the
    # largest.
    sums = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(sums)]
    ordered[2] = pts[np.argmax(sums)]

    # np.diff along axis 1 gives y-x: the top-right corner minimises it,
    # the bottom-left maximises it.
    diffs = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(diffs)]
    ordered[3] = pts[np.argmax(diffs)]

    return ordered
Example 10
Project: piwall-cvtools   Author: infinnovation   File: model.py    (license) View Source Project 6 votes vote down vote up
def draw(self, image):
        """Draw every tile (screen area plus its four bezel strips) onto
        *image* and show the result in the "image" window.

        :param image: canvas to draw on (modified in place).
        """
        # With no tiles there is nothing to draw; still show the canvas once.
        if len(self.tilesByOrder) == 0:
            cv2.imshow("image", image)
        for tile in self.tilesByOrder:
            # Thin green outline of the visible screen area.
            cv2.rectangle(image, (tile.wx, tile.wy), (tile.wx + tile.w, tile.wy + tile.h),
                          (0, 255, 0), 1)
            #Left bezel
            cv2.rectangle(image, (tile.wx - tile.l, tile.wy), (tile.wx, tile.wy + tile.h),
                          (40, 255, 40), -1)
            #Top bezel
            cv2.rectangle(image, (tile.wx - tile.l, tile.wy - tile.t), (tile.wx + tile.w, tile.wy),
                          (40, 255, 40), -1)
            #Right bezel
            cv2.rectangle(image, (tile.wx + tile.w, tile.wy - tile.t), (tile.wx + tile.w + tile.r, tile.wy + tile.h),
                          (40, 255, 40), -1)
            #Bottom bezel
            cv2.rectangle(image, (tile.wx - tile.l, tile.wy + tile.h), (tile.wx + tile.w + tile.r, tile.wy + tile.h + tile.b),
                          (40, 255, 40), -1)

            # NOTE(review): imshow inside the loop redraws after every tile;
            # presumably intentional for incremental display -- confirm.
            cv2.imshow("image", image)
Example 11
Project: iGAN   Author: junyanz   File: gui_vis.py    (license) View Source Project 6 votes vote down vote up
def update_vis(self):
        """Refresh the cached images for the current frame and rebuild the
        visualisation grid, highlighting the currently selected image."""
        ims = self.opt_engine.get_images(self.frame_id)

        # Keep the previously cached images when the engine has nothing new.
        if ims is not None:
            self.ims = ims

        if self.ims is None:
            return

        ims_show = []
        n_imgs = self.ims.shape[0]
        for n in range(n_imgs):
            # im = ims[n]
            # Scale each candidate image up to the display size.
            im_s = cv2.resize(self.ims[n], (self.width, self.width), interpolation=cv2.INTER_CUBIC)
            # Mark the selected image with a green inset border (only shown
            # when more than one candidate is displayed).
            if n == self.select_id and self.topK > 1:
                t = 3  # thickness
                cv2.rectangle(im_s, (t, t), (self.width - t, self.width - t), (0, 255, 0), t)
            # Add a leading batch axis so the images can be concatenated.
            im_s = im_s[np.newaxis, ...]
            ims_show.append(im_s)
        if ims_show:
            ims_show = np.concatenate(ims_show, axis=0)
            g_tmp = utils.grid_vis(ims_show, self.grid_size[1], self.grid_size[0]) # (nh, nw)
            self.vis_results = g_tmp.copy()
            self.update()
Example 12
Project: facejack   Author: PetarV-   File: face_detect_cv3.py    (license) View Source Project 6 votes vote down vote up
def dispact_and_update(img, hack, base_im, x, y, w, h):
    """POST the encoded face image to the FaceJack server and display the
    cropped face with a green (allowed) or red (denied) border."""
    try:
        endpoint = "http://facejack.westeurope.cloudapp.azure.com:5001/imsend"
        headers = {
            'content-type': "application/x-www-form-urlencoded",
            'cache-control': "no-cache"
        }
        response = requests.post(url=endpoint, data=img, headers=headers,
                                 params={'hack': str(hack)}).json()

        allowed = 'authentication' in response and response['authentication'] == "ALLOWED"
        # Crop the face from the source frame and scale it for display.
        disp_face = cv2.resize(base_im[y:y + h, x:x + w], (224, 224), 0, 0, cv2.INTER_LANCZOS4)
        border = (0, 255, 0) if allowed else (0, 0, 255)
        cv2.rectangle(disp_face, (0, 0), (222, 222), border, 2)
        cv2.imshow("Face", disp_face)
    finally:
        # Always release the lock, even when the request fails.
        myl.release()
Example 13
Project: image_recognition   Author: tue-robotics   File: image_writer.py    (license) View Source Project 6 votes vote down vote up
def get_annotated_cv_image(cv_image, recognitions):
    """
    Gets an annotated CV image based on recognitions, drawn using cv.rectangle
    :param cv_image: Original cv image
    :param recognitions: List of recognitions
    :return: Annotated image
    """
    annotated = cv_image.copy()

    # One distinct colour per recognition from the normalised colour map.
    c_map = color_map(N=len(recognitions), normalized=True)
    for i, recognition in enumerate(recognitions):
        roi = recognition.roi
        top_left = (roi.x_offset, roi.y_offset)
        bottom_right = (roi.x_offset + roi.width, roi.y_offset + roi.height)
        # Channels reversed: the colour map is RGB, OpenCV expects BGR.
        color = (c_map[i, 2] * 255, c_map[i, 1] * 255, c_map[i, 0] * 255)
        cv2.rectangle(annotated, top_left, bottom_right, color, 10)
    return annotated
Example 14
Project: canshi   Author: hungsing92   File: tag_bb.py    (license) View Source Project 6 votes vote down vote up
def click_and_crop(event, x, y, flags, param):
    """Mouse callback that records two stacked bounding boxes per person.

    Alternates between the "upper" box (button press starts a new entry,
    release fills slots 0-3) and the "body" box (slots 4-7), drawing each
    finished box on the global ``image``.
    """
    global bbs, x_upper, id

    if event == cv2.EVENT_LBUTTONDOWN:
        if x_upper:
            # Start a new record: [x, y, w, h, body_x, body_y, body_w, body_h]
            bbs.append([x,y,0,0, 0,0,0,0])
        else:
            # Anchor point of the body box for the latest record.
            bbs[-1][4] = x
            bbs[-1][5] = y
            
    elif event == cv2.EVENT_LBUTTONUP:
        if x_upper:
            # Normalise so (x, y) is the top-left corner with positive w/h.
            bbs[-1][2] = abs(x - bbs[-1][0])            
            bbs[-1][3] = abs(y - bbs[-1][1])
            bbs[-1][0] = min(x, bbs[-1][0])
            bbs[-1][1] = min(y, bbs[-1][1])
            cv2.rectangle(image, (bbs[-1][0],bbs[-1][1]), (bbs[-1][0]+bbs[-1][2],bbs[-1][1]+bbs[-1][3]), (0,0,255), 2)
            #cv2.putText(image, 'Upper %d' % id, (bbs[-1][0],bbs[-1][1]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,255))
        else:
            # Same normalisation for the body box (slots 4-7).
            bbs[-1][6] = abs(x - bbs[-1][4])
            bbs[-1][7] = abs(y - bbs[-1][5])
            bbs[-1][4] = min(x, bbs[-1][4])
            bbs[-1][5] = min(y, bbs[-1][5])
            cv2.rectangle(image, (bbs[-1][4],bbs[-1][5]), (bbs[-1][4]+bbs[-1][6],bbs[-1][5]+bbs[-1][7]), (0,255,0), 2)
            cv2.putText(image, 'Body %d' % id, (bbs[-1][4],bbs[-1][5]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,255,0))
            
            
        cv2.imshow("image", image)        
        # Toggle: the next press/release pair targets the other box type.
        x_upper = not x_upper
Example 15
Project: tensorflow-yolo   Author: hjimce   File: misc.py    (license) View Source Project 6 votes vote down vote up
def show(im, allobj, S, w, h, cellx, celly):
    """Draw each YOLO-style object on *im* as a red box and display it."""
    for obj in allobj:
        # Recover the box centre from grid cell index + in-cell offset.
        col = obj[5] % S
        row = obj[5] // S
        centerx = (col + obj[1]) * cellx
        centery = (row + obj[2]) * celly
        # Width/height are stored as square roots of image fractions.
        box_w = obj[3] ** 2 * w
        box_h = obj[4] ** 2 * h
        top_left = (int(centerx - box_w / 2), int(centery - box_h / 2))
        bottom_right = (int(centerx + box_w / 2), int(centery + box_h / 2))
        cv2.rectangle(im, top_left, bottom_right, (0, 0, 255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example 16
Project: cv-lane   Author: kendricktan   File: EyeCanSee.py    (license) View Source Project 6 votes vote down vote up
def get_hsv(self):
        """Interactive HSV inspector: repeatedly grabs a frame, overlays the
        configured ROIs on the debug image and shows it; the mouse callback
        reports HSV values until 'q' is pressed."""
        cv2.namedWindow('hsv_extractor')
        while True:
            self.grab_frame()

            # Bottom ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Top ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Object ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            # HSV copy of the current frame, read by the mouse callback.
            self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

            # Mouse handler
            cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
            cv2.imshow('hsv_extractor', self.img_debug)

            # waitKey(0) blocks until a keypress; 'q' exits the loop.
            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        self.stop_camera()
        cv2.destroyAllWindows()

    # Starts camera (needs to be called before run) 
Example 17
Project: bib-tagger   Author: KateRita   File: bodydetector.py    (license) View Source Project 6 votes vote down vote up
def findbodies(image, faces):
    """Estimate a body bounding box below each detected face.

    :param image: unused here (drawing code is commented out); kept for
        interface compatibility.
    :param faces: array of (x, y, w, h) face boxes.
    :return: array of (x, y, w, h) body boxes, same shape/dtype as *faces*.
    """
    bodies = np.zeros_like(faces)

    # Heuristic: the body is 3 face-heights tall and 7/3 face-widths wide,
    # horizontally centred under the face, starting half a face-height
    # below it.
    for index, (x, y, facewidth, faceheight) in enumerate(faces):
        bodyheight = 3 * faceheight
        bodywidth = 7 / 3 * facewidth
        y_body = y + faceheight + .5 * faceheight
        x_body = x + .5 * facewidth - .5 * bodywidth

        #cv2.rectangle(image, (x_body, y_body), (x_body+bodywidth, y_body+bodyheight), (0, 255, 0), 2)
        bodies[index] = (x_body, y_body, bodywidth, bodyheight)

    return bodies
Example 18
Project: Automatic-Plate-Number-Recognition-APNR   Author: kagan94   File: main.py    (license) View Source Project 6 votes vote down vote up
def verify_sizes(rectangle):
    """Return True when a rotated rect from cv2.minAreaRect is a plausible
    licence-plate candidate: nearly upright, non-degenerate, and with an
    acceptable area/aspect ratio (checked by satisfy_ratio)."""
    (x, y), (width, height), rect_angle = rectangle

    # Discard rects that have been rotated more than 15 degrees from
    # horizontal (180 degrees is the maximum).
    angle = 90 - rect_angle if (width < height) else -rect_angle
    if 15 < abs(angle) < 165:
        return False

    # Basic validation on the detected region: the expected plate aspect
    # ratio is ~520/110 = 4.727 with a 40% error margin, and the height
    # should fall between roughly 15 and 125 pixels; these limits depend
    # on image size and camera position.
    area = height * width

    if height == 0 or width == 0:
        return False
    if not satisfy_ratio(area, width, height):
        return False

    return True
Example 19
Project: ATX   Author: NetEaseGame   File: simple-ide.py    (license) View Source Project 6 votes vote down vote up
def make_mouse_callback(imgs, ref_pt):
    """Build an OpenCV mouse callback implementing click-and-drag cropping.

    :param imgs: two-slot list; imgs[0] is the pristine frame, imgs[1]
        receives the frame with the current selection drawn on it.
    :param ref_pt: two-slot list receiving the drag start/end points.
    :return: the callback to register with cv2.setMouseCallback.
    """
    cropping = [False]  # list so the closure can mutate the flag
    clone = imgs[0]

    def _click_and_crop(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            # Drag started: remember the anchor corner.
            ref_pt[0] = (x, y)
            cropping[0] = True
        elif event == cv2.EVENT_LBUTTONUP:
            # Drag finished: store the opposite corner, draw the final box.
            ref_pt[1] = (x, y)
            cropping[0] = False
            imgs[1] = image = clone.copy()
            cv2.rectangle(image, ref_pt[0], ref_pt[1], (0, 255, 0), 2)
            cv2.imshow("image", image)
        elif event == cv2.EVENT_MOUSEMOVE and cropping[0]:
            # Drag in progress: preview the rubber-band rectangle.
            preview = clone.copy()
            cv2.rectangle(preview, ref_pt[0], (x, y), (0, 255, 0), 2)
            imgs[1] = image = preview
            cv2.imshow("image", image)
    return _click_and_crop
Example 20
Project: cvloop   Author: shoeffner   File: functions.py    (license) View Source Project 6 votes vote down vote up
def find_faces(self, image, draw_box=False):
        """Uses a haarcascade to detect faces inside an image.

        Args:
            image: The image.
            draw_box: If True, the image will be marked with a rectangle.

        Return:
            The faces as returned by OpenCV's detectMultiScale method for
            cascades.
        """
        # The cascade operates on grayscale input.
        grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        faces = self.cascade.detectMultiScale(grayscale,
                                              scaleFactor=1.3,
                                              minNeighbors=5,
                                              minSize=(50, 50),
                                              flags=0)

        if draw_box:
            # Outline every detection in green, 2 px thick.
            for left, top, width, height in faces:
                cv2.rectangle(image, (left, top),
                              (left + width, top + height), (0, 255, 0), 2)
        return faces
Example 21
Project: cervix-roi-segmentation-by-unet   Author: scottykwok   File: crop.py    (license) View Source Project 6 votes vote down vote up
def cropCircle(img, resize=None):
    """Crop *img* to the bounding rectangle of its largest bright region.

    :param img: RGB image.
    :param resize: optional max dimension; the image is scaled so its
        longest side equals this before processing.
    :return: [cropped image, crop rectangle, tile size].
    """
    if resize:
        # Scale so the longest side equals *resize*, preserving aspect ratio.
        if (img.shape[0] > img.shape[1]):
            tile_size = (int(img.shape[1] * resize / img.shape[0]), resize)
        else:
            tile_size = (resize, int(img.shape[0] * resize / img.shape[1]))
        img = cv2.resize(img, dsize=tile_size, interpolation=cv2.INTER_CUBIC)
    else:
        tile_size = img.shape

    # Threshold away near-black background; keep the largest bright blob.
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY);
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    main_contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]

    # Draw the main contour thickly, then flood-fill from the image centre
    # to obtain a filled mask of the region of interest.
    ff = np.zeros((gray.shape[0], gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0] + 2, gray.shape[1] + 2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1] / 2), int(gray.shape[0] / 2)), 1)

    # maxRect is defined elsewhere in this project; judging by the index
    # usage below it returns two opposite corners -- TODO confirm ordering.
    rect = maxRect(ff)
    rectangle = [min(rect[0], rect[2]), max(rect[0], rect[2]), min(rect[1], rect[3]), max(rect[1], rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    # NOTE(review): this draws on the mask ``ff``, which is not returned;
    # presumably left over from debugging.
    cv2.rectangle(ff, (min(rect[1], rect[3]), min(rect[0], rect[2])), (max(rect[1], rect[3]), max(rect[0], rect[2])), 3,
                  2)

    return [img_crop, rectangle, tile_size]
Example 22
Project: chainer-faster-rcnn   Author: mitmul   File: forward.py    (license) View Source Project 6 votes vote down vote up
def draw_result(out, im_scale, clss, bbox, nms_thresh, conf):
    """Draw NMS-filtered detections for the 20 foreground classes on *out*
    as red boxes with white class labels, and return the image."""
    CV_AA = 16  # anti-aliased line type
    for cls_id in range(1, 21):
        # Assemble (x1, y1, x2, y2, score) rows for this class and run NMS.
        scores = clss[:, cls_id][:, np.newaxis]
        boxes = bbox[:, cls_id * 4: (cls_id + 1) * 4]
        dets = np.hstack((boxes, scores))
        dets = dets[nms(dets, nms_thresh), :]

        # Draw every detection whose score clears the confidence threshold.
        for i in np.where(dets[:, -1] >= conf)[0]:
            x1, y1, x2, y2 = map(int, dets[i, :4])
            cv.rectangle(out, (x1, y1), (x2, y2), (0, 0, 255), 2, CV_AA)
            # Filled banner sized to the label text, then the text itself.
            ret, baseline = cv.getTextSize(
                CLASSES[cls_id], cv.FONT_HERSHEY_SIMPLEX, 0.8, 1)
            cv.rectangle(out, (x1, y2 - ret[1] - baseline),
                         (x1 + ret[0], y2), (0, 0, 255), -1)
            cv.putText(out, CLASSES[cls_id], (x1, y2 - baseline),
                       cv.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, CV_AA)

    return out
Example 23
Project: chainer-faster-rcnn   Author: mitmul   File: test_anchor_target_layer.py    (license) View Source Project 6 votes vote down vote up
def test_generate_proposals(self):
        """The number of anchors must equal (#shifts x #base anchors), and
        rendering all of them produces an image for visual inspection."""
        self.assertEqual(self.total_anchors, len(self.shifts) *
                         self.anchor_target_layer.anchors.shape[0])

        # Canvas sized so every anchor fits after shifting the minimum
        # corner to the origin.
        min_x = self.all_anchors[:, 0].min()
        min_y = self.all_anchors[:, 1].min()
        max_x = self.all_anchors[:, 2].max()
        max_y = self.all_anchors[:, 3].max()
        canvas = np.zeros(
            (int(abs(min_y) + max_y) + 1,
             int(abs(min_x) + max_x) + 1), dtype=np.uint8)
        # Shift anchors into positive coordinates (in place).
        self.all_anchors[:, 0] -= min_x
        self.all_anchors[:, 1] -= min_y
        self.all_anchors[:, 2] -= min_x
        self.all_anchors[:, 3] -= min_y
        for anchor in self.all_anchors:
            anchor = list(six.moves.map(int, anchor))
            cv.rectangle(
                canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
        # Written for manual visual inspection, not asserted on.
        cv.imwrite('tests/all_anchors.png', canvas)
Example 24
Project: chainer-faster-rcnn   Author: mitmul   File: test_anchor_target_layer.py    (license) View Source Project 6 votes vote down vote up
def test_keep_inside(self):
        """Render every anchor kept by keep_inside, individually and on a
        shared canvas, and write the images for visual inspection."""
        inds_inside, anchors = self.inds_inside, self.anchors

        # Canvas just large enough to hold all anchors after shifting the
        # minimum corner to the origin.
        min_x = anchors[:, 0].min()
        min_y = anchors[:, 1].min()
        max_x = anchors[:, 2].max()
        max_y = anchors[:, 3].max()
        canvas = np.zeros(
            (int(max_y - min_y) + 1,
             int(max_x - min_x) + 1), dtype=np.uint8)
        anchors[:, 0] -= min_x
        anchors[:, 1] -= min_y
        anchors[:, 2] -= min_x
        anchors[:, 3] -= min_y
        for i, anchor in enumerate(anchors):
            anchor = list(six.moves.map(int, anchor))
            # One image per anchor, plus the accumulating overview canvas.
            _canvas = np.zeros(
                (int(max_y - min_y) + 1,
                 int(max_x - min_x) + 1), dtype=np.uint8)
            cv.rectangle(
                _canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
            cv.rectangle(
                canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
            cv.imwrite('tests/anchors_inside_{}.png'.format(i), _canvas)
        # FIX: dropped a no-op ``.format(i)`` on this placeholder-free
        # filename (it returned the string unchanged).
        cv.imwrite('tests/anchors_inside.png', canvas)
Example 25
Project: Yugioh-bot   Author: will7200   File: test_nox.py    (license) View Source Project 6 votes vote down vote up
def test_initial_pass_through_compare(self):
        """The start-screen asset must closely match a live screenshot
        (SSIM > .90) and clearly differ from the battle asset (< .90)."""
        original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
        against = self.provider.get_img_from_screen_shot()
        wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

        # convert the images to grayscale (and threshold-mask them) so the
        # comparison is insensitive to colour differences
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
        # initialize the figure
        (score, diff) = compare_ssim(original, against, full=True)
        diff = (diff * 255).astype("uint8")
        self.assertTrue(score > .90, 'If this is less then .90 the initial compare of the app will fail')
        (score, nothing) = compare_ssim(original, wrong, full=True)
        self.assertTrue(score < .90)
        if self.__debug_pictures__:
            # threshold the difference image, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0]
            # loop over the contours
            for c in cnts:
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # show the output images
            diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
            images = ("Original", original), ("Against", against), ("Wrong", wrong)
            self.setup_compare_images(diffs)
            self.setup_compare_images(images)
Example 26
Project: AutomatorX   Author: xiaoyaojjian   File: simple-ide.py    (license) View Source Project 6 votes vote down vote up
def make_mouse_callback(imgs, ref_pt):
    """Return a cv2 mouse callback that lets the user drag out a crop
    rectangle; the endpoints land in *ref_pt* and the annotated frame in
    imgs[1]."""
    state = {'dragging': False}
    original = imgs[0]

    def _click_and_crop(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            # Anchor the selection and start rubber-banding.
            ref_pt[0] = (x, y)
            state['dragging'] = True
            return
        if event == cv2.EVENT_LBUTTONUP:
            # Finalise the selection and draw it on a fresh copy.
            ref_pt[1] = (x, y)
            state['dragging'] = False
            imgs[1] = image = original.copy()
            cv2.rectangle(image, ref_pt[0], ref_pt[1], (0, 255, 0), 2)
            cv2.imshow("image", image)
            return
        if event == cv2.EVENT_MOUSEMOVE and state['dragging']:
            # Live preview while the button is held down.
            imgs[1] = image = original.copy()
            cv2.rectangle(image, ref_pt[0], (x, y), (0, 255, 0), 2)
            cv2.imshow("image", image)
    return _click_and_crop
Example 27
Project: AutomatorX   Author: xiaoyaojjian   File: pixelmatch.py    (license) View Source Project 6 votes vote down vote up
def locate_img(image, template):
    """Template-match *template* inside *image*, draw a box at the best
    match and write debug images to image/.

    Relies on a module-level ``method`` (a cv2.TM_* matching constant).
    """
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print res
    print res.shape
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print cv2.minMaxLoc(res)
    # For SQDIFF methods the best match is the minimum; otherwise maximum.
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1]+h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
Example 28
Project: traffic-light-detection   Author: ranveeraggarwal   File: extract_signals.py    (license) View Source Project 6 votes vote down vote up
def click_and_crop(event, x, y, flags, param):
	"""Mouse callback collecting pairs of corner points in the global
	``refPt``; previews the rectangle while dragging and draws the final
	box on the global ``image`` on release."""
	# grab references to the global variables
	global refPt, cropping, i
 
	# if the left mouse button was clicked, record the starting
	# (x, y) coordinates and indicate that cropping is being
	# performed
	if event == cv2.EVENT_LBUTTONDOWN:
		if refPt == []:
			refPt = [(x, y)]
		else:
			refPt.append((x,y))
		cropping = True
		i += 1

	# live preview on a scratch copy while the mouse moves
	if event == cv2.EVENT_MOUSEMOVE and cropping:
		image2 = image.copy()
		cv2.rectangle(image2, refPt[2*i-2], (x,y), (0,255,0), 2)
		cv2.imshow("image",image2)
 
	# check to see if the left mouse button was released
	elif event == cv2.EVENT_LBUTTONUP:
		# record the ending (x, y) coordinates and indicate that
		# the cropping operation is finished
		refPt.append((x, y))
		cropping = False
 
		# draw a rectangle around the region of interest
		cv2.rectangle(image, refPt[2*i-2], refPt[2*i-1], (0, 255, 0), 2)
		# cv2.rectangle(image2, refPt[2*i-2], refPt[2*i-1], (0, 255, 0), 2)
		cv2.imshow("image", image)

# construct the argument parser and parse the arguments 
Example 29
Project: text_detection   Author: hanguyen86   File: text.py    (license) View Source Project 6 votes vote down vote up
def showRegions(self):
        """Return a copy of the original image with every detected text
        region outlined: a 2-px green box with a 1-px blue box on top."""
        canvas = self.origin_image.copy()
        for idx in range(0, np.shape(self.regions)[0]):
            box = self.regions[idx]
            top_left = (box[0], box[1])
            bottom_right = (box[0] + box[2], box[1] + box[3])
            cv2.rectangle(canvas, top_left, bottom_right, (0, 255, 0), 2)
            cv2.rectangle(canvas, top_left, bottom_right, (255, 0, 0), 1)
        return canvas
    
#--------------------------------------------------------
#--------------------------------------------------------
# Class provide an interface to perform OCR 
Example 30
Project: Gender   Author: rabeter   File: videoFind.py    (license) View Source Project 6 votes vote down vote up
def draw_rects(img, rects, color):
    """Draw a bounding box and a predicted gender label for each face.

    :param img: BGR image; annotated in place
    :param rects: iterable of (x, y, w, h) face rectangles
    :param color: BGR border color for the boxes
    :return: None
    """
    for x, y, w, h in rects:
        # Bug fix: NumPy images index as [row, col] == [y, x]; the
        # original sliced [x:x+w, y:y+h] and cropped the wrong region.
        face = img[y:y+h, x:x+w]
        face = cv2.resize(face, (224, 224))
        if gender.predict(face) == 1:
            text = "Male"
        else:
            text = "Female"
        # Bug fix: the bottom-right corner is (x+w, y+h); the original
        # passed the size (w, h) as if it were a coordinate.
        cv2.rectangle(img, (x, y), (x+w, y+h), color, 2)
        cv2.putText(img, text, (x, h), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (255, 255, 255), lineType=cv2.LINE_AA)
Example 31
Project: object-detection-with-deep-learning   Author: neerajdixit   File: searchObject.py    (license) View Source Project 6 votes vote down vote up
def draw_labeled_bboxes(img, labels):
    """Outline every labeled object on ``img`` with a blue 6-px box.

    ``labels`` is a (label_map, count) pair as produced by
    scipy.ndimage.label; each region is boxed by the min/max extent of
    its pixels.
    """
    label_map, num_objects = labels[0], labels[1]
    for obj_id in range(1, num_objects + 1):
        # Pixel coordinates belonging to this object.
        rows, cols = (label_map == obj_id).nonzero()
        ys = np.array(rows)
        xs = np.array(cols)
        top_left = (np.min(xs), np.min(ys))
        bottom_right = (np.max(xs), np.max(ys))
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 6)
    return img
Example 32
Project: party-pi   Author: JustinShenk   File: play.py    (license) View Source Project 6 votes vote down vote up
def draw_countdown(self, frame):
        """Overlay a translucent colored panel on the bottom fifth of
        ``frame`` and draw the current countdown digit on it."""
        # Digit slides right-to-left as the countdown decreases.
        x_offset = 1 + self.countdown
        digit_x = int(self.screenwidth - (self.screenwidth / 5) * x_offset)
        self.overlay = frame.copy()
        panel_top = int(self.screenheight * (4. / 5))
        cv2.rectangle(self.overlay, (0, panel_top),
                      (self.screenwidth, self.screenheight), (224, 23, 101), -1)
        # Blend the panel into the frame in place.
        cv2.addWeighted(self.overlay, OPACITY, frame, 1 - OPACITY, 0, frame)
        digit_y = int((self.screenheight * 7. / 8) + 20)
        draw_text((digit_x, digit_y), frame, str(self.countdown))
        return frame
Example 33
Project: KAGGLE_CERVICAL_CANCER_2017   Author: ZFTurbo   File: a03_augmentation.py    (license) View Source Project 6 votes vote down vote up
def random_augment_image(image, row):
    """Apply a random augmentation chain: crop-or-perspective, random
    rotation, optional vertical mirror, optional 90-degree turns, then
    lighting change and blur.

    The random.* calls are made in a fixed order so results are
    reproducible under a seeded RNG.
    """
    # Either a random crop or a random perspective warp — never both.
    if random.randint(0, 1) == 0:
        image = return_random_crop(image, row)
    else:
        image = return_random_perspective(image, row)
    image = random_rotate(image)

    # Mirror plus quarter-turns covers all 8 dihedral configurations.
    if random.randint(0, 1) != 0:
        image = image[::-1, :, :]
    quarter_turns = random.randint(0, 3)
    if quarter_turns != 0:
        image = np.rot90(image, k=quarter_turns)

    image = lightning_change(image)
    image = blur_image(image)

    return image
Example 34
Project: svm-street-detector   Author: morris-frank   File: grabcut.py    (license) View Source Project 6 votes vote down vote up
def grabcutbb(im, bbv):
    """Run GrabCut seeded by labelled bounding boxes.

    :param im: BGR image
    :param bbv: iterable of (x1, y1, x2, y2, is_foreground) boxes
    :return: uint8 mask where 1 marks (probable) foreground pixels
    """
    mask = np.full(im.shape[:2], cv2.GC_PR_BGD, np.uint8)

    # Paint each box into the mask as a hard foreground/background seed.
    for bb in bbv:
        if bb[4]:
            cv2.rectangle(mask, (bb[0], bb[1]), (bb[2], bb[3]), int(cv2.GC_FGD), -1)
        else:
            cv2.rectangle(mask, (bb[0], bb[1]), (bb[2], bb[3]), int(cv2.GC_BGD), -1)

    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)

    rows, cols = im.shape[:2]
    # Lower half of the image. Bug fix: use // so the rect stays an
    # integer tuple on Python 3 (plain / would yield a float).
    rect = (0, rows // 2, cols, rows)

    cv2.grabCut(im, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)

    # Collapse the 4-state GrabCut mask to binary: BGD/PR_BGD -> 0.
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')

    return mask2
Example 35
Project: PyHack   Author: lanxia   File: picCarver.py    (license) View Source Project 6 votes vote down vote up
def faceDetect(path, fileName):
    """Detect faces in the image at ``path``; draw boxes and write an
    annotated copy into the faces directory.

    :param path: path of the image to scan
    :param fileName: name used for the saved annotated image
    :return: True if at least one face was found, else False
    """
    # Bug fix: cv2 has no ``read`` — images are loaded with ``imread``.
    img = cv2.imread(path)
    cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    # Bug fix: the flag is the single constant CV_HAAR_SCALE_IMAGE; the
    # original split it into two arguments with a stray comma.
    rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20, 20))

    if len(rects) == 0:
        return False

    # Convert (x, y, w, h) rows to (x1, y1, x2, y2) in place.
    rects[:, 2:] += rects[:, :2]

    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)

    # NOTE(review): facesDirectory and pcapFile are module-level globals
    # defined elsewhere in the original project.
    cv2.imwrite("%s/%s-%s" % (facesDirectory, pcapFile, fileName), img)

    return True
Example 36
Project: AlphaLogo   Author: gigaflw   File: MMCQ.py    (license) View Source Project 6 votes vote down vote up
def demo():
    """Show each dataset .jpg next to its MMCQ color palette swatches."""
    import cv2

    dataset_dir = "../../static/dataset"

    for fname in os.listdir(dataset_dir):
        if not fname.endswith('.jpg'):
            continue

        im = cv2.imread(os.path.join(dataset_dir, fname))
        palette = MMCQ(im, color_level=8, slots=8)
        colors, weights = zip(*palette)

        rows, cols, _ = im.shape

        # White canvas: the image plus a 200-px strip for the swatches.
        canvas = np.zeros((rows, cols + 200, 3), dtype=np.uint8) + 255
        canvas[:rows, :cols, :] = im

        # Outline the swatch area, then fill one 20-px band per color.
        cv2.rectangle(canvas, (cols + 9, 29), (cols + 190, 190), (0, 0, 0))
        for idx, color in enumerate(colors):
            canvas[idx * 20 + 30:(idx + 1) * 20 + 30, cols + 10:cols + 190, :] = [int(ch) for ch in color]

        cv2.imshow("test", canvas)

        cv2.waitKey(0)
Example 37
Project: conta-bolas   Author: ocarneiro   File: new_approach.py    (license) View Source Project 6 votes vote down vote up
def draw_contours(self):
        """Box every sufficiently large object found in the current mask.

        findContours mutates its input, so the mask is copied first;
        only contours whose area exceeds ``self.threshold`` are drawn
        (yellow, 2 px) onto ``self.display``.
        """
        contours, _ = cv2.findContours(self.mask.copy(),
                                       cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            if cv2.contourArea(contour) <= self.threshold:
                continue  # skip small blobs / noise
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(self.display, (x, y), (x + w, y + h),
                          (0, 255, 255), 2)
Example 38
Project: conta-bolas   Author: ocarneiro   File: new_approach.py    (license) View Source Project 6 votes vote down vote up
def draw_sliders(self, image):
        """Render every slider plus two filled swatches showing the
        current min/max HSV thresholds converted to BGR."""
        for _, slider in self.sliders.iteritems():
            self.draw_slider(slider, image)

        # Left swatch (0-60 px): the minimum threshold color.
        lo_bgr = self.single_hsv2bgr(self.get_hsv_color('min'))
        cv2.rectangle(image, (0, 0), (60, 20), lo_bgr, FILLED)

        # Right swatch (61-120 px): the maximum threshold color.
        hi_bgr = self.single_hsv2bgr(self.get_hsv_color('max'))
        cv2.rectangle(image, (61, 0), (120, 20), hi_bgr, FILLED)
Example 39
Project: CanLauncher   Author: hazenhamather   File: PlaystationEye.py    (license) View Source Project 6 votes vote down vote up
def aimToFace():
	while 1:
	    ret, frame = cap.read()
    	gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    	faces = faceCascade.detectMultiScale(
        	gray,
        	scaleFactor = 1.3,
        	minNeighbors = 5
    	)
    	for (x,y,w,h) in faces:
        	cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        	distance = 146.645*math.exp(-7.207e-3*w);
        	# print distance
        	if x < (halfScreen - 1.5*w):
        		#click servo right
        		print "Pan Right"
        	elif x > (halfScreen + 1.5*w):
        		#Click servo left
        		print "Pan Left"
        	else:
        		targetConfirmed = confirmTarget()
        		if targetConfirmed:
        			Launch(distance)
        		else:
        			break 
Example 40
Project: yolo2-pytorch   Author: longcw   File: yolo.py    (license) View Source Project 6 votes vote down vote up
def draw_detection(im, bboxes, scores, cls_inds, cfg, thr=0.3):
    """Return a copy of ``im`` with each detection scoring above ``thr``
    drawn as a class-colored box plus a "label: score" caption."""
    palette = cfg.colors
    names = cfg.label_names

    canvas = np.copy(im)
    h, w, _ = canvas.shape
    # Line thickness scales with image size; invariant, so hoisted.
    thick = int((h + w) / 300)
    for i, box in enumerate(bboxes):
        if scores[i] < thr:
            continue
        cls = cls_inds[i]
        cv2.rectangle(canvas,
                      (box[0], box[1]), (box[2], box[3]),
                      palette[cls], thick)
        caption = '%s: %.3f' % (names[cls], scores[i])
        cv2.putText(canvas, caption, (box[0], box[1] - 12),
                    0, 1e-3 * h, palette[cls], thick // 3)

    return canvas
Example 41
Project: apparent-age-gender-classification   Author: danielyou0230   File: Modules.py    (license) View Source Project 6 votes vote down vote up
def facial_landmark_detection(image, detector, predictor, file):
	"""Return the bounding box of the largest dlib-detected face.

	:param image: BGR image to scan
	:param detector: dlib frontal face detector
	:param predictor: dlib shape predictor (landmarks are computed but
		only the box is returned)
	:param file: filename passed through to ``rect_to_bb``
	:return: (found, [x, y, w, h]); ``found`` is False and the list is
		empty when no face was detected
	"""
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	img_size = gray.shape

	best_box = list()
	best_area = 0
	found = False
	# NOTE: the original shadowed the detection list with the loop
	# variable; renamed here for clarity, behavior unchanged.
	for idx, detection in enumerate(detector(gray, 1)):
		shape = predictor(gray, detection)
		shape = shape_to_np(shape)
		(x, y, w, h) = rect_to_bb(detection, img_size, file)

		# Keep only the largest face seen so far.
		if (w * h) > best_area:
			best_area = w * h
			best_box = [x, y, w, h]
			found = True

	return found, best_box
Example 42
Project: apparent-age-gender-classification   Author: danielyou0230   File: Modules.py    (license) View Source Project 6 votes vote down vote up
def debug_face_classifier(file):
	face_cascade = cv2.CascadeClassifier(xml_face_classifier)
	image = cv2.imread(file)
	
	image = imutils.resize(image, width=500)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	faces = face_cascade.detectMultiScale(image, 1.07, 3)
	print faces
	for (x, y, w, h) in faces:
		cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
		#roi_gray = gray[y:y+h, x:x+w]
		#roi_color = image[y:y+h, x:x+w]

	cv2.imshow('Image', image)
	cv2.waitKey(0)
	cv2.destroyAllWindows() 
Example 43
Project: apparent-age-gender-classification   Author: danielyou0230   File: Modules.py    (license) View Source Project 6 votes vote down vote up
def debug_face_landmark(file, output=False, output_name='output'):
	"""Show (and optionally save) dlib facial landmarks for an image.

	:param file: path to the input image
	:param output: when True, also write the annotated image to disk
	:param output_name: basename (without extension) for the saved file
	"""
	detector = dlib.get_frontal_face_detector()
	predictor = dlib.shape_predictor(dat_face_landmark)

	image = cv2.imread(file)
	image = imutils.resize(image, width=500)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	img_size = gray.shape

	faces = detector(gray, 1)
	for (i, itr_face) in enumerate(faces):
		shape = predictor(gray, itr_face)
		shape = shape_to_np(shape)
		# convert dlib's rectangle to a OpenCV-style bounding box
		# [i.e., (x, y, w, h)], then draw the face bounding box
		(x, y, w, h) = rect_to_bb(itr_face, img_size, file)

		cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
		# show the face number
		cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

		# loop over the (x, y)-coordinates for the facial landmarks
		# and draw them on the image
		for (x, y) in shape:
			cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

	# show the output image with the face detections + facial landmarks
	cv2.imshow(file, image)
	cv2.waitKey(0)
	if output:
		# Bug fix: ``output_name + 1`` raised TypeError (str + int);
		# the file is named after output_name directly.
		cv2.imwrite("../" + str(output_name) + '.jpg', image)
	cv2.destroyAllWindows()
Example 44
Project: FacePoseEstimation   Author: abhisharma7   File: facepose_detection.py    (license) View Source Project 6 votes vote down vote up
def image(self):
        """Run the full detector stack (HOG full-body, Haar face/eye/
        smile, dlib) on ``self.image_path`` and display the result."""
        img = cv2.imread(self.image_path)
        img = imutils.resize(img, width=min(800, img.shape[1]))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # Full bodies: green boxes.
        for (x, y, w, h) in self.HogDescriptor(gray):
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # Faces: blue boxes; eyes and smiles are drawn inside each ROI.
        for (x, y, w, h) in self.haar_facedetection(gray):
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            for (ex, ey, ew, eh) in self.haar_eyedetection(roi_gray):
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            for (sx, sy, sw, sh) in self.haar_smilecascade(roi_gray):
                cv2.rectangle(roi_color, (sx, sy), (sx + sw, sy + sh), (0, 255, 0), 2)

        img = self.dlib_function(img)
        cv2.imshow('img', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example 45
Project: SOLAMS   Author: aishmittal   File: register.py    (license) View Source Project 6 votes vote down vote up
def display_video_stream(self):
        """Grab one camera frame, box detected faces, and push the
        mirrored RGB frame into the Qt image label."""
        _, frame = self.capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(40, 40),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )

        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        frame = cv2.cvtColor(frame, cv2.cv.CV_BGR2RGB)
        frame = cv2.flip(frame, 1)  # mirror for a selfie-style view
        image = QImage(frame, frame.shape[1], frame.shape[0],
                       frame.strides[0], QImage.Format_RGB888)

        self.imageLabel.setPixmap(QPixmap.fromImage(image))
Example 46
Project: Kaggle_the_Nature_Conservancy_Fisheries_Monitoring   Author: Sapphirine   File: detection_crop_window.py    (license) View Source Project 6 votes vote down vote up
def click_and_crop(event, x, y, flags, param):
    """Mouse callback: left click anchors a crop region; right click
    closes it and draws the rectangle on the global ``image``."""
    global refPt, cropping
    if event == cv2.EVENT_LBUTTONDOWN:
        # Left press: start a new selection at this corner.
        refPt = [(x, y)]
        cropping = True
    elif event == cv2.EVENT_RBUTTONDOWN:
        # Right press: record the opposite corner and show the box.
        refPt.append((x, y))
        cropping = False
        cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
        cv2.imshow("image", image)


# <=====================================================================================>
# The following code crops the selected window and records the position
# of the region of interest, after the fishing-boat classification step
Example 47
Project: FPN   Author: xmyqsh   File: train.py    (license) View Source Project 6 votes vote down vote up
def _draw_boxes_to_image(im, res):
    """Draw every class's detections on a copy of ``im``.

    :param im: BGR image
    :param res: list of dicts with keys 'class' and 'dets', where each
                dets row is (x1, y1, x2, y2, score) and 'dets' may be None
    :return: annotated copy of the image
    """
    colors = [(86, 0, 240), (173, 225, 61), (54, 137, 255),\
              (151, 0, 255), (243, 223, 48), (0, 117, 255),\
              (58, 184, 14), (86, 67, 140), (121, 82, 6),\
              (174, 29, 128), (115, 154, 81), (86, 255, 234)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    image = np.copy(im)
    cnt = 0
    for ind, r in enumerate(res):
        if r['dets'] is None: continue
        dets = r['dets']
        for i in range(0, dets.shape[0]):
            (x1, y1, x2, y2, score) = dets[i, :]
            cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), colors[ind % len(colors)], 2)
            text = '{:s} {:.2f}'.format(r['class'], score)
            # Bug fix: putText needs integer coordinates; the original
            # passed the raw (possibly float) x1, y1.
            cv2.putText(image, text, (int(x1), int(y1)), font, 0.6, colors[ind % len(colors)], 1)
            cnt = (cnt + 1)
    return image
Example 48
Project: robik   Author: RecunchoMaker   File: scanner.py    (GNU General Public License v2.0) View Source Project 5 votes vote down vote up
def draw_osd(self, frame):
        """Draw the scanner overlay: one square per ROI, annotated with
        its mean color reading when one is available.

        Appends each reading to ``self.colors``; if fewer than 9 ROIs
        produced a reading, triggers ``buscar_cubo`` and snapshots the
        readings into ``self.colorsant``.
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        self.colors = []
        for x, y in self.roi:
            ff = frame.copy()
            roi = ff[y:y + self.roitamano, x:x + self.roitamano]
            roi_color = self.get_color_medio(roi, x, y)
            # Bug fix: compare with ``is None`` — ``== None`` is
            # elementwise on array-like values and non-idiomatic.
            if roi_color is None:
                # No reading: thin blue outline.
                cv2.rectangle(frame, (x, y),
                              (x + self.roitamano, y + self.roitamano),
                              (255, 0, 0), 1)
            else:
                a, b, c = int(roi_color[0]), int(roi_color[1]), int(roi_color[2])
                cv2.putText(frame, str(a), (x, y), font, 0.3, (0, 0, 0), 1)
                self.colors.append(a)
                # Reading available: thick green outline.
                cv2.rectangle(frame, (x, y),
                              (x + self.roitamano, y + self.roitamano),
                              (0, 255, 0), 2)
        if len(self.colors) < 9:
            self.buscar_cubo()
        self.colorsant = list(self.colors)
Example 49
Project: yolo_tensorflow   Author: hizhangp   File: test.py    (MIT License) View Source Project 5 votes vote down vote up
def draw_result(self, img, result):
        """Draw YOLO detections on ``img``: a green box, a gray label
        background strip, and "class : confidence" text per row.

        Each ``result`` row is (class_name, cx, cy, w, h, score) with a
        center-based box encoding.
        """
        for det in result:
            cx, cy = int(det[1]), int(det[2])
            half_w, half_h = int(det[3] / 2), int(det[4] / 2)
            cv2.rectangle(img, (cx - half_w, cy - half_h),
                          (cx + half_w, cy + half_h), (0, 255, 0), 2)
            # Gray strip above the box to back the label text.
            cv2.rectangle(img, (cx - half_w, cy - half_h - 20),
                          (cx + half_w, cy - half_h), (125, 125, 125), -1)
            cv2.putText(img, det[0] + ' : %.2f' % det[5],
                        (cx - half_w + 5, cy - half_h - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.CV_AA)
Example 50
Project: pybot   Author: spillai   File: uw_rgbd.py    (license) View Source Project 5 votes vote down vote up
def annotate_bboxes(vis, bboxes, target_names):
        """Annotate ``vis`` with one titled bounding box per target.

        Titles are prettified (underscores -> spaces, Title Case); every
        box uses the same green color.
        """
        green = (0, 200, 0)
        for box, name in izip(bboxes, target_names):
            annotate_bbox(vis, box.coords, color=green,
                          title=name.title().replace('_', ' '))
        return vis