Python cv2.COLOR_BGR2RGB Examples

The following are 30 code examples showing how to use cv2.COLOR_BGR2RGB. These examples are extracted from open source projects; you can go to the original project or source file by following the link above each example.

You may also want to check out all available functions and classes of the cv2 module.
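
For reference, the basic pattern behind every example below is a single cv2.cvtColor call. A minimal sketch (the image path is just a placeholder): OpenCV loads images in BGR channel order, and cv2.COLOR_BGR2RGB swaps them to RGB.

import cv2

# OpenCV reads images in BGR channel order by default.
img_bgr = cv2.imread('example.jpg')  # placeholder path
# Swap the channels to RGB, e.g. before handing the array to matplotlib or PIL.
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)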

Example 1
Project: object_detector_app   Author: datitran   File: object_detection_multithreading.py   License: MIT License
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))

    fps.stop()
    sess.close() 
Example 2
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    fps = FPS().start()
    while True:
        myprint("updata start ", time.time())
        fps.update()
        myprint("updata end ", time.time())
        # global lock
        # if lock.acquire():
        #    lock.release()

        frame = input_q.get()
        myprint("out queue {} and input que size {} after input_q get".format(output_q.qsize(), input_q.qsize()), time.time())
        myprint("out queue {} and input que size {} after lock release ".format(output_q.qsize(), input_q.qsize()), time.time())
        myprint("face process start", time.time())
        # frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        out_frame = face_process(frame)
        myprint("out queue {} and input que size {}".format(output_q.qsize(), input_q.qsize()), time.time())
        output_q.put(out_frame)
        myprint("out queue {} and input que size {} ".format(output_q.qsize(), input_q.qsize()), time.time())

    fps.stop() 
Example 3
Project: kuzushiji-recognition   Author: see--   File: data.py   License: MIT License
def __getitem__(self, index, to_tensor=True):
    fn = self.image_fns[index]
    img = cv2.cvtColor(cv2.imread(fn, 1), cv2.COLOR_BGR2RGB)

    img, pad_top, pad_left = KuzushijiDataset.pad_to_ratio(img, ratio=1.5)
    h, w = img.shape[:2]
    # print(h / w, pad_left, pad_top)
    assert img.ndim == 3
    scaled_imgs = []
    for scale in self.scales:
      h_scale = int(scale * self.height)
      w_scale = int(scale * self.width)
      simg = cv2.resize(img, (w_scale, h_scale))

      if to_tensor:
        assert simg.ndim == 3, simg.ndim
        simg = simg.transpose((2, 0, 1))
        simg = th.from_numpy(simg.copy())

      scaled_imgs.append(simg)

    return scaled_imgs + [fn] 
Example 4
Project: object_detector_app   Author: datitran   File: object_detection_app.py   License: MIT License
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    fps = FPS().start()
    while True:
        fps.update()
        frame = input_q.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_q.put(detect_objects(frame_rgb, sess, detection_graph))

    fps.stop()
    sess.close() 
Example 5
Project: bjtu_BinocularCameraRecord   Author: anonymouslycn   File: Main.py   License: MIT License
def loop2(self,text,w=1280,h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(6 ,cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') );
        global capnum2
        capnum2 = int(text)
        cap.set(3,w);
        cap.set(4,h);
        global update2
        update2 = 1
        global shotmark2

        while (update2 == 1):
            ret, frame = cap.read() 
            if shotmark2 == 1:
                fn = self.lineEdit.text()
                name = "photo/2_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/2_" + fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark2 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original2_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original2_image.updateImage(cv_img_rgb) 
Example 6
Project: License-Plate-Recognition   Author: wzh191920   File: surface.py   License: MIT License
def get_imgtk(self, img_bgr):
		img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
		im = Image.fromarray(img)
		imgtk = ImageTk.PhotoImage(image=im)
		wide = imgtk.width()
		high = imgtk.height()
		if wide > self.viewwide or high > self.viewhigh:
			wide_factor = self.viewwide / wide
			high_factor = self.viewhigh / high
			factor = min(wide_factor, high_factor)
			wide = int(wide * factor)
			if wide <= 0 : wide = 1
			high = int(high * factor)
			if high <= 0 : high = 1
			im=im.resize((wide, high), Image.ANTIALIAS)
			imgtk = ImageTk.PhotoImage(image=im)
		return imgtk 
Example 7
Project: License-Plate-Recognition   Author: wzh191920   File: surface.py   License: MIT License
def show_roi(self, r, roi, color):
		if r :
			roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
			roi = Image.fromarray(roi)
			self.imgtk_roi = ImageTk.PhotoImage(image=roi)
			self.roi_ctl.configure(image=self.imgtk_roi, state='enable')
			self.r_ctl.configure(text=str(r))
			self.update_time = time.time()
			try:
				c = self.color_transform[color]
				self.color_ctl.configure(text=c[0], background=c[1], state='enable')
			except: 
				self.color_ctl.configure(state='disabled')
		elif self.update_time + 8 < time.time():
			self.roi_ctl.configure(state='disabled')
			self.r_ctl.configure(text="")
			self.color_ctl.configure(state='disabled') 
Example 8
Project: insightface   Author: deepinsight   File: face_model.py   License: MIT License
def get_input(self, face_img):
    ret = self.detector.detect_face(face_img, det_type = self.args.det)
    if ret is None:
      return None
    bbox, points = ret
    if bbox.shape[0]==0:
      return None
    bbox = bbox[0,0:4]
    points = points[0,:].reshape((2,5)).T
    #print(bbox)
    #print(points)
    nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')
    nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
    aligned = np.transpose(nimg, (2,0,1))
    input_blob = np.expand_dims(aligned, axis=0)
    data = mx.nd.array(input_blob)
    db = mx.io.DataBatch(data=(data,))
    return db 
Example 9
Project: insightface   Author: deepinsight   File: face_genderage.py   License: MIT License
def get(self, img):
        assert self.param_file and self.model
        assert img.shape[2]==3 and img.shape[0:2]==self.image_size
        data = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        data = np.transpose(data, (2,0,1))
        data = np.expand_dims(data, axis=0)
        data = mx.nd.array(data)
        db = mx.io.DataBatch(data=(data,))
        self.model.forward(db, is_train=False)
        ret = self.model.get_outputs()[0].asnumpy()
        g = ret[:,0:2].flatten()
        gender = np.argmax(g)
        a = ret[:,2:202].reshape( (100,2) )
        a = np.argmax(a, axis=1)
        age = int(sum(a))
        return gender, age 
Example 10
Project: rcan-tensorflow   Author: kozistr   File: dataset.py   License: MIT License
def convert_to_img(self):
        def to_img(i):
            # Reshape the i-th flat row back into an HxWxC image and swap its channels
            # before writing (self.img_shape is an assumed attribute; the original call
            # passed only the conversion flag to cv2.imwrite).
            img = self.raw_data[i].reshape(self.img_shape)
            cv2.imwrite('imgHQ%05d.png' % i, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            return True

        raw_data_shape = self.raw_data.shape  # (N, H * W * C)

        try:
            assert os.path.exists(self.save_file_name)
        except AssertionError:
            print("[-] There's no %s :(" % self.save_file_name)
            print("[*] Make directory at %s... " % self.save_file_name)
            os.mkdir(self.save_file_name)

        ii = [i for i in range(raw_data_shape[0])]

        pool = Pool(self.n_threads)
        print(pool.map(to_img, ii)) 
Example 11
Project: yolo2-pytorch   Author: ruiminshen   File: eval.py   License: GNU Lesser General Public License v3.0
def debug_visualize(self, data_yx_min, data_yx_max, yx_min, yx_max, c, tp, path):
        canvas = cv2.imread(path)
        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
        size = np.reshape(np.array(canvas.shape[:2], np.float32), [1, 2])
        data_yx_min, data_yx_max, yx_min, yx_max = (np.reshape(t.cpu().numpy(), [-1, 2]) * size for t in (data_yx_min, data_yx_max, yx_min, yx_max))
        canvas = self.draw_bbox(canvas, data_yx_min, data_yx_max, colors=['g'])
        canvas = self.draw_bbox(canvas, *(a[tp] for a in (yx_min, yx_max)), colors=['w'])
        fp = ~tp
        canvas = self.draw_bbox(canvas, *(a[fp] for a in (yx_min, yx_max)), colors=['k'])
        fig = plt.figure()
        ax = fig.gca()
        ax.imshow(canvas)
        ax.set_title('tp=%d, fp=%d' % (np.sum(tp), np.sum(fp)))
        fig.canvas.set_window_title(self.category[c] + ': ' + path)
        plt.show()
        plt.close(fig) 
Example 12
Project: yolo_tensorflow   Author: hizhangp   File: test.py   License: MIT License
def detect(self, img):
        img_h, img_w, _ = img.shape
        inputs = cv2.resize(img, (self.image_size, self.image_size))
        inputs = cv2.cvtColor(inputs, cv2.COLOR_BGR2RGB).astype(np.float32)
        inputs = (inputs / 255.0) * 2.0 - 1.0
        inputs = np.reshape(inputs, (1, self.image_size, self.image_size, 3))

        result = self.detect_from_cvmat(inputs)[0]

        for i in range(len(result)):
            result[i][1] *= (1.0 * img_w / self.image_size)
            result[i][2] *= (1.0 * img_h / self.image_size)
            result[i][3] *= (1.0 * img_w / self.image_size)
            result[i][4] *= (1.0 * img_h / self.image_size)

        return result 
Example 13
Project: netvlad_tf_open   Author: uzh-rpg   File: image_descriptor.py   License: MIT License
def describeAllJpegsInPath(self, path, batch_size, verbose=False):
        ''' returns a list of descriptors '''
        jpeg_paths = sorted(glob.glob(os.path.join(path, '*.jpg')))
        descs = []
        for batch_offset in range(0, len(jpeg_paths), batch_size):
            images = []
            for i in range(batch_offset, batch_offset + batch_size):
                if i == len(jpeg_paths):
                    break
                if verbose:
                    print('%d/%d' % (i, len(jpeg_paths)))
                if self.is_grayscale:
                    image = cv2.imread(jpeg_paths[i], cv2.IMREAD_GRAYSCALE)
                    images.append(np.expand_dims(
                            np.expand_dims(image, axis=0), axis=-1))
                else:
                    image = cv2.imread(jpeg_paths[i])
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    images.append(np.expand_dims(image, axis=0))
            batch = np.concatenate(images, 0)
            descs = descs + list(self.sess.run(
                    self.net_out, feed_dict={self.tf_batch: batch}))
        return descs 
Example 14
Project: KAIR   Author: cszn   File: utils_image.py   License: MIT License
def imread_uint(path, n_channels=3):
    #  input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img


# --------------------------------------------
# matlab's imwrite
# -------------------------------------------- 
Example 15
Project: TripletLossFace   Author: aangfanboy   File: face_recognition_tester.py   License: MIT License
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.detect_which(path, get_face)

		for (confidance, who), frame in zip(min_im, all_frames):
			color = self.colors[who]
			x1, x2, y1, y2 = frame
			cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
			cv2.putText(image, f"{who}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA) # -{round(float(confidance), 2)}

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(0)

		return image 
Example 16
Project: TripletLossFace   Author: aangfanboy   File: main_engine.py   License: MIT License
def show_who_in_image(self, path, get_face: bool = True, show: bool = True, turn_rgb: bool = True):
		min_im, image, all_frames = self.index_image(path, get_face)

		for (confidance, who), frame in zip(min_im, all_frames):
			try:
				color = self.colors[str(who)]
				x1, x2, y1, y2 = frame
				cv2.rectangle(image, (x1, y1), (x2, y2), color, 4)
				cv2.putText(image, f"id: {str(who)}- conf:{abs(round(float(confidance), 2))}", (x1, y1-10), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3, cv2.LINE_AA) # -{round(float(confidance), 2)}
			except KeyError:
				continue

		if turn_rgb:
			image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

		if show:
			cv2.imshow("a", image)
			cv2.waitKey(1)

		return image, min_im, all_frames 
Example 17
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: gradcam.py   License: Apache License 2.0
def get_img_heatmap(orig_img, activation_map):
    """Draw a heatmap on top of the original image using intensities from activation_map"""
    heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
    img_heatmap = np.float32(heatmap) + np.float32(orig_img)
    img_heatmap = img_heatmap / np.max(img_heatmap)
    img_heatmap *= 255
    return img_heatmap.astype(int) 
Example 18
def read_image_cv(path):
    return cv2.resize(cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB), image_sz)

# synset.txt contains the names of Imagenet categories
# Load the file to memory and create a helper method to query category_index -> category name 
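
A minimal sketch of that helper, assuming synset.txt holds one category name per line (the function name here is hypothetical):

# Load synset.txt once; each line holds one Imagenet category name.
with open('synset.txt', 'r') as f:
    categories = [line.strip() for line in f]

def get_category_name(category_index):
    # Map a predicted class index to its human-readable category name.
    return categories[category_index]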
Example 19
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detector.py   License: Apache License 2.0
def detect_and_visualize(self, im_list, root_dir=None, extension=None,
                             classes=[], thresh=0.6, show_timer=False):
        """
        wrapper for im_detect and visualize_detection

        Parameters:
        ----------
        im_list : list of str or str
            image path or list of image paths
        root_dir : str or None
            directory of input images, optional if image path already
            has full directory information
        extension : str or None
            image extension, eg. ".jpg", optional

        Returns:
        ----------

        """
        dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer)
        if not isinstance(im_list, list):
            im_list = [im_list]
        assert len(dets) == len(im_list)
        for k, det in enumerate(dets):
            img = cv2.imread(im_list[k])
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            self.visualize_detection(img, det, classes, thresh) 
Example 20
def read_image(img_path, image_dims=None, mean=None):
    """
    Reads an image from file path or URL, optionally resizing to given image dimensions and
    subtracting mean.
    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean file to subtract, or None
    :return: loaded image, in RGB format
    """

    import urllib

    filename = img_path.split("/")[-1]
    if img_path.startswith('http'):
        urllib.urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2) # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean # subtract mean

    return img 
Example 21
def read_image(img_path, image_dims=None, mean=None):
    """
    Reads an image from file path or URL, optionally resizing to given image dimensions and
    subtracting mean.
    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean file to subtract, or None
    :return: loaded image, in RGB format
    """

    import urllib

    filename = img_path.split("/")[-1]
    if img_path.startswith('http'):
        urllib.urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2) # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean # subtract mean

    return img 
Example 22
Project: OpenCV-Computer-Vision-Projects-with-Python   Author: PacktPublishing   File: gui.py   License: MIT License
def _on_next_frame(self, event):
        """
            This method captures a new frame from the camera (or capture
            device) and sends an RGB version to the method self.process_frame.
            The latter will then apply task-specific post-processing and return
            an image to be displayed.
        """
        success, frame = self._acquire_frame()
        if success:
            # process current frame
            frame = self._process_frame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

            # update buffer and paint (EVT_PAINT triggered by Refresh)
            self.bmp.CopyFromBuffer(frame)
            self.Refresh(eraseBackground=False) 
Example 23
def describe(self, image, mask):
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        hist = cv.calcHist([image], [0, 1, 2], mask,
                           self.bins, [0, 256, 0, 256, 0, 256])
        hist = hist / np.sum(hist)

        # 512 dimensions
        return hist.flatten() 
Example 24
Project: EdgeRealtimeVideoAnalytics   Author: RedisGears   File: server.py   License: Apache License 2.0
def get_last(self):
        ''' Gets latest from camera and model '''
        p = self.conn.pipeline()
        p.xrevrange(self.camera, count=1)  # Latest frame
        p.xrevrange(self.boxes, count=1)   # Latest boxes
        cmsg, bmsg = p.execute()
        if cmsg:
            last_id = cmsg[0][0].decode('utf-8')
            label = f'{self.camera}:{last_id}'
            data = io.BytesIO(cmsg[0][1][self.field])
            img = Image.open(data)
            if bmsg:
                boxes = np.fromstring(bmsg[0][1]['boxes'.encode('utf-8')][1:-1], sep=',')
                label += ' people: {}'.format(bmsg[0][1]['people'.encode('utf-8')].decode('utf-8'))
                for box in range(int(bmsg[0][1]['people'.encode('utf-8')])):  # Draw boxes
                    x1 = boxes[box*4]
                    y1 = boxes[box*4+1]
                    x2 = boxes[box*4+2]
                    y2 = boxes[box*4+3]
                    draw = ImageDraw.Draw(img)
                    draw.rectangle(((x1, y1), (x2, y2)), width=5, outline='red')
            arr = np.array(img)
            arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
            cv2.putText(arr, label, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 1, cv2.LINE_AA)
            ret, img = cv2.imencode('.jpg', arr)
            return img.tobytes()
        else:
            # TODO: put a 'we're experiencing technical difficulties' image
            pass 
Example 25
Project: iAI   Author: aimuch   File: call_engine_to_infer_all.py   License: MIT License
def load_image(img_path, net_input_shape):
    # Use the same pre-processing as training
    img = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), NET_INPUT_SHAPE)
    img = (img-128.)/128.

    # Fixed usage
    img = np.transpose(img, (2, 0, 1))  # convert to CHW order; pay special attention here
    return np.ascontiguousarray(img, dtype=np.float32)  # avoid "ndarray is not contiguous" error 
Example 26
Project: iAI   Author: aimuch   File: call_engine_to_infer_one.py   License: MIT License
def load_image(img_path, net_input_shape):
    # Use the same pre-processing as training
    img = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), NET_INPUT_SHAPE)
    img = (img-128.)/128.

    # Fixed usage
    img = np.transpose(img, (2, 0, 1))  # convert to CHW order; pay special attention here
    return np.ascontiguousarray(img, dtype=np.float32)  # avoid "ndarray is not contiguous" error 
Example 27
Project: iAI   Author: aimuch   File: tf_to_trt.py   License: MIT License
def load_image(img_path, net_input_shape):
    # Use the same pre-processing as training
    img = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), NET_INPUT_SHAPE)
    img = (img-128.)/128.

    # Fixed usage
    img = np.transpose(img, (2, 0, 1))  # convert to CHW order; pay special attention here
    return np.ascontiguousarray(img, dtype=np.float32)  # avoid "ndarray is not contiguous" error 
Example 28
Project: bjtu_BinocularCameraRecord   Author: anonymouslycn   File: Main.py   License: MIT License
def loop1(self,text,w=1280,h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(6 ,cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') );
        global capnum1
        capnum1 = int(text)
        cap.set(3,w);
        cap.set(4,h);
        global update1
        update1 = 1
        global shotmark1
        


        while (update1 == 1):
            ret, frame = cap.read() 
            if shotmark1 == 1:
                fn = self.lineEdit.text()
                name = "photo/1_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/1_"+fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark1 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original1_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original1_image.updateImage(cv_img_rgb) 
Example 29
Project: RacingRobot   Author: sergionr2   File: picamera_threads.py   License: MIT License
def extractInfo(self):
        # times = []
        try:
            while not self.exit:
                try:
                    frame = self.frame_queue.get(block=True, timeout=1)
                except queue.Empty:
                    print("Frame queue empty")
                    continue
                # 1 ms per loop
                # TODO: check that this conversion is not needed
                # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if self.debug:
                    self.out_queue.put(item=frame, block=False)
                else:
                    try:
                        # start_time = time.time()
                        turn_percent, centroids = processImage(frame)
                        # times.append(time.time() - start_time)
                        self.out_queue.put(item=(turn_percent, centroids), block=False)
                    except Exception as e:
                        print("Exception in RBGAnalyser processing image: {}".format(e))
                self.frame_num += 1
        except Exception as e:
            print("Exception in RBGAnalyser after loop: {}".format(e))
        # s_per_loop_image = np.mean(times)
        # print("Image processing: {:.2f}ms per loop | {} fps".format(s_per_loop_image * 1000, int(1 / s_per_loop_image))) 
Example 30
Project: insightface   Author: deepinsight   File: face_model.py   License: MIT License
def get_input(self, face_img):
    ret = self.detector.detect_face(face_img, det_type = self.args.det)
    if ret is None:
      return None
    bbox, points = ret
    if bbox.shape[0]==0:
      return None
    bbox = bbox[0,0:4]
    points = points[0,:].reshape((2,5)).T
    #print(bbox)
    #print(points)
    nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')
    nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
    aligned = np.transpose(nimg, (2,0,1))
    return aligned