Python cv2.COLOR_GRAY2RGB Examples

The following are 30 code examples of cv2.COLOR_GRAY2RGB(), collected from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out all available functions and classes of the cv2 module.
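cv2.COLOR_GRAY2RGB simply replicates the single gray channel into three identical channels; it adds no color information. A minimal sketch of typical usage (the file name 'input.png' is just a placeholder):

import cv2
import numpy as np

# read a single-channel grayscale image (HxW)
gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)

# replicate the gray channel into three identical channels (HxWx3)
rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)

assert rgb.shape == gray.shape + (3,)
assert np.array_equal(rgb[:, :, 0], gray)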
Example #1
Source File: dbscan_based.py    From PythonPilot with Apache License 2.0
def draw(self, dbscan_input_array, dbscan_label, dbscan_label_n):

        # convert array to image
        frame_draw = np.zeros((self.__compress_height, self.__compress_width), np.uint8)
        frame_draw = cv2.cvtColor(frame_draw, cv2.COLOR_GRAY2RGB)
        for i in range(dbscan_input_array.shape[0]):
            if dbscan_label[i] != -1:
                color_th = dbscan_label[i] / dbscan_label_n
                c_r = int(cm.hsv(color_th)[0]*255)
                c_g = int(cm.hsv(color_th)[1]*255)
                c_b = int(cm.hsv(color_th)[2]*255)
                frame_draw = cv2.circle(frame_draw,
                                        (int(dbscan_input_array[i][0]),
                                         int(dbscan_input_array[i][1])),
                                        1, (c_r, c_g, c_b), 1)

        return frame_draw 
Example #2
Source File: imutils.py    From SickZil-Machine with GNU Affero General Public License v3.0
def channel3img(img):
    '''
    If img is a 3-channel image (h,w,3), this is the identity function.
    If img is a grayscale image (h,w), convert it to a 3-channel image.
    If img is a BGRA image, convert it to BGR (TODO: warning required!)
    else return None
    '''
    if len(img.shape) == 2:   # if grayscale image, convert.
        return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    elif len(img.shape) == 3: 
        _,_,c = img.shape
        if c == 3: # BGR(RGB)
            return img
        elif c == 4: # BGRA(RGBA)
            return cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) 
            #NOTE: warning: no alpha!
    #else: None

#---------------------------------------------------------------------------------
# for segmap 
Example #3
Source File: utils_image.py    From KAIR with MIT License
def imread_uint(path, n_channels=3):
    #  input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img


# --------------------------------------------
# matlab's imwrite
# -------------------------------------------- 
Example #4
Source File: cvfunctional.py    From opencv_transforms_torchvision with MIT License
def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.

    Args:
        img (np.ndarray): Image to be converted to grayscale.

    Returns:
        CV Image: Grayscale version of the image.
            if num_output_channels == 1: the returned image is single-channel
            if num_output_channels == 3: the returned image is 3-channel with r == g == b
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))

    if num_output_channels == 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    elif num_output_channels == 3:
        img = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
    else:
        raise ValueError('num_output_channels should be either 1 or 3')

    return img 
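A quick sanity check of the 3-channel branch, assuming cv2, numpy (as np), and the module's _is_numpy_image helper are importable:

rgb = (np.random.rand(4, 4, 3) * 255).astype(np.uint8)
g3 = to_grayscale(rgb, num_output_channels=3)
assert g3.shape == (4, 4, 3)
# GRAY2RGB replicates the single channel, so all three channels match
assert np.array_equal(g3[..., 0], g3[..., 1]) and np.array_equal(g3[..., 1], g3[..., 2])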
Example #5
Source File: features.py    From pyCFTrackers with MIT License
def get_features(self, img, pos, sample_sz, scales):
        feat1 = []
        feat2 = []
        if img.shape[2] == 1:
            img = cv2.cvtColor(img.squeeze(), cv2.COLOR_GRAY2RGB)
        if not isinstance(scales, list) and not isinstance(scales, np.ndarray):
            scales = [scales]
        patches = []
        for scale in scales:
            patch = self._sample_patch(img, pos, sample_sz*scale, sample_sz)
            patch = mx.nd.array(patch / 255., ctx=self._ctx)
            normalized = mx.image.color_normalize(patch,
                                                  mean=mx.nd.array([0.485, 0.456, 0.406], ctx=self._ctx),
                                                  std=mx.nd.array([0.229, 0.224, 0.225], ctx=self._ctx))
            normalized = normalized.transpose((2, 0, 1)).expand_dims(axis=0)
            patches.append(normalized)
        patches = mx.nd.concat(*patches, dim=0)
        f1, f2 = self._forward(patches)
        f1 = self._feature_normalization(f1)
        f2 = self._feature_normalization(f2)
        return f1, f2 
Example #6
Source File: test.py    From sscdnet with MIT License
def display_results(self, t0, t1, mask_pred, mask_gt):

        w, h = self.w_orig, self.h_orig
        t0_disp = cv2.resize(np.transpose(t0.numpy(), (1, 2, 0)).astype(np.uint8), (w, h))
        t1_disp = cv2.resize(np.transpose(t1.numpy(), (1, 2, 0)).astype(np.uint8), (w, h))
        mask_pred_disp = cv2.resize(cv2.cvtColor(mask_pred.numpy().astype(np.uint8), cv2.COLOR_GRAY2RGB), (w, h))
        mask_gt_disp = cv2.resize(cv2.cvtColor(mask_gt.astype(np.uint8), cv2.COLOR_GRAY2RGB), (w, h))

        img_out = np.zeros((h * 2, w * 2, 3), dtype=np.uint8)
        img_out[0:h, 0:w, :] = t0_disp
        img_out[0:h, w:w * 2, :] = t1_disp
        img_out[h:h * 2, 0:w * 1, :] = mask_gt_disp
        img_out[h:h * 2, w * 1:w * 2, :] = mask_pred_disp
        for dn, img in zip(['mask', 'disp'], [mask_pred_disp, img_out]):
            dn_save = os.path.join(self.args.checkpointdir, 'result', dn)
            fn_save = os.path.join(dn_save, '{0:08d}.png'.format(self.index))
            if not os.path.exists(dn_save):
                os.makedirs(dn_save)
            print('Writing ... ' + fn_save)
            cv2.imwrite(fn_save, img) 
Example #7
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def test_yellow_grid_thresh_images(src, dst, y_low=(10,50,0), y_high=(30,255,255), sx_thresh=(20, 100)):
	"""
	apply the threshold to every image in the src folder and write the results to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = yellow_grid_thresh(img, y_low, y_high, sx_thresh)
		
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert binary to RGB and scale by 255 for visualization; a raw value of 1 is invisible when written to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		cv2.imwrite(out_image, image_threshed) 
Example #8
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def test_color_grid_thresh_dynamic(src, dst, s_thresh, sx_thresh):
	"""
	apply the threshold to every image in the src folder and write the results to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = color_grid_thresh_dynamic(img, s_thresh=s_thresh, sx_thresh=sx_thresh)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert binary to RGB and scale by 255 for visualization; a raw value of 1 is invisible when written to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		cv2.imwrite(out_image, image_threshed) 
Example #9
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def test_thresh_images(src, dst, s_thresh, sx_thresh):
	"""
	apply the threshold to every image in the src folder and write the results to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = color_grid_thresh(img, s_thresh=s_thresh, sx_thresh=sx_thresh)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert binary to RGB and scale by 255 for visualization; a raw value of 1 is invisible when written to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		cv2.imwrite(out_image, image_threshed) 
Example #10
Source File: offset_ui_tool.py    From TGC-Designer-Tools with Apache License 2.0
def drawNewLocation(ax, image_dict, result, image_scale, radio, sx, sy, event, ar):
    x_offset = 0.0
    y_offset = 0.0
    if sx is not None and sy is not None:
        x_offset = sx.val
        y_offset = sy.val

    vosm = np.copy(image_dict["Visible"])
    vosm = OSMTGC.addOSMToImage(result.ways, vosm, pc, image_scale, x_offset, y_offset)
    image_dict["Visible Golf"] = vosm

    hosm = np.copy(image_dict["Heightmap"]).astype('float32')
    hosm = np.clip(hosm, 0.0, 3.5*np.median( hosm[ hosm >= 0.0 ])) # Limit outlier pixels
    hosm = hosm / np.max(hosm)
    hosm = cv2.cvtColor(hosm, cv2.COLOR_GRAY2RGB)
    hosm = OSMTGC.addOSMToImage(result.ways, hosm, pc, image_scale, x_offset, y_offset)
    image_dict["Heightmap Golf"] = hosm

    # Always set to Visible Golf after drawing new golf features
    ax.imshow(image_dict["Visible Golf"], origin='lower')
    radio.set_active(1) 
Example #11
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (CV Image): CV Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        CV Image: Saturation adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))

    im = img.astype(np.float32)
    degenerate = cv2.cvtColor(
        cv2.cvtColor(
            im,
            cv2.COLOR_RGB2GRAY),
        cv2.COLOR_GRAY2RGB)
    im = (1 - saturation_factor) * degenerate + saturation_factor * im
    im = im.clip(min=0, max=255)
    return im.astype(img.dtype) 
Example #12
Source File: chapter2.py    From OpenCV-Computer-Vision-Projects-with-Python with MIT License
def ProcessFrame(self, frame):
        # segment arm region
        segment = self.SegmentArm(frame)

        # make a copy of the segmented image to draw on
        draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)

        # draw some helpers for correctly placing hand
        # use integer division so the coordinates are ints (required by cv2 drawing functions)
        cv2.circle(draw, (self.imgWidth // 2, self.imgHeight // 2), 3, [255, 102, 0], 2)
        cv2.rectangle(draw, (self.imgWidth // 3, self.imgHeight // 3), (self.imgWidth * 2 // 3, self.imgHeight * 2 // 3), [255, 102, 0], 2)

        # find the hull of the segmented area, and based on that find the
        # convexity defects
        [contours,defects] = self.FindHullDefects(segment)

        # detect the number of fingers depending on the contours and convexity defects
        # draw defects that belong to fingers green, others red
        [nofingers,draw] = self.DetectNumberFingers(contours, defects, draw)

        # print number of fingers on image
        cv2.putText(draw, str(nofingers), (30,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
        return draw 
Example #13
Source File: image_process.py    From Advanced_Lane_Lines with MIT License
def test_yellow_white_thresh_images(src, dst, y_low=(10,50,0), y_high=(30,255,255), w_low=(180,180,180), w_high=(255,255,255)):
	"""
	apply the threshold to every image in the src folder and write the results to the dst folder
	"""
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_threshed = yellow_white_thresh(img, y_low, y_high, w_low, w_high)
		
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert binary to RGB and scale by 255 for visualization; a raw value of 1 is invisible when written to file
		image_threshed = cv2.cvtColor(image_threshed*255, cv2.COLOR_GRAY2RGB)
		
		# HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
		# V = HSV[:,:,2]
		# brightness = np.mean(V)
		# info_str = "brightness is: {}".format(int(brightness))
		# cv2.putText(image_threshed, info_str, (50,700), cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,255),2)
		
		cv2.imwrite(out_image, image_threshed) 
Example #14
Source File: features.py    From pyECO with MIT License
def get_features(self, img, pos, sample_sz, scales):
        feat1 = []
        feat2 = []
        if img.shape[2] == 1:
            img = cv2.cvtColor(img.squeeze(), cv2.COLOR_GRAY2RGB)
        if not isinstance(scales, list) and not isinstance(scales, np.ndarray):
            scales = [scales]
        patches = []
        for scale in scales:
            patch = self._sample_patch(img, pos, sample_sz*scale, sample_sz)
            patch = mx.nd.array(patch / 255., ctx=self._ctx)
            normalized = mx.image.color_normalize(patch,
                                                  mean=mx.nd.array([0.485, 0.456, 0.406], ctx=self._ctx),
                                                  std=mx.nd.array([0.229, 0.224, 0.225], ctx=self._ctx))
            normalized = normalized.transpose((2, 0, 1)).expand_dims(axis=0)
            patches.append(normalized)
        patches = mx.nd.concat(*patches, dim=0)
        f1, f2 = self._forward(patches)
        f1 = self._feature_normalization(f1)
        f2 = self._feature_normalization(f2)
        return f1, f2 
Example #15
Source File: cvfunctional.py    From opencv_transforms_torchvision with MIT License
def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (np.ndarray): CV Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a gray image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        np.ndarray: Saturation adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be CV Image (np.ndarray). Got {}'.format(type(img)))

    im = img.astype(np.float32)
    degenerate = cv2.cvtColor(cv2.cvtColor(im, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
    im = (1-saturation_factor) * degenerate + saturation_factor * im
    im = im.clip(min=0, max=255)
    return im.astype(img.dtype) 
Example #16
Source File: lsun_bedroom_line2color.py    From chainer-cyclegan with MIT License
def get_example(self, i):
        id = self.all_keys[i]
        img = None
        val = self.db.get(id.encode())

        img = cv2.imdecode(np.frombuffer(val, dtype=np.uint8), 1)  # np.frombuffer replaces the deprecated np.fromstring
        img = self.do_augmentation(img)

        img_color = img
        img_color = self.preprocess_image(img_color)

        img_line = XDoG(img)
        img_line = cv2.cvtColor(img_line, cv2.COLOR_GRAY2RGB)
        #if img_line.ndim == 2:
        #    img_line = img_line[:, :, np.newaxis]
        img_line = self.preprocess_image(img_line)

        return img_line, img_color 
Example #17
Source File: test_warp.py    From open-vot with MIT License
def test_pad_array(self):
        dataset = OTB(self.otb_dir, download=True)

        npad = random.choice([0, 10, 50])
        padding = random.choice([None, 0, 'avg'])
        print('[cv2-pad] padding:', padding, 'npad:', npad)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = pad_array(image, npad, padding=padding)
            show_frame(image[:, :, ::-1], fig_n=1) 
Example #18
Source File: test_warp.py    From open-vot with MIT License
def test_crop_array(self):
        dataset = OTB(self.otb_dir, download=True)

        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([None, 255])
        print('[cv2-crop] padding:', padding, 'out_size:', out_size)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            bndbox = anno[f, :]
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_array(image, center, bndbox[2:],
                               padding=padding, out_size=out_size)
            show_frame(patch, fig_n=2, pause=0.1) 
Example #19
Source File: test_warp.py    From open-vot with MIT License
def test_crop_tensor(self):
        dataset = OTB(self.otb_dir, download=True)

        padding = random.choice([None, 0, 'avg'])
        out_size = random.choice([255])
        print('[PyTorch-crop] padding:', padding, 'out_size:', out_size)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = torch.from_numpy(image).permute(
                2, 0, 1).unsqueeze(0).float()
            bndbox = torch.from_numpy(anno[f, :]).float()
            center = bndbox[:2] + bndbox[2:] / 2
            patch = crop_tensor(image, center, bndbox[2:],
                                padding=padding, out_size=out_size)
            patch = patch.squeeze().permute(1, 2, 0).cpu().numpy().astype(np.uint8)
            show_frame(patch, fig_n=1, pause=0.1) 
Example #20
Source File: test_warp.py    From open-vot with MIT License
def test_resize_tensor(self):
        dataset = OTB(self.otb_dir, download=True)

        out_size = random.choice([30, 100, 255])
        print('[PyTorch-resize]:', out_size)

        img_files, anno = random.choice(dataset)
        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = torch.from_numpy(image).permute(
                2, 0, 1).unsqueeze(0).float()
            image = resize_tensor(image, out_size)
            image = image.squeeze().permute(1, 2, 0).numpy().astype(np.uint8)
            show_frame(image, fig_n=2, pause=0.1) 
Example #21
Source File: __init__.py    From open-vot with MIT License
def track(self, img_files, init_rect, visualize=False):
        frame_num = len(img_files)
        bndboxes = np.zeros((frame_num, 4))
        bndboxes[0, :] = init_rect
        speed_fps = np.zeros(frame_num)

        for f, img_file in enumerate(img_files):
            image = cv2.imread(img_file)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
            elif image.ndim == 3:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            start_time = time.time()
            if f == 0:
                self.init(image, init_rect)
            else:
                bndboxes[f, :] = self.update(image)
            elapsed_time = time.time() - start_time
            speed_fps[f] = 1. / elapsed_time

            if visualize:
                show_frame(image, bndboxes[f, :], fig_n=1)

        return bndboxes, speed_fps 
Example #22
Source File: OpenCVQImage.py    From vidpipe with GNU General Public License v3.0
def __init__( self, opencvBgrImg ):
#        depth = cv2.IPL_DEPTH_8U

        if len( opencvBgrImg.shape ) == 3:
            h, w, nChannels = opencvBgrImg.shape
            opencvRgbImg = np.zeros( ( h, w, 3 ), np.uint8 )
            opencvRgbImg = cv2.cvtColor( opencvBgrImg, cv2.COLOR_BGR2RGB )
        else:
#            img_format = QtGui.QImage.Format_Mono
            h, w = opencvBgrImg.shape
#            opencvRgbImg = np.zeros( ( h, w, 3 ), np.uint8 )
            opencvRgbImg = cv2.cvtColor( opencvBgrImg, cv2.COLOR_GRAY2RGB )
#            cv2.mixChannels( [ opencvBgrImg ], [ opencvRgbImg ], [ 0, 2 ] )

#        if depth != cv.IPL_DEPTH_8U or nChannels != 3:
#            raise ValueError("the input image must be 8-bit, 3-channel")

        self._imgData = opencvRgbImg.tobytes()  # tobytes() replaces the deprecated ndarray.tostring()
        super( OpenCVQImage, self ).__init__( self._imgData, w, h, QtGui.QImage.Format_RGB888 ) 
Example #23
Source File: coco.py    From chainer-mask-rcnn with MIT License
def get_example(self, i):
        img_id = self.img_ids[i]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        anns = self.coco.loadAnns(ann_ids)

        img_fname = self.img_fname.format(img_id)
        img = skimage.io.imread(img_fname)
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

        example = self._annotations_to_example(
            anns, img.shape[0], img.shape[1])

        # img, bboxes, labels, masks
        # or img, bboxes, labels, masks, crowds
        # or img, bboxes, labels, masks, areas
        # or img, bboxes, labels, masks, crowds, areas
        return tuple([img] + example) 
Example #24
Source File: visual_odometry.py    From pyslam with GNU General Public License v3.0
def drawFeatureTracks(self, img, reinit = False):
        draw_img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
        num_outliers = 0        
        if(self.stage == VoStage.GOT_FIRST_IMAGE):            
            if reinit:
                for p1 in self.kps_cur:
                    a, b = map(int, p1.ravel())  # cv2 drawing functions require integer coordinates
                    cv2.circle(draw_img, (a, b), 1, (0, 255, 0), -1)
            else:    
                for i,pts in enumerate(zip(self.track_result.kps_ref_matched, self.track_result.kps_cur_matched)):
                    drawAll = False # set this to true if you want to draw outliers 
                    if self.mask_match[i] or drawAll:
                        p1, p2 = pts
                        a, b = map(int, p1.ravel())
                        c, d = map(int, p2.ravel())
                        cv2.line(draw_img, (a, b), (c, d), (0, 255, 0), 1)
                        cv2.circle(draw_img, (a, b), 1, (0, 0, 255), -1)
                    else:
                        num_outliers+=1
            if kVerbose:
                print('# outliers: ', num_outliers)     
        return draw_img 
Example #25
Source File: utils_draw.py    From pyslam with GNU General Public License v3.0
def combine_images_vertically(img1, img2): 
    if img1.ndim<=2:
        img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2RGB)    
    if img2.ndim<=2:
        img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2RGB)                     
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    img3 = np.zeros((h1+h2, max(w1, w2),3), np.uint8)
    img3[:h1, :w1,:3] = img1
    img3[h1:h1+h2,:w2,:3] = img2
    return img3 


# draw features matches (images are combined horizontally)
# input:
# - kps1 = [Nx2] array of keypoint coordinates 
# - kps2 = [Nx2] array of keypoint coordinates 
# - kps1_sizes = [Nx1] array of keypoint sizes 
# - kps2_sizes = [Nx1] array of keypoint sizes 
# output: drawn image 
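The excerpt ends before the function this comment documents; below is a minimal sketch of such a match-visualization helper (the name combine_and_draw_matches is hypothetical, and it assumes kps1[i] and kps2[i] are corresponding points):

import cv2
import numpy as np

def combine_and_draw_matches(img1, kps1, img2, kps2):  # hypothetical name, for illustration
    # promote grayscale inputs to 3 channels so colored overlays are visible
    if img1.ndim <= 2:
        img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2RGB)
    if img2.ndim <= 2:
        img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2RGB)
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # place the two images side by side on a shared canvas
    img3 = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
    img3[:h1, :w1, :3] = img1
    img3[:h2, w1:w1 + w2, :3] = img2
    # one line per match; keypoints of the second image are shifted right by w1
    for (x1, y1), (x2, y2) in zip(kps1, kps2):
        p1 = (int(x1), int(y1))
        p2 = (int(x2) + w1, int(y2))
        cv2.line(img3, p1, p2, (0, 255, 0), 1)
        cv2.circle(img3, p1, 2, (0, 0, 255), -1)
        cv2.circle(img3, p2, 2, (0, 0, 255), -1)
    return img3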
Example #26
Source File: test-mvs.py    From dfc2019 with MIT License
def category_to_color(category_image):
    # define colors
    # color table is here: https://www.rapidtables.com/web/color/RGB_Color.html
    colors = []
    colors.append((165, 42, 42))  # 0  brown (ground)
    colors.append((0, 128, 0))  # 1  green (trees)
    colors.append((255, 0, 0))  # 2  red (buildings)
    colors.append((0, 0, 255))  # 3  blue (water)
    colors.append((128, 128, 128))  # 4  gray (elevated road)
    colors.append((0, 0, 0))  # 5  black (other)

    # convert categories to color image
    rows = category_image.shape[0]
    cols = category_image.shape[1]
    categories = category_image.astype(np.uint8)
    categories = np.reshape(categories, [rows, cols])
    rgb_image = cv2.cvtColor(categories, cv2.COLOR_GRAY2RGB)
    for i in range(cols):
        for j in range(rows):
            rgb_image[j, i, :] = colors[categories[j, i]]
    return rgb_image 
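The per-pixel Python loop above costs O(rows x cols) interpreter steps; a vectorized sketch of the same lookup using NumPy fancy indexing (not part of the original project):

import numpy as np

def category_to_color_vectorized(category_image, colors):
    # colors: list of (r, g, b) tuples indexed by category id
    lut = np.array(colors, dtype=np.uint8)          # (num_categories, 3) lookup table
    return lut[category_image.astype(np.intp)]      # fancy indexing yields an HxWx3 image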
Example #27
Source File: test-icnet.py    From dfc2019 with MIT License
def category_to_color(category_image):

    # define colors
    # color table is here: https://www.rapidtables.com/web/color/RGB_Color.html
    colors = []
    colors.append((165,42,42))      # 0  brown (ground)
    colors.append((0,128,0))        # 1  green (trees)
    colors.append((255,0,0))        # 2  red (buildings)
    colors.append((0,0,255))        # 3  blue (water)
    colors.append((128,128,128))    # 4  gray (elevated road)
    colors.append((0,0,0))          # 5  black (other)

    # convert categories to color image
    rows = category_image.shape[0]
    cols = category_image.shape[1]
    categories = category_image.astype(np.uint8)
    categories = np.reshape(categories, [rows, cols])
    rgb_image = cv2.cvtColor(categories,cv2.COLOR_GRAY2RGB)
    for i in range(cols):
        for j in range(rows):
            rgb_image[j,i,:] = colors[categories[j,i]]
    return rgb_image 
Example #28
Source File: train.py    From dfc2019 with MIT License
def category_to_color(self, category_image):

        # define colors
        # color table is here: https://www.rapidtables.com/web/color/RGB_Color.html
        colors = []
        colors.append((165, 42, 42))  # 0  brown (ground)
        colors.append((0, 128, 0))  # 1  green (trees)
        colors.append((255, 0, 0))  # 2  red   (buildings)
        colors.append((0, 0, 255))  # 3  blue  (water)
        colors.append((128, 128, 128))  # 4  gray  (elevated road / bridge)
        colors.append((0, 0, 0))  # 5  black (other)

        # convert categories to color image
        rows = category_image.shape[0]
        cols = category_image.shape[1]
        categories = category_image.astype(np.uint8)
        categories = np.reshape(categories, [rows, cols])
        rgb_image = cv2.cvtColor(categories, cv2.COLOR_GRAY2RGB)
        for i in range(cols):
            for j in range(rows):
                rgb_image[j, i, :] = colors[categories[j, i]]
        return rgb_image

    # save image with truth and prediction 
Example #29
Source File: functional.py    From torch-toolbox with BSD 3-Clause "New" or "Revised" License
def to_tensor(pic):
    """Convert a ``numpy.ndarray`` image to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if _is_numpy_image(pic):
        if pic.ndim == 2:
            pic = cv2.cvtColor(pic, cv2.COLOR_GRAY2RGB)
        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img
    else:
        raise TypeError('pic should be ndarray. Got {}.'.format(type(pic))) 
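A small usage sketch, assuming torch, cv2, numpy (as np), and _is_numpy_image are available: a 2-D grayscale array is expanded to three channels, transposed to CHW, and scaled to [0, 1]:

pic = np.full((5, 5), 255, dtype=np.uint8)
t = to_tensor(pic)
assert t.shape == (3, 5, 5)       # GRAY2RGB, then HWC -> CHW
assert float(t.max()) == 1.0      # ByteTensor input is divided by 255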
Example #30
Source File: postprocessing.py    From nni with MIT License
def save_pseudo_label_masks(submission_file):
    df = pd.read_csv(submission_file, na_filter=False)
    print(df.head())

    img_dir = os.path.join(settings.TEST_DIR, 'masks')

    for i, row in enumerate(df.values):
        decoded_mask = run_length_decoding(row[1], (101,101))
        filename = os.path.join(img_dir, '{}.png'.format(row[0]))
        rgb_mask = cv2.cvtColor(decoded_mask,cv2.COLOR_GRAY2RGB)
        print(filename)
        cv2.imwrite(filename, rgb_mask)  # write the 3-channel mask; the original wrote decoded_mask, leaving rgb_mask unused
        if i % 100 == 0:
            print(i)