Python cv2.merge() Examples

The following are 30 code examples of cv2.merge(), each taken from an open-source project. The project name, source file, and license are listed above each example.
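As a quick orientation, here is a minimal, self-contained sketch of the split/merge round trip (the input file name is a placeholder):

import cv2

img = cv2.imread('input.jpg')      # OpenCV loads color images in BGR order
b, g, r = cv2.split(img)           # three single-channel arrays
restored = cv2.merge((b, g, r))    # identical to the original image
swapped = cv2.merge((r, g, b))     # same pixels with the channel order flipped to RGB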
Example #1
Source File: imagenet.py    From VAE-GAN with MIT License
def read_image_by_index(self, index):
			train_image_filepath = os.path.join(self.data_dir, self.x_train[index])
			train_image_label = np.zeros((self.nb_classes,))
			train_image_label[self.y_train[index]] = 1
			train_image = io.imread(train_image_filepath)

			# in case of single channel image
			if len(train_image.shape) == 2:
				train_image = cv2.merge([train_image, train_image, train_image])
				
			# in case of RGBA image
			if train_image.shape[2] == 4:
				train_image = train_image[:, :, 0:3]

			# other cases
			if len(train_image.shape) != 3 or train_image.shape[2] != 3:
				return None, None

			train_image = cv2.resize(train_image, (self.input_shape[1], self.input_shape[0])).astype(np.float32) / 255.0
			return train_image, train_image_label 
Example #2
Source File: neural_style.py    From neural-style-tf with GNU General Public License v3.0
def convert_to_original_colors(content_img, stylized_img):
  content_img  = postprocess(content_img)
  stylized_img = postprocess(stylized_img)
  if args.color_convert_type == 'yuv':
    cvt_type = cv2.COLOR_BGR2YUV
    inv_cvt_type = cv2.COLOR_YUV2BGR
  elif args.color_convert_type == 'ycrcb':
    cvt_type = cv2.COLOR_BGR2YCR_CB
    inv_cvt_type = cv2.COLOR_YCR_CB2BGR
  elif args.color_convert_type == 'luv':
    cvt_type = cv2.COLOR_BGR2LUV
    inv_cvt_type = cv2.COLOR_LUV2BGR
  elif args.color_convert_type == 'lab':
    cvt_type = cv2.COLOR_BGR2LAB
    inv_cvt_type = cv2.COLOR_LAB2BGR
  content_cvt = cv2.cvtColor(content_img, cvt_type)
  stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
  c1, _, _ = cv2.split(stylized_cvt)
  _, c2, c3 = cv2.split(content_cvt)
  merged = cv2.merge((c1, c2, c3))
  dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
  dst = preprocess(dst)
  return dst 
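The function above depends on the repository's own args, postprocess, and preprocess helpers. Stripped of those, a minimal sketch of the same luminance/chrominance swap might look like this (the file names and the YUV choice are assumptions):

import cv2

content = cv2.imread('content.jpg')      # BGR uint8
stylized = cv2.imread('stylized.jpg')    # BGR uint8, same size as content

# keep the stylized luminance, restore the content chrominance
y, _, _ = cv2.split(cv2.cvtColor(stylized, cv2.COLOR_BGR2YUV))
_, u, v = cv2.split(cv2.cvtColor(content, cv2.COLOR_BGR2YUV))
recolored = cv2.cvtColor(cv2.merge((y, u, v)), cv2.COLOR_YUV2BGR)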
Example #3
Source File: web.py    From Tabulo with BSD 3-Clause "New" or "Revised" License
def get_image():
    image = request.files.get('image')
    if not image:
        raise ValueError
    img = Image.open(image.stream).convert('RGB')
    img = np.asarray(img)
    # PIL delivers RGB channel order, so convert from RGB (not BGR) to grayscale
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    b = cv2.distanceTransform(img, distanceType=cv2.DIST_L2, maskSize=5)
    g = cv2.distanceTransform(img, distanceType=cv2.DIST_L1, maskSize=5)
    r = cv2.distanceTransform(img, distanceType=cv2.DIST_C, maskSize=5)
    
    # merge the transformed channels back to an image
    transformed_image = cv2.merge((b, g, r))
    
    return transformed_image 
Example #4
Source File: normalized.py    From virtual-dressing-room with Apache License 2.0
def normalized(self):
        # cast to float first: summing uint8 channels would wrap around at 256
        b = self.down[:, :, 0].astype(np.float32)
        g = self.down[:, :, 1].astype(np.float32)
        r = self.down[:, :, 2].astype(np.float32)

        total = b + g + r
        total[total == 0] = 1.0  # guard against division by zero on black pixels

        self.norm[:, :, 0] = b / total * 255.0
        self.norm[:, :, 1] = g / total * 255.0
        self.norm[:, :, 2] = r / total * 255.0

        # self.norm = cv2.merge([self.norm1, self.norm2, self.norm3])
        self.norm_rgb = cv2.convertScaleAbs(self.norm)
        return self.norm_rgb 
Example #5
Source File: train.py    From kaggle_carvana_segmentation with MIT License
def random_hue_saturation_value(image,
                                hue_shift_limit=(-180, 180),
                                sat_shift_limit=(-255, 255),
                                val_shift_limit=(-255, 255)):

    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(image)
    hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
    h = cv2.add(h, hue_shift)
    sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
    s = cv2.add(s, sat_shift)
    val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
    v = cv2.add(v, val_shift)
    image = cv2.merge((h, s, v))
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image 
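A typical call site, assuming a BGR uint8 image (the path and shift limits are illustrative):

img = cv2.imread('car.jpg')
augmented = random_hue_saturation_value(img,
                                        hue_shift_limit=(-50, 50),
                                        sat_shift_limit=(-5, 5),
                                        val_shift_limit=(-15, 15))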
Example #6
Source File: playground.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    image = cv2.imread("../data/house.tiff", 1)

    blue, green, red = cv2.split(image)
    rows, columns, channels = image.shape

    output = np.empty((rows, columns * 3, 3), np.uint8)

    output[:, 0:columns] = cv2.merge([blue, blue, blue])
    output[:, columns:columns * 2] = cv2.merge([green, green, green])
    output[:, columns * 2:columns * 3] = cv2.merge([red, red, red])

    hsvimage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hue, satr, vlue = cv2.split(hsvimage)
    hsvoutput = np.concatenate((hue, satr, vlue), axis=1)

    cv2.imshow("Sample Image", image)
    cv2.imshow("Output Image", output)
    cv2.imshow("HSV Image", hsvoutput)

    cv2.waitKey(0)
    cv2.destroyAllWindows() 
Example #7
Source File: web.py    From Table-Detection-using-Deep-learning with BSD 3-Clause "New" or "Revised" License
def get_image():
    image = request.files.get('image')
    if not image:
        raise ValueError

    img = Image.open(image.stream).convert('RGB')
    img = np.asarray(img)
    # PIL delivers RGB channel order, so convert from RGB (not BGR) to grayscale
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    b = cv2.distanceTransform(img, distanceType=cv2.DIST_L2, maskSize=5)
    g = cv2.distanceTransform(img, distanceType=cv2.DIST_L1, maskSize=5)
    r = cv2.distanceTransform(img, distanceType=cv2.DIST_C, maskSize=5)
    
    # merge the transformed channels back to an image
    transformed_image = cv2.merge((b, g, r))
    
    return transformed_image 
Example #8
Source File: train.py    From Kaggle-Carvana-Image-Masking-Challenge with MIT License
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
                             sat_shift_limit=(-255, 255),
                             val_shift_limit=(-255, 255), u=0.5):
    if np.random.random() < u:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(image)
        hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
        h = cv2.add(h, hue_shift)
        sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
        s = cv2.add(s, sat_shift)
        val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
        v = cv2.add(v, val_shift)
        image = cv2.merge((h, s, v))
        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image 
Example #9
Source File: dataset_utils.py    From rpg_davis_simulator with GNU General Public License v3.0
def extract_grayscale(img, srgb=False):
  dw = img.header()['dataWindow']

  size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
  precision = Imath.PixelType(Imath.PixelType.FLOAT)
  R = img.channel('R', precision)
  G = img.channel('G', precision)
  B = img.channel('B', precision)
  
  # np.fromstring is deprecated for binary data; np.frombuffer is the drop-in replacement
  r = np.frombuffer(R, dtype=np.float32).reshape(size[1], size[0])
  g = np.frombuffer(G, dtype=np.float32).reshape(size[1], size[0])
  b = np.frombuffer(B, dtype=np.float32).reshape(size[1], size[0])
  
  rgb = cv2.merge([b, g, r])
  grayscale = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
  
  if srgb:
      grayscale = lin2srgb(grayscale)

  return grayscale 
Example #10
Source File: imgconnector.py    From How_to_generate_music_in_tensorflow_LIVE with Apache License 2.0
def write_song(piano_roll, filename):
        """ Save the song on disk
        Args:
            piano_roll (np.array): a song object containing the tracks and melody
            filename (str): the path where to save the song (don't add the file extension)
        """
        note_played = piano_roll > 0.5
        piano_roll_int = np.uint8(piano_roll*255)

        b = piano_roll_int * (~note_played).astype(np.uint8)  # Note silenced
        g = np.zeros(piano_roll_int.shape, dtype=np.uint8)    # Empty channel
        r = piano_roll_int * note_played.astype(np.uint8)     # Notes played

        img = cv.merge((b, g, r))

        # TODO: We could insert a first column indicating the piano keys (black/white key)

        cv.imwrite(filename + '.png', img) 
Example #11
Source File: SplitMerge.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def main():
    imageOne = cv2.imread("../data/house.tiff", 1)
    imageOne = cv2.cvtColor(imageOne, cv2.COLOR_BGR2RGB)

    red, green, blue = cv2.split(imageOne)

    images = [cv2.merge((red, green, blue)), red, green, blue]
    titles = ["Default RGB Image", "Only Red", "Only Blue", "Only Green"]
    cmaps = ["gray", "Reds", "Greens", "Blues"]

    for i in range(4):
        plt.subplot(2, 2, i + 1)

        plt.imshow(images[i], cmap=cmaps[i])
        plt.title(titles[i])
        plt.xticks([])
        plt.yticks([])

    plt.show() 
Example #12
Source File: cvutils.py    From 1ZLAB_PyEspCar with GNU General Public License v3.0
def backprojection(target, roihist):
    '''Image preprocessing'''
    hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsvt],[0,1],roihist,[0,180,0,256],1)
    # Now convolute with circular disc
    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
    cv2.filter2D(dst,-1,disc,dst)
    # threshold and binary AND
    ret,binary = cv2.threshold(dst,80,255,0)
    # create the kernel
    kernel = np.ones((5,5), np.uint8)
    iter_time = 1
    # morphological closing
    binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel,iterations=iter_time)

    thresh = cv2.merge((binary,binary,binary))
    target_filter = cv2.bitwise_and(target,thresh)
    
    return binary, target_filter 
Example #13
Source File: cv.py    From deepstar with BSD 3-Clause Clear License
def overlay_transparent_image(bg, fg, x1, y1):
    # bg is a 3-channel BGR image
    # fg is a 4-channel BGRA image

    bg = bg.copy()
    fg = fg.copy()

    h, w = fg.shape[:2]
    t = bg[y1:y1 + h, x1:x1 + w]

    b, g, r, a = cv2.split(fg)
    mask = cv2.merge((a, a, a))
    fg = cv2.merge((b, g, r))
    overlaid = alpha_blend(t, fg, mask)

    bg[y1:y1 + h, x1:x1 + w] = overlaid

    return bg 
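alpha_blend is defined elsewhere in the deepstar project; a plausible minimal version (an assumption, not the project's actual helper) would be:

import numpy as np

def alpha_blend(bg, fg, mask):
    # mask is the alpha replicated into 3 channels (0 = background, 255 = foreground)
    alpha = mask.astype(np.float32) / 255.0
    blended = fg.astype(np.float32) * alpha + bg.astype(np.float32) * (1.0 - alpha)
    return blended.astype(np.uint8)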
Example #14
Source File: FingerDetection.py    From Finger-Detection-and-Tracking with BSD 2-Clause "Simplified" License
def hist_masking(frame, hist):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)

    disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
    cv2.filter2D(dst, -1, disc, dst)

    ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)

    # thresh = cv2.dilate(thresh, None, iterations=5)

    thresh = cv2.merge((thresh, thresh, thresh))

    return cv2.bitwise_and(frame, thresh) 
Example #15
Source File: renderer.py    From motion_reconstruction with BSD 3-Clause "New" or "Revised" License
def get_alpha(imtmp, bgval=1.):
    h, w = imtmp.shape[:2]
    alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)

    b_channel, g_channel, r_channel = cv2.split(imtmp)

    im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha.astype(
        imtmp.dtype)))
    return im_RGBA 
Example #16
Source File: citypersons2.py    From Detectron-PYTORCH with Apache License 2.0
def add_brightness(im):
    # distort brightness
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    h_, s_, v_ = cv2.split(hsv)
    # work in int16 so that adding 20 cannot wrap around the uint8 range
    v_ = v_.astype(np.int16)
    v_[v_ > 0] += 20
    v_ = np.clip(v_, 0, 255).astype(np.uint8)
    hsv = cv2.merge((h_, s_, v_))
    im = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return im 
Example #17
Source File: image_processing.py    From DeepMosaics with GNU General Public License v3.0
def ch_one2three(img):
    res = cv2.merge([img, img, img])
    return res 
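For uint8 input this is equivalent to cv2.cvtColor with COLOR_GRAY2BGR, which also replicates the single channel three times. A usage sketch, assuming the usual cv2/numpy imports and a placeholder file name:

gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)
bgr = ch_one2three(gray)
assert np.array_equal(bgr, cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR))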
Example #18
Source File: masterForgery.py    From signature_extractor with MIT License
def writeImageToFile(img, mask):
    # The mask of the signature can be used as the alpha channel of the image
    b, g, r = cv2.split(img)
    imgWithAlpha = cv2.merge((b, g, r, mask))

    file = easygui.filesavebox()
    if file is None:
        errorPrompt('No Name Selected')

    fileName = file + '.png'

    cv2.imwrite(fileName, imgWithAlpha) 
Example #19
Source File: base_imagelist_dataset.py    From VAE-GAN with MIT License
def _image_correct(self, img, image_fp):
		"""	correct the image shape to fixed shape [height, width, channel]
		
		1. the argument image_fp is just for debugging.
		2. for some image file has multiple images and with shape of [num, height, width, channel],
		this function will return the first image and discard others.
		"""
		if img is None:
			if self.show_warning:
				print('Warning : read image ' + image_fp + ' failed!')
			return None

		if img.ndim == 4:
			img = img[0]		# take the first image and discard others

		if img.ndim != 2 and img.ndim != 3:
			if self.show_warning:
				print('Warning : wrong image shape ' + image_fp + ' : ' + str(img.shape))
			return None

		if self.output_c == 3:
			if img.ndim == 2:   						
				img = cv2.merge([img, img, img])	# in case of single channel image
			elif img.ndim == 3 and img.shape[2] == 1:
				img = cv2.merge([img[:,:,0], img[:,:,0], img[:,:,0]])
			elif img.ndim == 3 and img.shape[2] == 4:
				img = img[:, :, 0:3]

		if img.ndim != 3 or img.shape[2] != 3:
			if self.show_warning:
				print('Warning : wrong image shape ' + image_fp + ' : ' + str(img.shape))
			return None

		return img 
Example #20
Source File: Utils.py    From siameseFC-pytorch-vot with Apache License 2.0
def cv2_brg2rgb(bgr_img):
    """
    convert a BGR image to RGB
    """
    b, g, r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r, g, b])
    
    return rgb_img 
Example #21
Source File: ImageMiniLab.py    From ImageMiniLab with GNU General Public License v3.0
def channels_split(self):
        src = self.cv_read_img(self.src_file)
        if src is None:
            return
        b, g, r = cv.split(src)
        merge_image = cv.merge([b, g, r])
        """创建三维数组,0维为B,1维为G,2维为R"""
        height, width, channels = src.shape
        img = np.zeros([height*2, width*2, channels], np.uint8)
        img[0:height, 0:width] = np.expand_dims(b, axis=2)
        img[0:height, width:width*2] = np.expand_dims(g, axis=2)
        img[height:height*2, 0:width] = np.expand_dims(r, axis=2)
        img[height:height*2, width:width*2] = merge_image

        self.decode_and_show_dst(img) 
Example #22
Source File: AutoEncoder.py    From Machine-Learning-Study-Notes with Apache License 2.0
def generateImage(self):
        source = cv2.imread(r"F:/tensorflow/automodel/scrawler/video/trainImg/3524.jpg")
        sourceWarp, sourceTarget = get_training_data(np.array([source]), 1)
        print(sourceWarp.shape, sourceTarget.shape)
        sourceWarp = sourceWarp / 255.0
        sourceTarget = sourceTarget / 255.0
        source = cv2.resize(source, (64, 64))
        source = np.array([source], dtype=np.float32)
        source = source / 255.0
        dest, loss = self._sess.run([self._reconstruct2, self._loss1],
                                    feed_dict={self._x: sourceTarget, self._input: source})
        print(loss)
        sourceTarget = np.reshape(source, [64, 64, 3])
        dest = np.reshape(dest, [64, 64, 3])
        dest = np.array(dest * 255, dtype=np.uint8)
        fig = plt.figure("compare")
        ax = fig.add_subplot(121)
        b, g, r = cv2.split(sourceTarget)
        source = cv2.merge([r, g, b])
        ax.imshow(source)
        ax.axis("off")
        bx = fig.add_subplot(122)
        bx.axis("off")
        b, g, r = cv2.split(dest)
        dest = cv2.merge([r, g, b])
        bx.imshow(dest)
        plt.show() 
Example #23
Source File: color_replace.py    From virtual-dressing-room with Apache License 2.0
def replace_color(self, col=None):
        print(self.hue[0][0])
        self.hue_val = col
        # cv2.imshow("hue", self.hue)
        if col is not None:
            # set the hue to the chosen value wherever the mask is non-zero
            self.hue[self.mask != 0] = self.hue_val

        self.scratch = cv2.merge([self.hue, self.sat, self.val])
        self.scratch = cv2.cvtColor(self.scratch, cv2.COLOR_HSV2BGR)
        print('replaced')
        return self.scratch 
Example #24
Source File: transforms.py    From kaggle_carvana_segmentation with MIT License
def __call__(self, image):
        if random.random() < self.prob:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            h, s, v = cv2.split(image)
            hue_shift = np.random.uniform(self.hue_shift_limit[0], self.hue_shift_limit[1])
            h = cv2.add(h, hue_shift)
            sat_shift = np.random.uniform(self.sat_shift_limit[0], self.sat_shift_limit[1])
            s = cv2.add(s, sat_shift)
            val_shift = np.random.uniform(self.val_shift_limit[0], self.val_shift_limit[1])
            v = cv2.add(v, val_shift)
            image = cv2.merge((h, s, v))
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        return image 
Example #25
Source File: mean_preprocessor.py    From aiexamples with Apache License 2.0
def preprocess(self, image):
    # split the image into its respective Red, Green, and Blue channels
    (B, G, R) = cv2.split(image.astype("float32"))

    # subtract the means for each channel
    R -= self.r_mean
    G -= self.g_mean
    B -= self.b_mean

    # merge the channels back and return the image
    return cv2.merge([B, G, R]) 
Example #26
Source File: dataset.py    From pytorch-YOLO-v1 with MIT License
def RandomHue(self,bgr):
        if random.random() < 0.5:
            hsv = self.BGR2HSV(bgr)
            h,s,v = cv2.split(hsv)
            adjust = random.choice([0.5,1.5])
            h = h*adjust
            h = np.clip(h, 0, 255).astype(hsv.dtype)
            hsv = cv2.merge((h,s,v))
            bgr = self.HSV2BGR(hsv)
        return bgr 
Example #27
Source File: anomalyMapGen.py    From neural-road-inspector with MIT License
def _add_alpha_channel_mask(img, alpha=0.80):
	"""
	Parameters:
		img: the source image with bgr channels which has black or white colored pixels only.
		alpha: the alpha transparency [0.0, 1.0]

	Returns:
		An image with BGRA channels, in that order.
	"""
	b_channel, g_channel, r_channel = cv2.split(img)
	alpha_channel = np.zeros(b_channel.shape, dtype=b_channel.dtype)
	alpha_channel[r_channel > 126] = int(255 * alpha)
	return cv2.merge((b_channel, g_channel, r_channel, alpha_channel)) 
Example #28
Source File: image_class.py    From HistoGAN with GNU General Public License v3.0
def increase_brightness(self, value):
        hsv = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
        h, s, v = cv2.split(hsv)
        lim = 255 - value
        v[v > lim] = 255
        v[v <= lim] += value
        final_hsv = cv2.merge((h, s, v))
        self.image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2RGB) 
Example #29
Source File: helpers.py    From ImageSimilarityUsingCntk with MIT License
def imconvertCv2Numpy(img):
    b, g, r = cv2.split(img)
    return cv2.merge([r, g, b]) 
Example #30
Source File: anomalyMapGen.py    From neural-road-inspector with MIT License
def _colorize_mask(bgra_img):
	"""
	Parameter:
		bgra_img: 4 channel images which has black or white colored pixels only.
	Returns:
		colorized image where all pixels which are not black color: (0,0,0) are turned to red color
	"""
	b_channel, g_channel, r_channel, alpha_channel = cv2.split(bgra_img)
	
	b_channel[:] = 0
	g_channel[:] = 0
	r_channel[r_channel > 126] = 255
	return cv2.merge((b_channel, g_channel, r_channel, alpha_channel))