Python cv2.split() Examples

The following are 30 code examples of cv2.split(), drawn from open-source projects. The source file, project, and license for each example are listed above its code. You may also want to check out the other available functions and classes of the cv2 module.
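
Before the examples, here is a minimal, self-contained sketch of what cv2.split() does and how cv2.merge() reverses it (the file name sample.jpg is a placeholder):

import cv2
import numpy as np

img = cv2.imread("sample.jpg")   # OpenCV loads color images in BGR order
b, g, r = cv2.split(img)         # three single-channel 2-D arrays
restored = cv2.merge((b, g, r))  # the inverse of cv2.split
assert np.array_equal(img, restored)

NumPy slicing (for example img[:, :, 0] for the blue channel) is an equivalent and usually faster alternative when a separate copy is not required.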
Example #1
Source File: squares.py    From OpenCV-Python-Tutorial with MIT License
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin_img = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin_img = cv2.dilate(bin_img, None)
            else:
                retval, bin_img = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            # Note: the three-value unpacking below matches OpenCV 3.x;
            # in OpenCV 4.x, findContours returns only (contours, hierarchy).
            bin_img, contours, hierarchy = cv2.findContours(bin_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in range(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
Example #2
Source File: util.py    From smashscan with MIT License
def get_image_and_mask(img_location, gray_flag):

    # Load image from file with alpha channel (UNCHANGED flag). If an alpha
    # channel does not exist, just return the base image.
    img = cv2.imread(img_location, cv2.IMREAD_UNCHANGED)
    if img.ndim < 3 or img.shape[2] <= 3:  # grayscale load or no alpha channel
        return img, None

    # Create an alpha channel matrix with values between 0-255, then
    # threshold the alpha channel to create a binary mask.
    channels = cv2.split(img)
    mask = np.array(channels[3])
    _, mask = cv2.threshold(mask, 250, 255, cv2.THRESH_BINARY)

    # Convert image and mask to grayscale or BGR based on input flag.
    if gray_flag:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
    else:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    return img, mask


# Resize an image and mask based on an input scale ratio. 
Example #3
Source File: train.py    From kaggle_carvana_segmentation with MIT License
def random_hue_saturation_value(image,
                                hue_shift_limit=(-180, 180),
                                sat_shift_limit=(-255, 255),
                                val_shift_limit=(-255, 255)):

    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(image)
    # OpenCV stores uint8 hue in [0, 179], and cv2.add saturates rather than
    # wraps, so shifts near the +/-180 limits mostly clip at the range ends.
    hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
    h = cv2.add(h, hue_shift)
    sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
    s = cv2.add(s, sat_shift)
    val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
    v = cv2.add(v, val_shift)
    image = cv2.merge((h, s, v))
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)

    return image 
Example #4
Source File: neural_style.py    From neural-style-tf with GNU General Public License v3.0
def convert_to_original_colors(content_img, stylized_img):
  content_img  = postprocess(content_img)
  stylized_img = postprocess(stylized_img)
  if args.color_convert_type == 'yuv':
    cvt_type = cv2.COLOR_BGR2YUV
    inv_cvt_type = cv2.COLOR_YUV2BGR
  elif args.color_convert_type == 'ycrcb':
    cvt_type = cv2.COLOR_BGR2YCR_CB
    inv_cvt_type = cv2.COLOR_YCR_CB2BGR
  elif args.color_convert_type == 'luv':
    cvt_type = cv2.COLOR_BGR2LUV
    inv_cvt_type = cv2.COLOR_LUV2BGR
  elif args.color_convert_type == 'lab':
    cvt_type = cv2.COLOR_BGR2LAB
    inv_cvt_type = cv2.COLOR_LAB2BGR
  content_cvt = cv2.cvtColor(content_img, cvt_type)
  stylized_cvt = cv2.cvtColor(stylized_img, cvt_type)
  c1, _, _ = cv2.split(stylized_cvt)
  _, c2, c3 = cv2.split(content_cvt)
  merged = cv2.merge((c1, c2, c3))
  dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
  dst = preprocess(dst)
  return dst 
Example #5
Source File: __init__.py    From aircv with MIT License
def brightness(im):
    '''
    Return the brightness of an image
    Args:
        im(numpy): image

    Returns:
        float, average brightness of an image
    '''
    im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(im_hsv)
    height, width = v.shape[:2]
    total_bright = 0
    for i in v:
        total_bright = total_bright + sum(i)
    # Equivalent to float(np.mean(v)), but accumulated row by row.
    return float(total_bright) / (height * width)
Example #6
Source File: reinhard_color_normalizer.py    From StainTools with MIT License
def lab_split(I):
    """
    Convert from RGB uint8 to LAB and split into channels.

    :param I: Image RGB uint8.
    :return: Tuple (I1, I2, I3) of float32 L, A and B channels.
    """
    assert is_uint8_image(I), "Should be an RGB uint8 image"
    I = cv.cvtColor(I, cv.COLOR_RGB2LAB)
    I_float = I.astype(np.float32)
    I1, I2, I3 = cv.split(I_float)
    I1 /= 2.55  # should now be in range [0, 100]
    I2 -= 128.0  # should now be in range [-128, 127]
    I3 -= 128.0  # should now be in range [-128, 127]
    return I1, I2, I3
Example #7
Source File: __init__.py    From uiautomator2 with MIT License
def brightness(self, im):
    '''
    Return the brightness of an image
    Args:
        im(numpy): image

    Returns:
        float, average brightness of an image
    '''
    im_hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(im_hsv)
    height, width = v.shape[:2]
    total_bright = 0
    for i in v:
        total_bright = total_bright + sum(i)
    return float(total_bright) / (height * width)
Example #8
Source File: utils.py    From AdaIN-TF with MIT License
def get_img_random_crop(src, resize=512, crop=256):
    '''Get & resize image and random crop'''
    img = get_img(src)
    img = resize_to(img, resize=resize)
    
    offset_h = random.randint(0, (img.shape[0]-crop))
    offset_w = random.randint(0, (img.shape[1]-crop))
    
    img = img[offset_h:offset_h+crop, offset_w:offset_w+crop, :]

    return img

# def preserve_colors(content_rgb, styled_rgb):
#     """Extract luminance from styled image and apply colors from content"""
#     if content_rgb.shape != styled_rgb.shape:
#       new_shape = (content_rgb.shape[1], content_rgb.shape[0])
#       styled_rgb = cv2.resize(styled_rgb, new_shape)
#     styled_yuv = cv2.cvtColor(styled_rgb, cv2.COLOR_RGB2YUV)
#     Y_s, U_s, V_s = cv2.split(styled_yuv)
#     image_YUV = cv2.cvtColor(content_rgb, cv2.COLOR_RGB2YUV)
#     Y_i, U_i, V_i = cv2.split(image_YUV)
#     styled_rgb = cv2.cvtColor(np.stack([Y_s, U_i, V_i], axis=-1), cv2.COLOR_YUV2RGB)
#     return styled_rgb 
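
The commented-out preserve_colors helper above illustrates luminance-preserving color transfer. As a minimal runnable sketch of the same idea, assuming two same-sized RGB uint8 arrays (the original also resizes on shape mismatch; the name preserve_colors_minimal is introduced here):

import cv2

def preserve_colors_minimal(content_rgb, styled_rgb):
    # Hypothetical helper adapted from the commented-out code above:
    # keep the stylized luminance (Y) but the content image's colors (U, V).
    y_s, _, _ = cv2.split(cv2.cvtColor(styled_rgb, cv2.COLOR_RGB2YUV))
    _, u_c, v_c = cv2.split(cv2.cvtColor(content_rgb, cv2.COLOR_RGB2YUV))
    return cv2.cvtColor(cv2.merge((y_s, u_c, v_c)), cv2.COLOR_YUV2RGB)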
Example #9
Source File: cv.py    From deepstar with BSD 3-Clause Clear License
def overlay_transparent_image(bg, fg, x1, y1):
    # bg is a 3-channel BGR image
    # fg is a 4-channel BGRA image

    bg = bg.copy()
    fg = fg.copy()

    h, w = fg.shape[:2]
    t = bg[y1:y1 + h, x1:x1 + w]

    b, g, r, a = cv2.split(fg)
    mask = cv2.merge((a, a, a))
    fg = cv2.merge((b, g, r))
    overlaid = alpha_blend(t, fg, mask)

    bg[y1:y1 + h, x1:x1 + w] = overlaid

    return bg 
Example #10
Source File: __init__.py    From color_transfer with MIT License
def image_stats(image):
	"""
	Parameters:
	-------
	image: NumPy array
		OpenCV image in L*a*b* color space

	Returns:
	-------
	Tuple of mean and standard deviations for the L*, a*, and b*
	channels, respectively
	"""
	# compute the mean and standard deviation of each channel
	(l, a, b) = cv2.split(image)
	(lMean, lStd) = (l.mean(), l.std())
	(aMean, aStd) = (a.mean(), a.std())
	(bMean, bStd) = (b.mean(), b.std())

	# return the color statistics
	return (lMean, lStd, aMean, aStd, bMean, bStd) 
Example #11
Source File: cal_confidence.py    From Airtest with Apache License 2.0
def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
    """Compute the similarity confidence of two same-sized color images."""
    # Perceptual weights for the three BGR channels:
    weight = (0.114, 0.587, 0.299)
    src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)

    # Compute a confidence for each BGR channel and store it in bgr_confidence:
    bgr_confidence = [0, 0, 0]
    for i in range(3):
        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)
        bgr_confidence[i] = max_val

    # Weighted overall confidence:
    weighted_confidence = bgr_confidence[0] * weight[0] + bgr_confidence[1] * weight[1] + bgr_confidence[2] * weight[2]

    return weighted_confidence 
Example #12
Source File: helpers.py    From ImageSimilarityUsingCntk with MIT License
def splitString(string, delimiter='\t', columnsToKeepIndices=None):
    if string is None:
        return None
    items = string.split(delimiter)
    if columnsToKeepIndices is not None:
        items = getColumns([items], columnsToKeepIndices)
        items = items[0]
    return items
Example #13
Source File: imgio.py    From SickZil-Machine with GNU Affero General Public License v3.0
def segmap2mask(segmap):
    '''
    convert segmap(snet output) to mask(gui, file)

    segmap: np.uint8, bgr,  {fg=white, bg=black}
    mask:   np.uint8, bgra, {fg=red, bg=transparent}
    '''
    _,_,r = cv2.split(segmap) # b=g=r, a=r
    b = g = np.zeros_like(r)
    return cv2.merge((b,g,r,r)) 
Example #14
Source File: imgio_test.py    From SickZil-Machine with GNU Affero General Public License v3.0
def test_load_mask():
    path = './fixture/not_proj_dir/bgr1_mask.png'
    h,w = cv2.imread(path, cv2.IMREAD_UNCHANGED).shape[:2]
    b,g,r = cv2.split(io.load(path,io.MASK))
    assert h == b.shape[0]
    assert w == g.shape[1]
    assert(np.array_equal(b,g) 
       and np.array_equal(g,r)
       and np.array_equal(r,b)) 
Example #15
Source File: renderer.py    From Temporal-3D-Pose-Kinetics with Apache License 2.0
def get_alpha(imtmp, bgval=1.):
  h, w = imtmp.shape[:2]
  alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)

  b_channel, g_channel, r_channel = cv2.split(imtmp)

  im_RGBA = cv2.merge(
      (b_channel, g_channel, r_channel, alpha.astype(imtmp.dtype)))
  return im_RGBA 
Example #16
Source File: image_class.py    From HistoGAN with GNU General Public License v3.0
def increase_brightness(self, value):
        hsv = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
        h, s, v = cv2.split(hsv)
        lim = 255 - value
        v[v > lim] = 255
        v[v <= lim] += value
        final_hsv = cv2.merge((h, s, v))
        self.image = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2RGB) 
Example #17
Source File: combine.py    From unet-gan-matting with MIT License
def combine_object_background(object_file, background_file, output_name):
    border = 20
    size = [960, 720]

    foreground = cv2.imread(object_file, cv2.IMREAD_UNCHANGED)
    if foreground is None:
        return False

    ratio = numpy.amin(numpy.divide(
            numpy.subtract(size, [2*border, 2*border]), foreground.shape[0:2]))
    foreground_size = numpy.floor(numpy.multiply(foreground.shape[0:2], ratio)).astype(int)
    foreground = cv2.resize(foreground, (foreground_size[1], foreground_size[0]))
    foreground = image_fill(foreground, size, [0, 0, 0, 0])

    foreground = foreground.astype(float)
    cv2.normalize(foreground, foreground, 0.0, 1.0, cv2.NORM_MINMAX)
    alpha = cv2.split(foreground)[3]

    #foreground = cv2.imread(object_file, cv2.IMREAD_COLOR)
    background = cv2.imread(background_file)
    if background is None:
        return False

    ratio = numpy.amax(numpy.divide(foreground.shape[0:2], background.shape[0:2]))
    background_size = numpy.ceil(numpy.multiply(background.shape[0:2], ratio)).astype(int)
    #print(numpy.multiply(background.shape[0:2], ratio).astype(int))
    background = cv2.resize(background, (background_size[1], background_size[0]))
    background = background[0:foreground.shape[0], 0:foreground.shape[1]]
    background = background.astype(float)

    for i in range(0, 3):
        foreground[:,:,i] = numpy.multiply(alpha, foreground[:,:,i]*255)
        background[:,:,i] = numpy.multiply(1.0 - alpha, background[:,:,i])
    outImage = numpy.add(foreground[:,:,0:3], background)

    cv2.imwrite(output_name, outImage)

    return True 
Example #18
Source File: combine.py    From unet-gan-matting with MIT License
def generate_trimap(object_file, trimap_name):
    border = 20
    size = [960, 720]

    foreground = cv2.imread(object_file, cv2.IMREAD_UNCHANGED)
    if foreground is None:
        return False
    alpha = cv2.split(foreground)[3]

    ratio = numpy.amin(numpy.divide(
            numpy.subtract(size, [2*border, 2*border]), alpha.shape[0:2]))
    foreground_size = numpy.floor(numpy.multiply(alpha.shape[0:2], ratio)).astype(int)
    alpha = cv2.resize(alpha, (foreground_size[1], foreground_size[0]))
    alpha = image_fill(alpha, size, [0, 0, 0, 0])

    alpha = alpha.astype(float)
    cv2.normalize(alpha, alpha, 0.0, 1.0, cv2.NORM_MINMAX)

    _, inner_map = cv2.threshold(alpha, 0.999, 255, cv2.THRESH_BINARY)
    _, outer_map = cv2.threshold(alpha, 0.001, 255, cv2.THRESH_BINARY)

    inner_map = cv2.erode(inner_map, numpy.ones((5,5),numpy.uint8), iterations = 3)
    outer_map = cv2.dilate(outer_map, numpy.ones((5,5),numpy.uint8), iterations = 3)

    cv2.imwrite(trimap_name, inner_map + (outer_map - inner_map) /2)

    foreground = cv2.imread(object_file, cv2.IMREAD_UNCHANGED) 
Example #19
Source File: tools.py    From DL.EyeSight with GNU General Public License v3.0
def fetch_anno_targets_info(abs_anno_path, is_label_text=False):
    if not os.path.exists(abs_anno_path):
        raise IOError("No such annotation file!")
    with open(abs_anno_path, "r") as anno_reader:
        total_annos = list()
        for line in anno_reader:
            line = line.strip()
            sub_anno = re.split(r"\(|,|\)", line)
            a = [int(item) for item in sub_anno if len(item)]
            if len(a) == 5:
                if is_label_text:
                    total_annos.append(a[:4]+[config.idx_sign_dict[a[-1]]])
                else:
                    total_annos.append(a)
        return total_annos 
Example #20
Source File: helpers.py    From ImageSimilarityUsingCntk with MIT License
def imconvertCv2Numpy(img):
    (b,g,r) = cv2.split(img)
    return cv2.merge([r,g,b]) 
Example #21
Source File: pySaliencyMap.py    From pliers with BSD 3-Clause "New" or "Revised" License
def SMExtractRGBI(self, inputImage):
    # convert scale of array elements
    src = np.float32(inputImage) * 1./255
    # split
    (B, G, R) = cv2.split(src)
    # extract an intensity image
    I = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    # return
    return R, G, B, I

    # feature maps
    # constructing a Gaussian pyramid 
Example #22
Source File: Utils.py    From SiamFC-PyTorch with Apache License 2.0
def cv2_brg2rgb(bgr_img):
    """
    convert a BGR image to RGB
    """
    b, g, r = cv2.split(bgr_img)
    rgb_img = cv2.merge([r, g, b])
    
    return rgb_img 
Example #23
Source File: tools.py    From DL.EyeSight with GNU General Public License v3.0
def calc_rgb_mean():
    r_list, g_list, b_list = list(), list(), list()
    with open("/Volumes/projects/repos/RSI/LSD10/total.txt", "r") as reader:
        for line in reader.readlines():
            line = line.strip()
            src_img = cv2.imread(line)
            b, g, r = cv2.split(src_img)
            b_list.append(np.mean(b))
            g_list.append(np.mean(g))
            r_list.append(np.mean(r))
    print(np.mean(r_list))
    print(np.mean(g_list))
    print(np.mean(b_list)) 
Example #24
Source File: auto.py    From airtest with BSD 3-Clause "New" or "Revised" License
def hist_similarity(image_1, image_2):
    """color hist based image similarity
    
    @param image_1: np.array(the first input image)
    @param image_2: np.array(the second input image)
    @return similarity: float(range from [0,1], the bigger the more similar)
    """
    # Note: the original used OpenCV 2.x constants (cv2.cv.CV_COMP_CORREL,
    # cv2.cv.CV_RGB2GRAY); the cv2.cv module was removed in OpenCV 3, so the
    # modern equivalents are substituted here.
    if image_1.ndim == 2 and image_2.ndim == 2:
        hist_1 = cv2.calcHist([image_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([image_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL)
    elif image_1.ndim == 3 and image_2.ndim == 3:
        # B, G, R split
        b_1, g_1, r_1 = cv2.split(image_1)
        b_2, g_2, r_2 = cv2.split(image_2)
        hist_b_1 = cv2.calcHist([b_1], [0], None, [256], [0.0, 255.0])
        hist_g_1 = cv2.calcHist([g_1], [0], None, [256], [0.0, 255.0])
        hist_r_1 = cv2.calcHist([r_1], [0], None, [256], [0.0, 255.0])
        hist_b_2 = cv2.calcHist([b_2], [0], None, [256], [0.0, 255.0])
        hist_g_2 = cv2.calcHist([g_2], [0], None, [256], [0.0, 255.0])
        hist_r_2 = cv2.calcHist([r_2], [0], None, [256], [0.0, 255.0])
        similarity_b = cv2.compareHist(hist_b_1, hist_b_2, cv2.HISTCMP_CORREL)
        similarity_g = cv2.compareHist(hist_g_1, hist_g_2, cv2.HISTCMP_CORREL)
        similarity_r = cv2.compareHist(hist_r_1, hist_r_2, cv2.HISTCMP_CORREL)
        sum_bgr = similarity_b + similarity_g + similarity_r
        similarity = sum_bgr / 3.
    else:
        gray_1 = cv2.cvtColor(image_1, cv2.COLOR_RGB2GRAY)
        gray_2 = cv2.cvtColor(image_2, cv2.COLOR_RGB2GRAY)
        hist_1 = cv2.calcHist([gray_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([gray_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL)
    return similarity

#SIFT based similarity 
Example #25
Source File: transforms.py    From pytorch-priv with MIT License
def normalize(im, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0), rgb=False):
    # mean and std are indexed in (B, G, R) order; the output is always BGR.
    if rgb:
        r, g, b = cv2.split(im)
    else:
        b, g, r = cv2.split(im)
    norm_im = cv2.merge([(b - mean[0]) / std[0], (g - mean[1]) / std[1], (r - mean[2]) / std[2]])
    return norm_im 
Example #26
Source File: plant_features.py    From bonnet with GNU General Public License v3.0
def ndi(img):
  '''
    Get the normalized difference index
  '''
  # get channels
  B, G, R = cv2.split(img)

  # normalize
  B_ = B.astype(float) / np.median(B.astype(float))
  G_ = G.astype(float) / np.median(G.astype(float))
  R_ = R.astype(float) / np.median(R.astype(float))

  E = B_ + G_ + R_ + 0.001
  b = B_ / E
  g = G_ / E
  r = R_ / E

  # calculate ndi
  idx = (g - r) / (g + r)

  # expand contrast
  idx = contrast_stretch(idx)

  # convert to saveable image
  idx = idx.astype(np.uint8)

  return idx 
Example #27
Source File: plant_features.py    From bonnet with GNU General Public License v3.0
def exred(img):
  '''
    Returns the excess red (inverted, to comply with other masks) of the image as:
      exred = 1.4 * r - g
  '''

  # get channels
  B, G, R = cv2.split(img)

  # normalize
  B_ = B.astype(float) / np.median(B.astype(float))
  G_ = G.astype(float) / np.median(G.astype(float))
  R_ = R.astype(float) / np.median(R.astype(float))

  E = B_ + G_ + R_ + 0.001
  b = B_ / E
  g = G_ / E
  r = R_ / E

  # calculate exred
  exr = 1.4 * r - g

  # expand contrast
  exr = contrast_stretch(exr)

  # convert to saveable image
  exr = exr.astype(np.uint8)

  return exr 
Example #28
Source File: plant_features.py    From bonnet with GNU General Public License v3.0
def cive(img):
  '''
    Returns the inverse color index of vegetation extraction of the image as:
      cive = 0.881 * g - 0.441 * r - 0.385 * b - 18.78745
  '''

  # get channels
  B, G, R = cv2.split(img)

  # normalize
  B_ = B.astype(float) / np.median(B.astype(float))
  G_ = G.astype(float) / np.median(G.astype(float))
  R_ = R.astype(float) / np.median(R.astype(float))

  E = B_ + G_ + R_ + 0.001
  b = B_ / E
  g = G_ / E
  r = R_ / E

  # calculate cive
  c = 0.881 * g - 0.441 * r - 0.385 * b - 18.78745

  # expand contrast
  c = contrast_stretch(c)

  # convert to saveable image
  c = c.astype(np.uint8)

  return c 
Example #29
Source File: plant_features.py    From bonnet with GNU General Public License v3.0
def exgreen(img):
  '''
    Returns the excess green of the image as:
      exgreen = 2.8 * g - r - b
  '''

  # get channels
  B, G, R = cv2.split(img)

  # normalize
  B_ = B.astype(float) / np.median(B.astype(float))
  G_ = G.astype(float) / np.median(G.astype(float))
  R_ = R.astype(float) / np.median(R.astype(float))

  E = B_ + G_ + R_ + 0.001
  b = B_ / E
  g = G_ / E
  r = R_ / E

  # calculate exgreen
  exgr = 2.8 * g - r - b

  # expand contrast
  exgr = contrast_stretch(exgr)

  # convert to saveable image
  exgr = exgr.astype(np.uint8)

  return exgr 
Example #30
Source File: renderer.py    From hmd with MIT License
def append_alpha(imtmp):
    alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
    if np.issubdtype(imtmp.dtype, np.uint8):
        alpha = alpha * 255
    b_channel, g_channel, r_channel = cv2.split(imtmp)
    im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha))
    return im_RGBA