Python cv2.COLOR_BGR2RGBA Examples

The following are 7 code examples of cv2.COLOR_BGR2RGBA(), the OpenCV color-conversion code that reorders a BGR image's channels to RGB and appends an alpha channel. Each example comes from the open-source project and source file named above it. You may also want to check out the other available functions/classes of the module cv2.
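Before the project excerpts, here is a minimal, self-contained sketch of the conversion itself ("frame.jpg" and "frame_rgba.png" are placeholder paths). cv2.imread() decodes to BGR channel order; cv2.cvtColor() with cv2.COLOR_BGR2RGBA reorders the channels and appends a fully opaque alpha channel, which matches what PIL's Image.fromarray() expects when the array is later shown in a Tkinter GUI, as several of the examples below do.

import cv2
from PIL import Image

# OpenCV decodes images into BGR channel order ("frame.jpg" is a placeholder path).
bgr = cv2.imread("frame.jpg")

# Reorder to RGB and append an opaque alpha channel (255 for uint8 images) in one call.
rgba = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGBA)
assert rgba.shape[2] == 4  # channels are now R, G, B, A

# PIL expects RGB(A) channel order, so the converted array can be wrapped directly.
Image.fromarray(rgba).save("frame_rgba.png")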
Example #1
Source File: VehicleMoniter.py    From Traffic-Rule-Violation-Detection-System with MIT License
def getLicensePlateNumber(filer):
	try:
		js = api.recognize_file(filer, secret_key, country, recognize_vehicle=recognize_vehicle, state=state, return_image=return_image, topn=topn, prewarp=prewarp)
		js = js.to_dict()
		# Corners 0 and 2 of the returned plate polygon are opposite corners of its bounding box.
		X1 = js['results'][0]['coordinates'][0]['x']
		Y1 = js['results'][0]['coordinates'][0]['y']
		X2 = js['results'][0]['coordinates'][2]['x']
		Y2 = js['results'][0]['coordinates'][2]['y']
		img = cv2.imread(filer)
		rimg = img[Y1:Y2, X1:X2]  # crop the plate region
		img3 = Image.fromarray(rimg)
		w, h = img3.size
		asprto = w / h
		frame3 = cv2.resize(rimg, (150, int(150 / asprto)))  # 150 px wide, aspect ratio kept
		cv2image3 = cv2.cvtColor(frame3, cv2.COLOR_BGR2RGBA)
		img3 = Image.fromarray(cv2image3)
		imgtk3 = ImageTk.PhotoImage(image=img3)
		display4.imgtk = imgtk3  # keep a reference so Tkinter does not garbage-collect the image
		display4.configure(image=imgtk3)
		display5.configure(text=js['results'][0]['plate'])
	except ApiException as e:
		print("Exception: \n", e)
Example #2
Source File: VehicleMoniter.py    From Traffic-Rule-Violation-Detection-System with MIT License
def checkRedLightCrossed(img):
	global count
	for v in vehicles:
		if not v.crossed and len(v.points) >= 2:
			x1, y1 = v.points[0]
			x2, y2 = v.points[-1]
			# The track started below the stop line (yl3) and now ends above it,
			# so the vehicle has crossed the line.
			if y1 > yl3 and y2 < yl3:
				count += 1
				v.crossed = True
				bimg = img[int(v.rect[1]):int(v.rect[1] + v.rect[3]), int(v.rect[0]):int(v.rect[0] + v.rect[2])]
				img2 = Image.fromarray(bimg)
				w, h = img2.size
				asprto = w / h
				frame2 = cv2.resize(bimg, (250, int(250 / asprto)))  # 250 px wide, aspect ratio kept
				cv2image2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGBA)
				img2 = Image.fromarray(cv2image2)
				imgtk2 = ImageTk.PhotoImage(image=img2)
				display2.imgtk = imgtk2  # keep a reference so Tkinter does not garbage-collect the image
				display2.configure(image=imgtk2)
				name = 'Rule Breakers/culprit' + str(time.time()) + '.jpg'
				cv2.imwrite(name, bimg)
				# Recognise the plate in a background thread so the UI stays responsive.
				thread = threading.Thread(target=getLicensePlateNumber, args=(name,))
				thread.daemon = True
				thread.start()
Example #3
Source File: VehicleMoniter.py    From Traffic-Rule-Violation-Detection-System with MIT License
def checkSpeed(ftime, img):
	for v in vehicles:
		if not v.speedChecked and len(v.points) >= 2:
			x1, y1 = v.points[0]
			x2, y2 = v.points[-1]
			if y2 < yl1 and y2 > yl3 and not v.entered:
				# The vehicle entered the measured zone between lines yl1 and yl3.
				v.enterTime = ftime
				v.entered = True
			elif y2 < yl3 and y2 > yl5 and not v.exited:
				v.exitTime = ftime
				v.exited = True  # the original had `v.exited==False`, a no-op comparison
				v.speedChecked = True
				# Speed from the time spent in the zone; 60 is a hard-coded calibration constant.
				speed = 60 / (v.exitTime - v.enterTime)
				print(speed)
				bimg = img[int(v.rect[1]):int(v.rect[1] + v.rect[3]), int(v.rect[0]):int(v.rect[0] + v.rect[2])]
				img2 = Image.fromarray(bimg)
				w, h = img2.size
				asprto = w / h
				frame2 = cv2.resize(bimg, (250, int(250 / asprto)))  # 250 px wide, aspect ratio kept
				cv2image2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGBA)
				img2 = Image.fromarray(cv2image2)
				imgtk2 = ImageTk.PhotoImage(image=img2)
				display2.imgtk = imgtk2  # keep a reference so Tkinter does not garbage-collect the image
				display2.configure(image=imgtk2)
				display3.configure(text=str(speed)[:5] + ' Km/hr')
				if speed > 60:
					name = 'Rule Breakers/culprit' + str(time.time()) + '.jpg'
					cv2.imwrite(name, bimg)
					# Recognise the plate in a background thread so the UI stays responsive.
					thread = threading.Thread(target=getLicensePlateNumber, args=(name,))
					thread.daemon = True
					thread.start()
Example #4
Source File: cv2_backend.py    From nnabla with Apache License 2.0
def convert_channel_from_bgr(img, num_channels):
    if num_channels in [0, 1]:  # BGR => grayscale
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if num_channels == 1:
            img = img[..., np.newaxis]  # keep an explicit channel axis
        return img

    elif num_channels in [-1, 3]:  # BGR => RGB, reusing the source buffer
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB, dst=img)

    elif num_channels == 4:  # BGR => RGBA; the alpha channel is filled with 255
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)

    raise ValueError("num_channels must be one of [-1, 0, 1, 3, 4]")
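A brief usage sketch for the helper above; the tiny test input is my own, not nnabla's, and assumes cv2 and numpy are imported as in the source file (any uint8 H x W x 3 BGR array works):

import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.uint8)  # hypothetical 2x2 test image
bgr[..., 0] = 255                          # pure blue in BGR order

gray = convert_channel_from_bgr(bgr.copy(), 1)
print(gray.shape)   # (2, 2, 1) - grayscale with an explicit channel axis

rgba = convert_channel_from_bgr(bgr.copy(), 4)
print(rgba.shape)   # (2, 2, 4)
print(rgba[0, 0])   # [  0   0 255 255] - blue is now the third channel, alpha = 255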
Example #5
Source File: factory.py    From pibooth with MIT License
def _build_final_image(self, image):
    """See base class description."""
    if self._overlay_image:
        overlay = cv2.cvtColor(cv2.imread(self._overlay_image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGBA)
        overlay, _, _ = self._image_resize_keep_ratio(overlay, self.width, self.height, True)

        # Clean the overlay: zero the color channels wherever alpha is 0. Without
        # this, fully transparent pixels would be treated as opaque. See:
        # https://www.pyimagesearch.com/2016/04/25/watermarking-images-with-opencv-and-python
        RR, GG, BB, A = cv2.split(overlay)
        RR = cv2.bitwise_and(RR, RR, mask=A)
        GG = cv2.bitwise_and(GG, GG, mask=A)
        BB = cv2.bitwise_and(BB, BB, mask=A)
        overlay = cv2.merge([RR, GG, BB, A])

        # Add an alpha channel to the image if it does not already have one
        if image.shape[2] == 3:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2RGBA)

        # Create a mask of the overlay and its inverse
        img2gray = cv2.cvtColor(overlay, cv2.COLOR_RGB2GRAY)
        _ret, mask = cv2.threshold(img2gray, 30, 255, cv2.THRESH_BINARY)
        mask_inv = cv2.bitwise_not(mask)
        # Black out the overlay area in the base image
        img1_bg = cv2.bitwise_and(image, image, mask=mask_inv)
        # Take only the overlay region from the overlay image
        img2_fg = cv2.bitwise_and(overlay, overlay, mask=mask)
        # Composite the two parts into the final image
        image = cv2.add(img1_bg, img2_fg)
        # Drop the alpha channel
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)

    return Image.fromarray(image)
Example #6
Source File: blue_filter.py    From voice-enabled-chatbot with MIT License
def extractSkin(image):
    # Take a copy of the image
    img = image.copy()
    # Convert from BGR color space to HSV
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Define HSV thresholds for skin tones
    lower_threshold = np.array([0, 48, 80], dtype=np.uint8)
    upper_threshold = np.array([20, 255, 255], dtype=np.uint8)

    # Single-channel mask marking pixels that fall inside the threshold range
    skinMask = cv2.inRange(img, lower_threshold, upper_threshold)

    # Clean up the mask with a Gaussian blur
    skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)

    # Extract the skin pixels selected by the mask
    skin = cv2.bitwise_and(img, img, mask=skinMask)

    # Convert the image back to BGR color space
    img = cv2.cvtColor(skin, cv2.COLOR_HSV2BGR)

    # BGR to RGBA gives a more appropriate color tint than the OpenCV colormask
    # options; the alpha channel is added so that black pixels can later be made
    # transparent for overlapping (WIP)
    img_a = cv2.cvtColor(img, cv2.COLOR_BGR2RGBA)

    # Return the skin image
    return img_a
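The "(WIP)" note above refers to the unfinished step of actually making the black background transparent via the new alpha channel. A minimal sketch of one way to do it, applied to the array returned by extractSkin; the helper name and the exact-zero test for "black" are my own assumptions, not part of the project:

import numpy as np

def make_black_transparent(rgba):
    # Hypothetical helper (not in the original project): zero the alpha channel
    # wherever every color channel is 0, i.e. the masked-out black background.
    out = rgba.copy()
    background = np.all(out[..., :3] == 0, axis=-1)
    out[background, 3] = 0
    return out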
Example #7
Source File: VehicleMoniter.py    From Traffic-Rule-Violation-Detection-System with MIT License
def main(sess=sesser):
	fTime = time.time()
	_, image_np = cap.read()  # the original passed 0, but VideoCapture.read() takes no frame index

	# The model expects images with shape [1, None, None, 3], so add a batch axis.
	image_np_expanded = np.expand_dims(image_np, axis=0)
	# Actual detection.
	(boxes, scores, classes, num) = sess.run(
		[detection_boxes, detection_scores, detection_classes, num_detections],
		feed_dict={image_tensor: image_np_expanded})

	# Visualization of the results of a detection.
	img = image_np
	imgF, coords = vis_util.visualize_boxes_and_labels_on_image_array(
		image_np,
		np.squeeze(boxes),
		np.squeeze(classes).astype(np.int32),
		np.squeeze(scores),
		category_index,
		use_normalized_coordinates=True,
		line_thickness=2)

	matchVehicles(coords, im_width, im_height, imgF)
	checkRedLightCrossed(imgF)
	checkSpeed(fTime, img)
	# Draw the tracked points of every vehicle still being tracked.
	for v in vehicles:
		if v.getTracking():
			for p in v.getPoints():
				cv2.circle(image_np, p, 3, (200, 150, 75), 6)

	# Draw the three reference lines used by the red-light and speed checks.
	cv2.line(image_np, (int(xl1), int(yl1)), (int(xl2), int(yl2)), (0, 255, 0), 3)
	cv2.line(image_np, (int(xl3), int(yl3)), (int(xl4), int(yl4)), (0, 0, 255), 3)
	cv2.line(image_np, (int(xl5), int(yl5)), (int(xl6), int(yl6)), (255, 0, 0), 3)
	VideoFileOutput.write(image_np)
	frame = cv2.resize(image_np, (1020, 647))
	cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
	img = Image.fromarray(cv2image)
	imgtk = ImageTk.PhotoImage(image=img)
	display1.imgtk = imgtk  # keep a reference so Tkinter does not garbage-collect the image
	display1.configure(image=imgtk)
	# Reschedule this function so the next frame is processed on the Tkinter event loop.
	window.after(1, main)