Python cv2.NORM_L1 Examples

The following are 8 code examples of cv2.NORM_L1(), drawn from open-source projects; the source file and project are noted above each example.
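cv2.NORM_L1 selects the L1 norm, i.e. the sum of absolute values (or of absolute differences between two arrays). In the examples below it appears in two roles: as the norm type for cv2.norm / cv2.normalize, and as the distance measure for cv2.BFMatcher. A minimal sketch of both uses (the array values are illustrative):

import cv2
import numpy as np

a = np.array([1, -2, 3], dtype=np.float32)
b = np.array([0, 1, 1], dtype=np.float32)

# L1 norm of one array: |1| + |-2| + |3| = 6.0
print(cv2.norm(a, cv2.NORM_L1))

# L1 distance between two arrays: |1-0| + |-2-1| + |3-1| = 6.0
print(cv2.norm(a, b, cv2.NORM_L1))

# Brute-force descriptor matcher that compares descriptors with the L1 distance
matcher = cv2.BFMatcher(cv2.NORM_L1)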
Example #1
Source File: keypoint_matching_contrib.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        # BRIEF is a feature descriptor; CenSurE (STAR) is recommended as a fast detector:
        if check_cv_version_is_new():
            # In OpenCV 3/4, STAR/BRIEF live in the contrib module, which must be built separately.
            try:
                self.star_detector = cv2.xfeatures2d.StarDetector_create()
                self.brief_extractor = cv2.xfeatures2d.BriefDescriptorExtractor_create()
            except Exception:
                import traceback
                traceback.print_exc()
                print("to use %s, you should build contrib with opencv3.0" % self.METHOD_NAME)
                raise NoModuleError("There is no %s module in your OpenCV environment !" % self.METHOD_NAME)
        else:
            # OpenCV2.x
            self.star_detector = cv2.FeatureDetector_create("STAR")
            self.brief_extractor = cv2.DescriptorExtractor_create("BRIEF")

        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1, cv2.NORM_L2, cv2.NORM_HAMMING (not usable)
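The snippet above only constructs the detector, extractor and matcher. A minimal usage sketch, not taken from the Airtest source, of how a STAR detector, BRIEF extractor and NORM_L1 matcher are typically chained; the image file names and the 0.75 ratio threshold are illustrative:

import cv2

img1 = cv2.imread("template.png", cv2.IMREAD_GRAYSCALE)  # illustrative file names
img2 = cv2.imread("screen.png", cv2.IMREAD_GRAYSCALE)

star = cv2.xfeatures2d.StarDetector_create()
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

# Detect keypoints with STAR, then compute BRIEF descriptors for them
kp1 = star.detect(img1, None)
kp2 = star.detect(img2, None)
kp1, des1 = brief.compute(img1, kp1)
kp2, des2 = brief.compute(img2, kp2)

# Match with the L1 distance and keep distinctive matches via a ratio test
matcher = cv2.BFMatcher(cv2.NORM_L1)
matches = matcher.knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.75 * n.distance]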
Example #2
Source File: clip_filter.py    From youtube-gesture-dataset with BSD 3-Clause "New" or "Revised" License
def is_picture(self):
        sampling_interval = int(math.floor(self.scene_length / 5))
        sampling_frames = list(range(self.start_frame_no + sampling_interval,
                                     self.end_frame_no - sampling_interval + 1, sampling_interval))
        frames = []
        for frame_no in sampling_frames:
            self.video.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
            ret, frame = self.video.read()
            frames.append(frame)

        diff = 0
        n_diff = 0
        for frame, next_frame in zip(frames, frames[1:]):
            diff += cv2.norm(frame, next_frame, cv2.NORM_L1)  # abs diff
            n_diff += 1
        diff /= n_diff
        self.debugging_info[4] = round(diff, 0)

        return diff < 3000000 
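For 8-bit frames, cv2.norm(frame, next_frame, cv2.NORM_L1) is the summed absolute per-pixel, per-channel difference; for example, on a 1280x720 three-channel frame the 3,000,000 threshold corresponds to an average difference of roughly one intensity level per channel, i.e. an essentially static clip. A quick sketch of the equivalence, using random stand-in frames rather than the project's video:

import cv2
import numpy as np

frame = np.random.randint(0, 256, (720, 1280, 3), dtype=np.uint8)
next_frame = np.random.randint(0, 256, (720, 1280, 3), dtype=np.uint8)

sad = cv2.norm(frame, next_frame, cv2.NORM_L1)                             # OpenCV L1 norm
ref = np.abs(frame.astype(np.int64) - next_frame.astype(np.int64)).sum()   # NumPy equivalent
print(sad, ref)  # the two values agree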
Example #3
Source File: event.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def get_frame(self, frame_data):

		# print(frame_data.size)
		frame = np.rec.array(None, dtype=[('value', np.float16),('valid', np.bool_)], shape=(self.height, self.width))
		frame.valid.fill(False)
		frame.value.fill(0.)
		# print(frame.size)

		for datum in np.nditer(frame_data, flags=['zerosize_ok']):
			# print(datum['y'])
			ts_val = datum['ts']  # timestamp is read but not used here
			f_data = frame[datum['y'], datum['x']]
			f_data.value += 1  # accumulate the event count at this pixel

		img = frame.value/20*255
		img = img.astype('uint8')
		# img = np.piecewise(img, [img <= 0, (img > 0) & (img < 255), img >= 255], [0, lambda x: x, 255])
		# cv2.normalize(img,img,0,255,cv2.NORM_L1)
		cv2.normalize(img,img,0,255,cv2.NORM_MINMAX)
		img = cv2.flip(img, 1)
		img = np.rot90(img)
		# cv2.imshow('img_f', img)
		# cv2.waitKey(0)
		return img 
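Note the two normalize variants in this example. With cv2.NORM_MINMAX the alpha/beta arguments give the target value range, while with cv2.NORM_L1 alpha is the target L1 norm of the output (beta is ignored), so the commented-out call with alpha=0 would scale the whole image to zero. A small sketch with illustrative values:

import cv2
import numpy as np

img = np.array([[0, 10], [20, 40]], dtype=np.float32)

# NORM_MINMAX: rescale the value range to [0, 255] -> [[0, 63.75], [127.5, 255]]
minmax = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)

# NORM_L1: rescale so the sum of absolute values equals alpha (255 here),
# i.e. every pixel becomes value * 255 / 70
l1 = cv2.normalize(img, None, 255, 0, cv2.NORM_L1)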
Example #4
Source File: keypoint_base.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        self.detector = cv2.KAZE_create()
        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1, cv2.NORM_L2, cv2.NORM_HAMMING (not usable)
Example #5
Source File: keypoint_matching.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        self.detector = cv2.BRISK_create()
        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)  # cv2.NORM_L1, cv2.NORM_L2, cv2.NORM_HAMMING (not usable)
Example #6
Source File: keypoint_matching.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        self.detector = cv2.AKAZE_create()
        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_L1)  # cv2.NORM_L1, cv2.NORM_L2, cv2.NORM_HAMMING (not usable)
Example #7
Source File: keypoint_matching.py    From Airtest with Apache License 2.0
def init_detector(self):
        """Init keypoint detector object."""
        self.detector = cv2.ORB_create()
        # create BFMatcher object:
        self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)  # cv2.NORM_L1, cv2.NORM_L2, cv2.NORM_HAMMING (not usable)
Example #8
Source File: event.py    From EVDodgeNet with BSD 3-Clause "New" or "Revised" License
def get_projection_mat(self, dx, dy, dz, theta):

		print("inside get_projection", dx,dy,dz,theta)
		frame = np.rec.array(None, dtype=[('value', np.uint16)], shape=(self.height, self.width))
		frame.value.fill(0)

		dx = dx*1e-3
		dy = dy*1e-3
		dz = dz*1e-3
		# Project event coordinates using the motion model (dx, dy, dz, theta)
		start = time.time()
		k = np.matrix([[dx, dy]])
		con_k = np.repeat(k.T, self.old_xy.size // 2, axis=1)
		c, s = np.cos(theta), np.sin(theta)
		R = np.matrix([[c,-s], [s,c]])
		new = self.old_xy - np.multiply((self.ts),( con_k + (dz*np.dot(R, self.old_xy))))
		end = time.time()
		print("Projection time", end-start)

		# Conversion of 2D coordinates to a 1D array index
		i = np.array(new[0,:] + self.width * new[1,:])
		i = i.astype(int)  # astype returns a new array, so assign it back
		u_ele, c_ele = np.unique(i.T,return_counts=True)
		u_c = np.asarray((u_ele, c_ele))
		print(u_c.shape, self.width, self.height)
		
		start = time.time()
		
		# inputs = range(new.size/2)

		# for i in inputs:
		# 	if((new[0,i] >= self.width) or (new[0,i]<0) or (new[1,i] >= self.height) or (new[1,i] < 0)):
		# 		continue
		# 	xy = frame[int(new[1,i]), int(new[0,i])]
		# 	xy.value += 1

		inputs = range(u_c.size // 2)
		for i in inputs:
			x = int(u_c[0,i]%self.width)
			y = int(u_c[0,i]/self.width)

			if((x >= self.width) or (x<0) or (y >= self.height) or (y < 0)):
				continue
			xy = frame[y,x]
			xy.value = u_c[1,i]

		end = time.time()
		print("For loop time", end-start)
		img = frame.value * 10
		print(img.max())
		# cv2.normalize(img,img,0,255,cv2.NORM_MINMAX)
		img = img.astype('uint8')
		# cv2.normalize(img,img,0,255,cv2.NORM_L1)
		# cv2.imshow('img_p', img)
		# cv2.waitKey(0)
		return img