Java Code Examples for org.opencv.core.Mat#zeros()

The following examples show how to use org.opencv.core.Mat#zeros(). You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
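
Before the project examples, here is a minimal, self-contained sketch of the two Mat.zeros overloads the examples rely on: Mat.zeros(int rows, int cols, int type) and Mat.zeros(Size size, int type). Both return a new matrix of the requested size and type with every element set to zero. The class name ZerosSketch and the printouts at the end are illustrative assumptions for this page, not code from any of the projects below.

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Size;

public class ZerosSketch {
    public static void main(String[] args) {
        // the native OpenCV library must be loaded before any Mat is created
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // rows/cols overload: a 3x4 single-channel 8-bit matrix, every element 0
        Mat small = Mat.zeros(3, 4, CvType.CV_8UC1);
        System.out.println(small.dump());

        // Size overload: a 640x480 (width x height) three-channel 8-bit image, i.e. an all-black BGR frame
        Mat black = Mat.zeros(new Size(640, 480), CvType.CV_8UC3);
        System.out.println(black.size() + ", channels = " + black.channels());
    }
}

Both overloads appear below: the Size form in Examples 1, 2, 10, 12 and 13, and the rows/cols form in most of the others.
 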
Example 1
Source File: PerspectiveTransformation.java    From AndroidDocumentScanner with MIT License (6 votes)
public Mat transform(Mat src, MatOfPoint2f corners) {
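    // sort the corners, allocate a zeroed destination of the right size, then warp src into it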
    MatOfPoint2f sortedCorners = sortCorners(corners);
    Size size = getRectangleSize(sortedCorners);

    Mat result = Mat.zeros(size, src.type());
    MatOfPoint2f imageOutline = getOutline(result);

    Mat transformation = Imgproc.getPerspectiveTransform(sortedCorners, imageOutline);
    Imgproc.warpPerspective(src, result, transformation, size);

    return result;
}
 
Example 2
Source File: AutoCalibrationManager.java    From ShootOFF with GNU General Public License v3.0 (6 votes)
private void blankRotatedRect(Mat mat, final RotatedRect rect) {
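	// draw the rotated rect's outline on a scratch mask, flood-fill its interior, then zero those pixels in mat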
	final Mat tempMat = Mat.zeros(mat.size(), CvType.CV_8UC1);

	final Point points[] = new Point[4];
	rect.points(points);
	for (int i = 0; i < 4; ++i) {
		Core.line(tempMat, points[i], points[(i + 1) % 4], new Scalar(255, 255, 255));
	}

	final Mat tempMask = Mat.zeros((mat.rows() + 2), (mat.cols() + 2), CvType.CV_8UC1);
	Imgproc.floodFill(tempMat, tempMask, rect.center, new Scalar(255, 255, 255), null, new Scalar(0, 0, 0),
			new Scalar(254, 254, 254), 4);

	if (logger.isTraceEnabled()) {
		Highgui.imwrite("poly.png", tempMat);
	}

	mat.setTo(new Scalar(0, 0, 0), tempMat);
}
 
Example 3
Source File: FeatureWeight.java    From OptimizedImageEnhance with MIT License (5 votes)
public static Mat Saliency(Mat img) {
	// blur image with a 3x3 or 5x5 Gaussian filter
	Mat gfbgr = new Mat();
	Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
	// Perform sRGB to CIE Lab color space conversion
	Mat LabIm = new Mat();
	Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
	// Compute Lab average values (note that in the paper this average is found from the
	// un-blurred original image, but the results are quite similar)
	List<Mat> lab = new ArrayList<>();
	Core.split(LabIm, lab);
	Mat l = lab.get(0);
	l.convertTo(l, CvType.CV_32F);
	Mat a = lab.get(1);
	a.convertTo(a, CvType.CV_32F);
	Mat b = lab.get(2);
	b.convertTo(b, CvType.CV_32F);
	double lm = Core.mean(l).val[0];
	double am = Core.mean(a).val[0];
	double bm = Core.mean(b).val[0];
	// Finally compute the saliency map
	Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
	Core.subtract(l, new Scalar(lm), l);
	Core.subtract(a, new Scalar(am), a);
	Core.subtract(b, new Scalar(bm), b);
	Core.add(sm, l.mul(l), sm);
	Core.add(sm, a.mul(a), sm);
	Core.add(sm, b.mul(b), sm);
	return sm;
}
 
Example 4
Source File: FeatureWeight.java    From OptimizedImageEnhance with MIT License (5 votes)
public static Mat Exposedness(Mat img) {
	double sigma = 0.25;
	double average = 0.5;
	int rows = img.rows();
	int cols = img.cols();
	Mat exposedness = Mat.zeros(rows, cols, img.type());
	// W = exp(-(img - aver).^2 / (2*sigma^2));
	for (int i = 0; i < rows; i++) {
		for (int j = 0; j < cols; j++) {
			double value = Math.exp(-1.0 * Math.pow(img.get(i, j)[0] - average, 2.0) / (2 * Math.pow(sigma, 2.0)));
			exposedness.put(i, j, value);
		}
	}
	return exposedness;
}
 
Example 5
Source File: WeightCalculate.java    From ImageEnhanceViaFusion with MIT License (5 votes)
public static Mat Saliency(Mat img) {
	// blur image with a 3x3 or 5x5 Gaussian filter
	Mat gfbgr = new Mat();
	Imgproc.GaussianBlur(img, gfbgr, new Size(3, 3), 3);
	// Perform sRGB to CIE Lab color space conversion
	Mat LabIm = new Mat();
	Imgproc.cvtColor(gfbgr, LabIm, Imgproc.COLOR_BGR2Lab);
	// Compute Lab average values (note that in the paper this average is found from the
	// un-blurred original image, but the results are quite similar)
	List<Mat> lab = new ArrayList<Mat>();
	Core.split(LabIm, lab);
	Mat l = lab.get(0);
	l.convertTo(l, CvType.CV_32F);
	Mat a = lab.get(1);
	a.convertTo(a, CvType.CV_32F);
	Mat b = lab.get(2);
	b.convertTo(b, CvType.CV_32F);
	double lm = Core.mean(l).val[0];
	double am = Core.mean(a).val[0];
	double bm = Core.mean(b).val[0];
	// Finally compute the saliency map
	Mat sm = Mat.zeros(l.rows(), l.cols(), l.type());
	Core.subtract(l, new Scalar(lm), l);
	Core.subtract(a, new Scalar(am), a);
	Core.subtract(b, new Scalar(bm), b);
	Core.add(sm, l.mul(l), sm);
	Core.add(sm, a.mul(a), sm);
	Core.add(sm, b.mul(b), sm);
	return sm;
}
 
Example 6
Source File: WeightCalculate.java    From ImageEnhanceViaFusion with MIT License (5 votes)
public static Mat Exposedness(Mat img) {
	double sigma = 0.25;
	double average = 0.5;
	int rows = img.rows();
	int cols = img.cols();
	Mat exposedness = Mat.zeros(rows, cols, img.type());
	// W = exp(-(img - aver).^2 / (2*sigma^2));
	for (int i = 0; i < rows; i++) {
		for (int j = 0; j < cols; j++) {
			double value = Math.exp(-1.0 * Math.pow(img.get(i, j)[0] - average, 2.0) / (2 * Math.pow(sigma, 2.0)));
			exposedness.put(i, j, value);
		}
	}
	return exposedness;
}
 
Example 7
Source File: ImgprocessUtils.java    From classchecks with Apache License 2.0 (5 votes)
/**
 * The main idea is:
 *  1. Compute the average gray level of the source image I and record its rows and cols.
 *  2. Split the image into N*M blocks of size blockSize and take the mean of each block, giving a block-brightness matrix D.
 *  3. Subtract the source image's average gray level from every element of D, giving a brightness-difference matrix E.
 *  4. Resize E to the source image's size with bicubic interpolation, giving a brightness-distribution matrix R.
 *  5. The corrected image is result = I - R.
 * @Title: unevenLightCompensate
 * @Description: uneven-illumination compensation
 * @param image the image to correct (converted to gray and modified in place)
 * @param blockSize side length, in pixels, of the averaging blocks
 */
public static void unevenLightCompensate(Mat image, int blockSize) {
	if(image.channels() == 3) {
		Imgproc.cvtColor(image, image, Imgproc.COLOR_RGB2GRAY); // 7 == COLOR_RGB2GRAY
	}
	double average = Core.mean(image).val[0];
	Scalar scalar = new Scalar(average);
	int rowsNew = (int) Math.ceil((double)image.rows() / (double)blockSize);
	int colsNew = (int) Math.ceil((double)image.cols() / (double)blockSize);
	Mat blockImage = Mat.zeros(rowsNew, colsNew, CvType.CV_32FC1);
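	// compute the mean gray level of each blockSize x blockSize block (edge blocks are clipped to the image)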
	for(int i = 0; i < rowsNew; i ++) {
		for(int j = 0; j < colsNew; j ++) {
			int rowmin = i * blockSize;
			int rowmax = (i + 1) * blockSize;
			if(rowmax > image.rows()) rowmax = image.rows();
			int colmin = j * blockSize;
			int colmax = (j +1) * blockSize;
			if(colmax > image.cols()) colmax = image.cols();
			Range rangeRow = new Range(rowmin, rowmax);
			Range rangeCol = new Range(colmin, colmax);
			Mat imageROI = new Mat(image, rangeRow, rangeCol);
			double temaver = Core.mean(imageROI).val[0];
			blockImage.put(i, j, temaver);
		}
	}
	
	// subtract the global mean from the block means, then upscale the block matrix to the full image size
	Core.subtract(blockImage, scalar, blockImage);
	Mat blockImage2 = new Mat();
	Imgproc.resize(blockImage, blockImage2, image.size(), 0, 0, Imgproc.INTER_CUBIC);
	Mat image2 = new Mat();
	image.convertTo(image2, CvType.CV_32FC1);
	Mat dst = new Mat();
	Core.subtract(image2, blockImage2, dst);
	dst.convertTo(image, CvType.CV_8UC1);
}
 
Example 8
Source File: MatOperation.java    From Android-Face-Recognition-with-Deep-Learning-Library with Apache License 2.0 (5 votes)
public static Rect[] rotateFaces(Mat img, Rect[] faces, int angle){
    Point center = new Point(img.cols()/2, img.rows()/2);
    Mat rotMat = Imgproc.getRotationMatrix2D(center, angle, 1);
    rotMat.convertTo(rotMat, CvType.CV_32FC1);
    float scale = (float) img.cols() / img.rows(); // cast first so the aspect ratio is not truncated by integer division
    for(Rect face : faces){
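        // express the face's top-left corner in homogeneous coordinates (x, y, 1)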
        Mat m = new Mat(3, 1, CvType.CV_32FC1);
        m.put(0,0,face.x);
        m.put(1,0,face.y);
        m.put(2,0,1);
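        // rotate the corner: res = rotMat (2x3) * m (3x1)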
        Mat res = Mat.zeros(2,1,CvType.CV_32FC1);
        Core.gemm(rotMat, m, 1, new Mat(), 0, res, 0);
        face.x = (int)res.get(0,0)[0];
        face.y = (int)res.get(1,0)[0];
        if(angle == 270 || angle == -90){
            face.x = (int)(face.x * scale - face.width);
            face.x = face.x + face.width/4;
            face.y = face.y + face.height/4;
        }else if(angle == 180 || angle == -180){
            face.x = face.x - face.width;
            face.y = face.y - face.height;
        }else if(angle == 90 || angle == -270){
            face.y = (int)(face.y * scale - face.height);
            face.x = face.x - face.width/4;
            face.y = face.y - face.height/4;
        }
    }
    return faces;
}
 
Example 9
Source File: ImageMatchers.java    From onetwo with Apache License 2.0 (5 votes)
public static Core.MinMaxLocResult matchResult(Mat source, Mat destImage) {
	// result matrix holding the match score for every placement of the template over the source
	Mat result = Mat.zeros(source.rows() - destImage.rows() + 1, source.cols() - destImage.cols() + 1, CvType.CV_32FC1);
	// run template matching
	Imgproc.matchTemplate(source, destImage, result, Imgproc.TM_SQDIFF_NORMED);
	// normalize the scores to [0, 1]
	Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1);
	// find the extrema; MinMaxLocResult holds the min/max values and their (x, y) locations
	Core.MinMaxLocResult mlr = Core.minMaxLoc(result);
	return mlr;
}
 
Example 10
Source File: IpCamera.java    From opencv-fun with GNU Affero General Public License v3.0 (5 votes)
public IpCamera(final String url) {
	this.frame = Mat.zeros(new Size(200, 200), CvType.CV_8UC3);
	this.url = url;
	thread = new Thread(new Runnable() {
		@Override
		public void run () {
			while(true) {
				try {
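					// fetch a single JPEG snapshot from the camera's /shot.jpg endpoint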
					ByteArrayOutputStream bytes = new ByteArrayOutputStream();
					byte[] buffer = new byte[10 * 1024];
					URLConnection con = new URL(url + "/shot.jpg").openConnection();
					InputStream in = con.getInputStream();
					int read = -1;
					while((read = in.read(buffer)) != -1) {
						bytes.write(buffer, 0, read);
					}
					in.close();
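					// write the snapshot to img.jpg and decode it with Highgui.imread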
					DataOutputStream writer = new DataOutputStream(new FileOutputStream(new File("img.jpg")));
					writer.write(bytes.toByteArray());
					writer.close();
					Mat mat =  Highgui.imread("img.jpg");
					synchronized(this) {
						frame = mat;
					}
				} catch(Throwable t) {
					t.printStackTrace();
				}
			}
		}
	});
	thread.setDaemon(true);
	thread.start();
}
 
Example 11
Source File: Cluster.java    From opencv-fun with GNU Affero General Public License v3.0 (5 votes)
public static void main (String[] args) {		
	CVLoader.load();
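	// build a 200x200 test image: left half green, right half red (BGR)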
	Mat img = Mat.zeros(200, 200, CvType.CV_8UC3);
	Core.rectangle(img, new Point(0, 0), new Point(100, 200), new Scalar(0, 255, 0), -1);
	Core.rectangle(img, new Point(100, 0), new Point(200, 200), new Scalar(0, 0, 255), -1);
	
	Mat clusters = cluster(img, 2).get(0);
	
	ImgWindow.newWindow(img).setTitle("img");
	ImgWindow.newWindow(clusters).setTitle("clusters");
}
 
Example 12
Source File: Vision.java    From PowerUp-2018 with GNU General Public License v3.0 (4 votes)
public Vision(){
    Size camSize = new Size(VisionConstants.CAM_WIDTH, VisionConstants.CAM_HEIGHT);
    failImage = Mat.zeros(camSize, 0); // type 0 == CvType.CV_8UC1 (single-channel 8-bit)
}
 
Example 13
Source File: BallDetector.java    From opencv-fun with GNU Affero General Public License v3.0 (4 votes)
private void extractCircles (Mat mask, List<Circle> balls, List<BallCluster> ballClusters, List<MatOfPoint> contours) {
	// clear input
	balls.clear();
	ballClusters.clear();
	contours.clear();
	
	// find the contours
	Imgproc.findContours(mask.clone(), contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
	
	// iterate through the contours, find single balls and clusters of balls touching each other
	double minArea = Math.PI * (calib.getBallRadius() * 0.9f) * (calib.getBallRadius() * 0.9f); // minimal ball area
	double maxArea = Math.PI * (calib.getBallRadius() * 1.1f) * (calib.getBallRadius() * 1.1f); // maximal ball area

	for (int i = 0; i < contours.size(); i++) {
		double area = Imgproc.contourArea(contours.get(i));
		if (area > minArea) {				
			if (area < maxArea) {
				// we found a ball
				float[] radius = new float[1];
				Point center = new Point();
				Imgproc.minEnclosingCircle(new MatOfPoint2f(contours.get(i).toArray()), center, radius);
				balls.add(new Circle(center.x, center.y, calib.getBallRadius()));
			} else {
				// we found a cluster of balls
				int numBalls = (int)(area / (Math.PI * calib.getBallRadius() * calib.getBallRadius() * 0.9));
				
				// draw the contours to a bit mask
				Mat hough = Mat.zeros(mask.size(), CvType.CV_8U);
				Imgproc.drawContours(hough, contours, i, new Scalar(255, 255, 255), -2);
				
				// detect hough circles, try different params until we hit the number of balls
				Mat houghCircles = new Mat();
				int hit = 0;
				for(int j = 8; j < 20; j++) {
					Imgproc.HoughCircles(hough, houghCircles, Imgproc.CV_HOUGH_GRADIENT, 2, calib.getBallRadius() * 0.9 * 2, 255, j, (int)(calib.getBallRadius() * 0.9), (int)(calib.getBallRadius() * 1.1));
					if(houghCircles.cols() <= numBalls) {
						hit++;
						if(hit == 4) break;
					}
				}
				
				
				List<Circle> estimatedCircles = new ArrayList<Circle>();
				for(int j = 0; j < houghCircles.cols(); j++) {
					double[] circle = houghCircles.get(0, j);
					if(circle != null) {
						estimatedCircles.add(new Circle(circle[0], circle[1], calib.getBallRadius()));
					}
				}
				
				ballClusters.add(new BallCluster(contours.get(i), numBalls, estimatedCircles));
			}
		}
	}
}