Java Code Examples for org.opencv.core.Mat#put()

The following examples show how to use org.opencv.core.Mat#put(). They are drawn from open-source projects; the originating project and source file are named above each example.
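At its core, Mat#put() writes numeric values into a Mat starting at a given (row, col) position and continues filling in row-major order; overloads take varargs doubles or a primitive array whose element type matches the Mat depth. Here is a minimal standalone sketch (the native-library name in the loadLibrary call varies by OpenCV build, so that line is an assumption):

import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class MatPutDemo {
    // Assumption: adjust the library name to your OpenCV build (e.g. "opencv_java453")
    static { System.loadLibrary("opencv_java"); }

    public static void main(String[] args) {
        Mat m = new Mat(2, 3, CvType.CV_32F);
        // varargs overload: fills row-major starting at (0, 0)
        m.put(0, 0, 1.0, 2.0, 3.0,
                    4.0, 5.0, 6.0);
        // array overload: element type must match the Mat depth (float for CV_32F)
        m.put(1, 0, new float[]{7f, 8f, 9f});
        System.out.println(m.dump());
    }
}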
Example 1
Source File: Utils.java    From VIA-AI with MIT License
public static Mat loadResource(Context context, int resourceId, int flags) throws IOException
{
    InputStream is = context.getResources().openRawResource(resourceId);
    ByteArrayOutputStream os = new ByteArrayOutputStream(is.available());

    byte[] buffer = new byte[4096];
    int bytesRead;
    while ((bytesRead = is.read(buffer)) != -1) {
        os.write(buffer, 0, bytesRead);
    }
    is.close();

    // Wrap the encoded bytes in a 1xN 8-bit Mat; put() copies the whole array into row 0
    Mat encoded = new Mat(1, os.size(), CvType.CV_8U);
    encoded.put(0, 0, os.toByteArray());
    os.close();

    // Decode the in-memory image according to the given flags
    Mat decoded = Imgcodecs.imdecode(encoded, flags);
    encoded.release();

    return decoded;
}
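
A typical call site for this helper from an Android Activity might look as follows (the resource id is hypothetical; Imgcodecs.IMREAD_COLOR is the OpenCV 3.x flag name):

// inside an Activity; R.raw.logo is a placeholder resource id
Mat logo = Utils.loadResource(this, R.raw.logo, Imgcodecs.IMREAD_COLOR); // may throw IOException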
 
Example 2
Source File: MainActivity.java    From MOAAP with MIT License
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) {
    // Always forward the result to the superclass, just in case :)
    super.onActivityResult(requestCode, resultCode, imageReturnedIntent);

    switch(requestCode) {
        case SELECT_PHOTO:
            if(resultCode == RESULT_OK && read_external_storage_granted){
                try {
                    final Uri imageUri = imageReturnedIntent.getData();
                    final InputStream imageStream = getContentResolver().openInputStream(imageUri);
                    final Bitmap selectedImage = BitmapFactory.decodeStream(imageStream);
                    src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4);
                    Utils.bitmapToMat(selectedImage, src);
                    src_gray = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC1);
                    switch (ACTION_MODE) {
                        case HomeActivity.GAUSSIAN_BLUR:
                            Imgproc.GaussianBlur(src, src, new Size(9, 9), 0);
                            break;
                        case HomeActivity.MEAN_BLUR:
                            Imgproc.blur(src, src, new Size(9, 9));
                            break;
                        case HomeActivity.MEDIAN_BLUR:
                            Imgproc.medianBlur(src, src, 9);
                            break;
                        case HomeActivity.SHARPEN:
                            // 3x3 sharpening kernel; put() fills it row-major starting at (0, 0)
                            Mat kernel = new Mat(3, 3, CvType.CV_16SC1);
                            Log.d("imageType", CvType.typeToString(src.type()));
                            kernel.put(0, 0, 0, -1, 0, -1, 5, -1, 0, -1, 0);
                            Imgproc.filter2D(src, src, src_gray.depth(), kernel);
                            break;
                        case HomeActivity.DILATE:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelDilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
                            Imgproc.dilate(src_gray, src_gray, kernelDilate);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ERODE:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelErode = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
                            Imgproc.erode(src_gray, src_gray, kernelErode);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ADAPTIVE_THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.adaptiveThreshold(src_gray, src_gray, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 3, 0);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                    }
                    Bitmap processedImage = Bitmap.createBitmap(src.cols(), src.rows(), Bitmap.Config.ARGB_8888);
                    Log.i("imageType", CvType.typeToString(src.type()) + "");
                    Utils.matToBitmap(src, processedImage);
                    ivImage.setImageBitmap(selectedImage);
                    ivImageProcessed.setImageBitmap(processedImage);
                    Log.i("process", "process done");
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                }
            }
            break;
    }
}
 
Example 3
Source File: CVProcessor.java    From CVScanner with GNU General Public License v3.0
/**
 * Applies a four-point perspective ("top-down") transform.
 * @param src actual image
 * @param pts corner points (tl, tr, br, bl), scaled up with respect to the actual image
 * @return the perspective-corrected view of the quadrilateral
 */
public static Mat fourPointTransform( Mat src , Point[] pts ) {
    Point tl = pts[0];
    Point tr = pts[1];
    Point br = pts[2];
    Point bl = pts[3];

    double widthA = Math.sqrt(Math.pow(br.x - bl.x, 2) + Math.pow(br.y - bl.y, 2));
    double widthB = Math.sqrt(Math.pow(tr.x - tl.x, 2) + Math.pow(tr.y - tl.y, 2));

    double dw = Math.max(widthA, widthB);
    int maxWidth = Double.valueOf(dw).intValue();


    double heightA = Math.sqrt(Math.pow(tr.x - br.x, 2) + Math.pow(tr.y - br.y, 2));
    double heightB = Math.sqrt(Math.pow(tl.x - bl.x, 2) + Math.pow(tl.y - bl.y, 2));

    double dh = Math.max(heightA, heightB);
    int maxHeight = Double.valueOf(dh).intValue();

    Mat doc = new Mat(maxHeight, maxWidth, CvType.CV_8UC4);

    // 4x1 two-channel float Mats holding the source and destination quadrilaterals;
    // put() writes the eight floats as (x, y) pairs starting at row 0, col 0
    Mat src_mat = new Mat(4, 1, CvType.CV_32FC2);
    Mat dst_mat = new Mat(4, 1, CvType.CV_32FC2);

    src_mat.put(0, 0, tl.x, tl.y, tr.x, tr.y, br.x, br.y, bl.x, bl.y);
    dst_mat.put(0, 0, 0.0, 0.0, dw, 0.0, dw, dh, 0.0, dh);

    Mat m = Imgproc.getPerspectiveTransform(src_mat, dst_mat);

    Imgproc.warpPerspective(src, doc, m, doc.size());

    return doc;
}
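
A hedged usage sketch: the corner coordinates below are illustrative, and the expected ordering (top-left, top-right, bottom-right, bottom-left) follows the indexing at the top of the method:

Mat page = Imgcodecs.imread("document.jpg"); // hypothetical input image
Point[] corners = {
        new Point(50, 40),    // top-left
        new Point(410, 60),   // top-right
        new Point(430, 520),  // bottom-right
        new Point(30, 500)    // bottom-left
};
Mat topDown = CVProcessor.fourPointTransform(page, corners);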
 
Example 4
Source File: WeightCalculate.java    From ImageEnhanceViaFusion with MIT License
public static Mat LocalContrast(Mat img) {
	// 1D binomial (Gaussian-like) filter taps; the 5x5 mask is their outer product
	double[] h = { 1.0 / 16.0, 4.0 / 16.0, 6.0 / 16.0, 4.0 / 16.0, 1.0 / 16.0 };
	Mat mask = new Mat(h.length, h.length, img.type());
	for (int i = 0; i < h.length; i++) {
		for (int j = 0; j < h.length; j++) {
			// per-element write: put(row, col, value)
			mask.put(i, j, h[i] * h[j]);
		}
	}
	Mat localContrast = new Mat();
	Imgproc.filter2D(img, localContrast, img.depth(), mask);
	// clamp the filter response at PI/2.75 before the contrast weight is computed
	for (int i = 0; i < localContrast.rows(); i++) {
		for (int j = 0; j < localContrast.cols(); j++) {
			if (localContrast.get(i, j)[0] > Math.PI / 2.75)
				localContrast.put(i, j, Math.PI / 2.75);
		}
	}
	Core.subtract(img, localContrast, localContrast);
	return localContrast.mul(localContrast);
}
 
Example 5
Source File: UtilTest.java    From OpenTLDAndroid with Apache License 2.0
public void testGetVar(){
	final Mat img = new Mat(5, 5, CvType.CV_32SC1);
	img.put(0, 0, IISUM);
	
	final Mat img2 = new Mat(5, 5, CvType.CV_64F);
	img2.put(0, 0, IISQSUM);
	
	final BoundingBox test_box = new BoundingBox(1, 1, 3, 3, 1, 0);
	
	assertEquals("Wrong Var calculation", -417.55555, Util.getVar(test_box, img, img2), 0.00001);
}
 
Example 6
Source File: KNearestNeighbor.java    From Android-Face-Recognition-with-Deep-Learning-Library with Apache License 2.0
@Override
public void loadFromFile() {
    MatName mtrainingList = new MatName("TrainingList", trainingList);

    List<MatName> listMat = new ArrayList<MatName>();
    listMat.add(mtrainingList);

    labelList = fh.loadIntegerList(fh.createLabelFile(fh.KNN_PATH, "train"));
    labelMap = fh.getLabelMapFromFile(fh.KNN_PATH);
    trainingList = fh.getMatListFromXml(listMat, fh.KNN_PATH, trainingFile).get(0).getMat();

    labels = new Mat(labelList.size(), 1, CvType.CV_8UC1);
    for (int i=0; i<labelList.size(); i++) {
        Integer label = labelList.get(i);
        // write each label into its own row of the Nx1 label Mat
        labels.put(i, 0, label);
    }

    labels.convertTo(labels, CvType.CV_32F);
    PreferencesHelper preferencesHelper = new PreferencesHelper(context);
    k = preferencesHelper.getK();

    knn = KNearest.create();
    knn.setIsClassifier(true);
    knn.train(trainingList, 0, labels); // 0 == Ml.ROW_SAMPLE (one sample per row)

}
 
Example 7
Source File: UtilTest.java    From OpenTLDAndroid with Apache License 2.0
public void testToByteArray(){
	final Mat greyMat = new Mat();
	Imgproc.cvtColor(getTestMat(), greyMat, Imgproc.COLOR_RGB2GRAY);
	// make sure we have some extreme values
	greyMat.put(0, 1, new byte[]{-128});
	greyMat.put(0, 2, new byte[]{-0});
	greyMat.put(0, 3, new byte[]{127});
	final byte[] array = Util.getByteArray(greyMat);
	
	assertEquals(2, array[0]);
	assertEquals(-128, array[1]);
	assertEquals(0, array[2]);
	assertEquals(127, array[3]);
	assertEquals(8, array[500]);
	assertEquals(9, array[1000]);
	assertEquals(9, array[1500]);
	assertEquals(17, array[2000]);
	assertEquals(18, array[3000]);
	assertEquals(3, array[4000]);
	assertEquals(8, array[5000]);
	assertEquals(9, array[6000]);
	assertEquals(3, array[7000]);
	assertEquals(5, array[8000]);
	assertEquals(15, array[9000]);
	assertEquals(9, array[12000]);
	assertEquals(4, array[15000]);
	
	final int cols = greyMat.cols();
	for(int row=0; row<greyMat.rows(); row++){
		for(int col=0; col<cols; col++){
			assertEquals(Util.getByte(row, col, greyMat), array[row * cols + col]);
		}
	}
}
 
Example 8
Source File: ImgprocessUtils.java    From classchecks with Apache License 2.0
/**
 * The main idea:
 * 	1. Compute the mean gray level of the source image I, and record its rows and cols;
 * 	2. Split the image into N*M blocks of a given size and take each block's mean,
 * 	   giving the block brightness matrix D;
 * 	3. Subtract the source image's mean gray level from every element of D,
 * 	   giving the block brightness difference matrix E;
 * 	4. Interpolate E (bicubic) up to the size of the source image, giving the
 * 	   brightness distribution matrix R;
 * 	5. The corrected image is result = I - R.
 * @Title: unevenLightCompensate
 * @Description: uneven-illumination compensation
 * @param image image to correct (converted to gray in place)
 * @param blockSize side length of the averaging blocks
 */
public static void unevenLightCompensate(Mat image, int blockSize) {
	if(image.channels() == 3) {
		Imgproc.cvtColor(image, image, Imgproc.COLOR_RGB2GRAY);
	}
	double average = Core.mean(image).val[0];
	Scalar scalar = new Scalar(average);
	int rowsNew = (int) Math.ceil((double)image.rows() / (double)blockSize);
	int colsNew = (int) Math.ceil((double)image.cols() / (double)blockSize);
	Mat blockImage = new Mat();
	blockImage = Mat.zeros(rowsNew, colsNew, CvType.CV_32FC1);
	for(int i = 0; i < rowsNew; i ++) {
		for(int j = 0; j < colsNew; j ++) {
			int rowmin = i * blockSize;
			int rowmax = (i + 1) * blockSize;
			if(rowmax > image.rows()) rowmax = image.rows();
			int colmin = j * blockSize;
			int colmax = (j +1) * blockSize;
			if(colmax > image.cols()) colmax = image.cols();
			Range rangeRow = new Range(rowmin, rowmax);
			Range rangeCol = new Range(colmin, colmax);
			Mat imageROI = new Mat(image, rangeRow, rangeCol);
			double temaver = Core.mean(imageROI).val[0];
			blockImage.put(i, j, temaver);
		}
	}
	
	Core.subtract(blockImage, scalar, blockImage);
	Mat blockImage2 = new Mat();
	Imgproc.resize(blockImage, blockImage2, image.size(), 0, 0, Imgproc.INTER_CUBIC);
	Mat image2 = new Mat();
	image.convertTo(image2, CvType.CV_32FC1);
	Mat dst = new Mat();
	Core.subtract(image2, blockImage2, dst);
	dst.convertTo(image, CvType.CV_8UC1);
}
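
A minimal usage sketch under the same assumptions (the file name and block size are illustrative; note the method converts a 3-channel input to gray and overwrites it in place):

Mat scan = Imgcodecs.imread("page.jpg");
ImgprocessUtils.unevenLightCompensate(scan, 32);
Imgcodecs.imwrite("page_corrected.jpg", scan);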
 
Example 9
Source File: Tld.java    From OpenTLDAndroid with Apache License 2.0
/**
 * Clusters the detected bounding boxes in the _boxClusterMap field (read and updated in place).
 * @return Total clusters count
 */
private int clusterBB(){
	final int size = _boxClusterMap.size();
	// need the data in arrays
	final DetectionStruct[] dbb = _boxClusterMap.keySet().toArray(new DetectionStruct[size]);
	final int[] indexes = new int[size];
	for(int i = 0; i < size; i++){
		indexes[i] = _boxClusterMap.get(dbb[i]);
	}
	
	// 1. Build proximity matrix
	final float[] data = new float[size * size];
	for(int i = 0; i < size; i++){
		for(int j = 0; j < size; j++){
			final float d = 1 - dbb[i].detectedBB.calcOverlap(dbb[j].detectedBB);
			data[i * size + j] = d;
			data[j * size + i] = d;
		}
	}
	Mat D = new Mat(size, size, CvType.CV_32F);
	// a single put() call with a size*size array fills the whole matrix row-major
	D.put(0, 0, data);
	
	// 2. Initialise disjoint clustering
	final int[] belongs = new int[size];
	int m = size;
	for(int i = 0; i < size; i++){
		belongs[i] = i;
	}
	

	for(int it = 0; it < size - 1; it++){
		//3. Find nearest neighbour
		float min_d = 1;
		int node_a = -1, node_b = -1;
		for (int i = 0; i < D.rows(); i++){
			for (int j = i + 1 ;j < D.cols(); j++){
				if (data[i * size + j] < min_d && belongs[i] != belongs[j]){
					min_d = data[i * size + j];
					node_a = i;
					node_b = j;
				}
			}
		}
		
		// are we done ?
		if (min_d > 0.5){
			int max_idx =0;
			for (int j = 0; j < size; j++){
				boolean visited = false;
				for(int i = 0; i < 2 * size - 1; i++){
					if (belongs[j] == i){
						// populate the correct / aggregated cluster
						indexes[j] = max_idx;
						visited = true;
					}
				}
				
				if (visited){
					max_idx++;
				}
			}
			
			// update the main map before going back
			for(int i = 0; i < size; i++){
				_boxClusterMap.put(dbb[i], indexes[i]);
			}
			return max_idx;
		}

		//4. Merge clusters and assign level
		if(node_a >= 0 && node_b >= 0){  // this should always BE true, otherwise we would have returned
			for (int k = 0; k < size; k++){
				if (belongs[k] == belongs[node_a] || belongs[k] == belongs[node_b])
					belongs[k] = m;
			}
			m++;
		}
	}
	
	// there seems to be only one cluster
	for(int i = 0; i < size; i++){
		_boxClusterMap.put(dbb[i], 0);
	}
	return 1;
}
 
Example 10
Source File: ImageProcessor.java    From video-stream-classification with Apache License 2.0
private static Mat getMat(VideoEventData ed) throws Exception{
	 Mat mat = new Mat(ed.getRows(), ed.getCols(), ed.getType());
	 // decode the Base64 payload straight into the Mat's pixel buffer
	 mat.put(0, 0, Base64.getDecoder().decode(ed.getData()));
	 return mat;
}
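
For the producing side, a sketch of the matching encode step, assuming a continuous 8-bit Mat; Mat#get(int, int, byte[]) is the read counterpart of put():

// hypothetical helper, not part of the original class
private static String encodeMat(Mat mat) {
	 byte[] data = new byte[(int) (mat.total() * mat.channels())];
	 mat.get(0, 0, data); // copy the pixel buffer out of the Mat
	 return Base64.getEncoder().encodeToString(data);
}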
 
Example 11
Source File: PS3EyeCamera.java    From ShootOFF with GNU General Public License v3.0
public Mat translateCameraArrayToMat(byte[] imageBuffer) {
	// wrap the raw 8-bit, 3-channel camera frame in a Mat; put() copies the whole buffer
	final Mat mat = new Mat(getViewHeight(), getViewWidth(), CvType.CV_8UC3);

	mat.put(0, 0, imageBuffer);
	return mat;
}
 
Example 12
Source File: TrainFaces.java    From classchecks with Apache License 2.0
/**
 * Trains a face model from the face images listed in a CSV file and saves the model file (as XML).
 * @param CSVFilePath absolute path of the CSV file
 * @param trainModelSavePath path where the trained model file is saved
 * @param facerecAlgorithm name of the face-recognition algorithm to use
 */
public static boolean trainAndSaveModel(String CSVFilePath, String trainModelSavePath, String facerecAlgorithm) {
	
	if(!new File(CSVFilePath).isFile()) {
		return false;
	}
	// Check that the face-model save path is an absolute path
	File modelFile = new File(trainModelSavePath);
	if(!modelFile.isAbsolute()) {
		System.out.println("TrainFaces->trainAndSaveModel->trainModelSavePath is not an absolute path");
		return false;
	}
	// Create the parent directory of trainModelSavePath if it does not exist
	if(!modelFile.getParentFile().exists()) {
		modelFile.getParentFile().mkdirs();
	}
	
	List<Mat> matLists = new ArrayList<Mat>(); // face images to train on
	List<Integer> lableLists = new ArrayList<Integer>(); // labels matching the face images
	CSVFileUtils.CSVRead(CSVFilePath, matLists, lableLists);
	
	// OpenCV requires a one-to-one correspondence between faces and labels during training
	if(matLists.size() == lableLists.size()) {
		Mat labels = new Mat(lableLists.size(), 1, CvType.CV_32SC1, new Scalar(0));
		for(int i = 0; i < lableLists.size(); i ++) {
			labels.put(i, 0, new int[]{lableLists.get(i)});
		}
		BasicFaceRecognizer faceRecognizer = null;
		if("FaceRecognizer.Eigenfaces".equals(facerecAlgorithm)) {
			faceRecognizer = Face.createEigenFaceRecognizer();
		} else if("FaceRecognizer.Fisherfaces".equals(facerecAlgorithm)) {
			faceRecognizer = Face.createFisherFaceRecognizer();
		}
		
		faceRecognizer.train(matLists, labels);
		faceRecognizer.save(trainModelSavePath);
		return true;
	}
	
	return false;
}
 
Example 13
Source File: ClockinAsStudentServiceImpl.java    From classchecks with Apache License 2.0
@Override
public BasicEntityVo<?> clockin(String jwAccount, String loginAccount, Double lng, Double lat,
		CommonsMultipartFile file) {
	
	LOG.info("Student clock-in service");
	LOG.info("ContentType:" + file.getContentType() 
		+ " Filename:" + file.getOriginalFilename() + " Size:" + file.getSize());
	LOG.info("Student-uploaded coordinates: lng=" + lng + " lat=" + lat);
	Integer teacherUId = clockinAsStudentMapper.getTeacherIDByStudentClock(jwAccount, 500);
	
	LOG.info("Teacher UID: " + teacherUId);
	SchoolCourseClockModel sccm = basicService.getSchoolCourseClockModelNow();
	PointVo gpsPoint = clockinAsStudentMapper.getGPSByTeacherID(teacherUId, sccm);
	LOG.info("GPS coordinates of the teacher's latest clock-in record: " + gpsPoint);
	
	double stuDistanceTea = PositionUtil.distance(gpsPoint.getLng(), gpsPoint.getLat(), lng, lat);
	
	LOG.info("Distance between student and teacher: " + stuDistanceTea);
	
	if(stuDistanceTea > 550) {
		return new BasicEntityVo<>(StudentClockInBusinessCode.GPS_DISTANCE_GT_50[0], StudentClockInBusinessCode.GPS_DISTANCE_GT_50[1]);
	}
	
	Date date = new Date();
	// "yyyy-MM-dd" date string
	String dtSimpleDate = DateUtil.dtSimpleFormat(date);
	// "yyyyMMddHHmmss" date string
	String longTime = DateUtil.longDate(date);
	// Folder-name segments of the save path; uploaded images go in a per-day folder
	String [] pathKey = {ImgStoragePath.STUDENT_CLOCK_IN_IMG_PATH, loginAccount, dtSimpleDate};
	// Directory path where the image is saved
	String dirSavePath = FileUtils.buildFilePath(pathKey);
	
	boolean isSaved = fileSave(file, dirSavePath, longTime);
	if(!isSaved) { // failed to save the uploaded image
		return new BasicEntityVo<>(StudentClockInBusinessCode.BUSSINESS_IMAGE_SAVE_FAILED[0], StudentClockInBusinessCode.BUSSINESS_IMAGE_SAVE_FAILED[1]);
	}
	
	String absolutePath = FileUtils.buildFilePath(dirSavePath, longTime+".jpg");
	Mat imgSrc = Imgcodecs.imread(absolutePath, Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE); // load the uploaded image as grayscale
	
	Mat procMat = PreProcessFace.smallProcessedFace(imgSrc);
	
	if(null == procMat) {
		return new BasicEntityVo<>(StudentClockInBusinessCode.BUSSINESS_NO_DETECT_FACE[0], StudentClockInBusinessCode.BUSSINESS_NO_DETECT_FACE[1]);
	}
	
	String collecteFaceRoute = FileUtils.buildFilePath(ImgStoragePath.PROC_FACE_IMG_SAVE_PATH, loginAccount);
	
	List<Mat> collectedMats = new ArrayList<Mat>();
	List<Integer> faceLabels = new ArrayList<Integer>();
	CSVFileUtils.loadImage(collecteFaceRoute, collectedMats, faceLabels);
	
	// Put the face labels into a Mat; OpenCV's face-recognition API expects the labels in a Mat
	Mat labelsMat = new Mat(faceLabels.size(), 1, CvType.CV_32SC1, new Scalar(0));
	for(int i = 0; i < faceLabels.size(); i ++) {
		labelsMat.put(i, 0, new int[]{faceLabels.get(i)});
	}
	
	// Train the face model; the Eigenfaces algorithm is used here
	BasicFaceRecognizer faceModel = Recognition.learnCollectedFaces(collectedMats, labelsMat);
	
	Mat reconstructedFace = Recognition.reconstructFace(faceModel, procMat);
	double similarity = Recognition.getSimilarity(reconstructedFace, procMat);
	LOG.info("similarity = " + similarity);
	LOG.info("predict_label: "+faceModel.predict_label(procMat));
	
	if(similarity > 0.13) {
		return new BasicEntityVo<>(StudentClockInBusinessCode.FACE_NON_EXISTENT[0], 
				StudentClockInBusinessCode.FACE_NON_EXISTENT[1]);
	}
	
	// After the student's clock-in succeeds, update the attendance record
	clockinAsStudentMapper.updateStudentClockinRecord(jwAccount);
	
	StudentClockinVo vo = new StudentClockinVo();
	
	vo.setStuName(clockinAsStudentMapper.findStudentName(jwAccount));
	vo.setCurTime(DateUtil.hmsFormat(new Date()));
	// TODO: update the attendance record
	return new BasicEntityVo<>(StudentClockInBusinessCode.BUSINESS_SUCCESS[0], 
			StudentClockInBusinessCode.BUSINESS_SUCCESS[1], vo);
}
 
Example 14
Source File: DarkChannelPriorDehaze.java    From OptimizedImageEnhance with MIT License
public static Mat enhance(Mat image, double krnlRatio, double minAtmosLight, double eps) {
	image.convertTo(image, CvType.CV_32F);
	// extract each color channel
	List<Mat> rgb = new ArrayList<>();
	Core.split(image, rgb);
	Mat rChannel = rgb.get(0);
	Mat gChannel = rgb.get(1);
	Mat bChannel = rgb.get(2);
	int rows = rChannel.rows();
	int cols = rChannel.cols();
	// derive the dark channel from original image
	Mat dc = rChannel.clone();
	for (int i = 0; i < image.rows(); i++) {
		for (int j = 0; j < image.cols(); j++) {
			double min = Math.min(rChannel.get(i, j)[0], Math.min(gChannel.get(i, j)[0], bChannel.get(i, j)[0]));
			dc.put(i, j, min);
		}
	}
	// minimum filter
	int krnlSz = Double.valueOf(Math.max(Math.max(rows * krnlRatio, cols * krnlRatio), 3.0)).intValue();
	Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(krnlSz, krnlSz), new Point(-1, -1));
	Imgproc.erode(dc, dc, kernel);
	// get coarse transmission map
	Mat t = dc.clone();
	Core.subtract(t, new Scalar(255.0), t);
	Core.multiply(t, new Scalar(-1.0), t);
	Core.divide(t, new Scalar(255.0), t);
	// obtain gray scale image
	Mat gray = new Mat();
	Imgproc.cvtColor(image, gray, Imgproc.COLOR_RGB2GRAY);
	Core.divide(gray, new Scalar(255.0), gray);
	// refine transmission map
	int r = krnlSz * 4;
	t = Filters.GuidedImageFilter(gray, t, r, eps);
	// get minimum atmospheric light
	minAtmosLight = Math.min(minAtmosLight, Core.minMaxLoc(dc).maxVal);
	// dehaze each color channel
	rChannel = dehaze(rChannel, t, minAtmosLight);
	gChannel = dehaze(gChannel, t, minAtmosLight);
	bChannel = dehaze(bChannel, t, minAtmosLight);
	// merge the three color channels into an image
	Mat outval = new Mat();
	Core.merge(new ArrayList<>(Arrays.asList(rChannel, gChannel, bChannel)), outval);
	outval.convertTo(outval, CvType.CV_8UC1);
	return outval;
}
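
A hedged usage sketch; the file names and parameter values below are illustrative guesses, not taken from the project:

Mat hazy = Imgcodecs.imread("hazy.jpg");
Mat clear = DarkChannelPriorDehaze.enhance(hazy, 0.01, 240.0, 1e-6);
Imgcodecs.imwrite("dehazed.jpg", clear);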