Java Code Examples for org.opencv.imgproc.Imgproc#findContours()

The following examples show how to use org.opencv.imgproc.Imgproc#findContours(). You can go to the original project or source file by following the links above each example.
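As a quick orientation before the examples, here is a minimal sketch of the call itself (not taken from any of the projects below): findContours expects an 8-bit single-channel image, typically the output of Canny, threshold, or inRange, and fills a List<MatOfPoint> plus an optional hierarchy Mat describing contour nesting. Releases before OpenCV 3.2 also modify the input image, which is why several of the examples pass a clone.

import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.imgproc.Imgproc;

public class FindContoursSketch {

    // Returns the outer contours of a binary (8-bit, single-channel) mask.
    public static List<MatOfPoint> externalContours(Mat binaryMask) {
        List<MatOfPoint> contours = new ArrayList<>();
        Mat hierarchy = new Mat();
        // Clone so the caller's mask is left untouched on older OpenCV releases.
        Imgproc.findContours(binaryMask.clone(), contours, hierarchy,
                Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
        hierarchy.release();
        return contours;
    }
}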
Example 1
Source File: MainActivity.java    From MOAAP with MIT License
void Contours() {
    Mat grayMat = new Mat();
    Mat cannyEdges = new Mat();
    Mat hierarchy = new Mat();

    List<MatOfPoint> contourList = new ArrayList<MatOfPoint>(); //A list to store all the contours

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    Imgproc.Canny(grayMat, cannyEdges, 10, 100); //detect edges on the grayscale image

    //finding contours
    Imgproc.findContours(cannyEdges, contourList, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

    //Drawing contours on a new image
    Mat contours = new Mat();
    contours.create(cannyEdges.rows(), cannyEdges.cols(), CvType.CV_8UC3);
    Random r = new Random();
    for (int i = 0; i < contourList.size(); i++) {
        Imgproc.drawContours(contours, contourList, i, new Scalar(r.nextInt(255), r.nextInt(255), r.nextInt(255)), -1);
    }
    //Converting Mat back to Bitmap
    Utils.matToBitmap(contours, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Example 2
Source File: VideoMotionDetector.java    From video-stream-analytics with Apache License 2.0
private static ArrayList<Rect> getContourArea(Mat mat) {
	Mat hierarchy = new Mat();
	Mat image = mat.clone();
	List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
	Imgproc.findContours(image, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
	Rect rect = null;
	double maxArea = 300;
	ArrayList<Rect> arr = new ArrayList<Rect>();
	for (int i = 0; i < contours.size(); i++) {
		Mat contour = contours.get(i);
		double contourArea = Imgproc.contourArea(contour);
		if (contourArea > maxArea) {
			rect = Imgproc.boundingRect(contours.get(i));
			arr.add(rect);
		}
	}
	return arr;
}
 
Example 3
Source File: MainActivity.java    From Android_OCV_Movement_Detection with MIT License
public Mat onCameraFrame(CvCameraViewFrame inputFrame) 
{   
	contours.clear();
	//use the gray frame because it requires fewer resources to process
	mGray = inputFrame.gray(); 
	
	//this function converts the gray frame into the correct RGB format for the BackgroundSubtractorMOG apply function
	Imgproc.cvtColor(mGray, mRgb, Imgproc.COLOR_GRAY2RGB); 
	
	//apply detects objects moving and produces a foreground mask
	//the lRate updates dynamically dependent upon seekbar changes
	sub.apply(mRgb, mFGMask, lRate); 

	//erode and dilate are used to remove noise from the foreground mask
	Imgproc.erode(mFGMask, mFGMask, new Mat());
	Imgproc.dilate(mFGMask, mFGMask, new Mat());
	
	//drawing contours around the objects by first calling findContours and then drawContours
	//RETR_EXTERNAL retrieves only external contours
	//CHAIN_APPROX_NONE detects all pixels for each contour
	Imgproc.findContours(mFGMask, contours, new Mat(), Imgproc.RETR_EXTERNAL , Imgproc.CHAIN_APPROX_NONE);
	
	//draws all the contours in red with thickness of 2
	Imgproc.drawContours(mRgb, contours, -1, new Scalar(255, 0, 0), 2);
	
	return mRgb;
}
 
Example 4
Source File: Finder.java    From SikuliX1 with MIT License
public static List<Region> findChanges(FindInput2 findInput) {
  findInput.setAttributes();
  Mat previousGray = SXOpenCV.newMat();
  Mat nextGray = SXOpenCV.newMat();
  Mat mDiffAbs = SXOpenCV.newMat();
  Mat mDiffThresh = SXOpenCV.newMat();
  Imgproc.cvtColor(findInput.getBase(), previousGray, toGray);
  Imgproc.cvtColor(findInput.getTarget(), nextGray, toGray);
  Core.absdiff(previousGray, nextGray, mDiffAbs);
  Imgproc.threshold(mDiffAbs, mDiffThresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);

  List<Region> rectangles = new ArrayList<>();
  if (Core.countNonZero(mDiffThresh) > IMAGE_DIFF_THRESHOLD) {
    Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
    Imgproc.dilate(mDiffAbs, mDiffAbs, SXOpenCV.newMat());
    Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
    Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat mHierarchy = SXOpenCV.newMat();
    Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    for (MatOfPoint contour : contours) {
      int x1 = 99999;
      int y1 = 99999;
      int x2 = 0;
      int y2 = 0;
      List<org.opencv.core.Point> points = contour.toList();
      for (Point point : points) {
        int x = (int) point.x;
        int y = (int) point.y;
        if (x < x1) x1 = x;
        if (x > x2) x2 = x;
        if (y < y1) y1 = y;
        if (y > y2) y2 = y;
      }
      Region rect = new Region(x1, y1, x2 - x1, y2 - y1);
      rectangles.add(rect);
    }
  }
  return rectangles;
}
 
Example 5
Source File: MovementDetectionProcessor.java    From Camdroid with Apache License 2.0
protected void execute() {
    out = gray();

    Imgproc.equalizeHist(out, out);

    synchronized (mog) {
        mog.apply(out, this.mask, (double) (-10 + learning_rate) / 10);
    }

    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE,
            new Size(3, 3));
    Imgproc.dilate(mask, mask, kernel);

    ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(this.mask, contours, new Mat(),
            Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    double maxheight = object_max_size * this.in.height() / 100;
    double minheight = object_min_size * this.in.height() / 100;

    Iterator<MatOfPoint> each = contours.iterator();
    while (each.hasNext()) {
        MatOfPoint contour = each.next();
        Rect rect = Imgproc.boundingRect(contour);
        if (rect.height > minheight && rect.height < maxheight) {
            Imgproc.rectangle(out, rect.tl(), rect.br(), new Scalar(255,
                    0, 0), 1);
        }
    }
}
 
Example 6
Source File: ColorBlobDetector.java    From OpenCV-AndroidSamples with MIT License
public void process(Mat rgbaImage) {
    Imgproc.pyrDown(rgbaImage, mPyrDownMat);
    Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);

    Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);

    Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask);
    Imgproc.dilate(mMask, mDilatedMask, new Mat());

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();

    Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // Find max contour area
    double maxArea = 0;
    Iterator<MatOfPoint> each = contours.iterator();
    while (each.hasNext()) {
        MatOfPoint wrapper = each.next();
        double area = Imgproc.contourArea(wrapper);
        if (area > maxArea)
            maxArea = area;
    }

    // Filter contours by area and resize to fit the original image size
    mContours.clear();
    each = contours.iterator();
    while (each.hasNext()) {
        MatOfPoint contour = each.next();
        if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) {
            Core.multiply(contour, new Scalar(4,4), contour);
            mContours.add(contour);
        }
    }
}
 
Example 7
Source File: Finder.java    From SikuliNG with MIT License
public static List<Element> detectChanges(Mat base, Mat mChanged) {
  int PIXEL_DIFF_THRESHOLD = 3;
  int IMAGE_DIFF_THRESHOLD = 5;
  Mat mBaseGray = Element.getNewMat();
  Mat mChangedGray = Element.getNewMat();
  Mat mDiffAbs = Element.getNewMat();
  Mat mDiffTresh = Element.getNewMat();
  Mat mChanges = Element.getNewMat();
  List<Element> rectangles = new ArrayList<>();

  Imgproc.cvtColor(base, mBaseGray, toGray);
  Imgproc.cvtColor(mChanged, mChangedGray, toGray);
  Core.absdiff(mBaseGray, mChangedGray, mDiffAbs);
  Imgproc.threshold(mDiffAbs, mDiffTresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);
  if (Core.countNonZero(mDiffTresh) > IMAGE_DIFF_THRESHOLD) {
    Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
    Imgproc.dilate(mDiffAbs, mDiffAbs, Element.getNewMat());
    Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
    Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat mHierarchy = Element.getNewMat();
    Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    rectangles = contoursToRectangle(contours);

    Core.subtract(mDiffAbs, mDiffAbs, mChanges);
    Imgproc.drawContours(mChanges, contours, -1, new Scalar(255));
    //logShow(mDiffAbs);
  }
  return rectangles;
}
 
Example 8
Source File: ColorBlobDetector.java    From hand_finger_recognition_android with MIT License
public void process(Mat rgbaImage) {
    Imgproc.pyrDown(rgbaImage, mPyrDownMat);
    Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);

    Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);

    Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask);
    Imgproc.dilate(mMask, mDilatedMask, new Mat());

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();

    Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // Find max contour area
    double maxArea = 0;
    Iterator<MatOfPoint> each = contours.iterator();
    while (each.hasNext()) {
        MatOfPoint wrapper = each.next();
        double area = Imgproc.contourArea(wrapper);
        if (area > maxArea)
            maxArea = area;
    }

    // Filter contours by area and resize to fit the original image size
    mContours.clear();
    each = contours.iterator();
    while (each.hasNext()) {
        MatOfPoint contour = each.next();
        if (Imgproc.contourArea(contour) > mMinContourArea*maxArea) {
            Core.multiply(contour, new Scalar(4,4), contour);
            mContours.add(contour);
        }
    }
}
 
Example 9
Source File: ContoursUtils.java    From super-cloudops with Apache License 2.0
/**
 * Find contours and sort them in ascending order (by bounding-rect area).
 *
 * @param cannyMat edge image produced by Canny
 * @return the contours, sorted from smallest to largest
 */
public static List<MatOfPoint> findContours(Mat cannyMat) {
	List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
	Mat hierarchy = new Mat();

	// Find the contours
	Imgproc.findContours(cannyMat, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE, new Point(0, 0));

	if (contours.size() <= 0) {
		// throw new RuntimeException("No contours found in the image");
	} else {
		// Sort the contours in ascending order of bounding-rect area
		contours.sort(new Comparator<MatOfPoint>() {
			@Override
			public int compare(MatOfPoint o1, MatOfPoint o2) {
				MatOfPoint2f mat1 = new MatOfPoint2f(o1.toArray());
				RotatedRect rect1 = Imgproc.minAreaRect(mat1);
				Rect r1 = rect1.boundingRect();

				MatOfPoint2f mat2 = new MatOfPoint2f(o2.toArray());
				RotatedRect rect2 = Imgproc.minAreaRect(mat2);
				Rect r2 = rect2.boundingRect();

				return (int) (r1.area() - r2.area());
			}
		});

	}
	return contours;
}
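Since the helper returns the contours sorted from smallest to largest, a caller that wants the dominant shape can simply take the last element. The following sketch is hypothetical (the Canny thresholds and the largestContour wrapper are illustrative, not part of super-cloudops):

	// Hypothetical caller: pick the largest contour from an edge image.
	public static MatOfPoint largestContour(Mat grayImage) {
		Mat edges = new Mat();
		Imgproc.Canny(grayImage, edges, 60, 180); // thresholds are illustrative
		List<MatOfPoint> sorted = ContoursUtils.findContours(edges);
		return sorted.isEmpty() ? null : sorted.get(sorted.size() - 1); // ascending sort: largest is last
	}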
 
Example 10
Source File: SXOpenCV.java    From SikuliX1 with MIT License
public static List<Match> doFindChanges(Image original, Image changed) {
  List<Match> changes = new ArrayList<>();
  if (changed.isValid()) {
    int PIXEL_DIFF_THRESHOLD = 3;
    int IMAGE_DIFF_THRESHOLD = 5;
    Mat previousGray = SXOpenCV.newMat();
    Mat nextGray = SXOpenCV.newMat();
    Mat mDiffAbs = SXOpenCV.newMat();
    Mat mDiffTresh = SXOpenCV.newMat();

    Imgproc.cvtColor(original.getContent(), previousGray, toGray);
    Imgproc.cvtColor(changed.getContent(), nextGray, toGray);
    Core.absdiff(previousGray, nextGray, mDiffAbs);
    Imgproc.threshold(mDiffAbs, mDiffTresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);

    if (Core.countNonZero(mDiffTresh) > IMAGE_DIFF_THRESHOLD) {
      Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
      Imgproc.dilate(mDiffAbs, mDiffAbs, SXOpenCV.newMat());
      Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
      Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);

      List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
      Mat mHierarchy = SXOpenCV.newMat();
      Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
      changes = contoursToRectangle(contours);
    }
  }
  return changes;
}
 
Example 11
Source File: StoneDetector.java    From DogeCV with GNU General Public License v3.0
@Override
public Mat process(Mat input) {
    screenPositions.clear();
    foundRects.clear();
    
    input.copyTo(rawImage);
    input.copyTo(workingMat);
    input.copyTo(displayMat);
    input.copyTo(yellowMask);

    // Imgproc.GaussianBlur(workingMat,workingMat,new Size(5,5),0);
    filter.process(workingMat.clone(), yellowMask);

    List<MatOfPoint> contoursYellow = new ArrayList<>();
    Imgproc.findContours(yellowMask, contoursYellow, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
    Imgproc.drawContours(displayMat,contoursYellow,-1,new Scalar(230,70,70),2);

    // Current result
    ArrayList<Rect> bestRects = new ArrayList<>();
    double bestDifference = Double.MAX_VALUE; // MAX_VALUE since less difference = better

    Collections.sort(contoursYellow, new Comparator<MatOfPoint>() {
        @Override
        public int compare(MatOfPoint matOfPoint, MatOfPoint t1) {
            return Double.compare(calculateScore(matOfPoint), calculateScore(t1)); // ascending: smaller score (difference) first
        }
    });

    List<MatOfPoint> subList = contoursYellow;

    if (contoursYellow.size() > stonesToFind) {
        subList = contoursYellow.subList(0, stonesToFind);
    }

    for (MatOfPoint contour : subList) {
        Rect rect = Imgproc.boundingRect(contour);

        // Show chosen result
        Imgproc.rectangle(displayMat, rect.tl(), rect.br(), new Scalar(255, 0, 0), 4);
        Imgproc.putText(displayMat, "Chosen", rect.tl(), 0, 1, new Scalar(255, 255, 255));

        screenPositions.add(new Point(rect.x, rect.y));
        foundRects.add(rect);
    }

    if (foundRects.size() > 0) {
        found = true;
    }
    else {
        found = false;
    }

    switch (stageToRenderToViewport) {
        case THRESHOLD: {
            Imgproc.cvtColor(yellowMask, yellowMask, Imgproc.COLOR_GRAY2BGR);

            return yellowMask;
        }
        case RAW_IMAGE: {
            return rawImage;
        }
        default: {
            return displayMat;
        }
    }
}
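Example 11 asks for RETR_TREE, so the hierarchy Mat is filled with the parent/child topology of the contours even though the example never reads it. As a hedged aside (the helper name below is illustrative, not from DogeCV), the hierarchy produced by findContours is a 1 x N CV_32SC4 Mat whose i-th column holds the {next, previous, firstChild, parent} indices, with -1 meaning "none":

    // Hypothetical helper: true if the contour at contourIndex has no parent,
    // i.e. it is an outer (top-level) contour in the RETR_TREE hierarchy.
    public static boolean isTopLevel(Mat hierarchy, int contourIndex) {
        double[] links = hierarchy.get(0, contourIndex); // {next, previous, firstChild, parent}
        return links != null && (int) links[3] == -1;
    }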
 
Example 12
Source File: MainActivity.java    From effective_android_sample with Apache License 2.0
/**
 * Extract the object from the given bitmap using OpenCV.
 * @param bmpOrig the original bitmap
 */
private void extractObject(Bitmap bmpOrig) {

    // First, show the original bitmap
    mImageView1.setImageBitmap(bmpOrig);
    // Get the height and width
    int height = bmpOrig.getHeight();
    int width = bmpOrig.getWidth();
    
    // Prepare the OpenCV Mat
    Mat matOrig = new Mat(height,width,CvType.CV_8UC4); 
    // Convert the bitmap to an OpenCV Mat
    Utils.bitmapToMat(bmpOrig, matOrig);
    
    /**
     * Convert to grayscale
     */
    Mat matGray = new Mat(height,width,CvType.CV_8UC1);
    Imgproc.cvtColor(matOrig, matGray, Imgproc.COLOR_RGB2GRAY);
    // Show the result
    Bitmap bmpGray = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(matGray, bmpGray);
    mImageView2.setImageBitmap(bmpGray);

    /**
     * Grayscale -> binarization
     */
    Mat matBlack = new Mat(height,width,CvType.CV_8UC1);
    // Binarize
    Imgproc.threshold(matGray, matBlack, sTH, 255, Imgproc.THRESH_BINARY);
    // Show the result
    Bitmap bmpBlack = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(matBlack, bmpBlack);
    mImageView3.setImageBitmap(bmpBlack);

    /**
     * Grayscale -> binarization -> filled contours
     */
    // Extract the contours
    ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat hierarchy = new Mat(matBlack.height(),matBlack.width(),CvType.CV_8UC1);
    int mode = Imgproc.RETR_EXTERNAL;
    int method = Imgproc.CHAIN_APPROX_SIMPLE;

    // Extract the contours
    Imgproc.findContours(matBlack, contours, hierarchy, mode, method);
    // Draw the contours
    Scalar color = new Scalar(255.f, 0.f, 0.f, 0.f);
    Imgproc.drawContours(matBlack, contours, -1, color, 2);
    // Show the result
    Bitmap bmpContour = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matBlack, bmpContour);
    mImageView4.setImageBitmap(bmpContour);

    // Fill the interior of the extracted contours
    Imgproc.drawContours(matBlack, contours, -1, color, -1);
    // Show the result
    Bitmap bmpContour2 = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matBlack, bmpContour2);
    mImageView5.setImageBitmap(bmpContour2);

    
    /**
     * Use the binarized mask to extract only the object
     */
    Mat matObject = new Mat(height,width,CvType.CV_8UC4); 
    Core.add(matObject, matOrig, matObject, matBlack);  
    // Show the result
    Bitmap bmpObject = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matObject, bmpObject);
    mImageView6.setImageBitmap(bmpObject);

    /**
     * Crop only the bounding rectangle of the extracted object as a bitmap
     */
    Rect rect = Imgproc.boundingRect(contours.get(0));
    Mat matCut = new Mat(matObject, rect);
    // Show the result
    Bitmap bmpCut = Bitmap.createBitmap(matCut.cols(), matCut.rows(), Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matCut, bmpCut);
    mImageView7.setImageBitmap(bmpCut);
}
 
Example 13
Source File: CVProcessor.java    From CVScanner with GNU General Public License v3.0
public static List<MatOfPoint> findContours(Mat src){
    Mat img = src.clone();

    //find contours
    double ratio = getScaleRatio(img.size());
    int width = (int) (img.size().width / ratio);
    int height = (int) (img.size().height / ratio);
    Size newSize = new Size(width, height);
    Mat resizedImg = new Mat(newSize, CvType.CV_8UC4);
    Imgproc.resize(img, resizedImg, newSize);
    img.release();

    Imgproc.medianBlur(resizedImg, resizedImg, 7);

    Mat cannedImg = new Mat(newSize, CvType.CV_8UC1);
    Imgproc.Canny(resizedImg, cannedImg, 70, 200, 3, true);
    resizedImg.release();

    Imgproc.threshold(cannedImg, cannedImg, 70, 255, Imgproc.THRESH_OTSU);

    Mat dilatedImg = new Mat(newSize, CvType.CV_8UC1);
    Mat morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
    Imgproc.dilate(cannedImg, dilatedImg, morph, new Point(-1, -1), 2, 1, new Scalar(1));
    cannedImg.release();
    morph.release();

    ArrayList<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(dilatedImg, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    hierarchy.release();
    dilatedImg.release();

    Log.d(TAG, "contours found: " + contours.size());

    Collections.sort(contours, new Comparator<MatOfPoint>() {
        @Override
        public int compare(MatOfPoint o1, MatOfPoint o2) {
            return Double.valueOf(Imgproc.contourArea(o2)).compareTo(Imgproc.contourArea(o1));
        }
    });

    return contours;
}
 
Example 14
Source File: BallDetector.java    From opencv-fun with GNU Affero General Public License v3.0
private void extractCircles (Mat mask, List<Circle> balls, List<BallCluster> ballClusters, List<MatOfPoint> contours) {
	// clear input
	balls.clear();
	ballClusters.clear();
	contours.clear();
	
	// find the contours
	Imgproc.findContours(mask.clone(), contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
	
	// iterate through the contours, find single balls and clusters of balls touching each other
	double minArea = Math.PI * (calib.getBallRadius() * 0.9f) * (calib.getBallRadius() * 0.9f); // minimal ball area
	double maxArea = Math.PI * (calib.getBallRadius() * 1.1f) * (calib.getBallRadius() * 1.1f); // maximal ball area

	for (int i = 0; i < contours.size(); i++) {
		double area = Imgproc.contourArea(contours.get(i));
		if (area > minArea) {				
			if (area < maxArea) {
				// we found a ball
				float[] radius = new float[1];
				Point center = new Point();
				Imgproc.minEnclosingCircle(new MatOfPoint2f(contours.get(i).toArray()), center, radius);
				balls.add(new Circle(center.x, center.y, calib.getBallRadius()));
			} else {
				// we found a cluster of balls
				int numBalls = (int)(area / (Math.PI * calib.getBallRadius() * calib.getBallRadius() * 0.9));
				
				// draw the contours to a bit mask
				Mat hough = Mat.zeros(mask.size(), CvType.CV_8U);
				Imgproc.drawContours(hough, contours, i, new Scalar(255, 255, 255), -2);
				
				// detect hough circles, try different params until we hit the number of balls
				Mat houghCircles = new Mat();
				int hit = 0;
				for(int j = 8; j < 20; j++) {
					Imgproc.HoughCircles(hough, houghCircles, Imgproc.CV_HOUGH_GRADIENT, 2, calib.getBallRadius() * 0.9 * 2, 255, j, (int)(calib.getBallRadius() * 0.9), (int)(calib.getBallRadius() * 1.1));
					if(houghCircles.cols() <= numBalls) {
						hit++;
						if(hit == 4) break;
					}
				}
				
				
				List<Circle> estimatedCircles = new ArrayList<Circle>();
				for(int j = 0; j < houghCircles.cols(); j++) {
					double[] circle = houghCircles.get(0, j);
					if(circle != null) {
						estimatedCircles.add(new Circle(circle[0], circle[1], calib.getBallRadius()));
					}
				}
				
				ballClusters.add(new BallCluster(contours.get(i), numBalls, estimatedCircles));
			}
		}
	}
}
 
Example 15
Source File: ColorBlobDetector.java    From FTCVision with MIT License
/**
 * Process an rgba image. The results can be drawn on or retrieved later.
 * This method does not modify the image.
 *
 * @param rgbaImage An RGBA image matrix
 */
public void process(Mat rgbaImage) {
    Imgproc.pyrDown(rgbaImage, mPyrDownMat);
    Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);

    Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);

    //Test whether we need two inRange operations (only if the hue crosses over 255)
    if (upperBound.getScalar().val[0] <= 255) {
        Core.inRange(mHsvMat, lowerBound.getScalar(), upperBound.getScalar(), mMask);
    } else {
        //We need two operations - we're going to OR the masks together
        Scalar lower = lowerBound.getScalar().clone();
        Scalar upper = upperBound.getScalar().clone();
        while (upper.val[0] > 255)
            upper.val[0] -= 255;
        double tmp = lower.val[0];
        lower.val[0] = 0;
        //Mask 1 - from 0 to n
        Core.inRange(mHsvMat, lower, upper, mMaskOne);
        //Mask 2 - from 255-n to 255
        lower.val[0] = tmp;
        upper.val[0] = 255;

        Core.inRange(mHsvMat, lower, upper, mMask);
        //OR the two masks
        Core.bitwise_or(mMaskOne, mMask, mMask);
    }

    //Dilate (blur) the mask to decrease processing power
    Imgproc.dilate(mMask, mDilatedMask, new Mat());

    List<MatOfPoint> contourListTemp = new ArrayList<>();

    Imgproc.findContours(mDilatedMask, contourListTemp, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    // Filter contours by area and resize to fit the original image size
    contours.clear();
    for (MatOfPoint c : contourListTemp) {
        Core.multiply(c, new Scalar(4, 4), c);
        contours.add(new Contour(c));
    }
}
 
Example 16
Source File: PrimitiveDetection.java    From FTCVision with MIT License
/**
 * Locate rectangles in an image
 *
 * @param grayImage Grayscale image
 * @return Rectangle locations
 */
public RectangleLocationResult locateRectangles(Mat grayImage) {
    Mat gray = grayImage.clone();

    //Filter out some noise by halving then doubling size
    Filter.downsample(gray, 2);
    Filter.upsample(gray, 2);

    //Mat is short for Matrix, and here is used to store an image.
    //it is n-dimensional, but as an image, is two-dimensional
    Mat cacheHierarchy = new Mat();
    Mat grayTemp = new Mat();
    List<Rectangle> rectangles = new ArrayList<>();
    List<Contour> contours = new ArrayList<>();

    //This finds the edges using a Canny Edge Detector
    //It is sent the grayscale Image, a temp Mat, the lower detection threshold for an edge,
    //the higher detection threshold, the Aperture (blurring) of the image - higher is better
    //for long, smooth edges, and whether a more accurate version (but time-expensive) version
    //should be used (true = more accurate)
    //Note: the edges are stored in "grayTemp", which is an image where everything
    //is black except for gray-scale lines delineating the edges.
    Imgproc.Canny(gray, grayTemp, 0, THRESHOLD_CANNY, APERTURE_CANNY, true);
    //make the white lines twice as big, while leaving the image size constant
    Filter.dilate(gray, 2);

    List<MatOfPoint> contoursTemp = new ArrayList<>();
    //Find contours - the parameters here are very important to compression and retention
    //grayTemp is the image from which the contours are found,
    //contoursTemp is where the resultant contours are stored (note: color is not retained),
    //cacheHierarchy is the parent-child relationship between the contours (e.g. a contour
    //inside of another is its child),
    //Imgproc.CV_RETR_LIST disables the hierarchical relationships being returned,
    //Imgproc.CHAIN_APPROX_SIMPLE means that the contour is compressed from a massive chain of
    //paired coordinates to just the endpoints of each segment (e.g. an up-right rectangular
    //contour is encoded with 4 points.)
    Imgproc.findContours(grayTemp, contoursTemp, cacheHierarchy, Imgproc.CV_RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    //MatOfPoint2f is a MatOfPoint (Matrix of Points) whose coordinates are floats instead of ints
    MatOfPoint2f approx = new MatOfPoint2f();
    //For each contour, test whether the contour is a rectangle
    //List<Contour> contours = new ArrayList<>()
    for (MatOfPoint co : contoursTemp) {
        //converting the MatOfPoint to MatOfPoint2f
        MatOfPoint2f matOfPoint2f = new MatOfPoint2f(co.toArray());
        //converting the matrix to a Contour
        Contour c = new Contour(co);

        //Attempt to fit the contour to the best polygon
        //input: matOfPoint2f, which is the contour found earlier
        //output: approx, which is the MatOfPoint2f that holds the new polygon that has less vertices
        //basically, it smooths out the edges using the third parameter as its approximation accuracy
        //final parameter determines whether the new approximation must be closed (true=closed)
        Imgproc.approxPolyDP(matOfPoint2f, approx,
                c.arcLength(true) * EPLISON_APPROX_TOLERANCE_FACTOR, true);

        //converting the MatOfPoint2f to a contour
        Contour approxContour = new Contour(approx);

        //Make sure the contour is big enough, CLOSED (convex), and has exactly 4 points
        if (approx.toArray().length == 4 &&
                Math.abs(approxContour.area()) > 1000 &&
                approxContour.isClosed()) {

            //TODO contours and rectangles array may not match up, but why would they?
            contours.add(approxContour);

            //Check each angle to be approximately 90 degrees
            //Done by comparing the three points constituting the angle of each corner
            double maxCosine = 0;
            for (int j = 2; j < 5; j++) {
                double cosine = Math.abs(MathUtil.angle(approx.toArray()[j % 4],
                        approx.toArray()[j - 2], approx.toArray()[j - 1]));
                maxCosine = Math.max(maxCosine, cosine);
            }

            if (maxCosine < MAX_COSINE_VALUE) {
                //Convert the points to a rectangle instance
                rectangles.add(new Rectangle(approx.toArray()));
            }
        }
    }

    return new RectangleLocationResult(contours, rectangles);
}
 
Example 17
Source File: CropImage.java    From reader with MIT License
private void makeDefault() {
            HighlightView hv = new HighlightView(mImageView);

            int width = mBitmap.getWidth();
            int height = mBitmap.getHeight();

            Rect imageRect = new Rect(0, 0, width, height);

            // make the default size about 4/5 of the width or height
//            int cropWidth = Math.min(width, height) * 4 / 5;
//            int cropHeight = cropWidth;
            int cropWidth = width;
            int cropHeight = height;

            if (mAspectX != 0 && mAspectY != 0) {
                if (mAspectX > mAspectY) {
                    cropHeight = cropWidth * mAspectY ;// mAspectX;
                } else {
                    cropWidth = cropHeight * mAspectX ;// mAspectY;
                }
            }

            int x = (width - cropWidth) / 2;
            int y = (height - cropHeight) / 2;
            
            Mat imgSource = new Mat();
            Utils.bitmapToMat(mBitmap, imgSource);
            //convert the image to grayscale
            Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
            //detect edges, producing a black and white (8-bit) edge image
            Imgproc.Canny(imgSource, imgSource, 50, 50);

            //apply gaussian blur to smoothen lines of dots
            Imgproc.GaussianBlur(imgSource, imgSource, new  org.opencv.core.Size(5, 5), 5);

            //find the contours
            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

            double maxArea = -1;
            int maxAreaIdx = -1;
            Log.d("size",Integer.toString(contours.size()));
            MatOfPoint temp_contour = contours.get(0); //the largest is at the index 0 for starting point
            MatOfPoint2f approxCurve = new MatOfPoint2f();
            MatOfPoint largest_contour = contours.get(0);
            //largest_contour.ge
            List<MatOfPoint> largest_contours = new ArrayList<MatOfPoint>();
            //Imgproc.drawContours(imgSource,contours, -1, new Scalar(0, 255, 0), 1);

            for (int idx = 0; idx < contours.size(); idx++) {
                temp_contour = contours.get(idx);
                double contourarea = Imgproc.contourArea(temp_contour);
                //compare this contour to the previous largest contour found
                if (contourarea > maxArea) {
                    //check if this contour is a square
                    MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
                    int contourSize = (int)temp_contour.total();
                    MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
                    Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize*0.05, true);
                    if (approxCurve_temp.total() == 4) {
                        maxArea = contourarea;
                        maxAreaIdx = idx;
                        approxCurve=approxCurve_temp;
                        largest_contour = temp_contour;
                    }
                }
            }

           Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
           double[] temp_double;
           float x1, y1, x2, y2;
           temp_double = approxCurve.get(0,0);       
           Point p1 = new Point(temp_double[0], temp_double[1]);
           x1 = (float)temp_double[0];
           y1 = (float)temp_double[1];
           //Core.circle(imgSource,p1,55,new Scalar(0,0,255));
           //Imgproc.warpAffine(sourceImage, dummy, rotImage,sourceImage.size());
           temp_double = approxCurve.get(1,0);       
           Point p2 = new Point(temp_double[0], temp_double[1]);
          // Core.circle(imgSource,p2,150,new Scalar(255,255,255));
           temp_double = approxCurve.get(2,0);       
           Point p3 = new Point(temp_double[0], temp_double[1]);
           x2 = (float)temp_double[0];
           y2 = (float)temp_double[1];
           //Core.circle(imgSource,p3,200,new Scalar(255,0,0));
           temp_double = approxCurve.get(3,0);       
           Point p4 = new Point(temp_double[0], temp_double[1]);

            RectF cropRect = new RectF(x, y, x + cropWidth, y + cropHeight);
            //RectF cropRect = new RectF(x1, y1, x2, y2);
            
            hv.setup(mImageMatrix, imageRect, cropRect, mCircleCrop,false
                     /*mAspectX != 0 && mAspectY != 0*/);
            mImageView.add(hv);
        }
 
Example 18
Source File: BackgroundSubtractionOp.java    From StormCV with Apache License 2.0
@Override
public List<CVParticle> execute(CVParticle input) throws Exception {
	ArrayList<CVParticle> result = new ArrayList<CVParticle>();
	
	// sanity check
	if( !( input instanceof Frame ) ) 
		return result;
	
	// initialize input and output result
	Frame frame  = (Frame) input;
	String streamId = frame.getStreamId();
		
	// check if input frame has an image
	if( frame.getImageType().equals(Frame.NO_IMAGE) ) 
		return result;

	// decode input image to OpenCV Mat
	Mat inputImage = ImageUtils.bytes2Mat(frame.getImageBytes());
	
	if(!mogs.containsKey(streamId) ){
		mogs.put(streamId, new BackgroundSubtractorMOG());
	}
	// update the background model
	Mat mogMask = new Mat();
    mogs.get(streamId).apply(inputImage, mogMask, 1f/framesHistory);
    
   	// find contours for the blobs
   	List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
   	Imgproc.findContours(mogMask.clone(), contours, new Mat(), 0 /*CV_RETR_EXTERNAL*/, 2 /*CV_CHAIN_APPROX_SIMPLE*/);
   	ArrayList<Descriptor> descriptors = new ArrayList<Descriptor>();
   	for(MatOfPoint contour : contours){
   		float[] polygon = new float[contour.rows()*2];
   		for(int row=0; row<contour.rows(); row++){
   			double[] point = contour.get(row, 0);
   			polygon[row*2] = (float)point[0];
   			polygon[row*2+1] = (float)point[1];
   		}
   		Rect rect = Imgproc.boundingRect(contour);
   		if(rect.width < 5 || rect.height < 5 ) continue;
   		descriptors.add(new Descriptor(streamId, input.getSequenceNr(), new Rectangle(rect.x, rect.y, rect.width, rect.height), 0, polygon));
   	}
   	Feature feature = new Feature(streamId, input.getSequenceNr(), featureName, 0, descriptors, null);
   	if(!outputFrame) {
   		result.add(feature);
   		return result;
   	}
   	if(binaryFrame){
   		byte[] outputBytes = ImageUtils.Mat2ImageBytes( mogMask, frame.getImageType() );
           frame.setImage( outputBytes, frame.getImageType() );
   	}
   	frame.getFeatures().add(feature);
   	result.add(frame);
   	return result;
}
 
Example 19
Source File: AAVActivity.java    From AAV with GNU General Public License v2.0
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
	synchronized (inputFrame) {

		_rgbaImage = inputFrame.rgba();

		if (android.os.Build.MODEL.equalsIgnoreCase("Nexus 5X")) {
			Core.flip(_rgbaImage, _rgbaImage, -1);
		}

		double current_contour;

		// In contrast to the C++ interface, Android API captures images in the RGBA format.
		// Also, in HSV space, only the hue determines which color it is. Saturation determines
		// how 'white' the color is, and Value determines how 'dark' the color is.
		Imgproc.cvtColor(_rgbaImage, _hsvMat, Imgproc.COLOR_RGB2HSV_FULL);

		Core.inRange(_hsvMat, _lowerThreshold, _upperThreshold, _processedMat);

		// Imgproc.dilate(_processedMat, _dilatedMat, new Mat());
		Imgproc.erode(_processedMat, _dilatedMat, new Mat());
		Imgproc.findContours(_dilatedMat, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
		MatOfPoint2f points = new MatOfPoint2f();
		_contourArea = 7;
		for (int i = 0, n = contours.size(); i < n; i++) {
			current_contour = Imgproc.contourArea(contours.get(i));
			if (current_contour > _contourArea) {
				_contourArea = current_contour;
				contours.get(i).convertTo(points, CvType.CV_32FC2); // contours.get(x) is a single MatOfPoint, but to use minEnclosingCircle we need to pass a MatOfPoint2f so we need to do a
				// conversion
			}
		}
		if (!points.empty() && _contourArea > MIN_CONTOUR_AREA) {
			Imgproc.minEnclosingCircle(points, _centerPoint, null);
			// Core.circle(_rgbaImage, _centerPoint, 3, new Scalar(255, 0, 0), Core.FILLED);
			if (_showContourEnable)
				Core.circle(_rgbaImage, _centerPoint, (int) Math.round(Math.sqrt(_contourArea / Math.PI)), new Scalar(255, 0, 0), 3, 8, 0);// Core.FILLED);
		}
		contours.clear();
	}
	return _rgbaImage;
}
 
Example 20
Source File: NativeClass.java    From AndroidDocumentScanner with MIT License
public List<MatOfPoint2f> getPoints(Mat src) {

        // Blur the image to filter out the noise.
        Mat blurred = new Mat();
        Imgproc.medianBlur(src, blurred, 9);

        // Set up images to use.
        Mat gray0 = new Mat(blurred.size(), CvType.CV_8U);
        Mat gray = new Mat();

        // For Core.mixChannels.
        List<MatOfPoint> contours = new ArrayList<>();
        List<MatOfPoint2f> rectangles = new ArrayList<>();

        List<Mat> sources = new ArrayList<>();
        sources.add(blurred);
        List<Mat> destinations = new ArrayList<>();
        destinations.add(gray0);

        // To filter rectangles by their areas.
        int srcArea = src.rows() * src.cols();

        // Find squares in every color plane of the image.
        for (int c = 0; c < 3; c++) {
            int[] ch = {c, 0};
            MatOfInt fromTo = new MatOfInt(ch);

            Core.mixChannels(sources, destinations, fromTo);

            // Try several threshold levels.
            for (int l = 0; l < THRESHOLD_LEVEL; l++) {
                if (l == 0) {
                    // HACK: Use Canny instead of zero threshold level.
                    // Canny helps to catch squares with gradient shading.
                    // NOTE: No kernel size parameters on Java API.
                    Imgproc.Canny(gray0, gray, 10, 20);

                    // Dilate Canny output to remove potential holes between edge segments.
                    Imgproc.dilate(gray, gray, Mat.ones(new Size(3, 3), 0));
                } else {
                    int threshold = (l + 1) * 255 / THRESHOLD_LEVEL;
                    Imgproc.threshold(gray0, gray, threshold, 255, Imgproc.THRESH_BINARY);
                }

                // Find contours and store them all as a list.
                Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

                for (MatOfPoint contour : contours) {
                    MatOfPoint2f contourFloat = MathUtils.toMatOfPointFloat(contour);
                    double arcLen = Imgproc.arcLength(contourFloat, true) * 0.02;

                    // Approximate polygonal curves.
                    MatOfPoint2f approx = new MatOfPoint2f();
                    Imgproc.approxPolyDP(contourFloat, approx, arcLen, true);

                    if (isRectangle(approx, srcArea)) {
                        rectangles.add(approx);
                    }
                }
            }
        }

        return rectangles;

    }