Java Code Examples for org.opencv.imgproc.Imgproc#threshold()

The following examples show how to use org.opencv.imgproc.Imgproc#threshold(). They are taken from open-source projects; you can go to the original project or source file by following the links above each example.
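
Before the project examples, here is a minimal, self-contained sketch of the call itself. It assumes a desktop OpenCV 3.x/4.x Java build (on Android you would typically go through Utils.bitmapToMat instead of Imgcodecs); the class name and file paths are placeholders.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class ThresholdSketch {
    public static void main(String[] args) {
        // Load the native OpenCV library.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Read the input as a single-channel grayscale image ("input.png" is a placeholder path).
        Mat gray = Imgcodecs.imread("input.png", Imgcodecs.IMREAD_GRAYSCALE);

        // Fixed binary threshold: pixels above 127 become 255 (maxval), all others become 0.
        Mat binary = new Mat();
        Imgproc.threshold(gray, binary, 127, 255, Imgproc.THRESH_BINARY);

        // Otsu's method: the fixed threshold is ignored, an optimal value is computed
        // from the histogram, and that value is returned by threshold().
        Mat otsu = new Mat();
        double otsuValue = Imgproc.threshold(gray, otsu, 0, 255,
                Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
        System.out.println("Otsu picked threshold: " + otsuValue);

        Imgcodecs.imwrite("binary.png", binary);
        Imgcodecs.imwrite("otsu.png", otsu);
    }
}

Every example below uses this same Imgproc.threshold(src, dst, thresh, maxval, type) signature; only the type flag (THRESH_BINARY, THRESH_BINARY_INV, THRESH_TOZERO, THRESH_OTSU, ...) and the surrounding pre-processing change.
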
Example 1
Source File: MainActivity.java    From MOAAP with MIT License
public void DifferenceOfGaussian() {
    Mat grayMat = new Mat();
    Mat blur1 = new Mat();
    Mat blur2 = new Mat();

    //Converting the image to grayscale
    Imgproc.cvtColor(originalMat, grayMat, Imgproc.COLOR_BGR2GRAY);

    Imgproc.GaussianBlur(grayMat, blur1, new Size(15, 15), 5);
    Imgproc.GaussianBlur(grayMat, blur2, new Size(21, 21), 5);

    //Subtracting the two blurred images
    Mat DoG = new Mat();
    Core.absdiff(blur1, blur2, DoG);

    //Amplify the DoG response, then apply inverse binary thresholding
    Core.multiply(DoG, new Scalar(100), DoG);
    Imgproc.threshold(DoG, DoG, 50, 255, Imgproc.THRESH_BINARY_INV);

    //Converting Mat back to Bitmap
    Utils.matToBitmap(DoG, currentBitmap);
    imageView.setImageBitmap(currentBitmap);
}
 
Example 2
Source File: Masking.java    From Android-Face-Recognition-with-Deep-Learning-Library with Apache License 2.0
public PreProcessor preprocessImage(PreProcessor preProcessor) {
    List<Mat> images = preProcessor.getImages();
    List<Mat> processed = new ArrayList<Mat>();
    for (Mat img : images){
        preProcessor.normalize0255(img);

        /***************************************************************************************
         *    Title: Automatic calculation of low and high thresholds for the Canny operation in opencv
         *    Author: VP
         *    Date: 16.04.2013
         *    Code version: -
         *    Availability: http://stackoverflow.com
         *
         ***************************************************************************************/

        double otsu_thresh_val = Imgproc.threshold(img, img, 0, 255, Imgproc.THRESH_OTSU);
        Imgproc.Canny(img, img, otsu_thresh_val * 0.5, otsu_thresh_val);
        processed.add(img);
    }
    preProcessor.setImages(processed);
    return preProcessor;
}
 
Example 3
Source File: MainActivity.java    From OCR-Test with Apache License 2.0
public Bitmap convertToBlackWhite(Bitmap compressImage)
{
    Log.d("CV", "Before converting to black");
    Mat imageMat = new Mat();
    Utils.bitmapToMat(compressImage, imageMat);
    Imgproc.cvtColor(imageMat, imageMat, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(imageMat, imageMat, new Size(3, 3), 0);
    //Imgproc.adaptiveThreshold(imageMat, imageMat, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY_INV, 5, 4);
    //Imgproc.medianBlur(imageMat, imageMat, 3);
    Imgproc.threshold(imageMat, imageMat, 0, 255, Imgproc.THRESH_OTSU);

    // Write the result back into the bitmap that was passed in (its dimensions must match the Mat)
    Bitmap newBitmap = compressImage;
    Utils.matToBitmap(imageMat, newBitmap);
    imageView.setImageBitmap(newBitmap);
    Log.d("CV", "After converting to black");

    return newBitmap;
}
 
Example 4
Source File: MainActivity.java    From MOAAP with MIT License
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent imageReturnedIntent) {
    //Put it there, just in case:)
    super.onActivityResult(requestCode, resultCode, imageReturnedIntent);

    switch(requestCode) {
        case SELECT_PHOTO:
            if(resultCode == RESULT_OK && read_external_storage_granted){
                try {
                    final Uri imageUri = imageReturnedIntent.getData();
                    final InputStream imageStream = getContentResolver().openInputStream(imageUri);
                    final Bitmap selectedImage = BitmapFactory.decodeStream(imageStream);
                    src = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC4);
                    Utils.bitmapToMat(selectedImage, src);
                    src_gray = new Mat(selectedImage.getHeight(), selectedImage.getWidth(), CvType.CV_8UC1);
                    switch (ACTION_MODE) {
                        case HomeActivity.GAUSSIAN_BLUR:
                            Imgproc.GaussianBlur(src, src, new Size(9, 9), 0);
                            break;
                        case HomeActivity.MEAN_BLUR:
                            Imgproc.blur(src, src, new Size(9, 9));
                            break;
                        case HomeActivity.MEDIAN_BLUR:
                            Imgproc.medianBlur(src, src, 9);
                            break;
                        case HomeActivity.SHARPEN:
                            Mat kernel = new Mat(3, 3, CvType.CV_16SC1);
                            //int[] values = {0, -1, 0, -1, 5, -1, 0, -1, 0};
                            Log.d("imageType", CvType.typeToString(src.type()) + "");
                            kernel.put(0, 0, 0, -1, 0, -1, 5, -1, 0, -1, 0);
                            Imgproc.filter2D(src, src, src_gray.depth(), kernel);
                            break;
                        case HomeActivity.DILATE:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelDilate = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
                            Imgproc.dilate(src_gray, src_gray, kernelDilate);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ERODE:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Mat kernelErode = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
                            Imgproc.erode(src_gray, src_gray, kernelErode);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.threshold(src_gray, src_gray, 100, 255, Imgproc.THRESH_BINARY);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                        case HomeActivity.ADAPTIVE_THRESHOLD:
                            Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
                            Imgproc.adaptiveThreshold(src_gray, src_gray, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 3, 0);
                            Imgproc.cvtColor(src_gray, src, Imgproc.COLOR_GRAY2RGBA, 4);
                            break;
                    }
                    Bitmap processedImage = Bitmap.createBitmap(src.cols(), src.rows(), Bitmap.Config.ARGB_8888);
                    Log.i("imageType", CvType.typeToString(src.type()) + "");
                    Utils.matToBitmap(src, processedImage);
                    ivImage.setImageBitmap(selectedImage);
                    ivImageProcessed.setImageBitmap(processedImage);
                    Log.i("process", "process done");
                } catch (FileNotFoundException e) {
                    e.printStackTrace();
                }
            }
            break;
    }
}
 
Example 5
Source File: Finder.java    From SikuliX1 with MIT License
public static List<Region> findChanges(FindInput2 findInput) {
  findInput.setAttributes();
  Mat previousGray = SXOpenCV.newMat();
  Mat nextGray = SXOpenCV.newMat();
  Mat mDiffAbs = SXOpenCV.newMat();
  Mat mDiffThresh = SXOpenCV.newMat();
  Imgproc.cvtColor(findInput.getBase(), previousGray, toGray);
  Imgproc.cvtColor(findInput.getTarget(), nextGray, toGray);
  Core.absdiff(previousGray, nextGray, mDiffAbs);
  Imgproc.threshold(mDiffAbs, mDiffThresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);

  List<Region> rectangles = new ArrayList<>();
  if (Core.countNonZero(mDiffThresh) > IMAGE_DIFF_THRESHOLD) {
    Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
    Imgproc.dilate(mDiffAbs, mDiffAbs, SXOpenCV.newMat());
    Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
    Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat mHierarchy = SXOpenCV.newMat();
    Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    for (MatOfPoint contour : contours) {
      int x1 = 99999;
      int y1 = 99999;
      int x2 = 0;
      int y2 = 0;
      List<org.opencv.core.Point> points = contour.toList();
      for (Point point : points) {
        int x = (int) point.x;
        int y = (int) point.y;
        if (x < x1) x1 = x;
        if (x > x2) x2 = x;
        if (y < y1) y1 = y;
        if (y > y2) y2 = y;
      }
      Region rect = new Region(x1, y1, x2 - x1, y2 - y1);
      rectangles.add(rect);
    }
  }
  return rectangles;
}
 
Example 6
Source File: SXOpenCV.java    From SikuliX1 with MIT License
public static List<Match> doFindChanges(Image original, Image changed) {
  List<Match> changes = new ArrayList<>();
  if (changed.isValid()) {
    int PIXEL_DIFF_THRESHOLD = 3;
    int IMAGE_DIFF_THRESHOLD = 5;
    Mat previousGray = SXOpenCV.newMat();
    Mat nextGray = SXOpenCV.newMat();
    Mat mDiffAbs = SXOpenCV.newMat();
    Mat mDiffTresh = SXOpenCV.newMat();

    Imgproc.cvtColor(original.getContent(), previousGray, toGray);
    Imgproc.cvtColor(changed.getContent(), nextGray, toGray);
    Core.absdiff(previousGray, nextGray, mDiffAbs);
    Imgproc.threshold(mDiffAbs, mDiffTresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);

    if (Core.countNonZero(mDiffTresh) > IMAGE_DIFF_THRESHOLD) {
      Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
      Imgproc.dilate(mDiffAbs, mDiffAbs, SXOpenCV.newMat());
      Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
      Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);

      List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
      Mat mHierarchy = SXOpenCV.newMat();
      Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
      changes = contoursToRectangle(contours);
    }
  }
  return changes;
}
 
Example 7
Source File: CVProcessor.java    From CVScanner with GNU General Public License v3.0
public static List<MatOfPoint> findContoursForMRZ(Mat src){
    Mat img = src.clone();
    src.release();
    double ratio = getScaleRatio(img.size());
    int width = (int) (img.size().width / ratio);
    int height = (int) (img.size().height / ratio);
    Size newSize = new Size(width, height);
    Mat resizedImg = new Mat(newSize, CvType.CV_8UC4);
    Imgproc.resize(img, resizedImg, newSize);

    Mat gray = new Mat();
    Imgproc.cvtColor(resizedImg, gray, Imgproc.COLOR_BGR2GRAY);
    Imgproc.medianBlur(gray, gray, 3);
    //Imgproc.blur(gray, gray, new Size(3, 3));

    Mat morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(13, 5));
    Mat dilatedImg = new Mat();
    Imgproc.morphologyEx(gray, dilatedImg, Imgproc.MORPH_BLACKHAT, morph);
    gray.release();

    Mat gradX = new Mat();
    Imgproc.Sobel(dilatedImg, gradX, CvType.CV_32F, 1, 0);
    dilatedImg.release();
    Core.convertScaleAbs(gradX, gradX, 1, 0);
    Core.MinMaxLocResult minMax = Core.minMaxLoc(gradX);
    Core.convertScaleAbs(gradX, gradX, (255/(minMax.maxVal - minMax.minVal)),
            - ((minMax.minVal * 255) / (minMax.maxVal - minMax.minVal)));
    Imgproc.morphologyEx(gradX, gradX, Imgproc.MORPH_CLOSE, morph);

    Mat thresh = new Mat();
    Imgproc.threshold(gradX, thresh, 0, 255, Imgproc.THRESH_OTSU);
    gradX.release();
    morph.release();

    morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(21, 21));
    Imgproc.morphologyEx(thresh, thresh, Imgproc.MORPH_CLOSE, morph);
    Imgproc.erode(thresh, thresh, new Mat(), new Point(-1, -1), 4);
    morph.release();

    int col = (int) resizedImg.size().width;
    int p = (int) (resizedImg.size().width * 0.05);
    int row = (int) resizedImg.size().height;
    // Zero out the left and right 5% margins of the thresholded image
    for(int i = 0; i < row; i++)
    {
        for(int j = 0; j < p; j++){
            thresh.put(i, j, 0);
            thresh.put(i, col - j - 1, 0); // keep the column index in range
        }
    }

    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(thresh, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    hierarchy.release();

    Log.d(TAG, "contours found: " + contours.size());

    Collections.sort(contours, new Comparator<MatOfPoint>() {
        @Override
        public int compare(MatOfPoint o1, MatOfPoint o2) {
            return Double.valueOf(Imgproc.contourArea(o2)).compareTo(Imgproc.contourArea(o1));
        }
    });

    return contours;
}
 
Example 8
Source File: Finder.java    From SikuliNG with MIT License
public static List<Element> detectChanges(Mat base, Mat mChanged) {
  int PIXEL_DIFF_THRESHOLD = 3;
  int IMAGE_DIFF_THRESHOLD = 5;
  Mat mBaseGray = Element.getNewMat();
  Mat mChangedGray = Element.getNewMat();
  Mat mDiffAbs = Element.getNewMat();
  Mat mDiffTresh = Element.getNewMat();
  Mat mChanges = Element.getNewMat();
  List<Element> rectangles = new ArrayList<>();

  Imgproc.cvtColor(base, mBaseGray, toGray);
  Imgproc.cvtColor(mChanged, mChangedGray, toGray);
  Core.absdiff(mBaseGray, mChangedGray, mDiffAbs);
  Imgproc.threshold(mDiffAbs, mDiffTresh, PIXEL_DIFF_THRESHOLD, 0.0, Imgproc.THRESH_TOZERO);
  if (Core.countNonZero(mDiffTresh) > IMAGE_DIFF_THRESHOLD) {
    Imgproc.threshold(mDiffAbs, mDiffAbs, PIXEL_DIFF_THRESHOLD, 255, Imgproc.THRESH_BINARY);
    Imgproc.dilate(mDiffAbs, mDiffAbs, Element.getNewMat());
    Mat se = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
    Imgproc.morphologyEx(mDiffAbs, mDiffAbs, Imgproc.MORPH_CLOSE, se);

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat mHierarchy = Element.getNewMat();
    Imgproc.findContours(mDiffAbs, contours, mHierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    rectangles = contoursToRectangle(contours);

    Core.subtract(mDiffAbs, mDiffAbs, mChanges);
    Imgproc.drawContours(mChanges, contours, -1, new Scalar(255));
    //logShow(mDiffAbs);
  }
  return rectangles;
}
 
Example 9
Source File: MotionDetector.java    From opencv-fun with GNU Affero General Public License v3.0
/**
 * @return true if motion was detected compared to the last frame
 */
public boolean detect(Mat frame) {
	if(lastImage == null) {
		lastImage = frame.clone();
		return true;
	}
	
	Mat diff = new Mat();
	Core.absdiff(lastImage, frame, diff);
	Imgproc.threshold(diff, diff, 35, 255, Imgproc.THRESH_BINARY);
	
	// extract color channels and merge them to single bitmask
	Mat r = ColorSpace.getChannel(diff, 2);
	Mat g = ColorSpace.getChannel(diff, 1);
	Mat b = ColorSpace.getChannel(diff, 0);

	mask = r.clone();
	Core.add(mask, g, mask);
	Core.add(mask, b, mask);
	
	float changes = Core.countNonZero(mask) / (float)( frame.cols() * frame.rows());
	r.release();
	g.release();
	b.release();
	lastImage.release();
	lastImage = frame.clone();
	return thresholdPercentage < changes;
}
 
Example 10
Source File: BackgroundSubtractor.java    From opencv-fun with GNU Affero General Public License v3.0
public Mat createMask(Mat camera) {				
	// copy as we are going to destruct those images maybe
	Mat camBlur= camera.clone();
	Mat backgroundBlur = calib.getBackgroundImage().clone();

	// remove noise
	Imgproc.blur(backgroundBlur, backgroundBlur, new Size(calib.getBlurSize(), calib.getBlurSize()));
	Imgproc.blur(camBlur, camBlur, new Size(calib.getBlurSize(), calib.getBlurSize()));

	// take abs diff and create binary image in all 3 channels
	Mat diff = new Mat();
	Core.absdiff(backgroundBlur, camBlur, diff);
	Imgproc.threshold(diff, diff, calib.getSubtractionThreshold(), 255, Imgproc.THRESH_BINARY);

	// extract color channels and merge them to single bitmask
	Mat r = ColorSpace.getChannel(diff, 2);
	Mat g = ColorSpace.getChannel(diff, 1);
	Mat b = ColorSpace.getChannel(diff, 0);

	Mat mask = r.clone();
	Core.add(mask, g, mask);
	Core.add(mask, b, mask);
	
	// dilate to remove some black gaps within balls
	Imgproc.dilate(mask, mask, Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(calib.getMorphSize(), calib.getMorphSize())));

	return mask;
}
 
Example 11
Source File: NativeClass.java    From AndroidDocumentScanner with MIT License
public List<MatOfPoint2f> getPoints(Mat src) {

        // Blur the image to filter out the noise.
        Mat blurred = new Mat();
        Imgproc.medianBlur(src, blurred, 9);

        // Set up images to use.
        Mat gray0 = new Mat(blurred.size(), CvType.CV_8U);
        Mat gray = new Mat();

        // For Core.mixChannels.
        List<MatOfPoint> contours = new ArrayList<>();
        List<MatOfPoint2f> rectangles = new ArrayList<>();

        List<Mat> sources = new ArrayList<>();
        sources.add(blurred);
        List<Mat> destinations = new ArrayList<>();
        destinations.add(gray0);

        // To filter rectangles by their areas.
        int srcArea = src.rows() * src.cols();

        // Find squares in every color plane of the image.
        for (int c = 0; c < 3; c++) {
            int[] ch = {c, 0};
            MatOfInt fromTo = new MatOfInt(ch);

            Core.mixChannels(sources, destinations, fromTo);

            // Try several threshold levels.
            for (int l = 0; l < THRESHOLD_LEVEL; l++) {
                if (l == 0) {
                    // HACK: Use Canny instead of zero threshold level.
                    // Canny helps to catch squares with gradient shading.
                    // NOTE: No kernel size parameters on Java API.
                    Imgproc.Canny(gray0, gray, 10, 20);

                    // Dilate Canny output to remove potential holes between edge segments.
                    Imgproc.dilate(gray, gray, Mat.ones(new Size(3, 3), 0));
                } else {
                    int threshold = (l + 1) * 255 / THRESHOLD_LEVEL;
                    Imgproc.threshold(gray0, gray, threshold, 255, Imgproc.THRESH_BINARY);
                }

                // Find contours and store them all as a list.
                Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

                for (MatOfPoint contour : contours) {
                    MatOfPoint2f contourFloat = MathUtils.toMatOfPointFloat(contour);
                    double arcLen = Imgproc.arcLength(contourFloat, true) * 0.02;

                    // Approximate polygonal curves.
                    MatOfPoint2f approx = new MatOfPoint2f();
                    Imgproc.approxPolyDP(contourFloat, approx, arcLen, true);

                    if (isRectangle(approx, srcArea)) {
                        rectangles.add(approx);
                    }
                }
            }
        }

        return rectangles;

    }
 
Example 12
Source File: KMeansMatcher.java    From mvisc with GNU General Public License v3.0
public void computeModel(ArrayList<MetaData> photos)
{
	numPhotos = photos.size();
	model.setNumPhotos(numPhotos);

	MatOfKeyPoint[] keypoints = new MatOfKeyPoint[numPhotos];
	Mat[] descriptors = new Mat[numPhotos];
	Mat allDescriptors = new Mat();
	ArrayList<Integer> descriptorLabels = new ArrayList<Integer>();

	// compute keypoints and descriptors
	Mat currentImg = null;
	for (int a = 0; a < numPhotos; a++)
	{
		// System.out.println("now:" + animalFiles.get(a));
		currentImg = Highgui.imread(photos.get(a).getZooName().toString(), 0);
		Imgproc.resize(currentImg, currentImg, new Size(150, 250));
		Imgproc.equalizeHist(currentImg, currentImg);
		Imgproc.threshold(currentImg, currentImg, 127, 255, Imgproc.THRESH_BINARY);

		// allocate the output containers before detection/extraction (the array elements start out null)
		keypoints[a] = new MatOfKeyPoint();
		descriptors[a] = new Mat();
		featureDetector.detect(currentImg, keypoints[a]);
		descriptorExtractor.compute(currentImg, keypoints[a], descriptors[a]);

		allDescriptors.push_back(descriptors[a]);

		for (int i = 0; i < descriptors[a].rows(); i++)
			descriptorLabels.add(a);
	}
	System.out.println("label size:" + descriptorLabels.size());

	Mat clusterLabels = new Mat();
	Mat centers = new Mat();

	// set up all descriptors, init criteria
	allDescriptors.convertTo(allDescriptors, CvType.CV_32F);
	TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 100, 0.1);
	long before = System.currentTimeMillis();
	
	// compute clusters
	System.out.print("creating kmeans clusters...");
	Core.kmeans(allDescriptors, k, clusterLabels, criteria, 10, Core.KMEANS_PP_CENTERS, centers);
	System.out.println("done.");

	// map k-means centroid labels to descriptors of all images
	ArrayList<ArrayList<Integer>> clusterImageMap = new ArrayList<ArrayList<Integer>>();
	for (int nk = 0; nk < k + 1; nk++)
		clusterImageMap.add(new ArrayList<Integer>());
	for (int r = 0; r < clusterLabels.rows(); r++)
		clusterImageMap.get((int) clusterLabels.get(r, 0)[0]).add(descriptorLabels.get(r));

	model.setCentroids(centers);
	model.setLabels(clusterLabels);
	model.setClusterImageMap(clusterImageMap);
	model.setKeypoints(keypoints);
	model.setDescriptors(descriptors);

}
 
Example 13
Source File: LeviColorFilter.java    From DogeCV with GNU General Public License v3.0
/**
 * Process an image and return a mask
 * @param input - Input image to process
 * @param mask - Output mask
 */
@Override
public void process(Mat input, Mat mask) {
    channels = new ArrayList<>();

    switch(color){
        case RED:
            if(threshold == -1){
                threshold = 164;
            }

            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2Lab);
            Imgproc.GaussianBlur(input,input,new Size(3,3),0);
            Core.split(input, channels);
            Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY);
            break;
        case BLUE:
            if(threshold == -1){
                threshold = 145;
            }

            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2YUV);
            Imgproc.GaussianBlur(input,input,new Size(3,3),0);
            Core.split(input, channels);
            Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY);
            break;
        case WHITE:
            if(threshold == -1) {
                threshold = 150;
            }

            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2Lab);
            Imgproc.GaussianBlur(input,input,new Size(3,3),0);
            Core.split(input, channels);
            Core.inRange(channels.get(0), new Scalar(threshold, 150, 40), new Scalar(255, 150, 150), mask);
            break;
        case YELLOW:
            if(threshold == -1){
                threshold = 70;
            }
            
            Mat lab = new Mat(input.size(), 0);
            Imgproc.cvtColor(input, lab, Imgproc.COLOR_RGB2Lab);
            Mat temp = new Mat();
            Core.inRange(input, new Scalar(0,0,0), new Scalar(255,255,164), temp);
            Mat mask2 = new Mat(input.size(), 0);
            temp.copyTo(mask2);
            input.copyTo(input, mask2);
            mask2.release();
            temp.release();
            lab.release();
            
            Imgproc.cvtColor(input, input, Imgproc.COLOR_RGB2YUV);
            Imgproc.GaussianBlur(input,input,new Size(3,3),0);
            Core.split(input, channels);
            if(channels.size() > 0){
                Imgproc.threshold(channels.get(1), mask, threshold, 255, Imgproc.THRESH_BINARY_INV);
            }

            break;
    }

    for(int i=0;i<channels.size();i++){
        channels.get(i).release();
    }

    input.release();

}
 
Example 14
Source File: CVProcessor.java    From CVScanner with GNU General Public License v3.0
public static List<MatOfPoint> findContours(Mat src){
    Mat img = src.clone();

    //find contours
    double ratio = getScaleRatio(img.size());
    int width = (int) (img.size().width / ratio);
    int height = (int) (img.size().height / ratio);
    Size newSize = new Size(width, height);
    Mat resizedImg = new Mat(newSize, CvType.CV_8UC4);
    Imgproc.resize(img, resizedImg, newSize);
    img.release();

    Imgproc.medianBlur(resizedImg, resizedImg, 7);

    Mat cannedImg = new Mat(newSize, CvType.CV_8UC1);
    Imgproc.Canny(resizedImg, cannedImg, 70, 200, 3, true);
    resizedImg.release();

    Imgproc.threshold(cannedImg, cannedImg, 70, 255, Imgproc.THRESH_OTSU);

    Mat dilatedImg = new Mat(newSize, CvType.CV_8UC1);
    Mat morph = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
    Imgproc.dilate(cannedImg, dilatedImg, morph, new Point(-1, -1), 2, 1, new Scalar(1));
    cannedImg.release();
    morph.release();

    ArrayList<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(dilatedImg, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    hierarchy.release();
    dilatedImg.release();

    Log.d(TAG, "contours found: " + contours.size());

    Collections.sort(contours, new Comparator<MatOfPoint>() {
        @Override
        public int compare(MatOfPoint o1, MatOfPoint o2) {
            return Double.valueOf(Imgproc.contourArea(o2)).compareTo(Imgproc.contourArea(o1));
        }
    });

    return contours;
}
 
Example 15
Source File: MainActivity.java    From effective_android_sample with Apache License 2.0
/**
 * Extracts the object from a bitmap using OpenCV.
 * @param bmpOrig
 */
private void extractObject(Bitmap bmpOrig) {

    // First, display the original bitmap
    mImageView1.setImageBitmap(bmpOrig);
    // Get the height and width
    int height = bmpOrig.getHeight();
    int width = bmpOrig.getWidth();
    
    // Prepare the OpenCV Mat object
    Mat matOrig = new Mat(height,width,CvType.CV_8UC4); 
    // Convert the bitmap into the OpenCV Mat
    Utils.bitmapToMat(bmpOrig, matOrig);
    
    /**
     * Convert to grayscale
     */
    Mat matGray = new Mat(height,width,CvType.CV_8UC1);
    Imgproc.cvtColor(matOrig, matGray, Imgproc.COLOR_RGB2GRAY);
    // Display
    Bitmap bmpGray = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(matGray, bmpGray);
    mImageView2.setImageBitmap(bmpGray);

    /**
     * Grayscale → binarization
     */
    Mat matBlack = new Mat(height,width,CvType.CV_8UC1);
    // Binarize
    Imgproc.threshold(matGray, matBlack, sTH, 255, Imgproc.THRESH_BINARY);
    // Display
    Bitmap bmpBlack = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(matBlack, bmpBlack);
    mImageView3.setImageBitmap(bmpBlack);

    /**
     * Grayscale → binarization → filled contours
     */
    // Extract the contours
    ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Mat hierarchy = new Mat(matBlack.height(),matBlack.width(),CvType.CV_8UC1);
    int mode = Imgproc.RETR_EXTERNAL;
    int method = Imgproc.CHAIN_APPROX_SIMPLE;

    // Extract the contours
    Imgproc.findContours(matBlack, contours, hierarchy, mode, method);
    // Draw the contours
    Scalar color = new Scalar(255.f, 0.f, 0.f, 0.f);
    Imgproc.drawContours(matBlack, contours, -1, color, 2);
    // Display
    Bitmap bmpContour = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matBlack, bmpContour);
    mImageView4.setImageBitmap(bmpContour);

    // Fill the interior of the extracted contours
    Imgproc.drawContours(matBlack, contours, -1, color, -1);
    // Display
    Bitmap bmpContour2 = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matBlack, bmpContour2);
    mImageView5.setImageBitmap(bmpContour2);

    
    /**
     * Extract only the object using the binarized mask
     */
    Mat matObject = new Mat(height,width,CvType.CV_8UC4); 
    Core.add(matObject, matOrig, matObject, matBlack);  
    // Display
    Bitmap bmpObject = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matObject, bmpObject);
    mImageView6.setImageBitmap(bmpObject);

    /**
     * Crop only the bounding rectangle of the extracted object as a bitmap
     */
    Rect rect = Imgproc.boundingRect(contours.get(0));
    Mat matCut = new Mat(matObject, rect);
    // Display
    Bitmap bmpCut = Bitmap.createBitmap(matCut.cols(), matCut.rows(), Bitmap.Config.ARGB_8888);
    org.opencv.android.Utils.matToBitmap(matCut, bmpCut);
    mImageView7.setImageBitmap(bmpCut);
}