org.bytedeco.javacv.OpenCVFrameConverter Java Examples

The following examples show how to use org.bytedeco.javacv.OpenCVFrameConverter. They are taken from open-source projects; the source file, project, and license are listed above each example.
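OpenCVFrameConverter is the bridge between JavaCV's Frame type (used by the frame grabbers, recorders, and CanvasFrame) and OpenCV's native image types: the ToMat, ToIplImage, and ToOrgOpenCvCoreMat subclasses convert to and from opencv_core.Mat, IplImage, and org.opencv.core.Mat respectively. The sketch below shows the typical round trip; it assumes the JavaCV 1.5.x package layout and a placeholder input.mp4, and is illustrative rather than taken from any of the projects listed here.

import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.OpenCVFrameConverter;
import org.bytedeco.opencv.opencv_core.Mat;

public class FrameMatRoundTrip {
    public static void main(String[] args) throws Exception {
        // Grab one video frame from a placeholder file.
        FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.mp4");
        grabber.start();
        Frame frame = grabber.grabImage(); // null at end of stream; skips audio packets

        if (frame != null) {
            OpenCVFrameConverter.ToMat toMat = new OpenCVFrameConverter.ToMat();
            Mat mat = toMat.convert(frame);   // Frame -> Mat, so OpenCV routines can process the pixels
            // ... run OpenCV processing on mat here ...
            Frame back = toMat.convert(mat);  // Mat -> Frame, e.g. for a CanvasFrame or a recorder
            System.out.println("Converted " + back.imageWidth + "x" + back.imageHeight + " frame");
        }

        grabber.stop();
        grabber.release();
    }
}

A single converter instance can be reused for every frame; the examples below follow this pattern across a variety of projects.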
Example #1
Source File: CifarLoader.java    From DataVec with Apache License 2.0
/**
 * Preprocess and store cifar based on successful Torch approach by Sergey Zagoruyko
 * Reference: https://github.com/szagoruyko/cifar.torch
 */
public opencv_core.Mat convertCifar(Mat orgImage) {
    numExamples++;
    Mat resImage = new Mat();
    OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
    //        ImageTransform yuvTransform = new ColorConversionTransform(new Random(seed), COLOR_BGR2Luv);
    //        ImageTransform histEqualization = new EqualizeHistTransform(new Random(seed), COLOR_BGR2Luv);
    ImageTransform yuvTransform = new ColorConversionTransform(new Random(seed), COLOR_BGR2YCrCb);
    ImageTransform histEqualization = new EqualizeHistTransform(new Random(seed), COLOR_BGR2YCrCb);

    if (converter != null) {
        ImageWritable writable = new ImageWritable(converter.convert(orgImage));
        // TODO: determine whether Y should be normalized before the transform - the OpenCV docs recommend it, but it is currently done after
        writable = yuvTransform.transform(writable); // convert to the YCrCb (chroma) color space to help emphasize image objects
        writable = histEqualization.transform(writable); // equalize the histogram to further clarify the object of interest
        resImage = converter.convert(writable.getFrame());
    }

    return resImage;
}
 
Example #2
Source File: VideoPlayer.java    From Java-Machine-Learning-for-Computer-Vision with MIT License
private void runVideoMainThread(String videoFileName, OpenCVFrameConverter.ToMat toMat) throws FrameGrabber.Exception {
    FFmpegFrameGrabber grabber = initFrameGrabber(videoFileName);
    while (!stop) {
        Frame frame = grabber.grab();
        if (frame == null) {
            log.info("Stopping");
            stop();
            break;
        }
        if (frame.image == null) {
            continue;
        }
        yolo.push(frame);
        opencv_core.Mat mat = toMat.convert(frame);
        yolo.drawBoundingBoxesRectangles(frame, mat);
        imshow(windowName, mat);
        char key = (char) waitKey(20);
        // Exit this loop on escape:
        if (key == 27) {
            stop();
            break;
        }
    }
}
 
Example #3
Source File: HaarFaceDetector.java    From javacv-cnn-example with MIT License
public HaarFaceDetector() {
    iplImageConverter = new OpenCVFrameConverter.ToIplImage();
    toMatConverter = new OpenCVFrameConverter.ToMat();

    try {
        File haarCascade = new File(this.getClass().getResource("/detection/haarcascade_frontalface_alt.xml").toURI());
        logger.debug("Using Haar Cascade file located at : {}", haarCascade.getAbsolutePath());
        //haarClassifierCascade = new CvHaarClassifierCascade(cvload(haarCascade.getAbsolutePath()));
        faceCascade = new CascadeClassifier(haarCascade.getCanonicalPath());

    } catch (Exception e) {
        logger.error("Error when trying to get the haar cascade", e);
        throw new IllegalStateException("Error when trying to get the haar cascade", e);
    }
    storage = CvMemStorage.create();
}
 
Example #4
Source File: CifarLoader.java    From deeplearning4j with Apache License 2.0
/**
 * Preprocess and store cifar based on successful Torch approach by Sergey Zagoruyko
 * Reference: <a href="https://github.com/szagoruyko/cifar.torch">https://github.com/szagoruyko/cifar.torch</a>
 */
public Mat convertCifar(Mat orgImage) {
    numExamples++;
    Mat resImage = new Mat();
    OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
    //        ImageTransform yuvTransform = new ColorConversionTransform(new Random(seed), COLOR_BGR2Luv);
    //        ImageTransform histEqualization = new EqualizeHistTransform(new Random(seed), COLOR_BGR2Luv);
    ImageTransform yuvTransform = new ColorConversionTransform(new Random(seed), COLOR_BGR2YCrCb);
    ImageTransform histEqualization = new EqualizeHistTransform(new Random(seed), COLOR_BGR2YCrCb);

    if (converter != null) {
        ImageWritable writable = new ImageWritable(converter.convert(orgImage));
        // TODO: determine whether Y should be normalized before the transform - the OpenCV docs recommend it, but it is currently done after
        writable = yuvTransform.transform(writable); // convert to the YCrCb (chroma) color space to help emphasize image objects
        writable = histEqualization.transform(writable); // equalize the histogram to further clarify the object of interest
        resImage = converter.convert(writable.getFrame());
    }

    return resImage;
}
 
Example #5
Source File: ImageConversionUtils.java    From deeplearning4j with Apache License 2.0
public static BufferedImage makeRandomBufferedImage(int height, int width, int channels) {
    Mat img = makeRandomImage(height, width, channels);

    OpenCVFrameConverter.ToMat c = new OpenCVFrameConverter.ToMat();
    Java2DFrameConverter c2 = new Java2DFrameConverter();

    return c2.convert(c.convert(img));
}
 
Example #6
Source File: ConverterUtil.java    From marvinproject with GNU Lesser General Public License v3.0
public static IplImage bufferedToIplImage(BufferedImage bufImage) {
    OpenCVFrameConverter.ToIplImage iplConverter = new OpenCVFrameConverter.ToIplImage();
    Java2DFrameConverter java2dConverter = new Java2DFrameConverter();
    return iplConverter.convert(java2dConverter.convert(bufImage));
}
 
Example #7
Source File: TestNativeImageLoader.java    From DataVec with Apache License 2.0
BufferedImage makeRandomBufferedImage(int height, int width, int channels) {
    Mat img = makeRandomImage(height, width, channels);

    OpenCVFrameConverter.ToMat c = new OpenCVFrameConverter.ToMat();
    Java2DFrameConverter c2 = new Java2DFrameConverter();

    return c2.convert(c.convert(img));
}
 
Example #8
Source File: VirtualBall.java    From procamtracker with GNU General Public License v2.0
public static void main(String[] args) throws Exception {
        CanvasFrame frame = new CanvasFrame("Virtual Ball Test");
        OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
        IplImage image = IplImage.create(640, 960, IPL_DEPTH_8U, 3);
        cvSetZero(image);
        double[] roiPts = { 0,0, 640,0, 640,480, 0,400 };
        cvFillConvexPoly(image, new CvPoint(4).put((byte)16, roiPts), roiPts.length/2, CvScalar.WHITE, CV_AA, 16);
        VirtualBall virtualBall = new VirtualBall(new Settings(roiPts));

        for (int i = 0; i < 1000; i++) {
            Thread.sleep(100);
            cvSetZero(image);
            if (i == 50) {
                roiPts[5] -= 100;
            }
            if (i > 100 && i < 1200) {
                roiPts[3] += 1;
                roiPts[5] += 1;
            }
            //if (i > 103) {
            //    System.out.println(i);
            //}
            cvFillConvexPoly(image, new CvPoint(4).put((byte)16, roiPts), roiPts.length/2, CvScalar.WHITE, CV_AA, 16);
            virtualBall.draw(image, roiPts);
            frame.showImage(converter.convert(image));
        }
    }
 
Example #9
Source File: Chronometer.java    From procamtracker with GNU General Public License v2.0
public static void main(String[] args) throws Exception {
    CanvasFrame frame = new CanvasFrame("Chronometer Test");
    OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
    IplImage image = IplImage.create(640, 480, IPL_DEPTH_8U, 3);
    cvSetZero(image);
    Chronometer chronometer = new Chronometer(new Rectangle(100, 100, 100, 100), image);

    for (int i = 0; i < 1000; i++) {
        Thread.sleep(100);
        cvSetZero(image);
        chronometer.draw(image);
        frame.showImage(converter.convert(image));
    }
}
 
Example #10
Source File: ResizeImageTransform.java    From DataVec with Apache License 2.0
/**
 * Returns new ResizeImageTransform object
 *
 * @param random Random
 * @param newWidth new Width for the outcome images
 * @param newHeight new Height for outcome images
 */
public ResizeImageTransform(Random random, int newWidth, int newHeight) {
    super(random);

    this.newWidth = newWidth;
    this.newHeight = newHeight;
    this.converter = new OpenCVFrameConverter.ToMat();
}
 
Example #11
Source File: RandomCropTransform.java    From deeplearning4j with Apache License 2.0
public RandomCropTransform(Random random, long seed, int height, int width) {
    super(random);
    this.outputHeight = height;
    this.outputWidth = width;
    this.rng = Nd4j.getRandom();
    this.rng.setSeed(seed);
    this.converter = new OpenCVFrameConverter.ToMat();
}
 
Example #12
Source File: WarpImageTransform.java    From DataVec with Apache License 2.0
/**
 * Constructs an instance of the ImageTransform.
 *
 * @param random object to use (or null for deterministic)
 * @param dx1    maximum warping in x for the top-left corner (pixels)
 * @param dy1    maximum warping in y for the top-left corner (pixels)
 * @param dx2    maximum warping in x for the top-right corner (pixels)
 * @param dy2    maximum warping in y for the top-right corner (pixels)
 * @param dx3    maximum warping in x for the bottom-right corner (pixels)
 * @param dy3    maximum warping in y for the bottom-right corner (pixels)
 * @param dx4    maximum warping in x for the bottom-left corner (pixels)
 * @param dy4    maximum warping in y for the bottom-left corner (pixels)
 */
public WarpImageTransform(Random random, float dx1, float dy1, float dx2, float dy2, float dx3, float dy3,
                float dx4, float dy4) {
    super(random);
    deltas = new float[8];
    deltas[0] = dx1;
    deltas[1] = dy1;
    deltas[2] = dx2;
    deltas[3] = dy2;
    deltas[4] = dx3;
    deltas[5] = dy3;
    deltas[6] = dx4;
    deltas[7] = dy4;
    this.converter = new OpenCVFrameConverter.ToMat();
}
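A hedged usage sketch for a constructor like this one (not taken from DataVec itself): an ImageTransform is normally applied to an ImageWritable that wraps an OpenCV Mat via an OpenCVFrameConverter, as in the CifarLoader example above. The class name and lena.png path are placeholders, and the older org.bytedeco.javacpp package layout used elsewhere on this page is assumed.

import java.util.Random;
import org.bytedeco.javacpp.opencv_core.Mat;
import org.bytedeco.javacv.OpenCVFrameConverter;
import org.datavec.image.data.ImageWritable;
import org.datavec.image.transform.ImageTransform;
import org.datavec.image.transform.WarpImageTransform;
import static org.bytedeco.javacpp.opencv_imgcodecs.imread;

public class WarpSketch {
    public static void main(String[] args) {
        OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
        Mat input = imread("lena.png"); // placeholder image path

        // Allow up to 10 pixels of random warping at each of the four corners.
        ImageTransform warp = new WarpImageTransform(new Random(42), 10, 10, 10, 10, 10, 10, 10, 10);

        ImageWritable in = new ImageWritable(converter.convert(input));
        ImageWritable out = warp.transform(in);
        Mat warped = converter.convert(out.getFrame());
        System.out.println("Warped image: " + warped.cols() + "x" + warped.rows());
    }
}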
 
Example #13
Source File: LargestBlobCropTransform.java    From deeplearning4j with Apache License 2.0
/**
 *
 * @param random        Object to use (or null for deterministic)
 * @param mode          Contour retrieval mode
 * @param method        Contour approximation method
 * @param blurWidth     Width of blurring kernel size
 * @param blurHeight    Height of blurring kernel size
 * @param lowerThresh   Lower threshold for either Canny or Threshold
 * @param upperThresh   Upper threshold for either Canny or Threshold
 * @param isCanny       Whether the edge detector is Canny or Threshold
 */
public LargestBlobCropTransform(Random random, int mode, int method, int blurWidth, int blurHeight, int lowerThresh,
                int upperThresh, boolean isCanny) {
    super(random);
    this.rng = Nd4j.getRandom();
    this.mode = mode;
    this.method = method;
    this.blurWidth = blurWidth;
    this.blurHeight = blurHeight;
    this.lowerThresh = lowerThresh;
    this.upperThresh = upperThresh;
    this.isCanny = isCanny;
    this.converter = new OpenCVFrameConverter.ToMat();
}
 
Example #14
Source File: WarpImageTransform.java    From deeplearning4j with Apache License 2.0
/**
 * Constructs an instance of the ImageTransform.
 *
 * @param random object to use (or null for deterministic)
 * @param dx1    maximum warping in x for the top-left corner (pixels)
 * @param dy1    maximum warping in y for the top-left corner (pixels)
 * @param dx2    maximum warping in x for the top-right corner (pixels)
 * @param dy2    maximum warping in y for the top-right corner (pixels)
 * @param dx3    maximum warping in x for the bottom-right corner (pixels)
 * @param dy3    maximum warping in y for the bottom-right corner (pixels)
 * @param dx4    maximum warping in x for the bottom-left corner (pixels)
 * @param dy4    maximum warping in y for the bottom-left corner (pixels)
 */
public WarpImageTransform(Random random, float dx1, float dy1, float dx2, float dy2, float dx3, float dy3,
                float dx4, float dy4) {
    super(random);
    deltas = new float[8];
    deltas[0] = dx1;
    deltas[1] = dy1;
    deltas[2] = dx2;
    deltas[3] = dy2;
    deltas[4] = dx3;
    deltas[5] = dy3;
    deltas[6] = dx4;
    deltas[7] = dy4;
    this.converter = new OpenCVFrameConverter.ToMat();
}
 
Example #15
Source File: LargestBlobCropTransform.java    From DataVec with Apache License 2.0
/**
 *
 * @param random        Object to use (or null for deterministic)
 * @param mode          Contour retrieval mode
 * @param method        Contour approximation method
 * @param blurWidth     Width of blurring kernel size
 * @param blurHeight    Height of blurring kernel size
 * @param lowerThresh   Lower threshold for either Canny or Threshold
 * @param upperThresh   Upper threshold for either Canny or Threshold
 * @param isCanny       Whether the edge detector is Canny or Threshold
 */
public LargestBlobCropTransform(Random random, int mode, int method, int blurWidth, int blurHeight, int lowerThresh,
                int upperThresh, boolean isCanny) {
    super(random);
    this.rng = Nd4j.getRandom();
    this.mode = mode;
    this.method = method;
    this.blurWidth = blurWidth;
    this.blurHeight = blurHeight;
    this.lowerThresh = lowerThresh;
    this.upperThresh = upperThresh;
    this.isCanny = isCanny;
    this.converter = new OpenCVFrameConverter.ToMat();
}
 
Example #16
Source File: ResizeImageTransform.java    From deeplearning4j with Apache License 2.0
/**
 * Returns new ResizeImageTransform object
 *
 * @param random Random
 * @param newWidth new Width for the outcome images
 * @param newHeight new Height for outcome images
 */
public ResizeImageTransform(Random random, int newWidth, int newHeight) {
    super(random);

    this.newWidth = newWidth;
    this.newHeight = newHeight;
    this.converter = new OpenCVFrameConverter.ToMat();
}
 
Example #17
Source File: RandomCropTransform.java    From DataVec with Apache License 2.0
public RandomCropTransform(Random random, long seed, int height, int width) {
    super(random);
    this.outputHeight = height;
    this.outputWidth = width;
    this.rng = Nd4j.getRandom();
    this.rng.setSeed(seed);
    this.converter = new OpenCVFrameConverter.ToMat();
}
 
Example #18
Source File: ColoredObjectTracker.java    From ExoVisix with MIT License
public IplImage Equalize(BufferedImage bufferedimg) {
	Java2DFrameConverter converter1 = new Java2DFrameConverter();
	OpenCVFrameConverter.ToIplImage converter2 = new OpenCVFrameConverter.ToIplImage();
	IplImage iploriginal = converter2.convert(converter1.convert(bufferedimg));
	IplImage srcimg = IplImage.create(iploriginal.width(), iploriginal.height(), IPL_DEPTH_8U, 1);
	IplImage destimg = IplImage.create(iploriginal.width(), iploriginal.height(), IPL_DEPTH_8U, 1);
	cvCvtColor(iploriginal, srcimg, CV_BGR2GRAY);
	cvEqualizeHist(srcimg, destimg);
	return destimg;
}
 
Example #19
Source File: ColoredObjectTracker.java    From ExoVisix with MIT License
public void run() {
	try {
		grabber = FrameGrabber.createDefault(CAMERA_NUM);
		converter = new OpenCVFrameConverter.ToIplImage();
		grabber.start();

		int posX = 0;
		int posY = 0;
		while (true) {
			img = converter.convert(grabber.grab());
			if (img != null) {
				// show image on window
				cvFlip(img, img, 1); // flipMode > 0 flips around the y-axis (left-right mirror)
				canvas.showImage(converter.convert(img));
				IplImage detectThrs = getThresholdImage(img);

				CvMoments moments = new CvMoments();
				cvMoments(detectThrs, moments, 1);

				double mom10 = cvGetSpatialMoment(moments, 1, 0);
				double mom01 = cvGetSpatialMoment(moments, 0, 1);
				double area = cvGetCentralMoment(moments, 0, 0);
				posX = (int) (mom10 / area);
				posY = (int) (mom01 / area);
				// only if its a valid position
				if (posX > 0 && posY > 0) {
					paint(img, posX, posY);
				}
			}
			// Thread.sleep(INTERVAL);
		}
	} catch (Exception e) {
		e.printStackTrace(); // surface the error instead of swallowing it silently
	}
}
 
Example #20
Source File: TestNativeImageLoader.java    From deeplearning4j with Apache License 2.0
BufferedImage makeRandomBufferedImage(int height, int width, int channels) {
    Mat img = makeRandomImage(height, width, channels);

    OpenCVFrameConverter.ToMat c = new OpenCVFrameConverter.ToMat();
    Java2DFrameConverter c2 = new Java2DFrameConverter();

    return c2.convert(c.convert(img));
}
 
Example #21
Source File: VideoPlayer.java    From Java-Machine-Learning-for-Computer-Vision with MIT License
private void runVideoMainThread(Yolo yolo, String windowName,
                                String videoFileName,
                                OpenCVFrameConverter.ToMat toMat) throws Exception {
    FFmpegFrameGrabber grabber = initFrameGrabber(videoFileName);
    while (!stop) {
        Frame frame = grabber.grab();

        if (frame == null) {
            log.info("Stopping");
            stop();
            break;
        }
        if (frame.image == null) {
            continue;
        }

        Thread.sleep(60);
        opencv_core.Mat mat = toMat.convert(frame);
        opencv_core.Mat resizeMat = new opencv_core.Mat(yolo.getSelectedSpeed().height,
                yolo.getSelectedSpeed().width, mat.type());
        yolo.push(resizeMat, windowName);
        org.bytedeco.javacpp.opencv_imgproc.resize(mat, resizeMat, resizeMat.size());
        yolo.drawBoundingBoxesRectangles(frame, resizeMat, windowName);
        char key = (char) waitKey(20);
        // Exit this loop on escape:
        if (key == 27) {
            stop();
            break;
        }
    }
}
 
Example #22
Source File: TestNativeImageLoader.java    From deeplearning4j with Apache License 2.0
org.opencv.core.Mat makeRandomOrgOpenCvCoreMatImage(int height, int width, int channels) {
    Mat img = makeRandomImage(height, width, channels);

    Loader.load(org.bytedeco.opencv.opencv_java.class);
    OpenCVFrameConverter.ToOrgOpenCvCoreMat c = new OpenCVFrameConverter.ToOrgOpenCvCoreMat();

    return c.convert(c.convert(img));
}
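ToOrgOpenCvCoreMat is the converter to use when code must interoperate with the official OpenCV Java binding (org.opencv.core.Mat); as the example above shows, the opencv_java module has to be loaded first. The following is a minimal, hedged sketch of that bridge, assuming JavaCV 1.5.x packages and written purely for illustration.

import org.bytedeco.javacpp.Loader;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.OpenCVFrameConverter;

public class CoreMatBridge {
    public static void main(String[] args) {
        // The org.opencv.* classes need the opencv_java native module loaded first.
        Loader.load(org.bytedeco.opencv.opencv_java.class);

        OpenCVFrameConverter.ToMat toBytedecoMat = new OpenCVFrameConverter.ToMat();
        OpenCVFrameConverter.ToOrgOpenCvCoreMat toCoreMat = new OpenCVFrameConverter.ToOrgOpenCvCoreMat();

        // Start from an empty 480x640 3-channel bytedeco Mat.
        org.bytedeco.opencv.opencv_core.Mat bytedecoMat = new org.bytedeco.opencv.opencv_core.Mat(
                480, 640, org.bytedeco.opencv.global.opencv_core.CV_8UC3);

        // bytedeco Mat -> Frame -> org.opencv.core.Mat, without going through a BufferedImage.
        Frame frame = toBytedecoMat.convert(bytedecoMat);
        org.opencv.core.Mat coreMat = toCoreMat.convert(frame);
        System.out.println("org.opencv.core.Mat: " + coreMat.cols() + "x" + coreMat.rows());
    }
}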
 
Example #23
Source File: AndroidNativeImageLoader.java    From deeplearning4j with Apache License 2.0
public INDArray asMatrix(Bitmap image) throws IOException {
    if (converter == null) {
        converter = new OpenCVFrameConverter.ToMat();
    }
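    // converter2 is presumably the loader's AndroidFrameConverter (Bitmap <-> Frame), initialized elsewhere in the class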
    return asMatrix(converter.convert(converter2.convert(image)));
}
 
Example #24
Source File: TrackingWorker.java    From procamtracker with GNU General Public License v2.0
public void run() {
        final int maxLevel = alignerSettings.getPyramidLevelMax();

        try {
            RealityAugmentor.VirtualSettings virtualSettings = realityAugmentor.getVirtualSettings();
            if (aligner == null || (virtualSettings != null && virtualSettings.projectionType !=
                    RealityAugmentor.ProjectionType.FIXED)) {
                ProjectorBuffer pb = projectorBufferRing.get(1);
                if (trackingSettings.useOpenCL) {
                    ((ProCamTransformerCL)transformer).setProjectorImageCL(pb.imageCL, 0, maxLevel);
                }
                if (aligner == null || !trackingSettings.useOpenCL) {
                    // used during initialization, even for OpenCL
                    transformer.setProjectorImage(pb.image, 0, maxLevel);
                }
            }

            grabbedImage = grabberConverter.convert(frameGrabber.getDelayedFrame());
            if (grabbedImage == null) {
                grabbedImage = grabberConverter.convert(frameGrabber.grab());
            }
            if (grabbedImage != null) {
                // gamma "uncorrection", linearization
                double gamma = frameGrabber.getGamma();
                if (gamma != 1.0) {
                    Buffer buffer = grabbedImage.createBuffer();
                    int depth = OpenCVFrameConverter.getFrameDepth(grabbedImage.depth());
                    int stride = grabbedImage.widthStep() * 8 / Math.abs(depth);
                    Java2DFrameConverter.applyGamma(buffer, depth, stride, gamma);
                }
                if (trackingSettings.useOpenCL) {
                    if (aligner != null && alignerSettings.getDisplacementMax() > 0) {
                        double[] pts = aligner.getTransformedRoiPts();
                        int width  = grabbedImageCL.width;
                        int height = grabbedImageCL.height;
                        roi.x(0).y(0).width(width).height(height);
                        int padX = (int)Math.round(alignerSettings.getDisplacementMax()*width);
                        int padY = (int)Math.round(alignerSettings.getDisplacementMax()*height);
                        int align = 1<<(maxLevel+1);
                        // add +3 all around because pyrDown() needs it for smoothing
                        JavaCV.boundingRect(pts, roi, padX+3, padY+3, align, align);
                        cvSetImageROI(grabbedImage, roi);
                    } else {
                        cvResetImageROI(grabbedImage);
                    }
                    contextCL.writeImage(grabbedImageCL, grabbedImage, false);
                    cvResetImageROI(grabbedImage);
                    contextCL.remap(grabbedImageCL, undistortedCameraImageCL,
                            cameraMapxCL, cameraMapyCL, frameGrabber.getSensorPattern());
                    //contextCL.readImage(undistortedCameraImageCL, cameraInitFloatImages[0], true);
                    //monitorWindows[1].showImage(cameraInitFloatImages[0], true);
                    if (aligner != null) {
                        ((GNImageAlignerCL)aligner).setTargetImageCL(undistortedCameraImageCL);
                    }
                } else {
                    cameraDevice.undistort(grabbedImage, undistortedCameraImage);
                    if (aligner != null) {
                        aligner.setTargetImage(undistortedCameraImage);
                    }
                }
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
 
Example #25
Source File: CalibrationWorker.java    From procamcalib with GNU General Public License v2.0
public void init() throws Exception {
    // create arrays and canvas frames on the Event Dispatcher Thread...
    CameraDevice.Settings[] cs = cameraSettings.toArray();
    if (cameraDevices == null) {
        cameraDevices = new CameraDevice[cs.length];
    } else {
        cameraDevices = Arrays.copyOf(cameraDevices, cs.length);
    }
    cameraCanvasFrames = new CanvasFrame[cs.length];
    frameGrabbers = new FrameGrabber[cs.length];
    cameraFrameConverters = new OpenCVFrameConverter.ToIplImage[cs.length];
    for (int i = 0; i < cs.length; i++) {
        if (cameraDevices[i] == null) {
            cameraDevices[i] = new CameraDevice(cs[i]);
        } else {
            cameraDevices[i].setSettings(cs[i]);
        }
        if (cameraSettings.getMonitorWindowsScale() > 0) {
            cameraCanvasFrames[i] = new CanvasFrame(cs[i].getName());
            cameraCanvasFrames[i].setCanvasScale(cameraSettings.getMonitorWindowsScale());
        }
    }

    ProjectorDevice.Settings[] ps = projectorSettings.toArray();
    if (projectorDevices == null) {
        projectorDevices = new ProjectorDevice[ps.length];
    } else {
        projectorDevices = Arrays.copyOf(projectorDevices, ps.length);
    }
    projectorCanvasFrames = new CanvasFrame[ps.length];
    projectorPlanes = new MarkedPlane[ps.length];
    projectorFrameConverters = new OpenCVFrameConverter.ToIplImage[ps.length];
    for (int i = 0; i < ps.length; i++) {
        if (projectorDevices[i] == null) {
            projectorDevices[i] = new ProjectorDevice(ps[i]);
        } else {
            projectorDevices[i].setSettings(ps[i]);
        }
        projectorCanvasFrames[i] = projectorDevices[i].createCanvasFrame();
        projectorCanvasFrames[i].showColor(Color.BLACK);
        projectorFrameConverters[i] = new OpenCVFrameConverter.ToIplImage();
        Dimension dim = projectorCanvasFrames[i].getSize();
        projectorPlanes[i] = new MarkedPlane(dim.width, dim.height, markers[1], true,
                cvScalarAll(((ProjectorDevice.CalibrationSettings)ps[0]).getBrightnessForeground()*255),
                cvScalarAll(((ProjectorDevice.CalibrationSettings)ps[0]).getBrightnessBackground()*255), 4);
    }
}
 
Example #26
Source File: ConverterUtil.java    From marvinproject with GNU Lesser General Public License v3.0
public static IplImage frametoIplImage(Frame frame) {
	OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
	return converter.convert(frame);
}
 
Example #27
Source File: NativeCodecRecordReader.java    From deeplearning4j with Apache License 2.0
@Override
public void setConf(Configuration conf) {
    super.setConf(conf);
    converter = new OpenCVFrameConverter.ToMat();
    imageLoader = new NativeImageLoader(rows, cols);
}
 
Example #28
Source File: CameraOpenCV.java    From PapARt with GNU Lesser General Public License v3.0
protected CameraOpenCV(int cameraNo) {
    this.systemNumber = cameraNo;
    this.setPixelFormat(PixelFormat.BGR);
    converter = new OpenCVFrameConverter.ToIplImage();
}
 
Example #29
Source File: ConverterUtil.java    From marvinproject with GNU Lesser General Public License v3.0
public static BufferedImage IplImageToBufferedImage(IplImage src) {
	OpenCVFrameConverter.ToIplImage grabberConverter = new OpenCVFrameConverter.ToIplImage();
	Java2DFrameConverter paintConverter = new Java2DFrameConverter();
	Frame frame = grabberConverter.convert(src);
	return paintConverter.getBufferedImage(frame, 1);
}
 
Example #30
Source File: ObjectFinder.java    From PapARt with GNU Lesser General Public License v3.0
public static void main(String[] args) throws Exception {
//        Logger.getLogger("org.bytedeco.javacv").setLevel(Level.OFF);

//        String objectFilename = args.length == 2 ? args[0] : "/home/jiii/sketchbook/libraries/PapARt/data/markers/dlink.png";
        String objectFilename = args.length == 2 ? args[0] : "/home/jiii/repos/Papart-github/papart-examples/Camera/ExtractPlanarObjectForTracking/ExtractedView.bmp";
        String sceneFilename = args.length == 2 ? args[1] : "/home/jiii/my_photo-7.jpg";

        IplImage object = cvLoadImage(objectFilename, CV_LOAD_IMAGE_GRAYSCALE);
        IplImage image = cvLoadImage(sceneFilename, CV_LOAD_IMAGE_GRAYSCALE);
        if (object == null || image == null) {
            System.err.println("Can not load " + objectFilename + " and/or " + sceneFilename);
            System.exit(-1);
        }

        IplImage objectColor = IplImage.create(object.width(), object.height(), 8, 3);
        cvCvtColor(object, objectColor, CV_GRAY2BGR);

        IplImage correspond = IplImage.create(image.width(), object.height() + image.height(), 8, 1);
        cvSetImageROI(correspond, cvRect(0, 0, object.width(), object.height()));
        cvCopy(object, correspond);
        cvSetImageROI(correspond, cvRect(0, object.height(), correspond.width(), correspond.height()));
        cvCopy(image, correspond);
        cvResetImageROI(correspond);

        ObjectFinder.Settings settings = new ObjectFinder.Settings();
        settings.objectImage = object;
        settings.useFLANN = true;
        settings.ransacReprojThreshold = 5;
        ObjectFinder finder = new ObjectFinder(settings);

        long start = System.currentTimeMillis();
        double[] dst_corners = finder.find(image);
//        System.out.println("Finding time = " + (System.currentTimeMillis() - start) + " ms");

        if (dst_corners != null) {
            for (int i = 0; i < 4; i++) {
                int j = (i + 1) % 4;
                int x1 = (int) Math.round(dst_corners[2 * i]);
                int y1 = (int) Math.round(dst_corners[2 * i + 1]);
                int x2 = (int) Math.round(dst_corners[2 * j]);
                int y2 = (int) Math.round(dst_corners[2 * j + 1]);
                line(cvarrToMat(correspond), new Point(x1, y1 + object.height()),
                        new Point(x2, y2 + object.height()),
                        Scalar.WHITE, 1, 8, 0);
            }
        }

        for (int i = 0; i < finder.ptpairs.size(); i += 2) {
            Point2f pt1 = finder.objectKeypoints.get(finder.ptpairs.get(i)).pt();
            Point2f pt2 = finder.imageKeypoints.get(finder.ptpairs.get(i + 1)).pt();
            line(cvarrToMat(correspond), new Point(Math.round(pt1.x()), Math.round(pt1.y())),
                    new Point(Math.round(pt2.x()), Math.round(pt2.y() + object.height())),
                    Scalar.WHITE, 1, 8, 0);
        }

        CanvasFrame objectFrame = new CanvasFrame("Object");
        CanvasFrame correspondFrame = new CanvasFrame("Object Correspond");
        OpenCVFrameConverter converter = new OpenCVFrameConverter.ToIplImage();

        correspondFrame.showImage(converter.convert(correspond));
        for (int i = 0; i < finder.objectKeypoints.size(); i++) {
            KeyPoint r = finder.objectKeypoints.get(i);
            Point center = new Point(Math.round(r.pt().x()), Math.round(r.pt().y()));
            int radius = Math.round(r.size() / 2);
            circle(cvarrToMat(objectColor), center, radius, Scalar.RED, 1, 8, 0);
        }
        objectFrame.showImage(converter.convert(objectColor));

        objectFrame.waitKey();

        objectFrame.dispose();
        correspondFrame.dispose();
    }