com.google.android.gms.vision.Frame Java Examples

The following examples show how to use com.google.android.gms.vision.Frame. Each example is taken from an open-source project; the source file and its license are noted in the heading above the code.
Example #1
Source File: ImageData.java    From PrivacyStreams with Apache License 2.0
List<TextBlock> detectTextBlocks(UQI uqi) {
    List<TextBlock> result = new ArrayList<>();
    Bitmap bitmap = this.getBitmap(uqi);
    if (bitmap == null) return result;
    TextRecognizer textRecognizer = new TextRecognizer.Builder(uqi.getContext()).build();
    if (!textRecognizer.isOperational()) {
        Logging.warn("TextRecognizer is not operational");
        textRecognizer.release();
        return result;
    }
    Frame imageFrame = new Frame.Builder().setBitmap(bitmap).build();
    SparseArray<TextBlock> textBlocks = textRecognizer.detect(imageFrame);
    for (int i = 0; i < textBlocks.size(); i++) {
        TextBlock textBlock = textBlocks.get(textBlocks.keyAt(i));
        result.add(textBlock);
    }
    textRecognizer.release();
    return result;
}
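The pattern above (build the recognizer, check isOperational(), wrap the bitmap in a Frame, detect, release) is the core lifecycle that recurs throughout these examples. For reference, a self-contained condensation of it might look like the following; OcrHelper is illustrative and not part of PrivacyStreams:

import android.content.Context;
import android.graphics.Bitmap;
import android.util.SparseArray;
import com.google.android.gms.vision.Frame;
import com.google.android.gms.vision.text.TextBlock;
import com.google.android.gms.vision.text.TextRecognizer;

final class OcrHelper {
    /** Runs OCR on a bitmap, returning an empty result if the recognizer is unavailable. */
    static SparseArray<TextBlock> recognize(Context context, Bitmap bitmap) {
        TextRecognizer recognizer = new TextRecognizer.Builder(context).build();
        try {
            if (!recognizer.isOperational()) {
                // Native dependencies may still be downloading; callers can retry later.
                return new SparseArray<>();
            }
            Frame frame = new Frame.Builder().setBitmap(bitmap).build();
            return recognizer.detect(frame);
        } finally {
            recognizer.release(); // always free the native detector
        }
    }
}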
 
Example #2
Source File: PhotoPaintView.java    From TelePlus-Android with GNU General Public License v2.0
private int getFrameRotation() {
    switch (orientation) {
        case 90: {
            return Frame.ROTATION_90;
        }

        case 180: {
            return Frame.ROTATION_180;
        }

        case 270: {
            return Frame.ROTATION_270;
        }

        default: {
            return Frame.ROTATION_0;
        }
    }
}
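The Frame.ROTATION_* constant returned above is intended for Frame.Builder.setRotation(), which tells downstream detectors how to upright the image. A short illustrative fragment (the bitmap parameter is assumed, not taken from PhotoPaintView):

private Frame buildFrame(Bitmap bitmap) {
    return new Frame.Builder()
            .setBitmap(bitmap)
            .setRotation(getFrameRotation()) // Frame.ROTATION_0/90/180/270
            .build();
}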
 
Example #3
Source File: PhotoPaintView.java    From Telegram with GNU General Public License v2.0
private int getFrameRotation() {
    switch (originalBitmapRotation) {
        case 90: {
            return Frame.ROTATION_90;
        }

        case 180: {
            return Frame.ROTATION_180;
        }

        case 270: {
            return Frame.ROTATION_270;
        }

        default: {
            return Frame.ROTATION_0;
        }
    }
}
 
Example #4
Source File: FaceOverlayView.java    From AndroidDemoProjects with Apache License 2.0
public void setBitmap( Bitmap bitmap ) {
    mBitmap = bitmap;
    FaceDetector detector = new FaceDetector.Builder( getContext() )
            .setTrackingEnabled(true)
            .setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setMode(FaceDetector.ACCURATE_MODE)
            .build();

    if (!detector.isOperational()) {
        //Handle contingency
    } else {
        Frame frame = new Frame.Builder().setBitmap(bitmap).build();
        mFaces = detector.detect(frame);
        detector.release();
    }
    logFaceData();
    invalidate();
}
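Note that detector.detect() is synchronous, and ACCURATE_MODE with all landmarks can take noticeable time on large bitmaps, so calling this setter from the UI thread may cause jank. A hedged variant that moves detection to a background executor, assuming the method lives in the same View subclass and reuses a long-lived detector field:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

private final ExecutorService detectionExecutor = Executors.newSingleThreadExecutor();

public void setBitmapAsync(final Bitmap bitmap) {
    detectionExecutor.execute(new Runnable() {
        @Override
        public void run() {
            Frame frame = new Frame.Builder().setBitmap(bitmap).build();
            final SparseArray<Face> faces = detector.detect(frame); // blocking call
            post(new Runnable() { // View.post: hop back to the UI thread
                @Override
                public void run() {
                    mBitmap = bitmap;
                    mFaces = faces;
                    logFaceData();
                    invalidate();
                }
            });
        }
    });
}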
 
Example #5
Source File: DocumentDetector.java    From CVScanner with GNU General Public License v3.0
Document detectDocument(Frame frame){
    Size imageSize = new Size(frame.getMetadata().getWidth(), frame.getMetadata().getHeight());
    Mat src = new Mat();
    Utils.bitmapToMat(frame.getBitmap(), src);
    List<MatOfPoint> contours = CVProcessor.findContours(src);
    src.release();

    if(!contours.isEmpty()){
        CVProcessor.Quadrilateral quad = CVProcessor.getQuadrilateral(contours, imageSize);

        if(quad != null){
            quad.points = CVProcessor.getUpscaledPoints(quad.points, CVProcessor.getScaleRatio(imageSize));
            return new Document(frame, quad);
        }
    }

    return null;
}
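This detector mixes the vision Frame API with OpenCV (Mat, Utils.bitmapToMat, plus the project's CVProcessor helpers), so OpenCV's native library must be loaded before any Mat is created. A minimal guard, assuming the standard OpenCV Android SDK:

import org.opencv.android.OpenCVLoader;

// Run once (e.g. in Application.onCreate()) before touching Mat or Utils.
if (!OpenCVLoader.initDebug()) {
    Log.e("CVScanner", "OpenCV failed to load; document detection is unavailable");
}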
 
Example #6
Source File: Camera2Source.java    From Camera2Vision with Apache License 2.0
private int getDetectorOrientation(int sensorOrientation) {
    switch (sensorOrientation) {
        case 0:
            return Frame.ROTATION_0;
        case 90:
            return Frame.ROTATION_90;
        case 180:
            return Frame.ROTATION_180;
        case 270:
            return Frame.ROTATION_270;
        case 360:
            return Frame.ROTATION_0;
        default:
            // SENSOR_ORIENTATION is always 0, 90, 180 or 270 per the Camera2 docs,
            // so this fallback is effectively unreachable.
            return Frame.ROTATION_90;
    }
}
 
Example #7
Source File: FaceOverlayView.java    From Eye-blink-detector with MIT License
public void setBitmap( Bitmap bitmap ) {
    mBitmap = bitmap;

    if (!detector.isOperational()) {
        //Handle contingency
    } else {
        //Log.d("time1", SystemClock.currentThreadTimeMillis()+"");
        Frame frame = new Frame.Builder().setBitmap(bitmap).build();
        mFaces = detector.detect(frame);
    }

    if(isEyeBlinked()){
        Log.d("isEyeBlinked","eye blink is observed");
        blinkCount++;
        CameraActivity.showScore(blinkCount);
    }

    invalidate();
}
 
Example #8
Source File: Camera2Source.java    From Machine-Learning-Projects-for-Mobile-Applications with MIT License
private int getDetectorOrientation(int sensorOrientation) {
    switch (sensorOrientation) {
        case 0:
            return Frame.ROTATION_0;
        case 90:
            return Frame.ROTATION_90;
        case 180:
            return Frame.ROTATION_180;
        case 270:
            return Frame.ROTATION_270;
        case 360:
            return Frame.ROTATION_0;
        default:
            return Frame.ROTATION_90;
    }
}
 
Example #9
Source File: CameraSource.java    From flutter_barcode_scanner with MIT License
@Override
public void run() {
    Frame outputFrame;
    ByteBuffer data;

    while (true) {
        synchronized (mLock) {
            while (mActive && (mPendingFrameData == null)) {
                try {
                    mLock.wait();
                } catch (InterruptedException e) {
                    return;
                }
            }

            if (!mActive) {
                return;
            }

            outputFrame = new Frame.Builder()
                    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                            mPreviewSize.getHeight(), ImageFormat.NV21)
                    .setId(mPendingFrameId)
                    .setTimestampMillis(mPendingTimeMillis)
                    .setRotation(mRotation)
                    .build();

            data = mPendingFrameData;
            mPendingFrameData = null;
        }


        try {
            mDetector.receiveFrame(outputFrame);
        } catch (Throwable t) {
            // Detector errors are deliberately swallowed so the loop keeps running.
        } finally {
            mCamera.addCallbackBuffer(data.array());
        }
    }
}
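This is a condensed copy of the android-vision CameraSource frame-processing loop; the producer side is omitted. Frames arrive roughly as in the sketch below (a paraphrase of the upstream pattern, not the exact flutter_barcode_scanner source): the camera preview callback stores the newest buffer, dropping any unprocessed one, and wakes the loop.

void setNextFrame(byte[] data, Camera camera) {
    synchronized (mLock) {
        if (mPendingFrameData != null) {
            // A frame is already waiting: drop it and hand its buffer back to the camera.
            camera.addCallbackBuffer(mPendingFrameData.array());
            mPendingFrameData = null;
        }
        mPendingTimeMillis = SystemClock.elapsedRealtime() - mStartTimeMillis;
        mPendingFrameId++;
        mPendingFrameData = ByteBuffer.wrap(data);
        mLock.notifyAll(); // wake the processing loop
    }
}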
 
Example #10
Source File: MainActivity.java    From text-detector with MIT License
public void detectText(View view) {
    Bitmap textBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.cat);

    TextRecognizer textRecognizer = new TextRecognizer.Builder(this).build();

    if (!textRecognizer.isOperational()) {
        new AlertDialog.Builder(this)
                .setMessage("Text recognizer could not be set up on your device :(")
                .show();
        return;
    }

    Frame frame = new Frame.Builder().setBitmap(textBitmap).build();
    SparseArray<TextBlock> text = textRecognizer.detect(frame);

    for (int i = 0; i < text.size(); ++i) {
        TextBlock item = text.valueAt(i);
        if (item != null && item.getValue() != null) {
            // setText() replaces the view's content, so only the last block remains visible.
            detectedTextView.setText(item.getValue());
        }
    }
}
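Because setText() replaces the view's content, the loop above shows only the last detected block. To display everything at once, accumulate first and update the view a single time; a small variant:

StringBuilder sb = new StringBuilder();
for (int i = 0; i < text.size(); ++i) {
    TextBlock item = text.valueAt(i);
    if (item != null && item.getValue() != null) {
        sb.append(item.getValue()).append('\n');
    }
}
detectedTextView.setText(sb.toString());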
 
Example #11
Source File: DocumentDetector.java    From CVScanner with GNU General Public License v3.0
@Override
public SparseArray<Document> detect(Frame frame) {
    SparseArray<Document> detections = new SparseArray<>();
    if(frame.getBitmap() != null) {
        Document doc = detectDocument(frame);

        if (doc != null) detections.append(frame.getMetadata().getId(), doc);
    }

    return detections;
}
 
Example #12
Source File: SafeFaceDetector.java    From face-detection-ane with Apache License 2.0
/**
 * Creates a new frame based on the original frame, with additional height on the bottom to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameBottom( Frame originalFrame, int newHeight ) {
	Frame.Metadata metadata = originalFrame.getMetadata();
	int width = metadata.getWidth();
	int height = metadata.getHeight();

	Log.i( TAG, "Padded image from: " + width + "x" + height + " to " + width + "x" + newHeight );

	ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
	int origOffset = origBuffer.arrayOffset();
	byte[] origBytes = origBuffer.array();

	// This can be changed to just .allocate in the future, when Frame supports non-direct
	// byte buffers.
	ByteBuffer paddedBuffer = ByteBuffer.allocateDirect( width * newHeight );
	int paddedOffset = paddedBuffer.arrayOffset();
	byte[] paddedBytes = paddedBuffer.array();
	Arrays.fill( paddedBytes, (byte) 0 );

	// Copy the image content from the original, without bothering to fill in the padded bottom
	// part.
	for( int y = 0; y < height; ++y ) {
		int origStride = origOffset + y * width;
		int paddedStride = paddedOffset + y * width;
		System.arraycopy( origBytes, origStride, paddedBytes, paddedStride, width );
	}

	return new Frame.Builder()
			.setImageData( paddedBuffer, width, newHeight, ImageFormat.NV21 )
			.setId( metadata.getId() )
			.setRotation( metadata.getRotation() )
			.setTimestampMillis( metadata.getTimestampMillis() )
			.build();
}
 
Example #13
Source File: SafeFaceDetector.java    From face-detection-ane with Apache License 2.0
/**
 * Creates a new frame based on the original frame, with additional width on the right to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameRight( Frame originalFrame, int newWidth ) {
	Frame.Metadata metadata = originalFrame.getMetadata();
	int width = metadata.getWidth();
	int height = metadata.getHeight();

	Log.i( TAG, "Padded image from: " + width + "x" + height + " to " + newWidth + "x" + height );

	ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
	int origOffset = origBuffer.arrayOffset();
	byte[] origBytes = origBuffer.array();

	// This can be changed to just .allocate in the future, when Frame supports non-direct
	// byte buffers.
	ByteBuffer paddedBuffer = ByteBuffer.allocateDirect( newWidth * height );
	int paddedOffset = paddedBuffer.arrayOffset();
	byte[] paddedBytes = paddedBuffer.array();
	Arrays.fill( paddedBytes, (byte) 0 );

	for( int y = 0; y < height; ++y ) {
		int origStride = origOffset + y * width;
		int paddedStride = paddedOffset + y * newWidth;
		System.arraycopy( origBytes, origStride, paddedBytes, paddedStride, width );
	}

	return new Frame.Builder()
			.setImageData( paddedBuffer, newWidth, height, ImageFormat.NV21 )
			.setId( metadata.getId() )
			.setRotation( metadata.getRotation() )
			.setTimestampMillis( metadata.getTimestampMillis() )
			.build();
}
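Both pad helpers exist so the wrapped detector never sees a frame smaller than the size that triggers the underlying bug; a wrapping detect() would decide which padding to apply. A hedged sketch of that dispatch (the 64-pixel minimum and the mDelegate field are illustrative, not values from the library):

private static final int MIN_DIMENSION = 64; // illustrative threshold, not the documented one

public SparseArray<Face> detectSafely(Frame frame) {
    int width = frame.getMetadata().getWidth();
    int height = frame.getMetadata().getHeight();
    if (width < MIN_DIMENSION) {
        frame = padFrameRight(frame, MIN_DIMENSION);
    } else if (height < MIN_DIMENSION) {
        frame = padFrameBottom(frame, MIN_DIMENSION);
    }
    return mDelegate.detect(frame); // mDelegate: the wrapped FaceDetector
}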
 
Example #14
Source File: FaceRecognition.java    From MagicalCamera with Apache License 2.0
/**
 * Performs the face detection; it is called from other methods to automate the process.
 * @param stroke the thickness of the line drawn around each face
 * @param color the color of the rectangle used to mark each face
 * @param activity the current activity
 * @param photo your photo
 * @return the annotated bitmap, the original photo if nothing was drawn, or null on failure
 */
private Bitmap faceDetection(int stroke, int color, Activity activity, Bitmap photo){
    this.detector = new FaceDetector.Builder(activity)
            .setMode(FaceDetector.ACCURATE_MODE)
            .setLandmarkType(FaceDetector.ALL_LANDMARKS)
            .setClassificationType(FaceDetector.ALL_CLASSIFICATIONS)
            .setTrackingEnabled(false)
            .build();
    try {
        if (!this.detector.isOperational()) {
            this.detector.release(); // avoid leaking the native detector on this path
            return null;
        }

        //Add the image on a Frame object
        Frame frame = new Frame.Builder()
                .setBitmap(photo)
                .build();

        //Detect all faces from Frame object
        SparseArray<Face> faceArray = detector.detect(frame);

        //Do some drawing on faces
        Bitmap outBitmap = drawOnFace(faceArray, photo, stroke, color);

        //Releasing the detector object
        this.detector.release();
        return (outBitmap != null) ? outBitmap : photo;
    } catch (Exception ev) {
        return null; // note: the detector is not released on this path
    }
}
 
Example #15
Source File: DocumentScannerFragment.java    From CVScanner with GNU General Public License v3.0
void detectDocumentManually(final byte[] data){
    Log.d("Scanner", "detecting document manually");
    new Thread(new Runnable() {
        @Override
        public void run() {
            Bitmap image = BitmapFactory.decodeByteArray(data, 0, data.length);
            if(image != null){
                final SparseArray<Document> docs = IDDetector.detect(new Frame.Builder()
                        .setBitmap(image)
                        .build());

                if(docs != null && docs.size() > 0){
                    Log.d("Scanner", "detected document manually");
                    // get(0) looks up key 0 (the Frame's default id); valueAt(0) would be positional.
                    final Document doc = docs.get(0);

                    getActivity().runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            processDocument(doc);
                        }
                    });
                }
                else{
                    getActivity().finish();
                }
            }
        }
    }).start();
}
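BitmapFactory.decodeByteArray on a full-resolution capture can allocate a very large bitmap before detection even starts. If memory pressure is a concern, downsampling during decode is the usual remedy (a standard BitmapFactory pattern, not part of CVScanner):

BitmapFactory.Options options = new BitmapFactory.Options();
options.inSampleSize = 2; // powers of two; larger values decode a smaller bitmap
Bitmap image = BitmapFactory.decodeByteArray(data, 0, data.length, options);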
 
Example #16
Source File: PassportDetector.java    From CVScanner with GNU General Public License v3.0
@Override
public SparseArray<Document> detect(Frame frame) {
    SparseArray<Document> detections = new SparseArray<>();
    Document doc = detectDocument(frame);

    if(doc != null) detections.append(frame.getMetadata().getId(), doc);

    return detections;
}
 
Example #17
Source File: SafeFaceDetector.java    From android-vision with Apache License 2.0
/**
 * Creates a new frame based on the original frame, with additional width on the right to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameRight(Frame originalFrame, int newWidth) {
    Frame.Metadata metadata = originalFrame.getMetadata();
    int width = metadata.getWidth();
    int height = metadata.getHeight();

    Log.i(TAG, "Padded image from: " + width + "x" + height + " to " + newWidth + "x" + height);

    ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
    int origOffset = origBuffer.arrayOffset();
    byte[] origBytes = origBuffer.array();

    // This can be changed to just .allocate in the future, when Frame supports non-direct
    // byte buffers.
    ByteBuffer paddedBuffer = ByteBuffer.allocateDirect(newWidth * height);
    int paddedOffset = paddedBuffer.arrayOffset();
    byte[] paddedBytes = paddedBuffer.array();
    Arrays.fill(paddedBytes, (byte) 0);

    for (int y = 0; y < height; ++y) {
        int origStride = origOffset + y * width;
        int paddedStride = paddedOffset + y * newWidth;
        System.arraycopy(origBytes, origStride, paddedBytes, paddedStride, width);
    }

    return new Frame.Builder()
            .setImageData(paddedBuffer, newWidth, height, ImageFormat.NV21)
            .setId(metadata.getId())
            .setRotation(metadata.getRotation())
            .setTimestampMillis(metadata.getTimestampMillis())
            .build();
}
 
Example #18
Source File: SafeFaceDetector.java    From android-vision with Apache License 2.0
/**
 * Creates a new frame based on the original frame, with additional height on the bottom to
 * increase the size to avoid the bug in the underlying face detector.
 */
private Frame padFrameBottom(Frame originalFrame, int newHeight) {
    Frame.Metadata metadata = originalFrame.getMetadata();
    int width = metadata.getWidth();
    int height = metadata.getHeight();

    Log.i(TAG, "Padded image from: " + width + "x" + height + " to " + width + "x" + newHeight);

    ByteBuffer origBuffer = originalFrame.getGrayscaleImageData();
    int origOffset = origBuffer.arrayOffset();
    byte[] origBytes = origBuffer.array();

    // This can be changed to just .allocate in the future, when Frame supports non-direct
    // byte buffers.
    ByteBuffer paddedBuffer = ByteBuffer.allocateDirect(width * newHeight);
    int paddedOffset = paddedBuffer.arrayOffset();
    byte[] paddedBytes = paddedBuffer.array();
    Arrays.fill(paddedBytes, (byte) 0);

    // Copy the image content from the original, without bothering to fill in the padded bottom
    // part.
    for (int y = 0; y < height; ++y) {
        int origStride = origOffset + y * width;
        int paddedStride = paddedOffset + y * width;
        System.arraycopy(origBytes, origStride, paddedBytes, paddedStride, width);
    }

    return new Frame.Builder()
            .setImageData(paddedBuffer, width, newHeight, ImageFormat.NV21)
            .setId(metadata.getId())
            .setRotation(metadata.getRotation())
            .setTimestampMillis(metadata.getTimestampMillis())
            .build();
}
 
Example #19
Source File: MainActivity.java    From Beginner-Level-Android-Studio-Apps with GNU General Public License v3.0
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    imageView = (ImageView) findViewById(R.id.imageView);
    btnScan = (Button) findViewById(R.id.btnScan);
    txtResult = (TextView) findViewById(R.id.txtResult);

    final Bitmap myBitmap = BitmapFactory.decodeResource(getApplicationContext().
            getResources(), R.drawable.qrcode);
    imageView.setImageBitmap(myBitmap);

    btnScan.setOnClickListener(new View.OnClickListener(){

        @Override
        public void onClick(View v) {
            BarcodeDetector detector = new BarcodeDetector.Builder(getApplicationContext())
                    .setBarcodeFormats(Barcode.QR_CODE)
                    .build();

            Frame frame = new Frame.Builder()
                    .setBitmap(myBitmap)
                    .build();
            SparseArray<Barcode> barsCode = detector.detect(frame);

            if (barsCode.size() > 0) {
                Barcode result = barsCode.valueAt(0);
                txtResult.setText(result.rawValue);
            } else {
                // Guard: valueAt(0) throws if nothing was detected.
                txtResult.setText("No barcode found");
            }
        }
    });
}
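valueAt(0) reads only the first detection; when several codes may appear in one image, iterating the whole SparseArray is safer. A sketch reusing the variables above:

StringBuilder all = new StringBuilder();
for (int i = 0; i < barsCode.size(); i++) {
    all.append(barsCode.valueAt(i).rawValue).append('\n');
}
txtResult.setText(all.toString());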
 
Example #20
Source File: ImageTextReader.java    From loco-answers with GNU General Public License v3.0
public String[] getTextFromBitmap2(Bitmap src) {
    if (textRecognizer.isOperational() && src != null) {
        Frame frame = new Frame.Builder().setBitmap(src).build();
        SparseArray<TextBlock> textBlocks = textRecognizer.detect(frame);
        String blocks = "";
        String lines = "";
        for (int index = 0; index < textBlocks.size(); index++) {
            TextBlock tBlock = textBlocks.valueAt(index);
            blocks = blocks + tBlock.getValue() + "\n";
            for (Text line : tBlock.getComponents()) {
                lines = lines + line.getValue() + "\n";
            }
        }

        if (textBlocks.size() == 0) {
            // Log.d(TAG, "getTextFromBitmap: Scan Failed: Found nothing to scan");
            return new String[]{"Scan Failed: Found nothing to scan"};
        } else {
            String[] textOnScreen = lines.split("\n");
            int lineCount = textOnScreen.length;
            if (lineCount > 3) {
                String question = "";
                for (int i = 0; i < lineCount - 3; i++) {
                    question += textOnScreen[i]; // joins question lines without a separating space
                }
                return new String[]{question, textOnScreen[lineCount - 3], textOnScreen[lineCount - 2], textOnScreen[lineCount - 1]};

            }
            return new String[]{"Scan Failed: Could not read options"};

        }
    } else {
        Log.d(TAG, "getTextFromBitmap: Could not set up the detector!");
        return new String[]{"Scan Failed:  Could not set up the detector!"};
    }
}
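TextRecognizer returns blocks keyed by id in a SparseArray, and iteration order is not guaranteed to match reading order, which matters for position-sensitive parsing like the answer extraction above. Sorting blocks by vertical position first is a common precaution (a sketch using the TextBlock bounding box):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

List<TextBlock> ordered = new ArrayList<>();
for (int i = 0; i < textBlocks.size(); i++) {
    ordered.add(textBlocks.valueAt(i));
}
Collections.sort(ordered, new Comparator<TextBlock>() {
    @Override
    public int compare(TextBlock a, TextBlock b) {
        return Integer.compare(a.getBoundingBox().top, b.getBoundingBox().top);
    }
});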
 
Example #21
Source File: CameraSource.java    From AndroidApp with GNU Affero General Public License v3.0
/**
 * As long as the processing thread is active, this executes detection on frames
 * continuously.  The next pending frame is either immediately available or hasn't been
 * received yet.  Once it is available, we transfer the frame info to local variables and
 * run detection on that frame.  It immediately loops back for the next frame without
 * pausing.
 * <p/>
 * If detection takes longer than the time in between new frames from the camera, this will
 * mean that this loop will run without ever waiting on a frame, avoiding any context
 * switching or frame acquisition time latency.
 * <p/>
 * If you find that this is using more CPU than you'd like, you should probably decrease the
 * FPS setting above to allow for some idle time in between frames.
 */
@Override
public void run() {
    Frame outputFrame;
    ByteBuffer data;

    while (true) {
        synchronized (mLock) {
            while (mActive && (mPendingFrameData == null)) {
                try {
                    // Wait for the next frame to be received from the camera, since we
                    // don't have it yet.
                    mLock.wait();
                } catch (InterruptedException e) {
                    Log.d(TAG, "Frame processing loop terminated.", e);
                    return;
                }
            }

            if (!mActive) {
                // Exit the loop once this camera source is stopped or released.  We check
                // this here, immediately after the wait() above, to handle the case where
                // setActive(false) had been called, triggering the termination of this
                // loop.
                return;
            }

            outputFrame = new Frame.Builder()
                    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                            mPreviewSize.getHeight(), ImageFormat.NV21)
                    .setId(mPendingFrameId)
                    .setTimestampMillis(mPendingTimeMillis)
                    .setRotation(mRotation)
                    .build();

            // Hold onto the frame data locally, so that we can use this for detection
            // below.  We need to clear mPendingFrameData to ensure that this buffer isn't
            // recycled back to the camera before we are done using that data.
            data = mPendingFrameData;
            mPendingFrameData = null;
        }

        // The code below needs to run outside of synchronization, because this will allow
        // the camera to add pending frame(s) while we are running detection on the current
        // frame.

        try {
            mDetector.receiveFrame(outputFrame);
        } catch (Throwable t) {
            Log.e(TAG, "Exception thrown from receiver.", t);
        } finally {
            mCamera.addCallbackBuffer(data.array());
        }
    }
}
 
Example #22
Source File: Document.java    From CVScanner with GNU General Public License v3.0
public Frame getImage() {
    return image;
}
 
Example #23
Source File: DetectFacesFunction.java    From face-detection-ane with Apache License 2.0
@Override
public FREObject call( FREContext context, FREObject[] args ) {
	super.call( context, args );

	AIR.log( "FaceDetection::detect" );

	final int callbackId = FREObjectUtils.getInt( args[1] );
	final Bitmap image;
	try {
		image = BitmapDataUtils.getBitmap( (FREBitmapData) args[0] );
	} catch( Exception e ) {
		e.printStackTrace();
		AIR.log( "Error creating Bitmap out of FREBitmapData" );
		AIR.dispatchEvent(
				FaceDetectionEvent.FACE_DETECTION_ERROR,
				StringUtils.getEventErrorJSON( callbackId, "Error creating Bitmap out of FREBitmapData" )
		);
		return null;
	}
	/* Mode (accuracy) */
	final int accuracy = FREObjectUtils.getInt( args[2] ); // Comes in as a ready-to-use value
	boolean detectOpenEyes = FREObjectUtils.getBoolean( args[3] );
	boolean detectSmile = FREObjectUtils.getBoolean( args[4] );
	final boolean prominentFaceOnly = FREObjectUtils.getBoolean( args[5] );
	/* Classification type (detect open eyes, detect smile) */
	final int classification = (detectOpenEyes || detectSmile) ? FaceDetector.ALL_CLASSIFICATIONS : FaceDetector.NO_CLASSIFICATIONS;

	final Activity activity = AIR.getContext().getActivity();

	new Thread(
			new Runnable() {
				@Override
				public void run() {
					AIR.log( "Running FaceDetection in new thread" );
					FaceDetector.Builder fb = new FaceDetector.Builder( activity.getApplicationContext() );
					fb.setClassificationType( classification )
							.setMode( accuracy )
							.setTrackingEnabled( false )
							.setLandmarkType( FaceDetector.ALL_LANDMARKS ) // We want to know about eye/mouth positions
							.setProminentFaceOnly( prominentFaceOnly );

					/* Wrap the detector in SafeFaceDetector */
					final FaceDetector detector = fb.build();
					Detector<Face> sd = new SafeFaceDetector( detector );
					if( !sd.isOperational() ) {
						sd.release();
						AIR.log( "Error, detector is not operational." );
						AIR.dispatchEvent(
								FaceDetectionEvent.FACE_DETECTION_ERROR,
								StringUtils.getEventErrorJSON( -1, "Detector is not operational. Dependencies may have not been downloaded yet. Please, try again later." )
						);
						return;
					}

					/* Create Frame with bitmap */
					final Frame frame = new Frame.Builder().setBitmap( image ).build();
					SparseArray<Face> faces = sd.detect( frame );

					/* Build faces JSONArray */
					JSONArray facesResult = getFacesJSONArray( faces );
					dispatchResponse( facesResult, callbackId );

					sd.release();
				}
			}
	).start();

	return null;
}
 
Example #24
Source File: Document.java    From CVScanner with GNU General Public License v3.0
public void setImage(Frame image) {
    this.image = image;
}
 
Example #25
Source File: CameraSource.java    From Bluefruit_LE_Connect_Android with MIT License
/**
 * As long as the processing thread is active, this executes detection on frames
 * continuously.  The next pending frame is either immediately available or hasn't been
 * received yet.  Once it is available, we transfer the frame info to local variables and
 * run detection on that frame.  It immediately loops back for the next frame without
 * pausing.
 * <p/>
 * If detection takes longer than the time in between new frames from the camera, this will
 * mean that this loop will run without ever waiting on a frame, avoiding any context
 * switching or frame acquisition time latency.
 * <p/>
 * If you find that this is using more CPU than you'd like, you should probably decrease the
 * FPS setting above to allow for some idle time in between frames.
 */
@Override
public void run() {
    Frame outputFrame;
    ByteBuffer data;

    while (true) {
        synchronized (mLock) {
            // 'while', not 'if': a spurious wake-up must re-check the condition.
            while (mActive && (mPendingFrameData == null)) {
                try {
                    // Wait for the next frame to be received from the camera, since we
                    // don't have it yet.
                    mLock.wait();
                } catch (InterruptedException e) {
                    Log.d(TAG, "Frame processing loop terminated.", e);
                    return;
                }
            }

            if (!mActive) {
                // Exit the loop once this camera source is stopped or released.  We check
                // this here, immediately after the wait() above, to handle the case where
                // setActive(false) had been called, triggering the termination of this
                // loop.
                return;
            }

            outputFrame = new Frame.Builder()
                    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                            mPreviewSize.getHeight(), ImageFormat.NV21)
                    .setId(mPendingFrameId)
                    .setTimestampMillis(mPendingTimeMillis)
                    .setRotation(mRotation)
                    .build();

            // Hold onto the frame data locally, so that we can use this for detection
            // below.  We need to clear mPendingFrameData to ensure that this buffer isn't
            // recycled back to the camera before we are done using that data.
            data = mPendingFrameData;
            mPendingFrameData = null;
        }

        // The code below needs to run outside of synchronization, because this will allow
        // the camera to add pending frame(s) while we are running detection on the current
        // frame.

        try {
            mDetector.receiveFrame(outputFrame);
        } catch (Throwable t) {
            Log.e(TAG, "Exception thrown from receiver.", t);
        } finally {
            mCamera.addCallbackBuffer(data.array());
        }
    }
}
 
Example #26
Source File: CameraSource.java    From Bluefruit_LE_Connect_Android_V2 with MIT License
/**
 * As long as the processing thread is active, this executes detection on frames
 * continuously.  The next pending frame is either immediately available or hasn't been
 * received yet.  Once it is available, we transfer the frame info to local variables and
 * run detection on that frame.  It immediately loops back for the next frame without
 * pausing.
 * <p/>
 * If detection takes longer than the time in between new frames from the camera, this will
 * mean that this loop will run without ever waiting on a frame, avoiding any context
 * switching or frame acquisition time latency.
 * <p/>
 * If you find that this is using more CPU than you'd like, you should probably decrease the
 * FPS setting above to allow for some idle time in between frames.
 */
@Override
public void run() {
    Frame outputFrame;
    ByteBuffer data;

    while (true) {
        synchronized (mLock) {
            // 'while', not 'if': a spurious wake-up must re-check the condition.
            while (mActive && (mPendingFrameData == null)) {
                try {
                    // Wait for the next frame to be received from the camera, since we
                    // don't have it yet.
                    mLock.wait();
                } catch (InterruptedException e) {
                    Log.d(TAG, "Frame processing loop terminated.", e);
                    return;
                }
            }

            if (!mActive) {
                // Exit the loop once this camera source is stopped or released.  We check
                // this here, immediately after the wait() above, to handle the case where
                // setActive(false) had been called, triggering the termination of this
                // loop.
                return;
            }

            outputFrame = new Frame.Builder()
                    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                            mPreviewSize.getHeight(), ImageFormat.NV21)
                    .setId(mPendingFrameId)
                    .setTimestampMillis(mPendingTimeMillis)
                    .setRotation(mRotation)
                    .build();

            // Hold onto the frame data locally, so that we can use this for detection
            // below.  We need to clear mPendingFrameData to ensure that this buffer isn't
            // recycled back to the camera before we are done using that data.
            data = mPendingFrameData;
            mPendingFrameData = null;
        }

        // The code below needs to run outside of synchronization, because this will allow
        // the camera to add pending frame(s) while we are running detection on the current
        // frame.

        try {
            mDetector.receiveFrame(outputFrame);
        } catch (Throwable t) {
            Log.e(TAG, "Exception thrown from receiver.", t);
        } finally {
            mCamera.addCallbackBuffer(data.array());
        }
    }
}
 
Example #27
Source File: CameraSource.java    From ETHWallet with GNU General Public License v3.0
/**
 * As long as the processing thread is active, this executes detection on frames
 * continuously.  The next pending frame is either immediately available or hasn't been
 * received yet.  Once it is available, we transfer the frame info to local variables and
 * run detection on that frame.  It immediately loops back for the next frame without
 * pausing.
 * <p/>
 * If detection takes longer than the time in between new frames from the camera, this will
 * mean that this loop will run without ever waiting on a frame, avoiding any context
 * switching or frame acquisition time latency.
 * <p/>
 * If you find that this is using more CPU than you'd like, you should probably decrease the
 * FPS setting above to allow for some idle time in between frames.
 */
@Override
public void run() {
    Frame outputFrame;
    ByteBuffer data;

    while (true) {
        synchronized (mLock) {
            while (mActive && (mPendingFrameData == null)) {
                try {
                    // Wait for the next frame to be received from the camera, since we
                    // don't have it yet.
                    mLock.wait();
                } catch (InterruptedException e) {
                    Log.d(TAG, "Frame processing loop terminated.", e);
                    return;
                }
            }

            if (!mActive) {
                // Exit the loop once this camera source is stopped or released.  We check
                // this here, immediately after the wait() above, to handle the case where
                // setActive(false) had been called, triggering the termination of this
                // loop.
                return;
            }

            outputFrame = new Frame.Builder()
                    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                            mPreviewSize.getHeight(), ImageFormat.NV21)
                    .setId(mPendingFrameId)
                    .setTimestampMillis(mPendingTimeMillis)
                    .setRotation(mRotation)
                    .build();

            // Hold onto the frame data locally, so that we can use this for detection
            // below.  We need to clear mPendingFrameData to ensure that this buffer isn't
            // recycled back to the camera before we are done using that data.
            data = mPendingFrameData;
            mPendingFrameData = null;
        }

        // The code below needs to run outside of synchronization, because this will allow
        // the camera to add pending frame(s) while we are running detection on the current
        // frame.

        try {
            mDetector.receiveFrame(outputFrame);
        } catch (Throwable t) {
            Log.e(TAG, "Exception thrown from receiver.", t);
        } finally {
            mCamera.addCallbackBuffer(data.array());
        }
    }
}
 
Example #28
Source File: CameraSource.java    From android-vision with Apache License 2.0
/**
 * As long as the processing thread is active, this executes detection on frames
 * continuously.  The next pending frame is either immediately available or hasn't been
 * received yet.  Once it is available, we transfer the frame info to local variables and
 * run detection on that frame.  It immediately loops back for the next frame without
 * pausing.
 * <p/>
 * If detection takes longer than the time in between new frames from the camera, this will
 * mean that this loop will run without ever waiting on a frame, avoiding any context
 * switching or frame acquisition time latency.
 * <p/>
 * If you find that this is using more CPU than you'd like, you should probably decrease the
 * FPS setting above to allow for some idle time in between frames.
 */
@Override
public void run() {
    Frame outputFrame;
    ByteBuffer data;

    while (true) {
        synchronized (mLock) {
            while (mActive && (mPendingFrameData == null)) {
                try {
                    // Wait for the next frame to be received from the camera, since we
                    // don't have it yet.
                    mLock.wait();
                } catch (InterruptedException e) {
                    Log.d(TAG, "Frame processing loop terminated.", e);
                    return;
                }
            }

            if (!mActive) {
                // Exit the loop once this camera source is stopped or released.  We check
                // this here, immediately after the wait() above, to handle the case where
                // setActive(false) had been called, triggering the termination of this
                // loop.
                return;
            }

            outputFrame = new Frame.Builder()
                    .setImageData(mPendingFrameData, mPreviewSize.getWidth(),
                            mPreviewSize.getHeight(), ImageFormat.NV21)
                    .setId(mPendingFrameId)
                    .setTimestampMillis(mPendingTimeMillis)
                    .setRotation(mRotation)
                    .build();

            // Hold onto the frame data locally, so that we can use this for detection
            // below.  We need to clear mPendingFrameData to ensure that this buffer isn't
            // recycled back to the camera before we are done using that data.
            data = mPendingFrameData;
            mPendingFrameData = null;
        }

        // The code below needs to run outside of synchronization, because this will allow
        // the camera to add pending frame(s) while we are running detection on the current
        // frame.

        try {
            mDetector.receiveFrame(outputFrame);
        } catch (Throwable t) {
            Log.e(TAG, "Exception thrown from receiver.", t);
        } finally {
            mCamera.addCallbackBuffer(data.array());
        }
    }
}
 
Example #29
Source File: FaceDetectionImplGmsCore.java    From 365browser with Apache License 2.0
@Override
public void detect(
        SharedBufferHandle frameData, int width, int height, DetectResponse callback) {
    // The vision library will be downloaded the first time the API is used
    // on the device; this happens "fast", but it might have not completed,
    // bail in this case.
    if (!mFaceDetector.isOperational()) {
        Log.e(TAG, "FaceDetector is not operational");

        // Fallback to Android's FaceDetectionImpl.
        FaceDetectorOptions options = new FaceDetectorOptions();
        options.fastMode = mFastMode;
        options.maxDetectedFaces = mMaxFaces;
        FaceDetectionImpl detector = new FaceDetectionImpl(options);
        detector.detect(frameData, width, height, callback);
        return;
    }

    Frame frame = SharedBufferUtils.convertToFrame(frameData, width, height);
    if (frame == null) {
        Log.e(TAG, "Error converting SharedMemory to Frame");
        callback.call(new FaceDetectionResult[0]);
        return;
    }

    final SparseArray<Face> faces = mFaceDetector.detect(frame);

    FaceDetectionResult[] faceArray = new FaceDetectionResult[faces.size()];
    for (int i = 0; i < faces.size(); i++) {
        faceArray[i] = new FaceDetectionResult();
        final Face face = faces.valueAt(i);

        final PointF corner = face.getPosition();
        faceArray[i].boundingBox = new RectF();
        faceArray[i].boundingBox.x = corner.x;
        faceArray[i].boundingBox.y = corner.y;
        faceArray[i].boundingBox.width = face.getWidth();
        faceArray[i].boundingBox.height = face.getHeight();

        final List<Landmark> landmarks = face.getLandmarks();
        ArrayList<org.chromium.shape_detection.mojom.Landmark> mojoLandmarks =
                new ArrayList<org.chromium.shape_detection.mojom.Landmark>(landmarks.size());

        for (int j = 0; j < landmarks.size(); j++) {
            final Landmark landmark = landmarks.get(j);
            final int landmarkType = landmark.getType();
            if (landmarkType == Landmark.LEFT_EYE || landmarkType == Landmark.RIGHT_EYE
                    || landmarkType == Landmark.BOTTOM_MOUTH) {
                org.chromium.shape_detection.mojom.Landmark mojoLandmark =
                        new org.chromium.shape_detection.mojom.Landmark();
                mojoLandmark.location = new org.chromium.gfx.mojom.PointF();
                mojoLandmark.location.x = landmark.getPosition().x;
                mojoLandmark.location.y = landmark.getPosition().y;
                mojoLandmark.type = landmarkType == Landmark.BOTTOM_MOUTH ? LandmarkType.MOUTH
                                                                          : LandmarkType.EYE;
                mojoLandmarks.add(mojoLandmark);
            }
        }
        faceArray[i].landmarks = mojoLandmarks.toArray(
                new org.chromium.shape_detection.mojom.Landmark[mojoLandmarks.size()]);
    }
    callback.call(faceArray);
}