org.tensorflow.contrib.android.TensorFlowInferenceInterface Java Examples

The following examples show how to use org.tensorflow.contrib.android.TensorFlowInferenceInterface. Each example is taken from an open-source Android project; the source file, project, and license are noted above the code.
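Most of the examples below follow the same lifecycle: construct the interface from an AssetManager and a frozen GraphDef, feed the input tensors, run the graph, and fetch the outputs. The sketch below illustrates that pattern; the node names, tensor shapes, and model path are placeholders rather than values taken from any of the projects.

// Minimal lifecycle sketch. "input", "output", and the model path are placeholders;
// substitute the node names and shapes of your own graph.
TensorFlowInferenceInterface inference =
        new TensorFlowInferenceInterface(assetManager, "file:///android_asset/model.pb");

float[] input = new float[1 * 224 * 224 * 3]; // example input buffer
float[] output = new float[1001];             // example output buffer

inference.feed("input", input, 1, 224, 224, 3); // copy input data into the graph
inference.run(new String[] {"output"});         // execute up to the requested outputs
inference.fetch("output", output);              // copy results back out
inference.close();                              // release the native session when done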
Example #1
Source File: YoloV2Classifier.java    From pasm-yolov3-Android with GNU General Public License v3.0
/** Initializes a native TensorFlow session for classifying images. */
public static Classifier create(
        final AssetManager assetManager,
        final String modelFilename,
        final int inputSize,
        final String inputName,
        final String outputName,
        final int blockSize) {
    YoloV2Classifier d = new YoloV2Classifier();
    d.inputName = inputName;
    d.inputSize = inputSize;

    // Pre-allocate buffers.
    d.outputNames = outputName.split(",");
    d.intValues = new int[inputSize * inputSize];
    d.floatValues = new float[inputSize * inputSize * 3];
    d.blockSize = blockSize;

    d.inferenceInterface = new TensorFlowInferenceInterface(assetManager, modelFilename);

    return d;
}
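A call site for create() might look like the following; the model path, node names, 416 x 416 input size, and block size of 32 are typical YOLOv2 values used here as assumptions, not constants read from the pasm-yolov3-Android project.

// Hypothetical call site; adjust every argument to match your exported graph.
Classifier detector = YoloV2Classifier.create(
        getAssets(),
        "file:///android_asset/yolov2-tiny.pb", // assumed model location
        416,                                    // assumed input size
        "input",                                // assumed input node name
        "output",                               // assumed output node name
        32);                                    // assumed block size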
 
Example #2
Source File: TensorFlowObjectDetectionAPIModel.java    From FimiX8-RE with MIT License
public static Classifier create(AssetManager assetManager, String modelFilename, String labelFilename, int inputSize) throws IOException {
    TensorFlowObjectDetectionAPIModel d = new TensorFlowObjectDetectionAPIModel();
    BufferedReader br = new BufferedReader(new InputStreamReader(assetManager.open(labelFilename.split("file:///android_asset/")[1])));
    while (true) {
        String line = br.readLine();
        if (line == null) {
            break;
        }
        d.labels.add(line);
    }
    br.close();
    d.inferenceInterface = new TensorFlowInferenceInterface(assetManager, modelFilename);
    Graph g = d.inferenceInterface.graph();
    d.inputName = "image_tensor";
    if (g.operation(d.inputName) == null) {
        throw new RuntimeException("Failed to find input Node '" + d.inputName + "'");
    }
    d.inputSize = inputSize;
    if (g.operation("detection_scores") == null) {
        throw new RuntimeException("Failed to find output Node 'detection_scores'");
    } else if (g.operation("detection_boxes") == null) {
        throw new RuntimeException("Failed to find output Node 'detection_boxes'");
    } else if (g.operation("detection_classes") == null) {
        throw new RuntimeException("Failed to find output Node 'detection_classes'");
    } else {
        d.outputNames = new String[]{"detection_boxes", "detection_scores", "detection_classes", "num_detections"};
        d.intValues = new int[(d.inputSize * d.inputSize)];
        d.byteValues = new byte[((d.inputSize * d.inputSize) * 3)];
        d.outputScores = new float[100];
        d.outputLocations = new float[400];
        d.outputClasses = new float[100];
        d.outputNumDetections = new float[1];
        return d;
    }
}
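The companion recognizeImage() method is not shown here. In the stock TensorFlow Android object-detection demo that this class mirrors, the buffers pre-allocated above are used roughly as follows (a sketch, not the FimiX8-RE source):

// Sketch of the usual inference step for this model: feed the RGB bytes,
// run the four output nodes, then copy each result into its pre-allocated buffer.
inferenceInterface.feed(inputName, byteValues, 1, inputSize, inputSize, 3);
inferenceInterface.run(outputNames, false);
inferenceInterface.fetch(outputNames[0], outputLocations);     // detection_boxes, 100 x 4
inferenceInterface.fetch(outputNames[1], outputScores);        // detection_scores
inferenceInterface.fetch(outputNames[2], outputClasses);       // detection_classes
inferenceInterface.fetch(outputNames[3], outputNumDetections); // num_detections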
 
Example #3
Source File: YoloV3Classifier.java    From pasm-yolov3-Android with GNU General Public License v3.0
/** Initializes a native TensorFlow session for classifying images. */
public static Classifier create(
        final AssetManager assetManager,
        final String modelName,
        final int inputSize,
        final String inputName,
        final String outputName,
        final int[] blockSize,
        final int centerOffset) throws IOException {
    YoloV3Classifier d = new YoloV3Classifier();
    d.inputName = inputName;
    d.inputSize = inputSize;

    // Pre-allocate buffers.
    d.outputNames = outputName.split(",");
    d.intValues = new int[inputSize * inputSize];
    d.floatValues = new float[inputSize * inputSize * 3];
    d.blockSize = blockSize;

    String modelFilename = modelName + ".bp";
    String labelsFilename = modelName + "-labels.txt";
    String anchorsFilename = modelName + "-anchors.txt";

    // Load the frozen model graph from the app assets.
    d.inferenceInterface = new TensorFlowInferenceInterface(assetManager, FILE_ANDROID_ASSET + modelFilename);

    InputStream labelsFile = assetManager.open(labelsFilename);
    InputStream anchorsFile = assetManager.open(anchorsFilename);

    d.labels = streamToLabels(labelsFile);
    d.anchors = streamToAnchors(anchorsFile);

    d.centerOffset = centerOffset;

    return d;
}
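The streamToLabels and streamToAnchors helpers are referenced above but not included in this excerpt. A plausible sketch of the label reader, assuming one label per line and a List&lt;String&gt; field (both assumptions); the anchors reader would parse numbers the same way:

// Hedged sketch of the label reader: one class label per line of the asset file.
private static List<String> streamToLabels(InputStream labelsFile) throws IOException {
    List<String> labels = new ArrayList<>();
    BufferedReader reader = new BufferedReader(new InputStreamReader(labelsFile));
    String line;
    while ((line = reader.readLine()) != null) {
        labels.add(line.trim());
    }
    reader.close();
    return labels;
}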
 
Example #4
Source File: TensorFlowImageClassifier.java    From dbclf with Apache License 2.0
/**
 * Initializes a native TensorFlow session for classifying images.
 *
 * @param assetManager  The asset manager to be used to load assets.
 * @param modelFilename The filepath of the model GraphDef protocol buffer.
 * @param labels        String array of labels.
 * @param inputSize     The input size. A square image of inputSize x inputSize is assumed.
 * @param imageMean     The assumed mean of the image values.
 * @param imageStd      The assumed std of the image values.
 * @param inputName     The label of the image input node.
 * @param outputName    The label of the output node.
 * @throws IOException
 */
public static Classifier create(
        AssetManager assetManager,
        String modelFilename,
        String[] labels,
        int inputSize,
        int imageMean,
        float imageStd,
        String inputName,
        String outputName) {
    final TensorFlowImageClassifier c = new TensorFlowImageClassifier();
    c.inputName = inputName;
    c.outputName = outputName;

    // Read the label names into memory.
    Collections.addAll(c.labels, labels);

    c.inferenceInterface = new TensorFlowInferenceInterface(assetManager, modelFilename);

    // The shape of the output is [N, NUM_CLASSES], where N is the batch size.
    final Operation operation = c.inferenceInterface.graphOperation(outputName);
    final int numClasses = (int) operation.output(0).shape().size(1);

    // Ideally, inputSize could have been retrieved from the shape of the input operation.  Alas,
    // the placeholder node for input in the graphdef typically used does not specify a shape, so it
    // must be passed in as a parameter.
    c.inputSize = inputSize;
    c.imageMean = imageMean;
    c.imageStd = imageStd;

    // Pre-allocate buffers.
    c.outputNames = new String[]{outputName};
    c.intValues = new int[inputSize * inputSize];
    c.floatValues = new float[inputSize * inputSize * 3];
    c.outputs = new float[numClasses];

    return c;
}
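The matching recognizeImage() step is omitted above. In the stock TensorFlow Android classifier demo that this class follows, the buffers allocated in create() are used roughly like this (a sketch; the bitmap is assumed to already be inputSize x inputSize):

// Sketch of the usual preprocessing and inference step that pairs with create().
bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
for (int i = 0; i < intValues.length; ++i) {
    final int val = intValues[i];
    floatValues[i * 3]     = (((val >> 16) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
}
inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
inferenceInterface.run(outputNames, false);
inferenceInterface.fetch(outputName, outputs); // one confidence value per class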
 
Example #5
Source File: Factory.java    From Flora with MIT License
@Override
public Bitmap stylizeImage(Bitmap bitmap, int model) {

    Log.d("launched", "stylized in tensor module");
    TensorFlowInferenceInterface inferenceInterface = new TensorFlowInferenceInterface(context.getAssets(), MODEL_FILE);
    bitmap = Bitmap.createScaledBitmap(bitmap, desiredSize, desiredSize, false);
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());

    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3] = ((val >> 16) & 0xFF) / 255.0f;
        floatValues[i * 3 + 1] = ((val >> 8) & 0xFF) / 255.0f;
        floatValues[i * 3 + 2] = (val & 0xFF) / 255.0f;
    }
    for (int i = 0; i < NUM_STYLES; ++i) {
        styleVals[i] = 0f;
    }
    styleVals[model] = 1f;

    // Copy the input data into TensorFlow.
    Log.d("tensor", "Width: " + bitmap.getWidth() + ", Height: " + bitmap.getHeight());
    inferenceInterface.feed(
            INPUT_NODE, floatValues, 1, bitmap.getWidth(), bitmap.getHeight(), 3);
    inferenceInterface.feed(STYLE_NODE, styleVals, NUM_STYLES);

    inferenceInterface.run(new String[]{OUTPUT_NODE}, false);
    inferenceInterface.fetch(OUTPUT_NODE, floatValues);

    for (int i = 0; i < intValues.length; ++i) {
        intValues[i] =
                0xFF000000
                        | (((int) (floatValues[i * 3] * 255)) << 16)
                        | (((int) (floatValues[i * 3 + 1] * 255)) << 8)
                        | ((int) (floatValues[i * 3 + 2] * 255));
    }

    bitmap.setPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    Log.d("launched", "return bitmap");
    return bitmap;
}
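Note that the interface is constructed inside stylizeImage(), so the model is reloaded from assets on every call; in practice it is usually created once and reused. Since the call loads the model and runs the graph synchronously, callers would keep it off the UI thread. A hedged usage sketch, where factory, sourceBitmap, and imageView are assumed names:

// Hedged usage sketch: run the blocking stylize call on a worker thread.
new Thread(new Runnable() {
    @Override
    public void run() {
        final Bitmap styled = factory.stylizeImage(sourceBitmap, 0); // style index 0 assumed
        imageView.post(new Runnable() {
            @Override
            public void run() {
                imageView.setImageBitmap(styled);
            }
        });
    }
}).start();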
 
Example #6
Source File: Camera2BasicFragment.java    From Cam2Caption with BSD 3-Clause "New" or "Revised" License
TensorFlowInferenceInterface InitSession(){
    inferenceInterface = new TensorFlowInferenceInterface();
    inferenceInterface.initializeTensorFlow(getActivity().getAssets(), MODEL_FILE);
    OutputNodes = LoadFile(OUTPUT_NODES);
    WORD_MAP = LoadFile("idmap");
    return inferenceInterface;
}
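This snippet uses the older two-step API: a no-argument constructor followed by initializeTensorFlow(). With more recent versions of the contrib library the same setup collapses into the single constructor seen in most of the other examples; a hedged equivalent:

// Equivalent setup with the newer constructor-based API.
TensorFlowInferenceInterface InitSession(){
    inferenceInterface = new TensorFlowInferenceInterface(getActivity().getAssets(), MODEL_FILE);
    OutputNodes = LoadFile(OUTPUT_NODES);
    WORD_MAP = LoadFile("idmap");
    return inferenceInterface;
}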
 
Example #7
Source File: TensorFlowImageRecognizer.java    From android-yolo-v2 with Do What The F*ck You Want To Public License
/**
 * Initializes a native TensorFlow session for classifying images.
 *
 * @param assetManager The asset manager to be used to load assets.
 * @throws IOException
 */
public static TensorFlowImageRecognizer create(AssetManager assetManager) {
    TensorFlowImageRecognizer recognizer = new TensorFlowImageRecognizer();
    recognizer.labels = ClassAttrProvider.newInstance(assetManager).getLabels();
    recognizer.inferenceInterface = new TensorFlowInferenceInterface(assetManager,
            "file:///android_asset/" + MODEL_FILE);
    recognizer.outputSize = YOLOClassifier.getInstance()
            .getOutputSizeByShape(recognizer.inferenceInterface.graphOperation(OUTPUT_NAME));
    return recognizer;
}
 
Example #8
Source File: TensorFlow.java    From Android-Face-Recognition-with-Deep-Learning-Library with Apache License 2.0
public TensorFlow(Context context, int inputSize, int outputSize, String inputLayer, String outputLayer, String modelFile){
    this.inputSize = inputSize;
    this.outputSize = outputSize;
    this.inputLayer = inputLayer;
    this.outputLayer = outputLayer;

    inferenceInterface = new TensorFlowInferenceInterface(context.getAssets(), modelFile);
}
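The constructor only stores the layer names and sizes; the library's actual forward pass is not part of this excerpt. A minimal sketch of such a method, assuming the graph takes a flattened float vector of length inputSize (the method name and tensor layout are assumptions):

// Hedged sketch of a forward pass built on the fields set in the constructor.
public float[] getFeatureVector(float[] input) {
    float[] output = new float[outputSize];
    inferenceInterface.feed(inputLayer, input, 1, inputSize); // assumed 1 x inputSize shape
    inferenceInterface.run(new String[] {outputLayer});
    inferenceInterface.fetch(outputLayer, output);
    return output;
}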
 
Example #9
Source File: Yolov2FromDarkFlowTest.java    From pasm-yolov3-Android with GNU General Public License v3.0
@Test
public void instantiate() {

    TensorFlowInferenceInterface inferenceInterface = new TensorFlowInferenceInterface(appContext.getAssets(), MODEL_FILE);

    Iterator<Operation> operations = inferenceInterface.graph().operations();
    while (operations.hasNext()){
        Operation next = operations.next();
        Log.d(TAG, "operation name: " + next.name());
    }

    Operation in = inferenceInterface.graph().operation(YOLO_INPUT_NAME);
    Operation out = inferenceInterface.graph().operation(YOLO_OUTPUT_NAMES);

    Log.d(TAG, "in: " + in);
    Log.d(TAG, "out: " + out);


}
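The test only logs the two operations; asserting on them makes it fail fast when the node names drift from the exported graph. A possible addition, assuming JUnit (already implied by the @Test annotation):

// Optional follow-up assertions for the same test.
org.junit.Assert.assertNotNull("input node not found: " + YOLO_INPUT_NAME, in);
org.junit.Assert.assertNotNull("output node not found: " + YOLO_OUTPUT_NAMES, out);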
 
Example #10
Source File: Classifier.java    From Chinese-number-gestures-recognition with BSD 2-Clause "Simplified" License
Classifier(AssetManager assetManager, String modePath) {
    // Initialize the TensorFlowInferenceInterface object
    inferenceInterface = new TensorFlowInferenceInterface(assetManager, modePath);
    Log.e("tf", "TensorFlow model file loaded successfully");
}
 
Example #11
Source File: LSTMClassifier.java    From Myna with Apache License 2.0
public LSTMClassifier(final Context ctx) {
    AssetManager assetManager = ctx.getAssets();
    inferenceInterface = new TensorFlowInferenceInterface(assetManager, MODEL_FILE);
}
 
Example #12
Source File: MainActivity.java    From TFDroid with MIT License
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    inferenceInterface = new TensorFlowInferenceInterface();
    inferenceInterface.initializeTensorFlow(getAssets(), MODEL_FILE);


    final Button button = (Button) findViewById(R.id.button);

    button.setOnClickListener(new View.OnClickListener() {
        public void onClick(View v) {

            final EditText editNum1 = (EditText) findViewById(R.id.editNum1);
            final EditText editNum2 = (EditText) findViewById(R.id.editNum2);
            final EditText editNum3 = (EditText) findViewById(R.id.editNum3);

            float num1 = Float.parseFloat(editNum1.getText().toString());
            float num2 = Float.parseFloat(editNum2.getText().toString());
            float num3 = Float.parseFloat(editNum3.getText().toString());

            float[] inputFloats = {num1, num2, num3};

            inferenceInterface.fillNodeFloat(INPUT_NODE, INPUT_SIZE, inputFloats);

            inferenceInterface.runInference(new String[] {OUTPUT_NODE});

            float[] resu = {0, 0};
            inferenceInterface.readNodeFloat(OUTPUT_NODE, resu);

            final TextView textViewR = (TextView) findViewById(R.id.txtViewResult);
            textViewR.setText(Float.toString(resu[0]) + ", " + Float.toString(resu[1]));
        }
    });

}
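This activity also relies on the legacy method names (fillNodeFloat, runInference, readNodeFloat). On newer releases of the contrib library the same three calls are written as feed, run, and fetch; a hedged equivalent of the inference block, where the 1 x 3 shape is assumed from the three input floats (INPUT_SIZE itself is not shown in this excerpt):

// Hedged equivalents using the newer feed/run/fetch API.
inferenceInterface.feed(INPUT_NODE, inputFloats, 1, 3);
inferenceInterface.run(new String[] {OUTPUT_NODE});
inferenceInterface.fetch(OUTPUT_NODE, resu);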
 
Example #13
Source File: MainActivity.java    From Pytorch-Keras-ToAndroid with MIT License
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);


    Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
    setSupportActionBar(toolbar);


    //initialize tensorflow with the AssetManager and the Model
    tf = new TensorFlowInferenceInterface(getAssets(),MODEL_PATH);

    imageView = (ImageView) findViewById(R.id.imageview);
    resultView = (TextView) findViewById(R.id.results);

    progressBar = Snackbar.make(imageView,"PROCESSING IMAGE",Snackbar.LENGTH_INDEFINITE);


    final FloatingActionButton predict = (FloatingActionButton) findViewById(R.id.predict);
    predict.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View view) {


            try{

                //READ THE IMAGE FROM ASSETS FOLDER
                InputStream imageStream = getAssets().open("testimage.jpg");

                Bitmap bitmap = BitmapFactory.decodeStream(imageStream);

                imageView.setImageBitmap(bitmap);

                progressBar.show();

                predict(bitmap);
            }
            catch (Exception e){
                // Surface asset-loading failures instead of silently swallowing them.
                e.printStackTrace();
            }

        }
    });
}
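The predict(Bitmap) helper invoked above is not part of this excerpt. A hypothetical sketch of what it might do for a 224 x 224 RGB classifier; the input size, the "input_1"/"output_1" node names, and the 1000-class output are assumptions, not values from the Pytorch-Keras-ToAndroid project:

// Hypothetical predict() implementation; node names, input size, and class count are assumptions.
private void predict(final Bitmap bitmap) {
    new Thread(new Runnable() {
        @Override
        public void run() {
            Bitmap scaled = Bitmap.createScaledBitmap(bitmap, 224, 224, false);
            int[] pixels = new int[224 * 224];
            float[] input = new float[224 * 224 * 3];
            scaled.getPixels(pixels, 0, 224, 0, 0, 224, 224);
            for (int i = 0; i < pixels.length; ++i) {
                final int val = pixels[i];
                input[i * 3]     = ((val >> 16) & 0xFF) / 255.0f;
                input[i * 3 + 1] = ((val >> 8) & 0xFF) / 255.0f;
                input[i * 3 + 2] = (val & 0xFF) / 255.0f;
            }

            final float[] output = new float[1000]; // assumed number of classes
            tf.feed("input_1", input, 1, 224, 224, 3);
            tf.run(new String[] {"output_1"});
            tf.fetch("output_1", output);

            // Pick the highest-scoring class index.
            int best = 0;
            for (int i = 1; i < output.length; ++i) {
                if (output[i] > output[best]) best = i;
            }
            final int bestIndex = best;

            runOnUiThread(new Runnable() {
                @Override
                public void run() {
                    progressBar.dismiss();
                    resultView.setText("Predicted class index: " + bestIndex);
                }
            });
        }
    }).start();
}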