Java Code Examples for android.graphics.Bitmap.getPixels()

The following are Java code examples showing how to use the getPixels() method of the android.graphics.Bitmap class.
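For reference, the signature is Bitmap.getPixels(int[] pixels, int offset, int stride, int x, int y, int width, int height). The minimal sketch below (the class and method names are illustrative, not taken from any of the projects) reads an entire bitmap into an ARGB int array, which is the basic pattern used by every example on this page.

import android.graphics.Bitmap;

public final class BitmapPixelUtil {
    // Minimal sketch: copy a whole bitmap into an ARGB int array.
    public static int[] readAllPixels(Bitmap bitmap) {
        int width = bitmap.getWidth();
        int height = bitmap.getHeight();
        int[] pixels = new int[width * height];
        // offset 0 and stride == width lay rows out contiguously;
        // (0, 0, width, height) selects the entire bitmap.
        bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
        return pixels;
    }
}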
Example 1
Project: GitHub   File: FasterGreyScalePostprocessor.java
@Override
public void process(Bitmap bitmap) {
  final int w = bitmap.getWidth();
  final int h = bitmap.getHeight();
  final int[] pixels = new int[w * h];

  /*
   * Using {@link Bitmap#getPixels} reduces the number of Java-JNI calls and passes all the image
   * pixels in one call. This allows us to edit all the data in the Java world and then hand back
   * the final result later.
   */
  bitmap.getPixels(pixels, 0, w, 0, 0, w, h);

  for (int x = 0; x < w; x++) {
    for (int y = 0; y < h; y++) {
      final int offset = y * w + x;
      pixels[offset] = SlowGreyScalePostprocessor.getGreyColor(pixels[offset]);
    }
  }

  bitmap.setPixels(pixels, 0, w, 0, 0, w, h);
}
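The getGreyColor() helper from SlowGreyScalePostprocessor is not shown in this snippet. A plausible implementation (a sketch, not the project's actual code) converts each ARGB pixel to its luma while preserving the alpha channel:

static int getGreyColor(int color) {
  final int alpha = color & 0xFF000000;
  final int r = (color >> 16) & 0xFF;
  final int g = (color >> 8) & 0xFF;
  final int b = color & 0xFF;
  // Rec. 601 luma approximation.
  final int luma = (int) (0.299f * r + 0.587f * g + 0.114f * b);
  return alpha | (luma << 16) | (luma << 8) | luma;
}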
 
Example 2
Project: Mobike   File: QrUtils.java
/**
 * Convert a Bitmap to YUV420sp (NV21) bytes.
 *
 * @param inputWidth  width of the bitmap
 * @param inputHeight height of the bitmap
 * @param scaled      the source bitmap (recycled after conversion)
 * @return the YUV420sp byte array (a reused static buffer)
 */
public static byte[] getYUV420sp(int inputWidth, int inputHeight, Bitmap scaled) {
    int[] argb = new int[inputWidth * inputHeight];

    scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);

    /**
     * The dimensions must be rounded up to even values; otherwise the buffer
     * allocated for YUV420 encoding may be too small and overflow.
     */
    int requiredWidth = inputWidth % 2 == 0 ? inputWidth : inputWidth + 1;
    int requiredHeight = inputHeight % 2 == 0 ? inputHeight : inputHeight + 1;

    int byteLength = requiredWidth * requiredHeight * 3 / 2;
    if (yuvs == null || yuvs.length < byteLength) {
        yuvs = new byte[byteLength];
    } else {
        Arrays.fill(yuvs, (byte) 0);
    }

    encodeYUV420SP(yuvs, argb, inputWidth, inputHeight);

    scaled.recycle();

    return yuvs;
}
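Neither encodeYUV420SP() nor the static yuvs buffer is shown in this snippet. The integer-coefficient ARGB-to-NV21 conversion widely copied in Android code looks roughly like the sketch below (BT.601 coefficients; an assumption, not the project's actual code):

static void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            int pixel = argb[j * width + i];
            int r = (pixel >> 16) & 0xFF;
            int g = (pixel >> 8) & 0xFF;
            int b = pixel & 0xFF;
            // RGB -> YUV (BT.601, integer approximation).
            int y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
            int u = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
            int v = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
            yuv420sp[yIndex++] = (byte) Math.max(0, Math.min(255, y));
            // NV21 stores one interleaved V/U pair per 2x2 block of pixels.
            if (j % 2 == 0 && i % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) Math.max(0, Math.min(255, v));
                yuv420sp[uvIndex++] = (byte) Math.max(0, Math.min(255, u));
            }
        }
    }
}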
 
Example 3
Project: boohee_v5.6   File: BitmapHelper.java
public static boolean isBlackBitmap(Bitmap bitmap) throws Throwable {
    if (bitmap == null || bitmap.isRecycled()) {
        return true;
    }
    int[] pixels = new int[bitmap.getWidth() * bitmap.getHeight()];
    bitmap.getPixels(pixels, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int pixel : pixels) {
        // Mask off the alpha channel; any non-zero RGB value means the bitmap is not black.
        if ((pixel & ViewCompat.MEASURED_SIZE_MASK) != 0) {
            return false;
        }
    }
    return true;
}
 
Example 4
Project: XFrame   File: XBitmapUtils.java
/**
 * Convert a color bitmap to a greyscale bitmap.
 *
 * @param img the source Bitmap
 * @return the converted greyscale bitmap
 */
public static Bitmap convertGreyImg(Bitmap img) {
    int width = img.getWidth(); // bitmap width
    int height = img.getHeight(); // bitmap height

    int[] pixels = new int[width * height]; // pixel array sized to the bitmap

    img.getPixels(pixels, 0, width, 0, 0, width, height);
    int alpha = 0xFF << 24;
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            int grey = pixels[width * i + j];

            int red = ((grey & 0x00FF0000) >> 16);
            int green = ((grey & 0x0000FF00) >> 8);
            int blue = (grey & 0x000000FF);

            grey = (int) ((float) red * 0.3 + (float) green * 0.59 + (float) blue * 0.11);
            grey = alpha | (grey << 16) | (grey << 8) | grey;
            pixels[width * i + j] = grey;
        }
    }
    Bitmap result = Bitmap.createBitmap(width, height, Config.RGB_565);
    result.setPixels(pixels, 0, width, 0, 0, width, height);
    return result;
}
 
Example 5
Project: Fatigue-Detection   File: STUtils.java
/**
     * Get the pixel data of a Bitmap as an int[] (ARGB_8888 only).
     * @param image the source bitmap
     * @return pixel data as an int[], or null if the config is not ARGB_8888
     */
    public static int[] getBGRAImageByte(Bitmap image) {
        int width = image.getWidth();
        int height = image.getHeight();

		if(image.getConfig().equals(Config.ARGB_8888)) {
	        int[] imgData = new int[width * height];
	        image.getPixels(imgData, 0, width, 0, 0, width, height);
	        return imgData;
	       
//	        byte[] imgPixels = new byte[width * height];
//	        for (int i = 0; i < imgData.length; ++i) {
//	        	int p = 0;
//	        	//p += ((imgData[i] >> 24) & 0xFF);
//	        	p += ((imgData[i] >> 16) & 0xFF);
//	        	p += ((imgData[i] >> 8) & 0xFF);
//	        	p += ((imgData[i] >> 0) & 0xFF);
//	            imgPixels[i] = (byte) (p/3);
//	        }
		} else {
			// TODO
		}

        return null;
    }
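The else branch above is left as a TODO. One way to fill it in (a sketch, assuming a temporary ARGB_8888 copy is acceptable) is to convert the bitmap with Bitmap.copy() and read the pixels from the copy:

		} else {
			// Sketch: convert non-ARGB_8888 bitmaps to ARGB_8888 first, then read pixels from the copy.
			Bitmap argbCopy = image.copy(Config.ARGB_8888, false);
			if (argbCopy != null) {
				int[] imgData = new int[width * height];
				argbCopy.getPixels(imgData, 0, width, 0, 0, width, height);
				argbCopy.recycle();
				return imgData;
			}
		}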
 
Example 6
Project: GravityBox   File: GraphicUtils.java
public static Bitmap getAlplaBitmap(Bitmap sourceImg, int alpha) {
    int[] argb = new int[sourceImg.getWidth() * sourceImg.getHeight()];
    sourceImg.getPixels(argb, 0, sourceImg.getWidth(), 0, 0, sourceImg.getWidth(), sourceImg.getHeight());
    alpha = alpha * 255 / 100;
    for (int i = 0; i < argb.length; i++) {
        argb[i] = (alpha << 24) | (argb[i] & 0x00FFFFFF);
    }
    sourceImg = Bitmap.createBitmap(argb, sourceImg.getWidth(), sourceImg.getHeight(), Bitmap.Config.ARGB_8888);
    return sourceImg;
}
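Note that alpha is given as a percentage (0-100), not 0-255, because the method rescales it with alpha * 255 / 100. A usage sketch (src is a hypothetical source bitmap):

// Make a 50%-opaque copy of src.
Bitmap halfTransparent = GraphicUtils.getAlplaBitmap(src, 50);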
 
Example 7
Project: android-image-classification   File: Predictor.java
static public float[] inputFromImage(Bitmap[] bmps, float meanR, float meanG, float meanB) {
  if (bmps.length == 0) return null;

  int width = bmps[0].getWidth();
  int height = bmps[0].getHeight();
  float[] buf = new float[height * width * 3 * bmps.length];
  for (int x=0; x<bmps.length; x++) {
    Bitmap bmp = bmps[x];
    if (bmp.getWidth() != width || bmp.getHeight() != height)
      return null;

    int[] pixels = new int[ height * width ];
    bmp.getPixels(pixels, 0, width, 0, 0, width, height);

    int start = width * height * 3 * x;
    for (int i=0; i<height; i++) {
      for (int j=0; j<width; j++) {
          int pos = i * width + j;
          int pixel = pixels[pos];
          buf[start + pos] = Color.red(pixel) - meanR;
          buf[start + width * height + pos] = Color.green(pixel) - meanG;
          buf[start + width * height * 2 + pos] = Color.blue(pixel) - meanB;
      }
    }
  }

  return buf;
}
 
Example 8
Project: ForeverLibrary   File: RGBLuminanceSource.java
public RGBLuminanceSource(Bitmap bitmap) {
		super(null==bitmap?0:bitmap.getWidth(), null==bitmap?0:bitmap.getHeight());
		int width = null==bitmap?0:bitmap.getWidth();
		int height = null==bitmap?0:bitmap.getHeight();
//		super(bitmap.getWidth(), bitmap.getHeight());
//		int width = bitmap.getWidth();
//		int height = bitmap.getHeight();
		int[] pixels = new int[width * height];
		bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
		// In order to measure pure decoding speed, we convert the entire image
		// to a greyscale array
		// up front, which is the same as the Y channel of the
		// YUVLuminanceSource in the real app.
		luminances = new byte[width * height];
		for (int y = 0; y < height; y++) {
			int offset = y * width;
			for (int x = 0; x < width; x++) {
				int pixel = pixels[offset + x];
				int r = (pixel >> 16) & 0xff;
				int g = (pixel >> 8) & 0xff;
				int b = pixel & 0xff;
				if (r == g && g == b) {
					// Image is already greyscale, so pick any channel.
					luminances[offset + x] = (byte) r;
				} else {
					// Calculate luminance cheaply, favoring green.
					luminances[offset + x] = (byte) ((r + g + g + b) >> 2);
				}
			}
		}
	}
 
Example 9
Project: Botanist   File: GifSequenceWriter.java
/**
 * Convert the image into an array from a bitmap
 * @param img - image to convert
 * @return - Returns the array representation of the bitmap
 */
private int[] getImageData(Bitmap img) {
    int w = img.getWidth();
    int h = img.getHeight();
    int[] data = new int[w * h];
    img.getPixels(data, 0, w, 0, 0, w, h);
    return data;
}
 
Example 10
Project: Cam2Caption   File: Camera2BasicFragment.java
float[] Preprocess(Bitmap imBitmap){
    imBitmap = Bitmap.createScaledBitmap(imBitmap, IMAGE_SIZE, IMAGE_SIZE, true);
    int[] intValues = new int[IMAGE_SIZE * IMAGE_SIZE];
    float[] floatValues = new float[IMAGE_SIZE * IMAGE_SIZE * 3];

    imBitmap.getPixels(intValues, 0, IMAGE_SIZE, 0, 0, IMAGE_SIZE, IMAGE_SIZE);

    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3] = ((float)((val >> 16) & 0xFF))/255;//R
        floatValues[i * 3 + 1] = ((float)((val >> 8) & 0xFF))/255;//G
        floatValues[i * 3 + 2] = ((float)((val & 0xFF)))/255;//B
    }
    return floatValues;
}
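IMAGE_SIZE is a constant defined elsewhere in the fragment; its value depends on the input resolution the model expects. A typical declaration might look like this (an assumption, not taken from the project):

// Assumed constant; many image models expect square inputs of a fixed size.
private static final int IMAGE_SIZE = 224;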
 
Example 11
Project: QuadTreeAndroid   File: QuadTreeSplitter.java
/**
 * get pixel from bitmap and convert it in to two-dimensional array
 */
private int[][] getPixelsFromBitmap(Bitmap bitmap) {
    int[] imagePixels = new int[bitmap.getWidth() * bitmap.getHeight()];
    int[][] pixelsBitmap = new int[bitmap.getWidth()][bitmap.getHeight()];

    bitmap.getPixels(imagePixels, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());

    for (int i = 0; i < bitmap.getWidth(); i++) {
        for (int j = 0; j < bitmap.getHeight(); j++) {
            pixelsBitmap[i][j] = imagePixels[(j * bitmap.getWidth()) + i];
        }
    }
    return pixelsBitmap;
}
 
Example 12
Project: ZxingScan   File: BitmapLuminanceSource.java
public BitmapLuminanceSource(Bitmap bitmap) {
    super(bitmap.getWidth(), bitmap.getHeight());

    // First, get the pixel data of the bitmap as an int array
    int[] data = new int[bitmap.getWidth() * bitmap.getHeight()];
    this.bitmapPixels = new byte[bitmap.getWidth() * bitmap.getHeight()];
    bitmap.getPixels(data, 0, getWidth(), 0, 0, getWidth(), getHeight());

    // Convert the int array to a byte array, keeping only the blue channel of each pixel as the luminance value
    for (int i = 0; i < data.length; i++) {
        this.bitmapPixels[i] = (byte) data[i];
    }
}
 
Example 13
Project: Tensorflow_Andriod_With_Audio_Output   File: TensorFlowMultiBoxDetector.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
  // Log this method so that it can be analyzed with systrace.
  Trace.beginSection("recognizeImage");

  Trace.beginSection("preprocessBitmap");
  // Preprocess the image data from 0-255 int to normalized float based
  // on the provided parameters.
  bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());

  for (int i = 0; i < intValues.length; ++i) {
    floatValues[i * 3 + 0] = ((intValues[i] & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 1] = (((intValues[i] >> 8) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 2] = (((intValues[i] >> 16) & 0xFF) - imageMean) / imageStd;
  }
  Trace.endSection(); // preprocessBitmap

  // Copy the input data into TensorFlow.
  Trace.beginSection("fillNodeFloat");
  inferenceInterface.fillNodeFloat(
      inputName, new int[] {1, inputSize, inputSize, 3}, floatValues);
  Trace.endSection();

  // Run the inference call.
  Trace.beginSection("runInference");
  inferenceInterface.runInference(outputNames);
  Trace.endSection();

  // Copy the output Tensor back into the output array.
  Trace.beginSection("readNodeFloat");
  final float[] outputScoresEncoding = new float[numLocations];
  final float[] outputLocationsEncoding = new float[numLocations * 4];
  inferenceInterface.readNodeFloat(outputNames[0], outputLocationsEncoding);
  inferenceInterface.readNodeFloat(outputNames[1], outputScoresEncoding);
  Trace.endSection();

  outputLocations = decodeLocationsEncoding(outputLocationsEncoding);
  outputScores = decodeScoresEncoding(outputScoresEncoding);

  // Find the best detections.
  final PriorityQueue<Recognition> pq =
      new PriorityQueue<Recognition>(
          1,
          new Comparator<Recognition>() {
            @Override
            public int compare(final Recognition lhs, final Recognition rhs) {
              // Intentionally reversed to put high confidence at the head of the queue.
              return Float.compare(rhs.getConfidence(), lhs.getConfidence());
            }
          });

  // Scale them back to the input size.
  for (int i = 0; i < outputScores.length; ++i) {
    final RectF detection =
        new RectF(
            outputLocations[4 * i] * inputSize,
            outputLocations[4 * i + 1] * inputSize,
            outputLocations[4 * i + 2] * inputSize,
            outputLocations[4 * i + 3] * inputSize);
    pq.add(new Recognition("" + i, null, outputScores[i], detection));
  }

  final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
  for (int i = 0; i < Math.min(pq.size(), MAX_RESULTS); ++i) {
    recognitions.add(pq.poll());
  }
  Trace.endSection(); // "recognizeImage"
  return recognitions;
}
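The intValues and floatValues buffers used above (and in the similar classifier examples below) are fields initialized elsewhere in the class, typically in its factory method. A plausible declaration, sized to the model input (an assumption, not the project's actual code):

// Preallocated once so recognizeImage() does not allocate per frame.
private int[] intValues = new int[inputSize * inputSize];
private float[] floatValues = new float[inputSize * inputSize * 3];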
 
Example 14
Project: MegviiFacepp-Android-SDK   File: ConUtil.java
public static byte[] convertYUV21FromRGB(Bitmap bitmap){
	bitmap = rotaingImageView(90, bitmap);

	int inputWidth = bitmap.getWidth();
	int inputHeight = bitmap.getHeight();

	int[] argb = new int[inputWidth * inputHeight];

	bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);

	byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];

	encodeYUV420SP(yuv, argb, inputWidth, inputHeight);

	bitmap.recycle();

	return yuv;

}
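rotaingImageView() is not shown in this snippet; a common implementation rotates the bitmap through a Matrix (a sketch under that assumption):

public static Bitmap rotaingImageView(int angle, Bitmap bitmap) {
	// Rotate the bitmap by the given angle.
	Matrix matrix = new Matrix();
	matrix.postRotate(angle);
	return Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
}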
 
Example 15
Project: XFrame   File: XBitmapUtils.java
/**
 * Apply a sharpen effect.
 *
 * @param bitmap the source bitmap
 * @return the sharpened bitmap
 */
public static Bitmap sharpen(Bitmap bitmap) {
    // Laplacian convolution kernel
    int[] laplacian = new int[]{-1, -1, -1, -1, 9, -1, -1, -1, -1};

    int width = bitmap.getWidth();
    int height = bitmap.getHeight();
    Bitmap newBitmap = Bitmap.createBitmap(width, height,
            Config.RGB_565);

    int pixR = 0;
    int pixG = 0;
    int pixB = 0;

    int pixColor = 0;

    int newR = 0;
    int newG = 0;
    int newB = 0;

    int idx = 0;
    float alpha = 0.3F;
    int[] pixels = new int[width * height];
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
    for (int i = 1, length = height - 1; i < length; i++) {
        for (int k = 1, len = width - 1; k < len; k++) {
            idx = 0;
            for (int m = -1; m <= 1; m++) {
                for (int n = -1; n <= 1; n++) {
                    pixColor = pixels[(i + n) * width + k + m];
                    pixR = Color.red(pixColor);
                    pixG = Color.green(pixColor);
                    pixB = Color.blue(pixColor);

                    newR = newR + (int) (pixR * laplacian[idx] * alpha);
                    newG = newG + (int) (pixG * laplacian[idx] * alpha);
                    newB = newB + (int) (pixB * laplacian[idx] * alpha);
                    idx++;
                }
            }

            newR = Math.min(255, Math.max(0, newR));
            newG = Math.min(255, Math.max(0, newG));
            newB = Math.min(255, Math.max(0, newB));

            pixels[i * width + k] = Color.argb(255, newR, newG, newB);
            newR = 0;
            newG = 0;
            newB = 0;
        }
    }

    newBitmap.setPixels(pixels, 0, width, 0, 0, width, height);
    return newBitmap;
}
 
Example 16
Project: AI_Calorie_Counter_Demo   File: TensorFlowImageClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
  // Log this method so that it can be analyzed with systrace.
  Trace.beginSection("recognizeImage");

  Trace.beginSection("preprocessBitmap");
  // Preprocess the image data from 0-255 int to normalized float based
  // on the provided parameters.
  bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
  for (int i = 0; i < intValues.length; ++i) {
    final int val = intValues[i];
    floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
  }
  Trace.endSection();

  // Copy the input data into TensorFlow.
  Trace.beginSection("feed");
  inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
  Trace.endSection();

  // Run the inference call.
  Trace.beginSection("run");
  inferenceInterface.run(outputNames, logStats);
  Trace.endSection();

  // Copy the output Tensor back into the output array.
  Trace.beginSection("fetch");
  inferenceInterface.fetch(outputName, outputs);
  Trace.endSection();

  // Find the best classifications.
  PriorityQueue<Recognition> pq =
      new PriorityQueue<Recognition>(
          3,
          new Comparator<Recognition>() {
            @Override
            public int compare(Recognition lhs, Recognition rhs) {
              // Intentionally reversed to put high confidence at the head of the queue.
              return Float.compare(rhs.getConfidence(), lhs.getConfidence());
            }
          });
  for (int i = 0; i < outputs.length; ++i) {
    if (outputs[i] > THRESHOLD) {
      pq.add(
          new Recognition(
              "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
    }
  }
  final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
  int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
  for (int i = 0; i < recognitionsSize; ++i) {
    recognitions.add(pq.poll());
  }
  Trace.endSection(); // "recognizeImage"
  return recognitions;
}
 
Example 17
Project: react-native-tensorflow   File: ImageRecognizer.java
public WritableArray recognizeImage(final String image,
                                    final String inputName,
                                    final Integer inputSize,
                                    final String outputName,
                                    final Integer maxResults,
                                    final Double threshold) {

    String inputNameResolved = inputName != null ? inputName : "input";
    String outputNameResolved = outputName != null ? outputName : "output";
    Integer maxResultsResolved = maxResults != null ? maxResults : MAX_RESULTS;
    Float thresholdResolved = threshold != null ? threshold.floatValue() : THRESHOLD;

    Bitmap bitmapRaw = loadImage(resourceManager.loadResource(image));

    int inputSizeResolved = inputSize != null ? inputSize : 224;
    int[] intValues = new int[inputSizeResolved * inputSizeResolved];
    float[] floatValues = new float[inputSizeResolved * inputSizeResolved * 3];

    Bitmap bitmap = Bitmap.createBitmap(inputSizeResolved, inputSizeResolved, Bitmap.Config.ARGB_8888);
    Matrix matrix = createMatrix(bitmapRaw.getWidth(), bitmapRaw.getHeight(), inputSizeResolved, inputSizeResolved);
    final Canvas canvas = new Canvas(bitmap);
    canvas.drawBitmap(bitmapRaw, matrix, null);
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    }
    Tensor tensor = Tensor.create(new long[]{1, inputSizeResolved, inputSizeResolved, 3}, FloatBuffer.wrap(floatValues));
    inference.feed(inputNameResolved, tensor);
    inference.run(new String[] {outputNameResolved}, false);
    ReadableArray outputs = inference.fetch(outputNameResolved);

    List<WritableMap> results = new ArrayList<>();
    for (int i = 0; i < outputs.size(); ++i) {
        if (outputs.getDouble(i) > thresholdResolved) {
            WritableMap entry = new WritableNativeMap();
            entry.putString("id", String.valueOf(i));
            entry.putString("name", labels.length > i ? labels[i] : "unknown");
            entry.putDouble("confidence", outputs.getDouble(i));
            results.add(entry);
        }
    }

    Collections.sort(results, new Comparator<ReadableMap>() {
        @Override
        public int compare(ReadableMap first, ReadableMap second) {
            return Double.compare(second.getDouble("confidence"), first.getDouble("confidence"));
        }
    });
    int finalSize = Math.min(results.size(), maxResultsResolved);
    WritableArray array = new WritableNativeArray();
    for (int i = 0; i < finalSize; i++) {
        array.pushMap(results.get(i));
    }

    return array;
}
 
Example 18
Project: XFrame   File: XBitmapUtils.java
/**
 * Apply an emboss effect.
 *
 * @param bitmap the source bitmap
 * @return the embossed bitmap
 */
public static Bitmap emboss(Bitmap bitmap) {
    int width = bitmap.getWidth();
    int height = bitmap.getHeight();
    Bitmap newBitmap = Bitmap.createBitmap(width, height,
            Config.RGB_565);

    int pixR = 0;
    int pixG = 0;
    int pixB = 0;

    int pixColor = 0;

    int newR = 0;
    int newG = 0;
    int newB = 0;

    int[] pixels = new int[width * height];
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
    int pos = 0;
    for (int i = 1, length = height - 1; i < length; i++) {
        for (int k = 1, len = width - 1; k < len; k++) {
            pos = i * width + k;
            pixColor = pixels[pos];

            pixR = Color.red(pixColor);
            pixG = Color.green(pixColor);
            pixB = Color.blue(pixColor);

            pixColor = pixels[pos + 1];
            newR = Color.red(pixColor) - pixR + 127;
            newG = Color.green(pixColor) - pixG + 127;
            newB = Color.blue(pixColor) - pixB + 127;

            newR = Math.min(255, Math.max(0, newR));
            newG = Math.min(255, Math.max(0, newG));
            newB = Math.min(255, Math.max(0, newB));

            pixels[pos] = Color.argb(255, newR, newG, newB);
        }
    }

    newBitmap.setPixels(pixels, 0, width, 0, 0, width, height);
    return newBitmap;
}
 
Example 19
Project: SortingHatAndroid   File: TensorFlowImageClassifier.java
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {
  // Log this method so that it can be analyzed with systrace.
  Trace.beginSection("recognizeImage");

  Trace.beginSection("preprocessBitmap");
  // Preprocess the image data from 0-255 int to normalized float based
  // on the provided parameters.
  bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
  for (int i = 0; i < intValues.length; ++i) {
    final int val = intValues[i];
    floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
    floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
  }
  Trace.endSection();

  // Copy the input data into TensorFlow.
  Trace.beginSection("fillNodeFloat");
  inferenceInterface.fillNodeFloat(
      inputName, new int[] {1, inputSize, inputSize, 3}, floatValues);
  Trace.endSection();

  // Run the inference call.
  Trace.beginSection("runInference");
  inferenceInterface.runInference(outputNames);
  Trace.endSection();

  // Copy the output Tensor back into the output array.
  Trace.beginSection("readNodeFloat");
  inferenceInterface.readNodeFloat(outputName, outputs);
  Trace.endSection();

  // Find the best classifications.
  PriorityQueue<Recognition> pq =
      new PriorityQueue<Recognition>(
          3,
          new Comparator<Recognition>() {
            @Override
            public int compare(Recognition lhs, Recognition rhs) {
              // Intentionally reversed to put high confidence at the head of the queue.
              return Float.compare(rhs.getConfidence(), lhs.getConfidence());
            }
          });
  for (int i = 0; i < outputs.length; ++i) {
    if (outputs[i] > THRESHOLD) {
      pq.add(
          new Recognition(
              "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
    }
  }
  final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
  int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
  for (int i = 0; i < recognitionsSize; ++i) {
    recognitions.add(pq.poll());
  }
  Trace.endSection(); // "recognizeImage"
  return recognitions;
}
 
Example 20
Project: PaoMovie   File: HoDragVideo.java
/** Convert a bitmap to YUV NV21 bytes **/
public static byte[] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {

	int[] argb = new int[inputWidth * inputHeight];

	scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);

	byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
	encodeYUV420SP(yuv, argb, inputWidth, inputHeight);

	scaled.recycle();

	return yuv;
}
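A usage sketch (source, previewWidth, and previewHeight are hypothetical; the bitmap is scaled to the encoder's frame size first, and getNV21() recycles the scaled copy internally):

Bitmap scaled = Bitmap.createScaledBitmap(source, previewWidth, previewHeight, true);
byte[] nv21 = getNV21(previewWidth, previewHeight, scaled);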