Java Code Examples for android.graphics.Bitmap#getPixels()

The following examples show how to use android.graphics.Bitmap#getPixels() . These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may also want to check out the right sidebar, which shows related API usages.
Example 1
Source Project: Slide   File: ImageUtil.java    License: GNU General Public License v3.0 6 votes vote down vote up
/**
 * Replaces every pixel in {@code bm} that exactly matches {@code targetcolor}
 * with the corresponding pixel from {@code src}, writing the result back into
 * {@code bm}.
 *
 * @param bm          destination bitmap, modified in place
 * @param src         source bitmap supplying replacement pixels
 * @param targetcolor ARGB color value to replace (exact match only)
 * @param tolerance   currently unused; reserved for fuzzy color matching
 */
public static void drawWithTargetColor(Bitmap bm, Bitmap src, int targetcolor, int tolerance) {
    // Operate on the overlapping region of the two bitmaps. The original code
    // used Math.max here, which makes getPixels() throw
    // IllegalArgumentException whenever the bitmaps differ in size (the
    // requested region would extend beyond the smaller bitmap's bounds).
    int width = Math.min(bm.getWidth(), src.getWidth());
    int height = Math.min(bm.getHeight(), src.getHeight());

    int[] bmpixels = new int[width * height];
    // stride == width so rows are packed contiguously in the array
    bm.getPixels(bmpixels, 0, width, 0, 0, width, height);

    int[] srcpixels = new int[width * height];
    src.getPixels(srcpixels, 0, width, 0, 0, width, height);

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int i = x + width * y;
            if (bmpixels[i] == targetcolor) {
                bmpixels[i] = srcpixels[i];
            }
        }
    }
    bm.setPixels(bmpixels, 0, width, 0, 0, width, height);
}
 
Example 2
/**
 * Builds a vertical gradient bitmap of the given color: full {@code alpha}
 * at the top row, fading quadratically to transparent at the bottom.
 *
 * @param width  bitmap width in pixels
 * @param height bitmap height in pixels
 * @param color  base color whose alpha is scaled per row
 * @return a newly allocated ARGB_8888 bitmap containing the gradient
 */
public static Bitmap getGradientBitmap(int width, int height, @ColorInt int color) {
  Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);

  int alpha = Color.alpha(color);
  int red = Color.red(color);
  int green = Color.green(color);
  int blue = Color.blue(color);

  int[] pixels = new int[width * height];
  // No getPixels() call is needed here: every element of the array is
  // assigned below, so reading the freshly created (all-transparent) bitmap
  // was redundant work in the original.
  for (int y = 0; y < height; y++) {
    // Quadratic fall-off: alpha * ((height - y) / height)^2, computed in
    // float to avoid integer truncation before the final cast.
    int gradientAlpha = (int) ((float) alpha * (float) (height - y) * (float) (height - y)
        / (float) height
        / (float) height);
    for (int x = 0; x < width; x++) {
      pixels[x + y * width] = Color.argb(gradientAlpha, red, green, blue);
    }
  }

  bitmap.setPixels(pixels, 0, width, 0, 0, width, height);
  return bitmap;
}
 
Example 3
/**
 * Normalizes a bitmap's RGB channels into the [0, 1] range.
 *
 * @param bitmap source image
 * @return a [height][width][3] array holding {r, g, b} floats per pixel
 */
public static float[][][] normalizeImage(Bitmap bitmap) {
    final int height = bitmap.getHeight();
    final int width = bitmap.getWidth();
    final float scale = 255;

    int[] pixels = new int[height * width];
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height);

    // Note the [height][width] ordering: rows first, then columns.
    float[][][] normalized = new float[height][width][3];
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            final int argb = pixels[row * width + col];
            normalized[row][col][0] = ((argb >> 16) & 0xFF) / scale; // red
            normalized[row][col][1] = ((argb >> 8) & 0xFF) / scale;  // green
            normalized[row][col][2] = (argb & 0xFF) / scale;         // blue
        }
    }
    return normalized;
}
 
Example 4
/**
 * Writes the bitmap's pixel data into the {@code imgData} ByteBuffer as
 * floats: each channel value is shifted by {@code IMAGE_MEAN} and divided by
 * {@code IMAGE_STD}. No-op when {@code imgData} has not been allocated.
 *
 * NOTE(review): the loop visits DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y pixels while
 * getPixels() fills {@code intValues} from the bitmap's own dimensions —
 * this assumes the bitmap is exactly DIM_IMG_SIZE_X x DIM_IMG_SIZE_Y;
 * confirm at the call sites.
 */
  private void convertBitmapToByteBuffer(Bitmap bitmap) {
    if (imgData == null) {
      return; // buffer not allocated yet; nothing to write
    }
    imgData.rewind(); // start writing from position 0
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    // Convert the image to floating point.
    int pixel = 0;
    long startTime = SystemClock.uptimeMillis();
    for (int i = 0; i < DIM_IMG_SIZE_X; ++i) {
      for (int j = 0; j < DIM_IMG_SIZE_Y; ++j) {
        final int val = intValues[pixel++];
        // Channel order written: R, G, B (alpha is discarded).
        imgData.putFloat((((val >> 16) & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
        imgData.putFloat((((val >> 8) & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
        imgData.putFloat((((val) & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
      }
    }
    long endTime = SystemClock.uptimeMillis();
//    Log.d(TAG, "Timecost to put values into ByteBuffer: " + Long.toString(endTime - startTime));
  }
 
Example 5
/**
 * Desaturates a bitmap: converts it to grayscale by averaging the RGB
 * channels of every pixel while preserving alpha.
 *
 * @param target source bitmap (left untouched)
 * @return a new mutable ARGB_8888 copy with all pixels desaturated
 */
public static Bitmap toGray(Bitmap target) {
    Bitmap result = target.copy(Bitmap.Config.ARGB_8888, true);
    final int w = result.getWidth();
    final int h = result.getHeight();

    // Pull the whole bitmap into a flat, row-major pixel array.
    int[] pixels = new int[w * h];
    result.getPixels(pixels, 0, w, 0, 0, w, h);

    for (int i = 0; i < pixels.length; i++) {
        final int color = pixels[i];
        // Simple average of the three channels; alpha passes through.
        final int gray = (Color.red(color) + Color.green(color) + Color.blue(color)) / 3;
        pixels[i] = Color.argb(Color.alpha(color), gray, gray, gray);
    }

    result.setPixels(pixels, 0, w, 0, 0, w, h);
    return result;
}
 
Example 6
Source Project: SimplifyReader   File: DecodeUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Attempts to decode a barcode / QR code from the given bitmap using ZXing.
 *
 * @param bitmap image to scan
 * @return the decoded text, or {@code null} if nothing could be decoded
 */
public String decodeWithZxing(Bitmap bitmap) {
    MultiFormatReader multiFormatReader = new MultiFormatReader();
    multiFormatReader.setHints(changeZXingDecodeDataMode());

    int width = bitmap.getWidth();
    int height = bitmap.getHeight();
    int[] pixels = new int[width * height];
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height);

    // `new` never yields null, so the original `if (source != null)` guard
    // was always true and has been removed.
    RGBLuminanceSource source = new RGBLuminanceSource(width, height, pixels);
    BinaryBitmap binaryBitmap = new BinaryBitmap(new HybridBinarizer(source));

    Result rawResult = null;
    try {
        rawResult = multiFormatReader.decodeWithState(binaryBitmap);
    } catch (ReaderException re) {
        // Nothing decodable in the image: fall through and return null.
    } finally {
        multiFormatReader.reset();
    }

    return rawResult != null ? rawResult.getText() : null;
}
 
Example 7
/**
 * Converts a color bitmap to grayscale using the standard luminance weights
 * (0.3 R + 0.59 G + 0.11 B).
 *
 * @param img source bitmap
 * @return a new RGB_565 bitmap containing the grayscale image
 */
public static Bitmap convertGreyImg(Bitmap img) {
    final int width = img.getWidth();
    final int height = img.getHeight();

    // Snapshot every pixel into a flat, row-major array.
    int[] pixels = new int[width * height];
    img.getPixels(pixels, 0, width, 0, 0, width, height);

    final int opaque = 0xFF << 24;
    for (int idx = 0; idx < pixels.length; idx++) {
        final int argb = pixels[idx];

        final int r = (argb & 0x00FF0000) >> 16;
        final int g = (argb & 0x0000FF00) >> 8;
        final int b = argb & 0x000000FF;

        // Weighted luminance replicated into all three channels.
        final int luma = (int) ((float) r * 0.3 + (float) g * 0.59 + (float) b * 0.11);
        pixels[idx] = opaque | (luma << 16) | (luma << 8) | luma;
    }

    Bitmap result = Bitmap.createBitmap(width, height, Config.RGB_565);
    result.setPixels(pixels, 0, width, 0, 0, width, height);
    return result;
}
 
Example 8
/**
 * Copies {@code source}, recycles the original, filters the copy's pixels
 * via {@code filterPixels}, and writes the filtered pixels back.
 *
 * NOTE(review): setPixels() below uses transformedSpace's dimensions as both
 * stride and region while {@code bitmap} keeps the ORIGINAL width/height —
 * this only works if transformSpace() does not change the rect's size;
 * confirm against the subclass implementations.
 */
@Override
public Bitmap transform(Bitmap source) {
    int width = source.getWidth();
    int height = source.getHeight();

    // Work on a mutable copy; the input bitmap is consumed (recycled).
    Bitmap bitmap = source.copy(source.getConfig(), true);
    source.recycle();

    originalSpace = new Rect(0, 0, width, height);
    transformedSpace = new Rect(0, 0, width, height);
    transformSpace(transformedSpace);

    int[] inPixels = new int[width * height];
    bitmap.getPixels(inPixels, 0, width, 0, 0, width, height);
    inPixels = filterPixels(width, height, inPixels, transformedSpace);

    bitmap.setPixels(inPixels, 0, transformedSpace.width(), 0, 0, transformedSpace.width(),
            transformedSpace.height());

    return bitmap;
}
 
Example 9
/**
 * Luminance source backed by an Android bitmap: each pixel's lowest byte
 * (the blue channel) is used as the luminance value for ZXing decoding.
 *
 * @param bitmap image to wrap
 */
protected BitmapLuminanceSource(Bitmap bitmap) {
	super(bitmap.getWidth(), bitmap.getHeight());

	// Fetch the full ARGB pixel array of the image.
	final int pixelCount = bitmap.getWidth() * bitmap.getHeight();
	int[] argb = new int[pixelCount];
	this.bitmapPixels = new byte[pixelCount];
	bitmap.getPixels(argb, 0, getWidth(), 0, 0, getWidth(), getHeight());

	// The narrowing cast keeps only the lowest 8 bits (the blue channel),
	// which serves as the grayscale approximation for decoding.
	for (int i = 0; i < argb.length; i++) {
		this.bitmapPixels[i] = (byte) argb[i];
	}
}
 
Example 10
Source Project: sealrtc-android   File: BitmapUtil.java    License: MIT License 5 votes vote down vote up
/**
 * Converts a bitmap into NV21 (YUV420SP) byte data and recycles the bitmap.
 *
 * @param inputWidth  width of {@code scaled} in pixels
 * @param inputHeight height of {@code scaled} in pixels
 * @param scaled      bitmap to convert; recycled before returning
 * @return NV21-encoded bytes
 */
public static byte[] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {
    int[] argb = new int[inputWidth * inputHeight];
    scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);

    // Y plane: one byte per pixel. Chroma plane: 2 bytes per 2x2 block,
    // with odd dimensions rounded up via ceil().
    int chromaWidth = (int) Math.ceil((float) inputWidth / 2);
    int chromaHeight = (int) Math.ceil((float) inputHeight / 2);
    byte[] yuv = new byte[inputHeight * inputWidth + 2 * chromaHeight * chromaWidth];

    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    scaled.recycle();
    return yuv;
}
 
Example 11
Source Project: Aegis   File: MainActivity.java    License: GNU General Public License v3.0 5 votes vote down vote up
/**
 * Handles the result of an image-picker intent: decodes the chosen image,
 * scans it for a QR code with ZXing, parses the otpauth URI and opens the
 * add-entry screen. Any failure is reported via an error dialog.
 */
private void onScanImageResult(Intent intent) {
    Uri inputFile = (intent.getData());
    Bitmap bitmap;

    try {
        BitmapFactory.Options bmOptions = new BitmapFactory.Options();

        try (InputStream inputStream = getContentResolver().openInputStream(inputFile)) {
            bitmap = BitmapFactory.decodeStream(inputStream, null, bmOptions);
        }

        // decodeStream returns null for unreadable/corrupt image data; the
        // original code then crashed with an uncaught NullPointerException
        // (NPE is not in the catch list). Route it through the existing
        // IOException error path instead.
        if (bitmap == null) {
            throw new IOException("Unable to decode image: " + inputFile);
        }

        int[] intArray = new int[bitmap.getWidth() * bitmap.getHeight()];
        bitmap.getPixels(intArray, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());

        LuminanceSource source = new RGBLuminanceSource(bitmap.getWidth(), bitmap.getHeight(), intArray);
        BinaryBitmap binaryBitmap = new BinaryBitmap(new HybridBinarizer(source));

        Reader reader = new MultiFormatReader();
        Result result = reader.decode(binaryBitmap);

        GoogleAuthInfo info = GoogleAuthInfo.parseUri(result.getText());
        VaultEntry entry = new VaultEntry(info);

        startEditEntryActivity(CODE_ADD_ENTRY, entry, true);
    } catch (NotFoundException | IOException | ChecksumException | FormatException | GoogleAuthInfoException e) {
        e.printStackTrace();
        Dialogs.showErrorDialog(this, R.string.unable_to_read_qrcode, e);
    }
}
 
Example 12
/**
 * Luminance source backed by an Android bitmap: each pixel's lowest byte
 * (the blue channel) becomes the luminance sample used for decoding.
 *
 * @param bitmap image to wrap
 */
public BitmapLuminanceSource(Bitmap bitmap) {
    super(bitmap.getWidth(), bitmap.getHeight());

    // Fetch the full ARGB pixel array of the image.
    final int pixelCount = bitmap.getWidth() * bitmap.getHeight();
    int[] argb = new int[pixelCount];
    this.bitmapPixels = new byte[pixelCount];
    bitmap.getPixels(argb, 0, getWidth(), 0, 0, getWidth(), getHeight());

    // The narrowing cast keeps only the lowest 8 bits (the blue channel),
    // which serves as the grayscale approximation for decoding.
    for (int i = 0; i < argb.length; i++) {
        this.bitmapPixels[i] = (byte) argb[i];
    }
}
 
Example 13
/**
 * Adjusts the brightness of a bitmap in place via the native processor.
 *
 * @param value      brightness adjustment to apply
 * @param inputImage bitmap to modify; also returned for chaining
 * @return the same (now adjusted) bitmap instance
 */
public static Bitmap doBrightness(int value, Bitmap inputImage) {
    final int w = inputImage.getWidth();
    final int h = inputImage.getHeight();

    // Round-trip through a pixel array so the native code can operate on it.
    int[] buffer = new int[w * h];
    inputImage.getPixels(buffer, 0, w, 0, 0, w, h);
    NativeImageProcessor.doBrightness(buffer, value, w, h);
    inputImage.setPixels(buffer, 0, w, 0, 0, w, h);

    return inputImage;
}
 
Example 14
/**
 * Adjusts the saturation of a bitmap in place via the native processor.
 *
 * @param inputImage bitmap to modify; also returned for chaining
 * @param level      saturation level to apply
 * @return the same (now adjusted) bitmap instance
 */
public static Bitmap doSaturation(Bitmap inputImage, float level) {
    final int w = inputImage.getWidth();
    final int h = inputImage.getHeight();

    // Round-trip through a pixel array so the native code can operate on it.
    int[] buffer = new int[w * h];
    inputImage.getPixels(buffer, 0, w, 0, 0, w, h);
    NativeImageProcessor.doSaturation(buffer, level, w, h);
    inputImage.setPixels(buffer, 0, w, 0, 0, w, h);
    return inputImage;
}
 
Example 15
Source Project: ssj   File: CameraUtil.java    License: GNU General Public License v3.0 5 votes vote down vote up
/**
 * Converts bitmap to corresponding byte array and writes it
 * to the output buffer.
 *
 * @param bitmap Bitmap to convert to byte array.
 * @param intOutput Integer output buffer; must hold width*height ints.
 * @param byteOutput Byte output buffer; must hold width*height*3 bytes.
 */
public static void convertBitmapToByteArray(Bitmap bitmap, int[] intOutput, byte[] byteOutput)
{
    bitmap.getPixels(intOutput, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());

    // Hoist the pixel count out of the loop: the original re-evaluated
    // getWidth() * getHeight() on every iteration of the loop condition.
    final int pixelCount = bitmap.getWidth() * bitmap.getHeight();
    for (int i = 0; i < pixelCount; ++i)
    {
        final int pixel = intOutput[i];
        // Pack as 3 bytes per pixel in R, G, B order; alpha is dropped.
        byteOutput[i * 3] = (byte)((pixel >> 16) & 0xFF);
        byteOutput[i * 3 + 1] = (byte)((pixel >> 8) & 0xFF);
        byteOutput[i * 3 + 2] = (byte)(pixel & 0xFF);
    }
}
 
Example 16
/**
 * Runs the TensorFlow object-detection model on a bitmap and returns the
 * detected objects sorted by descending confidence.
 *
 * NOTE(review): assumes the bitmap and the preallocated intValues/byteValues
 * buffers all match inputSize x inputSize — confirm where they are created.
 *
 * @param bitmap input image to run detection on
 * @return recognitions ordered by confidence (highest first)
 */
public List<Recognition> recognizeImage(Bitmap bitmap) {
    int i;
    Trace.beginSection("recognizeImage");
    Trace.beginSection("preprocessBitmap");
    bitmap.getPixels(this.intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    // Repack ARGB ints into 3 bytes per pixel in B, G, R index order
    // (offset +2 = B, +1 = G, +0 = R); alpha is dropped.
    for (i = 0; i < this.intValues.length; i++) {
        this.byteValues[(i * 3) + 2] = (byte) (this.intValues[i] & 255);
        this.byteValues[(i * 3) + 1] = (byte) ((this.intValues[i] >> 8) & 255);
        this.byteValues[(i * 3) + 0] = (byte) ((this.intValues[i] >> 16) & 255);
    }
    Trace.endSection();
    Trace.beginSection("feed");
    this.inferenceInterface.feed(this.inputName, this.byteValues, 1, (long) this.inputSize, (long) this.inputSize, 3);
    Trace.endSection();
    Trace.beginSection("run");
    this.inferenceInterface.run(this.outputNames, this.logStats);
    Trace.endSection();
    Trace.beginSection("fetch");
    // Fixed-size output tensors: up to 100 detections, 4 box coords each.
    this.outputLocations = new float[400];
    this.outputScores = new float[100];
    this.outputClasses = new float[100];
    this.outputNumDetections = new float[1];
    this.inferenceInterface.fetch(this.outputNames[0], this.outputLocations);
    this.inferenceInterface.fetch(this.outputNames[1], this.outputScores);
    this.inferenceInterface.fetch(this.outputNames[2], this.outputClasses);
    this.inferenceInterface.fetch(this.outputNames[3], this.outputNumDetections);
    Trace.endSection();
    // Max-heap on confidence: the comparator is reversed so the highest
    // score sits at the head of the queue.
    PriorityQueue<Recognition> pq = new PriorityQueue(1, new Comparator<Recognition>() {
        public int compare(Recognition lhs, Recognition rhs) {
            return Float.compare(rhs.getConfidence().floatValue(), lhs.getConfidence().floatValue());
        }
    });
    // Boxes arrive as normalized [top, left, bottom, right]; scale to pixels
    // and reorder into RectF(left, top, right, bottom).
    for (i = 0; i < this.outputScores.length; i++) {
        pq.add(new Recognition("" + ((int) this.outputClasses[i]), (String) this.labels.get((int) this.outputClasses[i]), Float.valueOf(this.outputScores[i]), new RectF(this.outputLocations[(i * 4) + 1] * ((float) this.inputSize), this.outputLocations[i * 4] * ((float) this.inputSize), this.outputLocations[(i * 4) + 3] * ((float) this.inputSize), this.outputLocations[(i * 4) + 2] * ((float) this.inputSize))));
    }
    // Drain at most 100 results in descending-confidence order.
    ArrayList<Recognition> recognitions = new ArrayList();
    for (i = 0; i < Math.min(pq.size(), 100); i++) {
        recognitions.add(pq.poll());
    }
    Trace.endSection();
    return recognitions;
}
 
Example 17
Source Project: 365browser   File: FaceDetectionImpl.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Detects faces in a raw frame delivered through a shared buffer.
 * Validates the arguments, copies the buffer into an ARGB_8888 Bitmap,
 * converts it to the RGB_565 / un-premultiplied form FaceDetector requires,
 * then runs detection on a background thread and invokes {@code callback}
 * with the bounding boxes of the detected faces (empty array on failure).
 */
@Override
public void detect(SharedBufferHandle frameData, final int width, final int height,
        final DetectResponse callback) {
    final long numPixels = (long) width * height;
    // TODO(xianglu): https://crbug.com/670028 homogeneize overflow checking.
    if (!frameData.isValid() || width <= 0 || height <= 0 || numPixels > (Long.MAX_VALUE / 4)) {
        Log.d(TAG, "Invalid argument(s).");
        callback.call(new FaceDetectionResult[0]);
        return;
    }

    // 4 bytes per pixel (ARGB_8888).
    ByteBuffer imageBuffer = frameData.map(0, numPixels * 4, MapFlags.none());
    if (imageBuffer.capacity() <= 0) {
        Log.d(TAG, "Failed to map from SharedBufferHandle.");
        callback.call(new FaceDetectionResult[0]);
        return;
    }

    Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);

    // An int array is needed to construct a Bitmap. However the Bytebuffer
    // we get from |sharedBufferHandle| is directly allocated and does not
    // have a supporting array. Therefore we need to copy from |imageBuffer|
    // to create this intermediate Bitmap.
    // TODO(xianglu): Consider worker pool as appropriate threads.
    // http://crbug.com/655814
    bitmap.copyPixelsFromBuffer(imageBuffer);

    // A Bitmap must be in 565 format for findFaces() to work. See
    // http://androidxref.com/7.0.0_r1/xref/frameworks/base/media/java/android/media/FaceDetector.java#124
    //
    // It turns out that FaceDetector is not able to detect correctly if
    // simply using pixmap.setConfig(). The reason might be that findFaces()
    // needs non-premultiplied ARGB arrangement, while the alpha type in the
    // original image is premultiplied. We can use getPixels() which does
    // the unmultiplication while copying to a new array. See
    // http://androidxref.com/7.0.0_r1/xref/frameworks/base/graphics/java/android/graphics/Bitmap.java#538
    int[] pixels = new int[width * height];
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
    final Bitmap unPremultipliedBitmap =
            Bitmap.createBitmap(pixels, width, height, Bitmap.Config.RGB_565);

    // FaceDetector creation and findFaces() might take a long time and trigger a
    // "StrictMode policy violation": they should happen in a background thread.
    AsyncTask.THREAD_POOL_EXECUTOR.execute(new Runnable() {
        @Override
        public void run() {
            final FaceDetector detector = new FaceDetector(width, height, mMaxFaces);
            Face[] detectedFaces = new Face[mMaxFaces];
            // findFaces() will stop at |mMaxFaces|.
            final int numberOfFaces = detector.findFaces(unPremultipliedBitmap, detectedFaces);

            FaceDetectionResult[] faceArray = new FaceDetectionResult[numberOfFaces];

            for (int i = 0; i < numberOfFaces; i++) {
                faceArray[i] = new FaceDetectionResult();

                final Face face = detectedFaces[i];
                final PointF midPoint = new PointF();
                face.getMidPoint(midPoint);
                final float eyesDistance = face.eyesDistance();

                // Approximate the face's bounding box as a square of side
                // 2 * eyesDistance centered on the eyes' midpoint.
                faceArray[i].boundingBox = new RectF();
                faceArray[i].boundingBox.x = midPoint.x - eyesDistance;
                faceArray[i].boundingBox.y = midPoint.y - eyesDistance;
                faceArray[i].boundingBox.width = 2 * eyesDistance;
                faceArray[i].boundingBox.height = 2 * eyesDistance;
                // TODO(xianglu): Consider adding Face.confidence and Face.pose.

                faceArray[i].landmarks = new Landmark[0];
            }

            callback.call(faceArray);
        }
    });
}
 
Example 18
Source Project: PdfBox-Android   File: JPEGFactory.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Creates a grayscale Flate encoded PDImageXObject from the alpha channel
 * of an image.
 *
 * @param document the document where the image will be created.
 * @param image an ARGB image.
 *
 * @return the alpha channel of an image as a grayscale image, or
 *         {@code null} when the image has no alpha channel.
 *
 * @throws IOException if something goes wrong
 */
private static PDImageXObject createAlphaFromARGBImage(PDDocument document, Bitmap image)
    throws IOException
{
    // Without an alpha channel there is nothing to extract.
    if (!image.hasAlpha())
    {
        return null;
    }

    // Snapshot every pixel of the image as packed ARGB ints.
    int[] argb = new int[image.getHeight() * image.getWidth()];
    image.getPixels(argb, 0, image.getWidth(), 0, 0, image.getWidth(), image.getHeight());

    // Emit the alpha of each pixel as one 8-bit grayscale sample.
    ByteArrayOutputStream samples = new ByteArrayOutputStream();
    for (int pixel : argb)
    {
        samples.write(Color.alpha(pixel));
    }
    int bpc = 8; // bits per component

    PDImageXObject pdImage = prepareImageXObject(document, samples.toByteArray(),
        image.getWidth(), image.getHeight(), bpc, PDDeviceGray.INSTANCE);
    return pdImage;
}
 
Example 19
Source Project: dbclf   File: TensorFlowImageClassifier.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Runs the TensorFlow image classifier on a bitmap and returns the best
 * classifications above THRESHOLD, highest confidence first.
 *
 * NOTE(review): assumes the bitmap and the preallocated intValues /
 * floatValues buffers all match inputSize x inputSize — confirm where
 * they are allocated.
 *
 * @param bitmap input image to classify
 * @return up to MAX_RESULTS recognitions ordered by descending confidence
 */
@Override
public List<Recognition> recognizeImage(final Bitmap bitmap) {

    // Preprocess the image data from 0-255 int to normalized float based
    // on the provided parameters.
    bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    // Channel order written: R, G, B (alpha is discarded).
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - imageMean) / imageStd;
        floatValues[i * 3 + 2] = ((val & 0xFF) - imageMean) / imageStd;
    }

    // Copy the input data into TensorFlow.
    inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);

    // Run the inference call.
    inferenceInterface.run(outputNames, logStats);

    // Copy the output Tensor back into the output array.
    inferenceInterface.fetch(outputName, outputs);

    // Find the best classifications.
    PriorityQueue<Recognition> pq =
            new PriorityQueue<Recognition>(
                    3,
                    (lhs, rhs) -> {
                        // Intentionally reversed to put high confidence at the head of the queue.
                        return Float.compare(rhs.getConfidence(), lhs.getConfidence());
                    });

    // Keep only classes whose score clears the threshold; label falls back
    // to "unknown" when the label list is shorter than the output vector.
    for (int i = 0; i < outputs.length; ++i) {
        if (outputs[i] > THRESHOLD) {
            pq.add(
                    new Recognition(
                            "" + i, labels.size() > i ? labels.get(i) : "unknown", outputs[i], null));
        }
    }
    final ArrayList<Recognition> recognitions = new ArrayList<Recognition>();
    final int recognitionsSize = Math.min(pq.size(), MAX_RESULTS);
    for (int i = 0; i < recognitionsSize; ++i) {
        recognitions.add(pq.poll());
    }
    return recognitions;
}
 
Example 20
Source Project: Dashchan   File: CommonUtils.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Trims uniform borders of {@code backgroundColor} from all four sides of a
 * bitmap.
 *
 * @param bitmap bitmap to trim; may be null
 * @param backgroundColor exact ARGB value treated as background
 * @return a newly allocated trimmed bitmap; the original bitmap if no side
 *         could be trimmed; or null if the input is null or trimming would
 *         leave an empty area
 */
@Public
public static Bitmap trimBitmap(Bitmap bitmap, int backgroundColor) {
	if (bitmap == null) {
		return null;
	}
	int width = bitmap.getWidth();
	int height = bitmap.getHeight();
	// Reusable scratch buffer: big enough for one full column OR one full row.
	int[] pixels = new int[Math.max(width, height)];
	int actualLeft = 0;
	int actualRight = width;
	int actualTop = 0;
	int actualBottom = height;
	// Scan columns left-to-right for the first non-background pixel.
	OUT: for (int i = 0; i < width; i++) {
		bitmap.getPixels(pixels, 0, 1, i, 0, 1, height);
		for (int j = 0; j < height; j++) {
			if (pixels[j] != backgroundColor) {
				actualLeft = i;
				break OUT;
			}
		}
	}
	// Scan columns right-to-left; the bound is exclusive, hence i + 1.
	OUT: for (int i = width - 1; i >= 0; i--) {
		bitmap.getPixels(pixels, 0, 1, i, 0, 1, height);
		for (int j = 0; j < height; j++) {
			if (pixels[j] != backgroundColor) {
				actualRight = i + 1;
				break OUT;
			}
		}
	}
	// Scan rows top-to-bottom.
	OUT: for (int i = 0; i < height; i++) {
		bitmap.getPixels(pixels, 0, width, 0, i, width, 1);
		for (int j = 0; j < width; j++) {
			if (pixels[j] != backgroundColor) {
				actualTop = i;
				break OUT;
			}
		}
	}
	// Scan rows bottom-to-top; exclusive bound again.
	OUT: for (int i = height - 1; i >= 0; i--) {
		bitmap.getPixels(pixels, 0, width, 0, i, width, 1);
		for (int j = 0; j < width; j++) {
			if (pixels[j] != backgroundColor) {
				actualBottom = i + 1;
				break OUT;
			}
		}
	}
	if (actualLeft != 0 || actualTop != 0 || actualRight != width || actualBottom != height) {
		if (actualRight > actualLeft && actualBottom > actualTop) {
			// Draw the source shifted so the kept region lands at (0, 0).
			Bitmap newBitmap = Bitmap.createBitmap(actualRight - actualLeft, actualBottom - actualTop,
					Bitmap.Config.ARGB_8888);
			new Canvas(newBitmap).drawBitmap(bitmap, -actualLeft, -actualTop, null);
			return newBitmap;
		}
		return null;
	}
	return bitmap;
}