ImageFormat.java revision 2f17431194efc5a13981550b004dd8d5d2a42e5e
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.graphics;

public class ImageFormat {
    /*
     * these constants are chosen to be binary compatible with their previous
     * location in PixelFormat.java
     */

    public static final int UNKNOWN = 0;

    /**
     * RGB format used for pictures encoded as RGB_565. See
     * {@link android.hardware.Camera.Parameters#setPictureFormat(int)}.
     */
    public static final int RGB_565 = 4;

    /**
     * <p>Android YUV format.</p>
     *
     * <p>This format is exposed to software decoders and applications.</p>
     *
     * <p>YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
     * by (W/2) x (H/2) Cr and Cb planes.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * <li>a vertical stride equal to the height</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height
     * c_stride = ALIGN(stride/2, 16)
     * c_size = c_stride * height/2
     * size = y_size + c_size * 2
     * cr_offset = y_size
     * cb_offset = y_size + c_size</pre>
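     *
     * <p>A minimal sketch (illustrative only; {@code width}, {@code height}, and {@code stride}
     * are assumed to satisfy the constraints above) of computing the buffer layout directly
     * from these formulas:</p>
     *
     * <pre>{@code
     * // All sizes and offsets are in bytes; ALIGN(x, 16) rounds x up to a multiple of 16.
     * int ySize = stride * height;                      // Y plane
     * int cStride = ((stride / 2 + 15) / 16) * 16;      // ALIGN(stride / 2, 16)
     * int cSize = cStride * height / 2;                 // one chroma plane
     * int size = ySize + cSize * 2;                     // total buffer size
     * int crOffset = ySize;                             // Cr plane follows Y
     * int cbOffset = ySize + cSize;                     // Cb plane follows Cr
     * }</pre>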
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>For the older camera API, this format is guaranteed to be supported for
     * {@link android.hardware.Camera} preview images since API level 12; for earlier API versions,
     * check {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     *
     * <p>Note that for camera preview callback use (see
     * {@link android.hardware.Camera#setPreviewCallback}), the
     * <var>stride</var> value is the smallest possible; that is, it is equal
     * to:</p>
     *
     * <pre>stride = ALIGN(width, 16)</pre>
     *
     * @see android.hardware.Camera#setPreviewCallback
     * @see android.hardware.Camera.Parameters#setPreviewFormat
     * @see android.hardware.Camera.Parameters#getSupportedPreviewFormats
     */
    public static final int YV12 = 0x32315659;

    /**
     * <p>Android Y8 format.</p>
     *
     * <p>Y8 is a YUV planar format comprised of a WxH Y plane only, with each pixel
     * being represented by 8 bits. It is equivalent to just the Y plane from {@link #YV12}
     * format.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     *
     * @hide
     */
    public static final int Y8 = 0x20203859;

    /**
     * <p>Android Y16 format.</p>
     *
     * <p>Y16 is a YUV planar format comprised of a WxH Y plane, with each pixel
     * being represented by 16 bits. It is just like {@link #Y8}, but has 16
     * bits per pixel (little endian).</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     *
     * @hide
     */
    public static final int Y16 = 0x20363159;

    /**
     * YCbCr format, used for video.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>Whether this format is supported by the old camera API can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     *
     */
    public static final int NV16 = 0x10;

    /**
     * YCrCb format used for images, which uses the NV21 encoding format.
     *
     * <p>This is the default format
     * for {@link android.hardware.Camera} preview images, when not otherwise set with
     * {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}.</p>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
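     *
     * <p>A minimal sketch (illustrative only, not part of this class) of explicitly selecting
     * this format for preview callbacks with the older {@link android.hardware.Camera} API:</p>
     *
     * <pre>{@code
     * // NV21 is the default preview format, but it can also be requested explicitly.
     * Camera camera = Camera.open();
     * Camera.Parameters params = camera.getParameters();
     * params.setPreviewFormat(ImageFormat.NV21);
     * camera.setParameters(params);
     * // Each onPreviewFrame() callback then delivers one NV21 frame:
     * // a full-resolution Y plane followed by interleaved V/U samples.
     * }</pre>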
     */
    public static final int NV21 = 0x11;

    /**
     * YCbCr format used for images, which uses the YUYV (YUY2) encoding format.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>This is an alternative format for {@link android.hardware.Camera} preview images. Whether
     * this format is supported by the camera hardware can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     */
    public static final int YUY2 = 0x14;

    /**
     * Compressed JPEG format.
     *
     * <p>This format is always supported as an output format for the
     * {@link android.hardware.camera2} API, and as a picture format for the older
     * {@link android.hardware.Camera} API.</p>
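     *
     * <p>As an illustrative sketch (not part of this class), a compressed JPEG buffer received
     * from a camera can be decoded with {@link android.graphics.BitmapFactory}:</p>
     *
     * <pre>{@code
     * // Decode a complete JPEG byte array (e.g. from a picture callback or an
     * // ImageReader plane) into a Bitmap.
     * byte[] jpegData = ...;
     * Bitmap bitmap = BitmapFactory.decodeByteArray(jpegData, 0, jpegData.length);
     * }</pre>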
     */
    public static final int JPEG = 0x100;

    /**
     * <p>Multi-plane Android YUV format.</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:0
     * chroma-subsampled planar or semiplanar buffer (but not fully interleaved),
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>The Y-plane is guaranteed not to be interleaved with the U/V planes
     * (in particular, pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}).</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object.</p>
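     *
     * <p>A minimal sketch (illustrative only; {@code image}, {@code yRow}, and {@code xCol} are
     * assumed to be defined elsewhere) of reading one luma sample and its chroma samples, using
     * the stride guarantees described above:</p>
     *
     * <pre>{@code
     * // Read the luma value at (xCol, yRow) and the chroma values of the
     * // 2x2 block containing it.
     * Image.Plane[] planes = image.getPlanes();     // [0]=Y, [1]=U (Cb), [2]=V (Cr)
     * ByteBuffer yBuf = planes[0].getBuffer();
     * ByteBuffer uBuf = planes[1].getBuffer();
     * ByteBuffer vBuf = planes[2].getBuffer();
     *
     * int y = yBuf.get(yRow * planes[0].getRowStride() + xCol) & 0xFF;
     * int chromaIndex = (yRow / 2) * planes[1].getRowStride()
     *         + (xCol / 2) * planes[1].getPixelStride();
     * int u = uBuf.get(chromaIndex) & 0xFF;
     * int v = vBuf.get(chromaIndex) & 0xFF;
     * }</pre>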
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int YUV_420_888 = 0x23;

    /**
     * <p>General raw camera sensor image format, usually representing a
     * single-channel Bayer-mosaic image. Each pixel color sample is stored with
     * 16 bits of precision.</p>
     *
     * <p>The layout of the color mosaic, the maximum and minimum encoding
     * values of the raw pixel data, the color space of the image, and all other
     * needed information to interpret a raw sensor image must be queried from
     * the {@link android.hardware.camera2.CameraDevice} which produced the
     * image.</p>
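     *
     * <p>A minimal sketch (illustrative only; the exact layout and interpretation must still be
     * queried from the camera as described above, and {@code image}, {@code x}, and {@code y}
     * are assumed to be defined elsewhere) of reading one 16-bit sample from an
     * {@link android.media.Image} in this format:</p>
     *
     * <pre>{@code
     * // Row stride is in bytes; pixel stride is the byte distance between samples in a row.
     * // Native byte order is assumed here.
     * Image.Plane plane = image.getPlanes()[0];
     * ByteBuffer buffer = plane.getBuffer().order(ByteOrder.nativeOrder());
     * int index = y * plane.getRowStride() + x * plane.getPixelStride();
     * int rawSample = buffer.getShort(index) & 0xFFFF;
     * }</pre>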
     */
    public static final int RAW_SENSOR = 0x20;

    /**
     * <p>
     * Android 10-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 10-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of
     * each row, each 4 consecutive pixels are packed into 5 bytes (40 bits).
     * Each of the first 4 bytes contains the top 8 bits of one pixel, and the
     * fifth byte contains the 2 least significant bits of all 4 pixels. The
     * exact layout for each 4 consecutive pixels is illustrated below
     * ({@code Pi[j]} stands for the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[9]</td>
     * <td align="center">P0[8]</td>
     * <td align="center">P0[7]</td>
     * <td align="center">P0[6]</td>
     * <td align="center">P0[5]</td>
     * <td align="center">P0[4]</td>
     * <td align="center">P0[3]</td>
     * <td align="center">P0[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[9]</td>
     * <td align="center">P1[8]</td>
     * <td align="center">P1[7]</td>
     * <td align="center">P1[6]</td>
     * <td align="center">P1[5]</td>
     * <td align="center">P1[4]</td>
     * <td align="center">P1[3]</td>
     * <td align="center">P1[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P2[9]</td>
     * <td align="center">P2[8]</td>
     * <td align="center">P2[7]</td>
     * <td align="center">P2[6]</td>
     * <td align="center">P2[5]</td>
     * <td align="center">P2[4]</td>
     * <td align="center">P2[3]</td>
     * <td align="center">P2[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 3:</td>
     * <td align="center">P3[9]</td>
     * <td align="center">P3[8]</td>
     * <td align="center">P3[7]</td>
     * <td align="center">P3[6]</td>
     * <td align="center">P3[5]</td>
     * <td align="center">P3[4]</td>
     * <td align="center">P3[3]</td>
     * <td align="center">P3[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 4:</td>
     * <td align="center">P3[1]</td>
     * <td align="center">P3[0]</td>
     * <td align="center">P2[1]</td>
     * <td align="center">P2[0]</td>
     * <td align="center">P1[1]</td>
     * <td align="center">P1[0]</td>
     * <td align="center">P0[1]</td>
     * <td align="center">P0[0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in the table above to
     * access each row's data. When the row stride is equal to {@code width * 10 / 8}, there
     * are no padding bytes at the end of each row and the entire image data is
     * densely packed. When the stride is larger than {@code width * 10 / 8}, padding
     * bytes are present at the end of each row.
     * </p>
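     * <p>
     * A minimal sketch (illustrative only; {@code raw} is assumed to hold the packed bytes and
     * {@code rowStart} the byte offset of a row) of unpacking the first 4 pixels of a row,
     * following the byte layout in the table above:
     * </p>
     *
     * <pre>{@code
     * // Each group of 5 bytes holds 4 pixels of 10 bits each.
     * int b0 = raw[rowStart]     & 0xFF;
     * int b1 = raw[rowStart + 1] & 0xFF;
     * int b2 = raw[rowStart + 2] & 0xFF;
     * int b3 = raw[rowStart + 3] & 0xFF;
     * int b4 = raw[rowStart + 4] & 0xFF;   // low 2 bits of each of the 4 pixels
     *
     * int p0 = (b0 << 2) | ( b4       & 0x3);
     * int p1 = (b1 << 2) | ((b4 >> 2) & 0x3);
     * int p2 = (b2 << 2) | ((b4 >> 4) & 0x3);
     * int p3 = (b3 << 2) | ((b4 >> 6) & 0x3);
     * }</pre>
     *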
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object.
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and
     * {@link android.media.Image.Plane#getRowStride()} gives the distance
     * (in bytes) between the starts of adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW10 = 0x25;

    /**
     * Android dense depth image format.
     *
     * <p>Each pixel is 16 bits, representing a depth ranging measurement from
     * a depth camera or similar sensor.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * <p>When produced by a camera, the units are millimeters.</p>
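     *
     * <p>A minimal sketch (illustrative only; {@code image}, {@code x}, and {@code y} are
     * assumed to be defined elsewhere) of reading one depth sample from an
     * {@link android.media.Image} in this format:</p>
     *
     * <pre>{@code
     * // Each sample is one 16-bit value; when produced by a camera it is in millimeters.
     * // Native byte order is assumed; the row stride is in bytes, hence the division by 2.
     * Image.Plane plane = image.getPlanes()[0];
     * ShortBuffer depth = plane.getBuffer().order(ByteOrder.nativeOrder()).asShortBuffer();
     * int depthMillimeters = depth.get(y * (plane.getRowStride() / 2) + x) & 0xFFFF;
     * }</pre>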
     */
    public static final int DEPTH16 = 0x44363159;

    /**
     * Android sparse depth point cloud format.
     *
     * <p>A variable-length list of 3D points, with each point represented
     * by a triple of floats.</p>
     *
     * <p>The number of points is {@code (size of the buffer in bytes) / 12}.</p>
     *
     * <p>The coordinate system and units depend on the source of the point cloud data.</p>
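     *
     * <p>A minimal sketch (illustrative only; {@code image} is assumed to come from an
     * {@link android.media.ImageReader} using this format) of iterating over the points:</p>
     *
     * <pre>{@code
     * // Each point is three consecutive 32-bit floats (x, y, z), 12 bytes per point.
     * FloatBuffer points = image.getPlanes()[0].getBuffer()
     *         .order(ByteOrder.nativeOrder()).asFloatBuffer();
     * int numPoints = points.remaining() / 3;
     * for (int i = 0; i < numPoints; i++) {
     *     float x = points.get(3 * i);
     *     float y = points.get(3 * i + 1);
     *     float z = points.get(3 * i + 2);
     * }
     * }</pre>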
     */
    public static final int DEPTH_POINT_CLOUD = 0x101;

    /**
     * Android private opaque image format.
     * <p>
     * The choices of the actual format and pixel data layout are entirely up to
     * the device-specific and framework internal implementations, and may vary
     * depending on use cases even for the same device. The buffers of this
     * format can be produced by components like
     * {@link android.media.ImageWriter ImageWriter}, and interpreted correctly
     * by consumers like {@link android.hardware.camera2.CameraDevice
     * CameraDevice} based on the device/framework private information. However,
     * these buffers are not directly accessible to the application.
     * </p>
     * <p>
     * When an {@link android.media.Image Image} of this format is obtained from
     * an {@link android.media.ImageReader ImageReader} or
     * {@link android.media.ImageWriter ImageWriter}, the
     * {@link android.media.Image#getPlanes() getPlanes()} method will return an
     * empty {@link android.media.Image.Plane Plane} array.
     * </p>
     * <p>
     * If a buffer of this format is to be used as an OpenGL ES texture, the
     * framework will assume that sampling the texture will always return an
     * alpha value of 1.0 (i.e. the buffer contains only opaque pixel values).
     * </p>
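     *
     * <p>A minimal sketch (illustrative only; {@code width} and {@code height} are assumed to be
     * defined elsewhere) of creating an {@link android.media.ImageReader} that carries buffers
     * in this opaque format:</p>
     *
     * <pre>{@code
     * // The reader's Surface can be handed to a producer such as the camera, but
     * // Image.getPlanes() on acquired images will return an empty array.
     * ImageReader reader = ImageReader.newInstance(width, height, ImageFormat.PRIVATE, 2);
     * Surface surface = reader.getSurface();
     * }</pre>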
     */
    public static final int PRIVATE = 0x22;

    /**
     * Use this function to retrieve the number of bits per pixel of an
     * ImageFormat.
     *
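     * <p>For example (an illustrative sketch; {@code width} and {@code height} are assumed to be
     * defined elsewhere), the size of one tightly packed frame can be estimated from this
     * value:</p>
     *
     * <pre>{@code
     * // Estimate the byte size of one tightly packed NV21 preview frame.
     * int bitsPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.NV21);  // 12
     * int frameBytes = width * height * bitsPerPixel / 8;
     * }</pre>
     *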
     * @param format the image format, one of the constants defined in this class
     * @return the number of bits per pixel of the given format or -1 if the
     *         format doesn't exist or is not supported.
     */
    public static int getBitsPerPixel(int format) {
        switch (format) {
            case RGB_565:
                return 16;
            case NV16:
                return 16;
            case YUY2:
                return 16;
            case YV12:
                return 12;
            case Y8:
                return 8;
            case Y16:
            case DEPTH16:
                return 16;
            case NV21:
                return 12;
            case YUV_420_888:
                return 12;
            case RAW_SENSOR:
                return 16;
            case RAW10:
                return 10;
        }
        return -1;
    }

    /**
     * Determine whether or not this is a publicly visible {@code format}.
     *
     * <p>In particular, {@code @hide} formats will return {@code false}.</p>
     *
     * <p>Any other formats (including UNKNOWN) will return {@code false}.</p>
     *
     * @param format an integer format
     * @return {@code true} if the format is publicly visible, {@code false} otherwise
     *
     * @hide
     */
    public static boolean isPublicFormat(int format) {
        switch (format) {
            case RGB_565:
            case NV16:
            case YUY2:
            case YV12:
            case JPEG:
            case NV21:
            case YUV_420_888:
            case RAW_SENSOR:
            case RAW10:
            case DEPTH16:
            case DEPTH_POINT_CLOUD:
            case PRIVATE:
                return true;
        }

        return false;
    }
}