1 /* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 package android.graphics; 18 19 import android.annotation.IntDef; 20 21 import java.lang.annotation.Retention; 22 import java.lang.annotation.RetentionPolicy; 23 24 public class ImageFormat { 25 /** @hide */ 26 @Retention(RetentionPolicy.SOURCE) 27 @IntDef(value = { 28 UNKNOWN, 29 RGB_565, 30 YV12, 31 Y8, 32 Y16, 33 NV16, 34 NV21, 35 YUY2, 36 JPEG, 37 DEPTH_JPEG, 38 YUV_420_888, 39 YUV_422_888, 40 YUV_444_888, 41 FLEX_RGB_888, 42 FLEX_RGBA_8888, 43 RAW_SENSOR, 44 RAW_PRIVATE, 45 RAW10, 46 RAW12, 47 DEPTH16, 48 DEPTH_POINT_CLOUD, 49 RAW_DEPTH, 50 RAW_DEPTH10, 51 PRIVATE, 52 HEIC 53 }) 54 public @interface Format { 55 } 56 57 /* 58 * these constants are chosen to be binary compatible with their previous 59 * location in PixelFormat.java 60 */ 61 62 public static final int UNKNOWN = 0; 63 64 /** 65 * RGB format used for pictures encoded as RGB_565. See 66 * {@link android.hardware.Camera.Parameters#setPictureFormat(int)}. 
     */
    public static final int RGB_565 = 4;

    /**
     * <p>Android YUV format.</p>
     *
     * <p>This format is exposed to software decoders and applications.</p>
     *
     * <p>YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
     * by (W/2) x (H/2) Cr and Cb planes.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * <li>a vertical stride equal to the height</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height
     * c_stride = ALIGN(stride/2, 16)
     * c_size = c_stride * height/2
     * size = y_size + c_size * 2
     * cr_offset = y_size
     * cb_offset = y_size + c_size</pre>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>For the older camera API, this format is guaranteed to be supported for
     * {@link android.hardware.Camera} preview images since API level 12; for earlier API versions,
     * check {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.
     *
     * <p>Note that for camera preview callback use (see
     * {@link android.hardware.Camera#setPreviewCallback}), the
     * <var>stride</var> value is the smallest possible; that is, it is equal
     * to:
     *
     * <pre>stride = ALIGN(width, 16)</pre>
     *
     * @see android.hardware.Camera.Parameters#setPreviewCallback
     * @see android.hardware.Camera.Parameters#setPreviewFormat
     * @see android.hardware.Camera.Parameters#getSupportedPreviewFormats
     * </p>
     */
    public static final int YV12 = 0x32315659;

    /**
     * <p>Android Y8 format.</p>
     *
     * <p>Y8 is a YUV planar format comprised of a WxH Y plane only, with each pixel
     * being represented by 8 bits. It is equivalent to just the Y plane from {@link #YV12}
     * format.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int Y8 = 0x20203859;

    /**
     * <p>Android Y16 format.</p>
     *
     * Y16 is a YUV planar format comprised of a WxH Y plane, with each pixel
     * being represented by 16 bits. It is just like {@link #Y8}, but has 16
     * bits per pixel (little endian).</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     *
     * @hide
     */
    public static final int Y16 = 0x20363159;

    /**
     * <p>Android YUV P010 format.</p>
     *
     * P010 is a 4:2:0 YCbCr semiplanar format comprised of a WxH Y plane
     * followed by a Wx(H/2) CbCr plane. Each sample is represented by a 16-bit
     * little-endian value, with the lower 6 bits set to zero.
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     *
     */
    public static final int YCBCR_P010 = 0x36;

    /**
     * YCbCr format, used for video.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>Whether this format is supported by the old camera API can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     *
     */
    public static final int NV16 = 0x10;

    /**
     * YCrCb format used for images, which uses the NV21 encoding format.
     *
     * <p>This is the default format
     * for {@link android.hardware.Camera} preview images, when not otherwise set with
     * {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}.</p>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     */
    public static final int NV21 = 0x11;

    /**
     * YCbCr format used for images, which uses YUYV (YUY2) encoding format.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>This is an alternative format for {@link android.hardware.Camera} preview images. Whether
     * this format is supported by the camera hardware can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     */
    public static final int YUY2 = 0x14;

    /**
     * Compressed JPEG format.
     *
     * <p>This format is always supported as an output format for the
     * {@link android.hardware.camera2} API, and as a picture format for the older
     * {@link android.hardware.Camera} API</p>
     */
    public static final int JPEG = 0x100;

    /**
     * Depth augmented compressed JPEG format.
     *
     * <p>JPEG compressed main image along with XMP embedded depth metadata
     * following ISO 16684-1:2011(E).</p>
     */
    public static final int DEPTH_JPEG = 0x69656963;

    /**
     * <p>Multi-plane Android YUV 420 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:0
     * chroma-subsampled planar or semiplanar buffer (but not fully interleaved),
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>The Y-plane is guaranteed not to be interleaved with the U/V planes
     * (in particular, pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}).</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()};
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int YUV_420_888 = 0x23;

    /**
     * <p>Multi-plane Android YUV 422 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:2
     * chroma-subsampled (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()};
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_422_888 = 0x27;

    /**
     * <p>Multi-plane Android YUV 444 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:4:4
     * (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()};
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_444_888 = 0x28;

    /**
     * <p>Multi-plane Android RGB format</p>
     *
     * <p>This format is a generic RGB format, capable of describing most RGB formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), and plane #2 is always B
     * (blue).</p>
     *
     * <p>All three planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int FLEX_RGB_888 = 0x29;

    /**
     * <p>Multi-plane Android RGBA format</p>
     *
     * <p>This format is a generic RGBA format, capable of describing most RGBA formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by four separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), plane #2 is always B (blue),
     * and plane #3 is always A (alpha). This format may represent pre-multiplied or
     * non-premultiplied alpha.</p>
     *
     * <p>All four planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int FLEX_RGBA_8888 = 0x2A;

    /**
     * <p>General raw camera sensor image format, usually representing a
     * single-channel Bayer-mosaic image. Each pixel color sample is stored with
     * 16 bits of precision.</p>
     *
     * <p>The layout of the color mosaic, the maximum and minimum encoding
     * values of the raw pixel data, the color space of the image, and all other
     * needed information to interpret a raw sensor image must be queried from
     * the {@link android.hardware.camera2.CameraDevice} which produced the
     * image.</p>
     */
    public static final int RAW_SENSOR = 0x20;

    /**
     * <p>Private raw camera sensor image format, a single channel image with
     * implementation dependent pixel layout.</p>
     *
     * <p>RAW_PRIVATE is a format for unprocessed raw image buffers coming from an
     * image sensor. The actual structure of buffers of this format is
     * implementation-dependent.</p>
     *
     */
    public static final int RAW_PRIVATE = 0x24;

    /**
     * <p>
     * Android 10-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 10-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of
     * each row, each 4 consecutive pixels are packed into 5 bytes (40 bits).
     * Each one of the first 4 bytes contains the top 8 bits of each pixel, The
     * fifth byte contains the 2 least significant bits of the 4 pixels, the
     * exact layout data for each 4 consecutive pixels is illustrated below
     * ({@code Pi[j]} stands for the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[9]</td>
     * <td align="center">P0[8]</td>
     * <td align="center">P0[7]</td>
     * <td align="center">P0[6]</td>
     * <td align="center">P0[5]</td>
     * <td align="center">P0[4]</td>
     * <td align="center">P0[3]</td>
     * <td align="center">P0[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[9]</td>
     * <td align="center">P1[8]</td>
     * <td align="center">P1[7]</td>
     * <td align="center">P1[6]</td>
     * <td align="center">P1[5]</td>
     * <td align="center">P1[4]</td>
     * <td align="center">P1[3]</td>
     * <td align="center">P1[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P2[9]</td>
     * <td align="center">P2[8]</td>
     * <td align="center">P2[7]</td>
     * <td align="center">P2[6]</td>
     * <td align="center">P2[5]</td>
     * <td align="center">P2[4]</td>
     * <td align="center">P2[3]</td>
     * <td align="center">P2[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 3:</td>
     * <td align="center">P3[9]</td>
     * <td align="center">P3[8]</td>
     * <td align="center">P3[7]</td>
     * <td align="center">P3[6]</td>
     * <td align="center">P3[5]</td>
     * <td align="center">P3[4]</td>
     * <td align="center">P3[3]</td>
     * <td align="center">P3[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 4:</td>
     * <td align="center">P3[1]</td>
     * <td align="center">P3[0]</td>
     * <td align="center">P2[1]</td>
     * <td align="center">P2[0]</td>
     * <td align="center">P1[1]</td>
     * <td align="center">P1[0]</td>
     * <td align="center">P0[1]</td>
     * <td align="center">P0[0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (10 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (10 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW10 = 0x25;

    /**
     * <p>
     * Android 12-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 12-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of each
     * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
     * and second byte contains the top 8 bits of first and second pixel. The third
     * byte contains the 4 least significant bits of the two pixels, the exact layout
     * data for each two consecutive pixels is illustrated below (Pi[j] stands for
     * the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[11]</td>
     * <td align="center">P0[10]</td>
     * <td align="center">P0[ 9]</td>
     * <td align="center">P0[ 8]</td>
     * <td align="center">P0[ 7]</td>
     * <td align="center">P0[ 6]</td>
     * <td align="center">P0[ 5]</td>
     * <td align="center">P0[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[11]</td>
     * <td align="center">P1[10]</td>
     * <td align="center">P1[ 9]</td>
     * <td align="center">P1[ 8]</td>
     * <td align="center">P1[ 7]</td>
     * <td align="center">P1[ 6]</td>
     * <td align="center">P1[ 5]</td>
     * <td align="center">P1[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P1[ 3]</td>
     * <td align="center">P1[ 2]</td>
     * <td align="center">P1[ 1]</td>
     * <td align="center">P1[ 0]</td>
     * <td align="center">P0[ 3]</td>
     * <td align="center">P0[ 2]</td>
     * <td align="center">P0[ 1]</td>
     * <td align="center">P0[ 0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (12 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (12 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW12 = 0x26;

    /**
     * <p>Android dense depth image format.</p>
     *
     * <p>Each pixel is 16 bits, representing a depth ranging measurement from a depth camera or
     * similar sensor. The 16-bit sample consists of a confidence value and the actual ranging
     * measurement.</p>
     *
     * <p>The confidence value is an estimate of correctness for this sample. It is encoded in the
     * 3 most significant bits of the sample, with a value of 0 representing 100% confidence, a
     * value of 1 representing 0% confidence, a value of 2 representing 1/7, a value of 3
     * representing 2/7, and so on.</p>
     *
     * <p>As an example, the following sample extracts the range and confidence from the first pixel
     * of a DEPTH16-format {@link android.media.Image}, and converts the confidence to a
     * floating-point value between 0 and 1.f inclusive, with 1.f representing maximum confidence:
     *
     * <pre>
     *    ShortBuffer shortDepthBuffer = img.getPlanes()[0].getBuffer().asShortBuffer();
     *    short depthSample = shortDepthBuffer.get();
     *    short depthRange = (short) (depthSample &amp; 0x1FFF);
     *    short depthConfidence = (short) ((depthSample &gt;&gt; 13) &amp; 0x7);
     *    float depthPercentage = depthConfidence == 0 ? 1.f : (depthConfidence - 1) / 7.f;
     * </pre>
     * </p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * When produced by a camera, the units for the range are millimeters.
     */
    public static final int DEPTH16 = 0x44363159;

    /**
     * Android sparse depth point cloud format.
     *
     * <p>A variable-length list of 3D points plus a confidence value, with each point represented
     * by four floats; first the X, Y, Z position coordinates, and then the confidence value.</p>
     *
     * <p>The number of points is {@code (size of the buffer in bytes) / 16}.
     *
     * <p>The coordinate system and units of the position values depend on the source of the point
     * cloud data. The confidence value is between 0.f and 1.f, inclusive, with 0 representing 0%
     * confidence and 1.f representing 100% confidence in the measured position values.</p>
     *
     * <p>As an example, the following code extracts the first depth point in a DEPTH_POINT_CLOUD
     * format {@link android.media.Image}:
     * <pre>
     *    FloatBuffer floatDepthBuffer = img.getPlanes()[0].getBuffer().asFloatBuffer();
     *    float x = floatDepthBuffer.get();
     *    float y = floatDepthBuffer.get();
     *    float z = floatDepthBuffer.get();
     *    float confidence = floatDepthBuffer.get();
     * </pre>
     *
     * For camera devices that support the
     * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT DEPTH_OUTPUT}
     * capability, DEPTH_POINT_CLOUD coordinates have units of meters, and the coordinate system is
     * defined by the camera's pose transforms:
     * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_TRANSLATION} and
     * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_ROTATION}. That means the origin is
     * the optical center of the camera device, and the positive Z axis points along the camera's optical axis,
     * toward the scene.
     */
    public static final int DEPTH_POINT_CLOUD = 0x101;

    /**
     * Unprocessed implementation-dependent raw
     * depth measurements, opaque with 16 bit
     * samples.
     *
     * @hide
     */
    public static final int RAW_DEPTH = 0x1002;

    /**
     * Unprocessed implementation-dependent raw
     * depth measurements, opaque with 10 bit
     * samples and device specific bit layout.
     *
     * @hide
     */
    public static final int RAW_DEPTH10 = 0x1003;

    /**
     * Android private opaque image format.
     * <p>
     * The choices of the actual format and pixel data layout are entirely up to
     * the device-specific and framework internal implementations, and may vary
     * depending on use cases even for the same device. The buffers of this
     * format can be produced by components like
     * {@link android.media.ImageWriter ImageWriter} , and interpreted correctly
     * by consumers like {@link android.hardware.camera2.CameraDevice
     * CameraDevice} based on the device/framework private information. However,
     * these buffers are not directly accessible to the application.
     * </p>
     * <p>
     * When an {@link android.media.Image Image} of this format is obtained from
     * an {@link android.media.ImageReader ImageReader} or
     * {@link android.media.ImageWriter ImageWriter}, the
     * {@link android.media.Image#getPlanes() getPlanes()} method will return an
     * empty {@link android.media.Image.Plane Plane} array.
     * </p>
     * <p>
     * If a buffer of this format is to be used as an OpenGL ES texture, the
     * framework will assume that sampling the texture will always return an
     * alpha value of 1.0 (i.e. the buffer contains only opaque pixel values).
     * </p>
     */
    public static final int PRIVATE = 0x22;

    /**
     * Compressed HEIC format.
     *
     * <p>This format defines the HEIC brand of High Efficiency Image File
     * Format as described in ISO/IEC 23008-12.</p>
     */
    public static final int HEIC = 0x48454946;

    /**
     * Use this function to retrieve the number of bits per pixel of an
     * ImageFormat.
794 * 795 * @param format 796 * @return the number of bits per pixel of the given format or -1 if the 797 * format doesn't exist or is not supported. 798 */ getBitsPerPixel(@ormat int format)799 public static int getBitsPerPixel(@Format int format) { 800 switch (format) { 801 case RGB_565: 802 return 16; 803 case NV16: 804 return 16; 805 case YUY2: 806 return 16; 807 case YV12: 808 return 12; 809 case Y8: 810 return 8; 811 case Y16: 812 case DEPTH16: 813 return 16; 814 case NV21: 815 return 12; 816 case YUV_420_888: 817 return 12; 818 case YUV_422_888: 819 return 16; 820 case YUV_444_888: 821 return 24; 822 case FLEX_RGB_888: 823 return 24; 824 case FLEX_RGBA_8888: 825 return 32; 826 case RAW_DEPTH: 827 case RAW_SENSOR: 828 return 16; 829 case YCBCR_P010: 830 return 24; 831 case RAW_DEPTH10: 832 case RAW10: 833 return 10; 834 case RAW12: 835 return 12; 836 } 837 return -1; 838 } 839 840 /** 841 * Determine whether or not this is a public-visible {@code format}. 842 * 843 * <p>In particular, {@code @hide} formats will return {@code false}.</p> 844 * 845 * <p>Any other formats (including UNKNOWN) will return {@code false}.</p> 846 * 847 * @param format an integer format 848 * @return a boolean 849 * 850 * @hide 851 */ isPublicFormat(@ormat int format)852 public static boolean isPublicFormat(@Format int format) { 853 switch (format) { 854 case RGB_565: 855 case NV16: 856 case YUY2: 857 case YV12: 858 case JPEG: 859 case NV21: 860 case YUV_420_888: 861 case YUV_422_888: 862 case YUV_444_888: 863 case YCBCR_P010: 864 case FLEX_RGB_888: 865 case FLEX_RGBA_8888: 866 case RAW_SENSOR: 867 case RAW_PRIVATE: 868 case RAW10: 869 case RAW12: 870 case DEPTH16: 871 case DEPTH_POINT_CLOUD: 872 case PRIVATE: 873 case RAW_DEPTH: 874 case RAW_DEPTH10: 875 case Y8: 876 case DEPTH_JPEG: 877 case HEIC: 878 return true; 879 } 880 881 return false; 882 } 883 } 884