StreamConfigurationMap.java revision e365120aaead97567bdfbc53d3bfc2699bd2f886
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17package android.hardware.camera2.params; 18 19import android.graphics.ImageFormat; 20import android.graphics.PixelFormat; 21import android.hardware.camera2.CameraCharacteristics; 22import android.hardware.camera2.CameraDevice; 23import android.hardware.camera2.CaptureRequest; 24import android.hardware.camera2.utils.HashCodeHelpers; 25import android.hardware.camera2.utils.SurfaceUtils; 26import android.hardware.camera2.legacy.LegacyCameraDevice; 27import android.hardware.camera2.legacy.LegacyMetadataMapper; 28import android.view.Surface; 29import android.util.Range; 30import android.util.Size; 31import android.util.SparseIntArray; 32 33import java.util.Arrays; 34import java.util.HashMap; 35import java.util.Objects; 36import java.util.Set; 37 38import static com.android.internal.util.Preconditions.*; 39 40/** 41 * Immutable class to store the available stream 42 * {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP configurations} to set up 43 * {@link android.view.Surface Surfaces} for creating a 44 * {@link android.hardware.camera2.CameraCaptureSession capture session} with 45 * {@link android.hardware.camera2.CameraDevice#createCaptureSession}. 
46 * <!-- TODO: link to input stream configuration --> 47 * 48 * <p>This is the authoritative list for all <!-- input/ -->output formats (and sizes respectively 49 * for that format) that are supported by a camera device.</p> 50 * 51 * <p>This also contains the minimum frame durations and stall durations for each format/size 52 * combination that can be used to calculate effective frame rate when submitting multiple captures. 53 * </p> 54 * 55 * <p>An instance of this object is available from {@link CameraCharacteristics} using 56 * the {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP} key and the 57 * {@link CameraCharacteristics#get} method.</p> 58 * 59 * <pre><code>{@code 60 * CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId); 61 * StreamConfigurationMap configs = characteristics.get( 62 * CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP); 63 * }</code></pre> 64 * 65 * @see CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP 66 * @see CameraDevice#createCaptureSession 67 */ 68public final class StreamConfigurationMap { 69 70 private static final String TAG = "StreamConfigurationMap"; 71 72 /** 73 * Create a new {@link StreamConfigurationMap}. 
     *
     * <p>The array parameters ownership is passed to this object after creation; do not
     * write to them after this constructor is invoked.</p>
     *
     * @param configurations a non-{@code null} array of {@link StreamConfiguration}
     * @param minFrameDurations a non-{@code null} array of {@link StreamConfigurationDuration}
     * @param stallDurations a non-{@code null} array of {@link StreamConfigurationDuration}
     * @param depthConfigurations an array of depth {@link StreamConfiguration}, {@code null} if
     *        the camera device does not support depth output
     * @param depthMinFrameDurations minimum frame durations for the depth configurations;
     *        must be non-{@code null} when {@code depthConfigurations} is non-{@code null}
     * @param depthStallDurations stall durations for the depth configurations;
     *        must be non-{@code null} when {@code depthConfigurations} is non-{@code null}
     * @param highSpeedVideoConfigurations an array of {@link HighSpeedVideoConfiguration}, null if
     *        camera device does not support high speed video recording
     * @param inputOutputFormatsMap the reprocessing input/output format map, or {@code null} if
     *        the camera device does not support reprocessing
     * @param listHighResolution a flag indicating whether the device supports BURST_CAPTURE
     *        and thus needs a separate list of slow high-resolution output sizes
     * @throws NullPointerException if any of the arguments except highSpeedVideoConfigurations
     *         were {@code null} or any subelements were {@code null}
     *
     * @hide
     */
    public StreamConfigurationMap(
            StreamConfiguration[] configurations,
            StreamConfigurationDuration[] minFrameDurations,
            StreamConfigurationDuration[] stallDurations,
            StreamConfiguration[] depthConfigurations,
            StreamConfigurationDuration[] depthMinFrameDurations,
            StreamConfigurationDuration[] depthStallDurations,
            HighSpeedVideoConfiguration[] highSpeedVideoConfigurations,
            ReprocessFormatsMap inputOutputFormatsMap,
            boolean listHighResolution) {
        mConfigurations = checkArrayElementsNotNull(configurations, "configurations");
        mMinFrameDurations = checkArrayElementsNotNull(minFrameDurations, "minFrameDurations");
        mStallDurations = checkArrayElementsNotNull(stallDurations, "stallDurations");
        mListHighResolution = listHighResolution;

        // Depth support is optional; normalize "unsupported" to empty arrays so the
        // loops below (and the rest of the class) never need a null check.
        if (depthConfigurations == null) {
            mDepthConfigurations = new StreamConfiguration[0];
            mDepthMinFrameDurations = new StreamConfigurationDuration[0];
            mDepthStallDurations = new StreamConfigurationDuration[0];
        } else {
            mDepthConfigurations = checkArrayElementsNotNull(depthConfigurations,
                    "depthConfigurations");
            mDepthMinFrameDurations = checkArrayElementsNotNull(depthMinFrameDurations,
                    "depthMinFrameDurations");
            mDepthStallDurations = checkArrayElementsNotNull(depthStallDurations,
                    "depthStallDurations");
        }

        // High-speed video is likewise optional; normalize null to an empty array.
        if (highSpeedVideoConfigurations == null) {
            mHighSpeedVideoConfigurations = new HighSpeedVideoConfiguration[0];
        } else {
            mHighSpeedVideoConfigurations = checkArrayElementsNotNull(
                    highSpeedVideoConfigurations, "highSpeedVideoConfigurations");
        }

        // For each format, track how many sizes there are available to configure
        for (StreamConfiguration config : configurations) {
            int fmt = config.getFormat();
            SparseIntArray map = null;
            if (config.isOutput()) {
                mAllOutputFormats.put(fmt, mAllOutputFormats.get(fmt) + 1);
                long duration = 0;
                if (mListHighResolution) {
                    // On BURST_CAPTURE devices, look up this config's minimum frame
                    // duration: sizes slower than 20fps go into the separate
                    // high-resolution format map rather than the regular one.
                    for (StreamConfigurationDuration configurationDuration : mMinFrameDurations) {
                        if (configurationDuration.getFormat() == fmt &&
                                configurationDuration.getWidth() == config.getSize().getWidth() &&
                                configurationDuration.getHeight() == config.getSize().getHeight()) {
                            duration = configurationDuration.getDuration();
                            break;
                        }
                    }
                }
                map = duration <= DURATION_20FPS_NS ?
                        mOutputFormats : mHighResOutputFormats;
            } else {
                map = mInputFormats;
            }
            map.put(fmt, map.get(fmt) + 1);
        }

        // For each depth format, track how many sizes there are available to configure
        for (StreamConfiguration config : mDepthConfigurations) {
            if (!config.isOutput()) {
                // Ignoring input depth configs
                continue;
            }

            mDepthOutputFormats.put(config.getFormat(),
                    mDepthOutputFormats.get(config.getFormat()) + 1);
        }

        // Sanity check: every HAL is required to advertise IMPLEMENTATION_DEFINED output.
        // Note: only the regular (<= 20fps) output map is checked here.
        if (mOutputFormats.indexOfKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) < 0) {
            throw new AssertionError(
                    "At least one stream configuration for IMPLEMENTATION_DEFINED must exist");
        }

        // For each Size/FPS range, track how many FPS range/Size there are available
        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
            Size size = config.getSize();
            Range<Integer> fpsRange = config.getFpsRange();
            Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
            if (fpsRangeCount == null) {
                fpsRangeCount = 0;
            }
            mHighSpeedVideoSizeMap.put(size, fpsRangeCount + 1);
            Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
            if (sizeCount == null) {
                sizeCount = 0;
            }
            mHighSpeedVideoFpsRangeMap.put(fpsRange, sizeCount + 1);
        }

        // May legitimately be null when reprocessing is unsupported.
        mInputOutputFormatsMap = inputOutputFormatsMap;
    }

    /**
     * Get the image {@code format} output formats in this stream configuration.
187 * 188 * <p>All image formats returned by this function will be defined in either {@link ImageFormat} 189 * or in {@link PixelFormat} (and there is no possibility of collision).</p> 190 * 191 * <p>Formats listed in this array are guaranteed to return true if queried with 192 * {@link #isOutputSupportedFor(int)}.</p> 193 * 194 * @return an array of integer format 195 * 196 * @see ImageFormat 197 * @see PixelFormat 198 */ 199 public final int[] getOutputFormats() { 200 return getPublicFormats(/*output*/true); 201 } 202 203 /** 204 * Get the image {@code format} output formats for a reprocessing input format. 205 * 206 * <p>When submitting a {@link CaptureRequest} with an input Surface of a given format, 207 * the only allowed target outputs of the {@link CaptureRequest} are the ones with a format 208 * listed in the return value of this method. Including any other output Surface as a target 209 * will throw an IllegalArgumentException. If no output format is supported given the input 210 * format, an empty int[] will be returned.</p> 211 * 212 * <p>All image formats returned by this function will be defined in either {@link ImageFormat} 213 * or in {@link PixelFormat} (and there is no possibility of collision).</p> 214 * 215 * <p>Formats listed in this array are guaranteed to return true if queried with 216 * {@link #isOutputSupportedFor(int)}.</p> 217 * 218 * @return an array of integer format 219 * 220 * @see ImageFormat 221 * @see PixelFormat 222 */ 223 public final int[] getValidOutputFormatsForInput(int inputFormat) { 224 if (mInputOutputFormatsMap == null) { 225 return new int[0]; 226 } 227 return mInputOutputFormatsMap.getOutputs(inputFormat); 228 } 229 230 /** 231 * Get the image {@code format} input formats in this stream configuration. 
232 * 233 * <p>All image formats returned by this function will be defined in either {@link ImageFormat} 234 * or in {@link PixelFormat} (and there is no possibility of collision).</p> 235 * 236 * @return an array of integer format 237 * 238 * @see ImageFormat 239 * @see PixelFormat 240 */ 241 public final int[] getInputFormats() { 242 return getPublicFormats(/*output*/false); 243 } 244 245 /** 246 * Get the supported input sizes for this input format. 247 * 248 * <p>The format must have come from {@link #getInputFormats}; otherwise 249 * {@code null} is returned.</p> 250 * 251 * @param format a format from {@link #getInputFormats} 252 * @return a non-empty array of sizes, or {@code null} if the format was not available. 253 */ 254 public Size[] getInputSizes(final int format) { 255 return getPublicFormatSizes(format, /*output*/false, /*highRes*/false); 256 } 257 258 /** 259 * Determine whether or not output surfaces with a particular user-defined format can be passed 260 * {@link CameraDevice#createCaptureSession createCaptureSession}. 261 * 262 * <p>This method determines that the output {@code format} is supported by the camera device; 263 * each output {@code surface} target may or may not itself support that {@code format}. 
264 * Refer to the class which provides the surface for additional documentation.</p> 265 * 266 * <p>Formats for which this returns {@code true} are guaranteed to exist in the result 267 * returned by {@link #getOutputSizes}.</p> 268 * 269 * @param format an image format from either {@link ImageFormat} or {@link PixelFormat} 270 * @return 271 * {@code true} iff using a {@code surface} with this {@code format} will be 272 * supported with {@link CameraDevice#createCaptureSession} 273 * 274 * @throws IllegalArgumentException 275 * if the image format was not a defined named constant 276 * from either {@link ImageFormat} or {@link PixelFormat} 277 * 278 * @see ImageFormat 279 * @see PixelFormat 280 * @see CameraDevice#createCaptureSession 281 */ 282 public boolean isOutputSupportedFor(int format) { 283 checkArgumentFormat(format); 284 285 int internalFormat = imageFormatToInternal(format); 286 int dataspace = imageFormatToDataspace(format); 287 if (dataspace == HAL_DATASPACE_DEPTH) { 288 return mDepthOutputFormats.indexOfKey(internalFormat) >= 0; 289 } else { 290 return getFormatsMap(/*output*/true).indexOfKey(internalFormat) >= 0; 291 } 292 } 293 294 /** 295 * Determine whether or not output streams can be configured with a particular class 296 * as a consumer. 
297 * 298 * <p>The following list is generally usable for outputs: 299 * <ul> 300 * <li>{@link android.media.ImageReader} - 301 * Recommended for image processing or streaming to external resources (such as a file or 302 * network) 303 * <li>{@link android.media.MediaRecorder} - 304 * Recommended for recording video (simple to use) 305 * <li>{@link android.media.MediaCodec} - 306 * Recommended for recording video (more complicated to use, with more flexibility) 307 * <li>{@link android.renderscript.Allocation} - 308 * Recommended for image processing with {@link android.renderscript RenderScript} 309 * <li>{@link android.view.SurfaceHolder} - 310 * Recommended for low-power camera preview with {@link android.view.SurfaceView} 311 * <li>{@link android.graphics.SurfaceTexture} - 312 * Recommended for OpenGL-accelerated preview processing or compositing with 313 * {@link android.view.TextureView} 314 * </ul> 315 * </p> 316 * 317 * <p>Generally speaking this means that creating a {@link Surface} from that class <i>may</i> 318 * provide a producer endpoint that is suitable to be used with 319 * {@link CameraDevice#createCaptureSession}.</p> 320 * 321 * <p>Since not all of the above classes support output of all format and size combinations, 322 * the particular combination should be queried with {@link #isOutputSupportedFor(Surface)}.</p> 323 * 324 * @param klass a non-{@code null} {@link Class} object reference 325 * @return {@code true} if this class is supported as an output, {@code false} otherwise 326 * 327 * @throws NullPointerException if {@code klass} was {@code null} 328 * 329 * @see CameraDevice#createCaptureSession 330 * @see #isOutputSupportedFor(Surface) 331 */ 332 public static <T> boolean isOutputSupportedFor(Class<T> klass) { 333 checkNotNull(klass, "klass must not be null"); 334 335 if (klass == android.media.ImageReader.class) { 336 return true; 337 } else if (klass == android.media.MediaRecorder.class) { 338 return true; 339 } else if (klass == 
android.media.MediaCodec.class) { 340 return true; 341 } else if (klass == android.renderscript.Allocation.class) { 342 return true; 343 } else if (klass == android.view.SurfaceHolder.class) { 344 return true; 345 } else if (klass == android.graphics.SurfaceTexture.class) { 346 return true; 347 } 348 349 return false; 350 } 351 352 /** 353 * Determine whether or not the {@code surface} in its current state is suitable to be included 354 * in a {@link CameraDevice#createCaptureSession capture session} as an output. 355 * 356 * <p>Not all surfaces are usable with the {@link CameraDevice}, and not all configurations 357 * of that {@code surface} are compatible. Some classes that provide the {@code surface} are 358 * compatible with the {@link CameraDevice} in general 359 * (see {@link #isOutputSupportedFor(Class)}, but it is the caller's responsibility to put the 360 * {@code surface} into a state that will be compatible with the {@link CameraDevice}.</p> 361 * 362 * <p>Reasons for a {@code surface} being specifically incompatible might be: 363 * <ul> 364 * <li>Using a format that's not listed by {@link #getOutputFormats} 365 * <li>Using a format/size combination that's not listed by {@link #getOutputSizes} 366 * <li>The {@code surface} itself is not in a state where it can service a new producer.</p> 367 * </li> 368 * </ul> 369 * 370 * <p>Surfaces from flexible sources will return true even if the exact size of the Surface does 371 * not match a camera-supported size, as long as the format (or class) is supported and the 372 * camera device supports a size that is equal to or less than 1080p in that format. If such as 373 * Surface is used to create a capture session, it will have its size rounded to the nearest 374 * supported size, below or equal to 1080p. 
Flexible sources include SurfaceView, SurfaceTexture, 375 * and ImageReader.</p> 376 * 377 * <p>This is not an exhaustive list; see the particular class's documentation for further 378 * possible reasons of incompatibility.</p> 379 * 380 * @param surface a non-{@code null} {@link Surface} object reference 381 * @return {@code true} if this is supported, {@code false} otherwise 382 * 383 * @throws NullPointerException if {@code surface} was {@code null} 384 * @throws IllegalArgumentException if the Surface endpoint is no longer valid 385 * 386 * @see CameraDevice#createCaptureSession 387 * @see #isOutputSupportedFor(Class) 388 */ 389 public boolean isOutputSupportedFor(Surface surface) { 390 checkNotNull(surface, "surface must not be null"); 391 392 Size surfaceSize = SurfaceUtils.getSurfaceSize(surface); 393 int surfaceFormat = SurfaceUtils.getSurfaceFormat(surface); 394 int surfaceDataspace = SurfaceUtils.getSurfaceDataspace(surface); 395 396 // See if consumer is flexible. 397 boolean isFlexible = SurfaceUtils.isFlexibleConsumer(surface); 398 399 // Override RGB formats to IMPLEMENTATION_DEFINED, b/9487482 400 if ((surfaceFormat >= LegacyMetadataMapper.HAL_PIXEL_FORMAT_RGBA_8888 && 401 surfaceFormat <= LegacyMetadataMapper.HAL_PIXEL_FORMAT_BGRA_8888)) { 402 surfaceFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED; 403 } 404 405 StreamConfiguration[] configs = 406 surfaceDataspace != HAL_DATASPACE_DEPTH ? 
mConfigurations : mDepthConfigurations; 407 for (StreamConfiguration config : configs) { 408 if (config.getFormat() == surfaceFormat && config.isOutput()) { 409 // Matching format, either need exact size match, or a flexible consumer 410 // and a size no bigger than MAX_DIMEN_FOR_ROUNDING 411 if (config.getSize().equals(surfaceSize)) { 412 return true; 413 } else if (isFlexible && 414 (config.getSize().getWidth() <= LegacyCameraDevice.MAX_DIMEN_FOR_ROUNDING)) { 415 return true; 416 } 417 } 418 } 419 return false; 420 } 421 422 /** 423 * Get a list of sizes compatible with {@code klass} to use as an output. 424 * 425 * <p>Some of the supported classes may support additional formats beyond 426 * {@link ImageFormat#PRIVATE}; this function only returns 427 * sizes for {@link ImageFormat#PRIVATE}. For example, {@link android.media.ImageReader} 428 * supports {@link ImageFormat#YUV_420_888} and {@link ImageFormat#PRIVATE}, this method will 429 * only return the sizes for {@link ImageFormat#PRIVATE} for {@link android.media.ImageReader} 430 * class.</p> 431 * 432 * <p>If a well-defined format such as {@code NV21} is required, use 433 * {@link #getOutputSizes(int)} instead.</p> 434 * 435 * <p>The {@code klass} should be a supported output, that querying 436 * {@code #isOutputSupportedFor(Class)} should return {@code true}.</p> 437 * 438 * @param klass 439 * a non-{@code null} {@link Class} object reference 440 * @return 441 * an array of supported sizes for {@link ImageFormat#PRIVATE} format, 442 * or {@code null} iff the {@code klass} is not a supported output. 
443 * 444 * 445 * @throws NullPointerException if {@code klass} was {@code null} 446 * 447 * @see #isOutputSupportedFor(Class) 448 */ 449 public <T> Size[] getOutputSizes(Class<T> klass) { 450 if (isOutputSupportedFor(klass) == false) { 451 return null; 452 } 453 454 return getInternalFormatSizes(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 455 HAL_DATASPACE_UNKNOWN,/*output*/true, /*highRes*/false); 456 } 457 458 /** 459 * Get a list of sizes compatible with the requested image {@code format}. 460 * 461 * <p>The {@code format} should be a supported format (one of the formats returned by 462 * {@link #getOutputFormats}).</p> 463 * 464 * As of API level 23, the {@link #getHighResolutionOutputSizes} method can be used on devices 465 * that support the 466 * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE} 467 * capability to get a list of high-resolution output sizes that cannot operate at the preferred 468 * 20fps rate. This means that for some supported formats, this method will return an empty 469 * list, if all the supported resolutions operate at below 20fps. For devices that do not 470 * support the BURST_CAPTURE capability, all output resolutions are listed through this method. 471 * 472 * @param format an image format from {@link ImageFormat} or {@link PixelFormat} 473 * @return 474 * an array of supported sizes, 475 * or {@code null} if the {@code format} is not a supported output 476 * 477 * @see ImageFormat 478 * @see PixelFormat 479 * @see #getOutputFormats 480 */ 481 public Size[] getOutputSizes(int format) { 482 return getPublicFormatSizes(format, /*output*/true, /*highRes*/ false); 483 } 484 485 /** 486 * Get a list of supported high speed video recording sizes. 487 * 488 * <p> When HIGH_SPEED_VIDEO is supported in 489 * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this 490 * method will list the supported high speed video size configurations. 
All the sizes listed 491 * will be a subset of the sizes reported by {@link #getOutputSizes} for processed non-stalling 492 * formats (typically ImageFormat#YUV_420_888, ImageFormat#NV21, ImageFormat#YV12)</p> 493 * 494 * <p> To enable high speed video recording, application must set 495 * {@link CaptureRequest#CONTROL_SCENE_MODE} to 496 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture 497 * requests and select the video size from this method and 498 * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from 499 * {@link #getHighSpeedVideoFpsRangesFor} to configure the recording and preview streams and 500 * setup the recording requests. For example, if the application intends to do high speed 501 * recording, it can select the maximum size reported by this method to configure output 502 * streams. Note that for the use case of multiple output streams, application must select one 503 * unique size from this method to use. Otherwise a request error might occur. Once the size is 504 * selected, application can get the supported FPS ranges by 505 * {@link #getHighSpeedVideoFpsRangesFor}, and use these FPS ranges to setup the recording 506 * requests.</p> 507 * 508 * @return 509 * an array of supported high speed video recording sizes 510 * 511 * @see #getHighSpeedVideoFpsRangesFor(Size) 512 */ 513 public Size[] getHighSpeedVideoSizes() { 514 Set<Size> keySet = mHighSpeedVideoSizeMap.keySet(); 515 return keySet.toArray(new Size[keySet.size()]); 516 } 517 518 /** 519 * Get the frame per second ranges (fpsMin, fpsMax) for input high speed video size. 
520 * 521 * <p> See {@link #getHighSpeedVideoSizes} for how to enable high speed recording.</p> 522 * 523 * <p> For normal video recording use case, where some application will NOT set 524 * {@link CaptureRequest#CONTROL_SCENE_MODE} to 525 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture 526 * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in 527 * this method must not be used to setup capture requests, or it will cause request error.</p> 528 * 529 * @param size one of the sizes returned by {@link #getHighSpeedVideoSizes()} 530 * @return 531 * An array of FPS range to use with 532 * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE TARGET_FPS_RANGE} when using 533 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene 534 * mode. 535 * The upper bound of returned ranges is guaranteed to be larger or equal to 60. 536 * 537 * @throws IllegalArgumentException if input size does not exist in the return value of 538 * getHighSpeedVideoSizes 539 * @see #getHighSpeedVideoSizes() 540 */ 541 public Range<Integer>[] getHighSpeedVideoFpsRangesFor(Size size) { 542 Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size); 543 if (fpsRangeCount == null || fpsRangeCount == 0) { 544 throw new IllegalArgumentException(String.format( 545 "Size %s does not support high speed video recording", size)); 546 } 547 548 @SuppressWarnings("unchecked") 549 Range<Integer>[] fpsRanges = new Range[fpsRangeCount]; 550 int i = 0; 551 for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) { 552 if (size.equals(config.getSize())) { 553 fpsRanges[i++] = config.getFpsRange(); 554 } 555 } 556 return fpsRanges; 557 } 558 559 /** 560 * Get a list of supported high speed video recording FPS ranges. 
561 * 562 * <p> When HIGH_SPEED_VIDEO is supported in 563 * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this 564 * method will list the supported high speed video FPS range configurations. Application can 565 * then use {@link #getHighSpeedVideoSizesFor} to query available sizes for one of returned 566 * FPS range.</p> 567 * 568 * <p> To enable high speed video recording, application must set 569 * {@link CaptureRequest#CONTROL_SCENE_MODE} to 570 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture 571 * requests and select the video size from {@link #getHighSpeedVideoSizesFor} and 572 * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from 573 * this method to configure the recording and preview streams and setup the recording requests. 574 * For example, if the application intends to do high speed recording, it can select one FPS 575 * range reported by this method, query the video sizes corresponding to this FPS range by 576 * {@link #getHighSpeedVideoSizesFor} and select one of reported sizes to configure output 577 * streams. Note that for the use case of multiple output streams, application must select one 578 * unique size from {@link #getHighSpeedVideoSizesFor}, and use it for all output streams. 579 * Otherwise a request error might occur when attempting to enable 580 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO}. 581 * Once the stream is configured, application can set the FPS range in the recording requests. 582 * </p> 583 * 584 * @return 585 * an array of supported high speed video recording FPS ranges 586 * The upper bound of returned ranges is guaranteed to be larger or equal to 60. 
587 * 588 * @see #getHighSpeedVideoSizesFor 589 */ 590 @SuppressWarnings("unchecked") 591 public Range<Integer>[] getHighSpeedVideoFpsRanges() { 592 Set<Range<Integer>> keySet = mHighSpeedVideoFpsRangeMap.keySet(); 593 return keySet.toArray(new Range[keySet.size()]); 594 } 595 596 /** 597 * Get the supported video sizes for input FPS range. 598 * 599 * <p> See {@link #getHighSpeedVideoFpsRanges} for how to enable high speed recording.</p> 600 * 601 * <p> For normal video recording use case, where the application will NOT set 602 * {@link CaptureRequest#CONTROL_SCENE_MODE} to 603 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture 604 * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in 605 * this method must not be used to setup capture requests, or it will cause request error.</p> 606 * 607 * @param fpsRange one of the FPS range returned by {@link #getHighSpeedVideoFpsRanges()} 608 * @return 609 * An array of video sizes to configure output stream when using 610 * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene 611 * mode. 
612 * 613 * @throws IllegalArgumentException if input FPS range does not exist in the return value of 614 * getHighSpeedVideoFpsRanges 615 * @see #getHighSpeedVideoFpsRanges() 616 */ 617 public Size[] getHighSpeedVideoSizesFor(Range<Integer> fpsRange) { 618 Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange); 619 if (sizeCount == null || sizeCount == 0) { 620 throw new IllegalArgumentException(String.format( 621 "FpsRange %s does not support high speed video recording", fpsRange)); 622 } 623 624 Size[] sizes = new Size[sizeCount]; 625 int i = 0; 626 for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) { 627 if (fpsRange.equals(config.getFpsRange())) { 628 sizes[i++] = config.getSize(); 629 } 630 } 631 return sizes; 632 } 633 634 /** 635 * Get a list of supported high resolution sizes, which cannot operate at full BURST_CAPTURE 636 * rate. 637 * 638 * <p>This includes all output sizes that cannot meet the 20 fps frame rate requirements for the 639 * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE} 640 * capability. This does not include the stall duration, so for example, a JPEG or RAW16 output 641 * resolution with a large stall duration but a minimum frame duration that's above 20 fps will 642 * still be listed in the regular {@link #getOutputSizes} list. All the sizes on this list are 643 * still guaranteed to operate at a rate of at least 10 fps, not including stall duration.</p> 644 * 645 * <p>For a device that does not support the BURST_CAPTURE capability, this list will be 646 * {@code null}, since resolutions in the {@link #getOutputSizes} list are already not 647 * guaranteed to meet >= 20 fps rate requirements. 
For a device that does support the 648 * BURST_CAPTURE capability, this list may be empty, if all supported resolutions meet the 20 649 * fps requirement.</p> 650 * 651 * @return an array of supported slower high-resolution sizes, or {@code null} if the 652 * BURST_CAPTURE capability is not supported 653 */ 654 public Size[] getHighResolutionOutputSizes(int format) { 655 if (!mListHighResolution) return null; 656 657 return getPublicFormatSizes(format, /*output*/true, /*highRes*/ true); 658 } 659 660 /** 661 * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration} 662 * for the format/size combination (in nanoseconds). 663 * 664 * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p> 665 * <p>{@code size} should be one of the ones returned by 666 * {@link #getOutputSizes(int)}.</p> 667 * 668 * <p>This should correspond to the frame duration when only that stream is active, with all 669 * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}. 670 * </p> 671 * 672 * <p>When multiple streams are used in a request, the minimum frame duration will be 673 * {@code max(individual stream min durations)}.</p> 674 * 675 * <p>For devices that do not support manual sensor control 676 * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}), 677 * this function may return 0.</p> 678 * 679 * <!-- 680 * TODO: uncomment after adding input stream support 681 * <p>The minimum frame duration of a stream (of a particular format, size) is the same 682 * regardless of whether the stream is input or output.</p> 683 * --> 684 * 685 * @param format an image format from {@link ImageFormat} or {@link PixelFormat} 686 * @param size an output-compatible size 687 * @return a minimum frame duration {@code >} 0 in nanoseconds, or 688 * 0 if the minimum frame duration is not available. 
689 * 690 * @throws IllegalArgumentException if {@code format} or {@code size} was not supported 691 * @throws NullPointerException if {@code size} was {@code null} 692 * 693 * @see CaptureRequest#SENSOR_FRAME_DURATION 694 * @see #getOutputStallDuration(int, Size) 695 * @see ImageFormat 696 * @see PixelFormat 697 */ 698 public long getOutputMinFrameDuration(int format, Size size) { 699 checkNotNull(size, "size must not be null"); 700 checkArgumentFormatSupported(format, /*output*/true); 701 702 return getInternalFormatDuration(imageFormatToInternal(format), 703 imageFormatToDataspace(format), 704 size, 705 DURATION_MIN_FRAME); 706 } 707 708 /** 709 * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration} 710 * for the class/size combination (in nanoseconds). 711 * 712 * <p>This assumes a the {@code klass} is set up to use {@link ImageFormat#PRIVATE}. 713 * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p> 714 * 715 * <p>{@code klass} should be one of the ones which is supported by 716 * {@link #isOutputSupportedFor(Class)}.</p> 717 * 718 * <p>{@code size} should be one of the ones returned by 719 * {@link #getOutputSizes(int)}.</p> 720 * 721 * <p>This should correspond to the frame duration when only that stream is active, with all 722 * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}. 
723 * </p> 724 * 725 * <p>When multiple streams are used in a request, the minimum frame duration will be 726 * {@code max(individual stream min durations)}.</p> 727 * 728 * <p>For devices that do not support manual sensor control 729 * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}), 730 * this function may return 0.</p> 731 * 732 * <!-- 733 * TODO: uncomment after adding input stream support 734 * <p>The minimum frame duration of a stream (of a particular format, size) is the same 735 * regardless of whether the stream is input or output.</p> 736 * --> 737 * 738 * @param klass 739 * a class which is supported by {@link #isOutputSupportedFor(Class)} and has a 740 * non-empty array returned by {@link #getOutputSizes(Class)} 741 * @param size an output-compatible size 742 * @return a minimum frame duration {@code >} 0 in nanoseconds, or 743 * 0 if the minimum frame duration is not available. 744 * 745 * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported 746 * @throws NullPointerException if {@code size} or {@code klass} was {@code null} 747 * 748 * @see CaptureRequest#SENSOR_FRAME_DURATION 749 * @see ImageFormat 750 * @see PixelFormat 751 */ 752 public <T> long getOutputMinFrameDuration(final Class<T> klass, final Size size) { 753 if (!isOutputSupportedFor(klass)) { 754 throw new IllegalArgumentException("klass was not supported"); 755 } 756 757 return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 758 HAL_DATASPACE_UNKNOWN, 759 size, DURATION_MIN_FRAME); 760 } 761 762 /** 763 * Get the stall duration for the format/size combination (in nanoseconds). 
764 * 765 * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p> 766 * <p>{@code size} should be one of the ones returned by 767 * {@link #getOutputSizes(int)}.</p> 768 * 769 * <p> 770 * A stall duration is how much extra time would get added to the normal minimum frame duration 771 * for a repeating request that has streams with non-zero stall. 772 * 773 * <p>For example, consider JPEG captures which have the following characteristics: 774 * 775 * <ul> 776 * <li>JPEG streams act like processed YUV streams in requests for which they are not included; 777 * in requests in which they are directly referenced, they act as JPEG streams. 778 * This is because supporting a JPEG stream requires the underlying YUV data to always be ready 779 * for use by a JPEG encoder, but the encoder will only be used (and impact frame duration) on 780 * requests that actually reference a JPEG stream. 781 * <li>The JPEG processor can run concurrently to the rest of the camera pipeline, but cannot 782 * process more than 1 capture at a time. 783 * </ul> 784 * 785 * <p>In other words, using a repeating YUV request would result in a steady frame rate 786 * (let's say it's 30 FPS). If a single JPEG request is submitted periodically, 787 * the frame rate will stay at 30 FPS (as long as we wait for the previous JPEG to return each 788 * time). 
If we try to submit a repeating YUV + JPEG request, then the frame rate will drop from 789 * 30 FPS.</p> 790 * 791 * <p>In general, submitting a new request with a non-0 stall time stream will <em>not</em> cause a 792 * frame rate drop unless there are still outstanding buffers for that stream from previous 793 * requests.</p> 794 * 795 * <p>Submitting a repeating request with streams (call this {@code S}) is the same as setting 796 * the minimum frame duration from the normal minimum frame duration corresponding to {@code S}, 797 * added with the maximum stall duration for {@code S}.</p> 798 * 799 * <p>If interleaving requests with and without a stall duration, a request will stall by the 800 * maximum of the remaining times for each can-stall stream with outstanding buffers.</p> 801 * 802 * <p>This means that a stalling request will not have an exposure start until the stall has 803 * completed.</p> 804 * 805 * <p>This should correspond to the stall duration when only that stream is active, with all 806 * processing (typically in {@code android.*.mode}) set to {@code FAST} or {@code OFF}. 
807 * Setting any of the processing modes to {@code HIGH_QUALITY} effectively results in an 808 * indeterminate stall duration for all streams in a request (the regular stall calculation 809 * rules are ignored).</p> 810 * 811 * <p>The following formats may always have a stall duration: 812 * <ul> 813 * <li>{@link ImageFormat#JPEG JPEG} 814 * <li>{@link ImageFormat#RAW_SENSOR RAW16} 815 * </ul> 816 * </p> 817 * 818 * <p>The following formats will never have a stall duration: 819 * <ul> 820 * <li>{@link ImageFormat#YUV_420_888 YUV_420_888} 821 * <li>{@link #isOutputSupportedFor(Class) Implementation-Defined} 822 * </ul></p> 823 * 824 * <p> 825 * All other formats may or may not have an allowed stall duration on a per-capability basis; 826 * refer to {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES 827 * android.request.availableCapabilities} for more details.</p> 828 * </p> 829 * 830 * <p>See {@link CaptureRequest#SENSOR_FRAME_DURATION android.sensor.frameDuration} 831 * for more information about calculating the max frame rate (absent stalls).</p> 832 * 833 * @param format an image format from {@link ImageFormat} or {@link PixelFormat} 834 * @param size an output-compatible size 835 * @return a stall duration {@code >=} 0 in nanoseconds 836 * 837 * @throws IllegalArgumentException if {@code format} or {@code size} was not supported 838 * @throws NullPointerException if {@code size} was {@code null} 839 * 840 * @see CaptureRequest#SENSOR_FRAME_DURATION 841 * @see ImageFormat 842 * @see PixelFormat 843 */ 844 public long getOutputStallDuration(int format, Size size) { 845 checkArgumentFormatSupported(format, /*output*/true); 846 847 return getInternalFormatDuration(imageFormatToInternal(format), 848 imageFormatToDataspace(format), 849 size, 850 DURATION_STALL); 851 } 852 853 /** 854 * Get the stall duration for the class/size combination (in nanoseconds). 855 * 856 * <p>This assumes a the {@code klass} is set up to use {@link ImageFormat#PRIVATE}. 
857 * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p> 858 * 859 * <p>{@code klass} should be one of the ones with a non-empty array returned by 860 * {@link #getOutputSizes(Class)}.</p> 861 * 862 * <p>{@code size} should be one of the ones returned by 863 * {@link #getOutputSizes(Class)}.</p> 864 * 865 * <p>See {@link #getOutputStallDuration(int, Size)} for a definition of a 866 * <em>stall duration</em>.</p> 867 * 868 * @param klass 869 * a class which is supported by {@link #isOutputSupportedFor(Class)} and has a 870 * non-empty array returned by {@link #getOutputSizes(Class)} 871 * @param size an output-compatible size 872 * @return a minimum frame duration {@code >=} 0 in nanoseconds 873 * 874 * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported 875 * @throws NullPointerException if {@code size} or {@code klass} was {@code null} 876 * 877 * @see CaptureRequest#SENSOR_FRAME_DURATION 878 * @see ImageFormat 879 * @see PixelFormat 880 */ 881 public <T> long getOutputStallDuration(final Class<T> klass, final Size size) { 882 if (!isOutputSupportedFor(klass)) { 883 throw new IllegalArgumentException("klass was not supported"); 884 } 885 886 return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 887 HAL_DATASPACE_UNKNOWN, size, DURATION_STALL); 888 } 889 890 /** 891 * Check if this {@link StreamConfigurationMap} is equal to another 892 * {@link StreamConfigurationMap}. 893 * 894 * <p>Two vectors are only equal if and only if each of the respective elements is equal.</p> 895 * 896 * @return {@code true} if the objects were equal, {@code false} otherwise 897 */ 898 @Override 899 public boolean equals(final Object obj) { 900 if (obj == null) { 901 return false; 902 } 903 if (this == obj) { 904 return true; 905 } 906 if (obj instanceof StreamConfigurationMap) { 907 final StreamConfigurationMap other = (StreamConfigurationMap) obj; 908 // XX: do we care about order? 
909 return Arrays.equals(mConfigurations, other.mConfigurations) && 910 Arrays.equals(mMinFrameDurations, other.mMinFrameDurations) && 911 Arrays.equals(mStallDurations, other.mStallDurations) && 912 Arrays.equals(mDepthConfigurations, other.mDepthConfigurations) && 913 Arrays.equals(mHighSpeedVideoConfigurations, 914 other.mHighSpeedVideoConfigurations); 915 } 916 return false; 917 } 918 919 /** 920 * {@inheritDoc} 921 */ 922 @Override 923 public int hashCode() { 924 // XX: do we care about order? 925 return HashCodeHelpers.hashCodeGeneric( 926 mConfigurations, mMinFrameDurations, 927 mStallDurations, 928 mDepthConfigurations, mHighSpeedVideoConfigurations); 929 } 930 931 // Check that the argument is supported by #getOutputFormats or #getInputFormats 932 private int checkArgumentFormatSupported(int format, boolean output) { 933 checkArgumentFormat(format); 934 935 int internalFormat = imageFormatToInternal(format); 936 int internalDataspace = imageFormatToDataspace(format); 937 938 if (output) { 939 if (internalDataspace == HAL_DATASPACE_DEPTH) { 940 if (mDepthOutputFormats.indexOfKey(internalFormat) >= 0) { 941 return format; 942 } 943 } else { 944 if (mAllOutputFormats.indexOfKey(internalFormat) >= 0) { 945 return format; 946 } 947 } 948 } else { 949 if (mInputFormats.indexOfKey(internalFormat) >= 0) { 950 return format; 951 } 952 } 953 954 throw new IllegalArgumentException(String.format( 955 "format %x is not supported by this stream configuration map", format)); 956 } 957 958 /** 959 * Ensures that the format is either user-defined or implementation defined. 
960 * 961 * <p>If a format has a different internal representation than the public representation, 962 * passing in the public representation here will fail.</p> 963 * 964 * <p>For example if trying to use {@link ImageFormat#JPEG}: 965 * it has a different public representation than the internal representation 966 * {@code HAL_PIXEL_FORMAT_BLOB}, this check will fail.</p> 967 * 968 * <p>Any invalid/undefined formats will raise an exception.</p> 969 * 970 * @param format image format 971 * @return the format 972 * 973 * @throws IllegalArgumentException if the format was invalid 974 */ 975 static int checkArgumentFormatInternal(int format) { 976 switch (format) { 977 case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: 978 case HAL_PIXEL_FORMAT_BLOB: 979 case HAL_PIXEL_FORMAT_RAW_OPAQUE: 980 case HAL_PIXEL_FORMAT_Y16: 981 return format; 982 case ImageFormat.JPEG: 983 throw new IllegalArgumentException( 984 "ImageFormat.JPEG is an unknown internal format"); 985 default: 986 return checkArgumentFormat(format); 987 } 988 } 989 990 /** 991 * Ensures that the format is publicly user-defined in either ImageFormat or PixelFormat. 992 * 993 * <p>If a format has a different public representation than the internal representation, 994 * passing in the internal representation here will fail.</p> 995 * 996 * <p>For example if trying to use {@code HAL_PIXEL_FORMAT_BLOB}: 997 * it has a different internal representation than the public representation 998 * {@link ImageFormat#JPEG}, this check will fail.</p> 999 * 1000 * <p>Any invalid/undefined formats will raise an exception, including implementation-defined. 
1001 * </p> 1002 * 1003 * <p>Note that {@code @hide} and deprecated formats will not pass this check.</p> 1004 * 1005 * @param format image format 1006 * @return the format 1007 * 1008 * @throws IllegalArgumentException if the format was not user-defined 1009 */ 1010 static int checkArgumentFormat(int format) { 1011 if (!ImageFormat.isPublicFormat(format) && !PixelFormat.isPublicFormat(format)) { 1012 throw new IllegalArgumentException(String.format( 1013 "format 0x%x was not defined in either ImageFormat or PixelFormat", format)); 1014 } 1015 1016 return format; 1017 } 1018 1019 /** 1020 * Convert an internal format compatible with {@code graphics.h} into public-visible 1021 * {@code ImageFormat}. This assumes the dataspace of the format is not HAL_DATASPACE_DEPTH. 1022 * 1023 * <p>In particular these formats are converted: 1024 * <ul> 1025 * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.JPEG</li> 1026 * </ul> 1027 * </p> 1028 * 1029 * <p>Passing in a format which has no public equivalent will fail; 1030 * as will passing in a public format which has a different internal format equivalent. 
1031 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1032 * 1033 * <p>All other formats are returned as-is, no further invalid check is performed.</p> 1034 * 1035 * <p>This function is the dual of {@link #imageFormatToInternal} for dataspaces other than 1036 * HAL_DATASPACE_DEPTH.</p> 1037 * 1038 * @param format image format from {@link ImageFormat} or {@link PixelFormat} 1039 * @return the converted image formats 1040 * 1041 * @throws IllegalArgumentException 1042 * if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or 1043 * {@link ImageFormat#JPEG} 1044 * 1045 * @see ImageFormat 1046 * @see PixelFormat 1047 * @see #checkArgumentFormat 1048 */ 1049 static int imageFormatToPublic(int format) { 1050 switch (format) { 1051 case HAL_PIXEL_FORMAT_BLOB: 1052 return ImageFormat.JPEG; 1053 case ImageFormat.JPEG: 1054 throw new IllegalArgumentException( 1055 "ImageFormat.JPEG is an unknown internal format"); 1056 default: 1057 return format; 1058 } 1059 } 1060 1061 /** 1062 * Convert an internal format compatible with {@code graphics.h} into public-visible 1063 * {@code ImageFormat}. This assumes the dataspace of the format is HAL_DATASPACE_DEPTH. 1064 * 1065 * <p>In particular these formats are converted: 1066 * <ul> 1067 * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.DEPTH_POINT_CLOUD 1068 * <li>HAL_PIXEL_FORMAT_Y16 => ImageFormat.DEPTH16 1069 * </ul> 1070 * </p> 1071 * 1072 * <p>Passing in an implementation-defined format which has no public equivalent will fail; 1073 * as will passing in a public format which has a different internal format equivalent. 
1074 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1075 * 1076 * <p>All other formats are returned as-is, no further invalid check is performed.</p> 1077 * 1078 * <p>This function is the dual of {@link #imageFormatToInternal} for formats associated with 1079 * HAL_DATASPACE_DEPTH.</p> 1080 * 1081 * @param format image format from {@link ImageFormat} or {@link PixelFormat} 1082 * @return the converted image formats 1083 * 1084 * @throws IllegalArgumentException 1085 * if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or 1086 * {@link ImageFormat#JPEG} 1087 * 1088 * @see ImageFormat 1089 * @see PixelFormat 1090 * @see #checkArgumentFormat 1091 */ 1092 static int depthFormatToPublic(int format) { 1093 switch (format) { 1094 case HAL_PIXEL_FORMAT_BLOB: 1095 return ImageFormat.DEPTH_POINT_CLOUD; 1096 case HAL_PIXEL_FORMAT_Y16: 1097 return ImageFormat.DEPTH16; 1098 case ImageFormat.JPEG: 1099 throw new IllegalArgumentException( 1100 "ImageFormat.JPEG is an unknown internal format"); 1101 case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: 1102 throw new IllegalArgumentException( 1103 "IMPLEMENTATION_DEFINED must not leak to public API"); 1104 default: 1105 throw new IllegalArgumentException( 1106 "Unknown DATASPACE_DEPTH format " + format); 1107 } 1108 } 1109 1110 /** 1111 * Convert image formats from internal to public formats (in-place). 1112 * 1113 * @param formats an array of image formats 1114 * @return {@code formats} 1115 * 1116 * @see #imageFormatToPublic 1117 */ 1118 static int[] imageFormatToPublic(int[] formats) { 1119 if (formats == null) { 1120 return null; 1121 } 1122 1123 for (int i = 0; i < formats.length; ++i) { 1124 formats[i] = imageFormatToPublic(formats[i]); 1125 } 1126 1127 return formats; 1128 } 1129 1130 /** 1131 * Convert a public format compatible with {@code ImageFormat} to an internal format 1132 * from {@code graphics.h}. 
1133 * 1134 * <p>In particular these formats are converted: 1135 * <ul> 1136 * <li>ImageFormat.JPEG => HAL_PIXEL_FORMAT_BLOB 1137 * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_PIXEL_FORMAT_BLOB 1138 * <li>ImageFormat.DEPTH16 => HAL_PIXEL_FORMAT_Y16 1139 * </ul> 1140 * </p> 1141 * 1142 * <p>Passing in an internal format which has a different public format equivalent will fail. 1143 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1144 * 1145 * <p>All other formats are returned as-is, no invalid check is performed.</p> 1146 * 1147 * <p>This function is the dual of {@link #imageFormatToPublic}.</p> 1148 * 1149 * @param format public image format from {@link ImageFormat} or {@link PixelFormat} 1150 * @return the converted image formats 1151 * 1152 * @see ImageFormat 1153 * @see PixelFormat 1154 * 1155 * @throws IllegalArgumentException 1156 * if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} 1157 */ 1158 static int imageFormatToInternal(int format) { 1159 switch (format) { 1160 case ImageFormat.JPEG: 1161 case ImageFormat.DEPTH_POINT_CLOUD: 1162 return HAL_PIXEL_FORMAT_BLOB; 1163 case ImageFormat.DEPTH16: 1164 return HAL_PIXEL_FORMAT_Y16; 1165 default: 1166 return format; 1167 } 1168 } 1169 1170 /** 1171 * Convert a public format compatible with {@code ImageFormat} to an internal dataspace 1172 * from {@code graphics.h}. 1173 * 1174 * <p>In particular these formats are converted: 1175 * <ul> 1176 * <li>ImageFormat.JPEG => HAL_DATASPACE_JFIF 1177 * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_DATASPACE_DEPTH 1178 * <li>ImageFormat.DEPTH16 => HAL_DATASPACE_DEPTH 1179 * <li>others => HAL_DATASPACE_UNKNOWN 1180 * </ul> 1181 * </p> 1182 * 1183 * <p>Passing in an implementation-defined format here will fail (it's not a public format); 1184 * as will passing in an internal format which has a different public format equivalent. 
1185 * See {@link #checkArgumentFormat} for more details about a legal public format.</p> 1186 * 1187 * <p>All other formats are returned as-is, no invalid check is performed.</p> 1188 * 1189 * <p>This function is the dual of {@link #imageFormatToPublic}.</p> 1190 * 1191 * @param format public image format from {@link ImageFormat} or {@link PixelFormat} 1192 * @return the converted image formats 1193 * 1194 * @see ImageFormat 1195 * @see PixelFormat 1196 * 1197 * @throws IllegalArgumentException 1198 * if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} 1199 */ 1200 static int imageFormatToDataspace(int format) { 1201 switch (format) { 1202 case ImageFormat.JPEG: 1203 return HAL_DATASPACE_JFIF; 1204 case ImageFormat.DEPTH_POINT_CLOUD: 1205 case ImageFormat.DEPTH16: 1206 return HAL_DATASPACE_DEPTH; 1207 default: 1208 return HAL_DATASPACE_UNKNOWN; 1209 } 1210 } 1211 1212 /** 1213 * Convert image formats from public to internal formats (in-place). 1214 * 1215 * @param formats an array of image formats 1216 * @return {@code formats} 1217 * 1218 * @see #imageFormatToInternal 1219 * 1220 * @hide 1221 */ 1222 public static int[] imageFormatToInternal(int[] formats) { 1223 if (formats == null) { 1224 return null; 1225 } 1226 1227 for (int i = 0; i < formats.length; ++i) { 1228 formats[i] = imageFormatToInternal(formats[i]); 1229 } 1230 1231 return formats; 1232 } 1233 1234 private Size[] getPublicFormatSizes(int format, boolean output, boolean highRes) { 1235 try { 1236 checkArgumentFormatSupported(format, output); 1237 } catch (IllegalArgumentException e) { 1238 return null; 1239 } 1240 1241 int internalFormat = imageFormatToInternal(format); 1242 int dataspace = imageFormatToDataspace(format); 1243 1244 return getInternalFormatSizes(internalFormat, dataspace, output, highRes); 1245 } 1246 1247 private Size[] getInternalFormatSizes(int format, int dataspace, 1248 boolean output, boolean highRes) { 1249 SparseIntArray formatsMap = 1250 !output ? 
mInputFormats : 1251 dataspace == HAL_DATASPACE_DEPTH ? mDepthOutputFormats : 1252 highRes ? mHighResOutputFormats : 1253 mOutputFormats; 1254 1255 int sizesCount = formatsMap.get(format); 1256 if ( ((!output || dataspace == HAL_DATASPACE_DEPTH) && sizesCount == 0) || 1257 (output && dataspace != HAL_DATASPACE_DEPTH && mAllOutputFormats.get(format) == 0)) { 1258 // Only throw if this is really not supported at all 1259 throw new IllegalArgumentException("format not available"); 1260 } 1261 1262 Size[] sizes = new Size[sizesCount]; 1263 int sizeIndex = 0; 1264 1265 StreamConfiguration[] configurations = 1266 (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations; 1267 1268 for (StreamConfiguration config : configurations) { 1269 int fmt = config.getFormat(); 1270 if (fmt == format && config.isOutput() == output) { 1271 if (output) { 1272 // Filter slow high-res output formats; include for 1273 // highRes, remove for !highRes 1274 long duration = 0; 1275 for (int i = 0; i < mMinFrameDurations.length; i++) { 1276 StreamConfigurationDuration d = mMinFrameDurations[i]; 1277 if (d.getFormat() == fmt && 1278 d.getWidth() == config.getSize().getWidth() && 1279 d.getHeight() == config.getSize().getHeight()) { 1280 duration = d.getDuration(); 1281 break; 1282 } 1283 } 1284 if (highRes != (duration > DURATION_20FPS_NS)) { 1285 continue; 1286 } 1287 } 1288 sizes[sizeIndex++] = config.getSize(); 1289 } 1290 } 1291 1292 if (sizeIndex != sizesCount) { 1293 throw new AssertionError( 1294 "Too few sizes (expected " + sizesCount + ", actual " + sizeIndex + ")"); 1295 } 1296 1297 return sizes; 1298 } 1299 1300 /** Get the list of publically visible output formats; does not include IMPL_DEFINED */ 1301 private int[] getPublicFormats(boolean output) { 1302 int[] formats = new int[getPublicFormatCount(output)]; 1303 1304 int i = 0; 1305 1306 SparseIntArray map = getFormatsMap(output); 1307 for (int j = 0; j < map.size(); j++) { 1308 int format = map.keyAt(j); 1309 
if (format != HAL_PIXEL_FORMAT_RAW_OPAQUE) { 1310 formats[i++] = imageFormatToPublic(format); 1311 } 1312 } 1313 if (output) { 1314 for (int j = 0; j < mDepthOutputFormats.size(); j++) { 1315 formats[i++] = depthFormatToPublic(mDepthOutputFormats.keyAt(j)); 1316 } 1317 } 1318 if (formats.length != i) { 1319 throw new AssertionError("Too few formats " + i + ", expected " + formats.length); 1320 } 1321 1322 return formats; 1323 } 1324 1325 /** Get the format -> size count map for either output or input formats */ 1326 private SparseIntArray getFormatsMap(boolean output) { 1327 return output ? mAllOutputFormats : mInputFormats; 1328 } 1329 1330 private long getInternalFormatDuration(int format, int dataspace, Size size, int duration) { 1331 // assume format is already checked, since its internal 1332 1333 if (!isSupportedInternalConfiguration(format, dataspace, size)) { 1334 throw new IllegalArgumentException("size was not supported"); 1335 } 1336 1337 StreamConfigurationDuration[] durations = getDurations(duration, dataspace); 1338 1339 for (StreamConfigurationDuration configurationDuration : durations) { 1340 if (configurationDuration.getFormat() == format && 1341 configurationDuration.getWidth() == size.getWidth() && 1342 configurationDuration.getHeight() == size.getHeight()) { 1343 return configurationDuration.getDuration(); 1344 } 1345 } 1346 // Default duration is '0' (unsupported/no extra stall) 1347 return 0; 1348 } 1349 1350 /** 1351 * Get the durations array for the kind of duration 1352 * 1353 * @see #DURATION_MIN_FRAME 1354 * @see #DURATION_STALL 1355 * */ 1356 private StreamConfigurationDuration[] getDurations(int duration, int dataspace) { 1357 switch (duration) { 1358 case DURATION_MIN_FRAME: 1359 return (dataspace == HAL_DATASPACE_DEPTH) ? 1360 mDepthMinFrameDurations : mMinFrameDurations; 1361 case DURATION_STALL: 1362 return (dataspace == HAL_DATASPACE_DEPTH) ? 
1363 mDepthStallDurations : mStallDurations; 1364 default: 1365 throw new IllegalArgumentException("duration was invalid"); 1366 } 1367 } 1368 1369 /** Count the number of publicly-visible output formats */ 1370 private int getPublicFormatCount(boolean output) { 1371 SparseIntArray formatsMap = getFormatsMap(output); 1372 int size = formatsMap.size(); 1373 if (formatsMap.indexOfKey(HAL_PIXEL_FORMAT_RAW_OPAQUE) >= 0) { 1374 size -= 1; 1375 } 1376 if (output) { 1377 size += mDepthOutputFormats.size(); 1378 } 1379 1380 return size; 1381 } 1382 1383 private static <T> boolean arrayContains(T[] array, T element) { 1384 if (array == null) { 1385 return false; 1386 } 1387 1388 for (T el : array) { 1389 if (Objects.equals(el, element)) { 1390 return true; 1391 } 1392 } 1393 1394 return false; 1395 } 1396 1397 private boolean isSupportedInternalConfiguration(int format, int dataspace, 1398 Size size) { 1399 StreamConfiguration[] configurations = 1400 (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations; 1401 1402 for (int i = 0; i < configurations.length; i++) { 1403 if (configurations[i].getFormat() == format && 1404 configurations[i].getSize().equals(size)) { 1405 return true; 1406 } 1407 } 1408 1409 return false; 1410 } 1411 1412 /** 1413 * Return this {@link StreamConfigurationMap} as a string representation. 1414 * 1415 * <p>{@code "StreamConfigurationMap(Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d, 1416 * stall:%d], ... [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]), Inputs([w:%d, h:%d, 1417 * format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)]), ValidOutputFormatsForInput( 1418 * [in:%d, out:%d, ... %d], ... [in:%d, out:%d, ... %d]), HighSpeedVideoConfigurations( 1419 * [w:%d, h:%d, min_fps:%d, max_fps:%d], ... [w:%d, h:%d, min_fps:%d, max_fps:%d]))"}.</p> 1420 * 1421 * <p>{@code Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d], ... 
1422 * [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d])}, where 1423 * {@code [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]} represents an output 1424 * configuration's width, height, format, minimal frame duration in nanoseconds, and stall 1425 * duration in nanoseconds.</p> 1426 * 1427 * <p>{@code Inputs([w:%d, h:%d, format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)])}, where 1428 * {@code [w:%d, h:%d, format:%s(%d)]} represents an input configuration's width, height, and 1429 * format.</p> 1430 * 1431 * <p>{@code ValidOutputFormatsForInput([in:%s(%d), out:%s(%d), ... %s(%d)], 1432 * ... [in:%s(%d), out:%s(%d), ... %s(%d)])}, where {@code [in:%s(%d), out:%s(%d), ... %s(%d)]} 1433 * represents an input fomat and its valid output formats.</p> 1434 * 1435 * <p>{@code HighSpeedVideoConfigurations([w:%d, h:%d, min_fps:%d, max_fps:%d], 1436 * ... [w:%d, h:%d, min_fps:%d, max_fps:%d])}, where 1437 * {@code [w:%d, h:%d, min_fps:%d, max_fps:%d]} represents a high speed video output 1438 * configuration's width, height, minimal frame rate, and maximal frame rate.</p> 1439 * 1440 * @return string representation of {@link StreamConfigurationMap} 1441 */ 1442 @Override 1443 public String toString() { 1444 StringBuilder sb = new StringBuilder("StreamConfiguration("); 1445 appendOutputsString(sb); 1446 sb.append(", "); 1447 appendHighResOutputsString(sb); 1448 sb.append(", "); 1449 appendInputsString(sb); 1450 sb.append(", "); 1451 appendValidOutputFormatsForInputString(sb); 1452 sb.append(", "); 1453 appendHighSpeedVideoConfigurationsString(sb); 1454 sb.append(")"); 1455 1456 return sb.toString(); 1457 } 1458 1459 private void appendOutputsString(StringBuilder sb) { 1460 sb.append("Outputs("); 1461 int[] formats = getOutputFormats(); 1462 for (int format : formats) { 1463 Size[] sizes = getOutputSizes(format); 1464 for (Size size : sizes) { 1465 long minFrameDuration = getOutputMinFrameDuration(format, size); 1466 long stallDuration = 
getOutputStallDuration(format, size); 1467 sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " + 1468 "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format), 1469 format, minFrameDuration, stallDuration)); 1470 } 1471 } 1472 // Remove the pending ", " 1473 if (sb.charAt(sb.length() - 1) == ' ') { 1474 sb.delete(sb.length() - 2, sb.length()); 1475 } 1476 sb.append(")"); 1477 } 1478 1479 private void appendHighResOutputsString(StringBuilder sb) { 1480 sb.append("HighResolutionOutputs("); 1481 int[] formats = getOutputFormats(); 1482 for (int format : formats) { 1483 Size[] sizes = getHighResolutionOutputSizes(format); 1484 if (sizes == null) continue; 1485 for (Size size : sizes) { 1486 long minFrameDuration = getOutputMinFrameDuration(format, size); 1487 long stallDuration = getOutputStallDuration(format, size); 1488 sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " + 1489 "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format), 1490 format, minFrameDuration, stallDuration)); 1491 } 1492 } 1493 // Remove the pending ", " 1494 if (sb.charAt(sb.length() - 1) == ' ') { 1495 sb.delete(sb.length() - 2, sb.length()); 1496 } 1497 sb.append(")"); 1498 } 1499 1500 private void appendInputsString(StringBuilder sb) { 1501 sb.append("Inputs("); 1502 int[] formats = getInputFormats(); 1503 for (int format : formats) { 1504 Size[] sizes = getInputSizes(format); 1505 for (Size size : sizes) { 1506 sb.append(String.format("[w:%d, h:%d, format:%s(%d)], ", size.getWidth(), 1507 size.getHeight(), formatToString(format), format)); 1508 } 1509 } 1510 // Remove the pending ", " 1511 if (sb.charAt(sb.length() - 1) == ' ') { 1512 sb.delete(sb.length() - 2, sb.length()); 1513 } 1514 sb.append(")"); 1515 } 1516 1517 private void appendValidOutputFormatsForInputString(StringBuilder sb) { 1518 sb.append("ValidOutputFormatsForInput("); 1519 int[] inputFormats = getInputFormats(); 1520 for (int inputFormat : 
inputFormats) { 1521 sb.append(String.format("[in:%s(%d), out:", formatToString(inputFormat), inputFormat)); 1522 int[] outputFormats = getValidOutputFormatsForInput(inputFormat); 1523 for (int i = 0; i < outputFormats.length; i++) { 1524 sb.append(String.format("%s(%d)", formatToString(outputFormats[i]), 1525 outputFormats[i])); 1526 if (i < outputFormats.length - 1) { 1527 sb.append(", "); 1528 } 1529 } 1530 sb.append("], "); 1531 } 1532 // Remove the pending ", " 1533 if (sb.charAt(sb.length() - 1) == ' ') { 1534 sb.delete(sb.length() - 2, sb.length()); 1535 } 1536 sb.append(")"); 1537 } 1538 1539 private void appendHighSpeedVideoConfigurationsString(StringBuilder sb) { 1540 sb.append("HighSpeedVideoConfigurations("); 1541 Size[] sizes = getHighSpeedVideoSizes(); 1542 for (Size size : sizes) { 1543 Range<Integer>[] ranges = getHighSpeedVideoFpsRangesFor(size); 1544 for (Range<Integer> range : ranges) { 1545 sb.append(String.format("[w:%d, h:%d, min_fps:%d, max_fps:%d], ", size.getWidth(), 1546 size.getHeight(), range.getLower(), range.getUpper())); 1547 } 1548 } 1549 // Remove the pending ", " 1550 if (sb.charAt(sb.length() - 1) == ' ') { 1551 sb.delete(sb.length() - 2, sb.length()); 1552 } 1553 sb.append(")"); 1554 } 1555 1556 private String formatToString(int format) { 1557 switch (format) { 1558 case ImageFormat.YV12: 1559 return "YV12"; 1560 case ImageFormat.YUV_420_888: 1561 return "YUV_420_888"; 1562 case ImageFormat.NV21: 1563 return "NV21"; 1564 case ImageFormat.NV16: 1565 return "NV16"; 1566 case PixelFormat.RGB_565: 1567 return "RGB_565"; 1568 case PixelFormat.RGBA_8888: 1569 return "RGBA_8888"; 1570 case PixelFormat.RGBX_8888: 1571 return "RGBX_8888"; 1572 case PixelFormat.RGB_888: 1573 return "RGB_888"; 1574 case ImageFormat.JPEG: 1575 return "JPEG"; 1576 case ImageFormat.YUY2: 1577 return "YUY2"; 1578 case ImageFormat.Y8: 1579 return "Y8"; 1580 case ImageFormat.Y16: 1581 return "Y16"; 1582 case ImageFormat.RAW_SENSOR: 1583 return "RAW_SENSOR"; 1584 
case ImageFormat.RAW10: 1585 return "RAW10"; 1586 case ImageFormat.DEPTH16: 1587 return "DEPTH16"; 1588 case ImageFormat.DEPTH_POINT_CLOUD: 1589 return "DEPTH_POINT_CLOUD"; 1590 case ImageFormat.PRIVATE: 1591 return "PRIVATE"; 1592 default: 1593 return "UNKNOWN"; 1594 } 1595 } 1596 1597 // from system/core/include/system/graphics.h 1598 private static final int HAL_PIXEL_FORMAT_RAW16 = 0x20; 1599 private static final int HAL_PIXEL_FORMAT_BLOB = 0x21; 1600 private static final int HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22; 1601 private static final int HAL_PIXEL_FORMAT_YCbCr_420_888 = 0x23; 1602 private static final int HAL_PIXEL_FORMAT_RAW_OPAQUE = 0x24; 1603 private static final int HAL_PIXEL_FORMAT_RAW10 = 0x25; 1604 private static final int HAL_PIXEL_FORMAT_RAW12 = 0x26; 1605 private static final int HAL_PIXEL_FORMAT_Y16 = 0x20363159; 1606 1607 1608 private static final int HAL_DATASPACE_UNKNOWN = 0x0; 1609 private static final int HAL_DATASPACE_JFIF = 0x101; 1610 private static final int HAL_DATASPACE_DEPTH = 0x1000; 1611 1612 private static final long DURATION_20FPS_NS = 50000000L; 1613 /** 1614 * @see #getDurations(int, int) 1615 */ 1616 private static final int DURATION_MIN_FRAME = 0; 1617 private static final int DURATION_STALL = 1; 1618 1619 private final StreamConfiguration[] mConfigurations; 1620 private final StreamConfigurationDuration[] mMinFrameDurations; 1621 private final StreamConfigurationDuration[] mStallDurations; 1622 1623 private final StreamConfiguration[] mDepthConfigurations; 1624 private final StreamConfigurationDuration[] mDepthMinFrameDurations; 1625 private final StreamConfigurationDuration[] mDepthStallDurations; 1626 1627 private final HighSpeedVideoConfiguration[] mHighSpeedVideoConfigurations; 1628 private final ReprocessFormatsMap mInputOutputFormatsMap; 1629 1630 private final boolean mListHighResolution; 1631 1632 /** internal format -> num output sizes mapping, not including slow high-res sizes, for 1633 * non-depth 
dataspaces */ 1634 private final SparseIntArray mOutputFormats = new SparseIntArray(); 1635 /** internal format -> num output sizes mapping for slow high-res sizes, for non-depth 1636 * dataspaces */ 1637 private final SparseIntArray mHighResOutputFormats = new SparseIntArray(); 1638 /** internal format -> num output sizes mapping for all non-depth dataspaces */ 1639 private final SparseIntArray mAllOutputFormats = new SparseIntArray(); 1640 /** internal format -> num input sizes mapping, for input reprocessing formats */ 1641 private final SparseIntArray mInputFormats = new SparseIntArray(); 1642 /** internal format -> num depth output sizes mapping, for HAL_DATASPACE_DEPTH */ 1643 private final SparseIntArray mDepthOutputFormats = new SparseIntArray(); 1644 /** High speed video Size -> FPS range count mapping*/ 1645 private final HashMap</*HighSpeedVideoSize*/Size, /*Count*/Integer> mHighSpeedVideoSizeMap = 1646 new HashMap<Size, Integer>(); 1647 /** High speed video FPS range -> Size count mapping*/ 1648 private final HashMap</*HighSpeedVideoFpsRange*/Range<Integer>, /*Count*/Integer> 1649 mHighSpeedVideoFpsRangeMap = new HashMap<Range<Integer>, Integer>(); 1650 1651} 1652