StreamConfigurationMap.java revision 0819c75680c81a4e9c8a1ec518ac62cceccf3f56
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.hardware.camera2.params;
18
19import android.graphics.ImageFormat;
20import android.graphics.PixelFormat;
21import android.hardware.camera2.CameraCharacteristics;
22import android.hardware.camera2.CameraDevice;
23import android.hardware.camera2.CaptureRequest;
24import android.hardware.camera2.utils.HashCodeHelpers;
25import android.hardware.camera2.legacy.LegacyCameraDevice;
26import android.hardware.camera2.legacy.LegacyMetadataMapper;
27import android.hardware.camera2.legacy.LegacyExceptionUtils.BufferQueueAbandonedException;
28import android.view.Surface;
29import android.util.Range;
30import android.util.Size;
31import android.util.SparseIntArray;
32
33import java.util.Arrays;
34import java.util.HashMap;
35import java.util.Objects;
36import java.util.Set;
37
38import static com.android.internal.util.Preconditions.*;
39
40/**
41 * Immutable class to store the available stream
42 * {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP configurations} to set up
43 * {@link android.view.Surface Surfaces} for creating a
44 * {@link android.hardware.camera2.CameraCaptureSession capture session} with
45 * {@link android.hardware.camera2.CameraDevice#createCaptureSession}.
46 * <!-- TODO: link to input stream configuration -->
47 *
48 * <p>This is the authoritative list for all <!-- input/ -->output formats (and sizes respectively
49 * for that format) that are supported by a camera device.</p>
50 *
51 * <p>This also contains the minimum frame durations and stall durations for each format/size
52 * combination that can be used to calculate effective frame rate when submitting multiple captures.
53 * </p>
54 *
55 * <p>An instance of this object is available from {@link CameraCharacteristics} using
56 * the {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP} key and the
57 * {@link CameraCharacteristics#get} method.</p>
58 *
59 * <pre><code>{@code
60 * CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId);
61 * StreamConfigurationMap configs = characteristics.get(
62 *         CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
63 * }</code></pre>
64 *
65 * @see CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP
66 * @see CameraDevice#createCaptureSession
67 */
68public final class StreamConfigurationMap {
69
70    private static final String TAG = "StreamConfigurationMap";
71
    /**
     * Create a new {@link StreamConfigurationMap}.
     *
     * <p>The array parameters ownership is passed to this object after creation; do not
     * write to them after this constructor is invoked.</p>
     *
     * @param configurations a non-{@code null} array of {@link StreamConfiguration}
     * @param minFrameDurations a non-{@code null} array of {@link StreamConfigurationDuration}
     * @param stallDurations a non-{@code null} array of {@link StreamConfigurationDuration}
     * @param depthConfigurations an array of depth {@link StreamConfiguration}, or {@code null}
     *        if the camera device does not support depth outputs
     * @param depthMinFrameDurations an array of depth {@link StreamConfigurationDuration};
     *        ignored when {@code depthConfigurations} is {@code null}
     * @param depthStallDurations an array of depth {@link StreamConfigurationDuration};
     *        ignored when {@code depthConfigurations} is {@code null}
     * @param highSpeedVideoConfigurations an array of {@link HighSpeedVideoConfiguration}, null if
     *        camera device does not support high speed video recording
     * @param inputOutputFormatsMap the input/output format mapping used for reprocessing, or
     *        {@code null} if reprocessing is not supported
     * @param listHighResolution a flag indicating whether the device supports BURST_CAPTURE
     *        and thus needs a separate list of slow high-resolution output sizes
     * @throws NullPointerException if any of the arguments except highSpeedVideoConfigurations
     *         were {@code null} or any subelements were {@code null}
     *
     * @hide
     */
    public StreamConfigurationMap(
            StreamConfiguration[] configurations,
            StreamConfigurationDuration[] minFrameDurations,
            StreamConfigurationDuration[] stallDurations,
            StreamConfiguration[] depthConfigurations,
            StreamConfigurationDuration[] depthMinFrameDurations,
            StreamConfigurationDuration[] depthStallDurations,
            HighSpeedVideoConfiguration[] highSpeedVideoConfigurations,
            ReprocessFormatsMap inputOutputFormatsMap,
            boolean listHighResolution) {
        mConfigurations = checkArrayElementsNotNull(configurations, "configurations");
        mMinFrameDurations = checkArrayElementsNotNull(minFrameDurations, "minFrameDurations");
        mStallDurations = checkArrayElementsNotNull(stallDurations, "stallDurations");
        mListHighResolution = listHighResolution;

        // Depth support is optional; normalize absent arrays to empty so later
        // loops and lookups never need null checks.
        if (depthConfigurations == null) {
            mDepthConfigurations = new StreamConfiguration[0];
            mDepthMinFrameDurations = new StreamConfigurationDuration[0];
            mDepthStallDurations = new StreamConfigurationDuration[0];
        } else {
            mDepthConfigurations = checkArrayElementsNotNull(depthConfigurations,
                    "depthConfigurations");
            mDepthMinFrameDurations = checkArrayElementsNotNull(depthMinFrameDurations,
                    "depthMinFrameDurations");
            mDepthStallDurations = checkArrayElementsNotNull(depthStallDurations,
                    "depthStallDurations");
        }

        // High speed video support is likewise optional.
        if (highSpeedVideoConfigurations == null) {
            mHighSpeedVideoConfigurations = new HighSpeedVideoConfiguration[0];
        } else {
            mHighSpeedVideoConfigurations = checkArrayElementsNotNull(
                    highSpeedVideoConfigurations, "highSpeedVideoConfigurations");
        }

        // For each format, track how many sizes there are available to configure
        for (StreamConfiguration config : configurations) {
            int fmt = config.getFormat();
            SparseIntArray map = null;
            if (config.isOutput()) {
                mAllOutputFormats.put(fmt, mAllOutputFormats.get(fmt) + 1);
                long duration = 0;
                if (mListHighResolution) {
                    // Find this configuration's minimum frame duration so it can be
                    // classified as a regular or a slow high-resolution output.
                    for (StreamConfigurationDuration configurationDuration : mMinFrameDurations) {
                        if (configurationDuration.getFormat() == fmt &&
                                configurationDuration.getWidth() == config.getSize().getWidth() &&
                                configurationDuration.getHeight() == config.getSize().getHeight()) {
                            duration = configurationDuration.getDuration();
                            break;
                        }
                    }
                }
                // Outputs that can sustain at least 20fps stay in the regular map;
                // slower ones go into the separate high-resolution map.
                map = duration <= DURATION_20FPS_NS ?
                        mOutputFormats : mHighResOutputFormats;
            } else {
                map = mInputFormats;
            }
            map.put(fmt, map.get(fmt) + 1);
        }

        // For each depth format, track how many sizes there are available to configure
        for (StreamConfiguration config : mDepthConfigurations) {
            if (!config.isOutput()) {
                // Ignoring input depth configs
                continue;
            }

            mDepthOutputFormats.put(config.getFormat(),
                    mDepthOutputFormats.get(config.getFormat()) + 1);
        }

        // Every camera device must expose at least one IMPLEMENTATION_DEFINED output.
        if (mOutputFormats.indexOfKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) < 0) {
            throw new AssertionError(
                    "At least one stream configuration for IMPLEMENTATION_DEFINED must exist");
        }

        // For each Size/FPS range, track how many FPS range/Size there are available
        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
            Size size = config.getSize();
            Range<Integer> fpsRange = config.getFpsRange();
            Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
            if (fpsRangeCount == null) {
                fpsRangeCount = 0;
            }
            mHighSpeedVideoSizeMap.put(size, fpsRangeCount + 1);
            Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
            if (sizeCount == null) {
                sizeCount = 0;
            }
            mHighSpeedVideoFpsRangeMap.put(fpsRange, sizeCount + 1);
        }

        // May be null when reprocessing is unsupported; callers must handle that.
        mInputOutputFormatsMap = inputOutputFormatsMap;
    }
184
185    /**
186     * Get the image {@code format} output formats in this stream configuration.
187     *
188     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
189     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
190     *
191     * <p>Formats listed in this array are guaranteed to return true if queried with
192     * {@link #isOutputSupportedFor(int)}.</p>
193     *
194     * @return an array of integer format
195     *
196     * @see ImageFormat
197     * @see PixelFormat
198     */
199    public final int[] getOutputFormats() {
200        return getPublicFormats(/*output*/true);
201    }
202
203    /**
204     * Get the image {@code format} output formats for a reprocessing input format.
205     *
206     * <p>When submitting a {@link CaptureRequest} with an input Surface of a given format,
207     * the only allowed target outputs of the {@link CaptureRequest} are the ones with a format
208     * listed in the return value of this method. Including any other output Surface as a target
209     * will throw an IllegalArgumentException. If no output format is supported given the input
210     * format, an empty int[] will be returned.</p>
211     *
212     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
213     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
214     *
215     * <p>Formats listed in this array are guaranteed to return true if queried with
216     * {@link #isOutputSupportedFor(int)}.</p>
217     *
218     * @return an array of integer format
219     *
220     * @see ImageFormat
221     * @see PixelFormat
222     */
223    public final int[] getValidOutputFormatsForInput(int inputFormat) {
224        if (mInputOutputFormatsMap == null) {
225            return new int[0];
226        }
227        return mInputOutputFormatsMap.getOutputs(inputFormat);
228    }
229
230    /**
231     * Get the image {@code format} input formats in this stream configuration.
232     *
233     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
234     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
235     *
236     * @return an array of integer format
237     *
238     * @see ImageFormat
239     * @see PixelFormat
240     */
241    public final int[] getInputFormats() {
242        return getPublicFormats(/*output*/false);
243    }
244
245    /**
246     * Get the supported input sizes for this input format.
247     *
248     * <p>The format must have come from {@link #getInputFormats}; otherwise
249     * {@code null} is returned.</p>
250     *
251     * @param format a format from {@link #getInputFormats}
252     * @return a non-empty array of sizes, or {@code null} if the format was not available.
253     */
254    public Size[] getInputSizes(final int format) {
255        return getPublicFormatSizes(format, /*output*/false, /*highRes*/false);
256    }
257
258    /**
259     * Determine whether or not output surfaces with a particular user-defined format can be passed
260     * {@link CameraDevice#createCaptureSession createCaptureSession}.
261     *
262     * <p>This method determines that the output {@code format} is supported by the camera device;
263     * each output {@code surface} target may or may not itself support that {@code format}.
264     * Refer to the class which provides the surface for additional documentation.</p>
265     *
266     * <p>Formats for which this returns {@code true} are guaranteed to exist in the result
267     * returned by {@link #getOutputSizes}.</p>
268     *
269     * @param format an image format from either {@link ImageFormat} or {@link PixelFormat}
270     * @return
271     *          {@code true} iff using a {@code surface} with this {@code format} will be
272     *          supported with {@link CameraDevice#createCaptureSession}
273     *
274     * @throws IllegalArgumentException
275     *          if the image format was not a defined named constant
276     *          from either {@link ImageFormat} or {@link PixelFormat}
277     *
278     * @see ImageFormat
279     * @see PixelFormat
280     * @see CameraDevice#createCaptureSession
281     */
282    public boolean isOutputSupportedFor(int format) {
283        checkArgumentFormat(format);
284
285        int internalFormat = imageFormatToInternal(format);
286        int dataspace = imageFormatToDataspace(format);
287        if (dataspace == HAL_DATASPACE_DEPTH) {
288            return mDepthOutputFormats.indexOfKey(internalFormat) >= 0;
289        } else {
290            return getFormatsMap(/*output*/true).indexOfKey(internalFormat) >= 0;
291        }
292    }
293
294    /**
295     * Determine whether or not output streams can be configured with a particular class
296     * as a consumer.
297     *
298     * <p>The following list is generally usable for outputs:
299     * <ul>
300     * <li>{@link android.media.ImageReader} -
301     * Recommended for image processing or streaming to external resources (such as a file or
302     * network)
303     * <li>{@link android.media.MediaRecorder} -
304     * Recommended for recording video (simple to use)
305     * <li>{@link android.media.MediaCodec} -
306     * Recommended for recording video (more complicated to use, with more flexibility)
307     * <li>{@link android.renderscript.Allocation} -
308     * Recommended for image processing with {@link android.renderscript RenderScript}
309     * <li>{@link android.view.SurfaceHolder} -
310     * Recommended for low-power camera preview with {@link android.view.SurfaceView}
311     * <li>{@link android.graphics.SurfaceTexture} -
312     * Recommended for OpenGL-accelerated preview processing or compositing with
313     * {@link android.view.TextureView}
314     * </ul>
315     * </p>
316     *
317     * <p>Generally speaking this means that creating a {@link Surface} from that class <i>may</i>
318     * provide a producer endpoint that is suitable to be used with
319     * {@link CameraDevice#createCaptureSession}.</p>
320     *
321     * <p>Since not all of the above classes support output of all format and size combinations,
322     * the particular combination should be queried with {@link #isOutputSupportedFor(Surface)}.</p>
323     *
324     * @param klass a non-{@code null} {@link Class} object reference
325     * @return {@code true} if this class is supported as an output, {@code false} otherwise
326     *
327     * @throws NullPointerException if {@code klass} was {@code null}
328     *
329     * @see CameraDevice#createCaptureSession
330     * @see #isOutputSupportedFor(Surface)
331     */
332    public static <T> boolean isOutputSupportedFor(Class<T> klass) {
333        checkNotNull(klass, "klass must not be null");
334
335        if (klass == android.media.ImageReader.class) {
336            return true;
337        } else if (klass == android.media.MediaRecorder.class) {
338            return true;
339        } else if (klass == android.media.MediaCodec.class) {
340            return true;
341        } else if (klass == android.renderscript.Allocation.class) {
342            return true;
343        } else if (klass == android.view.SurfaceHolder.class) {
344            return true;
345        } else if (klass == android.graphics.SurfaceTexture.class) {
346            return true;
347        }
348
349        return false;
350    }
351
352    /**
353     * Determine whether or not the {@code surface} in its current state is suitable to be included
354     * in a {@link CameraDevice#createCaptureSession capture session} as an output.
355     *
356     * <p>Not all surfaces are usable with the {@link CameraDevice}, and not all configurations
357     * of that {@code surface} are compatible. Some classes that provide the {@code surface} are
358     * compatible with the {@link CameraDevice} in general
 * (see {@link #isOutputSupportedFor(Class)}), but it is the caller's responsibility to put the
360     * {@code surface} into a state that will be compatible with the {@link CameraDevice}.</p>
361     *
362     * <p>Reasons for a {@code surface} being specifically incompatible might be:
363     * <ul>
364     * <li>Using a format that's not listed by {@link #getOutputFormats}
365     * <li>Using a format/size combination that's not listed by {@link #getOutputSizes}
 * <li>The {@code surface} itself is not in a state where it can service a new producer.
367     * </li>
368     * </ul>
369     *
370     * <p>Surfaces from flexible sources will return true even if the exact size of the Surface does
371     * not match a camera-supported size, as long as the format (or class) is supported and the
 * camera device supports a size that is equal to or less than 1080p in that format. If such a
373     * Surface is used to create a capture session, it will have its size rounded to the nearest
374     * supported size, below or equal to 1080p. Flexible sources include SurfaceView, SurfaceTexture,
375     * and ImageReader.</p>
376     *
377     * <p>This is not an exhaustive list; see the particular class's documentation for further
378     * possible reasons of incompatibility.</p>
379     *
380     * @param surface a non-{@code null} {@link Surface} object reference
381     * @return {@code true} if this is supported, {@code false} otherwise
382     *
383     * @throws NullPointerException if {@code surface} was {@code null}
384     * @throws IllegalArgumentException if the Surface endpoint is no longer valid
385     *
386     * @see CameraDevice#createCaptureSession
387     * @see #isOutputSupportedFor(Class)
388     */
389    public boolean isOutputSupportedFor(Surface surface) {
390        checkNotNull(surface, "surface must not be null");
391
392        Size surfaceSize;
393        int surfaceFormat = -1;
394        try {
395            surfaceSize = LegacyCameraDevice.getSurfaceSize(surface);
396            surfaceFormat = LegacyCameraDevice.detectSurfaceType(surface);
397        } catch(BufferQueueAbandonedException e) {
398            throw new IllegalArgumentException("Abandoned surface", e);
399        }
400
401        // See if consumer is flexible.
402        boolean isFlexible = LegacyCameraDevice.isFlexibleConsumer(surface);
403
404        // Override RGB formats to IMPLEMENTATION_DEFINED, b/9487482
405        if ((surfaceFormat >= LegacyMetadataMapper.HAL_PIXEL_FORMAT_RGBA_8888 &&
406                        surfaceFormat <= LegacyMetadataMapper.HAL_PIXEL_FORMAT_BGRA_8888)) {
407            surfaceFormat = LegacyMetadataMapper.HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
408        }
409
410        for (StreamConfiguration config : mConfigurations) {
411            if (config.getFormat() == surfaceFormat && config.isOutput()) {
412                // Mathing format, either need exact size match, or a flexible consumer
413                // and a size no bigger than MAX_DIMEN_FOR_ROUNDING
414                if (config.getSize().equals(surfaceSize)) {
415                    return true;
416                } else if (isFlexible &&
417                        (config.getSize().getWidth() <= LegacyCameraDevice.MAX_DIMEN_FOR_ROUNDING)) {
418                    return true;
419                }
420            }
421        }
422        return false;
423    }
424
425    /**
426     * Get a list of sizes compatible with {@code klass} to use as an output.
427     *
428     * <p>Some of the supported classes may support additional formats beyond
429     * {@link ImageFormat#PRIVATE}; this function only returns
430     * sizes for {@link ImageFormat#PRIVATE}. For example, {@link android.media.ImageReader}
431     * supports {@link ImageFormat#YUV_420_888} and {@link ImageFormat#PRIVATE}, this method will
432     * only return the sizes for {@link ImageFormat#PRIVATE} for {@link android.media.ImageReader}
433     * class.</p>
434     *
435     * <p>If a well-defined format such as {@code NV21} is required, use
436     * {@link #getOutputSizes(int)} instead.</p>
437     *
 * <p>The {@code klass} should be a supported output; that is, querying
 * {@link #isOutputSupportedFor(Class)} with it should return {@code true}.</p>
440     *
441     * @param klass
442     *          a non-{@code null} {@link Class} object reference
443     * @return
444     *          an array of supported sizes for {@link ImageFormat#PRIVATE} format,
445     *          or {@code null} iff the {@code klass} is not a supported output.
446     *
447     *
448     * @throws NullPointerException if {@code klass} was {@code null}
449     *
450     * @see #isOutputSupportedFor(Class)
451     */
452    public <T> Size[] getOutputSizes(Class<T> klass) {
453        if (isOutputSupportedFor(klass) == false) {
454            return null;
455        }
456
457        return getInternalFormatSizes(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
458                HAL_DATASPACE_UNKNOWN,/*output*/true, /*highRes*/false);
459    }
460
461    /**
462     * Get a list of sizes compatible with the requested image {@code format}.
463     *
464     * <p>The {@code format} should be a supported format (one of the formats returned by
465     * {@link #getOutputFormats}).</p>
466     *
467     * As of API level 23, the {@link #getHighResolutionOutputSizes} method can be used on devices
468     * that support the
469     * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE}
470     * capability to get a list of high-resolution output sizes that cannot operate at the preferred
471     * 20fps rate. This means that for some supported formats, this method will return an empty
472     * list, if all the supported resolutions operate at below 20fps.  For devices that do not
473     * support the BURST_CAPTURE capability, all output resolutions are listed through this method.
474     *
475     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
476     * @return
477     *          an array of supported sizes,
478     *          or {@code null} if the {@code format} is not a supported output
479     *
480     * @see ImageFormat
481     * @see PixelFormat
482     * @see #getOutputFormats
483     */
484    public Size[] getOutputSizes(int format) {
485        return getPublicFormatSizes(format, /*output*/true, /*highRes*/ false);
486    }
487
488    /**
489     * Get a list of supported high speed video recording sizes.
490     *
491     * <p> When HIGH_SPEED_VIDEO is supported in
492     * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this
493     * method will list the supported high speed video size configurations. All the sizes listed
494     * will be a subset of the sizes reported by {@link #getOutputSizes} for processed non-stalling
495     * formats (typically ImageFormat#YUV_420_888, ImageFormat#NV21, ImageFormat#YV12)</p>
496     *
497     * <p> To enable high speed video recording, application must set
498     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
499     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
500     * requests and select the video size from this method and
501     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
502     * {@link #getHighSpeedVideoFpsRangesFor} to configure the recording and preview streams and
503     * setup the recording requests. For example, if the application intends to do high speed
504     * recording, it can select the maximum size reported by this method to configure output
505     * streams. Note that for the use case of multiple output streams, application must select one
506     * unique size from this method to use. Otherwise a request error might occur. Once the size is
507     * selected, application can get the supported FPS ranges by
508     * {@link #getHighSpeedVideoFpsRangesFor}, and use these FPS ranges to setup the recording
509     * requests.</p>
510     *
511     * @return
512     *          an array of supported high speed video recording sizes
513     *
514     * @see #getHighSpeedVideoFpsRangesFor(Size)
515     */
516    public Size[] getHighSpeedVideoSizes() {
517        Set<Size> keySet = mHighSpeedVideoSizeMap.keySet();
518        return keySet.toArray(new Size[keySet.size()]);
519    }
520
521    /**
522     * Get the frame per second ranges (fpsMin, fpsMax) for input high speed video size.
523     *
524     * <p> See {@link #getHighSpeedVideoSizes} for how to enable high speed recording.</p>
525     *
526     * <p> For normal video recording use case, where some application will NOT set
527     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
528     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
529     * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in
530     * this method must not be used to setup capture requests, or it will cause request error.</p>
531     *
532     * @param size one of the sizes returned by {@link #getHighSpeedVideoSizes()}
533     * @return
534     *          An array of FPS range to use with
535     *          {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE TARGET_FPS_RANGE} when using
536     *          {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene
537     *          mode.
538     *          The upper bound of returned ranges is guaranteed to be larger or equal to 60.
539     *
540     * @throws IllegalArgumentException if input size does not exist in the return value of
541     *         getHighSpeedVideoSizes
542     * @see #getHighSpeedVideoSizes()
543     */
544    public Range<Integer>[] getHighSpeedVideoFpsRangesFor(Size size) {
545        Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
546        if (fpsRangeCount == null || fpsRangeCount == 0) {
547            throw new IllegalArgumentException(String.format(
548                    "Size %s does not support high speed video recording", size));
549        }
550
551        @SuppressWarnings("unchecked")
552        Range<Integer>[] fpsRanges = new Range[fpsRangeCount];
553        int i = 0;
554        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
555            if (size.equals(config.getSize())) {
556                fpsRanges[i++] = config.getFpsRange();
557            }
558        }
559        return fpsRanges;
560    }
561
562    /**
563     * Get a list of supported high speed video recording FPS ranges.
564     *
565     * <p> When HIGH_SPEED_VIDEO is supported in
566     * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this
567     * method will list the supported high speed video FPS range configurations. Application can
568     * then use {@link #getHighSpeedVideoSizesFor} to query available sizes for one of returned
569     * FPS range.</p>
570     *
571     * <p> To enable high speed video recording, application must set
572     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
573     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
574     * requests and select the video size from {@link #getHighSpeedVideoSizesFor} and
575     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
576     * this method to configure the recording and preview streams and setup the recording requests.
577     * For example, if the application intends to do high speed recording, it can select one FPS
578     * range reported by this method, query the video sizes corresponding to this FPS range  by
579     * {@link #getHighSpeedVideoSizesFor} and select one of reported sizes to configure output
580     * streams. Note that for the use case of multiple output streams, application must select one
581     * unique size from {@link #getHighSpeedVideoSizesFor}, and use it for all output streams.
582     * Otherwise a request error might occur when attempting to enable
583     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO}.
584     * Once the stream is configured, application can set the FPS range in the recording requests.
585     * </p>
586     *
587     * @return
588     *          an array of supported high speed video recording FPS ranges
589     *          The upper bound of returned ranges is guaranteed to be larger or equal to 60.
590     *
591     * @see #getHighSpeedVideoSizesFor
592     */
593    @SuppressWarnings("unchecked")
594    public Range<Integer>[] getHighSpeedVideoFpsRanges() {
595        Set<Range<Integer>> keySet = mHighSpeedVideoFpsRangeMap.keySet();
596        return keySet.toArray(new Range[keySet.size()]);
597    }
598
599    /**
600     * Get the supported video sizes for input FPS range.
601     *
602     * <p> See {@link #getHighSpeedVideoFpsRanges} for how to enable high speed recording.</p>
603     *
604     * <p> For normal video recording use case, where the application will NOT set
605     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
606     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
607     * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in
608     * this method must not be used to setup capture requests, or it will cause request error.</p>
609     *
610     * @param fpsRange one of the FPS range returned by {@link #getHighSpeedVideoFpsRanges()}
611     * @return
612     *          An array of video sizes to configure output stream when using
613     *          {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene
614     *          mode.
615     *
616     * @throws IllegalArgumentException if input FPS range does not exist in the return value of
617     *         getHighSpeedVideoFpsRanges
618     * @see #getHighSpeedVideoFpsRanges()
619     */
620    public Size[] getHighSpeedVideoSizesFor(Range<Integer> fpsRange) {
621        Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
622        if (sizeCount == null || sizeCount == 0) {
623            throw new IllegalArgumentException(String.format(
624                    "FpsRange %s does not support high speed video recording", fpsRange));
625        }
626
627        Size[] sizes = new Size[sizeCount];
628        int i = 0;
629        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
630            if (fpsRange.equals(config.getFpsRange())) {
631                sizes[i++] = config.getSize();
632            }
633        }
634        return sizes;
635    }
636
637    /**
638     * Get a list of supported high resolution sizes, which cannot operate at full BURST_CAPTURE
639     * rate.
640     *
641     * <p>This includes all output sizes that cannot meet the 20 fps frame rate requirements for the
642     * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE}
643     * capability.  This does not include the stall duration, so for example, a JPEG or RAW16 output
644     * resolution with a large stall duration but a minimum frame duration that's above 20 fps will
645     * still be listed in the regular {@link #getOutputSizes} list. All the sizes on this list are
646     * still guaranteed to operate at a rate of at least 10 fps, not including stall duration.</p>
647     *
648     * <p>For a device that does not support the BURST_CAPTURE capability, this list will be
649     * {@code null}, since resolutions in the {@link #getOutputSizes} list are already not
650     * guaranteed to meet &gt;= 20 fps rate requirements. For a device that does support the
651     * BURST_CAPTURE capability, this list may be empty, if all supported resolutions meet the 20
652     * fps requirement.</p>
653     *
654     * @return an array of supported slower high-resolution sizes, or {@code null} if the
655     *         BURST_CAPTURE capability is not supported
656     */
657    public Size[] getHighResolutionOutputSizes(int format) {
658        if (!mListHighResolution) return null;
659
660        return getPublicFormatSizes(format, /*output*/true, /*highRes*/ true);
661    }
662
663    /**
664     * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
665     * for the format/size combination (in nanoseconds).
666     *
667     * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
668     * <p>{@code size} should be one of the ones returned by
669     * {@link #getOutputSizes(int)}.</p>
670     *
671     * <p>This should correspond to the frame duration when only that stream is active, with all
672     * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
673     * </p>
674     *
675     * <p>When multiple streams are used in a request, the minimum frame duration will be
676     * {@code max(individual stream min durations)}.</p>
677     *
678     * <p>For devices that do not support manual sensor control
679     * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
680     * this function may return 0.</p>
681     *
682     * <!--
683     * TODO: uncomment after adding input stream support
684     * <p>The minimum frame duration of a stream (of a particular format, size) is the same
685     * regardless of whether the stream is input or output.</p>
686     * -->
687     *
688     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
689     * @param size an output-compatible size
690     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
691     *          0 if the minimum frame duration is not available.
692     *
693     * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
694     * @throws NullPointerException if {@code size} was {@code null}
695     *
696     * @see CaptureRequest#SENSOR_FRAME_DURATION
697     * @see #getOutputStallDuration(int, Size)
698     * @see ImageFormat
699     * @see PixelFormat
700     */
701    public long getOutputMinFrameDuration(int format, Size size) {
702        checkNotNull(size, "size must not be null");
703        checkArgumentFormatSupported(format, /*output*/true);
704
705        return getInternalFormatDuration(imageFormatToInternal(format),
706                imageFormatToDataspace(format),
707                size,
708                DURATION_MIN_FRAME);
709    }
710
711    /**
712     * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
713     * for the class/size combination (in nanoseconds).
714     *
     * <p>This assumes that the {@code klass} is set up to use {@link ImageFormat#PRIVATE}.
716     * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p>
717     *
718     * <p>{@code klass} should be one of the ones which is supported by
719     * {@link #isOutputSupportedFor(Class)}.</p>
720     *
721     * <p>{@code size} should be one of the ones returned by
722     * {@link #getOutputSizes(int)}.</p>
723     *
724     * <p>This should correspond to the frame duration when only that stream is active, with all
725     * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
726     * </p>
727     *
728     * <p>When multiple streams are used in a request, the minimum frame duration will be
729     * {@code max(individual stream min durations)}.</p>
730     *
731     * <p>For devices that do not support manual sensor control
732     * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
733     * this function may return 0.</p>
734     *
735     * <!--
736     * TODO: uncomment after adding input stream support
737     * <p>The minimum frame duration of a stream (of a particular format, size) is the same
738     * regardless of whether the stream is input or output.</p>
739     * -->
740     *
741     * @param klass
742     *          a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
743     *          non-empty array returned by {@link #getOutputSizes(Class)}
744     * @param size an output-compatible size
745     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
746     *          0 if the minimum frame duration is not available.
747     *
748     * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
749     * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
750     *
751     * @see CaptureRequest#SENSOR_FRAME_DURATION
752     * @see ImageFormat
753     * @see PixelFormat
754     */
755    public <T> long getOutputMinFrameDuration(final Class<T> klass, final Size size) {
756        if (!isOutputSupportedFor(klass)) {
757            throw new IllegalArgumentException("klass was not supported");
758        }
759
760        return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
761                HAL_DATASPACE_UNKNOWN,
762                size, DURATION_MIN_FRAME);
763    }
764
765    /**
766     * Get the stall duration for the format/size combination (in nanoseconds).
767     *
768     * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
769     * <p>{@code size} should be one of the ones returned by
770     * {@link #getOutputSizes(int)}.</p>
771     *
772     * <p>
773     * A stall duration is how much extra time would get added to the normal minimum frame duration
774     * for a repeating request that has streams with non-zero stall.
775     *
776     * <p>For example, consider JPEG captures which have the following characteristics:
777     *
778     * <ul>
779     * <li>JPEG streams act like processed YUV streams in requests for which they are not included;
780     * in requests in which they are directly referenced, they act as JPEG streams.
781     * This is because supporting a JPEG stream requires the underlying YUV data to always be ready
782     * for use by a JPEG encoder, but the encoder will only be used (and impact frame duration) on
783     * requests that actually reference a JPEG stream.
784     * <li>The JPEG processor can run concurrently to the rest of the camera pipeline, but cannot
785     * process more than 1 capture at a time.
786     * </ul>
787     *
788     * <p>In other words, using a repeating YUV request would result in a steady frame rate
789     * (let's say it's 30 FPS). If a single JPEG request is submitted periodically,
790     * the frame rate will stay at 30 FPS (as long as we wait for the previous JPEG to return each
791     * time). If we try to submit a repeating YUV + JPEG request, then the frame rate will drop from
792     * 30 FPS.</p>
793     *
794     * <p>In general, submitting a new request with a non-0 stall time stream will <em>not</em> cause a
795     * frame rate drop unless there are still outstanding buffers for that stream from previous
796     * requests.</p>
797     *
798     * <p>Submitting a repeating request with streams (call this {@code S}) is the same as setting
799     * the minimum frame duration from the normal minimum frame duration corresponding to {@code S},
800     * added with the maximum stall duration for {@code S}.</p>
801     *
802     * <p>If interleaving requests with and without a stall duration, a request will stall by the
803     * maximum of the remaining times for each can-stall stream with outstanding buffers.</p>
804     *
805     * <p>This means that a stalling request will not have an exposure start until the stall has
806     * completed.</p>
807     *
808     * <p>This should correspond to the stall duration when only that stream is active, with all
809     * processing (typically in {@code android.*.mode}) set to {@code FAST} or {@code OFF}.
810     * Setting any of the processing modes to {@code HIGH_QUALITY} effectively results in an
811     * indeterminate stall duration for all streams in a request (the regular stall calculation
812     * rules are ignored).</p>
813     *
814     * <p>The following formats may always have a stall duration:
815     * <ul>
816     * <li>{@link ImageFormat#JPEG JPEG}
817     * <li>{@link ImageFormat#RAW_SENSOR RAW16}
818     * </ul>
819     * </p>
820     *
821     * <p>The following formats will never have a stall duration:
822     * <ul>
823     * <li>{@link ImageFormat#YUV_420_888 YUV_420_888}
824     * <li>{@link #isOutputSupportedFor(Class) Implementation-Defined}
825     * </ul></p>
826     *
827     * <p>
828     * All other formats may or may not have an allowed stall duration on a per-capability basis;
829     * refer to {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES
830     * android.request.availableCapabilities} for more details.</p>
831     * </p>
832     *
833     * <p>See {@link CaptureRequest#SENSOR_FRAME_DURATION android.sensor.frameDuration}
834     * for more information about calculating the max frame rate (absent stalls).</p>
835     *
836     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
837     * @param size an output-compatible size
838     * @return a stall duration {@code >=} 0 in nanoseconds
839     *
840     * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
841     * @throws NullPointerException if {@code size} was {@code null}
842     *
843     * @see CaptureRequest#SENSOR_FRAME_DURATION
844     * @see ImageFormat
845     * @see PixelFormat
846     */
847    public long getOutputStallDuration(int format, Size size) {
848        checkArgumentFormatSupported(format, /*output*/true);
849
850        return getInternalFormatDuration(imageFormatToInternal(format),
851                imageFormatToDataspace(format),
852                size,
853                DURATION_STALL);
854    }
855
856    /**
857     * Get the stall duration for the class/size combination (in nanoseconds).
858     *
     * <p>This assumes that the {@code klass} is set up to use {@link ImageFormat#PRIVATE}.
     * For user-defined formats, use {@link #getOutputStallDuration(int, Size)}.</p>
861     *
862     * <p>{@code klass} should be one of the ones with a non-empty array returned by
863     * {@link #getOutputSizes(Class)}.</p>
864     *
865     * <p>{@code size} should be one of the ones returned by
866     * {@link #getOutputSizes(Class)}.</p>
867     *
868     * <p>See {@link #getOutputStallDuration(int, Size)} for a definition of a
869     * <em>stall duration</em>.</p>
870     *
871     * @param klass
872     *          a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
873     *          non-empty array returned by {@link #getOutputSizes(Class)}
874     * @param size an output-compatible size
875     * @return a minimum frame duration {@code >=} 0 in nanoseconds
876     *
877     * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
878     * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
879     *
880     * @see CaptureRequest#SENSOR_FRAME_DURATION
881     * @see ImageFormat
882     * @see PixelFormat
883     */
884    public <T> long getOutputStallDuration(final Class<T> klass, final Size size) {
885        if (!isOutputSupportedFor(klass)) {
886            throw new IllegalArgumentException("klass was not supported");
887        }
888
889        return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
890                HAL_DATASPACE_UNKNOWN, size, DURATION_STALL);
891    }
892
893    /**
894     * Check if this {@link StreamConfigurationMap} is equal to another
895     * {@link StreamConfigurationMap}.
896     *
     * <p>Two vectors are equal if and only if each of the respective elements is equal.</p>
898     *
899     * @return {@code true} if the objects were equal, {@code false} otherwise
900     */
901    @Override
902    public boolean equals(final Object obj) {
903        if (obj == null) {
904            return false;
905        }
906        if (this == obj) {
907            return true;
908        }
909        if (obj instanceof StreamConfigurationMap) {
910            final StreamConfigurationMap other = (StreamConfigurationMap) obj;
911            // XX: do we care about order?
912            return Arrays.equals(mConfigurations, other.mConfigurations) &&
913                    Arrays.equals(mMinFrameDurations, other.mMinFrameDurations) &&
914                    Arrays.equals(mStallDurations, other.mStallDurations) &&
915                    Arrays.equals(mDepthConfigurations, other.mDepthConfigurations) &&
916                    Arrays.equals(mHighSpeedVideoConfigurations,
917                            other.mHighSpeedVideoConfigurations);
918        }
919        return false;
920    }
921
922    /**
923     * {@inheritDoc}
924     */
925    @Override
926    public int hashCode() {
927        // XX: do we care about order?
928        return HashCodeHelpers.hashCodeGeneric(
929                mConfigurations, mMinFrameDurations,
930                mStallDurations,
931                mDepthConfigurations, mHighSpeedVideoConfigurations);
932    }
933
934    // Check that the argument is supported by #getOutputFormats or #getInputFormats
935    private int checkArgumentFormatSupported(int format, boolean output) {
936        checkArgumentFormat(format);
937
938        int internalFormat = imageFormatToInternal(format);
939        int internalDataspace = imageFormatToDataspace(format);
940
941        if (output) {
942            if (internalDataspace == HAL_DATASPACE_DEPTH) {
943                if (mDepthOutputFormats.indexOfKey(internalFormat) >= 0) {
944                    return format;
945                }
946            } else {
947                if (mAllOutputFormats.indexOfKey(internalFormat) >= 0) {
948                    return format;
949                }
950            }
951        } else {
952            if (mInputFormats.indexOfKey(internalFormat) >= 0) {
953                return format;
954            }
955        }
956
957        throw new IllegalArgumentException(String.format(
958                "format %x is not supported by this stream configuration map", format));
959    }
960
961    /**
962     * Ensures that the format is either user-defined or implementation defined.
963     *
964     * <p>If a format has a different internal representation than the public representation,
965     * passing in the public representation here will fail.</p>
966     *
967     * <p>For example if trying to use {@link ImageFormat#JPEG}:
968     * it has a different public representation than the internal representation
969     * {@code HAL_PIXEL_FORMAT_BLOB}, this check will fail.</p>
970     *
971     * <p>Any invalid/undefined formats will raise an exception.</p>
972     *
973     * @param format image format
974     * @return the format
975     *
976     * @throws IllegalArgumentException if the format was invalid
977     */
978    static int checkArgumentFormatInternal(int format) {
979        switch (format) {
980            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
981            case HAL_PIXEL_FORMAT_BLOB:
982            case HAL_PIXEL_FORMAT_RAW_OPAQUE:
983            case HAL_PIXEL_FORMAT_Y16:
984                return format;
985            case ImageFormat.JPEG:
986                throw new IllegalArgumentException(
987                        "ImageFormat.JPEG is an unknown internal format");
988            default:
989                return checkArgumentFormat(format);
990        }
991    }
992
993    /**
994     * Ensures that the format is publicly user-defined in either ImageFormat or PixelFormat.
995     *
996     * <p>If a format has a different public representation than the internal representation,
997     * passing in the internal representation here will fail.</p>
998     *
999     * <p>For example if trying to use {@code HAL_PIXEL_FORMAT_BLOB}:
1000     * it has a different internal representation than the public representation
1001     * {@link ImageFormat#JPEG}, this check will fail.</p>
1002     *
1003     * <p>Any invalid/undefined formats will raise an exception, including implementation-defined.
1004     * </p>
1005     *
1006     * <p>Note that {@code @hide} and deprecated formats will not pass this check.</p>
1007     *
1008     * @param format image format
1009     * @return the format
1010     *
1011     * @throws IllegalArgumentException if the format was not user-defined
1012     */
1013    static int checkArgumentFormat(int format) {
1014        if (!ImageFormat.isPublicFormat(format) && !PixelFormat.isPublicFormat(format)) {
1015            throw new IllegalArgumentException(String.format(
1016                    "format 0x%x was not defined in either ImageFormat or PixelFormat", format));
1017        }
1018
1019        return format;
1020    }
1021
1022    /**
1023     * Convert an internal format compatible with {@code graphics.h} into public-visible
1024     * {@code ImageFormat}. This assumes the dataspace of the format is not HAL_DATASPACE_DEPTH.
1025     *
1026     * <p>In particular these formats are converted:
1027     * <ul>
1028     * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.JPEG</li>
1029     * </ul>
1030     * </p>
1031     *
1032     * <p>Passing in a format which has no public equivalent will fail;
1033     * as will passing in a public format which has a different internal format equivalent.
1034     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1035     *
1036     * <p>All other formats are returned as-is, no further invalid check is performed.</p>
1037     *
1038     * <p>This function is the dual of {@link #imageFormatToInternal} for dataspaces other than
1039     * HAL_DATASPACE_DEPTH.</p>
1040     *
1041     * @param format image format from {@link ImageFormat} or {@link PixelFormat}
1042     * @return the converted image formats
1043     *
1044     * @throws IllegalArgumentException
1045     *          if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
1046     *          {@link ImageFormat#JPEG}
1047     *
1048     * @see ImageFormat
1049     * @see PixelFormat
1050     * @see #checkArgumentFormat
1051     */
1052    static int imageFormatToPublic(int format) {
1053        switch (format) {
1054            case HAL_PIXEL_FORMAT_BLOB:
1055                return ImageFormat.JPEG;
1056            case ImageFormat.JPEG:
1057                throw new IllegalArgumentException(
1058                        "ImageFormat.JPEG is an unknown internal format");
1059            default:
1060                return format;
1061        }
1062    }
1063
1064    /**
1065     * Convert an internal format compatible with {@code graphics.h} into public-visible
1066     * {@code ImageFormat}. This assumes the dataspace of the format is HAL_DATASPACE_DEPTH.
1067     *
1068     * <p>In particular these formats are converted:
1069     * <ul>
1070     * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.DEPTH_POINT_CLOUD
1071     * <li>HAL_PIXEL_FORMAT_Y16 => ImageFormat.DEPTH16
1072     * </ul>
1073     * </p>
1074     *
1075     * <p>Passing in an implementation-defined format which has no public equivalent will fail;
1076     * as will passing in a public format which has a different internal format equivalent.
1077     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1078     *
1079     * <p>All other formats are returned as-is, no further invalid check is performed.</p>
1080     *
1081     * <p>This function is the dual of {@link #imageFormatToInternal} for formats associated with
1082     * HAL_DATASPACE_DEPTH.</p>
1083     *
1084     * @param format image format from {@link ImageFormat} or {@link PixelFormat}
1085     * @return the converted image formats
1086     *
1087     * @throws IllegalArgumentException
1088     *          if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
1089     *          {@link ImageFormat#JPEG}
1090     *
1091     * @see ImageFormat
1092     * @see PixelFormat
1093     * @see #checkArgumentFormat
1094     */
1095    static int depthFormatToPublic(int format) {
1096        switch (format) {
1097            case HAL_PIXEL_FORMAT_BLOB:
1098                return ImageFormat.DEPTH_POINT_CLOUD;
1099            case HAL_PIXEL_FORMAT_Y16:
1100                return ImageFormat.DEPTH16;
1101            case ImageFormat.JPEG:
1102                throw new IllegalArgumentException(
1103                        "ImageFormat.JPEG is an unknown internal format");
1104            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
1105                throw new IllegalArgumentException(
1106                        "IMPLEMENTATION_DEFINED must not leak to public API");
1107            default:
1108                throw new IllegalArgumentException(
1109                        "Unknown DATASPACE_DEPTH format " + format);
1110        }
1111    }
1112
1113    /**
1114     * Convert image formats from internal to public formats (in-place).
1115     *
1116     * @param formats an array of image formats
1117     * @return {@code formats}
1118     *
1119     * @see #imageFormatToPublic
1120     */
1121    static int[] imageFormatToPublic(int[] formats) {
1122        if (formats == null) {
1123            return null;
1124        }
1125
1126        for (int i = 0; i < formats.length; ++i) {
1127            formats[i] = imageFormatToPublic(formats[i]);
1128        }
1129
1130        return formats;
1131    }
1132
1133    /**
1134     * Convert a public format compatible with {@code ImageFormat} to an internal format
1135     * from {@code graphics.h}.
1136     *
1137     * <p>In particular these formats are converted:
1138     * <ul>
1139     * <li>ImageFormat.JPEG => HAL_PIXEL_FORMAT_BLOB
1140     * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_PIXEL_FORMAT_BLOB
1141     * <li>ImageFormat.DEPTH16 => HAL_PIXEL_FORMAT_Y16
1142     * </ul>
1143     * </p>
1144     *
1145     * <p>Passing in an internal format which has a different public format equivalent will fail.
1146     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1147     *
1148     * <p>All other formats are returned as-is, no invalid check is performed.</p>
1149     *
1150     * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1151     *
1152     * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1153     * @return the converted image formats
1154     *
1155     * @see ImageFormat
1156     * @see PixelFormat
1157     *
1158     * @throws IllegalArgumentException
1159     *              if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1160     */
1161    static int imageFormatToInternal(int format) {
1162        switch (format) {
1163            case ImageFormat.JPEG:
1164            case ImageFormat.DEPTH_POINT_CLOUD:
1165                return HAL_PIXEL_FORMAT_BLOB;
1166            case ImageFormat.DEPTH16:
1167                return HAL_PIXEL_FORMAT_Y16;
1168            default:
1169                return format;
1170        }
1171    }
1172
1173    /**
1174     * Convert a public format compatible with {@code ImageFormat} to an internal dataspace
1175     * from {@code graphics.h}.
1176     *
1177     * <p>In particular these formats are converted:
1178     * <ul>
1179     * <li>ImageFormat.JPEG => HAL_DATASPACE_JFIF
1180     * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_DATASPACE_DEPTH
1181     * <li>ImageFormat.DEPTH16 => HAL_DATASPACE_DEPTH
1182     * <li>others => HAL_DATASPACE_UNKNOWN
1183     * </ul>
1184     * </p>
1185     *
1186     * <p>Passing in an implementation-defined format here will fail (it's not a public format);
1187     * as will passing in an internal format which has a different public format equivalent.
1188     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1189     *
1190     * <p>All other formats are returned as-is, no invalid check is performed.</p>
1191     *
1192     * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1193     *
1194     * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1195     * @return the converted image formats
1196     *
1197     * @see ImageFormat
1198     * @see PixelFormat
1199     *
1200     * @throws IllegalArgumentException
1201     *              if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1202     */
1203    static int imageFormatToDataspace(int format) {
1204        switch (format) {
1205            case ImageFormat.JPEG:
1206                return HAL_DATASPACE_JFIF;
1207            case ImageFormat.DEPTH_POINT_CLOUD:
1208            case ImageFormat.DEPTH16:
1209                return HAL_DATASPACE_DEPTH;
1210            default:
1211                return HAL_DATASPACE_UNKNOWN;
1212        }
1213    }
1214
1215    /**
1216     * Convert image formats from public to internal formats (in-place).
1217     *
1218     * @param formats an array of image formats
1219     * @return {@code formats}
1220     *
1221     * @see #imageFormatToInternal
1222     *
1223     * @hide
1224     */
1225    public static int[] imageFormatToInternal(int[] formats) {
1226        if (formats == null) {
1227            return null;
1228        }
1229
1230        for (int i = 0; i < formats.length; ++i) {
1231            formats[i] = imageFormatToInternal(formats[i]);
1232        }
1233
1234        return formats;
1235    }
1236
1237    private Size[] getPublicFormatSizes(int format, boolean output, boolean highRes) {
1238        try {
1239            checkArgumentFormatSupported(format, output);
1240        } catch (IllegalArgumentException e) {
1241            return null;
1242        }
1243
1244        int internalFormat = imageFormatToInternal(format);
1245        int dataspace = imageFormatToDataspace(format);
1246
1247        return getInternalFormatSizes(internalFormat, dataspace, output, highRes);
1248    }
1249
    /**
     * List the sizes available for an internal format/dataspace combination.
     *
     * <p>For outputs, sizes are partitioned by speed: {@code highRes == true} returns only
     * sizes whose minimum frame duration exceeds the 20 fps threshold, {@code false} only
     * the fast sizes.</p>
     *
     * @param format internal format (graphics.h value, already validated)
     * @param dataspace internal dataspace; HAL_DATASPACE_DEPTH selects the depth tables
     * @param output whether to look up output ({@code true}) or input sizes
     * @param highRes whether to select only the slow high-resolution output sizes
     * @return array of supported sizes, sized exactly to the table's count for this format
     *
     * @throws IllegalArgumentException if the format is not available at all
     */
    private Size[] getInternalFormatSizes(int format, int dataspace,
            boolean output, boolean highRes) {
        // Choose the counting table: input, depth-output, slow high-res output, or
        // regular (fast) output.
        SparseIntArray formatsMap =
                !output ? mInputFormats :
                dataspace == HAL_DATASPACE_DEPTH ? mDepthOutputFormats :
                highRes ? mHighResOutputFormats :
                mOutputFormats;

        int sizesCount = formatsMap.get(format);
        // A zero count in the specific table is legal for non-depth outputs (e.g. all
        // sizes of the format may be in the other speed class) as long as the format
        // appears in the overall output table.
        if ( ((!output || dataspace == HAL_DATASPACE_DEPTH) && sizesCount == 0) ||
                (output && dataspace != HAL_DATASPACE_DEPTH && mAllOutputFormats.get(format) == 0)) {
            // Only throw if this is really not supported at all
            throw new IllegalArgumentException("format not available");
        }

        Size[] sizes = new Size[sizesCount];
        int sizeIndex = 0;

        // Depth configurations live in their own table.
        StreamConfiguration[] configurations =
                (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations;

        for (StreamConfiguration config : configurations) {
            int fmt = config.getFormat();
            if (fmt == format && config.isOutput() == output) {
                if (output) {
                    // Filter slow high-res output formats; include for
                    // highRes, remove for !highRes
                    long duration = 0;
                    // Find this configuration's minimum frame duration (0 if absent).
                    for (int i = 0; i < mMinFrameDurations.length; i++) {
                        StreamConfigurationDuration d = mMinFrameDurations[i];
                        if (d.getFormat() == fmt &&
                                d.getWidth() == config.getSize().getWidth() &&
                                d.getHeight() == config.getSize().getHeight()) {
                            duration = d.getDuration();
                            break;
                        }
                    }
                    // Keep only sizes whose speed class matches the request.
                    if (highRes != (duration > DURATION_20FPS_NS)) {
                        continue;
                    }
                }
                sizes[sizeIndex++] = config.getSize();
            }
        }

        // The configuration list must agree with the count table built from it.
        if (sizeIndex != sizesCount) {
            throw new AssertionError(
                    "Too few sizes (expected " + sizesCount + ", actual " + sizeIndex + ")");
        }

        return sizes;
    }
1302
    /** Get the list of publicly visible output formats; does not include IMPL_DEFINED */
1304    private int[] getPublicFormats(boolean output) {
1305        int[] formats = new int[getPublicFormatCount(output)];
1306
1307        int i = 0;
1308
1309        SparseIntArray map = getFormatsMap(output);
1310        for (int j = 0; j < map.size(); j++) {
1311            int format = map.keyAt(j);
1312            if (format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
1313                formats[i++] = imageFormatToPublic(format);
1314            }
1315        }
1316        if (output) {
1317            for (int j = 0; j < mDepthOutputFormats.size(); j++) {
1318                formats[i++] = depthFormatToPublic(mDepthOutputFormats.keyAt(j));
1319            }
1320        }
1321        if (formats.length != i) {
1322            throw new AssertionError("Too few formats " + i + ", expected " + formats.length);
1323        }
1324
1325        return formats;
1326    }
1327
1328    /** Get the format -> size count map for either output or input formats */
1329    private SparseIntArray getFormatsMap(boolean output) {
1330        return output ? mAllOutputFormats : mInputFormats;
1331    }
1332
1333    private long getInternalFormatDuration(int format, int dataspace, Size size, int duration) {
1334        // assume format is already checked, since its internal
1335
1336        if (!isSupportedInternalConfiguration(format, dataspace, size)) {
1337            throw new IllegalArgumentException("size was not supported");
1338        }
1339
1340        StreamConfigurationDuration[] durations = getDurations(duration, dataspace);
1341
1342        for (StreamConfigurationDuration configurationDuration : durations) {
1343            if (configurationDuration.getFormat() == format &&
1344                    configurationDuration.getWidth() == size.getWidth() &&
1345                    configurationDuration.getHeight() == size.getHeight()) {
1346                return configurationDuration.getDuration();
1347            }
1348        }
1349        // Default duration is '0' (unsupported/no extra stall)
1350        return 0;
1351    }
1352
1353    /**
1354     * Get the durations array for the kind of duration
1355     *
1356     * @see #DURATION_MIN_FRAME
1357     * @see #DURATION_STALL
1358     * */
1359    private StreamConfigurationDuration[] getDurations(int duration, int dataspace) {
1360        switch (duration) {
1361            case DURATION_MIN_FRAME:
1362                return (dataspace == HAL_DATASPACE_DEPTH) ?
1363                        mDepthMinFrameDurations : mMinFrameDurations;
1364            case DURATION_STALL:
1365                return (dataspace == HAL_DATASPACE_DEPTH) ?
1366                        mDepthStallDurations : mStallDurations;
1367            default:
1368                throw new IllegalArgumentException("duration was invalid");
1369        }
1370    }
1371
1372    /** Count the number of publicly-visible output formats */
1373    private int getPublicFormatCount(boolean output) {
1374        SparseIntArray formatsMap = getFormatsMap(output);
1375        int size = formatsMap.size();
1376        if (formatsMap.indexOfKey(HAL_PIXEL_FORMAT_RAW_OPAQUE) >= 0) {
1377            size -= 1;
1378        }
1379        if (output) {
1380            size += mDepthOutputFormats.size();
1381        }
1382
1383        return size;
1384    }
1385
1386    private static <T> boolean arrayContains(T[] array, T element) {
1387        if (array == null) {
1388            return false;
1389        }
1390
1391        for (T el : array) {
1392            if (Objects.equals(el, element)) {
1393                return true;
1394            }
1395        }
1396
1397        return false;
1398    }
1399
1400    private boolean isSupportedInternalConfiguration(int format, int dataspace,
1401            Size size) {
1402        StreamConfiguration[] configurations =
1403                (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations;
1404
1405        for (int i = 0; i < configurations.length; i++) {
1406            if (configurations[i].getFormat() == format &&
1407                    configurations[i].getSize().equals(size)) {
1408                return true;
1409            }
1410        }
1411
1412        return false;
1413    }
1414
1415    /**
1416     * Return this {@link StreamConfigurationMap} as a string representation.
1417     *
1418     * <p>{@code "StreamConfigurationMap(Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d,
1419     * stall:%d], ... [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]), Inputs([w:%d, h:%d,
1420     * format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)]), ValidOutputFormatsForInput(
1421     * [in:%d, out:%d, ... %d], ... [in:%d, out:%d, ... %d]), HighSpeedVideoConfigurations(
1422     * [w:%d, h:%d, min_fps:%d, max_fps:%d], ... [w:%d, h:%d, min_fps:%d, max_fps:%d]))"}.</p>
1423     *
1424     * <p>{@code Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d], ...
1425     * [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d])}, where
1426     * {@code [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]} represents an output
1427     * configuration's width, height, format, minimal frame duration in nanoseconds, and stall
1428     * duration in nanoseconds.</p>
1429     *
1430     * <p>{@code Inputs([w:%d, h:%d, format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)])}, where
1431     * {@code [w:%d, h:%d, format:%s(%d)]} represents an input configuration's width, height, and
1432     * format.</p>
1433     *
1434     * <p>{@code ValidOutputFormatsForInput([in:%s(%d), out:%s(%d), ... %s(%d)],
1435     * ... [in:%s(%d), out:%s(%d), ... %s(%d)])}, where {@code [in:%s(%d), out:%s(%d), ... %s(%d)]}
1436     * represents an input fomat and its valid output formats.</p>
1437     *
1438     * <p>{@code HighSpeedVideoConfigurations([w:%d, h:%d, min_fps:%d, max_fps:%d],
1439     * ... [w:%d, h:%d, min_fps:%d, max_fps:%d])}, where
1440     * {@code [w:%d, h:%d, min_fps:%d, max_fps:%d]} represents a high speed video output
1441     * configuration's width, height, minimal frame rate, and maximal frame rate.</p>
1442     *
1443     * @return string representation of {@link StreamConfigurationMap}
1444     */
1445    @Override
1446    public String toString() {
1447        StringBuilder sb = new StringBuilder("StreamConfiguration(");
1448        appendOutputsString(sb);
1449        sb.append(", ");
1450        appendHighResOutputsString(sb);
1451        sb.append(", ");
1452        appendInputsString(sb);
1453        sb.append(", ");
1454        appendValidOutputFormatsForInputString(sb);
1455        sb.append(", ");
1456        appendHighSpeedVideoConfigurationsString(sb);
1457        sb.append(")");
1458
1459        return sb.toString();
1460    }
1461
1462    private void appendOutputsString(StringBuilder sb) {
1463        sb.append("Outputs(");
1464        int[] formats = getOutputFormats();
1465        for (int format : formats) {
1466            Size[] sizes = getOutputSizes(format);
1467            for (Size size : sizes) {
1468                long minFrameDuration = getOutputMinFrameDuration(format, size);
1469                long stallDuration = getOutputStallDuration(format, size);
1470                sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " +
1471                        "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format),
1472                        format, minFrameDuration, stallDuration));
1473            }
1474        }
1475        // Remove the pending ", "
1476        if (sb.charAt(sb.length() - 1) == ' ') {
1477            sb.delete(sb.length() - 2, sb.length());
1478        }
1479        sb.append(")");
1480    }
1481
1482    private void appendHighResOutputsString(StringBuilder sb) {
1483        sb.append("HighResolutionOutputs(");
1484        int[] formats = getOutputFormats();
1485        for (int format : formats) {
1486            Size[] sizes = getHighResolutionOutputSizes(format);
1487            if (sizes == null) continue;
1488            for (Size size : sizes) {
1489                long minFrameDuration = getOutputMinFrameDuration(format, size);
1490                long stallDuration = getOutputStallDuration(format, size);
1491                sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " +
1492                        "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format),
1493                        format, minFrameDuration, stallDuration));
1494            }
1495        }
1496        // Remove the pending ", "
1497        if (sb.charAt(sb.length() - 1) == ' ') {
1498            sb.delete(sb.length() - 2, sb.length());
1499        }
1500        sb.append(")");
1501    }
1502
1503    private void appendInputsString(StringBuilder sb) {
1504        sb.append("Inputs(");
1505        int[] formats = getInputFormats();
1506        for (int format : formats) {
1507            Size[] sizes = getInputSizes(format);
1508            for (Size size : sizes) {
1509                sb.append(String.format("[w:%d, h:%d, format:%s(%d)], ", size.getWidth(),
1510                        size.getHeight(), formatToString(format), format));
1511            }
1512        }
1513        // Remove the pending ", "
1514        if (sb.charAt(sb.length() - 1) == ' ') {
1515            sb.delete(sb.length() - 2, sb.length());
1516        }
1517        sb.append(")");
1518    }
1519
1520    private void appendValidOutputFormatsForInputString(StringBuilder sb) {
1521        sb.append("ValidOutputFormatsForInput(");
1522        int[] inputFormats = getInputFormats();
1523        for (int inputFormat : inputFormats) {
1524            sb.append(String.format("[in:%s(%d), out:", formatToString(inputFormat), inputFormat));
1525            int[] outputFormats = getValidOutputFormatsForInput(inputFormat);
1526            for (int i = 0; i < outputFormats.length; i++) {
1527                sb.append(String.format("%s(%d)", formatToString(outputFormats[i]),
1528                        outputFormats[i]));
1529                if (i < outputFormats.length - 1) {
1530                    sb.append(", ");
1531                }
1532            }
1533            sb.append("], ");
1534        }
1535        // Remove the pending ", "
1536        if (sb.charAt(sb.length() - 1) == ' ') {
1537            sb.delete(sb.length() - 2, sb.length());
1538        }
1539        sb.append(")");
1540    }
1541
1542    private void appendHighSpeedVideoConfigurationsString(StringBuilder sb) {
1543        sb.append("HighSpeedVideoConfigurations(");
1544        Size[] sizes = getHighSpeedVideoSizes();
1545        for (Size size : sizes) {
1546            Range<Integer>[] ranges = getHighSpeedVideoFpsRangesFor(size);
1547            for (Range<Integer> range : ranges) {
1548                sb.append(String.format("[w:%d, h:%d, min_fps:%d, max_fps:%d], ", size.getWidth(),
1549                        size.getHeight(), range.getLower(), range.getUpper()));
1550            }
1551        }
1552        // Remove the pending ", "
1553        if (sb.charAt(sb.length() - 1) == ' ') {
1554            sb.delete(sb.length() - 2, sb.length());
1555        }
1556        sb.append(")");
1557    }
1558
1559    private String formatToString(int format) {
1560        switch (format) {
1561            case ImageFormat.YV12:
1562                return "YV12";
1563            case ImageFormat.YUV_420_888:
1564                return "YUV_420_888";
1565            case ImageFormat.NV21:
1566                return "NV21";
1567            case ImageFormat.NV16:
1568                return "NV16";
1569            case PixelFormat.RGB_565:
1570                return "RGB_565";
1571            case PixelFormat.RGBA_8888:
1572                return "RGBA_8888";
1573            case PixelFormat.RGBX_8888:
1574                return "RGBX_8888";
1575            case PixelFormat.RGB_888:
1576                return "RGB_888";
1577            case ImageFormat.JPEG:
1578                return "JPEG";
1579            case ImageFormat.YUY2:
1580                return "YUY2";
1581            case ImageFormat.Y8:
1582                return "Y8";
1583            case ImageFormat.Y16:
1584                return "Y16";
1585            case ImageFormat.RAW_SENSOR:
1586                return "RAW_SENSOR";
1587            case ImageFormat.RAW10:
1588                return "RAW10";
1589            case ImageFormat.DEPTH16:
1590                return "DEPTH16";
1591            case ImageFormat.DEPTH_POINT_CLOUD:
1592                return "DEPTH_POINT_CLOUD";
1593            case ImageFormat.PRIVATE:
1594                return "PRIVATE";
1595            default:
1596                return "UNKNOWN";
1597        }
1598    }
1599
    // Internal (HAL) pixel format codes, mirrored from
    // system/core/include/system/graphics.h. These are NOT the public ImageFormat/PixelFormat
    // values; see imageFormatToPublic/depthFormatToPublic for the mapping.
    private static final int HAL_PIXEL_FORMAT_RAW16 = 0x20;
    private static final int HAL_PIXEL_FORMAT_BLOB = 0x21;
    private static final int HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22;
    private static final int HAL_PIXEL_FORMAT_YCbCr_420_888 = 0x23;
    private static final int HAL_PIXEL_FORMAT_RAW_OPAQUE = 0x24;
    private static final int HAL_PIXEL_FORMAT_RAW10 = 0x25;
    private static final int HAL_PIXEL_FORMAT_RAW12 = 0x26;
    private static final int HAL_PIXEL_FORMAT_Y16 = 0x20363159;


    // HAL dataspace codes; HAL_DATASPACE_DEPTH selects the depth-specific
    // configuration/duration tables throughout this class.
    private static final int HAL_DATASPACE_UNKNOWN = 0x0;
    private static final int HAL_DATASPACE_JFIF = 0x101;
    private static final int HAL_DATASPACE_DEPTH = 0x1000;

    // 50ms in nanoseconds, i.e. the frame duration of a 20fps stream.
    // NOTE(review): its consumer is outside this chunk — presumably a frame-rate cutoff check.
    private static final long DURATION_20FPS_NS = 50000000L;
    /**
     * Duration-kind selectors for the internal duration lookup.
     *
     * @see #getDurations(int, int)
     */
    private static final int DURATION_MIN_FRAME = 0;
    private static final int DURATION_STALL = 1;

    // Raw stream configuration/duration tables for non-depth dataspaces.
    private final StreamConfiguration[] mConfigurations;
    private final StreamConfigurationDuration[] mMinFrameDurations;
    private final StreamConfigurationDuration[] mStallDurations;

    // Raw stream configuration/duration tables for HAL_DATASPACE_DEPTH.
    private final StreamConfiguration[] mDepthConfigurations;
    private final StreamConfigurationDuration[] mDepthMinFrameDurations;
    private final StreamConfigurationDuration[] mDepthStallDurations;

    // Constrained high-speed recording configurations and the input->output reprocessing map.
    private final HighSpeedVideoConfiguration[] mHighSpeedVideoConfigurations;
    private final ReprocessFormatsMap mInputOutputFormatsMap;

    // Whether slow high-resolution output sizes are listed separately by this device.
    private final boolean mListHighResolution;

    /** internal format -> num output sizes mapping, not including slow high-res sizes, for
     * non-depth dataspaces */
    private final SparseIntArray mOutputFormats = new SparseIntArray();
    /** internal format -> num output sizes mapping for slow high-res sizes, for non-depth
     * dataspaces */
    private final SparseIntArray mHighResOutputFormats = new SparseIntArray();
    /** internal format -> num output sizes mapping for all non-depth dataspaces */
    private final SparseIntArray mAllOutputFormats = new SparseIntArray();
    /** internal format -> num input sizes mapping, for input reprocessing formats */
    private final SparseIntArray mInputFormats = new SparseIntArray();
    /** internal format -> num depth output sizes mapping, for HAL_DATASPACE_DEPTH */
    private final SparseIntArray mDepthOutputFormats = new SparseIntArray();
    /** High speed video Size -> FPS range count mapping*/
    private final HashMap</*HighSpeedVideoSize*/Size, /*Count*/Integer> mHighSpeedVideoSizeMap =
            new HashMap<Size, Integer>();
    /** High speed video FPS range -> Size count mapping*/
    private final HashMap</*HighSpeedVideoFpsRange*/Range<Integer>, /*Count*/Integer>
            mHighSpeedVideoFpsRangeMap = new HashMap<Range<Integer>, Integer>();
1653
1654}
1655