StreamConfigurationMap.java revision 72064af7e75f7e3b2eb2e58a3af408861eb8c4e9
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.hardware.camera2.params;
18
19import android.graphics.ImageFormat;
20import android.graphics.PixelFormat;
21import android.hardware.camera2.CameraCharacteristics;
22import android.hardware.camera2.CameraDevice;
23import android.hardware.camera2.CameraMetadata;
24import android.hardware.camera2.CaptureRequest;
25import android.hardware.camera2.utils.HashCodeHelpers;
26import android.hardware.camera2.utils.SurfaceUtils;
27import android.hardware.camera2.legacy.LegacyCameraDevice;
28import android.hardware.camera2.legacy.LegacyMetadataMapper;
29import android.view.Surface;
30import android.util.Range;
31import android.util.Size;
32import android.util.SparseIntArray;
33
34import java.util.Arrays;
35import java.util.HashMap;
36import java.util.Objects;
37import java.util.Set;
38
39import static com.android.internal.util.Preconditions.*;
40
41/**
42 * Immutable class to store the available stream
43 * {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP configurations} to set up
44 * {@link android.view.Surface Surfaces} for creating a
45 * {@link android.hardware.camera2.CameraCaptureSession capture session} with
46 * {@link android.hardware.camera2.CameraDevice#createCaptureSession}.
47 * <!-- TODO: link to input stream configuration -->
48 *
49 * <p>This is the authoritative list for all <!-- input/ -->output formats (and sizes respectively
50 * for that format) that are supported by a camera device.</p>
51 *
52 * <p>This also contains the minimum frame durations and stall durations for each format/size
53 * combination that can be used to calculate effective frame rate when submitting multiple captures.
54 * </p>
55 *
56 * <p>An instance of this object is available from {@link CameraCharacteristics} using
57 * the {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP} key and the
58 * {@link CameraCharacteristics#get} method.</p>
59 *
60 * <pre><code>{@code
61 * CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId);
62 * StreamConfigurationMap configs = characteristics.get(
63 *         CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
64 * }</code></pre>
65 *
66 * @see CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP
67 * @see CameraDevice#createCaptureSession
68 */
69public final class StreamConfigurationMap {
70
    // NOTE(review): TAG is unused in this chunk; presumably a logging tag used elsewhere
    // in the class — confirm before removing.
    private static final String TAG = "StreamConfigurationMap";
72
73    /**
74     * Create a new {@link StreamConfigurationMap}.
75     *
76     * <p>The array parameters ownership is passed to this object after creation; do not
77     * write to them after this constructor is invoked.</p>
78     *
79     * @param configurations a non-{@code null} array of {@link StreamConfiguration}
80     * @param minFrameDurations a non-{@code null} array of {@link StreamConfigurationDuration}
81     * @param stallDurations a non-{@code null} array of {@link StreamConfigurationDuration}
82     * @param highSpeedVideoConfigurations an array of {@link HighSpeedVideoConfiguration}, null if
83     *        camera device does not support high speed video recording
84     * @param listHighResolution a flag indicating whether the device supports BURST_CAPTURE
85     *        and thus needs a separate list of slow high-resolution output sizes
86     * @throws NullPointerException if any of the arguments except highSpeedVideoConfigurations
87     *         were {@code null} or any subelements were {@code null}
88     *
89     * @hide
90     */
    public StreamConfigurationMap(
            StreamConfiguration[] configurations,
            StreamConfigurationDuration[] minFrameDurations,
            StreamConfigurationDuration[] stallDurations,
            StreamConfiguration[] depthConfigurations,
            StreamConfigurationDuration[] depthMinFrameDurations,
            StreamConfigurationDuration[] depthStallDurations,
            HighSpeedVideoConfiguration[] highSpeedVideoConfigurations,
            ReprocessFormatsMap inputOutputFormatsMap,
            boolean listHighResolution) {

        if (configurations == null) {
            // If no color configurations exist, ensure depth ones do
            checkArrayElementsNotNull(depthConfigurations, "depthConfigurations");
            mConfigurations = new StreamConfiguration[0];
            mMinFrameDurations = new StreamConfigurationDuration[0];
            mStallDurations = new StreamConfigurationDuration[0];
        } else {
            mConfigurations = checkArrayElementsNotNull(configurations, "configurations");
            mMinFrameDurations = checkArrayElementsNotNull(minFrameDurations, "minFrameDurations");
            mStallDurations = checkArrayElementsNotNull(stallDurations, "stallDurations");
        }

        mListHighResolution = listHighResolution;

        // Depth streams are optional; substitute empty arrays so later loops need no null checks.
        if (depthConfigurations == null) {
            mDepthConfigurations = new StreamConfiguration[0];
            mDepthMinFrameDurations = new StreamConfigurationDuration[0];
            mDepthStallDurations = new StreamConfigurationDuration[0];
        } else {
            mDepthConfigurations = checkArrayElementsNotNull(depthConfigurations,
                    "depthConfigurations");
            mDepthMinFrameDurations = checkArrayElementsNotNull(depthMinFrameDurations,
                    "depthMinFrameDurations");
            mDepthStallDurations = checkArrayElementsNotNull(depthStallDurations,
                    "depthStallDurations");
        }

        // High speed video support is also optional (null when the device lacks the capability).
        if (highSpeedVideoConfigurations == null) {
            mHighSpeedVideoConfigurations = new HighSpeedVideoConfiguration[0];
        } else {
            mHighSpeedVideoConfigurations = checkArrayElementsNotNull(
                    highSpeedVideoConfigurations, "highSpeedVideoConfigurations");
        }

        // For each format, track how many sizes there are available to configure
        for (StreamConfiguration config : mConfigurations) {
            int fmt = config.getFormat();
            SparseIntArray map = null;
            if (config.isOutput()) {
                // Count every output in the all-outputs map, regardless of frame rate.
                mAllOutputFormats.put(fmt, mAllOutputFormats.get(fmt) + 1);
                long duration = 0;
                if (mListHighResolution) {
                    // Find the minimum frame duration for this exact format/size pair; only
                    // needed when high-resolution sizes are listed separately (BURST_CAPTURE).
                    for (StreamConfigurationDuration configurationDuration : mMinFrameDurations) {
                        if (configurationDuration.getFormat() == fmt &&
                                configurationDuration.getWidth() == config.getSize().getWidth() &&
                                configurationDuration.getHeight() == config.getSize().getHeight()) {
                            duration = configurationDuration.getDuration();
                            break;
                        }
                    }
                }
                // Sizes that cannot sustain 20fps go into the separate high-resolution map.
                map = duration <= DURATION_20FPS_NS ?
                        mOutputFormats : mHighResOutputFormats;
            } else {
                map = mInputFormats;
            }
            map.put(fmt, map.get(fmt) + 1);
        }

        // For each depth format, track how many sizes there are available to configure
        for (StreamConfiguration config : mDepthConfigurations) {
            if (!config.isOutput()) {
                // Ignoring input depth configs
                continue;
            }

            mDepthOutputFormats.put(config.getFormat(),
                    mDepthOutputFormats.get(config.getFormat()) + 1);
        }

        // Any device reporting color configurations must expose at least one
        // IMPLEMENTATION_DEFINED output stream.
        if (configurations != null &&
                mOutputFormats.indexOfKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) < 0) {
            throw new AssertionError(
                    "At least one stream configuration for IMPLEMENTATION_DEFINED must exist");
        }

        // For each Size/FPS range, track how many FPS range/Size there are available
        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
            Size size = config.getSize();
            Range<Integer> fpsRange = config.getFpsRange();
            Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
            if (fpsRangeCount == null) {
                fpsRangeCount = 0;
            }
            mHighSpeedVideoSizeMap.put(size, fpsRangeCount + 1);
            Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
            if (sizeCount == null) {
                sizeCount = 0;
            }
            mHighSpeedVideoFpsRangeMap.put(fpsRange, sizeCount + 1);
        }

        // May be null when the device does not support reprocessing.
        mInputOutputFormatsMap = inputOutputFormatsMap;
    }
196
197    /**
198     * Get the image {@code format} output formats in this stream configuration.
199     *
200     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
201     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
202     *
203     * <p>Formats listed in this array are guaranteed to return true if queried with
204     * {@link #isOutputSupportedFor(int)}.</p>
205     *
206     * @return an array of integer format
207     *
208     * @see ImageFormat
209     * @see PixelFormat
210     */
211    public final int[] getOutputFormats() {
212        return getPublicFormats(/*output*/true);
213    }
214
215    /**
216     * Get the image {@code format} output formats for a reprocessing input format.
217     *
218     * <p>When submitting a {@link CaptureRequest} with an input Surface of a given format,
219     * the only allowed target outputs of the {@link CaptureRequest} are the ones with a format
220     * listed in the return value of this method. Including any other output Surface as a target
221     * will throw an IllegalArgumentException. If no output format is supported given the input
222     * format, an empty int[] will be returned.</p>
223     *
224     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
225     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
226     *
227     * <p>Formats listed in this array are guaranteed to return true if queried with
228     * {@link #isOutputSupportedFor(int)}.</p>
229     *
230     * @return an array of integer format
231     *
232     * @see ImageFormat
233     * @see PixelFormat
234     */
235    public final int[] getValidOutputFormatsForInput(int inputFormat) {
236        if (mInputOutputFormatsMap == null) {
237            return new int[0];
238        }
239        return mInputOutputFormatsMap.getOutputs(inputFormat);
240    }
241
242    /**
243     * Get the image {@code format} input formats in this stream configuration.
244     *
245     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
246     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
247     *
248     * @return an array of integer format
249     *
250     * @see ImageFormat
251     * @see PixelFormat
252     */
253    public final int[] getInputFormats() {
254        return getPublicFormats(/*output*/false);
255    }
256
257    /**
258     * Get the supported input sizes for this input format.
259     *
260     * <p>The format must have come from {@link #getInputFormats}; otherwise
261     * {@code null} is returned.</p>
262     *
263     * @param format a format from {@link #getInputFormats}
264     * @return a non-empty array of sizes, or {@code null} if the format was not available.
265     */
266    public Size[] getInputSizes(final int format) {
267        return getPublicFormatSizes(format, /*output*/false, /*highRes*/false);
268    }
269
270    /**
271     * Determine whether or not output surfaces with a particular user-defined format can be passed
272     * {@link CameraDevice#createCaptureSession createCaptureSession}.
273     *
274     * <p>This method determines that the output {@code format} is supported by the camera device;
275     * each output {@code surface} target may or may not itself support that {@code format}.
276     * Refer to the class which provides the surface for additional documentation.</p>
277     *
278     * <p>Formats for which this returns {@code true} are guaranteed to exist in the result
279     * returned by {@link #getOutputSizes}.</p>
280     *
281     * @param format an image format from either {@link ImageFormat} or {@link PixelFormat}
282     * @return
283     *          {@code true} iff using a {@code surface} with this {@code format} will be
284     *          supported with {@link CameraDevice#createCaptureSession}
285     *
286     * @throws IllegalArgumentException
287     *          if the image format was not a defined named constant
288     *          from either {@link ImageFormat} or {@link PixelFormat}
289     *
290     * @see ImageFormat
291     * @see PixelFormat
292     * @see CameraDevice#createCaptureSession
293     */
294    public boolean isOutputSupportedFor(int format) {
295        checkArgumentFormat(format);
296
297        int internalFormat = imageFormatToInternal(format);
298        int dataspace = imageFormatToDataspace(format);
299        if (dataspace == HAL_DATASPACE_DEPTH) {
300            return mDepthOutputFormats.indexOfKey(internalFormat) >= 0;
301        } else {
302            return getFormatsMap(/*output*/true).indexOfKey(internalFormat) >= 0;
303        }
304    }
305
306    /**
307     * Determine whether or not output streams can be configured with a particular class
308     * as a consumer.
309     *
310     * <p>The following list is generally usable for outputs:
311     * <ul>
312     * <li>{@link android.media.ImageReader} -
313     * Recommended for image processing or streaming to external resources (such as a file or
314     * network)
315     * <li>{@link android.media.MediaRecorder} -
316     * Recommended for recording video (simple to use)
317     * <li>{@link android.media.MediaCodec} -
318     * Recommended for recording video (more complicated to use, with more flexibility)
319     * <li>{@link android.renderscript.Allocation} -
320     * Recommended for image processing with {@link android.renderscript RenderScript}
321     * <li>{@link android.view.SurfaceHolder} -
322     * Recommended for low-power camera preview with {@link android.view.SurfaceView}
323     * <li>{@link android.graphics.SurfaceTexture} -
324     * Recommended for OpenGL-accelerated preview processing or compositing with
325     * {@link android.view.TextureView}
326     * </ul>
327     * </p>
328     *
329     * <p>Generally speaking this means that creating a {@link Surface} from that class <i>may</i>
330     * provide a producer endpoint that is suitable to be used with
331     * {@link CameraDevice#createCaptureSession}.</p>
332     *
333     * <p>Since not all of the above classes support output of all format and size combinations,
334     * the particular combination should be queried with {@link #isOutputSupportedFor(Surface)}.</p>
335     *
336     * @param klass a non-{@code null} {@link Class} object reference
337     * @return {@code true} if this class is supported as an output, {@code false} otherwise
338     *
339     * @throws NullPointerException if {@code klass} was {@code null}
340     *
341     * @see CameraDevice#createCaptureSession
342     * @see #isOutputSupportedFor(Surface)
343     */
344    public static <T> boolean isOutputSupportedFor(Class<T> klass) {
345        checkNotNull(klass, "klass must not be null");
346
347        if (klass == android.media.ImageReader.class) {
348            return true;
349        } else if (klass == android.media.MediaRecorder.class) {
350            return true;
351        } else if (klass == android.media.MediaCodec.class) {
352            return true;
353        } else if (klass == android.renderscript.Allocation.class) {
354            return true;
355        } else if (klass == android.view.SurfaceHolder.class) {
356            return true;
357        } else if (klass == android.graphics.SurfaceTexture.class) {
358            return true;
359        }
360
361        return false;
362    }
363
364    /**
365     * Determine whether or not the {@code surface} in its current state is suitable to be included
366     * in a {@link CameraDevice#createCaptureSession capture session} as an output.
367     *
368     * <p>Not all surfaces are usable with the {@link CameraDevice}, and not all configurations
369     * of that {@code surface} are compatible. Some classes that provide the {@code surface} are
370     * compatible with the {@link CameraDevice} in general
 * (see {@link #isOutputSupportedFor(Class)}), but it is the caller's responsibility to put the
372     * {@code surface} into a state that will be compatible with the {@link CameraDevice}.</p>
373     *
374     * <p>Reasons for a {@code surface} being specifically incompatible might be:
375     * <ul>
376     * <li>Using a format that's not listed by {@link #getOutputFormats}
377     * <li>Using a format/size combination that's not listed by {@link #getOutputSizes}
 * <li>The {@code surface} itself is not in a state where it can service a new producer.</li>
380     * </ul>
381     *
382     * <p>Surfaces from flexible sources will return true even if the exact size of the Surface does
383     * not match a camera-supported size, as long as the format (or class) is supported and the
 * camera device supports a size that is equal to or less than 1080p in that format. If such a
385     * Surface is used to create a capture session, it will have its size rounded to the nearest
386     * supported size, below or equal to 1080p. Flexible sources include SurfaceView, SurfaceTexture,
387     * and ImageReader.</p>
388     *
389     * <p>This is not an exhaustive list; see the particular class's documentation for further
390     * possible reasons of incompatibility.</p>
391     *
392     * @param surface a non-{@code null} {@link Surface} object reference
393     * @return {@code true} if this is supported, {@code false} otherwise
394     *
395     * @throws NullPointerException if {@code surface} was {@code null}
396     * @throws IllegalArgumentException if the Surface endpoint is no longer valid
397     *
398     * @see CameraDevice#createCaptureSession
399     * @see #isOutputSupportedFor(Class)
400     */
401    public boolean isOutputSupportedFor(Surface surface) {
402        checkNotNull(surface, "surface must not be null");
403
404        Size surfaceSize = SurfaceUtils.getSurfaceSize(surface);
405        int surfaceFormat = SurfaceUtils.getSurfaceFormat(surface);
406        int surfaceDataspace = SurfaceUtils.getSurfaceDataspace(surface);
407
408        // See if consumer is flexible.
409        boolean isFlexible = SurfaceUtils.isFlexibleConsumer(surface);
410
411        // Override RGB formats to IMPLEMENTATION_DEFINED, b/9487482
412        if ((surfaceFormat >= LegacyMetadataMapper.HAL_PIXEL_FORMAT_RGBA_8888 &&
413                        surfaceFormat <= LegacyMetadataMapper.HAL_PIXEL_FORMAT_BGRA_8888)) {
414            surfaceFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
415        }
416
417        StreamConfiguration[] configs =
418                surfaceDataspace != HAL_DATASPACE_DEPTH ? mConfigurations : mDepthConfigurations;
419        for (StreamConfiguration config : configs) {
420            if (config.getFormat() == surfaceFormat && config.isOutput()) {
421                // Matching format, either need exact size match, or a flexible consumer
422                // and a size no bigger than MAX_DIMEN_FOR_ROUNDING
423                if (config.getSize().equals(surfaceSize)) {
424                    return true;
425                } else if (isFlexible &&
426                        (config.getSize().getWidth() <= LegacyCameraDevice.MAX_DIMEN_FOR_ROUNDING)) {
427                    return true;
428                }
429            }
430        }
431        return false;
432    }
433
434    /**
435     * Get a list of sizes compatible with {@code klass} to use as an output.
436     *
437     * <p>Some of the supported classes may support additional formats beyond
438     * {@link ImageFormat#PRIVATE}; this function only returns
439     * sizes for {@link ImageFormat#PRIVATE}. For example, {@link android.media.ImageReader}
440     * supports {@link ImageFormat#YUV_420_888} and {@link ImageFormat#PRIVATE}, this method will
441     * only return the sizes for {@link ImageFormat#PRIVATE} for {@link android.media.ImageReader}
442     * class.</p>
443     *
444     * <p>If a well-defined format such as {@code NV21} is required, use
445     * {@link #getOutputSizes(int)} instead.</p>
446     *
447     * <p>The {@code klass} should be a supported output, that querying
448     * {@code #isOutputSupportedFor(Class)} should return {@code true}.</p>
449     *
450     * @param klass
451     *          a non-{@code null} {@link Class} object reference
452     * @return
453     *          an array of supported sizes for {@link ImageFormat#PRIVATE} format,
454     *          or {@code null} iff the {@code klass} is not a supported output.
455     *
456     *
457     * @throws NullPointerException if {@code klass} was {@code null}
458     *
459     * @see #isOutputSupportedFor(Class)
460     */
461    public <T> Size[] getOutputSizes(Class<T> klass) {
462        if (isOutputSupportedFor(klass) == false) {
463            return null;
464        }
465
466        return getInternalFormatSizes(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
467                HAL_DATASPACE_UNKNOWN,/*output*/true, /*highRes*/false);
468    }
469
470    /**
471     * Get a list of sizes compatible with the requested image {@code format}.
472     *
473     * <p>The {@code format} should be a supported format (one of the formats returned by
474     * {@link #getOutputFormats}).</p>
475     *
476     * As of API level 23, the {@link #getHighResolutionOutputSizes} method can be used on devices
477     * that support the
478     * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE}
479     * capability to get a list of high-resolution output sizes that cannot operate at the preferred
480     * 20fps rate. This means that for some supported formats, this method will return an empty
481     * list, if all the supported resolutions operate at below 20fps.  For devices that do not
482     * support the BURST_CAPTURE capability, all output resolutions are listed through this method.
483     *
484     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
485     * @return
486     *          an array of supported sizes,
487     *          or {@code null} if the {@code format} is not a supported output
488     *
489     * @see ImageFormat
490     * @see PixelFormat
491     * @see #getOutputFormats
492     */
493    public Size[] getOutputSizes(int format) {
494        return getPublicFormatSizes(format, /*output*/true, /*highRes*/ false);
495    }
496
497    /**
498     * Get a list of supported high speed video recording sizes.
499     * <p>
500     * When {@link CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO} is
501     * supported in {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES}, this method will
502     * list the supported high speed video size configurations. All the sizes listed will be a
503     * subset of the sizes reported by {@link #getOutputSizes} for processed non-stalling formats
 * (typically {@link ImageFormat#PRIVATE}, {@link ImageFormat#YUV_420_888}, etc.)
505     * </p>
506     * <p>
 * To enable high speed video recording, application must create a constrained high speed
508     * capture session via {@link CameraDevice#createConstrainedHighSpeedCaptureSession}, and submit
509     * a CaptureRequest list created by
510     * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}
511     * to this session. The application must select the video size from this method and
512     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
513     * {@link #getHighSpeedVideoFpsRangesFor} to configure the constrained high speed session and
514     * generate the high speed request list. For example, if the application intends to do high
515     * speed recording, it can select the maximum size reported by this method to create high speed
516     * capture session. Note that for the use case of multiple output streams, application must
517     * select one unique size from this method to use (e.g., preview and recording streams must have
518     * the same size). Otherwise, the high speed session creation will fail. Once the size is
519     * selected, application can get the supported FPS ranges by
520     * {@link #getHighSpeedVideoFpsRangesFor}, and use these FPS ranges to setup the recording
521     * request lists via
522     * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}.
523     * </p>
524     *
525     * @return an array of supported high speed video recording sizes
526     * @see #getHighSpeedVideoFpsRangesFor(Size)
527     * @see CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO
528     * @see CameraDevice#createConstrainedHighSpeedCaptureSession
529     * @see android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList
530     */
531    public Size[] getHighSpeedVideoSizes() {
532        Set<Size> keySet = mHighSpeedVideoSizeMap.keySet();
533        return keySet.toArray(new Size[keySet.size()]);
534    }
535
536    /**
537     * Get the frame per second ranges (fpsMin, fpsMax) for input high speed video size.
538     * <p>
539     * See {@link #getHighSpeedVideoFpsRanges} for how to enable high speed recording.
540     * </p>
541     * <p>
542     * The {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in this method
543     * must not be used to setup capture requests that are submitted to unconstrained capture
544     * sessions, or it will result in {@link IllegalArgumentException IllegalArgumentExceptions}.
545     * </p>
546     * <p>
547     * See {@link #getHighSpeedVideoFpsRanges} for the characteristics of the returned FPS ranges.
548     * </p>
549     *
550     * @param size one of the sizes returned by {@link #getHighSpeedVideoSizes()}
551     * @return an array of supported high speed video recording FPS ranges The upper bound of
552     *         returned ranges is guaranteed to be greater than or equal to 120.
553     * @throws IllegalArgumentException if input size does not exist in the return value of
554     *             getHighSpeedVideoSizes
555     * @see #getHighSpeedVideoSizes()
556     * @see #getHighSpeedVideoFpsRanges()
557     */
558    public Range<Integer>[] getHighSpeedVideoFpsRangesFor(Size size) {
559        Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
560        if (fpsRangeCount == null || fpsRangeCount == 0) {
561            throw new IllegalArgumentException(String.format(
562                    "Size %s does not support high speed video recording", size));
563        }
564
565        @SuppressWarnings("unchecked")
566        Range<Integer>[] fpsRanges = new Range[fpsRangeCount];
567        int i = 0;
568        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
569            if (size.equals(config.getSize())) {
570                fpsRanges[i++] = config.getFpsRange();
571            }
572        }
573        return fpsRanges;
574    }
575
576    /**
577     * Get a list of supported high speed video recording FPS ranges.
578     * <p>
579     * When {@link CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO} is
580     * supported in {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES}, this method will
581     * list the supported high speed video FPS range configurations. Application can then use
582     * {@link #getHighSpeedVideoSizesFor} to query available sizes for one of returned FPS range.
583     * </p>
584     * <p>
 * To enable high speed video recording, application must create a constrained high speed
586     * capture session via {@link CameraDevice#createConstrainedHighSpeedCaptureSession}, and submit
587     * a CaptureRequest list created by
588     * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}
589     * to this session. The application must select the video size from this method and
590     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
591     * {@link #getHighSpeedVideoFpsRangesFor} to configure the constrained high speed session and
592     * generate the high speed request list. For example, if the application intends to do high
593     * speed recording, it can select one FPS range reported by this method, query the video sizes
594     * corresponding to this FPS range by {@link #getHighSpeedVideoSizesFor} and use one of reported
595     * sizes to create a high speed capture session. Note that for the use case of multiple output
596     * streams, application must select one unique size from this method to use (e.g., preview and
597     * recording streams must have the same size). Otherwise, the high speed session creation will
598     * fail. Once the high speed capture session is created, the application can set the FPS range
599     * in the recording request lists via
600     * {@link android.hardware.camera2.CameraConstrainedHighSpeedCaptureSession#createHighSpeedRequestList}.
601     * </p>
602     * <p>
603     * The FPS ranges reported by this method will have below characteristics:
 * <li>The fpsMin and fpsMax will be a multiple of 30fps.</li>
605     * <li>The fpsMin will be no less than 30fps, the fpsMax will be no less than 120fps.</li>
606     * <li>At least one range will be a fixed FPS range where fpsMin == fpsMax.</li>
607     * <li>For each fixed FPS range, there will be one corresponding variable FPS range [30,
608     * fps_max]. These kinds of FPS ranges are suitable for preview-only use cases where the
609     * application doesn't want the camera device always produce higher frame rate than the display
610     * refresh rate.</li>
611     * </p>
612     *
613     * @return an array of supported high speed video recording FPS ranges The upper bound of
614     *         returned ranges is guaranteed to be larger or equal to 120.
615     * @see #getHighSpeedVideoSizesFor
616     * @see CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO
617     * @see CameraDevice#createConstrainedHighSpeedCaptureSession
618     * @see CameraDevice#createHighSpeedRequestList
619     */
620    @SuppressWarnings("unchecked")
621    public Range<Integer>[] getHighSpeedVideoFpsRanges() {
622        Set<Range<Integer>> keySet = mHighSpeedVideoFpsRangeMap.keySet();
623        return keySet.toArray(new Range[keySet.size()]);
624    }
625
626    /**
627     * Get the supported video sizes for an input high speed FPS range.
628     *
629     * <p> See {@link #getHighSpeedVideoSizes} for how to enable high speed recording.</p>
630     *
631     * @param fpsRange one of the FPS range returned by {@link #getHighSpeedVideoFpsRanges()}
632     * @return An array of video sizes to create high speed capture sessions for high speed streaming
633     *         use cases.
634     *
635     * @throws IllegalArgumentException if input FPS range does not exist in the return value of
636     *         getHighSpeedVideoFpsRanges
637     * @see #getHighSpeedVideoFpsRanges()
638     */
639    public Size[] getHighSpeedVideoSizesFor(Range<Integer> fpsRange) {
640        Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
641        if (sizeCount == null || sizeCount == 0) {
642            throw new IllegalArgumentException(String.format(
643                    "FpsRange %s does not support high speed video recording", fpsRange));
644        }
645
646        Size[] sizes = new Size[sizeCount];
647        int i = 0;
648        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
649            if (fpsRange.equals(config.getFpsRange())) {
650                sizes[i++] = config.getSize();
651            }
652        }
653        return sizes;
654    }
655
656    /**
657     * Get a list of supported high resolution sizes, which cannot operate at full BURST_CAPTURE
658     * rate.
659     *
660     * <p>This includes all output sizes that cannot meet the 20 fps frame rate requirements for the
661     * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE}
662     * capability.  This does not include the stall duration, so for example, a JPEG or RAW16 output
663     * resolution with a large stall duration but a minimum frame duration that's above 20 fps will
664     * still be listed in the regular {@link #getOutputSizes} list. All the sizes on this list are
665     * still guaranteed to operate at a rate of at least 10 fps, not including stall duration.</p>
666     *
667     * <p>For a device that does not support the BURST_CAPTURE capability, this list will be
668     * {@code null}, since resolutions in the {@link #getOutputSizes} list are already not
669     * guaranteed to meet &gt;= 20 fps rate requirements. For a device that does support the
670     * BURST_CAPTURE capability, this list may be empty, if all supported resolutions meet the 20
671     * fps requirement.</p>
672     *
673     * @return an array of supported slower high-resolution sizes, or {@code null} if the
674     *         BURST_CAPTURE capability is not supported
675     */
676    public Size[] getHighResolutionOutputSizes(int format) {
677        if (!mListHighResolution) return null;
678
679        return getPublicFormatSizes(format, /*output*/true, /*highRes*/ true);
680    }
681
682    /**
683     * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
684     * for the format/size combination (in nanoseconds).
685     *
686     * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
687     * <p>{@code size} should be one of the ones returned by
688     * {@link #getOutputSizes(int)}.</p>
689     *
690     * <p>This should correspond to the frame duration when only that stream is active, with all
691     * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
692     * </p>
693     *
694     * <p>When multiple streams are used in a request, the minimum frame duration will be
695     * {@code max(individual stream min durations)}.</p>
696     *
697     * <p>For devices that do not support manual sensor control
698     * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
699     * this function may return 0.</p>
700     *
701     * <!--
702     * TODO: uncomment after adding input stream support
703     * <p>The minimum frame duration of a stream (of a particular format, size) is the same
704     * regardless of whether the stream is input or output.</p>
705     * -->
706     *
707     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
708     * @param size an output-compatible size
709     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
710     *          0 if the minimum frame duration is not available.
711     *
712     * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
713     * @throws NullPointerException if {@code size} was {@code null}
714     *
715     * @see CaptureRequest#SENSOR_FRAME_DURATION
716     * @see #getOutputStallDuration(int, Size)
717     * @see ImageFormat
718     * @see PixelFormat
719     */
720    public long getOutputMinFrameDuration(int format, Size size) {
721        checkNotNull(size, "size must not be null");
722        checkArgumentFormatSupported(format, /*output*/true);
723
724        return getInternalFormatDuration(imageFormatToInternal(format),
725                imageFormatToDataspace(format),
726                size,
727                DURATION_MIN_FRAME);
728    }
729
730    /**
731     * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
732     * for the class/size combination (in nanoseconds).
733     *
734     * <p>This assumes a the {@code klass} is set up to use {@link ImageFormat#PRIVATE}.
735     * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p>
736     *
737     * <p>{@code klass} should be one of the ones which is supported by
738     * {@link #isOutputSupportedFor(Class)}.</p>
739     *
740     * <p>{@code size} should be one of the ones returned by
741     * {@link #getOutputSizes(int)}.</p>
742     *
743     * <p>This should correspond to the frame duration when only that stream is active, with all
744     * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
745     * </p>
746     *
747     * <p>When multiple streams are used in a request, the minimum frame duration will be
748     * {@code max(individual stream min durations)}.</p>
749     *
750     * <p>For devices that do not support manual sensor control
751     * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
752     * this function may return 0.</p>
753     *
754     * <!--
755     * TODO: uncomment after adding input stream support
756     * <p>The minimum frame duration of a stream (of a particular format, size) is the same
757     * regardless of whether the stream is input or output.</p>
758     * -->
759     *
760     * @param klass
761     *          a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
762     *          non-empty array returned by {@link #getOutputSizes(Class)}
763     * @param size an output-compatible size
764     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
765     *          0 if the minimum frame duration is not available.
766     *
767     * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
768     * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
769     *
770     * @see CaptureRequest#SENSOR_FRAME_DURATION
771     * @see ImageFormat
772     * @see PixelFormat
773     */
774    public <T> long getOutputMinFrameDuration(final Class<T> klass, final Size size) {
775        if (!isOutputSupportedFor(klass)) {
776            throw new IllegalArgumentException("klass was not supported");
777        }
778
779        return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
780                HAL_DATASPACE_UNKNOWN,
781                size, DURATION_MIN_FRAME);
782    }
783
784    /**
785     * Get the stall duration for the format/size combination (in nanoseconds).
786     *
787     * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
788     * <p>{@code size} should be one of the ones returned by
789     * {@link #getOutputSizes(int)}.</p>
790     *
791     * <p>
792     * A stall duration is how much extra time would get added to the normal minimum frame duration
793     * for a repeating request that has streams with non-zero stall.
794     *
795     * <p>For example, consider JPEG captures which have the following characteristics:
796     *
797     * <ul>
798     * <li>JPEG streams act like processed YUV streams in requests for which they are not included;
799     * in requests in which they are directly referenced, they act as JPEG streams.
800     * This is because supporting a JPEG stream requires the underlying YUV data to always be ready
801     * for use by a JPEG encoder, but the encoder will only be used (and impact frame duration) on
802     * requests that actually reference a JPEG stream.
803     * <li>The JPEG processor can run concurrently to the rest of the camera pipeline, but cannot
804     * process more than 1 capture at a time.
805     * </ul>
806     *
807     * <p>In other words, using a repeating YUV request would result in a steady frame rate
808     * (let's say it's 30 FPS). If a single JPEG request is submitted periodically,
809     * the frame rate will stay at 30 FPS (as long as we wait for the previous JPEG to return each
810     * time). If we try to submit a repeating YUV + JPEG request, then the frame rate will drop from
811     * 30 FPS.</p>
812     *
813     * <p>In general, submitting a new request with a non-0 stall time stream will <em>not</em> cause a
814     * frame rate drop unless there are still outstanding buffers for that stream from previous
815     * requests.</p>
816     *
817     * <p>Submitting a repeating request with streams (call this {@code S}) is the same as setting
818     * the minimum frame duration from the normal minimum frame duration corresponding to {@code S},
819     * added with the maximum stall duration for {@code S}.</p>
820     *
821     * <p>If interleaving requests with and without a stall duration, a request will stall by the
822     * maximum of the remaining times for each can-stall stream with outstanding buffers.</p>
823     *
824     * <p>This means that a stalling request will not have an exposure start until the stall has
825     * completed.</p>
826     *
827     * <p>This should correspond to the stall duration when only that stream is active, with all
828     * processing (typically in {@code android.*.mode}) set to {@code FAST} or {@code OFF}.
829     * Setting any of the processing modes to {@code HIGH_QUALITY} effectively results in an
830     * indeterminate stall duration for all streams in a request (the regular stall calculation
831     * rules are ignored).</p>
832     *
833     * <p>The following formats may always have a stall duration:
834     * <ul>
835     * <li>{@link ImageFormat#JPEG JPEG}
836     * <li>{@link ImageFormat#RAW_SENSOR RAW16}
837     * </ul>
838     * </p>
839     *
840     * <p>The following formats will never have a stall duration:
841     * <ul>
842     * <li>{@link ImageFormat#YUV_420_888 YUV_420_888}
843     * <li>{@link #isOutputSupportedFor(Class) Implementation-Defined}
844     * </ul></p>
845     *
846     * <p>
847     * All other formats may or may not have an allowed stall duration on a per-capability basis;
848     * refer to {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES
849     * android.request.availableCapabilities} for more details.</p>
850     * </p>
851     *
852     * <p>See {@link CaptureRequest#SENSOR_FRAME_DURATION android.sensor.frameDuration}
853     * for more information about calculating the max frame rate (absent stalls).</p>
854     *
855     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
856     * @param size an output-compatible size
857     * @return a stall duration {@code >=} 0 in nanoseconds
858     *
859     * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
860     * @throws NullPointerException if {@code size} was {@code null}
861     *
862     * @see CaptureRequest#SENSOR_FRAME_DURATION
863     * @see ImageFormat
864     * @see PixelFormat
865     */
866    public long getOutputStallDuration(int format, Size size) {
867        checkArgumentFormatSupported(format, /*output*/true);
868
869        return getInternalFormatDuration(imageFormatToInternal(format),
870                imageFormatToDataspace(format),
871                size,
872                DURATION_STALL);
873    }
874
875    /**
876     * Get the stall duration for the class/size combination (in nanoseconds).
877     *
878     * <p>This assumes a the {@code klass} is set up to use {@link ImageFormat#PRIVATE}.
879     * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p>
880     *
881     * <p>{@code klass} should be one of the ones with a non-empty array returned by
882     * {@link #getOutputSizes(Class)}.</p>
883     *
884     * <p>{@code size} should be one of the ones returned by
885     * {@link #getOutputSizes(Class)}.</p>
886     *
887     * <p>See {@link #getOutputStallDuration(int, Size)} for a definition of a
888     * <em>stall duration</em>.</p>
889     *
890     * @param klass
891     *          a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
892     *          non-empty array returned by {@link #getOutputSizes(Class)}
893     * @param size an output-compatible size
894     * @return a minimum frame duration {@code >=} 0 in nanoseconds
895     *
896     * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
897     * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
898     *
899     * @see CaptureRequest#SENSOR_FRAME_DURATION
900     * @see ImageFormat
901     * @see PixelFormat
902     */
903    public <T> long getOutputStallDuration(final Class<T> klass, final Size size) {
904        if (!isOutputSupportedFor(klass)) {
905            throw new IllegalArgumentException("klass was not supported");
906        }
907
908        return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
909                HAL_DATASPACE_UNKNOWN, size, DURATION_STALL);
910    }
911
912    /**
913     * Check if this {@link StreamConfigurationMap} is equal to another
914     * {@link StreamConfigurationMap}.
915     *
916     * <p>Two vectors are only equal if and only if each of the respective elements is equal.</p>
917     *
918     * @return {@code true} if the objects were equal, {@code false} otherwise
919     */
920    @Override
921    public boolean equals(final Object obj) {
922        if (obj == null) {
923            return false;
924        }
925        if (this == obj) {
926            return true;
927        }
928        if (obj instanceof StreamConfigurationMap) {
929            final StreamConfigurationMap other = (StreamConfigurationMap) obj;
930            // XX: do we care about order?
931            return Arrays.equals(mConfigurations, other.mConfigurations) &&
932                    Arrays.equals(mMinFrameDurations, other.mMinFrameDurations) &&
933                    Arrays.equals(mStallDurations, other.mStallDurations) &&
934                    Arrays.equals(mDepthConfigurations, other.mDepthConfigurations) &&
935                    Arrays.equals(mHighSpeedVideoConfigurations,
936                            other.mHighSpeedVideoConfigurations);
937        }
938        return false;
939    }
940
941    /**
942     * {@inheritDoc}
943     */
944    @Override
945    public int hashCode() {
946        // XX: do we care about order?
947        return HashCodeHelpers.hashCodeGeneric(
948                mConfigurations, mMinFrameDurations,
949                mStallDurations,
950                mDepthConfigurations, mHighSpeedVideoConfigurations);
951    }
952
953    // Check that the argument is supported by #getOutputFormats or #getInputFormats
954    private int checkArgumentFormatSupported(int format, boolean output) {
955        checkArgumentFormat(format);
956
957        int internalFormat = imageFormatToInternal(format);
958        int internalDataspace = imageFormatToDataspace(format);
959
960        if (output) {
961            if (internalDataspace == HAL_DATASPACE_DEPTH) {
962                if (mDepthOutputFormats.indexOfKey(internalFormat) >= 0) {
963                    return format;
964                }
965            } else {
966                if (mAllOutputFormats.indexOfKey(internalFormat) >= 0) {
967                    return format;
968                }
969            }
970        } else {
971            if (mInputFormats.indexOfKey(internalFormat) >= 0) {
972                return format;
973            }
974        }
975
976        throw new IllegalArgumentException(String.format(
977                "format %x is not supported by this stream configuration map", format));
978    }
979
980    /**
981     * Ensures that the format is either user-defined or implementation defined.
982     *
983     * <p>If a format has a different internal representation than the public representation,
984     * passing in the public representation here will fail.</p>
985     *
986     * <p>For example if trying to use {@link ImageFormat#JPEG}:
987     * it has a different public representation than the internal representation
988     * {@code HAL_PIXEL_FORMAT_BLOB}, this check will fail.</p>
989     *
990     * <p>Any invalid/undefined formats will raise an exception.</p>
991     *
992     * @param format image format
993     * @return the format
994     *
995     * @throws IllegalArgumentException if the format was invalid
996     */
997    static int checkArgumentFormatInternal(int format) {
998        switch (format) {
999            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
1000            case HAL_PIXEL_FORMAT_BLOB:
1001            case HAL_PIXEL_FORMAT_RAW_OPAQUE:
1002            case HAL_PIXEL_FORMAT_Y16:
1003                return format;
1004            case ImageFormat.JPEG:
1005                throw new IllegalArgumentException(
1006                        "ImageFormat.JPEG is an unknown internal format");
1007            default:
1008                return checkArgumentFormat(format);
1009        }
1010    }
1011
1012    /**
1013     * Ensures that the format is publicly user-defined in either ImageFormat or PixelFormat.
1014     *
1015     * <p>If a format has a different public representation than the internal representation,
1016     * passing in the internal representation here will fail.</p>
1017     *
1018     * <p>For example if trying to use {@code HAL_PIXEL_FORMAT_BLOB}:
1019     * it has a different internal representation than the public representation
1020     * {@link ImageFormat#JPEG}, this check will fail.</p>
1021     *
1022     * <p>Any invalid/undefined formats will raise an exception, including implementation-defined.
1023     * </p>
1024     *
1025     * <p>Note that {@code @hide} and deprecated formats will not pass this check.</p>
1026     *
1027     * @param format image format
1028     * @return the format
1029     *
1030     * @throws IllegalArgumentException if the format was not user-defined
1031     */
1032    static int checkArgumentFormat(int format) {
1033        if (!ImageFormat.isPublicFormat(format) && !PixelFormat.isPublicFormat(format)) {
1034            throw new IllegalArgumentException(String.format(
1035                    "format 0x%x was not defined in either ImageFormat or PixelFormat", format));
1036        }
1037
1038        return format;
1039    }
1040
1041    /**
1042     * Convert an internal format compatible with {@code graphics.h} into public-visible
1043     * {@code ImageFormat}. This assumes the dataspace of the format is not HAL_DATASPACE_DEPTH.
1044     *
1045     * <p>In particular these formats are converted:
1046     * <ul>
1047     * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.JPEG</li>
1048     * </ul>
1049     * </p>
1050     *
1051     * <p>Passing in a format which has no public equivalent will fail;
1052     * as will passing in a public format which has a different internal format equivalent.
1053     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1054     *
1055     * <p>All other formats are returned as-is, no further invalid check is performed.</p>
1056     *
1057     * <p>This function is the dual of {@link #imageFormatToInternal} for dataspaces other than
1058     * HAL_DATASPACE_DEPTH.</p>
1059     *
1060     * @param format image format from {@link ImageFormat} or {@link PixelFormat}
1061     * @return the converted image formats
1062     *
1063     * @throws IllegalArgumentException
1064     *          if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
1065     *          {@link ImageFormat#JPEG}
1066     *
1067     * @see ImageFormat
1068     * @see PixelFormat
1069     * @see #checkArgumentFormat
1070     */
1071    static int imageFormatToPublic(int format) {
1072        switch (format) {
1073            case HAL_PIXEL_FORMAT_BLOB:
1074                return ImageFormat.JPEG;
1075            case ImageFormat.JPEG:
1076                throw new IllegalArgumentException(
1077                        "ImageFormat.JPEG is an unknown internal format");
1078            default:
1079                return format;
1080        }
1081    }
1082
1083    /**
1084     * Convert an internal format compatible with {@code graphics.h} into public-visible
1085     * {@code ImageFormat}. This assumes the dataspace of the format is HAL_DATASPACE_DEPTH.
1086     *
1087     * <p>In particular these formats are converted:
1088     * <ul>
1089     * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.DEPTH_POINT_CLOUD
1090     * <li>HAL_PIXEL_FORMAT_Y16 => ImageFormat.DEPTH16
1091     * </ul>
1092     * </p>
1093     *
1094     * <p>Passing in an implementation-defined format which has no public equivalent will fail;
1095     * as will passing in a public format which has a different internal format equivalent.
1096     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1097     *
1098     * <p>All other formats are returned as-is, no further invalid check is performed.</p>
1099     *
1100     * <p>This function is the dual of {@link #imageFormatToInternal} for formats associated with
1101     * HAL_DATASPACE_DEPTH.</p>
1102     *
1103     * @param format image format from {@link ImageFormat} or {@link PixelFormat}
1104     * @return the converted image formats
1105     *
1106     * @throws IllegalArgumentException
1107     *          if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
1108     *          {@link ImageFormat#JPEG}
1109     *
1110     * @see ImageFormat
1111     * @see PixelFormat
1112     * @see #checkArgumentFormat
1113     */
1114    static int depthFormatToPublic(int format) {
1115        switch (format) {
1116            case HAL_PIXEL_FORMAT_BLOB:
1117                return ImageFormat.DEPTH_POINT_CLOUD;
1118            case HAL_PIXEL_FORMAT_Y16:
1119                return ImageFormat.DEPTH16;
1120            case ImageFormat.JPEG:
1121                throw new IllegalArgumentException(
1122                        "ImageFormat.JPEG is an unknown internal format");
1123            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
1124                throw new IllegalArgumentException(
1125                        "IMPLEMENTATION_DEFINED must not leak to public API");
1126            default:
1127                throw new IllegalArgumentException(
1128                        "Unknown DATASPACE_DEPTH format " + format);
1129        }
1130    }
1131
1132    /**
1133     * Convert image formats from internal to public formats (in-place).
1134     *
1135     * @param formats an array of image formats
1136     * @return {@code formats}
1137     *
1138     * @see #imageFormatToPublic
1139     */
1140    static int[] imageFormatToPublic(int[] formats) {
1141        if (formats == null) {
1142            return null;
1143        }
1144
1145        for (int i = 0; i < formats.length; ++i) {
1146            formats[i] = imageFormatToPublic(formats[i]);
1147        }
1148
1149        return formats;
1150    }
1151
1152    /**
1153     * Convert a public format compatible with {@code ImageFormat} to an internal format
1154     * from {@code graphics.h}.
1155     *
1156     * <p>In particular these formats are converted:
1157     * <ul>
1158     * <li>ImageFormat.JPEG => HAL_PIXEL_FORMAT_BLOB
1159     * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_PIXEL_FORMAT_BLOB
1160     * <li>ImageFormat.DEPTH16 => HAL_PIXEL_FORMAT_Y16
1161     * </ul>
1162     * </p>
1163     *
1164     * <p>Passing in an internal format which has a different public format equivalent will fail.
1165     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1166     *
1167     * <p>All other formats are returned as-is, no invalid check is performed.</p>
1168     *
1169     * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1170     *
1171     * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1172     * @return the converted image formats
1173     *
1174     * @see ImageFormat
1175     * @see PixelFormat
1176     *
1177     * @throws IllegalArgumentException
1178     *              if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1179     */
1180    static int imageFormatToInternal(int format) {
1181        switch (format) {
1182            case ImageFormat.JPEG:
1183            case ImageFormat.DEPTH_POINT_CLOUD:
1184                return HAL_PIXEL_FORMAT_BLOB;
1185            case ImageFormat.DEPTH16:
1186                return HAL_PIXEL_FORMAT_Y16;
1187            default:
1188                return format;
1189        }
1190    }
1191
1192    /**
1193     * Convert a public format compatible with {@code ImageFormat} to an internal dataspace
1194     * from {@code graphics.h}.
1195     *
1196     * <p>In particular these formats are converted:
1197     * <ul>
1198     * <li>ImageFormat.JPEG => HAL_DATASPACE_JFIF
1199     * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_DATASPACE_DEPTH
1200     * <li>ImageFormat.DEPTH16 => HAL_DATASPACE_DEPTH
1201     * <li>others => HAL_DATASPACE_UNKNOWN
1202     * </ul>
1203     * </p>
1204     *
1205     * <p>Passing in an implementation-defined format here will fail (it's not a public format);
1206     * as will passing in an internal format which has a different public format equivalent.
1207     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1208     *
1209     * <p>All other formats are returned as-is, no invalid check is performed.</p>
1210     *
1211     * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1212     *
1213     * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1214     * @return the converted image formats
1215     *
1216     * @see ImageFormat
1217     * @see PixelFormat
1218     *
1219     * @throws IllegalArgumentException
1220     *              if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1221     */
1222    static int imageFormatToDataspace(int format) {
1223        switch (format) {
1224            case ImageFormat.JPEG:
1225                return HAL_DATASPACE_JFIF;
1226            case ImageFormat.DEPTH_POINT_CLOUD:
1227            case ImageFormat.DEPTH16:
1228                return HAL_DATASPACE_DEPTH;
1229            default:
1230                return HAL_DATASPACE_UNKNOWN;
1231        }
1232    }
1233
1234    /**
1235     * Convert image formats from public to internal formats (in-place).
1236     *
1237     * @param formats an array of image formats
1238     * @return {@code formats}
1239     *
1240     * @see #imageFormatToInternal
1241     *
1242     * @hide
1243     */
1244    public static int[] imageFormatToInternal(int[] formats) {
1245        if (formats == null) {
1246            return null;
1247        }
1248
1249        for (int i = 0; i < formats.length; ++i) {
1250            formats[i] = imageFormatToInternal(formats[i]);
1251        }
1252
1253        return formats;
1254    }
1255
1256    private Size[] getPublicFormatSizes(int format, boolean output, boolean highRes) {
1257        try {
1258            checkArgumentFormatSupported(format, output);
1259        } catch (IllegalArgumentException e) {
1260            return null;
1261        }
1262
1263        int internalFormat = imageFormatToInternal(format);
1264        int dataspace = imageFormatToDataspace(format);
1265
1266        return getInternalFormatSizes(internalFormat, dataspace, output, highRes);
1267    }
1268
    /**
     * Collect all sizes for an internal format/dataspace in the given direction.
     *
     * <p>The result array is presized from the per-format size count stored in the
     * matching format map; the loop below must fill it exactly, and a mismatch is
     * treated as corrupted metadata (AssertionError).</p>
     */
    private Size[] getInternalFormatSizes(int format, int dataspace,
            boolean output, boolean highRes) {
        // Select the format -> size-count table for this query: inputs, depth outputs,
        // slow high-resolution outputs, or regular outputs.
        SparseIntArray formatsMap =
                !output ? mInputFormats :
                dataspace == HAL_DATASPACE_DEPTH ? mDepthOutputFormats :
                highRes ? mHighResOutputFormats :
                mOutputFormats;

        int sizesCount = formatsMap.get(format);
        // For inputs and depth outputs, a zero count means unsupported. For regular
        // outputs, consult the combined table (mAllOutputFormats) before throwing, so
        // that e.g. a format with only high-res sizes is not rejected for !highRes.
        if ( ((!output || dataspace == HAL_DATASPACE_DEPTH) && sizesCount == 0) ||
                (output && dataspace != HAL_DATASPACE_DEPTH && mAllOutputFormats.get(format) == 0)) {
            // Only throw if this is really not supported at all
            throw new IllegalArgumentException("format not available");
        }

        Size[] sizes = new Size[sizesCount];
        int sizeIndex = 0;

        // Depth configurations are kept in a separate array from regular ones.
        StreamConfiguration[] configurations =
                (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations;

        for (StreamConfiguration config : configurations) {
            int fmt = config.getFormat();
            if (fmt == format && config.isOutput() == output) {
                if (output) {
                    // Filter slow high-res output formats; include for
                    // highRes, remove for !highRes
                    long duration = 0;
                    // Find this size's minimum frame duration (0 if none is listed).
                    for (int i = 0; i < mMinFrameDurations.length; i++) {
                        StreamConfigurationDuration d = mMinFrameDurations[i];
                        if (d.getFormat() == fmt &&
                                d.getWidth() == config.getSize().getWidth() &&
                                d.getHeight() == config.getSize().getHeight()) {
                            duration = d.getDuration();
                            break;
                        }
                    }
                    // A size counts as "high-res" exactly when its min frame duration
                    // is slower than 20 fps; skip it unless that matches highRes.
                    if (highRes != (duration > DURATION_20FPS_NS)) {
                        continue;
                    }
                }
                sizes[sizeIndex++] = config.getSize();
            }
        }

        // Sanity check: the stored count must agree with the configurations we found.
        if (sizeIndex != sizesCount) {
            throw new AssertionError(
                    "Too few sizes (expected " + sizesCount + ", actual " + sizeIndex + ")");
        }

        return sizes;
    }
1321
1322    /** Get the list of publically visible output formats; does not include IMPL_DEFINED */
1323    private int[] getPublicFormats(boolean output) {
1324        int[] formats = new int[getPublicFormatCount(output)];
1325
1326        int i = 0;
1327
1328        SparseIntArray map = getFormatsMap(output);
1329        for (int j = 0; j < map.size(); j++) {
1330            int format = map.keyAt(j);
1331            if (format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
1332                formats[i++] = imageFormatToPublic(format);
1333            }
1334        }
1335        if (output) {
1336            for (int j = 0; j < mDepthOutputFormats.size(); j++) {
1337                formats[i++] = depthFormatToPublic(mDepthOutputFormats.keyAt(j));
1338            }
1339        }
1340        if (formats.length != i) {
1341            throw new AssertionError("Too few formats " + i + ", expected " + formats.length);
1342        }
1343
1344        return formats;
1345    }
1346
1347    /** Get the format -> size count map for either output or input formats */
1348    private SparseIntArray getFormatsMap(boolean output) {
1349        return output ? mAllOutputFormats : mInputFormats;
1350    }
1351
1352    private long getInternalFormatDuration(int format, int dataspace, Size size, int duration) {
1353        // assume format is already checked, since its internal
1354
1355        if (!isSupportedInternalConfiguration(format, dataspace, size)) {
1356            throw new IllegalArgumentException("size was not supported");
1357        }
1358
1359        StreamConfigurationDuration[] durations = getDurations(duration, dataspace);
1360
1361        for (StreamConfigurationDuration configurationDuration : durations) {
1362            if (configurationDuration.getFormat() == format &&
1363                    configurationDuration.getWidth() == size.getWidth() &&
1364                    configurationDuration.getHeight() == size.getHeight()) {
1365                return configurationDuration.getDuration();
1366            }
1367        }
1368        // Default duration is '0' (unsupported/no extra stall)
1369        return 0;
1370    }
1371
1372    /**
1373     * Get the durations array for the kind of duration
1374     *
1375     * @see #DURATION_MIN_FRAME
1376     * @see #DURATION_STALL
1377     * */
1378    private StreamConfigurationDuration[] getDurations(int duration, int dataspace) {
1379        switch (duration) {
1380            case DURATION_MIN_FRAME:
1381                return (dataspace == HAL_DATASPACE_DEPTH) ?
1382                        mDepthMinFrameDurations : mMinFrameDurations;
1383            case DURATION_STALL:
1384                return (dataspace == HAL_DATASPACE_DEPTH) ?
1385                        mDepthStallDurations : mStallDurations;
1386            default:
1387                throw new IllegalArgumentException("duration was invalid");
1388        }
1389    }
1390
1391    /** Count the number of publicly-visible output formats */
1392    private int getPublicFormatCount(boolean output) {
1393        SparseIntArray formatsMap = getFormatsMap(output);
1394        int size = formatsMap.size();
1395        if (formatsMap.indexOfKey(HAL_PIXEL_FORMAT_RAW_OPAQUE) >= 0) {
1396            size -= 1;
1397        }
1398        if (output) {
1399            size += mDepthOutputFormats.size();
1400        }
1401
1402        return size;
1403    }
1404
1405    private static <T> boolean arrayContains(T[] array, T element) {
1406        if (array == null) {
1407            return false;
1408        }
1409
1410        for (T el : array) {
1411            if (Objects.equals(el, element)) {
1412                return true;
1413            }
1414        }
1415
1416        return false;
1417    }
1418
1419    private boolean isSupportedInternalConfiguration(int format, int dataspace,
1420            Size size) {
1421        StreamConfiguration[] configurations =
1422                (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations;
1423
1424        for (int i = 0; i < configurations.length; i++) {
1425            if (configurations[i].getFormat() == format &&
1426                    configurations[i].getSize().equals(size)) {
1427                return true;
1428            }
1429        }
1430
1431        return false;
1432    }
1433
1434    /**
1435     * Return this {@link StreamConfigurationMap} as a string representation.
1436     *
1437     * <p>{@code "StreamConfigurationMap(Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d,
1438     * stall:%d], ... [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]), Inputs([w:%d, h:%d,
1439     * format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)]), ValidOutputFormatsForInput(
1440     * [in:%d, out:%d, ... %d], ... [in:%d, out:%d, ... %d]), HighSpeedVideoConfigurations(
1441     * [w:%d, h:%d, min_fps:%d, max_fps:%d], ... [w:%d, h:%d, min_fps:%d, max_fps:%d]))"}.</p>
1442     *
1443     * <p>{@code Outputs([w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d], ...
1444     * [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d])}, where
1445     * {@code [w:%d, h:%d, format:%s(%d), min_duration:%d, stall:%d]} represents an output
1446     * configuration's width, height, format, minimal frame duration in nanoseconds, and stall
1447     * duration in nanoseconds.</p>
1448     *
1449     * <p>{@code Inputs([w:%d, h:%d, format:%s(%d)], ... [w:%d, h:%d, format:%s(%d)])}, where
1450     * {@code [w:%d, h:%d, format:%s(%d)]} represents an input configuration's width, height, and
1451     * format.</p>
1452     *
1453     * <p>{@code ValidOutputFormatsForInput([in:%s(%d), out:%s(%d), ... %s(%d)],
1454     * ... [in:%s(%d), out:%s(%d), ... %s(%d)])}, where {@code [in:%s(%d), out:%s(%d), ... %s(%d)]}
1455     * represents an input fomat and its valid output formats.</p>
1456     *
1457     * <p>{@code HighSpeedVideoConfigurations([w:%d, h:%d, min_fps:%d, max_fps:%d],
1458     * ... [w:%d, h:%d, min_fps:%d, max_fps:%d])}, where
1459     * {@code [w:%d, h:%d, min_fps:%d, max_fps:%d]} represents a high speed video output
1460     * configuration's width, height, minimal frame rate, and maximal frame rate.</p>
1461     *
1462     * @return string representation of {@link StreamConfigurationMap}
1463     */
1464    @Override
1465    public String toString() {
1466        StringBuilder sb = new StringBuilder("StreamConfiguration(");
1467        appendOutputsString(sb);
1468        sb.append(", ");
1469        appendHighResOutputsString(sb);
1470        sb.append(", ");
1471        appendInputsString(sb);
1472        sb.append(", ");
1473        appendValidOutputFormatsForInputString(sb);
1474        sb.append(", ");
1475        appendHighSpeedVideoConfigurationsString(sb);
1476        sb.append(")");
1477
1478        return sb.toString();
1479    }
1480
1481    private void appendOutputsString(StringBuilder sb) {
1482        sb.append("Outputs(");
1483        int[] formats = getOutputFormats();
1484        for (int format : formats) {
1485            Size[] sizes = getOutputSizes(format);
1486            for (Size size : sizes) {
1487                long minFrameDuration = getOutputMinFrameDuration(format, size);
1488                long stallDuration = getOutputStallDuration(format, size);
1489                sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " +
1490                        "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format),
1491                        format, minFrameDuration, stallDuration));
1492            }
1493        }
1494        // Remove the pending ", "
1495        if (sb.charAt(sb.length() - 1) == ' ') {
1496            sb.delete(sb.length() - 2, sb.length());
1497        }
1498        sb.append(")");
1499    }
1500
1501    private void appendHighResOutputsString(StringBuilder sb) {
1502        sb.append("HighResolutionOutputs(");
1503        int[] formats = getOutputFormats();
1504        for (int format : formats) {
1505            Size[] sizes = getHighResolutionOutputSizes(format);
1506            if (sizes == null) continue;
1507            for (Size size : sizes) {
1508                long minFrameDuration = getOutputMinFrameDuration(format, size);
1509                long stallDuration = getOutputStallDuration(format, size);
1510                sb.append(String.format("[w:%d, h:%d, format:%s(%d), min_duration:%d, " +
1511                        "stall:%d], ", size.getWidth(), size.getHeight(), formatToString(format),
1512                        format, minFrameDuration, stallDuration));
1513            }
1514        }
1515        // Remove the pending ", "
1516        if (sb.charAt(sb.length() - 1) == ' ') {
1517            sb.delete(sb.length() - 2, sb.length());
1518        }
1519        sb.append(")");
1520    }
1521
1522    private void appendInputsString(StringBuilder sb) {
1523        sb.append("Inputs(");
1524        int[] formats = getInputFormats();
1525        for (int format : formats) {
1526            Size[] sizes = getInputSizes(format);
1527            for (Size size : sizes) {
1528                sb.append(String.format("[w:%d, h:%d, format:%s(%d)], ", size.getWidth(),
1529                        size.getHeight(), formatToString(format), format));
1530            }
1531        }
1532        // Remove the pending ", "
1533        if (sb.charAt(sb.length() - 1) == ' ') {
1534            sb.delete(sb.length() - 2, sb.length());
1535        }
1536        sb.append(")");
1537    }
1538
1539    private void appendValidOutputFormatsForInputString(StringBuilder sb) {
1540        sb.append("ValidOutputFormatsForInput(");
1541        int[] inputFormats = getInputFormats();
1542        for (int inputFormat : inputFormats) {
1543            sb.append(String.format("[in:%s(%d), out:", formatToString(inputFormat), inputFormat));
1544            int[] outputFormats = getValidOutputFormatsForInput(inputFormat);
1545            for (int i = 0; i < outputFormats.length; i++) {
1546                sb.append(String.format("%s(%d)", formatToString(outputFormats[i]),
1547                        outputFormats[i]));
1548                if (i < outputFormats.length - 1) {
1549                    sb.append(", ");
1550                }
1551            }
1552            sb.append("], ");
1553        }
1554        // Remove the pending ", "
1555        if (sb.charAt(sb.length() - 1) == ' ') {
1556            sb.delete(sb.length() - 2, sb.length());
1557        }
1558        sb.append(")");
1559    }
1560
1561    private void appendHighSpeedVideoConfigurationsString(StringBuilder sb) {
1562        sb.append("HighSpeedVideoConfigurations(");
1563        Size[] sizes = getHighSpeedVideoSizes();
1564        for (Size size : sizes) {
1565            Range<Integer>[] ranges = getHighSpeedVideoFpsRangesFor(size);
1566            for (Range<Integer> range : ranges) {
1567                sb.append(String.format("[w:%d, h:%d, min_fps:%d, max_fps:%d], ", size.getWidth(),
1568                        size.getHeight(), range.getLower(), range.getUpper()));
1569            }
1570        }
1571        // Remove the pending ", "
1572        if (sb.charAt(sb.length() - 1) == ' ') {
1573            sb.delete(sb.length() - 2, sb.length());
1574        }
1575        sb.append(")");
1576    }
1577
1578    private String formatToString(int format) {
1579        switch (format) {
1580            case ImageFormat.YV12:
1581                return "YV12";
1582            case ImageFormat.YUV_420_888:
1583                return "YUV_420_888";
1584            case ImageFormat.NV21:
1585                return "NV21";
1586            case ImageFormat.NV16:
1587                return "NV16";
1588            case PixelFormat.RGB_565:
1589                return "RGB_565";
1590            case PixelFormat.RGBA_8888:
1591                return "RGBA_8888";
1592            case PixelFormat.RGBX_8888:
1593                return "RGBX_8888";
1594            case PixelFormat.RGB_888:
1595                return "RGB_888";
1596            case ImageFormat.JPEG:
1597                return "JPEG";
1598            case ImageFormat.YUY2:
1599                return "YUY2";
1600            case ImageFormat.Y8:
1601                return "Y8";
1602            case ImageFormat.Y16:
1603                return "Y16";
1604            case ImageFormat.RAW_SENSOR:
1605                return "RAW_SENSOR";
1606            case ImageFormat.RAW10:
1607                return "RAW10";
1608            case ImageFormat.DEPTH16:
1609                return "DEPTH16";
1610            case ImageFormat.DEPTH_POINT_CLOUD:
1611                return "DEPTH_POINT_CLOUD";
1612            case ImageFormat.PRIVATE:
1613                return "PRIVATE";
1614            default:
1615                return "UNKNOWN";
1616        }
1617    }
1618
    // Internal HAL pixel format constants, mirrored
    // from system/core/include/system/graphics.h
    private static final int HAL_PIXEL_FORMAT_RAW16 = 0x20;
    private static final int HAL_PIXEL_FORMAT_BLOB = 0x21;
    private static final int HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22;
    private static final int HAL_PIXEL_FORMAT_YCbCr_420_888 = 0x23;
    // Internal-only opaque RAW format; filtered out of the public format lists
    private static final int HAL_PIXEL_FORMAT_RAW_OPAQUE = 0x24;
    private static final int HAL_PIXEL_FORMAT_RAW10 = 0x25;
    private static final int HAL_PIXEL_FORMAT_RAW12 = 0x26;
    private static final int HAL_PIXEL_FORMAT_Y16 = 0x20363159;


    // HAL dataspace constants; HAL_DATASPACE_DEPTH selects the depth-specific
    // configuration/duration arrays throughout this class
    private static final int HAL_DATASPACE_UNKNOWN = 0x0;
    private static final int HAL_DATASPACE_JFIF = 0x101;
    private static final int HAL_DATASPACE_DEPTH = 0x1000;

    // 50 ms min frame duration == 20 fps; sizes slower than this are treated as
    // "slow high-resolution" outputs and listed separately
    private static final long DURATION_20FPS_NS = 50000000L;
    /**
     * Selector constants for the kind of duration array to look up.
     *
     * @see #getDurations(int, int)
     */
    private static final int DURATION_MIN_FRAME = 0;
    private static final int DURATION_STALL = 1;

    // Raw stream configurations/durations for non-depth dataspaces, as reported by the HAL
    private final StreamConfiguration[] mConfigurations;
    private final StreamConfigurationDuration[] mMinFrameDurations;
    private final StreamConfigurationDuration[] mStallDurations;

    // Raw stream configurations/durations for HAL_DATASPACE_DEPTH
    private final StreamConfiguration[] mDepthConfigurations;
    private final StreamConfigurationDuration[] mDepthMinFrameDurations;
    private final StreamConfigurationDuration[] mDepthStallDurations;

    private final HighSpeedVideoConfiguration[] mHighSpeedVideoConfigurations;
    private final ReprocessFormatsMap mInputOutputFormatsMap;

    // Whether slow high-resolution sizes are listed (and must be filtered/merged)
    private final boolean mListHighResolution;

    /** internal format -> num output sizes mapping, not including slow high-res sizes, for
     * non-depth dataspaces */
    private final SparseIntArray mOutputFormats = new SparseIntArray();
    /** internal format -> num output sizes mapping for slow high-res sizes, for non-depth
     * dataspaces */
    private final SparseIntArray mHighResOutputFormats = new SparseIntArray();
    /** internal format -> num output sizes mapping for all non-depth dataspaces */
    private final SparseIntArray mAllOutputFormats = new SparseIntArray();
    /** internal format -> num input sizes mapping, for input reprocessing formats */
    private final SparseIntArray mInputFormats = new SparseIntArray();
    /** internal format -> num depth output sizes mapping, for HAL_DATASPACE_DEPTH */
    private final SparseIntArray mDepthOutputFormats = new SparseIntArray();
    /** High speed video Size -> FPS range count mapping*/
    private final HashMap</*HighSpeedVideoSize*/Size, /*Count*/Integer> mHighSpeedVideoSizeMap =
            new HashMap<Size, Integer>();
    /** High speed video FPS range -> Size count mapping*/
    private final HashMap</*HighSpeedVideoFpsRange*/Range<Integer>, /*Count*/Integer>
            mHighSpeedVideoFpsRangeMap = new HashMap<Range<Integer>, Integer>();
1672
1673}
1674