StreamConfigurationMap.java revision 456432ead7e262f72565d02ac46fd5e498844b92
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17package android.hardware.camera2.params;
18
19import android.graphics.ImageFormat;
20import android.graphics.PixelFormat;
21import android.hardware.camera2.CameraCharacteristics;
22import android.hardware.camera2.CameraDevice;
23import android.hardware.camera2.CaptureRequest;
24import android.hardware.camera2.utils.HashCodeHelpers;
25import android.hardware.camera2.legacy.LegacyCameraDevice;
26import android.hardware.camera2.legacy.LegacyMetadataMapper;
27import android.hardware.camera2.legacy.LegacyExceptionUtils.BufferQueueAbandonedException;
28import android.view.Surface;
29import android.util.Range;
30import android.util.Size;
31
32import java.util.Arrays;
33import java.util.HashMap;
34import java.util.Objects;
35import java.util.Set;
36
37import static com.android.internal.util.Preconditions.*;
38
39/**
40 * Immutable class to store the available stream
41 * {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP configurations} to set up
42 * {@link android.view.Surface Surfaces} for creating a
43 * {@link android.hardware.camera2.CameraCaptureSession capture session} with
44 * {@link android.hardware.camera2.CameraDevice#createCaptureSession}.
45 * <!-- TODO: link to input stream configuration -->
46 *
47 * <p>This is the authoritative list for all <!-- input/ -->output formats (and sizes respectively
48 * for that format) that are supported by a camera device.</p>
49 *
50 * <p>This also contains the minimum frame durations and stall durations for each format/size
51 * combination that can be used to calculate effective frame rate when submitting multiple captures.
52 * </p>
53 *
54 * <p>An instance of this object is available from {@link CameraCharacteristics} using
55 * the {@link CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP} key and the
56 * {@link CameraCharacteristics#get} method.</p>
57 *
58 * <pre><code>{@code
59 * CameraCharacteristics characteristics = cameraManager.getCameraCharacteristics(cameraId);
60 * StreamConfigurationMap configs = characteristics.get(
61 *         CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
62 * }</code></pre>
63 *
64 * @see CameraCharacteristics#SCALER_STREAM_CONFIGURATION_MAP
65 * @see CameraDevice#createCaptureSession
66 */
67public final class StreamConfigurationMap {
68
69    private static final String TAG = "StreamConfigurationMap";
70
71    /**
72     * Create a new {@link StreamConfigurationMap}.
73     *
74     * <p>The array parameters ownership is passed to this object after creation; do not
75     * write to them after this constructor is invoked.</p>
76     *
77     * @param configurations a non-{@code null} array of {@link StreamConfiguration}
78     * @param minFrameDurations a non-{@code null} array of {@link StreamConfigurationDuration}
79     * @param stallDurations a non-{@code null} array of {@link StreamConfigurationDuration}
80     * @param highSpeedVideoConfigurations an array of {@link HighSpeedVideoConfiguration}, null if
81     *        camera device does not support high speed video recording
82     *
83     * @throws NullPointerException if any of the arguments except highSpeedVideoConfigurations
84     *         were {@code null} or any subelements were {@code null}
85     *
86     * @hide
87     */
    public StreamConfigurationMap(
            StreamConfiguration[] configurations,
            StreamConfigurationDuration[] minFrameDurations,
            StreamConfigurationDuration[] stallDurations,
            StreamConfiguration[] depthConfigurations,
            StreamConfigurationDuration[] depthMinFrameDurations,
            StreamConfigurationDuration[] depthStallDurations,
            HighSpeedVideoConfiguration[] highSpeedVideoConfigurations) {

        // The first three arrays are mandatory; null arrays or null elements throw NPE.
        mConfigurations = checkArrayElementsNotNull(configurations, "configurations");
        mMinFrameDurations = checkArrayElementsNotNull(minFrameDurations, "minFrameDurations");
        mStallDurations = checkArrayElementsNotNull(stallDurations, "stallDurations");

        // Depth support is optional; substitute empty arrays so later lookups need no null checks.
        if (depthConfigurations == null) {
            mDepthConfigurations = new StreamConfiguration[0];
            mDepthMinFrameDurations = new StreamConfigurationDuration[0];
            mDepthStallDurations = new StreamConfigurationDuration[0];
        } else {
            mDepthConfigurations = checkArrayElementsNotNull(depthConfigurations,
                    "depthConfigurations");
            mDepthMinFrameDurations = checkArrayElementsNotNull(depthMinFrameDurations,
                    "depthMinFrameDurations");
            mDepthStallDurations = checkArrayElementsNotNull(depthStallDurations,
                    "depthStallDurations");
        }

        // High speed video support is also optional; null means the device does not support it.
        if (highSpeedVideoConfigurations == null) {
            mHighSpeedVideoConfigurations = new HighSpeedVideoConfiguration[0];
        } else {
            mHighSpeedVideoConfigurations = checkArrayElementsNotNull(
                    highSpeedVideoConfigurations, "highSpeedVideoConfigurations");
        }

        // For each format, track how many sizes there are available to configure
        // (output and input configurations are counted in separate maps).
        for (StreamConfiguration config : configurations) {
            HashMap<Integer, Integer> map = config.isOutput() ? mOutputFormats : mInputFormats;

            Integer count = map.get(config.getFormat());

            if (count == null) {
                count = 0;
            }

            map.put(config.getFormat(), count + 1);
        }

        // For each depth format, track how many sizes there are available to configure
        for (StreamConfiguration config : mDepthConfigurations) {
            if (!config.isOutput()) {
                // Ignoring input depth configs
                continue;
            }

            Integer count = mDepthOutputFormats.get(config.getFormat());

            if (count == null) {
                count = 0;
            }

            mDepthOutputFormats.put(config.getFormat(), count + 1);
        }

        // Sanity check: every device must expose at least one IMPLEMENTATION_DEFINED output.
        if (!mOutputFormats.containsKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) {
            throw new AssertionError(
                    "At least one stream configuration for IMPLEMENTATION_DEFINED must exist");
        }

        // For each Size/FPS range, track how many FPS range/Size there are available
        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
            Size size = config.getSize();
            Range<Integer> fpsRange = config.getFpsRange();
            // Count FPS ranges available per size...
            Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
            if (fpsRangeCount == null) {
                fpsRangeCount = 0;
            }
            mHighSpeedVideoSizeMap.put(size, fpsRangeCount + 1);
            // ...and sizes available per FPS range, for the reverse lookup.
            Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
            if (sizeCount == null) {
                sizeCount = 0;
            }
            mHighSpeedVideoFpsRangeMap.put(fpsRange, sizeCount + 1);
        }
    }
171
172    /**
173     * Get the image {@code format} output formats in this stream configuration.
174     *
175     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
176     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
177     *
178     * <p>Formats listed in this array are guaranteed to return true if queried with
179     * {@link #isOutputSupportedFor(int)}.</p>
180     *
181     * @return an array of integer format
182     *
183     * @see ImageFormat
184     * @see PixelFormat
185     */
186    public final int[] getOutputFormats() {
187        return getPublicFormats(/*output*/true);
188    }
189
190    /**
191     * Get the image {@code format} input formats in this stream configuration.
192     *
193     * <p>All image formats returned by this function will be defined in either {@link ImageFormat}
194     * or in {@link PixelFormat} (and there is no possibility of collision).</p>
195     *
196     * @return an array of integer format
197     *
198     * @see ImageFormat
199     * @see PixelFormat
200     *
201     * @hide
202     */
203    public final int[] getInputFormats() {
204        return getPublicFormats(/*output*/false);
205    }
206
207    /**
208     * Get the supported input sizes for this input format.
209     *
210     * <p>The format must have come from {@link #getInputFormats}; otherwise
211     * {@code null} is returned.</p>
212     *
213     * @param format a format from {@link #getInputFormats}
214     * @return a non-empty array of sizes, or {@code null} if the format was not available.
215     *
216     * @hide
217     */
218    public Size[] getInputSizes(final int format) {
219        return getPublicFormatSizes(format, /*output*/false);
220    }
221
222    /**
223     * Determine whether or not output surfaces with a particular user-defined format can be passed
224     * {@link CameraDevice#createCaptureSession createCaptureSession}.
225     *
226     * <p>This method determines that the output {@code format} is supported by the camera device;
227     * each output {@code surface} target may or may not itself support that {@code format}.
228     * Refer to the class which provides the surface for additional documentation.</p>
229     *
230     * <p>Formats for which this returns {@code true} are guaranteed to exist in the result
231     * returned by {@link #getOutputSizes}.</p>
232     *
233     * @param format an image format from either {@link ImageFormat} or {@link PixelFormat}
234     * @return
235     *          {@code true} iff using a {@code surface} with this {@code format} will be
236     *          supported with {@link CameraDevice#createCaptureSession}
237     *
238     * @throws IllegalArgumentException
239     *          if the image format was not a defined named constant
240     *          from either {@link ImageFormat} or {@link PixelFormat}
241     *
242     * @see ImageFormat
243     * @see PixelFormat
244     * @see CameraDevice#createCaptureSession
245     */
246    public boolean isOutputSupportedFor(int format) {
247        checkArgumentFormat(format);
248
249        int internalFormat = imageFormatToInternal(format);
250        int dataspace = imageFormatToDataspace(format);
251        if (dataspace == HAL_DATASPACE_DEPTH) {
252            return mDepthOutputFormats.containsKey(internalFormat);
253        } else {
254            return getFormatsMap(/*output*/true).containsKey(internalFormat);
255        }
256    }
257
258    /**
259     * Determine whether or not output streams can be configured with a particular class
260     * as a consumer.
261     *
262     * <p>The following list is generally usable for outputs:
263     * <ul>
264     * <li>{@link android.media.ImageReader} -
265     * Recommended for image processing or streaming to external resources (such as a file or
266     * network)
267     * <li>{@link android.media.MediaRecorder} -
268     * Recommended for recording video (simple to use)
269     * <li>{@link android.media.MediaCodec} -
270     * Recommended for recording video (more complicated to use, with more flexibility)
271     * <li>{@link android.renderscript.Allocation} -
272     * Recommended for image processing with {@link android.renderscript RenderScript}
273     * <li>{@link android.view.SurfaceHolder} -
274     * Recommended for low-power camera preview with {@link android.view.SurfaceView}
275     * <li>{@link android.graphics.SurfaceTexture} -
276     * Recommended for OpenGL-accelerated preview processing or compositing with
277     * {@link android.view.TextureView}
278     * </ul>
279     * </p>
280     *
281     * <p>Generally speaking this means that creating a {@link Surface} from that class <i>may</i>
282     * provide a producer endpoint that is suitable to be used with
283     * {@link CameraDevice#createCaptureSession}.</p>
284     *
285     * <p>Since not all of the above classes support output of all format and size combinations,
286     * the particular combination should be queried with {@link #isOutputSupportedFor(Surface)}.</p>
287     *
288     * @param klass a non-{@code null} {@link Class} object reference
289     * @return {@code true} if this class is supported as an output, {@code false} otherwise
290     *
291     * @throws NullPointerException if {@code klass} was {@code null}
292     *
293     * @see CameraDevice#createCaptureSession
294     * @see #isOutputSupportedFor(Surface)
295     */
296    public static <T> boolean isOutputSupportedFor(Class<T> klass) {
297        checkNotNull(klass, "klass must not be null");
298
299        if (klass == android.media.ImageReader.class) {
300            return true;
301        } else if (klass == android.media.MediaRecorder.class) {
302            return true;
303        } else if (klass == android.media.MediaCodec.class) {
304            return true;
305        } else if (klass == android.renderscript.Allocation.class) {
306            return true;
307        } else if (klass == android.view.SurfaceHolder.class) {
308            return true;
309        } else if (klass == android.graphics.SurfaceTexture.class) {
310            return true;
311        }
312
313        return false;
314    }
315
316    /**
317     * Determine whether or not the {@code surface} in its current state is suitable to be included
318     * in a {@link CameraDevice#createCaptureSession capture session} as an output.
319     *
320     * <p>Not all surfaces are usable with the {@link CameraDevice}, and not all configurations
321     * of that {@code surface} are compatible. Some classes that provide the {@code surface} are
322     * compatible with the {@link CameraDevice} in general
     * (see {@link #isOutputSupportedFor(Class)}), but it is the caller's responsibility to put the
324     * {@code surface} into a state that will be compatible with the {@link CameraDevice}.</p>
325     *
326     * <p>Reasons for a {@code surface} being specifically incompatible might be:
327     * <ul>
328     * <li>Using a format that's not listed by {@link #getOutputFormats}
329     * <li>Using a format/size combination that's not listed by {@link #getOutputSizes}
     * <li>The {@code surface} itself is not in a state where it can service a new producer.
     * </li>
     * </ul>
     * </p>
333     *
334     * <p>Surfaces from flexible sources will return true even if the exact size of the Surface does
335     * not match a camera-supported size, as long as the format (or class) is supported and the
     * camera device supports a size that is equal to or less than 1080p in that format. If such a
     * Surface is used to create a capture session, it will have its size rounded to the nearest
     * supported size, below or equal to 1080p. Flexible sources include SurfaceView, SurfaceTexture,
339     * and ImageReader.</p>
340     *
341     * <p>This is not an exhaustive list; see the particular class's documentation for further
342     * possible reasons of incompatibility.</p>
343     *
344     * @param surface a non-{@code null} {@link Surface} object reference
345     * @return {@code true} if this is supported, {@code false} otherwise
346     *
347     * @throws NullPointerException if {@code surface} was {@code null}
348     * @throws IllegalArgumentException if the Surface endpoint is no longer valid
349     *
350     * @see CameraDevice#createCaptureSession
351     * @see #isOutputSupportedFor(Class)
352     */
353    public boolean isOutputSupportedFor(Surface surface) {
354        checkNotNull(surface, "surface must not be null");
355
356        Size surfaceSize;
357        int surfaceFormat = -1;
358        try {
359            surfaceSize = LegacyCameraDevice.getSurfaceSize(surface);
360            surfaceFormat = LegacyCameraDevice.detectSurfaceType(surface);
361        } catch(BufferQueueAbandonedException e) {
362            throw new IllegalArgumentException("Abandoned surface", e);
363        }
364
365        // See if consumer is flexible.
366        boolean isFlexible = LegacyCameraDevice.isFlexibleConsumer(surface);
367
368        // Override RGB formats to IMPLEMENTATION_DEFINED, b/9487482
369        if ((surfaceFormat >= LegacyMetadataMapper.HAL_PIXEL_FORMAT_RGBA_8888 &&
370                        surfaceFormat <= LegacyMetadataMapper.HAL_PIXEL_FORMAT_BGRA_8888)) {
371            surfaceFormat = LegacyMetadataMapper.HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
372        }
373
374        for (StreamConfiguration config : mConfigurations) {
375            if (config.getFormat() == surfaceFormat && config.isOutput()) {
376                // Mathing format, either need exact size match, or a flexible consumer
377                // and a size no bigger than MAX_DIMEN_FOR_ROUNDING
378                if (config.getSize().equals(surfaceSize)) {
379                    return true;
380                } else if (isFlexible &&
381                        (config.getSize().getWidth() <= LegacyCameraDevice.MAX_DIMEN_FOR_ROUNDING)) {
382                    return true;
383                }
384            }
385        }
386        return false;
387    }
388
389    /**
390     * Get a list of sizes compatible with {@code klass} to use as an output.
391     *
392     * <p>Since some of the supported classes may support additional formats beyond
393     * an opaque/implementation-defined (under-the-hood) format; this function only returns
394     * sizes for the implementation-defined format.</p>
395     *
396     * <p>Some classes such as {@link android.media.ImageReader} may only support user-defined
397     * formats; in particular {@link #isOutputSupportedFor(Class)} will return {@code true} for
398     * that class and this method will return an empty array (but not {@code null}).</p>
399     *
400     * <p>If a well-defined format such as {@code NV21} is required, use
401     * {@link #getOutputSizes(int)} instead.</p>
402     *
     * <p>The {@code klass} should be a supported output; querying
     * {@link #isOutputSupportedFor(Class)} should return {@code true}.</p>
405     *
406     * @param klass
407     *          a non-{@code null} {@link Class} object reference
408     * @return
409     *          an array of supported sizes for implementation-defined formats,
410     *          or {@code null} iff the {@code klass} is not a supported output
411     *
412     * @throws NullPointerException if {@code klass} was {@code null}
413     *
414     * @see #isOutputSupportedFor(Class)
415     */
416    public <T> Size[] getOutputSizes(Class<T> klass) {
417        // Image reader is "supported", but never for implementation-defined formats; return empty
418        if (android.media.ImageReader.class.isAssignableFrom(klass)) {
419            return new Size[0];
420        }
421
422        if (isOutputSupportedFor(klass) == false) {
423            return null;
424        }
425
426        return getInternalFormatSizes(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
427                HAL_DATASPACE_UNKNOWN,/*output*/true);
428    }
429
430    /**
431     * Get a list of sizes compatible with the requested image {@code format}.
432     *
433     * <p>The {@code format} should be a supported format (one of the formats returned by
434     * {@link #getOutputFormats}).</p>
435     *
436     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
437     * @return
438     *          an array of supported sizes,
439     *          or {@code null} if the {@code format} is not a supported output
440     *
441     * @see ImageFormat
442     * @see PixelFormat
443     * @see #getOutputFormats
444     */
445    public Size[] getOutputSizes(int format) {
446        return getPublicFormatSizes(format, /*output*/true);
447    }
448
449    /**
450     * Get a list of supported high speed video recording sizes.
451     *
452     * <p> When HIGH_SPEED_VIDEO is supported in
453     * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this
454     * method will list the supported high speed video size configurations. All the sizes listed
455     * will be a subset of the sizes reported by {@link #getOutputSizes} for processed non-stalling
456     * formats (typically ImageFormat#YUV_420_888, ImageFormat#NV21, ImageFormat#YV12)</p>
457     *
458     * <p> To enable high speed video recording, application must set
459     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
460     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
461     * requests and select the video size from this method and
462     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
463     * {@link #getHighSpeedVideoFpsRangesFor} to configure the recording and preview streams and
464     * setup the recording requests. For example, if the application intends to do high speed
465     * recording, it can select the maximum size reported by this method to configure output
466     * streams. Note that for the use case of multiple output streams, application must select one
467     * unique size from this method to use. Otherwise a request error might occur. Once the size is
468     * selected, application can get the supported FPS ranges by
469     * {@link #getHighSpeedVideoFpsRangesFor}, and use these FPS ranges to setup the recording
470     * requests.</p>
471     *
472     * @return
473     *          an array of supported high speed video recording sizes
474     *
475     * @see #getHighSpeedVideoFpsRangesFor(Size)
476     */
477    public Size[] getHighSpeedVideoSizes() {
478        Set<Size> keySet = mHighSpeedVideoSizeMap.keySet();
479        return keySet.toArray(new Size[keySet.size()]);
480    }
481
482    /**
483     * Get the frame per second ranges (fpsMin, fpsMax) for input high speed video size.
484     *
485     * <p> See {@link #getHighSpeedVideoSizes} for how to enable high speed recording.</p>
486     *
487     * <p> For normal video recording use case, where some application will NOT set
488     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
489     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
490     * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in
491     * this method must not be used to setup capture requests, or it will cause request error.</p>
492     *
493     * @param size one of the sizes returned by {@link #getHighSpeedVideoSizes()}
494     * @return
495     *          An array of FPS range to use with
496     *          {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE TARGET_FPS_RANGE} when using
497     *          {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene
498     *          mode.
499     *          The upper bound of returned ranges is guaranteed to be larger or equal to 60.
500     *
501     * @throws IllegalArgumentException if input size does not exist in the return value of
502     *         getHighSpeedVideoSizes
503     * @see #getHighSpeedVideoSizes()
504     */
505    public Range<Integer>[] getHighSpeedVideoFpsRangesFor(Size size) {
506        Integer fpsRangeCount = mHighSpeedVideoSizeMap.get(size);
507        if (fpsRangeCount == null || fpsRangeCount == 0) {
508            throw new IllegalArgumentException(String.format(
509                    "Size %s does not support high speed video recording", size));
510        }
511
512        @SuppressWarnings("unchecked")
513        Range<Integer>[] fpsRanges = new Range[fpsRangeCount];
514        int i = 0;
515        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
516            if (size.equals(config.getSize())) {
517                fpsRanges[i++] = config.getFpsRange();
518            }
519        }
520        return fpsRanges;
521    }
522
523    /**
524     * Get a list of supported high speed video recording FPS ranges.
525     *
526     * <p> When HIGH_SPEED_VIDEO is supported in
527     * {@link CameraCharacteristics#CONTROL_AVAILABLE_SCENE_MODES available scene modes}, this
528     * method will list the supported high speed video FPS range configurations. Application can
529     * then use {@link #getHighSpeedVideoSizesFor} to query available sizes for one of returned
530     * FPS range.</p>
531     *
532     * <p> To enable high speed video recording, application must set
533     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
534     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
535     * requests and select the video size from {@link #getHighSpeedVideoSizesFor} and
536     * {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS range} from
537     * this method to configure the recording and preview streams and setup the recording requests.
538     * For example, if the application intends to do high speed recording, it can select one FPS
539     * range reported by this method, query the video sizes corresponding to this FPS range  by
540     * {@link #getHighSpeedVideoSizesFor} and select one of reported sizes to configure output
541     * streams. Note that for the use case of multiple output streams, application must select one
542     * unique size from {@link #getHighSpeedVideoSizesFor}, and use it for all output streams.
543     * Otherwise a request error might occur when attempting to enable
544     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO}.
545     * Once the stream is configured, application can set the FPS range in the recording requests.
546     * </p>
547     *
548     * @return
549     *          an array of supported high speed video recording FPS ranges
550     *          The upper bound of returned ranges is guaranteed to be larger or equal to 60.
551     *
552     * @see #getHighSpeedVideoSizesFor
553     */
554    @SuppressWarnings("unchecked")
555    public Range<Integer>[] getHighSpeedVideoFpsRanges() {
556        Set<Range<Integer>> keySet = mHighSpeedVideoFpsRangeMap.keySet();
557        return keySet.toArray(new Range[keySet.size()]);
558    }
559
560    /**
561     * Get the supported video sizes for input FPS range.
562     *
563     * <p> See {@link #getHighSpeedVideoFpsRanges} for how to enable high speed recording.</p>
564     *
565     * <p> For normal video recording use case, where the application will NOT set
566     * {@link CaptureRequest#CONTROL_SCENE_MODE} to
567     * {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} in capture
568     * requests, the {@link CaptureRequest#CONTROL_AE_TARGET_FPS_RANGE FPS ranges} reported in
569     * this method must not be used to setup capture requests, or it will cause request error.</p>
570     *
571     * @param fpsRange one of the FPS range returned by {@link #getHighSpeedVideoFpsRanges()}
572     * @return
573     *          An array of video sizes to configure output stream when using
574     *          {@link CaptureRequest#CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO HIGH_SPEED_VIDEO} scene
575     *          mode.
576     *
577     * @throws IllegalArgumentException if input FPS range does not exist in the return value of
578     *         getHighSpeedVideoFpsRanges
579     * @see #getHighSpeedVideoFpsRanges()
580     */
581    public Size[] getHighSpeedVideoSizesFor(Range<Integer> fpsRange) {
582        Integer sizeCount = mHighSpeedVideoFpsRangeMap.get(fpsRange);
583        if (sizeCount == null || sizeCount == 0) {
584            throw new IllegalArgumentException(String.format(
585                    "FpsRange %s does not support high speed video recording", fpsRange));
586        }
587
588        Size[] sizes = new Size[sizeCount];
589        int i = 0;
590        for (HighSpeedVideoConfiguration config : mHighSpeedVideoConfigurations) {
591            if (fpsRange.equals(config.getFpsRange())) {
592                sizes[i++] = config.getSize();
593            }
594        }
595        return sizes;
596    }
597
598    /**
599     * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
600     * for the format/size combination (in nanoseconds).
601     *
602     * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
603     * <p>{@code size} should be one of the ones returned by
604     * {@link #getOutputSizes(int)}.</p>
605     *
606     * <p>This should correspond to the frame duration when only that stream is active, with all
607     * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
608     * </p>
609     *
610     * <p>When multiple streams are used in a request, the minimum frame duration will be
611     * {@code max(individual stream min durations)}.</p>
612     *
613     * <p>For devices that do not support manual sensor control
614     * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
615     * this function may return 0.</p>
616     *
617     * <!--
618     * TODO: uncomment after adding input stream support
619     * <p>The minimum frame duration of a stream (of a particular format, size) is the same
620     * regardless of whether the stream is input or output.</p>
621     * -->
622     *
623     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
624     * @param size an output-compatible size
625     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
626     *          0 if the minimum frame duration is not available.
627     *
628     * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
629     * @throws NullPointerException if {@code size} was {@code null}
630     *
631     * @see CaptureRequest#SENSOR_FRAME_DURATION
632     * @see #getOutputStallDuration(int, Size)
633     * @see ImageFormat
634     * @see PixelFormat
635     */
636    public long getOutputMinFrameDuration(int format, Size size) {
637        checkNotNull(size, "size must not be null");
638        checkArgumentFormatSupported(format, /*output*/true);
639
640        return getInternalFormatDuration(imageFormatToInternal(format),
641                imageFormatToDataspace(format),
642                size,
643                DURATION_MIN_FRAME);
644    }
645
646    /**
647     * Get the minimum {@link CaptureRequest#SENSOR_FRAME_DURATION frame duration}
648     * for the class/size combination (in nanoseconds).
649     *
     * <p>This assumes that the {@code klass} is set up to use an implementation-defined format.
651     * For user-defined formats, use {@link #getOutputMinFrameDuration(int, Size)}.</p>
652     *
653     * <p>{@code klass} should be one of the ones which is supported by
654     * {@link #isOutputSupportedFor(Class)}.</p>
655     *
656     * <p>{@code size} should be one of the ones returned by
657     * {@link #getOutputSizes(int)}.</p>
658     *
659     * <p>This should correspond to the frame duration when only that stream is active, with all
660     * processing (typically in {@code android.*.mode}) set to either {@code OFF} or {@code FAST}.
661     * </p>
662     *
663     * <p>When multiple streams are used in a request, the minimum frame duration will be
664     * {@code max(individual stream min durations)}.</p>
665     *
666     * <p>For devices that do not support manual sensor control
667     * ({@link android.hardware.camera2.CameraMetadata#REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR}),
668     * this function may return 0.</p>
669     *
670     * <!--
671     * TODO: uncomment after adding input stream support
672     * <p>The minimum frame duration of a stream (of a particular format, size) is the same
673     * regardless of whether the stream is input or output.</p>
674     * -->
675     *
676     * @param klass
677     *          a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
678     *          non-empty array returned by {@link #getOutputSizes(Class)}
679     * @param size an output-compatible size
680     * @return a minimum frame duration {@code >} 0 in nanoseconds, or
681     *          0 if the minimum frame duration is not available.
682     *
683     * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
684     * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
685     *
686     * @see CaptureRequest#SENSOR_FRAME_DURATION
687     * @see ImageFormat
688     * @see PixelFormat
689     */
690    public <T> long getOutputMinFrameDuration(final Class<T> klass, final Size size) {
691        if (!isOutputSupportedFor(klass)) {
692            throw new IllegalArgumentException("klass was not supported");
693        }
694
695        return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
696                HAL_DATASPACE_UNKNOWN,
697                size, DURATION_MIN_FRAME);
698    }
699
700    /**
701     * Get the stall duration for the format/size combination (in nanoseconds).
702     *
703     * <p>{@code format} should be one of the ones returned by {@link #getOutputFormats()}.</p>
704     * <p>{@code size} should be one of the ones returned by
705     * {@link #getOutputSizes(int)}.</p>
706     *
707     * <p>
708     * A stall duration is how much extra time would get added to the normal minimum frame duration
709     * for a repeating request that has streams with non-zero stall.
710     *
711     * <p>For example, consider JPEG captures which have the following characteristics:
712     *
713     * <ul>
714     * <li>JPEG streams act like processed YUV streams in requests for which they are not included;
715     * in requests in which they are directly referenced, they act as JPEG streams.
716     * This is because supporting a JPEG stream requires the underlying YUV data to always be ready
717     * for use by a JPEG encoder, but the encoder will only be used (and impact frame duration) on
718     * requests that actually reference a JPEG stream.
719     * <li>The JPEG processor can run concurrently to the rest of the camera pipeline, but cannot
720     * process more than 1 capture at a time.
721     * </ul>
722     *
723     * <p>In other words, using a repeating YUV request would result in a steady frame rate
724     * (let's say it's 30 FPS). If a single JPEG request is submitted periodically,
725     * the frame rate will stay at 30 FPS (as long as we wait for the previous JPEG to return each
726     * time). If we try to submit a repeating YUV + JPEG request, then the frame rate will drop from
727     * 30 FPS.</p>
728     *
729     * <p>In general, submitting a new request with a non-0 stall time stream will <em>not</em> cause a
730     * frame rate drop unless there are still outstanding buffers for that stream from previous
731     * requests.</p>
732     *
733     * <p>Submitting a repeating request with streams (call this {@code S}) is the same as setting
734     * the minimum frame duration from the normal minimum frame duration corresponding to {@code S},
735     * added with the maximum stall duration for {@code S}.</p>
736     *
737     * <p>If interleaving requests with and without a stall duration, a request will stall by the
738     * maximum of the remaining times for each can-stall stream with outstanding buffers.</p>
739     *
740     * <p>This means that a stalling request will not have an exposure start until the stall has
741     * completed.</p>
742     *
743     * <p>This should correspond to the stall duration when only that stream is active, with all
744     * processing (typically in {@code android.*.mode}) set to {@code FAST} or {@code OFF}.
745     * Setting any of the processing modes to {@code HIGH_QUALITY} effectively results in an
746     * indeterminate stall duration for all streams in a request (the regular stall calculation
747     * rules are ignored).</p>
748     *
749     * <p>The following formats may always have a stall duration:
750     * <ul>
751     * <li>{@link ImageFormat#JPEG JPEG}
752     * <li>{@link ImageFormat#RAW_SENSOR RAW16}
753     * </ul>
754     * </p>
755     *
756     * <p>The following formats will never have a stall duration:
757     * <ul>
758     * <li>{@link ImageFormat#YUV_420_888 YUV_420_888}
759     * <li>{@link #isOutputSupportedFor(Class) Implementation-Defined}
760     * </ul></p>
761     *
762     * <p>
763     * All other formats may or may not have an allowed stall duration on a per-capability basis;
764     * refer to {@link CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES
     * android.request.availableCapabilities} for more details.</p>
767     *
768     * <p>See {@link CaptureRequest#SENSOR_FRAME_DURATION android.sensor.frameDuration}
769     * for more information about calculating the max frame rate (absent stalls).</p>
770     *
771     * @param format an image format from {@link ImageFormat} or {@link PixelFormat}
772     * @param size an output-compatible size
773     * @return a stall duration {@code >=} 0 in nanoseconds
774     *
775     * @throws IllegalArgumentException if {@code format} or {@code size} was not supported
776     * @throws NullPointerException if {@code size} was {@code null}
777     *
778     * @see CaptureRequest#SENSOR_FRAME_DURATION
779     * @see ImageFormat
780     * @see PixelFormat
781     */
782    public long getOutputStallDuration(int format, Size size) {
783        checkArgumentFormatSupported(format, /*output*/true);
784
785        return getInternalFormatDuration(imageFormatToInternal(format),
786                imageFormatToDataspace(format),
787                size,
788                DURATION_STALL);
789    }
790
791    /**
792     * Get the stall duration for the class/size combination (in nanoseconds).
793     *
     * <p>This assumes that the {@code klass} is set up to use an implementation-defined format.
     * For user-defined formats, use {@link #getOutputStallDuration(int, Size)}.</p>
796     *
797     * <p>{@code klass} should be one of the ones with a non-empty array returned by
798     * {@link #getOutputSizes(Class)}.</p>
799     *
800     * <p>{@code size} should be one of the ones returned by
801     * {@link #getOutputSizes(Class)}.</p>
802     *
803     * <p>See {@link #getOutputStallDuration(int, Size)} for a definition of a
804     * <em>stall duration</em>.</p>
805     *
806     * @param klass
807     *          a class which is supported by {@link #isOutputSupportedFor(Class)} and has a
808     *          non-empty array returned by {@link #getOutputSizes(Class)}
809     * @param size an output-compatible size
     * @return a stall duration {@code >=} 0 in nanoseconds
811     *
812     * @throws IllegalArgumentException if {@code klass} or {@code size} was not supported
813     * @throws NullPointerException if {@code size} or {@code klass} was {@code null}
814     *
815     * @see CaptureRequest#SENSOR_FRAME_DURATION
816     * @see ImageFormat
817     * @see PixelFormat
818     */
819    public <T> long getOutputStallDuration(final Class<T> klass, final Size size) {
820        if (!isOutputSupportedFor(klass)) {
821            throw new IllegalArgumentException("klass was not supported");
822        }
823
824        return getInternalFormatDuration(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
825                HAL_DATASPACE_UNKNOWN, size, DURATION_STALL);
826    }
827
828    /**
829     * Check if this {@link StreamConfigurationMap} is equal to another
830     * {@link StreamConfigurationMap}.
831     *
832     * <p>Two vectors are only equal if and only if each of the respective elements is equal.</p>
833     *
834     * @return {@code true} if the objects were equal, {@code false} otherwise
835     */
836    @Override
837    public boolean equals(final Object obj) {
838        if (obj == null) {
839            return false;
840        }
841        if (this == obj) {
842            return true;
843        }
844        if (obj instanceof StreamConfigurationMap) {
845            final StreamConfigurationMap other = (StreamConfigurationMap) obj;
846            // XX: do we care about order?
847            return Arrays.equals(mConfigurations, other.mConfigurations) &&
848                    Arrays.equals(mMinFrameDurations, other.mMinFrameDurations) &&
849                    Arrays.equals(mStallDurations, other.mStallDurations) &&
850                    Arrays.equals(mHighSpeedVideoConfigurations,
851                            other.mHighSpeedVideoConfigurations);
852        }
853        return false;
854    }
855
856    /**
857     * {@inheritDoc}
858     */
859    @Override
860    public int hashCode() {
861        // XX: do we care about order?
862        return HashCodeHelpers.hashCode(
863                mConfigurations, mMinFrameDurations,
864                mStallDurations, mHighSpeedVideoConfigurations);
865    }
866
867    // Check that the argument is supported by #getOutputFormats or #getInputFormats
868    private int checkArgumentFormatSupported(int format, boolean output) {
869        checkArgumentFormat(format);
870
871        int[] formats = output ? getOutputFormats() : getInputFormats();
872        for (int i = 0; i < formats.length; ++i) {
873            if (format == formats[i]) {
874                return format;
875            }
876        }
877
878        throw new IllegalArgumentException(String.format(
879                "format %x is not supported by this stream configuration map", format));
880    }
881
882    /**
883     * Ensures that the format is either user-defined or implementation defined.
884     *
885     * <p>If a format has a different internal representation than the public representation,
886     * passing in the public representation here will fail.</p>
887     *
888     * <p>For example if trying to use {@link ImageFormat#JPEG}:
889     * it has a different public representation than the internal representation
890     * {@code HAL_PIXEL_FORMAT_BLOB}, this check will fail.</p>
891     *
892     * <p>Any invalid/undefined formats will raise an exception.</p>
893     *
894     * @param format image format
895     * @return the format
896     *
897     * @throws IllegalArgumentException if the format was invalid
898     */
899    static int checkArgumentFormatInternal(int format) {
900        switch (format) {
901            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
902            case HAL_PIXEL_FORMAT_BLOB:
903            case HAL_PIXEL_FORMAT_RAW_OPAQUE:
904            case HAL_PIXEL_FORMAT_Y16:
905                return format;
906            case ImageFormat.JPEG:
907                throw new IllegalArgumentException(
908                        "ImageFormat.JPEG is an unknown internal format");
909            default:
910                return checkArgumentFormat(format);
911        }
912    }
913
914    /**
915     * Ensures that the format is publicly user-defined in either ImageFormat or PixelFormat.
916     *
917     * <p>If a format has a different public representation than the internal representation,
918     * passing in the internal representation here will fail.</p>
919     *
920     * <p>For example if trying to use {@code HAL_PIXEL_FORMAT_BLOB}:
921     * it has a different internal representation than the public representation
922     * {@link ImageFormat#JPEG}, this check will fail.</p>
923     *
924     * <p>Any invalid/undefined formats will raise an exception, including implementation-defined.
925     * </p>
926     *
927     * <p>Note that {@code @hide} and deprecated formats will not pass this check.</p>
928     *
929     * @param format image format
930     * @return the format
931     *
932     * @throws IllegalArgumentException if the format was not user-defined
933     */
934    static int checkArgumentFormat(int format) {
935        if (!ImageFormat.isPublicFormat(format) && !PixelFormat.isPublicFormat(format)) {
936            throw new IllegalArgumentException(String.format(
937                    "format 0x%x was not defined in either ImageFormat or PixelFormat", format));
938        }
939
940        return format;
941    }
942
943    /**
944     * Convert an internal format compatible with {@code graphics.h} into public-visible
945     * {@code ImageFormat}. This assumes the dataspace of the format is not HAL_DATASPACE_DEPTH.
946     *
947     * <p>In particular these formats are converted:
948     * <ul>
949     * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.JPEG
950     * </ul>
951     * </p>
952     *
953     * <p>Passing in an implementation-defined format which has no public equivalent will fail;
954     * as will passing in a public format which has a different internal format equivalent.
955     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
956     *
957     * <p>All other formats are returned as-is, no further invalid check is performed.</p>
958     *
959     * <p>This function is the dual of {@link #imageFormatToInternal} for dataspaces other than
960     * HAL_DATASPACE_DEPTH.</p>
961     *
962     * @param format image format from {@link ImageFormat} or {@link PixelFormat}
963     * @return the converted image formats
964     *
965     * @throws IllegalArgumentException
966     *          if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
967     *          {@link ImageFormat#JPEG}
968     *
969     * @see ImageFormat
970     * @see PixelFormat
971     * @see #checkArgumentFormat
972     */
973    static int imageFormatToPublic(int format) {
974        switch (format) {
975            case HAL_PIXEL_FORMAT_BLOB:
976                return ImageFormat.JPEG;
977            case ImageFormat.JPEG:
978                throw new IllegalArgumentException(
979                        "ImageFormat.JPEG is an unknown internal format");
980            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
981                throw new IllegalArgumentException(
982                        "IMPLEMENTATION_DEFINED must not leak to public API");
983            default:
984                return format;
985        }
986    }
987
988    /**
989     * Convert an internal format compatible with {@code graphics.h} into public-visible
990     * {@code ImageFormat}. This assumes the dataspace of the format is HAL_DATASPACE_DEPTH.
991     *
992     * <p>In particular these formats are converted:
993     * <ul>
994     * <li>HAL_PIXEL_FORMAT_BLOB => ImageFormat.DEPTH_POINT_CLOUD
995     * <li>HAL_PIXEL_FORMAT_Y16 => ImageFormat.DEPTH16
996     * </ul>
997     * </p>
998     *
999     * <p>Passing in an implementation-defined format which has no public equivalent will fail;
1000     * as will passing in a public format which has a different internal format equivalent.
1001     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1002     *
1003     * <p>All other formats are returned as-is, no further invalid check is performed.</p>
1004     *
1005     * <p>This function is the dual of {@link #imageFormatToInternal} for formats associated with
1006     * HAL_DATASPACE_DEPTH.</p>
1007     *
1008     * @param format image format from {@link ImageFormat} or {@link PixelFormat}
1009     * @return the converted image formats
1010     *
1011     * @throws IllegalArgumentException
1012     *          if {@code format} is {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED} or
1013     *          {@link ImageFormat#JPEG}
1014     *
1015     * @see ImageFormat
1016     * @see PixelFormat
1017     * @see #checkArgumentFormat
1018     */
1019    static int depthFormatToPublic(int format) {
1020        switch (format) {
1021            case HAL_PIXEL_FORMAT_BLOB:
1022                return ImageFormat.DEPTH_POINT_CLOUD;
1023            case HAL_PIXEL_FORMAT_Y16:
1024                return ImageFormat.DEPTH16;
1025            case ImageFormat.JPEG:
1026                throw new IllegalArgumentException(
1027                        "ImageFormat.JPEG is an unknown internal format");
1028            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
1029                throw new IllegalArgumentException(
1030                        "IMPLEMENTATION_DEFINED must not leak to public API");
1031            default:
1032                throw new IllegalArgumentException(
1033                        "Unknown DATASPACE_DEPTH format " + format);
1034        }
1035    }
1036
1037    /**
1038     * Convert image formats from internal to public formats (in-place).
1039     *
1040     * @param formats an array of image formats
1041     * @return {@code formats}
1042     *
1043     * @see #imageFormatToPublic
1044     */
1045    static int[] imageFormatToPublic(int[] formats) {
1046        if (formats == null) {
1047            return null;
1048        }
1049
1050        for (int i = 0; i < formats.length; ++i) {
1051            formats[i] = imageFormatToPublic(formats[i]);
1052        }
1053
1054        return formats;
1055    }
1056
1057    /**
1058     * Convert a public format compatible with {@code ImageFormat} to an internal format
1059     * from {@code graphics.h}.
1060     *
1061     * <p>In particular these formats are converted:
1062     * <ul>
1063     * <li>ImageFormat.JPEG => HAL_PIXEL_FORMAT_BLOB
1064     * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_PIXEL_FORMAT_BLOB
1065     * <li>ImageFormat.DEPTH16 => HAL_PIXEL_FORMAT_Y16
1066     * </ul>
1067     * </p>
1068     *
1069     * <p>Passing in an implementation-defined format here will fail (it's not a public format);
1070     * as will passing in an internal format which has a different public format equivalent.
1071     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1072     *
1073     * <p>All other formats are returned as-is, no invalid check is performed.</p>
1074     *
1075     * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1076     *
1077     * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1078     * @return the converted image formats
1079     *
1080     * @see ImageFormat
1081     * @see PixelFormat
1082     *
1083     * @throws IllegalArgumentException
1084     *              if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1085     */
1086    static int imageFormatToInternal(int format) {
1087        switch (format) {
1088            case ImageFormat.JPEG:
1089            case ImageFormat.DEPTH_POINT_CLOUD:
1090                return HAL_PIXEL_FORMAT_BLOB;
1091            case ImageFormat.DEPTH16:
1092                return HAL_PIXEL_FORMAT_Y16;
1093            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
1094                throw new IllegalArgumentException(
1095                        "IMPLEMENTATION_DEFINED is not allowed via public API");
1096            default:
1097                return format;
1098        }
1099    }
1100
1101    /**
1102     * Convert a public format compatible with {@code ImageFormat} to an internal dataspace
1103     * from {@code graphics.h}.
1104     *
1105     * <p>In particular these formats are converted:
1106     * <ul>
1107     * <li>ImageFormat.JPEG => HAL_DATASPACE_JFIF
1108     * <li>ImageFormat.DEPTH_POINT_CLOUD => HAL_DATASPACE_DEPTH
1109     * <li>ImageFormat.DEPTH16 => HAL_DATASPACE_DEPTH
1110     * <li>others => HAL_DATASPACE_UNKNOWN
1111     * </ul>
1112     * </p>
1113     *
1114     * <p>Passing in an implementation-defined format here will fail (it's not a public format);
1115     * as will passing in an internal format which has a different public format equivalent.
1116     * See {@link #checkArgumentFormat} for more details about a legal public format.</p>
1117     *
1118     * <p>All other formats are returned as-is, no invalid check is performed.</p>
1119     *
1120     * <p>This function is the dual of {@link #imageFormatToPublic}.</p>
1121     *
1122     * @param format public image format from {@link ImageFormat} or {@link PixelFormat}
1123     * @return the converted image formats
1124     *
1125     * @see ImageFormat
1126     * @see PixelFormat
1127     *
1128     * @throws IllegalArgumentException
1129     *              if {@code format} was {@code HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}
1130     */
1131    static int imageFormatToDataspace(int format) {
1132        switch (format) {
1133            case ImageFormat.JPEG:
1134                return HAL_DATASPACE_JFIF;
1135            case ImageFormat.DEPTH_POINT_CLOUD:
1136            case ImageFormat.DEPTH16:
1137                return HAL_DATASPACE_DEPTH;
1138            default:
1139                return HAL_DATASPACE_UNKNOWN;
1140        }
1141    }
1142
1143    /**
1144     * Convert image formats from public to internal formats (in-place).
1145     *
1146     * @param formats an array of image formats
1147     * @return {@code formats}
1148     *
1149     * @see #imageFormatToInternal
1150     *
1151     * @hide
1152     */
1153    public static int[] imageFormatToInternal(int[] formats) {
1154        if (formats == null) {
1155            return null;
1156        }
1157
1158        for (int i = 0; i < formats.length; ++i) {
1159            formats[i] = imageFormatToInternal(formats[i]);
1160        }
1161
1162        return formats;
1163    }
1164
1165    private Size[] getPublicFormatSizes(int format, boolean output) {
1166        try {
1167            checkArgumentFormatSupported(format, output);
1168        } catch (IllegalArgumentException e) {
1169            return null;
1170        }
1171
1172        int internalFormat = imageFormatToInternal(format);
1173        int dataspace = imageFormatToDataspace(format);
1174
1175        return getInternalFormatSizes(internalFormat, dataspace, output);
1176    }
1177
1178    private Size[] getInternalFormatSizes(int format, int dataspace, boolean output) {
1179
1180        HashMap<Integer, Integer> formatsMap =
1181                (dataspace == HAL_DATASPACE_DEPTH) ? mDepthOutputFormats : getFormatsMap(output);
1182
1183        Integer sizesCount = formatsMap.get(format);
1184        if (sizesCount == null) {
1185            throw new IllegalArgumentException("format not available");
1186        }
1187
1188        int len = sizesCount;
1189        Size[] sizes = new Size[len];
1190        int sizeIndex = 0;
1191
1192        StreamConfiguration[] configurations =
1193                (dataspace == HAL_DATASPACE_DEPTH) ? mDepthConfigurations : mConfigurations;
1194
1195
1196        for (StreamConfiguration config : configurations) {
1197            if (config.getFormat() == format && config.isOutput() == output) {
1198                sizes[sizeIndex++] = config.getSize();
1199            }
1200        }
1201
1202        if (sizeIndex != len) {
1203            throw new AssertionError(
1204                    "Too few sizes (expected " + len + ", actual " + sizeIndex + ")");
1205        }
1206
1207        return sizes;
1208    }
1209
1210    /** Get the list of publically visible output formats; does not include IMPL_DEFINED */
1211    private int[] getPublicFormats(boolean output) {
1212        int[] formats = new int[getPublicFormatCount(output)];
1213
1214        int i = 0;
1215
1216        for (int format : getFormatsMap(output).keySet()) {
1217            if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
1218                format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
1219                formats[i++] = imageFormatToPublic(format);
1220            }
1221        }
1222        if (output) {
1223            for (int format : mDepthOutputFormats.keySet()) {
1224                formats[i++] = depthFormatToPublic(format);
1225            }
1226        }
1227        if (formats.length != i) {
1228            throw new AssertionError("Too few formats " + i + ", expected " + formats.length);
1229        }
1230
1231        return formats;
1232    }
1233
1234    /** Get the format -> size count map for either output or input formats */
1235    private HashMap<Integer, Integer> getFormatsMap(boolean output) {
1236        return output ? mOutputFormats : mInputFormats;
1237    }
1238
1239    private long getInternalFormatDuration(int format, int dataspace, Size size, int duration) {
1240        // assume format is already checked, since its internal
1241
1242        if (!arrayContains(getInternalFormatSizes(format, dataspace, /*output*/true), size)) {
1243            throw new IllegalArgumentException("size was not supported");
1244        }
1245
1246        StreamConfigurationDuration[] durations = getDurations(duration, dataspace);
1247
1248        for (StreamConfigurationDuration configurationDuration : durations) {
1249            if (configurationDuration.getFormat() == format &&
1250                    configurationDuration.getWidth() == size.getWidth() &&
1251                    configurationDuration.getHeight() == size.getHeight()) {
1252                return configurationDuration.getDuration();
1253            }
1254        }
1255        // Default duration is '0' (unsupported/no extra stall)
1256        return 0;
1257    }
1258
1259    /**
1260     * Get the durations array for the kind of duration
1261     *
1262     * @see #DURATION_MIN_FRAME
1263     * @see #DURATION_STALL
1264     * */
1265    private StreamConfigurationDuration[] getDurations(int duration, int dataspace) {
1266        switch (duration) {
1267            case DURATION_MIN_FRAME:
1268                return (dataspace == HAL_DATASPACE_DEPTH) ?
1269                        mDepthMinFrameDurations : mMinFrameDurations;
1270            case DURATION_STALL:
1271                return (dataspace == HAL_DATASPACE_DEPTH) ?
1272                        mDepthStallDurations : mStallDurations;
1273            default:
1274                throw new IllegalArgumentException("duration was invalid");
1275        }
1276    }
1277
1278    /** Count the number of publicly-visible output formats */
1279    private int getPublicFormatCount(boolean output) {
1280        HashMap<Integer, Integer> formatsMap = getFormatsMap(output);
1281
1282        int size = formatsMap.size();
1283        if (formatsMap.containsKey(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) {
1284            size -= 1;
1285        }
1286        if (formatsMap.containsKey(HAL_PIXEL_FORMAT_RAW_OPAQUE)) {
1287            size -= 1;
1288        }
1289        if (output) {
1290            size += mDepthOutputFormats.size();
1291        }
1292
1293        return size;
1294    }
1295
1296    private static <T> boolean arrayContains(T[] array, T element) {
1297        if (array == null) {
1298            return false;
1299        }
1300
1301        for (T el : array) {
1302            if (Objects.equals(el, element)) {
1303                return true;
1304            }
1305        }
1306
1307        return false;
1308    }
1309
    // from system/core/include/system/graphics.h
    private static final int HAL_PIXEL_FORMAT_BLOB = 0x21;
    private static final int HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22;
    private static final int HAL_PIXEL_FORMAT_RAW_OPAQUE = 0x24;
    private static final int HAL_PIXEL_FORMAT_Y16 = 0x20363159;

    // HAL dataspace constants (also from system/graphics.h); HAL_DATASPACE_DEPTH selects
    // the depth-specific configuration/duration tables below.
    private static final int HAL_DATASPACE_UNKNOWN = 0x0;
    private static final int HAL_DATASPACE_JFIF = 0x101;
    private static final int HAL_DATASPACE_DEPTH = 0x1000;

    /**
     * @see #getDurations(int, int)
     */
    // Selector values for getDurations(): minimum-frame-duration vs. stall-duration table.
    private static final int DURATION_MIN_FRAME = 0;
    private static final int DURATION_STALL = 1;

    // Stream configurations and their per-size duration tables, as published by the HAL.
    private final StreamConfiguration[] mConfigurations;
    private final StreamConfigurationDuration[] mMinFrameDurations;
    private final StreamConfigurationDuration[] mStallDurations;

    // Depth-dataspace analogues of the three arrays above.
    private final StreamConfiguration[] mDepthConfigurations;
    private final StreamConfigurationDuration[] mDepthMinFrameDurations;
    private final StreamConfigurationDuration[] mDepthStallDurations;

    private final HighSpeedVideoConfiguration[] mHighSpeedVideoConfigurations;

    /** ImageFormat -> num output sizes mapping */
    private final HashMap</*ImageFormat*/Integer, /*Count*/Integer> mOutputFormats =
            new HashMap<Integer, Integer>();
    /** ImageFormat -> num input sizes mapping */
    private final HashMap</*ImageFormat*/Integer, /*Count*/Integer> mInputFormats =
            new HashMap<Integer, Integer>();
    /** ImageFormat -> num depth output sizes mapping */
    private final HashMap</*ImageFormat*/Integer, /*Count*/Integer> mDepthOutputFormats =
            new HashMap<Integer, Integer>();
    /** High speed video Size -> FPS range count mapping*/
    private final HashMap</*HighSpeedVideoSize*/Size, /*Count*/Integer> mHighSpeedVideoSizeMap =
            new HashMap<Size, Integer>();
    /** High speed video FPS range -> Size count mapping*/
    private final HashMap</*HighSpeedVideoFpsRange*/Range<Integer>, /*Count*/Integer>
            mHighSpeedVideoFpsRangeMap = new HashMap<Range<Integer>, Integer>();
1351
1352}
1353