TextToSpeech.java revision 795d777ee13405d8b6ba6c889ea3ef49713892a8
1/*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
5 * use this file except in compliance with the License. You may obtain a copy of
6 * the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 * License for the specific language governing permissions and limitations under
14 * the License.
15 */
16package android.speech.tts;
17
18import android.annotation.SdkConstant;
19import android.annotation.SdkConstant.SdkConstantType;
20import android.content.ComponentName;
21import android.content.ContentResolver;
22import android.content.Context;
23import android.content.Intent;
24import android.content.ServiceConnection;
25import android.media.AudioManager;
26import android.net.Uri;
27import android.os.AsyncTask;
28import android.os.Bundle;
29import android.os.IBinder;
30import android.os.ParcelFileDescriptor;
31import android.os.RemoteException;
32import android.provider.Settings;
33import android.text.TextUtils;
34import android.util.Log;
35
36import java.io.File;
37import java.io.FileNotFoundException;
38import java.io.IOException;
39import java.util.Collections;
40import java.util.HashMap;
41import java.util.HashSet;
42import java.util.List;
43import java.util.Locale;
44import java.util.Map;
45import java.util.MissingResourceException;
46import java.util.Set;
47
48/**
49 *
50 * Synthesizes speech from text for immediate playback or to create a sound file.
51 * <p>A TextToSpeech instance can only be used to synthesize text once it has completed its
52 * initialization. Implement the {@link TextToSpeech.OnInitListener} to be
53 * notified of the completion of the initialization.<br>
54 * When you are done using the TextToSpeech instance, call the {@link #shutdown()} method
55 * to release the native resources used by the TextToSpeech engine.
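 * <p>A minimal usage sketch (the field name {@code mTts} and the spoken string are
 * illustrative, not part of the API):
 * <pre>{@code
 * private TextToSpeech mTts;
 *
 * void createTts(Context context) {
 *     mTts = new TextToSpeech(context, new TextToSpeech.OnInitListener() {
 *         public void onInit(int status) {
 *             if (status == TextToSpeech.SUCCESS) {
 *                 mTts.speak("Hello", TextToSpeech.QUEUE_FLUSH, null, "utterance-1");
 *             }
 *         }
 *     });
 * }
 *
 * // When done, e.g. in Activity#onDestroy():
 * void releaseTts() {
 *     if (mTts != null) {
 *         mTts.shutdown();
 *     }
 * }
 * }</pre>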
56 */
57public class TextToSpeech {
58
59    private static final String TAG = "TextToSpeech";
60
61    /**
62     * Denotes a successful operation.
63     */
64    public static final int SUCCESS = 0;
65    /**
66     * Denotes a generic operation failure.
67     */
68    public static final int ERROR = -1;
69
70    /**
71     * Denotes a stop requested by a client. It is used only on the service side of the API;
72     * clients should never expect to see this result code.
73     */
74    public static final int STOPPED = -2;
75
76    /**
77     * Denotes a failure of a TTS engine to synthesize the given input.
78     */
79    public static final int ERROR_SYNTHESIS = -3;
80
81    /**
82     * Denotes a failure of a TTS service.
83     */
84    public static final int ERROR_SERVICE = -4;
85
86    /**
87     * Denotes a failure related to the output (audio device or a file).
88     */
89    public static final int ERROR_OUTPUT = -5;
90
91    /**
92     * Denotes a failure caused by network connectivity problems.
93     */
94    public static final int ERROR_NETWORK = -6;
95
96    /**
97     * Denotes a failure caused by network timeout.
98     */
99    public static final int ERROR_NETWORK_TIMEOUT = -7;
100
101    /**
102     * Denotes a failure caused by an invalid request.
103     */
104    public static final int ERROR_INVALID_REQUEST = -8;
105
106    /**
107     * Queue mode where all entries in the playback queue (media to be played
108     * and text to be synthesized) are dropped and replaced by the new entry.
109     * Queues are flushed with respect to a given calling app. Entries in the queue
110     * from other callers are not discarded.
111     */
112    public static final int QUEUE_FLUSH = 0;
113    /**
114     * Queue mode where the new entry is added at the end of the playback queue.
115     */
116    public static final int QUEUE_ADD = 1;
117    /**
118     * Queue mode where the entire playback queue is purged. This is different
119     * from {@link #QUEUE_FLUSH} in that all entries are purged, not just entries
120     * from a given caller.
121     *
122     * @hide
123     */
124    static final int QUEUE_DESTROY = 2;
125
126    /**
127     * Denotes the language is available exactly as specified by the locale.
128     */
129    public static final int LANG_COUNTRY_VAR_AVAILABLE = 2;
130
131    /**
132     * Denotes the language is available for the language and country specified
133     * by the locale, but not the variant.
134     */
135    public static final int LANG_COUNTRY_AVAILABLE = 1;
136
137    /**
138     * Denotes the language is available for the language specified by the locale,
139     * but not the country and variant.
140     */
141    public static final int LANG_AVAILABLE = 0;
142
143    /**
144     * Denotes the language data is missing.
145     */
146    public static final int LANG_MISSING_DATA = -1;
147
148    /**
149     * Denotes the language is not supported.
150     */
151    public static final int LANG_NOT_SUPPORTED = -2;
152
153    /**
154     * Broadcast Action: The TextToSpeech synthesizer has completed processing
155     * of all the text in the speech queue.
156     *
157     * Note that this notifies callers when the <b>engine</b> has finished
158     * processing text data. Audio playback might not have completed (or even started)
159     * at this point. If you wish to be notified when this happens, see
160     * {@link OnUtteranceCompletedListener}.
161     */
162    @SdkConstant(SdkConstantType.BROADCAST_INTENT_ACTION)
163    public static final String ACTION_TTS_QUEUE_PROCESSING_COMPLETED =
164            "android.speech.tts.TTS_QUEUE_PROCESSING_COMPLETED";
165
166    /**
167     * Interface definition of a callback to be invoked indicating the completion of the
168     * TextToSpeech engine initialization.
169     */
170    public interface OnInitListener {
171        /**
172         * Called to signal the completion of the TextToSpeech engine initialization.
173         *
174         * @param status {@link TextToSpeech#SUCCESS} or {@link TextToSpeech#ERROR}.
175         */
176        public void onInit(int status);
177    }
178
179    /**
180     * Listener that will be called when the TTS service has
181     * completed synthesizing an utterance. This is only called if the utterance
182     * has an utterance ID (see {@link TextToSpeech.Engine#KEY_PARAM_UTTERANCE_ID}).
183     *
184     * @deprecated Use {@link UtteranceProgressListener} instead.
185     */
186    @Deprecated
187    public interface OnUtteranceCompletedListener {
188        /**
189         * Called when an utterance has been synthesized.
190         *
191         * @param utteranceId the identifier of the utterance.
192         */
193        public void onUtteranceCompleted(String utteranceId);
194    }
195
196    /**
197     * Constants and parameter names for controlling text-to-speech. These include:
198     *
199     * <ul>
200     *     <li>
201     *         Intents to ask engine to install data or check its data and
202     *         extras for a TTS engine's check data activity.
203     *     </li>
204     *     <li>
205     *         Keys for the parameters passed with speak commands, e.g.
206     *         {@link Engine#KEY_PARAM_UTTERANCE_ID}, {@link Engine#KEY_PARAM_STREAM}.
207     *     </li>
208     *     <li>
209     *         A list of feature strings that engines might support, e.g.
210     *         {@link Engine#KEY_FEATURE_NETWORK_SYNTHESIS}. These values may be passed in to
211     *         {@link TextToSpeech#speak} and {@link TextToSpeech#synthesizeToFile} to modify
212     *         engine behaviour. The engine can be queried for the set of features it supports
213     *         through {@link TextToSpeech#getFeatures(java.util.Locale)}.
214     *     </li>
215     * </ul>
216     */
217    public class Engine {
218
219        /**
220         * Default speech rate.
221         * @hide
222         */
223        public static final int DEFAULT_RATE = 100;
224
225        /**
226         * Default pitch.
227         * @hide
228         */
229        public static final int DEFAULT_PITCH = 100;
230
231        /**
232         * Default volume.
233         * @hide
234         */
235        public static final float DEFAULT_VOLUME = 1.0f;
236
237        /**
238         * Default pan (centered).
239         * @hide
240         */
241        public static final float DEFAULT_PAN = 0.0f;
242
243        /**
244         * Default value for {@link Settings.Secure#TTS_USE_DEFAULTS}.
245         * @hide
246         */
247        public static final int USE_DEFAULTS = 0; // false
248
249        /**
250         * Package name of the default TTS engine.
251         *
252         * @hide
253         * @deprecated No longer in use; the default engine is determined by
254         *         the sort order defined in {@link TtsEngines}. Note that
255         *         this doesn't "break" anything because there is no guarantee that
256         *         the engine specified below is installed on a given build, let
257         *         alone that it is the default.
258         */
259        @Deprecated
260        public static final String DEFAULT_ENGINE = "com.svox.pico";
261
262        /**
263         * Default audio stream used when playing synthesized speech.
264         */
265        public static final int DEFAULT_STREAM = AudioManager.STREAM_MUSIC;
266
267        /**
268         * Indicates success when checking the installation status of the resources used by the
269         * TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
270         */
271        public static final int CHECK_VOICE_DATA_PASS = 1;
272
273        /**
274         * Indicates failure when checking the installation status of the resources used by the
275         * TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
276         */
277        public static final int CHECK_VOICE_DATA_FAIL = 0;
278
279        /**
280         * Indicates erroneous data when checking the installation status of the resources used by
281         * the TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
282         *
283         * @deprecated Use CHECK_VOICE_DATA_FAIL instead.
284         */
285        @Deprecated
286        public static final int CHECK_VOICE_DATA_BAD_DATA = -1;
287
288        /**
289         * Indicates missing resources when checking the installation status of the resources used
290         * by the TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
291         *
292         * @deprecated Use CHECK_VOICE_DATA_FAIL instead.
293         */
294        @Deprecated
295        public static final int CHECK_VOICE_DATA_MISSING_DATA = -2;
296
297        /**
298         * Indicates missing storage volume when checking the installation status of the resources
299         * used by the TextToSpeech engine with the {@link #ACTION_CHECK_TTS_DATA} intent.
300         *
301         * @deprecated Use CHECK_VOICE_DATA_FAIL instead.
302         */
303        @Deprecated
304        public static final int CHECK_VOICE_DATA_MISSING_VOLUME = -3;
305
306        /**
307         * Intent for starting a TTS service. Services that handle this intent must
308         * extend {@link TextToSpeechService}. Normal applications should not use this intent
309         * directly; instead they should talk to the TTS service using the methods in this
310         * class.
311         */
312        @SdkConstant(SdkConstantType.SERVICE_ACTION)
313        public static final String INTENT_ACTION_TTS_SERVICE =
314                "android.intent.action.TTS_SERVICE";
315
316        /**
317         * Name under which a text to speech engine publishes information about itself.
318         * This meta-data should reference an XML resource containing a
319         * <code>&lt;{@link android.R.styleable#TextToSpeechEngine tts-engine}&gt;</code>
320         * tag.
321         */
322        public static final String SERVICE_META_DATA = "android.speech.tts";
323
324        // intents to ask engine to install data or check its data
325        /**
326         * Activity Action: Triggers the platform TextToSpeech engine to
327         * start the activity that installs the resource files on the device
328         * that are required for TTS to be operational. Since the installation
329         * of the data can be interrupted or declined by the user, the application
330         * shouldn't expect successful installation upon return from that intent,
331         * and if need be, should check installation status with
332         * {@link #ACTION_CHECK_TTS_DATA}.
333         */
334        @SdkConstant(SdkConstantType.ACTIVITY_INTENT_ACTION)
335        public static final String ACTION_INSTALL_TTS_DATA =
336                "android.speech.tts.engine.INSTALL_TTS_DATA";
337
338        /**
339         * Broadcast Action: broadcast to signal a change in the list of available
340         * languages and/or their features.
341         */
342        @SdkConstant(SdkConstantType.BROADCAST_INTENT_ACTION)
343        public static final String ACTION_TTS_DATA_INSTALLED =
344                "android.speech.tts.engine.TTS_DATA_INSTALLED";
345
346        /**
347         * Activity Action: Starts the activity from the platform TextToSpeech
348         * engine to verify the proper installation and availability of the
349         * resource files on the system. Upon completion, the activity will
350         * return one of the following codes:
351         * {@link #CHECK_VOICE_DATA_PASS} or
352         * {@link #CHECK_VOICE_DATA_FAIL}.
353         * <p> Moreover, the data received in the activity result will contain the following
354         * fields:
355         * <ul>
356         *   <li>{@link #EXTRA_AVAILABLE_VOICES} which contains an ArrayList<String> of all the
357         *   available voices. The format of each voice is: lang-COUNTRY-variant where COUNTRY and
358         *   variant are optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").</li>
359         *   <li>{@link #EXTRA_UNAVAILABLE_VOICES} which contains an ArrayList<String> of all the
360         *   unavailable voices (ones that the user can install). The format of each voice is:
361         *   lang-COUNTRY-variant where COUNTRY and variant are optional (i.e., "eng" or
362         *   "eng-USA" or "eng-USA-FEMALE").</li>
363         * </ul>
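         * <p>A sketch of how a client might use this intent (the request code constant is
         * illustrative):
         * <pre>{@code
         * // In an Activity:
         * Intent checkIntent = new Intent(TextToSpeech.Engine.ACTION_CHECK_TTS_DATA);
         * startActivityForResult(checkIntent, MY_DATA_CHECK_CODE);
         *
         * protected void onActivityResult(int requestCode, int resultCode, Intent data) {
         *     if (requestCode == MY_DATA_CHECK_CODE) {
         *         if (resultCode == TextToSpeech.Engine.CHECK_VOICE_DATA_PASS) {
         *             // Voice data is installed; it is safe to create a TextToSpeech instance.
         *         } else {
         *             // Ask the engine to install the missing data.
         *             startActivity(new Intent(TextToSpeech.Engine.ACTION_INSTALL_TTS_DATA));
         *         }
         *     }
         * }
         * }</pre>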
364         */
365        @SdkConstant(SdkConstantType.ACTIVITY_INTENT_ACTION)
366        public static final String ACTION_CHECK_TTS_DATA =
367                "android.speech.tts.engine.CHECK_TTS_DATA";
368
369        /**
370         * Activity intent for getting some sample text to use for demonstrating TTS. A specific
371         * locale has to be requested by passing the following extra parameters:
372         * <ul>
373         *   <li>language</li>
374         *   <li>country</li>
375         *   <li>variant</li>
376         * </ul>
377         *
378         * Upon completion, the activity result may contain the following fields:
379         * <ul>
380         *   <li>{@link #EXTRA_SAMPLE_TEXT} which contains a String with sample text.</li>
381         * </ul>
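         * <p>A sketch of requesting sample text for US English (the request code constant is
         * illustrative; the extra names match the list above):
         * <pre>{@code
         * Intent intent = new Intent(TextToSpeech.Engine.ACTION_GET_SAMPLE_TEXT);
         * intent.putExtra("language", "eng");
         * intent.putExtra("country", "USA");
         * intent.putExtra("variant", "");
         * startActivityForResult(intent, GET_SAMPLE_TEXT_CODE);
         * // The sample text is returned in onActivityResult() under EXTRA_SAMPLE_TEXT.
         * }</pre>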
382         */
383        @SdkConstant(SdkConstantType.ACTIVITY_INTENT_ACTION)
384        public static final String ACTION_GET_SAMPLE_TEXT =
385                "android.speech.tts.engine.GET_SAMPLE_TEXT";
386
387        /**
388         * Extra information received with the {@link #ACTION_GET_SAMPLE_TEXT} intent result where
389         * the TextToSpeech engine returns a String with sample text for the requested voice.
390         */
391        public static final String EXTRA_SAMPLE_TEXT = "sampleText";
392
393
394        // extras for a TTS engine's check data activity
395        /**
396         * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
397         * the TextToSpeech engine returns an ArrayList<String> of all the available voices.
398         * The format of each voice is: lang-COUNTRY-variant where COUNTRY and variant are
399         * optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").
400         */
401        public static final String EXTRA_AVAILABLE_VOICES = "availableVoices";
402
403        /**
404         * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
405         * the TextToSpeech engine returns an ArrayList<String> of all the unavailable voices.
406         * The format of each voice is: lang-COUNTRY-variant where COUNTRY and variant are
407         * optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").
408         */
409        public static final String EXTRA_UNAVAILABLE_VOICES = "unavailableVoices";
410
411        /**
412         * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
413         * the TextToSpeech engine specifies the path to its resources.
414         *
415         * It may be used by language packages to find out where to put their data.
416         *
417         * @deprecated TTS engine implementation detail; this information is of no use to
418         * text-to-speech API clients.
419         */
420        @Deprecated
421        public static final String EXTRA_VOICE_DATA_ROOT_DIRECTORY = "dataRoot";
422
423        /**
424         * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
425         * the TextToSpeech engine specifies the file names of its resources under the
426         * resource path.
427         *
428         * @deprecated TTS engine implementation detail; this information is of no use to
429         * text-to-speech API clients.
430         */
431        @Deprecated
432        public static final String EXTRA_VOICE_DATA_FILES = "dataFiles";
433
434        /**
435         * Extra information received with the {@link #ACTION_CHECK_TTS_DATA} intent result where
436         * the TextToSpeech engine specifies the locale associated with each resource file.
437         *
438         * @deprecated TTS engine implementation detail; this information is of no use to
439         * text-to-speech API clients.
440         */
441        @Deprecated
442        public static final String EXTRA_VOICE_DATA_FILES_INFO = "dataFilesInfo";
443
444        /**
445         * Extra information sent with the {@link #ACTION_CHECK_TTS_DATA} intent where the
446         * caller indicates to the TextToSpeech engine which specific sets of voice data to
447         * check for by sending an ArrayList<String> of the voices that are of interest.
448         * The format of each voice is: lang-COUNTRY-variant where COUNTRY and variant are
449         * optional (i.e., "eng" or "eng-USA" or "eng-USA-FEMALE").
450         *
451         * @deprecated Redundant functionality; checking for the existence of specific sets of
452         * voice data can be done on the client side.
453         */
454        @Deprecated
455        public static final String EXTRA_CHECK_VOICE_DATA_FOR = "checkVoiceDataFor";
456
457        // extras for a TTS engine's data installation
458        /**
459         * Extra information received with the {@link #ACTION_TTS_DATA_INSTALLED} intent result.
460         * It indicates whether the data files for the synthesis engine were successfully
461         * installed. The installation was initiated with the  {@link #ACTION_INSTALL_TTS_DATA}
462         * intent. The possible values for this extra are
463         * {@link TextToSpeech#SUCCESS} and {@link TextToSpeech#ERROR}.
464         *
465         * @deprecated No longer in use. If the client is interested in information about what
466         * changed, it should send an {@link #ACTION_CHECK_TTS_DATA} intent to discover available voices.
467         */
468        @Deprecated
469        public static final String EXTRA_TTS_DATA_INSTALLED = "dataInstalled";
470
471        // keys for the parameters passed with speak commands. Hidden keys are used internally
472        // to maintain engine state for each TextToSpeech instance.
473        /**
474         * @hide
475         */
476        public static final String KEY_PARAM_RATE = "rate";
477
478        /**
479         * @hide
480         */
481        public static final String KEY_PARAM_LANGUAGE = "language";
482
483        /**
484         * @hide
485         */
486        public static final String KEY_PARAM_COUNTRY = "country";
487
488        /**
489         * @hide
490         */
491        public static final String KEY_PARAM_VARIANT = "variant";
492
493        /**
494         * @hide
495         */
496        public static final String KEY_PARAM_ENGINE = "engine";
497
498        /**
499         * @hide
500         */
501        public static final String KEY_PARAM_PITCH = "pitch";
502
503        /**
504         * Parameter key to specify the audio stream type to be used when speaking text
505         * or playing back a file. The value should be one of the STREAM_ constants
506         * defined in {@link AudioManager}.
507         *
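         * <p>For example, to speak on the alarm stream (a sketch; the {@code tts} variable is
         * illustrative, and parameter values are passed as strings):
         * <pre>{@code
         * HashMap<String, String> params = new HashMap<String, String>();
         * params.put(TextToSpeech.Engine.KEY_PARAM_STREAM,
         *         String.valueOf(AudioManager.STREAM_ALARM));
         * tts.speak("Wake up", TextToSpeech.QUEUE_FLUSH, params);
         * }</pre>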
508         * @see TextToSpeech#speak(String, int, HashMap)
509         * @see TextToSpeech#playEarcon(String, int, HashMap)
510         */
511        public static final String KEY_PARAM_STREAM = "streamType";
512
513        /**
514         * Parameter key to identify an utterance in the
515         * {@link TextToSpeech.OnUtteranceCompletedListener} after text has been
516         * spoken, a file has been played back or a silence duration has elapsed.
517         *
518         * @see TextToSpeech#speak(String, int, HashMap)
519         * @see TextToSpeech#playEarcon(String, int, HashMap)
520         * @see TextToSpeech#synthesizeToFile(String, HashMap, String)
521         */
522        public static final String KEY_PARAM_UTTERANCE_ID = "utteranceId";
523
524        /**
525         * Parameter key to specify the speech volume relative to the current stream type
526         * volume used when speaking text. Volume is specified as a float ranging from 0 to 1
527         * where 0 is silence, and 1 is the maximum volume (the default behavior).
528         *
529         * @see TextToSpeech#speak(String, int, HashMap)
530         * @see TextToSpeech#playEarcon(String, int, HashMap)
531         */
532        public static final String KEY_PARAM_VOLUME = "volume";
533
534        /**
535         * Parameter key to specify how the speech is panned from left to right when speaking text.
536         * Pan is specified as a float ranging from -1 to +1 where -1 maps to a hard-left pan,
537         * 0 to center (the default behavior), and +1 to hard-right.
538         *
539         * @see TextToSpeech#speak(String, int, HashMap)
540         * @see TextToSpeech#playEarcon(String, int, HashMap)
541         */
542        public static final String KEY_PARAM_PAN = "pan";
543
544        /**
545         * Feature key for network synthesis. See {@link TextToSpeech#getFeatures(Locale)}
546         * for a description of how feature keys work. If set (and supported by the engine
547         * as per {@link TextToSpeech#getFeatures(Locale)}), the engine must
548         * use network based synthesis.
549         *
550         * @see TextToSpeech#speak(String, int, java.util.HashMap)
551         * @see TextToSpeech#synthesizeToFile(String, java.util.HashMap, String)
552         * @see TextToSpeech#getFeatures(java.util.Locale)
553         */
554        public static final String KEY_FEATURE_NETWORK_SYNTHESIS = "networkTts";
555
556        /**
557         * Feature key for embedded synthesis. See {@link TextToSpeech#getFeatures(Locale)}
558         * for a description of how feature keys work. If set and supported by the engine
559         * as per {@link TextToSpeech#getFeatures(Locale)}, the engine must synthesize
560         * text on-device (without making network requests).
561         *
562         * @see TextToSpeech#speak(String, int, java.util.HashMap)
563         * @see TextToSpeech#synthesizeToFile(String, java.util.HashMap, String)
564         * @see TextToSpeech#getFeatures(java.util.Locale)
565         */
566        public static final String KEY_FEATURE_EMBEDDED_SYNTHESIS = "embeddedTts";
567
568        /**
569         * Parameter key to specify an audio session identifier (obtained from
570         * {@link AudioManager#allocateAudioSessionId()}) that will be used by the audio output
571         * of the request. It can be used to associate one of the {@link android.media.audiofx.AudioEffect}
572         * objects with the synthesis (or earcon) output.
573         *
574         * @see TextToSpeech#speak(String, int, HashMap)
575         * @see TextToSpeech#playEarcon(String, int, HashMap)
576         */
577        public static final String KEY_PARAM_SESSION_ID = "sessionId";
578    }
579
580    private final Context mContext;
581    private Connection mConnectingServiceConnection;
582    private Connection mServiceConnection;
583    private OnInitListener mInitListener;
584    // Written from an unspecified application thread, read from
585    // a binder thread.
586    private volatile UtteranceProgressListener mUtteranceProgressListener;
587    private final Object mStartLock = new Object();
588
589    private String mRequestedEngine;
590    // Whether to initialize this TTS object with the default engine,
591    // if the requested engine is not available. Valid only if mRequestedEngine
592    // is not null. Used only for testing, though potentially useful API wise
593    // too.
594    private final boolean mUseFallback;
595    private final Map<String, Uri> mEarcons;
596    private final Map<CharSequence, Uri> mUtterances;
597    private final Bundle mParams = new Bundle();
598    private final TtsEngines mEnginesHelper;
599    private final String mPackageName;
600    private volatile String mCurrentEngine = null;
601
602    /**
603     * The constructor for the TextToSpeech class, using the default TTS engine.
604     * This will also initialize the associated TextToSpeech engine if it isn't already running.
605     *
606     * @param context
607     *            The context this instance is running in.
608     * @param listener
609     *            The {@link TextToSpeech.OnInitListener} that will be called when the
610     *            TextToSpeech engine has initialized. In case of a failure the listener
611     *            may be called immediately, before the TextToSpeech instance is fully constructed.
612     */
613    public TextToSpeech(Context context, OnInitListener listener) {
614        this(context, listener, null);
615    }
616
617    /**
618     * The constructor for the TextToSpeech class, using the given TTS engine.
619     * This will also initialize the associated TextToSpeech engine if it isn't already running.
620     *
621     * @param context
622     *            The context this instance is running in.
623     * @param listener
624     *            The {@link TextToSpeech.OnInitListener} that will be called when the
625     *            TextToSpeech engine has initialized. In case of a failure the listener
626     *            may be called immediately, before the TextToSpeech instance is fully constructed.
627     * @param engine Package name of the TTS engine to use.
628     */
629    public TextToSpeech(Context context, OnInitListener listener, String engine) {
630        this(context, listener, engine, null, true);
631    }
632
633    /**
634     * Used by the framework to instantiate TextToSpeech objects with a supplied
635     * package name, instead of using {@link android.content.Context#getPackageName()}
636     *
637     * @hide
638     */
639    public TextToSpeech(Context context, OnInitListener listener, String engine,
640            String packageName, boolean useFallback) {
641        mContext = context;
642        mInitListener = listener;
643        mRequestedEngine = engine;
644        mUseFallback = useFallback;
645
646        mEarcons = new HashMap<String, Uri>();
647        mUtterances = new HashMap<CharSequence, Uri>();
648        mUtteranceProgressListener = null;
649
650        mEnginesHelper = new TtsEngines(mContext);
651        if (packageName != null) {
652            mPackageName = packageName;
653        } else {
654            mPackageName = mContext.getPackageName();
655        }
656        initTts();
657    }
658
659    private <R> R runActionNoReconnect(Action<R> action, R errorResult, String method,
660            boolean onlyEstablishedConnection) {
661        return runAction(action, errorResult, method, false, onlyEstablishedConnection);
662    }
663
664    private <R> R runAction(Action<R> action, R errorResult, String method) {
665        return runAction(action, errorResult, method, true, true);
666    }
667
668    private <R> R runAction(Action<R> action, R errorResult, String method,
669            boolean reconnect, boolean onlyEstablishedConnection) {
670        synchronized (mStartLock) {
671            if (mServiceConnection == null) {
672                Log.w(TAG, method + " failed: not bound to TTS engine");
673                return errorResult;
674            }
675            return mServiceConnection.runAction(action, errorResult, method, reconnect,
676                    onlyEstablishedConnection);
677        }
678    }
679
680    private int initTts() {
681        // Step 1: Try connecting to the engine that was requested.
682        if (mRequestedEngine != null) {
683            if (mEnginesHelper.isEngineInstalled(mRequestedEngine)) {
684                if (connectToEngine(mRequestedEngine)) {
685                    mCurrentEngine = mRequestedEngine;
686                    return SUCCESS;
687                } else if (!mUseFallback) {
688                    mCurrentEngine = null;
689                    dispatchOnInit(ERROR);
690                    return ERROR;
691                }
692            } else if (!mUseFallback) {
693                Log.i(TAG, "Requested engine not installed: " + mRequestedEngine);
694                mCurrentEngine = null;
695                dispatchOnInit(ERROR);
696                return ERROR;
697            }
698        }
699
700        // Step 2: Try connecting to the user's default engine.
701        final String defaultEngine = getDefaultEngine();
702        if (defaultEngine != null && !defaultEngine.equals(mRequestedEngine)) {
703            if (connectToEngine(defaultEngine)) {
704                mCurrentEngine = defaultEngine;
705                return SUCCESS;
706            }
707        }
708
709        // Step 3: Try connecting to the highest ranked engine in the
710        // system.
711        final String highestRanked = mEnginesHelper.getHighestRankedEngineName();
712        if (highestRanked != null && !highestRanked.equals(mRequestedEngine) &&
713                !highestRanked.equals(defaultEngine)) {
714            if (connectToEngine(highestRanked)) {
715                mCurrentEngine = highestRanked;
716                return SUCCESS;
717            }
718        }
719
720        // NOTE: The API currently does not allow the caller to query whether
721        // they are actually connected to any engine. This might fail for various
722        // reasons, for example if the user disables all of their TTS engines.
723
724        mCurrentEngine = null;
725        dispatchOnInit(ERROR);
726        return ERROR;
727    }
728
729    private boolean connectToEngine(String engine) {
730        Connection connection = new Connection();
731        Intent intent = new Intent(Engine.INTENT_ACTION_TTS_SERVICE);
732        intent.setPackage(engine);
733        boolean bound = mContext.bindService(intent, connection, Context.BIND_AUTO_CREATE);
734        if (!bound) {
735            Log.e(TAG, "Failed to bind to " + engine);
736            return false;
737        } else {
738            Log.i(TAG, "Sucessfully bound to " + engine);
739            mConnectingServiceConnection = connection;
740            return true;
741        }
742    }
743
744    private void dispatchOnInit(int result) {
745        synchronized (mStartLock) {
746            if (mInitListener != null) {
747                mInitListener.onInit(result);
748                mInitListener = null;
749            }
750        }
751    }
752
753    private IBinder getCallerIdentity() {
754        return mServiceConnection.getCallerIdentity();
755    }
756
757    /**
758     * Releases the resources used by the TextToSpeech engine.
759     * It is good practice, for instance, to call this method in the onDestroy() method of an
760     * Activity so the TextToSpeech engine can be cleanly stopped.
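     * <p>A typical pattern (assuming a field named {@code mTts}):
     * <pre>{@code
     * protected void onDestroy() {
     *     if (mTts != null) {
     *         mTts.stop();
     *         mTts.shutdown();
     *     }
     *     super.onDestroy();
     * }
     * }</pre>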
761     */
762    public void shutdown() {
763        // Special case, we are asked to shut down a connection that didn't finalize its connection.
764        synchronized (mStartLock) {
765            if (mConnectingServiceConnection != null) {
766                mContext.unbindService(mConnectingServiceConnection);
767                mConnectingServiceConnection = null;
768                return;
769            }
770        }
771
772        // Post connection case
773        runActionNoReconnect(new Action<Void>() {
774            @Override
775            public Void run(ITextToSpeechService service) throws RemoteException {
776                service.setCallback(getCallerIdentity(), null);
777                service.stop(getCallerIdentity());
778                mServiceConnection.disconnect();
779                // Context#unbindService does not result in a call to
780                // ServiceConnection#onServiceDisconnected. As a result, the
781                // service ends up being destroyed (if there are no other open
782                // connections to it) but the process lives on and the
783                // ServiceConnection continues to refer to the destroyed service.
784                //
785                // This leads to tons of log spam about SynthThread being dead.
786                mServiceConnection = null;
787                mCurrentEngine = null;
788                return null;
789            }
790        }, null, "shutdown", false);
791    }
792
793    /**
794     * Adds a mapping between a string of text and a sound resource in a
795     * package. After a call to this method, subsequent calls to
796     * {@link #speak(String, int, HashMap)} will play the specified sound resource
797     * if it is available, or synthesize the text if it is missing.
798     *
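     * <p>A short sketch (the {@code tts} variable is illustrative; the package and resource
     * mirror the examples below):
     * <pre>{@code
     * tts.addSpeech("south_south_east", "com.google.marvin.compass", R.raw.south_south_east);
     * // From now on, speaking this exact string plays the mapped resource instead of
     * // synthesizing it.
     * tts.speak("south_south_east", TextToSpeech.QUEUE_ADD, null);
     * }</pre>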
799     * @param text
800     *            The string of text. Example: <code>"south_south_east"</code>
801     *
802     * @param packagename
803     *            Pass the packagename of the application that contains the
804     *            resource. If the resource is in your own application (this is
805     *            the most common case), then put the packagename of your
806     *            application here.<br/>
807     *            Example: <b>"com.google.marvin.compass"</b><br/>
808     *            The packagename can be found in the AndroidManifest.xml of
809     *            your application.
810     *            <p>
811     *            <code>&lt;manifest xmlns:android=&quot;...&quot;
812     *      package=&quot;<b>com.google.marvin.compass</b>&quot;&gt;</code>
813     *            </p>
814     *
815     * @param resourceId
816     *            Example: <code>R.raw.south_south_east</code>
817     *
818     * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
819     */
820    public int addSpeech(String text, String packagename, int resourceId) {
821        synchronized (mStartLock) {
822            mUtterances.put(text, makeResourceUri(packagename, resourceId));
823            return SUCCESS;
824        }
825    }
826
827    /**
828     * Adds a mapping between a CharSequence (may be spanned with TtsSpans) of text
829     * and a sound resource in a package. After a call to this method, subsequent calls to
830     * {@link #speak(String, int, HashMap)} will play the specified sound resource
831     * if it is available, or synthesize the text if it is missing.
832     *
833     * @param text
834     *            The string of text. Example: <code>"south_south_east"</code>
835     *
836     * @param packagename
837     *            Pass the packagename of the application that contains the
838     *            resource. If the resource is in your own application (this is
839     *            the most common case), then put the packagename of your
840     *            application here.<br/>
841     *            Example: <b>"com.google.marvin.compass"</b><br/>
842     *            The packagename can be found in the AndroidManifest.xml of
843     *            your application.
844     *            <p>
845     *            <code>&lt;manifest xmlns:android=&quot;...&quot;
846     *      package=&quot;<b>com.google.marvin.compass</b>&quot;&gt;</code>
847     *            </p>
848     *
849     * @param resourceId
850     *            Example: <code>R.raw.south_south_east</code>
851     *
852     * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
853     */
854    public int addSpeech(CharSequence text, String packagename, int resourceId) {
855        synchronized (mStartLock) {
856            mUtterances.put(text, makeResourceUri(packagename, resourceId));
857            return SUCCESS;
858        }
859    }
860
861    /**
862     * Adds a mapping between a string of text and a sound file. Using this, it
863     * is possible to add custom pronunciations for a string of text.
864     * After a call to this method, subsequent calls to {@link #speak(String, int, HashMap)}
865     * will play the specified sound resource if it is available, or synthesize the text if it is
866     * missing.
867     *
868     * @param text
869     *            The string of text. Example: <code>"south_south_east"</code>
870     * @param filename
871     *            The full path to the sound file (for example:
872     *            "/sdcard/mysounds/hello.wav")
873     *
874     * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
875     */
876    public int addSpeech(String text, String filename) {
877        synchronized (mStartLock) {
878            mUtterances.put(text, Uri.parse(filename));
879            return SUCCESS;
880        }
881    }
882
883    /**
884     * Adds a mapping between a CharSequence (may be spanned with TtsSpans) and a sound file.
885     * Using this, it is possible to add custom pronunciations for a string of text.
886     * After a call to this method, subsequent calls to {@link #speak(String, int, HashMap)}
887     * will play the specified sound resource if it is available, or synthesize the text if it is
888     * missing.
889     *
890     * @param text
891     *            The string of text. Example: <code>"south_south_east"</code>
892     * @param filename
893     *            The full path to the sound file (for example:
894     *            "/sdcard/mysounds/hello.wav")
895     *
896     * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
897     */
898    public int addSpeech(CharSequence text, String filename) {
899        synchronized (mStartLock) {
900            mUtterances.put(text, Uri.parse(filename));
901            return SUCCESS;
902        }
903    }
904
905
906    /**
907     * Adds a mapping between a string of text and a sound resource in a
908     * package. Use this to add custom earcons.
909     *
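     * <p>A short sketch of registering and playing an earcon (the {@code tts} variable and
     * resource are illustrative; they mirror the examples below):
     * <pre>{@code
     * tts.addEarcon("[tick]", "com.google.marvin.compass", R.raw.tick_snd);
     * tts.playEarcon("[tick]", TextToSpeech.QUEUE_ADD, null);
     * }</pre>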
910     * @see #playEarcon(String, int, HashMap)
911     *
912     * @param earcon The name of the earcon.
913     *            Example: <code>"[tick]"</code><br/>
914     *
915     * @param packagename
916     *            the package name of the application that contains the
917     *            resource. This can for instance be the package name of your own application.
918     *            Example: <b>"com.google.marvin.compass"</b><br/>
919     *            The package name can be found in the AndroidManifest.xml of
920     *            the application containing the resource.
921     *            <p>
922     *            <code>&lt;manifest xmlns:android=&quot;...&quot;
923     *      package=&quot;<b>com.google.marvin.compass</b>&quot;&gt;</code>
924     *            </p>
925     *
926     * @param resourceId
927     *            Example: <code>R.raw.tick_snd</code>
928     *
929     * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
930     */
931    public int addEarcon(String earcon, String packagename, int resourceId) {
932        synchronized(mStartLock) {
933            mEarcons.put(earcon, makeResourceUri(packagename, resourceId));
934            return SUCCESS;
935        }
936    }
937
938    /**
939     * Adds a mapping between a string of text and a sound file.
940     * Use this to add custom earcons.
941     *
942     * @see #playEarcon(String, int, HashMap)
943     *
944     * @param earcon
945     *            The name of the earcon.
946     *            Example: <code>"[tick]"</code>
947     * @param filename
948     *            The full path to the sound file (for example:
949     *            "/sdcard/mysounds/tick.wav")
950     *
951     * @return Code indicating success or failure. See {@link #ERROR} and {@link #SUCCESS}.
952     */
953    public int addEarcon(String earcon, String filename) {
954        synchronized(mStartLock) {
955            mEarcons.put(earcon, Uri.parse(filename));
956            return SUCCESS;
957        }
958    }
959
960    private Uri makeResourceUri(String packageName, int resourceId) {
961        return new Uri.Builder()
962                .scheme(ContentResolver.SCHEME_ANDROID_RESOURCE)
963                .encodedAuthority(packageName)
964                .appendEncodedPath(String.valueOf(resourceId))
965                .build();
966    }
967
968    /**
969     * Speaks the text using the specified queuing strategy and speech parameters; the text may
970     * be spanned with TtsSpans.
971     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
972     * requests and then returns. The synthesis might not have finished (or even started!) at the
973     * time when this method returns. In order to reliably detect errors during synthesis,
974     * we recommend setting an utterance progress listener (see
975     * {@link #setOnUtteranceProgressListener}) and using the
976     * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
977     *
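     * <p>A sketch of tracking completion of a request (the {@code tts} variable and utterance
     * ID are illustrative; listener callbacks may be invoked on a background thread):
     * <pre>{@code
     * tts.setOnUtteranceProgressListener(new UtteranceProgressListener() {
     *     public void onStart(String utteranceId) { }
     *     public void onDone(String utteranceId) { }
     *     public void onError(String utteranceId) { }
     * });
     * tts.speak("Hello world", TextToSpeech.QUEUE_ADD, null, "greeting-1");
     * }</pre>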
978     * @param text The string of text to be spoken. No longer than
979     *            {@link #getMaxSpeechInputLength()} characters.
980     * @param queueMode The queuing strategy to use, {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
981     * @param params Parameters for the request. Can be null.
982     *            Supported parameter names:
983     *            {@link Engine#KEY_PARAM_STREAM},
984     *            {@link Engine#KEY_PARAM_VOLUME},
985     *            {@link Engine#KEY_PARAM_PAN}.
986     *            Engine specific parameters may be passed in but the parameter keys
987     *            must be prefixed by the name of the engine they are intended for. For example
988     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
989     *            engine named "com.svox.pico" if it is being used.
990     * @param utteranceId A unique identifier for this request.
991     *
992     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the speak operation.
993     */
994    public int speak(final CharSequence text,
995                     final int queueMode,
996                     final HashMap<String, String> params,
997                     final String utteranceId) {
998        return runAction(new Action<Integer>() {
999            @Override
1000            public Integer run(ITextToSpeechService service) throws RemoteException {
1001                Uri utteranceUri = mUtterances.get(text);
1002                if (utteranceUri != null) {
1003                    return service.playAudio(getCallerIdentity(), utteranceUri, queueMode,
1004                            getParams(params), utteranceId);
1005                } else {
1006                    return service.speak(getCallerIdentity(), text, queueMode, getParams(params),
1007                            utteranceId);
1008                }
1009            }
1010        }, ERROR, "speak");
1011    }
1012
1013    /**
1014     * Speaks the string using the specified queuing strategy and speech parameters.
1015     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1016     * requests and then returns. The synthesis might not have finished (or even started!) at the
1017     * time when this method returns. In order to reliably detect errors during synthesis,
1018     * we recommend setting an utterance progress listener (see
1019     * {@link #setOnUtteranceProgressListener}) and using the
1020     * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1021     *
1022     * @param text The string of text to be spoken. No longer than
1023     *            {@link #getMaxSpeechInputLength()} characters.
1024     * @param queueMode The queuing strategy to use, {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1025     * @param params Parameters for the request. Can be null.
1026     *            Supported parameter names:
1027     *            {@link Engine#KEY_PARAM_STREAM},
1028     *            {@link Engine#KEY_PARAM_UTTERANCE_ID},
1029     *            {@link Engine#KEY_PARAM_VOLUME},
1030     *            {@link Engine#KEY_PARAM_PAN}.
1031     *            Engine specific parameters may be passed in but the parameter keys
1032     *            must be prefixed by the name of the engine they are intended for. For example
1033     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1034     *            engine named "com.svox.pico" if it is being used.
1035     *
1036     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the speak operation.
1037     * @deprecated As of API level 20, replaced by
1038     *         {@link #speak(CharSequence, int, HashMap, String)}.
1039     */
1040    @Deprecated
1041    public int speak(final String text, final int queueMode, final HashMap<String, String> params) {
1042        return speak(text, queueMode, params,
1043                     params == null ? null : params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1044    }
1045
1046    /**
1047     * Plays the earcon using the specified queueing mode and parameters.
1048     * The earcon must already have been added with {@link #addEarcon(String, String)} or
1049     * {@link #addEarcon(String, String, int)}.
1050     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1051     * requests and then returns. The synthesis might not have finished (or even started!) at the
1052     * time when this method returns. In order to reliably detect errors during synthesis,
1053     * we recommend setting an utterance progress listener (see
1054     * {@link #setOnUtteranceProgressListener}) and using the
1055     * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1056     *
1057     * @param earcon The earcon that should be played
1058     * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1059     * @param params Parameters for the request. Can be null.
1060     *            Supported parameter names:
1061     *            {@link Engine#KEY_PARAM_STREAM}.
1062     *            Engine specific parameters may be passed in but the parameter keys
1063     *            must be prefixed by the name of the engine they are intended for. For example
1064     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1065     *            engine named "com.svox.pico" if it is being used.
1066     *
1067     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playEarcon operation.
1068     */
1069    public int playEarcon(final String earcon, final int queueMode,
1070            final HashMap<String, String> params, final String utteranceId) {
1071        return runAction(new Action<Integer>() {
1072            @Override
1073            public Integer run(ITextToSpeechService service) throws RemoteException {
1074                Uri earconUri = mEarcons.get(earcon);
1075                if (earconUri == null) {
1076                    return ERROR;
1077                }
1078                return service.playAudio(getCallerIdentity(), earconUri, queueMode,
1079                        getParams(params), utteranceId);
1080            }
1081        }, ERROR, "playEarcon");
1082    }
1083
1084    /**
1085     * Plays the earcon using the specified queueing mode and parameters.
1086     * The earcon must already have been added with {@link #addEarcon(String, String)} or
1087     * {@link #addEarcon(String, String, int)}.
1088     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1089     * requests and then returns. The synthesis might not have finished (or even started!) at the
1090     * time when this method returns. In order to reliably detect errors during synthesis,
1091     * we recommend setting an utterance progress listener (see
1092     * {@link #setOnUtteranceProgressListener}) and using the
1093     * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1094     *
1095     * @param earcon The earcon that should be played
1096     * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1097     * @param params Parameters for the request. Can be null.
1098     *            Supported parameter names:
1099     *            {@link Engine#KEY_PARAM_STREAM},
1100     *            {@link Engine#KEY_PARAM_UTTERANCE_ID}.
1101     *            Engine specific parameters may be passed in but the parameter keys
1102     *            must be prefixed by the name of the engine they are intended for. For example
1103     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1104     *            engine named "com.svox.pico" if it is being used.
1105     *
1106     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playEarcon operation.
1107     * @deprecated As of API level 20, replaced by
1108     *         {@link #playEarcon(String, int, HashMap, String)}.
1109     */
1110    @Deprecated
1111    public int playEarcon(final String earcon, final int queueMode,
1112            final HashMap<String, String> params) {
1113        return playEarcon(earcon, queueMode, params,
1114                          params == null ? null : params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1115    }
1116
1117    /**
1118     * Plays silence for the specified amount of time using the specified
1119     * queue mode.
1120     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1121     * requests and then returns. The synthesis might not have finished (or even started!) at the
1122     * time when this method returns. In order to reliably detect errors during synthesis,
1123     * we recommend setting an utterance progress listener (see
1124     * {@link #setOnUtteranceProgressListener}) and using the
1125     * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1126     *
1127     * @param durationInMs The duration of the silence.
1128     * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1129     * @param params Parameters for the request. Can be null.
1130     *            Engine specific parameters may be passed in but the parameter keys
1131     *            must be prefixed by the name of the engine they are intended for. For example
1132     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1133     *            engine named "com.svox.pico" if it is being used.
1134     * @param utteranceId A unique identifier for this request.
1135     *
1136     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playSilence operation.
1137     */
1138    public int playSilence(final long durationInMs, final int queueMode,
1139            final HashMap<String, String> params, final String utteranceId) {
1140        return runAction(new Action<Integer>() {
1141            @Override
1142            public Integer run(ITextToSpeechService service) throws RemoteException {
1143                return service.playSilence(getCallerIdentity(), durationInMs,
1144                                           queueMode, utteranceId);
1145            }
1146        }, ERROR, "playSilence");
1147    }
1148
1149    /**
1150     * Plays silence for the specified amount of time using the specified
1151     * queue mode.
1152     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1153     * requests and then returns. The synthesis might not have finished (or even started!) at the
1154     * time when this method returns. In order to reliably detect errors during synthesis,
1155     * we recommend setting an utterance progress listener (see
1156     * {@link #setOnUtteranceProgressListener}) and using the
1157     * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1158     *
1159     * @param durationInMs The duration of the silence.
1160     * @param queueMode {@link #QUEUE_ADD} or {@link #QUEUE_FLUSH}.
1161     * @param params Parameters for the request. Can be null.
1162     *            Supported parameter names:
1163     *            {@link Engine#KEY_PARAM_UTTERANCE_ID}.
1164     *            Engine specific parameters may be passed in but the parameter keys
1165     *            must be prefixed by the name of the engine they are intended for. For example
1166     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1167     *            engine named "com.svox.pico" if it is being used.
1168     *
1169     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the playSilence operation.
1170     * @deprecated As of API level 20, replaced by
1171     *         {@link #playSilence(long, int, HashMap, String)}.
1172     */
1173    @Deprecated
1174    public int playSilence(final long durationInMs, final int queueMode,
1175            final HashMap<String, String> params) {
1176        return playSilence(durationInMs, queueMode, params,
1177                           params == null ? null : params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1178    }
1179
1180    /**
1181     * Queries the engine for the set of features it supports for a given locale.
1182     * Features can either be framework defined, e.g.
1183     * {@link TextToSpeech.Engine#KEY_FEATURE_NETWORK_SYNTHESIS} or engine specific.
1184     * Engine specific keys must be prefixed by the name of the engine they
1185     * are intended for. These keys can be used as parameters to
1186     * {@link TextToSpeech#speak(String, int, java.util.HashMap)} and
1187     * {@link TextToSpeech#synthesizeToFile(String, java.util.HashMap, String)}.
1188     *
1189     * Features are boolean flags, and their values in the synthesis parameters
1190     * must behave as per {@link Boolean#parseBoolean(String)}.
1191     *
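     * <p>A sketch of checking for network synthesis support before requesting it (the
     * {@code tts} variable is illustrative):
     * <pre>{@code
     * Set<String> features = tts.getFeatures(Locale.US);
     * if (features != null
     *         && features.contains(TextToSpeech.Engine.KEY_FEATURE_NETWORK_SYNTHESIS)) {
     *     HashMap<String, String> params = new HashMap<String, String>();
     *     params.put(TextToSpeech.Engine.KEY_FEATURE_NETWORK_SYNTHESIS, "true");
     *     tts.speak("Hello", TextToSpeech.QUEUE_FLUSH, params);
     * }
     * }</pre>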
1192     * @param locale The locale to query features for.
1193     * @return Set instance. May return {@code null} on error.
1194     */
1195    public Set<String> getFeatures(final Locale locale) {
1196        return runAction(new Action<Set<String>>() {
1197            @Override
1198            public Set<String> run(ITextToSpeechService service) throws RemoteException {
1199                String[] features = null;
1200                try {
1201                    features = service.getFeaturesForLanguage(
1202                        locale.getISO3Language(), locale.getISO3Country(), locale.getVariant());
1203                } catch(MissingResourceException e) {
1204                    Log.w(TAG, "Couldn't retrieve 3 letter ISO 639-2/T language and/or ISO 3166 " +
1205                            "country code for locale: " + locale, e);
1206                    return null;
1207                }
1208
1209                if (features != null) {
1210                    final Set<String> featureSet = new HashSet<String>();
1211                    Collections.addAll(featureSet, features);
1212                    return featureSet;
1213                }
1214                return null;
1215            }
1216        }, null, "getFeatures");
1217    }
1218
1219    /**
1220     * Checks whether the TTS engine is busy speaking. Note that a speech item is
1221     * considered complete once its audio data has been sent to the audio mixer, or
1222     * written to a file. There might be a finite lag between this point and when
1223     * the audio hardware completes playback.
1224     *
1225     * @return {@code true} if the TTS engine is speaking.
1226     */
1227    public boolean isSpeaking() {
1228        return runAction(new Action<Boolean>() {
1229            @Override
1230            public Boolean run(ITextToSpeechService service) throws RemoteException {
1231                return service.isSpeaking();
1232            }
1233        }, false, "isSpeaking");
1234    }
1235
1236    /**
1237     * Interrupts the current utterance (whether played or rendered to file) and discards other
1238     * utterances in the queue.
1239     *
1240     * @return {@link #ERROR} or {@link #SUCCESS}.
1241     */
1242    public int stop() {
1243        return runAction(new Action<Integer>() {
1244            @Override
1245            public Integer run(ITextToSpeechService service) throws RemoteException {
1246                return service.stop(getCallerIdentity());
1247            }
1248        }, ERROR, "stop");
1249    }
1250
1251    /**
1252     * Sets the speech rate.
1253     *
1254     * This has no effect on any pre-recorded speech.
1255     *
1256     * @param speechRate Speech rate. {@code 1.0} is the normal speech rate,
1257     *            lower values slow down the speech ({@code 0.5} is half the normal speech rate),
1258     *            greater values accelerate it ({@code 2.0} is twice the normal speech rate).
1259     *
1260     * @return {@link #ERROR} or {@link #SUCCESS}.
1261     */
1262    public int setSpeechRate(float speechRate) {
1263        if (speechRate > 0.0f) {
1264            int intRate = (int)(speechRate * 100);
1265            if (intRate > 0) {
1266                synchronized (mStartLock) {
1267                    mParams.putInt(Engine.KEY_PARAM_RATE, intRate);
1268                }
1269                return SUCCESS;
1270            }
1271        }
1272        return ERROR;
1273    }
1274
1275    /**
1276     * Sets the speech pitch for the TextToSpeech engine.
1277     *
1278     * This has no effect on any pre-recorded speech.
1279     *
1280     * @param pitch Speech pitch. {@code 1.0} is the normal pitch,
1281     *            lower values lower the tone of the synthesized voice,
1282     *            greater values increase it.
1283     *
1284     * @return {@link #ERROR} or {@link #SUCCESS}.
1285     */
1286    public int setPitch(float pitch) {
1287        if (pitch > 0.0f) {
1288            int intPitch = (int)(pitch * 100);
1289            if (intPitch > 0) {
1290                synchronized (mStartLock) {
1291                    mParams.putInt(Engine.KEY_PARAM_PITCH, intPitch);
1292                }
1293                return SUCCESS;
1294            }
1295        }
1296        return ERROR;
1297    }
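
    // Illustrative sketch, not part of the original source: both setters take a multiplier
    // relative to the normal value, so 0.5f halves the speech rate and 1.2f raises the pitch
    // by 20%. Non-positive values are rejected with ERROR. The helper name is hypothetical.
    private static void exampleConfigureVoice(TextToSpeech tts) {
        if (tts.setSpeechRate(0.5f) != SUCCESS) {
            Log.w(TAG, "setSpeechRate(0.5f) failed");
        }
        if (tts.setPitch(1.2f) != SUCCESS) {
            Log.w(TAG, "setPitch(1.2f) failed");
        }
    }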
1298
1299    /**
1300     * @return the engine currently in use by this TextToSpeech instance.
1301     * @hide
1302     */
1303    public String getCurrentEngine() {
1304        return mCurrentEngine;
1305    }
1306
1307    /**
1308     * Returns a Locale instance describing the language currently being used as the default
1309     * Text-to-speech language.
1310     *
1311     * @return language, country (if any) and variant (if any) used by the client stored in a
1312     *     Locale instance, or {@code null} on error.
1313     */
1314    public Locale getDefaultLanguage() {
1315        return runAction(new Action<Locale>() {
1316            @Override
1317            public Locale run(ITextToSpeechService service) throws RemoteException {
1318                String[] defaultLanguage = service.getClientDefaultLanguage();
1319
1320                return new Locale(defaultLanguage[0], defaultLanguage[1], defaultLanguage[2]);
1321            }
1322        }, null, "getDefaultLanguage");
1323    }
1324
1325    /**
1326     * Sets the text-to-speech language.
1327     * The TTS engine will try to use the closest match to the specified
1328     * language as represented by the Locale, but there is no guarantee that the exact same Locale
1329     * will be used. Use {@link #isLanguageAvailable(Locale)} to check the level of support
1330     * before choosing the language to use for the next utterances.
1331     *
1332     * @param loc The locale describing the language to be used.
1333     *
1334     * @return Code indicating the support status for the locale. See {@link #LANG_AVAILABLE},
1335     *         {@link #LANG_COUNTRY_AVAILABLE}, {@link #LANG_COUNTRY_VAR_AVAILABLE},
1336     *         {@link #LANG_MISSING_DATA} and {@link #LANG_NOT_SUPPORTED}.
1337     */
1338    public int setLanguage(final Locale loc) {
1339        return runAction(new Action<Integer>() {
1340            @Override
1341            public Integer run(ITextToSpeechService service) throws RemoteException {
1342                if (loc == null) {
1343                    return LANG_NOT_SUPPORTED;
1344                }
1345                String language = null, country = null;
1346                try {
1347                    language = loc.getISO3Language();
1348                } catch (MissingResourceException e) {
1349                    Log.w(TAG, "Couldn't retrieve ISO 639-2/T language code for locale: " + loc, e);
1350                    return LANG_NOT_SUPPORTED;
1351                }
1352
1353                try {
1354                    country = loc.getISO3Country();
1355                } catch (MissingResourceException e) {
1356                    Log.w(TAG, "Couldn't retrieve ISO 3166 country code for locale: " + loc, e);
1357                    return LANG_NOT_SUPPORTED;
1358                }
1359
1360                String variant = loc.getVariant();
1361
1362                // Check if the language, country, variant are available, and cache
1363                // the available parts.
1364                // Note that the language is not actually set here, instead it is cached so it
1365                // will be associated with all upcoming utterances.
1366
1367                int result = service.loadLanguage(getCallerIdentity(), language, country, variant);
1368                if (result >= LANG_AVAILABLE){
1369                    if (result < LANG_COUNTRY_VAR_AVAILABLE) {
1370                        variant = "";
1371                        if (result < LANG_COUNTRY_AVAILABLE) {
1372                            country = "";
1373                        }
1374                    }
1375                    mParams.putString(Engine.KEY_PARAM_LANGUAGE, language);
1376                    mParams.putString(Engine.KEY_PARAM_COUNTRY, country);
1377                    mParams.putString(Engine.KEY_PARAM_VARIANT, variant);
1378                }
1379                return result;
1380            }
1381        }, LANG_NOT_SUPPORTED, "setLanguage");
1382    }
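
    // Illustrative sketch, not part of the original source: handles the result codes documented
    // above, prompting the engine to install missing voice data when the locale is recognized
    // but its data is absent. The helper name is hypothetical.
    private static void exampleSetGerman(TextToSpeech tts, Context context) {
        int result = tts.setLanguage(Locale.GERMANY);
        if (result == LANG_MISSING_DATA) {
            // Let the selected engine offer to download the missing voice data.
            Intent installIntent = new Intent(Engine.ACTION_INSTALL_TTS_DATA);
            installIntent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
            context.startActivity(installIntent);
        } else if (result == LANG_NOT_SUPPORTED) {
            Log.w(TAG, "The current engine does not support " + Locale.GERMANY);
        }
    }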
1383
1384    /**
1385     * Returns a Locale instance describing the language currently being used for synthesis
1386     * requests sent to the TextToSpeech engine.
1387     *
1388     * In Android 4.2 and before (API <= 17) this function returns the language that is currently
1389     * being used by the TTS engine. That is the last language set by this or any other
1390     * client by a {@link TextToSpeech#setLanguage} call to the same engine.
1391     *
1392     * In Android versions after 4.2 this function returns the language that is currently being
1393     * used for the synthesis requests sent from this client. That is the last language set
1394     * by a {@link TextToSpeech#setLanguage} call on this instance.
1395     *
1396     * @return language, country (if any) and variant (if any) used by the client stored in a
1397     *     Locale instance, or {@code null} on error.
1398     */
1399    public Locale getLanguage() {
1400        return runAction(new Action<Locale>() {
1401            @Override
1402            public Locale run(ITextToSpeechService service) {
1403                /* No service call, but mParams is being accessed, hence the need to
1404                   wrap this in an Action instance (runAction holds mStartLock). */
1405                String lang = mParams.getString(Engine.KEY_PARAM_LANGUAGE, "");
1406                String country = mParams.getString(Engine.KEY_PARAM_COUNTRY, "");
1407                String variant = mParams.getString(Engine.KEY_PARAM_VARIANT, "");
1408                return new Locale(lang, country, variant);
1409            }
1410        }, null, "getLanguage");
1411    }
1412
1413    /**
1414     * Checks if the specified language as represented by the Locale is available and supported.
1415     *
1416     * @param loc The Locale describing the language to be used.
1417     *
1418     * @return Code indicating the support status for the locale. See {@link #LANG_AVAILABLE},
1419     *         {@link #LANG_COUNTRY_AVAILABLE}, {@link #LANG_COUNTRY_VAR_AVAILABLE},
1420     *         {@link #LANG_MISSING_DATA} and {@link #LANG_NOT_SUPPORTED}.
1421     */
1422    public int isLanguageAvailable(final Locale loc) {
1423        return runAction(new Action<Integer>() {
1424            @Override
1425            public Integer run(ITextToSpeechService service) throws RemoteException {
1426                String language = null, country = null;
1427
1428                try {
1429                    language = loc.getISO3Language();
1430                } catch (MissingResourceException e) {
1431                    Log.w(TAG, "Couldn't retrieve ISO 639-2/T language code for locale: " + loc, e);
1432                    return LANG_NOT_SUPPORTED;
1433                }
1434
1435                try {
1436                    country = loc.getISO3Country();
1437                } catch (MissingResourceException e) {
1438                    Log.w(TAG, "Couldn't retrieve ISO 3166 country code for locale: " + loc, e);
1439                    return LANG_NOT_SUPPORTED;
1440                }
1441
1442                return service.isLanguageAvailable(language, country, loc.getVariant());
1443            }
1444        }, LANG_NOT_SUPPORTED, "isLanguageAvailable");
1445    }
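
    // Illustrative sketch, not part of the original source: prefers a country-specific voice
    // when the engine has one and falls back to the bare language otherwise. The helper name
    // is hypothetical.
    private static void examplePickEnglishVariant(TextToSpeech tts) {
        if (tts.isLanguageAvailable(Locale.UK) >= LANG_COUNTRY_AVAILABLE) {
            tts.setLanguage(Locale.UK);
        } else if (tts.isLanguageAvailable(Locale.ENGLISH) >= LANG_AVAILABLE) {
            tts.setLanguage(Locale.ENGLISH);
        } else {
            Log.w(TAG, "No English voice available");
        }
    }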
1446
1447    /**
1448     * Synthesizes the given text to a file using the specified parameters.
1449     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1450     * requests and then returns. The synthesis might not have finished (or even started!) at the
1451     * time when this method returns. In order to reliably detect errors during synthesis,
1452     * we recommend setting an utterance progress listener (see
1453     * {@link #setOnUtteranceProgressListener}).
1454     *
1455     * @param text The text that should be synthesized. No longer than
1456     *            {@link #getMaxSpeechInputLength()} characters.
1457     * @param params Parameters for the request. Can be null.
1458     *            Engine-specific parameters may be passed in, but the parameter keys
1459     *            must be prefixed by the name of the engine they are intended for. For example,
1460     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1461     *            engine named "com.svox.pico" if it is being used.
1462     * @param filename Absolute path of the file to write the generated audio data to. It
1463     *            should be something like "/sdcard/myappsounds/mysound.wav".
1464     * @param utteranceId A unique identifier for this request.
1465     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the synthesizeToFile operation.
1466     */
1467    public int synthesizeToFile(final CharSequence text, final HashMap<String, String> params,
1468            final String filename, final String utteranceId) {
1469        return runAction(new Action<Integer>() {
1470            @Override
1471            public Integer run(ITextToSpeechService service) throws RemoteException {
1472                ParcelFileDescriptor fileDescriptor;
1473                int returnValue;
1474                try {
1475                    File file = new File(filename);
1476                    if(file.exists() && !file.canWrite()) {
1477                        Log.e(TAG, "Can't write to " + filename);
1478                        return ERROR;
1479                    }
1480                    fileDescriptor = ParcelFileDescriptor.open(file,
1481                            ParcelFileDescriptor.MODE_WRITE_ONLY |
1482                            ParcelFileDescriptor.MODE_CREATE |
1483                            ParcelFileDescriptor.MODE_TRUNCATE);
1484                    returnValue = service.synthesizeToFileDescriptor(getCallerIdentity(), text,
1485                            fileDescriptor, getParams(params), utteranceId);
1486                    fileDescriptor.close();
1487                    return returnValue;
1488                } catch (FileNotFoundException e) {
1489                    Log.e(TAG, "Opening file " + filename + " failed", e);
1490                    return ERROR;
1491                } catch (IOException e) {
1492                    Log.e(TAG, "Closing file " + filename + " failed", e);
1493                    return ERROR;
1494                }
1495            }
1496        }, ERROR, "synthesizeToFile");
1497    }
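
    // Illustrative sketch, not part of the original source: writes a WAV file into the
    // application's external files directory and relies on an utterance progress listener
    // (see setOnUtteranceProgressListener) to learn when the file is ready. The helper name,
    // output file name and the "export-1" ID are hypothetical.
    private static int exampleSynthesizeToFile(TextToSpeech tts, Context context, String text) {
        File out = new File(context.getExternalFilesDir(null), "tts_export.wav");
        return tts.synthesizeToFile(text, null, out.getAbsolutePath(), "export-1");
    }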
1498
1499    /**
1500     * Synthesizes the given text to a file using the specified parameters.
1501     * This method is asynchronous, i.e. the method just adds the request to the queue of TTS
1502     * requests and then returns. The synthesis might not have finished (or even started!) at the
1503     * time when this method returns. In order to reliably detect errors during synthesis,
1504     * we recommend setting an utterance progress listener (see
1505     * {@link #setOnUtteranceProgressListener}) and using the
1506     * {@link Engine#KEY_PARAM_UTTERANCE_ID} parameter.
1507     *
1508     * @param text The text that should be synthesized. No longer than
1509     *            {@link #getMaxSpeechInputLength()} characters.
1510     * @param params Parameters for the request. Can be null.
1511     *            Supported parameter names:
1512     *            {@link Engine#KEY_PARAM_UTTERANCE_ID}.
1513     *            Engine-specific parameters may be passed in, but the parameter keys
1514     *            must be prefixed by the name of the engine they are intended for. For example,
1515     *            the keys "com.svox.pico_foo" and "com.svox.pico:bar" will be passed to the
1516     *            engine named "com.svox.pico" if it is being used.
1517     * @param filename Absolute path of the file to write the generated audio data to. It
1518     *            should be something like "/sdcard/myappsounds/mysound.wav".
1519     *
1520     * @return {@link #ERROR} or {@link #SUCCESS} of <b>queuing</b> the synthesizeToFile operation.
1521     * @deprecated As of API level 20, replaced by
1522     *         {@link #synthesizeToFile(CharSequence, HashMap, String, String)}.
1523     */
    @Deprecated
1524    public int synthesizeToFile(final String text, final HashMap<String, String> params,
1525            final String filename) {
1526        return synthesizeToFile(text, params, filename,
                params == null ? null : params.get(Engine.KEY_PARAM_UTTERANCE_ID));
1527    }
1528
1529    private Bundle getParams(HashMap<String, String> params) {
1530        if (params != null && !params.isEmpty()) {
1531            Bundle bundle = new Bundle(mParams);
1532            copyIntParam(bundle, params, Engine.KEY_PARAM_STREAM);
1533            copyIntParam(bundle, params, Engine.KEY_PARAM_SESSION_ID);
1534            copyStringParam(bundle, params, Engine.KEY_PARAM_UTTERANCE_ID);
1535            copyFloatParam(bundle, params, Engine.KEY_PARAM_VOLUME);
1536            copyFloatParam(bundle, params, Engine.KEY_PARAM_PAN);
1537
1538            // Copy feature strings defined by the framework.
1539            copyStringParam(bundle, params, Engine.KEY_FEATURE_NETWORK_SYNTHESIS);
1540            copyStringParam(bundle, params, Engine.KEY_FEATURE_EMBEDDED_SYNTHESIS);
1541
1542            // Copy over all parameters that start with the name of the
1543            // engine that we are currently connected to. The engine is
1544            // free to interpret them as it chooses.
1545            if (!TextUtils.isEmpty(mCurrentEngine)) {
1546                for (Map.Entry<String, String> entry : params.entrySet()) {
1547                    final String key = entry.getKey();
1548                    if (key != null && key.startsWith(mCurrentEngine)) {
1549                        bundle.putString(key, entry.getValue());
1550                    }
1551                }
1552            }
1553
1554            return bundle;
1555        } else {
1556            return mParams;
1557        }
1558    }
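
    // Illustrative sketch, not part of the original source: shows the kinds of string values
    // getParams() copies into the request bundle. Volume and pan are parsed as floats, the
    // stream as an int, and keys prefixed with the current engine's package name are forwarded
    // untouched. "com.example.engine:style" is a hypothetical engine-specific key.
    private static HashMap<String, String> exampleBuildParams() {
        HashMap<String, String> params = new HashMap<String, String>();
        params.put(Engine.KEY_PARAM_UTTERANCE_ID, "demo-1");
        params.put(Engine.KEY_PARAM_VOLUME, "0.8");
        params.put(Engine.KEY_PARAM_PAN, "-0.2");
        params.put(Engine.KEY_PARAM_STREAM, String.valueOf(AudioManager.STREAM_ALARM));
        params.put("com.example.engine:style", "whisper");
        return params;
    }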
1559
1560    private void copyStringParam(Bundle bundle, HashMap<String, String> params, String key) {
1561        String value = params.get(key);
1562        if (value != null) {
1563            bundle.putString(key, value);
1564        }
1565    }
1566
1567    private void copyIntParam(Bundle bundle, HashMap<String, String> params, String key) {
1568        String valueString = params.get(key);
1569        if (!TextUtils.isEmpty(valueString)) {
1570            try {
1571                int value = Integer.parseInt(valueString);
1572                bundle.putInt(key, value);
1573            } catch (NumberFormatException ex) {
1574                // don't set the value in the bundle
1575            }
1576        }
1577    }
1578
1579    private void copyFloatParam(Bundle bundle, HashMap<String, String> params, String key) {
1580        String valueString = params.get(key);
1581        if (!TextUtils.isEmpty(valueString)) {
1582            try {
1583                float value = Float.parseFloat(valueString);
1584                bundle.putFloat(key, value);
1585            } catch (NumberFormatException ex) {
1586                // don't set the value in the bundle
1587            }
1588        }
1589    }
1590
1591    /**
1592     * Sets the listener that will be notified when synthesis of an utterance completes.
1593     *
1594     * @param listener The listener to use.
1595     *
1596     * @return {@link #ERROR} or {@link #SUCCESS}.
1597     *
1598     * @deprecated Use {@link #setOnUtteranceProgressListener(UtteranceProgressListener)}
1599     *        instead.
1600     */
1601    @Deprecated
1602    public int setOnUtteranceCompletedListener(final OnUtteranceCompletedListener listener) {
1603        mUtteranceProgressListener = UtteranceProgressListener.from(listener);
1604        return TextToSpeech.SUCCESS;
1605    }
1606
1607    /**
1608     * Sets the listener that will be notified of various events related to the
1609     * synthesis of a given utterance.
1610     *
1611     * See {@link UtteranceProgressListener} and
1612     * {@link TextToSpeech.Engine#KEY_PARAM_UTTERANCE_ID}.
1613     *
1614     * @param listener the listener to use.
1615     * @return {@link #ERROR} or {@link #SUCCESS}
1616     */
1617    public int setOnUtteranceProgressListener(UtteranceProgressListener listener) {
1618        mUtteranceProgressListener = listener;
1619        return TextToSpeech.SUCCESS;
1620    }
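
    // Illustrative sketch, not part of the original source: a minimal listener wired to the
    // progress callbacks this class dispatches from the service connection. These callbacks
    // are not guaranteed to arrive on the application's main thread.
    private static void exampleAttachProgressListener(TextToSpeech tts) {
        tts.setOnUtteranceProgressListener(new UtteranceProgressListener() {
            @Override
            public void onStart(String utteranceId) {
                Log.d(TAG, "Started synthesizing " + utteranceId);
            }

            @Override
            public void onDone(String utteranceId) {
                Log.d(TAG, "Finished synthesizing " + utteranceId);
            }

            @Override
            public void onError(String utteranceId) {
                Log.w(TAG, "Error synthesizing " + utteranceId);
            }
        });
    }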
1621
1622    /**
1623     * Sets the TTS engine to use.
1624     *
1625     * @deprecated This doesn't inform callers when the TTS engine has been
1626     *        initialized. {@link #TextToSpeech(Context, OnInitListener, String)}
1627     *        can be used with the appropriate engine name. Also, there is no
1628     *        guarantee that the engine specified will be loaded. If it isn't
1629     *        installed or disabled, the user / system wide defaults will apply.
1630     *
1631     * @param enginePackageName The package name for the synthesis engine (e.g. "com.svox.pico")
1632     *
1633     * @return {@link #ERROR} or {@link #SUCCESS}.
1634     */
1635    @Deprecated
1636    public int setEngineByPackageName(String enginePackageName) {
1637        mRequestedEngine = enginePackageName;
1638        return initTts();
1639    }
1640
1641    /**
1642     * Gets the package name of the default speech synthesis engine.
1643     *
1644     * @return Package name of the TTS engine that the user has chosen
1645     *        as their default.
1646     */
1647    public String getDefaultEngine() {
1648        return mEnginesHelper.getDefaultEngine();
1649    }
1650
1651    /**
1652     * Checks whether the user's settings should override settings requested
1653     * by the calling application. As of the Ice Cream Sandwich release,
1654     * user settings never forcibly override the app's settings.
1655     */
1656    public boolean areDefaultsEnforced() {
1657        return false;
1658    }
1659
1660    /**
1661     * Gets a list of all installed TTS engines.
1662     *
1663     * @return A list of engine info objects. The list can be empty, but never {@code null}.
1664     */
1665    public List<EngineInfo> getEngines() {
1666        return mEnginesHelper.getEngines();
1667    }
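
    // Illustrative sketch, not part of the original source: lists the installed engines and
    // constructs a TextToSpeech instance bound to an explicitly chosen one via the
    // three-argument constructor. The helper name and preference logic are hypothetical.
    private static TextToSpeech exampleBindToEngine(Context context, TextToSpeech current,
            OnInitListener listener, String preferredPackage) {
        for (EngineInfo engine : current.getEngines()) {
            if (engine.name.equals(preferredPackage)) {
                // Bind a new instance to the chosen engine; onInit reports readiness.
                return new TextToSpeech(context, listener, engine.name);
            }
        }
        Log.i(TAG, preferredPackage + " is not installed; keeping "
                + current.getDefaultEngine());
        return current;
    }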
1668
1669    private class Connection implements ServiceConnection {
1670        private ITextToSpeechService mService;
1671
1672        private SetupConnectionAsyncTask mOnSetupConnectionAsyncTask;
1673
1674        private boolean mEstablished;
1675
1676        private final ITextToSpeechCallback.Stub mCallback = new ITextToSpeechCallback.Stub() {
1677            public void onStop(String utteranceId) throws RemoteException {
1678                // do nothing
1679            }
1680
1681            @Override
1682            public void onFallback(String utteranceId) throws RemoteException {
1683                // do nothing
1684            }
1685
1686            @Override
1687            public void onSuccess(String utteranceId) {
1688                UtteranceProgressListener listener = mUtteranceProgressListener;
1689                if (listener != null) {
1690                    listener.onDone(utteranceId);
1691                }
1692            }
1693
1694            @Override
1695            public void onError(String utteranceId, int errorCode) {
1696                UtteranceProgressListener listener = mUtteranceProgressListener;
1697                if (listener != null) {
1698                    listener.onError(utteranceId);
1699                }
1700            }
1701
1702            @Override
1703            public void onStart(String utteranceId) {
1704                UtteranceProgressListener listener = mUtteranceProgressListener;
1705                if (listener != null) {
1706                    listener.onStart(utteranceId);
1707                }
1708            }
1709        };
1710
1711        private class SetupConnectionAsyncTask extends AsyncTask<Void, Void, Integer> {
1712            private final ComponentName mName;
1713
1714            public SetupConnectionAsyncTask(ComponentName name) {
1715                mName = name;
1716            }
1717
1718            @Override
1719            protected Integer doInBackground(Void... params) {
1720                synchronized(mStartLock) {
1721                    if (isCancelled()) {
1722                        return null;
1723                    }
1724
1725                    try {
1726                        mService.setCallback(getCallerIdentity(), mCallback);
1727
1728                        if (mParams.getString(Engine.KEY_PARAM_LANGUAGE) == null) {
1729                            String[] defaultLanguage = mService.getClientDefaultLanguage();
1730                            mParams.putString(Engine.KEY_PARAM_LANGUAGE, defaultLanguage[0]);
1731                            mParams.putString(Engine.KEY_PARAM_COUNTRY, defaultLanguage[1]);
1732                            mParams.putString(Engine.KEY_PARAM_VARIANT, defaultLanguage[2]);
1733                        }
1734
1735                        Log.i(TAG, "Set up connection to " + mName);
1736                        return SUCCESS;
1737                    } catch (RemoteException re) {
1738                        Log.e(TAG, "Error connecting to service, setCallback() failed");
1739                        return ERROR;
1740                    }
1741                }
1742            }
1743
1744            @Override
1745            protected void onPostExecute(Integer result) {
1746                synchronized(mStartLock) {
1747                    if (mOnSetupConnectionAsyncTask == this) {
1748                        mOnSetupConnectionAsyncTask = null;
1749                    }
1750                    mEstablished = true;
1751                    dispatchOnInit(result);
1752                }
1753            }
1754        }
1755
1756        @Override
1757        public void onServiceConnected(ComponentName name, IBinder service) {
1758            synchronized(mStartLock) {
1759                mConnectingServiceConnection = null;
1760
1761                Log.i(TAG, "Connected to " + name);
1762
1763                if (mOnSetupConnectionAsyncTask != null) {
1764                    mOnSetupConnectionAsyncTask.cancel(false);
1765                }
1766
1767                mService = ITextToSpeechService.Stub.asInterface(service);
1768                mServiceConnection = Connection.this;
1769
1770                mEstablished = false;
1771                mOnSetupConnectionAsyncTask = new SetupConnectionAsyncTask(name);
1772                mOnSetupConnectionAsyncTask.execute();
1773            }
1774        }
1775
1776        public IBinder getCallerIdentity() {
1777            return mCallback;
1778        }
1779
1780        /**
1781         * Clear connection-related fields and cancel mOnSetupConnectionAsyncTask if set.
1782         *
1783         * @return true if an in-progress mOnSetupConnectionAsyncTask was cancelled.
1784         */
1785        private boolean clearServiceConnection() {
1786            synchronized(mStartLock) {
1787                boolean result = false;
1788                if (mOnSetupConnectionAsyncTask != null) {
1789                    result = mOnSetupConnectionAsyncTask.cancel(false);
1790                    mOnSetupConnectionAsyncTask = null;
1791                }
1792
1793                mService = null;
1794                // If this is the active connection, clear it
1795                if (mServiceConnection == this) {
1796                    mServiceConnection = null;
1797                }
1798                return result;
1799            }
1800        }
1801
1802        @Override
1803        public void onServiceDisconnected(ComponentName name) {
1804            Log.i(TAG, "Asked to disconnect from " + name);
1805            if (clearServiceConnection()) {
1806                /* Protect against a rare case where the engine dies just after a
1807                 * successful connection and onServiceDisconnected is processed before
1808                 * SetupConnectionAsyncTask.onPostExecute. Since clearServiceConnection()
1809                 * cancelled the task, onPostExecute will never run, so dispatch the
1810                 * ERROR init result to the client here.
1811                 */
1812                dispatchOnInit(ERROR);
1813            }
1814        }
1815
1816        public void disconnect() {
1817            mContext.unbindService(this);
1818            clearServiceConnection();
1819        }
1820
1821        public boolean isEstablished() {
1822            return mService != null && mEstablished;
1823        }
1824
1825        public <R> R runAction(Action<R> action, R errorResult, String method,
1826                boolean reconnect, boolean onlyEstablishedConnection) {
1827            synchronized (mStartLock) {
1828                try {
1829                    if (mService == null) {
1830                        Log.w(TAG, method + " failed: not connected to TTS engine");
1831                        return errorResult;
1832                    }
1833                    if (onlyEstablishedConnection && !isEstablished()) {
1834                        Log.w(TAG, method + " failed: TTS engine connection not fully set up");
1835                        return errorResult;
1836                    }
1837                    return action.run(mService);
1838                } catch (RemoteException ex) {
1839                    Log.e(TAG, method + " failed", ex);
1840                    if (reconnect) {
1841                        disconnect();
1842                        initTts();
1843                    }
1844                    return errorResult;
1845                }
1846            }
1847        }
1848    }
1849
1850    private interface Action<R> {
1851        R run(ITextToSpeechService service) throws RemoteException;
1852    }
1853
1854    /**
1855     * Information about an installed text-to-speech engine.
1856     *
1857     * @see TextToSpeech#getEngines
1858     */
1859    public static class EngineInfo {
1860        /**
1861         * Engine package name.
1862         */
1863        public String name;
1864        /**
1865         * Localized label for the engine.
1866         */
1867        public String label;
1868        /**
1869         * Icon for the engine.
1870         */
1871        public int icon;
1872        /**
1873         * Whether this engine is a part of the system
1874         * image.
1875         *
1876         * @hide
1877         */
1878        public boolean system;
1879        /**
1880         * The priority the engine declares for the intent filter
1881         * {@code android.intent.action.TTS_SERVICE}.
1882         *
1883         * @hide
1884         */
1885        public int priority;
1886
1887        @Override
1888        public String toString() {
1889            return "EngineInfo{name=" + name + "}";
1890        }
1891
1892    }
1893
1894    /**
1895     * Limit on the length of the input string passed to speak and synthesizeToFile.
1896     *
     * @return The maximum number of characters allowed in a single speech request.
1897     * @see #speak
1898     * @see #synthesizeToFile
1899     */
1900    public static int getMaxSpeechInputLength() {
1901        return 4000;
1902    }
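
    // Illustrative sketch, not part of the original source: inputs longer than the limit may
    // be rejected by the service, so clients can truncate (or split) the text up front. The
    // helper name is hypothetical.
    private static void exampleSpeakTruncated(TextToSpeech tts, String text) {
        int limit = getMaxSpeechInputLength();
        String clipped = text.length() > limit ? text.substring(0, limit) : text;
        tts.speak(clipped, QUEUE_FLUSH, null);
    }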
1903}
1904