TextToSpeechService.java revision b0cde2cabaab13bdc20dbbf1806b96760e666e5c
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 * use this file except in compliance with the License. You may obtain a copy of 6 * the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 * License for the specific language governing permissions and limitations under 14 * the License. 15 */ 16package android.speech.tts; 17 18import android.annotation.NonNull; 19import android.app.Service; 20import android.content.Intent; 21import android.media.AudioAttributes; 22import android.media.AudioManager; 23import android.net.Uri; 24import android.os.Binder; 25import android.os.Bundle; 26import android.os.Handler; 27import android.os.HandlerThread; 28import android.os.IBinder; 29import android.os.Looper; 30import android.os.Message; 31import android.os.MessageQueue; 32import android.os.ParcelFileDescriptor; 33import android.os.RemoteCallbackList; 34import android.os.RemoteException; 35import android.provider.Settings; 36import android.speech.tts.TextToSpeech.Engine; 37import android.text.TextUtils; 38import android.util.Log; 39 40import java.io.FileOutputStream; 41import java.io.IOException; 42import java.util.ArrayList; 43import java.util.HashMap; 44import java.util.HashSet; 45import java.util.List; 46import java.util.Locale; 47import java.util.MissingResourceException; 48import java.util.Set; 49 50 51/** 52 * Abstract base class for TTS engine implementations. 
The following methods
 * need to be implemented:
 * <ul>
 * <li>{@link #onIsLanguageAvailable}</li>
 * <li>{@link #onLoadLanguage}</li>
 * <li>{@link #onGetLanguage}</li>
 * <li>{@link #onSynthesizeText}</li>
 * <li>{@link #onStop}</li>
 * </ul>
 * The first three deal primarily with language management, and are used to
 * query the engine for its support for a given language and indicate to it
 * that requests in a given language are imminent.
 *
 * {@link #onSynthesizeText} is central to the engine implementation. The
 * implementation should synthesize text as per the request parameters and
 * return synthesized data via the supplied callback. This class and its helpers
 * will then consume that data, which might mean queuing it for playback or writing
 * it to a file or similar. All calls to this method will be on a single thread,
 * which will be different from the main thread of the service. Synthesis must be
 * synchronous which means the engine must NOT hold on to the callback or call any
 * methods on it after the method returns.
 *
 * {@link #onStop} tells the engine that it should stop
 * all ongoing synthesis, if any. Any pending data from the current synthesis
 * will be discarded.
 *
 * {@link #onGetLanguage} is not required as of JELLYBEAN_MR2 (API 18) and later, it is only
 * called on earlier versions of Android.
 *
 * API Level 20 adds support for Voice objects. Voices are an abstraction that allow the TTS
 * service to expose multiple backends for a single locale. Each one of them can have a different
 * feature set.
In order to fully take advantage of voices, an engine should implement
 * the following methods:
 * <ul>
 * <li>{@link #onGetVoices()}</li>
 * <li>{@link #onIsValidVoiceName(String)}</li>
 * <li>{@link #onLoadVoice(String)}</li>
 * <li>{@link #onGetDefaultVoiceNameFor(String, String, String)}</li>
 * </ul>
 * The first three methods are siblings of the {@link #onGetLanguage},
 * {@link #onIsLanguageAvailable} and {@link #onLoadLanguage} methods. The last one,
 * {@link #onGetDefaultVoiceNameFor(String, String, String)} is a link between locale and voice
 * based methods. Since API level 21 {@link TextToSpeech#setLanguage} is implemented by
 * calling {@link TextToSpeech#setVoice} with the voice returned by
 * {@link #onGetDefaultVoiceNameFor(String, String, String)}.
 *
 * If the client uses a voice instead of a locale, {@link SynthesisRequest} will contain the
 * requested voice name.
 *
 * The default implementations of Voice-related methods implement them using the
 * pre-existing locale-based implementation.
 */
public abstract class TextToSpeechService extends Service {

    private static final boolean DBG = false;
    private static final String TAG = "TextToSpeechService";

    private static final String SYNTH_THREAD_NAME = "SynthThread";

    // Handler bound to the dedicated synthesis thread; all speech items are
    // serialized through it.
    private SynthHandler mSynthHandler;
    // A thread and its associated handler for playing back any audio
    // associated with this TTS engine. Will handle all requests except synthesis
    // to file requests, which occur on the synthesis thread.
    @NonNull private AudioPlaybackHandler mAudioPlaybackHandler;
    private TtsEngines mEngineHelper;

    // Remote callbacks registered by clients, keyed by their binder identity.
    private CallbackMap mCallbacks;
    private String mPackageName;

    private final Object mVoicesInfoLock = new Object();

    @Override
    public void onCreate() {
        if (DBG) Log.d(TAG, "onCreate()");
        super.onCreate();

        // Start the dedicated synthesis thread and attach its handler.
        SynthThread synthThread = new SynthThread();
        synthThread.start();
        mSynthHandler = new SynthHandler(synthThread.getLooper());

        mAudioPlaybackHandler = new AudioPlaybackHandler();
        mAudioPlaybackHandler.start();

        mEngineHelper = new TtsEngines(this);

        mCallbacks = new CallbackMap();

        mPackageName = getApplicationInfo().packageName;

        String[] defaultLocale = getSettingsLocale();

        // Load default language
        onLoadLanguage(defaultLocale[0], defaultLocale[1], defaultLocale[2]);
    }

    @Override
    public void onDestroy() {
        if (DBG) Log.d(TAG, "onDestroy()");

        // Tell the synthesizer to stop
        mSynthHandler.quit();
        // Tell the audio playback thread to stop.
        mAudioPlaybackHandler.quit();
        // Unregister all callbacks.
        mCallbacks.kill();

        super.onDestroy();
    }

    /**
     * Checks whether the engine supports a given language.
     *
     * Can be called on multiple threads.
     *
     * Its return values HAVE to be consistent with onLoadLanguage.
     *
     * @param lang ISO-3 language code.
     * @param country ISO-3 country code. May be empty or null.
     * @param variant Language variant. May be empty or null.
     * @return Code indicating the support status for the locale.
     *         One of {@link TextToSpeech#LANG_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_VAR_AVAILABLE},
     *         {@link TextToSpeech#LANG_MISSING_DATA}
     *         {@link TextToSpeech#LANG_NOT_SUPPORTED}.
     */
    protected abstract int onIsLanguageAvailable(String lang, String country, String variant);

    /**
     * Returns the language, country and variant currently being used by the TTS engine.
     *
     * This method will be called only on Android 4.2 and before (API <= 17). In later versions
     * this method is not called by the Android TTS framework.
     *
     * Can be called on multiple threads.
     *
     * @return A 3-element array, containing language (ISO 3-letter code),
     *         country (ISO 3-letter code) and variant used by the engine.
     *         The country and variant may be {@code ""}. If country is empty, then variant must
     *         be empty too.
     * @see Locale#getISO3Language()
     * @see Locale#getISO3Country()
     * @see Locale#getVariant()
     */
    protected abstract String[] onGetLanguage();

    /**
     * Notifies the engine that it should load a speech synthesis language. There is no guarantee
     * that this method is always called before the language is used for synthesis. It is merely
     * a hint to the engine that it will probably get some synthesis requests for this language
     * at some point in the future.
     *
     * Can be called on multiple threads.
     * In <= Android 4.2 (<= API 17) can be called on main and service binder threads.
     * In > Android 4.2 (> API 17) can be called on main and synthesis threads.
     *
     * @param lang ISO-3 language code.
     * @param country ISO-3 country code. May be empty or null.
     * @param variant Language variant. May be empty or null.
     * @return Code indicating the support status for the locale.
     *         One of {@link TextToSpeech#LANG_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_VAR_AVAILABLE},
     *         {@link TextToSpeech#LANG_MISSING_DATA}
     *         {@link TextToSpeech#LANG_NOT_SUPPORTED}.
     */
    protected abstract int onLoadLanguage(String lang, String country, String variant);

    /**
     * Notifies the service that it should stop any in-progress speech synthesis.
     * This method can be called even if no speech synthesis is currently in progress.
     *
     * Can be called on multiple threads, but not on the synthesis thread.
     */
    protected abstract void onStop();

    /**
     * Tells the service to synthesize speech from the given text. This method should block until
     * the synthesis is finished. Called on the synthesis thread.
     *
     * @param request The synthesis request.
     * @param callback The callback that the engine must use to make data available for playback or
     *         for writing to a file.
     */
    protected abstract void onSynthesizeText(SynthesisRequest request, SynthesisCallback callback);

    /**
     * Queries the service for a set of features supported for a given language.
     *
     * Can be called on multiple threads.
     *
     * @param lang ISO-3 language code.
     * @param country ISO-3 country code. May be empty or null.
     * @param variant Language variant. May be empty or null.
     * @return A list of features supported for the given language.
     */
    protected Set<String> onGetFeaturesForLanguage(String lang, String country, String variant) {
        return new HashSet<String>();
    }

    /**
     * Maps a locale to the most specific availability status it should report:
     * language-only, language+country, or language+country+variant, depending on
     * which of the locale's fields are non-empty.
     */
    private int getExpectedLanguageAvailableStatus(Locale locale) {
        int expectedStatus = TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE;
        if (locale.getVariant().isEmpty()) {
            if (locale.getCountry().isEmpty()) {
                expectedStatus = TextToSpeech.LANG_AVAILABLE;
            } else {
                expectedStatus = TextToSpeech.LANG_COUNTRY_AVAILABLE;
            }
        }
        return expectedStatus;
    }

    /**
     * Queries the service for a set of supported voices.
     *
     * Can be called on multiple threads.
     *
     * The default implementation tries to enumerate all available locales, pass them to
     * {@link #onIsLanguageAvailable(String, String, String)} and create Voice instances (using
     * the locale's BCP-47 language tag as the voice name) for the ones that are supported.
     * Note, that this implementation is suitable only for engines that don't have multiple voices
     * for a single locale. Also, this implementation won't work with Locales not listed in the
     * set returned by the {@link Locale#getAvailableLocales()} method.
     *
     * @return A list of voices supported.
     */
    public List<Voice> onGetVoices() {
        // Enumerate all locales and check if they are available
        ArrayList<Voice> voices = new ArrayList<Voice>();
        for (Locale locale : Locale.getAvailableLocales()) {
            int expectedStatus = getExpectedLanguageAvailableStatus(locale);
            try {
                int localeStatus = onIsLanguageAvailable(locale.getISO3Language(),
                        locale.getISO3Country(), locale.getVariant());
                if (localeStatus != expectedStatus) {
                    continue;
                }
            } catch (MissingResourceException e) {
                // Ignore locale without iso 3 codes
                continue;
            }
            Set<String> features = onGetFeaturesForLanguage(locale.getISO3Language(),
                    locale.getISO3Country(), locale.getVariant());
            String voiceName = onGetDefaultVoiceNameFor(locale.getISO3Language(),
                    locale.getISO3Country(), locale.getVariant());
            voices.add(new Voice(voiceName, locale, Voice.QUALITY_NORMAL,
                    Voice.LATENCY_NORMAL, false, features));
        }
        return voices;
    }

    /**
     * Return a name of the default voice for a given locale.
     *
     * This method provides a mapping between locales and available voices. This method is
     * used in {@link TextToSpeech#setLanguage}, which calls this method and then calls
     * {@link TextToSpeech#setVoice} with the voice returned by this method.
     *
     * Also, it's used by {@link TextToSpeech#getDefaultVoice()} to find a default voice for
     * the default locale.
     *
     * @param lang ISO-3 language code.
     * @param country ISO-3 country code. May be empty or null.
     * @param variant Language variant. May be empty or null.
     *
     * @return A name of the default voice for a given locale.
     */
    public String onGetDefaultVoiceNameFor(String lang, String country, String variant) {
        int localeStatus = onIsLanguageAvailable(lang, country, variant);
        Locale iso3Locale = null;
        switch (localeStatus) {
            case TextToSpeech.LANG_AVAILABLE:
                iso3Locale = new Locale(lang);
                break;
            case TextToSpeech.LANG_COUNTRY_AVAILABLE:
                iso3Locale = new Locale(lang, country);
                break;
            case TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE:
                iso3Locale = new Locale(lang, country, variant);
                break;
            default:
                // Missing data or unsupported language: no default voice.
                return null;
        }
        // The voice name is the BCP-47 tag of the normalized locale, but only if the
        // engine confirms it as a valid voice.
        Locale properLocale = TtsEngines.normalizeTTSLocale(iso3Locale);
        String voiceName = properLocale.toLanguageTag();
        if (onIsValidVoiceName(voiceName) == TextToSpeech.SUCCESS) {
            return voiceName;
        } else {
            return null;
        }
    }

    /**
     * Notifies the engine that it should load a speech synthesis voice. There is no guarantee
     * that this method is always called before the voice is used for synthesis. It is merely
     * a hint to the engine that it will probably get some synthesis requests for this voice
     * at some point in the future.
     *
     * Will be called only on synthesis thread.
     *
     * The default implementation creates a Locale from the voice name (by interpreting the name as
     * a BCP-47 tag for the locale), and passes it to
     * {@link #onLoadLanguage(String, String, String)}.
     *
     * @param voiceName Name of the voice.
     * @return {@link TextToSpeech#ERROR} or {@link TextToSpeech#SUCCESS}.
     */
    public int onLoadVoice(String voiceName) {
        Locale locale = Locale.forLanguageTag(voiceName);
        if (locale == null) {
            return TextToSpeech.ERROR;
        }
        int expectedStatus = getExpectedLanguageAvailableStatus(locale);
        try {
            int localeStatus = onIsLanguageAvailable(locale.getISO3Language(),
                    locale.getISO3Country(), locale.getVariant());
            if (localeStatus != expectedStatus) {
                return TextToSpeech.ERROR;
            }
            onLoadLanguage(locale.getISO3Language(),
                    locale.getISO3Country(), locale.getVariant());
            return TextToSpeech.SUCCESS;
        } catch (MissingResourceException e) {
            // Locale has no ISO 3 codes; treat the voice as unsupported.
            return TextToSpeech.ERROR;
        }
    }

    /**
     * Checks whether the engine supports a voice with a given name.
     *
     * Can be called on multiple threads.
     *
     * The default implementation treats the voice name as a language tag, creating a Locale from
     * the voice name, and passes it to {@link #onIsLanguageAvailable(String, String, String)}.
     *
     * @param voiceName Name of the voice.
     * @return {@link TextToSpeech#ERROR} or {@link TextToSpeech#SUCCESS}.
     */
    public int onIsValidVoiceName(String voiceName) {
        Locale locale = Locale.forLanguageTag(voiceName);
        if (locale == null) {
            return TextToSpeech.ERROR;
        }
        int expectedStatus = getExpectedLanguageAvailableStatus(locale);
        try {
            int localeStatus = onIsLanguageAvailable(locale.getISO3Language(),
                    locale.getISO3Country(), locale.getVariant());
            if (localeStatus != expectedStatus) {
                return TextToSpeech.ERROR;
            }
            return TextToSpeech.SUCCESS;
        } catch (MissingResourceException e) {
            // Locale has no ISO 3 codes; treat the voice as unsupported.
            return TextToSpeech.ERROR;
        }
    }

    // User's default speech rate from secure settings, or the engine default.
    private int getDefaultSpeechRate() {
        return getSecureSettingInt(Settings.Secure.TTS_DEFAULT_RATE, Engine.DEFAULT_RATE);
    }

    // User's default pitch from secure settings, or the engine default.
    private int getDefaultPitch() {
        return getSecureSettingInt(Settings.Secure.TTS_DEFAULT_PITCH, Engine.DEFAULT_PITCH);
    }

    // Preferred locale for this engine, in the legacy {lang, country, variant} string format.
    private String[] getSettingsLocale() {
        final Locale locale = mEngineHelper.getLocalePrefForEngine(mPackageName);
        return TtsEngines.toOldLocaleStringFormat(locale);
    }

    private int getSecureSettingInt(String name, int defaultValue) {
        return Settings.Secure.getInt(getContentResolver(), name, defaultValue);
    }

    /**
     * Synthesizer thread. This thread is used to run {@link SynthHandler}.
     */
    private class SynthThread extends HandlerThread implements MessageQueue.IdleHandler {

        private boolean mFirstIdle = true;

        public SynthThread() {
            super(SYNTH_THREAD_NAME, android.os.Process.THREAD_PRIORITY_DEFAULT);
        }

        @Override
        protected void onLooperPrepared() {
            getLooper().getQueue().addIdleHandler(this);
        }

        @Override
        public boolean queueIdle() {
            // Skip the first idle callback (thread startup); after that an idle queue
            // means all pending speech items have been processed.
            if (mFirstIdle) {
                mFirstIdle = false;
            } else {
                broadcastTtsQueueProcessingCompleted();
            }
            return true;
        }

        private void broadcastTtsQueueProcessingCompleted() {
            Intent i = new Intent(TextToSpeech.ACTION_TTS_QUEUE_PROCESSING_COMPLETED);
            if (DBG) Log.d(TAG, "Broadcasting: " + i);
            sendBroadcast(i);
        }
    }

    private class SynthHandler extends Handler {
        private SpeechItem mCurrentSpeechItem = null;

        // When a message with QUEUE_FLUSH arrives we add the caller identity to the List and when a
        // message with QUEUE_DESTROY arrives we increment mFlushAll. Then a message is added to the
        // handler queue that removes the caller identity from the list and decrements the mFlushAll
        // counter. This is so that when a message is processed and the caller identity is in the
        // list or mFlushAll is not zero, we know that the message should be flushed.
        // It's important that mFlushedObjects is a List and not a Set, and that mFlushAll is an
        // int and not a bool. This is because when multiple messages arrive with QUEUE_FLUSH or
        // QUEUE_DESTROY, we want to keep flushing messages until we arrive at the last QUEUE_FLUSH
        // or QUEUE_DESTROY message.
        private List<Object> mFlushedObjects = new ArrayList<>();
        private int mFlushAll = 0;

        public SynthHandler(Looper looper) {
            super(looper);
        }

        // Begins flushing items from callerIdentity, or from all callers when null.
        private void startFlushingSpeechItems(Object callerIdentity) {
            synchronized (mFlushedObjects) {
                if (callerIdentity == null) {
                    mFlushAll += 1;
                } else {
                    mFlushedObjects.add(callerIdentity);
                }
            }
        }

        // Ends a flush previously started with the same callerIdentity (or null).
        private void endFlushingSpeechItems(Object callerIdentity) {
            synchronized (mFlushedObjects) {
                if (callerIdentity == null) {
                    mFlushAll -= 1;
                } else {
                    mFlushedObjects.remove(callerIdentity);
                }
            }
        }

        // True if the item was enqueued before a still-active flush and must be skipped.
        private boolean isFlushed(SpeechItem speechItem) {
            synchronized (mFlushedObjects) {
                return mFlushAll > 0 || mFlushedObjects.contains(speechItem.getCallerIdentity());
            }
        }

        private synchronized SpeechItem getCurrentSpeechItem() {
            return mCurrentSpeechItem;
        }

        // Swaps in the new current item (may be null) and returns the previous one.
        private synchronized SpeechItem setCurrentSpeechItem(SpeechItem speechItem) {
            SpeechItem old = mCurrentSpeechItem;
            mCurrentSpeechItem = speechItem;
            return old;
        }

        // Removes and returns the current item only if it belongs to callerIdentity.
        private synchronized SpeechItem maybeRemoveCurrentSpeechItem(Object callerIdentity) {
            if (mCurrentSpeechItem != null &&
                    (mCurrentSpeechItem.getCallerIdentity() == callerIdentity)) {
                SpeechItem current = mCurrentSpeechItem;
                mCurrentSpeechItem = null;
                return current;
            }

            return null;
        }

        public boolean isSpeaking() {
            return getCurrentSpeechItem() != null;
        }

        public void quit() {
            // Don't process any more speech items
            getLooper().quit();
            // Stop the current speech item
            SpeechItem current = setCurrentSpeechItem(null);
            if (current != null) {
                current.stop();
            }
            // The AudioPlaybackHandler will be destroyed by the caller.
        }

        /**
         * Adds a speech item to the queue.
         *
         * Called on a service binder thread.
         */
        public int enqueueSpeechItem(int queueMode, final SpeechItem speechItem) {
            UtteranceProgressDispatcher utterenceProgress = null;
            if (speechItem instanceof UtteranceProgressDispatcher) {
                utterenceProgress = (UtteranceProgressDispatcher) speechItem;
            }

            // Reject structurally invalid requests up front.
            if (!speechItem.isValid()) {
                if (utterenceProgress != null) {
                    utterenceProgress.dispatchOnError(
                            TextToSpeech.ERROR_INVALID_REQUEST);
                }
                return TextToSpeech.ERROR;
            }

            if (queueMode == TextToSpeech.QUEUE_FLUSH) {
                stopForApp(speechItem.getCallerIdentity());
            } else if (queueMode == TextToSpeech.QUEUE_DESTROY) {
                stopAll();
            }
            Runnable runnable = new Runnable() {
                @Override
                public void run() {
                    // If a flush arrived after this item was enqueued, stop it
                    // instead of playing it.
                    if (isFlushed(speechItem)) {
                        speechItem.stop();
                    } else {
                        setCurrentSpeechItem(speechItem);
                        speechItem.play();
                        setCurrentSpeechItem(null);
                    }
                }
            };
            Message msg = Message.obtain(this, runnable);

            // The obj is used to remove all callbacks from the given app in
            // stopForApp(String).
            //
            // Note that this string is interned, so the == comparison works.
            msg.obj = speechItem.getCallerIdentity();

            if (sendMessage(msg)) {
                return TextToSpeech.SUCCESS;
            } else {
                Log.w(TAG, "SynthThread has quit");
                if (utterenceProgress != null) {
                    utterenceProgress.dispatchOnError(TextToSpeech.ERROR_SERVICE);
                }
                return TextToSpeech.ERROR;
            }
        }

        /**
         * Stops all speech output and removes any utterances still in the queue for
         * the calling app.
         *
         * Called on a service binder thread.
         */
        public int stopForApp(final Object callerIdentity) {
            if (callerIdentity == null) {
                return TextToSpeech.ERROR;
            }

            // Flush pending messages from callerIdentity
            startFlushingSpeechItems(callerIdentity);

            // This stops writing data to the file / or publishing
            // items to the audio playback handler.
            //
            // Note that the current speech item must be removed only if it
            // belongs to the callingApp, else the item will be "orphaned" and
            // not stopped correctly if a stop request comes along for the item
            // from the app it belongs to.
            SpeechItem current = maybeRemoveCurrentSpeechItem(callerIdentity);
            if (current != null) {
                current.stop();
            }

            // Remove any enqueued audio too.
            mAudioPlaybackHandler.stopForApp(callerIdentity);

            // Stop flushing pending messages. This runnable runs after every message
            // that was already queued, so everything enqueued before it gets flushed.
            Runnable runnable = new Runnable() {
                @Override
                public void run() {
                    endFlushingSpeechItems(callerIdentity);
                }
            };
            sendMessage(Message.obtain(this, runnable));
            return TextToSpeech.SUCCESS;
        }

        public int stopAll() {
            // Order to flush pending messages
            startFlushingSpeechItems(null);

            // Stop the current speech item unconditionally.
            SpeechItem current = setCurrentSpeechItem(null);
            if (current != null) {
                current.stop();
            }
            // Remove all pending playback as well.
            mAudioPlaybackHandler.stop();

            // Message to stop flushing pending messages
            Runnable runnable = new Runnable() {
                @Override
                public void run() {
                    endFlushingSpeechItems(null);
                }
            };
            sendMessage(Message.obtain(this, runnable));

            return TextToSpeech.SUCCESS;
        }
    }

    /** Dispatches utterance progress events back to the requesting client. */
    interface UtteranceProgressDispatcher {
        void dispatchOnStop();

        void dispatchOnSuccess();

        void dispatchOnStart();

        void dispatchOnError(int errorCode);

        void dispatchOnBeginSynthesis(int sampleRateInHz, int audioFormat, int channelCount);

        void dispatchOnAudioAvailable(byte[] audio);
    }

    /** Set of parameters affecting audio output. */
    static class AudioOutputParams {
        /**
         * Audio session identifier. May be used to associate audio playback with one of the
         * {@link android.media.audiofx.AudioEffect} objects.
If not specified by client,
         * it should be equal to {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
         */
        public final int mSessionId;

        /**
         * Volume, in the range [0.0f, 1.0f]. The default value is
         * {@link TextToSpeech.Engine#DEFAULT_VOLUME} (1.0f).
         */
        public final float mVolume;

        /**
         * Left/right position of the audio, in the range [-1.0f, 1.0f].
         * The default value is {@link TextToSpeech.Engine#DEFAULT_PAN} (0.0f).
         */
        public final float mPan;

        /**
         * Audio attributes, set by {@link TextToSpeech#setAudioAttributes}
         * or created from the value of {@link TextToSpeech.Engine#KEY_PARAM_STREAM}.
         */
        public final AudioAttributes mAudioAttributes;

        /** Create AudioOutputParams with default values */
        AudioOutputParams() {
            mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
            mVolume = Engine.DEFAULT_VOLUME;
            mPan = Engine.DEFAULT_PAN;
            mAudioAttributes = null;
        }

        AudioOutputParams(int sessionId, float volume, float pan,
                AudioAttributes audioAttributes) {
            mSessionId = sessionId;
            mVolume = volume;
            mPan = pan;
            mAudioAttributes = audioAttributes;
        }

        /** Create AudioOutputParams from a {@link SynthesisRequest#getParams()} bundle */
        static AudioOutputParams createFromParamsBundle(Bundle paramsBundle, boolean isSpeech) {
            if (paramsBundle == null) {
                return new AudioOutputParams();
            }

            // Prefer explicitly-set AudioAttributes; otherwise derive them from the
            // legacy stream-type parameter.
            AudioAttributes audioAttributes =
                    (AudioAttributes) paramsBundle.getParcelable(
                            Engine.KEY_PARAM_AUDIO_ATTRIBUTES);
            if (audioAttributes == null) {
                int streamType = paramsBundle.getInt(
                        Engine.KEY_PARAM_STREAM, Engine.DEFAULT_STREAM);
                audioAttributes = (new AudioAttributes.Builder())
                        .setLegacyStreamType(streamType)
                        .setContentType((isSpeech ?
                                AudioAttributes.CONTENT_TYPE_SPEECH :
                                AudioAttributes.CONTENT_TYPE_SONIFICATION))
                        .build();
            }

            return new AudioOutputParams(
                    paramsBundle.getInt(
                            Engine.KEY_PARAM_SESSION_ID,
                            AudioManager.AUDIO_SESSION_ID_GENERATE),
                    paramsBundle.getFloat(
                            Engine.KEY_PARAM_VOLUME,
                            Engine.DEFAULT_VOLUME),
                    paramsBundle.getFloat(
                            Engine.KEY_PARAM_PAN,
                            Engine.DEFAULT_PAN),
                    audioAttributes);
        }
    }


    /**
     * An item in the synth thread queue.
     */
    private abstract class SpeechItem {
        private final Object mCallerIdentity;
        private final int mCallerUid;
        private final int mCallerPid;
        // Guarded by "this"; enforce the call-once contracts of play() and stop().
        private boolean mStarted = false;
        private boolean mStopped = false;

        public SpeechItem(Object caller, int callerUid, int callerPid) {
            mCallerIdentity = caller;
            mCallerUid = callerUid;
            mCallerPid = callerPid;
        }

        public Object getCallerIdentity() {
            return mCallerIdentity;
        }

        public int getCallerUid() {
            return mCallerUid;
        }

        public int getCallerPid() {
            return mCallerPid;
        }

        /**
         * Checks whether the item is valid. If this method returns false, the item should not
         * be played.
         */
        public abstract boolean isValid();

        /**
         * Plays the speech item. Blocks until playback is finished.
         * Must not be called more than once.
         *
         * Only called on the synthesis thread.
         */
        public void play() {
            synchronized (this) {
                if (mStarted) {
                    throw new IllegalStateException("play() called twice");
                }
                mStarted = true;
            }
            playImpl();
        }

        protected abstract void playImpl();

        /**
         * Stops the speech item.
         * Must not be called more than once.
         *
         * Can be called on multiple threads, but not on the synthesis thread.
         */
        public void stop() {
            synchronized (this) {
                if (mStopped) {
                    throw new IllegalStateException("stop() called twice");
                }
                mStopped = true;
            }
            stopImpl();
        }

        protected abstract void stopImpl();

        protected synchronized boolean isStopped() {
            return mStopped;
        }

        protected synchronized boolean isStarted() {
            return mStarted;
        }
    }

    /**
     * An item in the synth thread queue that processes an utterance (and calls back to the
     * client about progress).
     */
    private abstract class UtteranceSpeechItem extends SpeechItem
            implements UtteranceProgressDispatcher {

        public UtteranceSpeechItem(Object caller, int callerUid, int callerPid) {
            super(caller, callerUid, callerPid);
        }

        // Each dispatch below is a no-op when the item has no utterance id, since the
        // client would have nothing to correlate the event with.
        @Override
        public void dispatchOnSuccess() {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnSuccess(getCallerIdentity(), utteranceId);
            }
        }

        @Override
        public void dispatchOnStop() {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnStop(getCallerIdentity(), utteranceId, isStarted());
            }
        }

        @Override
        public void dispatchOnStart() {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnStart(getCallerIdentity(), utteranceId);
            }
        }

        @Override
        public void dispatchOnError(int errorCode) {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnError(getCallerIdentity(), utteranceId, errorCode);
            }
        }

        @Override
        public void dispatchOnBeginSynthesis(int sampleRateInHz, int audioFormat, int channelCount) {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnBeginSynthesis(getCallerIdentity(), utteranceId, sampleRateInHz, audioFormat, channelCount);
            }
        }

        @Override
        public void dispatchOnAudioAvailable(byte[] audio) {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnAudioAvailable(getCallerIdentity(), utteranceId, audio);
            }
        }

        abstract public String getUtteranceId();

        String getStringParam(Bundle params, String key, String defaultValue) {
            return params == null ? defaultValue : params.getString(key, defaultValue);
        }

        int getIntParam(Bundle params, String key, int defaultValue) {
            return params == null ? defaultValue : params.getInt(key, defaultValue);
        }

        float getFloatParam(Bundle params, String key, float defaultValue) {
            return params == null ? defaultValue : params.getFloat(key, defaultValue);
        }
    }

    /**
     * Synthesis parameters are kept in a single Bundle passed as parameter. This class allows
     * subclasses to access them conveniently.
     */
    private abstract class UtteranceSpeechItemWithParams extends UtteranceSpeechItem {
        protected final Bundle mParams;
        protected final String mUtteranceId;

        UtteranceSpeechItemWithParams(
                Object callerIdentity,
                int callerUid,
                int callerPid,
                Bundle params,
                String utteranceId) {
            super(callerIdentity, callerUid, callerPid);
            mParams = params;
            mUtteranceId = utteranceId;
        }

        boolean hasLanguage() {
            return !TextUtils.isEmpty(getStringParam(mParams, Engine.KEY_PARAM_LANGUAGE, null));
        }

        int getSpeechRate() {
            return getIntParam(mParams, Engine.KEY_PARAM_RATE, getDefaultSpeechRate());
        }

        int getPitch() {
            return getIntParam(mParams, Engine.KEY_PARAM_PITCH, getDefaultPitch());
        }

        @Override
        public String getUtteranceId() {
            return mUtteranceId;
        }

        AudioOutputParams getAudioParams() {
            return AudioOutputParams.createFromParamsBundle(mParams, true);
        }
    }

    class SynthesisSpeechItem extends UtteranceSpeechItemWithParams {
        //
Never null. 947 private final CharSequence mText; 948 private final SynthesisRequest mSynthesisRequest; 949 private final String[] mDefaultLocale; 950 // Non null after synthesis has started, and all accesses 951 // guarded by 'this'. 952 private AbstractSynthesisCallback mSynthesisCallback; 953 private final EventLogger mEventLogger; 954 private final int mCallerUid; 955 956 public SynthesisSpeechItem( 957 Object callerIdentity, 958 int callerUid, 959 int callerPid, 960 Bundle params, 961 String utteranceId, 962 CharSequence text) { 963 super(callerIdentity, callerUid, callerPid, params, utteranceId); 964 mText = text; 965 mCallerUid = callerUid; 966 mSynthesisRequest = new SynthesisRequest(mText, mParams); 967 mDefaultLocale = getSettingsLocale(); 968 setRequestParams(mSynthesisRequest); 969 mEventLogger = new EventLogger(mSynthesisRequest, callerUid, callerPid, mPackageName); 970 } 971 972 public CharSequence getText() { 973 return mText; 974 } 975 976 @Override 977 public boolean isValid() { 978 if (mText == null) { 979 Log.e(TAG, "null synthesis text"); 980 return false; 981 } 982 if (mText.length() >= TextToSpeech.getMaxSpeechInputLength()) { 983 Log.w(TAG, "Text too long: " + mText.length() + " chars"); 984 return false; 985 } 986 return true; 987 } 988 989 @Override 990 protected void playImpl() { 991 AbstractSynthesisCallback synthesisCallback; 992 mEventLogger.onRequestProcessingStart(); 993 synchronized (this) { 994 // stop() might have been called before we enter this 995 // synchronized block. 
996 if (isStopped()) { 997 return; 998 } 999 mSynthesisCallback = createSynthesisCallback(); 1000 synthesisCallback = mSynthesisCallback; 1001 } 1002 1003 TextToSpeechService.this.onSynthesizeText(mSynthesisRequest, synthesisCallback); 1004 1005 // Fix for case where client called .start() & .error(), but did not called .done() 1006 if (synthesisCallback.hasStarted() && !synthesisCallback.hasFinished()) { 1007 synthesisCallback.done(); 1008 } 1009 } 1010 1011 protected AbstractSynthesisCallback createSynthesisCallback() { 1012 return new PlaybackSynthesisCallback(getAudioParams(), 1013 mAudioPlaybackHandler, this, getCallerIdentity(), mEventLogger, false); 1014 } 1015 1016 private void setRequestParams(SynthesisRequest request) { 1017 String voiceName = getVoiceName(); 1018 request.setLanguage(getLanguage(), getCountry(), getVariant()); 1019 if (!TextUtils.isEmpty(voiceName)) { 1020 request.setVoiceName(getVoiceName()); 1021 } 1022 request.setSpeechRate(getSpeechRate()); 1023 request.setCallerUid(mCallerUid); 1024 request.setPitch(getPitch()); 1025 } 1026 1027 @Override 1028 protected void stopImpl() { 1029 AbstractSynthesisCallback synthesisCallback; 1030 synchronized (this) { 1031 synthesisCallback = mSynthesisCallback; 1032 } 1033 if (synthesisCallback != null) { 1034 // If the synthesis callback is null, it implies that we haven't 1035 // entered the synchronized(this) block in playImpl which in 1036 // turn implies that synthesis would not have started. 
1037 synthesisCallback.stop(); 1038 TextToSpeechService.this.onStop(); 1039 } else { 1040 dispatchOnStop(); 1041 } 1042 } 1043 1044 private String getCountry() { 1045 if (!hasLanguage()) return mDefaultLocale[1]; 1046 return getStringParam(mParams, Engine.KEY_PARAM_COUNTRY, ""); 1047 } 1048 1049 private String getVariant() { 1050 if (!hasLanguage()) return mDefaultLocale[2]; 1051 return getStringParam(mParams, Engine.KEY_PARAM_VARIANT, ""); 1052 } 1053 1054 public String getLanguage() { 1055 return getStringParam(mParams, Engine.KEY_PARAM_LANGUAGE, mDefaultLocale[0]); 1056 } 1057 1058 public String getVoiceName() { 1059 return getStringParam(mParams, Engine.KEY_PARAM_VOICE_NAME, ""); 1060 } 1061 } 1062 1063 private class SynthesisToFileOutputStreamSpeechItem extends SynthesisSpeechItem { 1064 private final FileOutputStream mFileOutputStream; 1065 1066 public SynthesisToFileOutputStreamSpeechItem( 1067 Object callerIdentity, 1068 int callerUid, 1069 int callerPid, 1070 Bundle params, 1071 String utteranceId, 1072 CharSequence text, 1073 FileOutputStream fileOutputStream) { 1074 super(callerIdentity, callerUid, callerPid, params, utteranceId, text); 1075 mFileOutputStream = fileOutputStream; 1076 } 1077 1078 @Override 1079 protected AbstractSynthesisCallback createSynthesisCallback() { 1080 return new FileSynthesisCallback(mFileOutputStream.getChannel(), this, false); 1081 } 1082 1083 @Override 1084 protected void playImpl() { 1085 dispatchOnStart(); 1086 super.playImpl(); 1087 try { 1088 mFileOutputStream.close(); 1089 } catch(IOException e) { 1090 Log.w(TAG, "Failed to close output file", e); 1091 } 1092 } 1093 } 1094 1095 private class AudioSpeechItem extends UtteranceSpeechItemWithParams { 1096 private final AudioPlaybackQueueItem mItem; 1097 1098 public AudioSpeechItem( 1099 Object callerIdentity, 1100 int callerUid, 1101 int callerPid, 1102 Bundle params, 1103 String utteranceId, 1104 Uri uri) { 1105 super(callerIdentity, callerUid, callerPid, params, 
utteranceId); 1106 mItem = new AudioPlaybackQueueItem(this, getCallerIdentity(), 1107 TextToSpeechService.this, uri, getAudioParams()); 1108 } 1109 1110 @Override 1111 public boolean isValid() { 1112 return true; 1113 } 1114 1115 @Override 1116 protected void playImpl() { 1117 mAudioPlaybackHandler.enqueue(mItem); 1118 } 1119 1120 @Override 1121 protected void stopImpl() { 1122 // Do nothing. 1123 } 1124 1125 @Override 1126 public String getUtteranceId() { 1127 return getStringParam(mParams, Engine.KEY_PARAM_UTTERANCE_ID, null); 1128 } 1129 1130 @Override 1131 AudioOutputParams getAudioParams() { 1132 return AudioOutputParams.createFromParamsBundle(mParams, false); 1133 } 1134 } 1135 1136 private class SilenceSpeechItem extends UtteranceSpeechItem { 1137 private final long mDuration; 1138 private final String mUtteranceId; 1139 1140 public SilenceSpeechItem(Object callerIdentity, int callerUid, int callerPid, 1141 String utteranceId, long duration) { 1142 super(callerIdentity, callerUid, callerPid); 1143 mUtteranceId = utteranceId; 1144 mDuration = duration; 1145 } 1146 1147 @Override 1148 public boolean isValid() { 1149 return true; 1150 } 1151 1152 @Override 1153 protected void playImpl() { 1154 mAudioPlaybackHandler.enqueue(new SilencePlaybackQueueItem( 1155 this, getCallerIdentity(), mDuration)); 1156 } 1157 1158 @Override 1159 protected void stopImpl() { 1160 1161 } 1162 1163 @Override 1164 public String getUtteranceId() { 1165 return mUtteranceId; 1166 } 1167 } 1168 1169 /** 1170 * Call {@link TextToSpeechService#onLoadLanguage} on synth thread. 
1171 */ 1172 private class LoadLanguageItem extends SpeechItem { 1173 private final String mLanguage; 1174 private final String mCountry; 1175 private final String mVariant; 1176 1177 public LoadLanguageItem(Object callerIdentity, int callerUid, int callerPid, 1178 String language, String country, String variant) { 1179 super(callerIdentity, callerUid, callerPid); 1180 mLanguage = language; 1181 mCountry = country; 1182 mVariant = variant; 1183 } 1184 1185 @Override 1186 public boolean isValid() { 1187 return true; 1188 } 1189 1190 @Override 1191 protected void playImpl() { 1192 TextToSpeechService.this.onLoadLanguage(mLanguage, mCountry, mVariant); 1193 } 1194 1195 @Override 1196 protected void stopImpl() { 1197 // No-op 1198 } 1199 } 1200 1201 /** 1202 * Call {@link TextToSpeechService#onLoadLanguage} on synth thread. 1203 */ 1204 private class LoadVoiceItem extends SpeechItem { 1205 private final String mVoiceName; 1206 1207 public LoadVoiceItem(Object callerIdentity, int callerUid, int callerPid, 1208 String voiceName) { 1209 super(callerIdentity, callerUid, callerPid); 1210 mVoiceName = voiceName; 1211 } 1212 1213 @Override 1214 public boolean isValid() { 1215 return true; 1216 } 1217 1218 @Override 1219 protected void playImpl() { 1220 TextToSpeechService.this.onLoadVoice(mVoiceName); 1221 } 1222 1223 @Override 1224 protected void stopImpl() { 1225 // No-op 1226 } 1227 } 1228 1229 1230 @Override 1231 public IBinder onBind(Intent intent) { 1232 if (TextToSpeech.Engine.INTENT_ACTION_TTS_SERVICE.equals(intent.getAction())) { 1233 return mBinder; 1234 } 1235 return null; 1236 } 1237 1238 /** 1239 * Binder returned from {@code #onBind(Intent)}. The methods in this class can be called called 1240 * from several different threads. 1241 */ 1242 // NOTE: All calls that are passed in a calling app are interned so that 1243 // they can be used as message objects (which are tested for equality using ==). 
1244 private final ITextToSpeechService.Stub mBinder = 1245 new ITextToSpeechService.Stub() { 1246 @Override 1247 public int speak( 1248 IBinder caller, 1249 CharSequence text, 1250 int queueMode, 1251 Bundle params, 1252 String utteranceId) { 1253 if (!checkNonNull(caller, text, params)) { 1254 return TextToSpeech.ERROR; 1255 } 1256 1257 SpeechItem item = 1258 new SynthesisSpeechItem( 1259 caller, 1260 Binder.getCallingUid(), 1261 Binder.getCallingPid(), 1262 params, 1263 utteranceId, 1264 text); 1265 return mSynthHandler.enqueueSpeechItem(queueMode, item); 1266 } 1267 1268 @Override 1269 public int synthesizeToFileDescriptor( 1270 IBinder caller, 1271 CharSequence text, 1272 ParcelFileDescriptor fileDescriptor, 1273 Bundle params, 1274 String utteranceId) { 1275 if (!checkNonNull(caller, text, fileDescriptor, params)) { 1276 return TextToSpeech.ERROR; 1277 } 1278 1279 // In test env, ParcelFileDescriptor instance may be EXACTLY the same 1280 // one that is used by client. And it will be closed by a client, thus 1281 // preventing us from writing anything to it. 
1282 final ParcelFileDescriptor sameFileDescriptor = 1283 ParcelFileDescriptor.adoptFd(fileDescriptor.detachFd()); 1284 1285 SpeechItem item = 1286 new SynthesisToFileOutputStreamSpeechItem( 1287 caller, 1288 Binder.getCallingUid(), 1289 Binder.getCallingPid(), 1290 params, 1291 utteranceId, 1292 text, 1293 new ParcelFileDescriptor.AutoCloseOutputStream( 1294 sameFileDescriptor)); 1295 return mSynthHandler.enqueueSpeechItem(TextToSpeech.QUEUE_ADD, item); 1296 } 1297 1298 @Override 1299 public int playAudio( 1300 IBinder caller, 1301 Uri audioUri, 1302 int queueMode, 1303 Bundle params, 1304 String utteranceId) { 1305 if (!checkNonNull(caller, audioUri, params)) { 1306 return TextToSpeech.ERROR; 1307 } 1308 1309 SpeechItem item = 1310 new AudioSpeechItem( 1311 caller, 1312 Binder.getCallingUid(), 1313 Binder.getCallingPid(), 1314 params, 1315 utteranceId, 1316 audioUri); 1317 return mSynthHandler.enqueueSpeechItem(queueMode, item); 1318 } 1319 1320 @Override 1321 public int playSilence( 1322 IBinder caller, long duration, int queueMode, String utteranceId) { 1323 if (!checkNonNull(caller)) { 1324 return TextToSpeech.ERROR; 1325 } 1326 1327 SpeechItem item = 1328 new SilenceSpeechItem( 1329 caller, 1330 Binder.getCallingUid(), 1331 Binder.getCallingPid(), 1332 utteranceId, 1333 duration); 1334 return mSynthHandler.enqueueSpeechItem(queueMode, item); 1335 } 1336 1337 @Override 1338 public boolean isSpeaking() { 1339 return mSynthHandler.isSpeaking() || mAudioPlaybackHandler.isSpeaking(); 1340 } 1341 1342 @Override 1343 public int stop(IBinder caller) { 1344 if (!checkNonNull(caller)) { 1345 return TextToSpeech.ERROR; 1346 } 1347 1348 return mSynthHandler.stopForApp(caller); 1349 } 1350 1351 @Override 1352 public String[] getLanguage() { 1353 return onGetLanguage(); 1354 } 1355 1356 @Override 1357 public String[] getClientDefaultLanguage() { 1358 return getSettingsLocale(); 1359 } 1360 1361 /* 1362 * If defaults are enforced, then no language is "available" except 1363 
* perhaps the default language selected by the user. 1364 */ 1365 @Override 1366 public int isLanguageAvailable(String lang, String country, String variant) { 1367 if (!checkNonNull(lang)) { 1368 return TextToSpeech.ERROR; 1369 } 1370 1371 return onIsLanguageAvailable(lang, country, variant); 1372 } 1373 1374 @Override 1375 public String[] getFeaturesForLanguage( 1376 String lang, String country, String variant) { 1377 Set<String> features = onGetFeaturesForLanguage(lang, country, variant); 1378 String[] featuresArray = null; 1379 if (features != null) { 1380 featuresArray = new String[features.size()]; 1381 features.toArray(featuresArray); 1382 } else { 1383 featuresArray = new String[0]; 1384 } 1385 return featuresArray; 1386 } 1387 1388 /* 1389 * There is no point loading a non default language if defaults 1390 * are enforced. 1391 */ 1392 @Override 1393 public int loadLanguage( 1394 IBinder caller, String lang, String country, String variant) { 1395 if (!checkNonNull(lang)) { 1396 return TextToSpeech.ERROR; 1397 } 1398 int retVal = onIsLanguageAvailable(lang, country, variant); 1399 1400 if (retVal == TextToSpeech.LANG_AVAILABLE 1401 || retVal == TextToSpeech.LANG_COUNTRY_AVAILABLE 1402 || retVal == TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE) { 1403 1404 SpeechItem item = 1405 new LoadLanguageItem( 1406 caller, 1407 Binder.getCallingUid(), 1408 Binder.getCallingPid(), 1409 lang, 1410 country, 1411 variant); 1412 1413 if (mSynthHandler.enqueueSpeechItem(TextToSpeech.QUEUE_ADD, item) 1414 != TextToSpeech.SUCCESS) { 1415 return TextToSpeech.ERROR; 1416 } 1417 } 1418 return retVal; 1419 } 1420 1421 @Override 1422 public List<Voice> getVoices() { 1423 return onGetVoices(); 1424 } 1425 1426 @Override 1427 public int loadVoice(IBinder caller, String voiceName) { 1428 if (!checkNonNull(voiceName)) { 1429 return TextToSpeech.ERROR; 1430 } 1431 int retVal = onIsValidVoiceName(voiceName); 1432 1433 if (retVal == TextToSpeech.SUCCESS) { 1434 SpeechItem item = 1435 new 
LoadVoiceItem( 1436 caller, 1437 Binder.getCallingUid(), 1438 Binder.getCallingPid(), 1439 voiceName); 1440 if (mSynthHandler.enqueueSpeechItem(TextToSpeech.QUEUE_ADD, item) 1441 != TextToSpeech.SUCCESS) { 1442 return TextToSpeech.ERROR; 1443 } 1444 } 1445 return retVal; 1446 } 1447 1448 public String getDefaultVoiceNameFor(String lang, String country, String variant) { 1449 if (!checkNonNull(lang)) { 1450 return null; 1451 } 1452 int retVal = onIsLanguageAvailable(lang, country, variant); 1453 1454 if (retVal == TextToSpeech.LANG_AVAILABLE 1455 || retVal == TextToSpeech.LANG_COUNTRY_AVAILABLE 1456 || retVal == TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE) { 1457 return onGetDefaultVoiceNameFor(lang, country, variant); 1458 } else { 1459 return null; 1460 } 1461 } 1462 1463 @Override 1464 public void setCallback(IBinder caller, ITextToSpeechCallback cb) { 1465 // Note that passing in a null callback is a valid use case. 1466 if (!checkNonNull(caller)) { 1467 return; 1468 } 1469 1470 mCallbacks.setCallback(caller, cb); 1471 } 1472 1473 private String intern(String in) { 1474 // The input parameter will be non null. 1475 return in.intern(); 1476 } 1477 1478 private boolean checkNonNull(Object... 
args) { 1479 for (Object o : args) { 1480 if (o == null) return false; 1481 } 1482 return true; 1483 } 1484 }; 1485 1486 private class CallbackMap extends RemoteCallbackList<ITextToSpeechCallback> { 1487 private final HashMap<IBinder, ITextToSpeechCallback> mCallerToCallback 1488 = new HashMap<IBinder, ITextToSpeechCallback>(); 1489 1490 public void setCallback(IBinder caller, ITextToSpeechCallback cb) { 1491 synchronized (mCallerToCallback) { 1492 ITextToSpeechCallback old; 1493 if (cb != null) { 1494 register(cb, caller); 1495 old = mCallerToCallback.put(caller, cb); 1496 } else { 1497 old = mCallerToCallback.remove(caller); 1498 } 1499 if (old != null && old != cb) { 1500 unregister(old); 1501 } 1502 } 1503 } 1504 1505 public void dispatchOnStop(Object callerIdentity, String utteranceId, boolean started) { 1506 ITextToSpeechCallback cb = getCallbackFor(callerIdentity); 1507 if (cb == null) return; 1508 try { 1509 cb.onStop(utteranceId, started); 1510 } catch (RemoteException e) { 1511 Log.e(TAG, "Callback onStop failed: " + e); 1512 } 1513 } 1514 1515 public void dispatchOnSuccess(Object callerIdentity, String utteranceId) { 1516 ITextToSpeechCallback cb = getCallbackFor(callerIdentity); 1517 if (cb == null) return; 1518 try { 1519 cb.onSuccess(utteranceId); 1520 } catch (RemoteException e) { 1521 Log.e(TAG, "Callback onDone failed: " + e); 1522 } 1523 } 1524 1525 public void dispatchOnStart(Object callerIdentity, String utteranceId) { 1526 ITextToSpeechCallback cb = getCallbackFor(callerIdentity); 1527 if (cb == null) return; 1528 try { 1529 cb.onStart(utteranceId); 1530 } catch (RemoteException e) { 1531 Log.e(TAG, "Callback onStart failed: " + e); 1532 } 1533 } 1534 1535 public void dispatchOnError(Object callerIdentity, String utteranceId, 1536 int errorCode) { 1537 ITextToSpeechCallback cb = getCallbackFor(callerIdentity); 1538 if (cb == null) return; 1539 try { 1540 cb.onError(utteranceId, errorCode); 1541 } catch (RemoteException e) { 1542 Log.e(TAG, 
"Callback onError failed: " + e); 1543 } 1544 } 1545 1546 public void dispatchOnBeginSynthesis(Object callerIdentity, String utteranceId, int sampleRateInHz, int audioFormat, int channelCount) { 1547 ITextToSpeechCallback cb = getCallbackFor(callerIdentity); 1548 if (cb == null) return; 1549 try { 1550 cb.onBeginSynthesis(utteranceId, sampleRateInHz, audioFormat, channelCount); 1551 } catch (RemoteException e) { 1552 Log.e(TAG, "Callback dispatchOnBeginSynthesis(String, int, int, int) failed: " + e); 1553 } 1554 } 1555 1556 public void dispatchOnAudioAvailable(Object callerIdentity, String utteranceId, byte[] buffer) { 1557 ITextToSpeechCallback cb = getCallbackFor(callerIdentity); 1558 if (cb == null) return; 1559 try { 1560 cb.onAudioAvailable(utteranceId, buffer); 1561 } catch (RemoteException e) { 1562 Log.e(TAG, "Callback dispatchOnAudioAvailable(String, byte[]) failed: " + e); 1563 } 1564 } 1565 1566 @Override 1567 public void onCallbackDied(ITextToSpeechCallback callback, Object cookie) { 1568 IBinder caller = (IBinder) cookie; 1569 synchronized (mCallerToCallback) { 1570 mCallerToCallback.remove(caller); 1571 } 1572 //mSynthHandler.stopForApp(caller); 1573 } 1574 1575 @Override 1576 public void kill() { 1577 synchronized (mCallerToCallback) { 1578 mCallerToCallback.clear(); 1579 super.kill(); 1580 } 1581 } 1582 1583 private ITextToSpeechCallback getCallbackFor(Object caller) { 1584 ITextToSpeechCallback cb; 1585 IBinder asBinder = (IBinder) caller; 1586 synchronized (mCallerToCallback) { 1587 cb = mCallerToCallback.get(asBinder); 1588 } 1589 1590 return cb; 1591 } 1592 } 1593} 1594