TextToSpeechService.java revision 65c50784564d0bae9276fde5472dd8898a781bcd
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); you may not 5 * use this file except in compliance with the License. You may obtain a copy of 6 * the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 * License for the specific language governing permissions and limitations under 14 * the License. 15 */ 16package android.speech.tts; 17 18import android.annotation.NonNull; 19import android.app.Service; 20import android.content.Intent; 21import android.media.AudioAttributes; 22import android.media.AudioManager; 23import android.net.Uri; 24import android.os.Binder; 25import android.os.Bundle; 26import android.os.Handler; 27import android.os.HandlerThread; 28import android.os.IBinder; 29import android.os.Looper; 30import android.os.Message; 31import android.os.MessageQueue; 32import android.os.ParcelFileDescriptor; 33import android.os.RemoteCallbackList; 34import android.os.RemoteException; 35import android.provider.Settings; 36import android.speech.tts.TextToSpeech.Engine; 37import android.text.TextUtils; 38import android.util.Log; 39 40import java.io.FileOutputStream; 41import java.io.IOException; 42import java.util.ArrayList; 43import java.util.HashMap; 44import java.util.HashSet; 45import java.util.List; 46import java.util.Locale; 47import java.util.MissingResourceException; 48import java.util.Set; 49 50 51/** 52 * Abstract base class for TTS engine implementations. 
The following methods
 * need to be implemented:
 * <ul>
 * <li>{@link #onIsLanguageAvailable}</li>
 * <li>{@link #onLoadLanguage}</li>
 * <li>{@link #onGetLanguage}</li>
 * <li>{@link #onSynthesizeText}</li>
 * <li>{@link #onStop}</li>
 * </ul>
 * The first three deal primarily with language management, and are used to
 * query the engine for its support for a given language and indicate to it
 * that requests in a given language are imminent.
 *
 * {@link #onSynthesizeText} is central to the engine implementation. The
 * implementation should synthesize text as per the request parameters and
 * return synthesized data via the supplied callback. This class and its helpers
 * will then consume that data, which might mean queuing it for playback or writing
 * it to a file or similar. All calls to this method will be on a single thread,
 * which will be different from the main thread of the service. Synthesis must be
 * synchronous, which means the engine must NOT hold on to the callback or call any
 * methods on it after the method returns.
 *
 * {@link #onStop} tells the engine that it should stop
 * all ongoing synthesis, if any. Any pending data from the current synthesis
 * will be discarded.
 *
 * {@link #onGetLanguage} is not required as of JELLYBEAN_MR2 (API 18) and later; it is only
 * called on earlier versions of Android.
 *
 * API Level 20 adds support for Voice objects. Voices are an abstraction that allows the TTS
 * service to expose multiple backends for a single locale. Each one of them can have a different
 * features set.
In order to fully take advantage of voices, an engine should implement 84 * the following methods: 85 * <ul> 86 * <li>{@link #onGetVoices()}</li> 87 * <li>{@link #onIsValidVoiceName(String)}</li> 88 * <li>{@link #onLoadVoice(String)}</li> 89 * <li>{@link #onGetDefaultVoiceNameFor(String, String, String)}</li> 90 * </ul> 91 * The first three methods are siblings of the {@link #onGetLanguage}, 92 * {@link #onIsLanguageAvailable} and {@link #onLoadLanguage} methods. The last one, 93 * {@link #onGetDefaultVoiceNameFor(String, String, String)} is a link between locale and voice 94 * based methods. Since API level 21 {@link TextToSpeech#setLanguage} is implemented by 95 * calling {@link TextToSpeech#setVoice} with the voice returned by 96 * {@link #onGetDefaultVoiceNameFor(String, String, String)}. 97 * 98 * If the client uses a voice instead of a locale, {@link SynthesisRequest} will contain the 99 * requested voice name. 100 * 101 * The default implementations of Voice-related methods implement them using the 102 * pre-existing locale-based implementation. 103 */ 104public abstract class TextToSpeechService extends Service { 105 106 private static final boolean DBG = false; 107 private static final String TAG = "TextToSpeechService"; 108 109 private static final String SYNTH_THREAD_NAME = "SynthThread"; 110 111 private SynthHandler mSynthHandler; 112 // A thread and it's associated handler for playing back any audio 113 // associated with this TTS engine. Will handle all requests except synthesis 114 // to file requests, which occur on the synthesis thread. 
    // Playback thread handler; never null after onCreate() has run.
    @NonNull private AudioPlaybackHandler mAudioPlaybackHandler;
    // Resolves per-engine locale preferences (see getSettingsLocale()).
    private TtsEngines mEngineHelper;

    // Registry of client callbacks; utterance progress is dispatched through it.
    private CallbackMap mCallbacks;
    // This service's package name, used as the key for engine locale preferences.
    private String mPackageName;

    private final Object mVoicesInfoLock = new Object();

    @Override
    public void onCreate() {
        if (DBG) Log.d(TAG, "onCreate()");
        super.onCreate();

        // Start the dedicated synthesis thread; all synthesis work is serialized
        // on its looper through mSynthHandler.
        SynthThread synthThread = new SynthThread();
        synthThread.start();
        mSynthHandler = new SynthHandler(synthThread.getLooper());

        // Separate thread for playing back synthesized audio.
        mAudioPlaybackHandler = new AudioPlaybackHandler();
        mAudioPlaybackHandler.start();

        mEngineHelper = new TtsEngines(this);

        mCallbacks = new CallbackMap();

        mPackageName = getApplicationInfo().packageName;

        String[] defaultLocale = getSettingsLocale();

        // Load default language
        onLoadLanguage(defaultLocale[0], defaultLocale[1], defaultLocale[2]);
    }

    @Override
    public void onDestroy() {
        if (DBG) Log.d(TAG, "onDestroy()");

        // Tell the synthesizer to stop
        mSynthHandler.quit();
        // Tell the audio playback thread to stop.
        mAudioPlaybackHandler.quit();
        // Unregister all callbacks.
        mCallbacks.kill();

        super.onDestroy();
    }

    /**
     * Checks whether the engine supports a given language.
     *
     * Can be called on multiple threads.
     *
     * Its return values HAVE to be consistent with onLoadLanguage.
     *
     * @param lang ISO-3 language code.
     * @param country ISO-3 country code. May be empty or null.
     * @param variant Language variant. May be empty or null.
     * @return Code indicating the support status for the locale.
     *         One of {@link TextToSpeech#LANG_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_VAR_AVAILABLE},
     *         {@link TextToSpeech#LANG_MISSING_DATA}
     *         {@link TextToSpeech#LANG_NOT_SUPPORTED}.
     */
    protected abstract int onIsLanguageAvailable(String lang, String country, String variant);

    /**
     * Returns the language, country and variant currently being used by the TTS engine.
     *
     * This method will be called only on Android 4.2 and before (API <= 17). In later versions
     * this method is not called by the Android TTS framework.
     *
     * Can be called on multiple threads.
     *
     * @return A 3-element array, containing language (ISO 3-letter code),
     *         country (ISO 3-letter code) and variant used by the engine.
     *         The country and variant may be {@code ""}. If country is empty, then variant must
     *         be empty too.
     * @see Locale#getISO3Language()
     * @see Locale#getISO3Country()
     * @see Locale#getVariant()
     */
    protected abstract String[] onGetLanguage();

    /**
     * Notifies the engine that it should load a speech synthesis language. There is no guarantee
     * that this method is always called before the language is used for synthesis. It is merely
     * a hint to the engine that it will probably get some synthesis requests for this language
     * at some point in the future.
     *
     * Can be called on multiple threads.
     * In <= Android 4.2 (<= API 17) can be called on main and service binder threads.
     * In > Android 4.2 (> API 17) can be called on main and synthesis threads.
     *
     * @param lang ISO-3 language code.
     * @param country ISO-3 country code. May be empty or null.
     * @param variant Language variant. May be empty or null.
     * @return Code indicating the support status for the locale.
     *         One of {@link TextToSpeech#LANG_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_AVAILABLE},
     *         {@link TextToSpeech#LANG_COUNTRY_VAR_AVAILABLE},
     *         {@link TextToSpeech#LANG_MISSING_DATA}
     *         {@link TextToSpeech#LANG_NOT_SUPPORTED}.
     */
    protected abstract int onLoadLanguage(String lang, String country, String variant);

    /**
     * Notifies the service that it should stop any in-progress speech synthesis.
     * This method can be called even if no speech synthesis is currently in progress.
     *
     * Can be called on multiple threads, but not on the synthesis thread.
     */
    protected abstract void onStop();

    /**
     * Tells the service to synthesize speech from the given text. This method should block until
     * the synthesis is finished. Called on the synthesis thread.
     *
     * @param request The synthesis request.
     * @param callback The callback that the engine must use to make data available for playback or
     *         for writing to a file.
     */
    protected abstract void onSynthesizeText(SynthesisRequest request, SynthesisCallback callback);

    /**
     * Queries the service for a set of features supported for a given language.
     *
     * Can be called on multiple threads.
     *
     * @param lang ISO-3 language code.
     * @param country ISO-3 country code. May be empty or null.
     * @param variant Language variant. May be empty or null.
     * @return A list of features supported for the given language.
     */
    protected Set<String> onGetFeaturesForLanguage(String lang, String country, String variant) {
        // Default implementation: no engine-specific features.
        return new HashSet<String>();
    }

    /**
     * Maps a locale to the availability status {@link #onIsLanguageAvailable} is expected to
     * return for it: the status must be exactly as specific as the locale itself
     * (language only, language+country, or language+country+variant).
     */
    private int getExpectedLanguageAvailableStatus(Locale locale) {
        int expectedStatus = TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE;
        if (locale.getVariant().isEmpty()) {
            if (locale.getCountry().isEmpty()) {
                expectedStatus = TextToSpeech.LANG_AVAILABLE;
            } else {
                expectedStatus = TextToSpeech.LANG_COUNTRY_AVAILABLE;
            }
        }
        return expectedStatus;
    }

    /**
     * Queries the service for a set of supported voices.
     *
     * Can be called on multiple threads.
     *
     * The default implementation tries to enumerate all available locales, pass them to
     * {@link #onIsLanguageAvailable(String, String, String)} and create Voice instances (using
     * the locale's BCP-47 language tag as the voice name) for the ones that are supported.
     * Note, that this implementation is suitable only for engines that don't have multiple voices
     * for a single locale. Also, this implementation won't work with Locales not listed in the
     * set returned by the {@link Locale#getAvailableLocales()} method.
     *
     * @return A list of voices supported.
     */
    public List<Voice> onGetVoices() {
        // Enumerate all locales and check if they are available
        ArrayList<Voice> voices = new ArrayList<Voice>();
        for (Locale locale : Locale.getAvailableLocales()) {
            int expectedStatus = getExpectedLanguageAvailableStatus(locale);
            try {
                int localeStatus = onIsLanguageAvailable(locale.getISO3Language(),
                        locale.getISO3Country(), locale.getVariant());
                if (localeStatus != expectedStatus) {
                    continue;
                }
            } catch (MissingResourceException e) {
                // Ignore locale without iso 3 codes
                continue;
            }
            Set<String> features = onGetFeaturesForLanguage(locale.getISO3Language(),
                    locale.getISO3Country(), locale.getVariant());
            String voiceName = onGetDefaultVoiceNameFor(locale.getISO3Language(),
                    locale.getISO3Country(), locale.getVariant());
            voices.add(new Voice(voiceName, locale, Voice.QUALITY_NORMAL,
                    Voice.LATENCY_NORMAL, false, features));
        }
        return voices;
    }

    /**
     * Return a name of the default voice for a given locale.
     *
     * This method provides a mapping between locales and available voices. This method is
     * used in {@link TextToSpeech#setLanguage}, which calls this method and then calls
     * {@link TextToSpeech#setVoice} with the voice returned by this method.
309 * 310 * Also, it's used by {@link TextToSpeech#getDefaultVoice()} to find a default voice for 311 * the default locale. 312 * 313 * @param lang ISO-3 language code. 314 * @param country ISO-3 country code. May be empty or null. 315 * @param variant Language variant. May be empty or null. 316 317 * @return A name of the default voice for a given locale. 318 */ 319 public String onGetDefaultVoiceNameFor(String lang, String country, String variant) { 320 int localeStatus = onIsLanguageAvailable(lang, country, variant); 321 Locale iso3Locale = null; 322 switch (localeStatus) { 323 case TextToSpeech.LANG_AVAILABLE: 324 iso3Locale = new Locale(lang); 325 break; 326 case TextToSpeech.LANG_COUNTRY_AVAILABLE: 327 iso3Locale = new Locale(lang, country); 328 break; 329 case TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE: 330 iso3Locale = new Locale(lang, country, variant); 331 break; 332 default: 333 return null; 334 } 335 Locale properLocale = TtsEngines.normalizeTTSLocale(iso3Locale); 336 String voiceName = properLocale.toLanguageTag(); 337 if (onIsValidVoiceName(voiceName) == TextToSpeech.SUCCESS) { 338 return voiceName; 339 } else { 340 return null; 341 } 342 } 343 344 /** 345 * Notifies the engine that it should load a speech synthesis voice. There is no guarantee 346 * that this method is always called before the voice is used for synthesis. It is merely 347 * a hint to the engine that it will probably get some synthesis requests for this voice 348 * at some point in the future. 349 * 350 * Will be called only on synthesis thread. 351 * 352 * The default implementation creates a Locale from the voice name (by interpreting the name as 353 * a BCP-47 tag for the locale), and passes it to 354 * {@link #onLoadLanguage(String, String, String)}. 355 * 356 * @param voiceName Name of the voice. 357 * @return {@link TextToSpeech#ERROR} or {@link TextToSpeech#SUCCESS}. 
358 */ 359 public int onLoadVoice(String voiceName) { 360 Locale locale = Locale.forLanguageTag(voiceName); 361 if (locale == null) { 362 return TextToSpeech.ERROR; 363 } 364 int expectedStatus = getExpectedLanguageAvailableStatus(locale); 365 try { 366 int localeStatus = onIsLanguageAvailable(locale.getISO3Language(), 367 locale.getISO3Country(), locale.getVariant()); 368 if (localeStatus != expectedStatus) { 369 return TextToSpeech.ERROR; 370 } 371 onLoadLanguage(locale.getISO3Language(), 372 locale.getISO3Country(), locale.getVariant()); 373 return TextToSpeech.SUCCESS; 374 } catch (MissingResourceException e) { 375 return TextToSpeech.ERROR; 376 } 377 } 378 379 /** 380 * Checks whether the engine supports a voice with a given name. 381 * 382 * Can be called on multiple threads. 383 * 384 * The default implementation treats the voice name as a language tag, creating a Locale from 385 * the voice name, and passes it to {@link #onIsLanguageAvailable(String, String, String)}. 386 * 387 * @param voiceName Name of the voice. 388 * @return {@link TextToSpeech#ERROR} or {@link TextToSpeech#SUCCESS}. 
389 */ 390 public int onIsValidVoiceName(String voiceName) { 391 Locale locale = Locale.forLanguageTag(voiceName); 392 if (locale == null) { 393 return TextToSpeech.ERROR; 394 } 395 int expectedStatus = getExpectedLanguageAvailableStatus(locale); 396 try { 397 int localeStatus = onIsLanguageAvailable(locale.getISO3Language(), 398 locale.getISO3Country(), locale.getVariant()); 399 if (localeStatus != expectedStatus) { 400 return TextToSpeech.ERROR; 401 } 402 return TextToSpeech.SUCCESS; 403 } catch (MissingResourceException e) { 404 return TextToSpeech.ERROR; 405 } 406 } 407 408 private int getDefaultSpeechRate() { 409 return getSecureSettingInt(Settings.Secure.TTS_DEFAULT_RATE, Engine.DEFAULT_RATE); 410 } 411 412 private String[] getSettingsLocale() { 413 final Locale locale = mEngineHelper.getLocalePrefForEngine(mPackageName); 414 return TtsEngines.toOldLocaleStringFormat(locale); 415 } 416 417 private int getSecureSettingInt(String name, int defaultValue) { 418 return Settings.Secure.getInt(getContentResolver(), name, defaultValue); 419 } 420 421 /** 422 * Synthesizer thread. This thread is used to run {@link SynthHandler}. 
     */
    private class SynthThread extends HandlerThread implements MessageQueue.IdleHandler {

        // True until the first queueIdle() callback, which fires as soon as the
        // looper starts; that first idle must not be reported as "queue done".
        private boolean mFirstIdle = true;

        public SynthThread() {
            super(SYNTH_THREAD_NAME, android.os.Process.THREAD_PRIORITY_DEFAULT);
        }

        @Override
        protected void onLooperPrepared() {
            getLooper().getQueue().addIdleHandler(this);
        }

        @Override
        public boolean queueIdle() {
            // Skip the start-up idle; every subsequent idle means all queued
            // utterances have been processed.
            if (mFirstIdle) {
                mFirstIdle = false;
            } else {
                broadcastTtsQueueProcessingCompleted();
            }
            // Keep the idle handler registered.
            return true;
        }

        private void broadcastTtsQueueProcessingCompleted() {
            Intent i = new Intent(TextToSpeech.ACTION_TTS_QUEUE_PROCESSING_COMPLETED);
            if (DBG) Log.d(TAG, "Broadcasting: " + i);
            sendBroadcast(i);
        }
    }

    private class SynthHandler extends Handler {
        // Item currently being played/synthesized; null when idle. Guarded by 'this'.
        private SpeechItem mCurrentSpeechItem = null;

        // When a message with QUEUE_FLUSH arrives we add the caller identity to the List and when a
        // message with QUEUE_DESTROY arrives we increment mFlushAll. Then a message is added to the
        // handler queue that removes the caller identify from the list and decrements the mFlushAll
        // counter. This is so that when a message is processed and the caller identity is in the
        // list or mFlushAll is not zero, we know that the message should be flushed.
        // It's important that mFlushedObjects is a List and not a Set, and that mFlushAll is an
        // int and not a bool. This is because when multiple messages arrive with QUEUE_FLUSH or
        // QUEUE_DESTROY, we want to keep flushing messages until we arrive at the last QUEUE_FLUSH
        // or QUEUE_DESTROY message.
        private List<Object> mFlushedObjects = new ArrayList<>();
        private int mFlushAll = 0;

        public SynthHandler(Looper looper) {
            super(looper);
        }

        // Marks pending items (from one caller, or all when callerIdentity is null)
        // as flushed. Must be balanced by endFlushingSpeechItems().
        private void startFlushingSpeechItems(Object callerIdentity) {
            synchronized (mFlushedObjects) {
                if (callerIdentity == null) {
                    mFlushAll += 1;
                } else {
                    mFlushedObjects.add(callerIdentity);
                }
            }
        }

        private void endFlushingSpeechItems(Object callerIdentity) {
            synchronized (mFlushedObjects) {
                if (callerIdentity == null) {
                    mFlushAll -= 1;
                } else {
                    mFlushedObjects.remove(callerIdentity);
                }
            }
        }

        private boolean isFlushed(SpeechItem speechItem) {
            synchronized (mFlushedObjects) {
                return mFlushAll > 0 || mFlushedObjects.contains(speechItem.getCallerIdentity());
            }
        }

        private synchronized SpeechItem getCurrentSpeechItem() {
            return mCurrentSpeechItem;
        }

        // Swaps in the new current item and returns the previous one.
        private synchronized SpeechItem setCurrentSpeechItem(SpeechItem speechItem) {
            SpeechItem old = mCurrentSpeechItem;
            mCurrentSpeechItem = speechItem;
            return old;
        }

        // Removes and returns the current item, but only if it belongs to the
        // given caller (compared by reference).
        private synchronized SpeechItem maybeRemoveCurrentSpeechItem(Object callerIdentity) {
            if (mCurrentSpeechItem != null &&
                    (mCurrentSpeechItem.getCallerIdentity() == callerIdentity)) {
                SpeechItem current = mCurrentSpeechItem;
                mCurrentSpeechItem = null;
                return current;
            }

            return null;
        }

        public boolean isSpeaking() {
            return getCurrentSpeechItem() != null;
        }

        public void quit() {
            // Don't process any more speech items
            getLooper().quit();
            // Stop the current speech item
            SpeechItem current = setCurrentSpeechItem(null);
            if (current != null) {
                current.stop();
            }
            // The AudioPlaybackHandler will be destroyed by the caller.
        }

        /**
         * Adds a speech item to the queue.
         *
         * Called on a service binder thread.
         */
        public int enqueueSpeechItem(int queueMode, final SpeechItem speechItem) {
            UtteranceProgressDispatcher utterenceProgress = null;
            if (speechItem instanceof UtteranceProgressDispatcher) {
                utterenceProgress = (UtteranceProgressDispatcher) speechItem;
            }

            if (!speechItem.isValid()) {
                if (utterenceProgress != null) {
                    utterenceProgress.dispatchOnError(
                            TextToSpeech.ERROR_INVALID_REQUEST);
                }
                return TextToSpeech.ERROR;
            }

            // Flush semantics: QUEUE_FLUSH drops this caller's pending items,
            // QUEUE_DESTROY drops everyone's.
            if (queueMode == TextToSpeech.QUEUE_FLUSH) {
                stopForApp(speechItem.getCallerIdentity());
            } else if (queueMode == TextToSpeech.QUEUE_DESTROY) {
                stopAll();
            }
            Runnable runnable = new Runnable() {
                @Override
                public void run() {
                    // Runs on the synthesis thread. If a flush arrived while this
                    // item was queued, stop it instead of playing it.
                    if (isFlushed(speechItem)) {
                        speechItem.stop();
                    } else {
                        setCurrentSpeechItem(speechItem);
                        speechItem.play();
                        setCurrentSpeechItem(null);
                    }
                }
            };
            Message msg = Message.obtain(this, runnable);

            // The obj is used to remove all callbacks from the given app in
            // stopForApp(String).
            //
            // Note that this string is interned, so the == comparison works.
            msg.obj = speechItem.getCallerIdentity();

            if (sendMessage(msg)) {
                return TextToSpeech.SUCCESS;
            } else {
                Log.w(TAG, "SynthThread has quit");
                if (utterenceProgress != null) {
                    utterenceProgress.dispatchOnError(TextToSpeech.ERROR_SERVICE);
                }
                return TextToSpeech.ERROR;
            }
        }

        /**
         * Stops all speech output and removes any utterances still in the queue for
         * the calling app.
         *
         * Called on a service binder thread.
         */
        public int stopForApp(final Object callerIdentity) {
            if (callerIdentity == null) {
                return TextToSpeech.ERROR;
            }

            // Flush pending messages from callerIdentity
            startFlushingSpeechItems(callerIdentity);

            // This stops writing data to the file / or publishing
            // items to the audio playback handler.
            //
            // Note that the current speech item must be removed only if it
            // belongs to the callingApp, else the item will be "orphaned" and
            // not stopped correctly if a stop request comes along for the item
            // from the app it belongs to.
            SpeechItem current = maybeRemoveCurrentSpeechItem(callerIdentity);
            if (current != null) {
                current.stop();
            }

            // Remove any enqueued audio too.
            mAudioPlaybackHandler.stopForApp(callerIdentity);

            // Stop flushing pending messages: this runnable is queued BEHIND all
            // currently pending items, so everything ahead of it gets flushed.
            Runnable runnable = new Runnable() {
                @Override
                public void run() {
                    endFlushingSpeechItems(callerIdentity);
                }
            };
            sendMessage(Message.obtain(this, runnable));
            return TextToSpeech.SUCCESS;
        }

        public int stopAll() {
            // Order to flush pending messages
            startFlushingSpeechItems(null);

            // Stop the current speech item unconditionally .
            SpeechItem current = setCurrentSpeechItem(null);
            if (current != null) {
                current.stop();
            }
            // Remove all pending playback as well.
            mAudioPlaybackHandler.stop();

            // Message to stop flushing pending messages
            Runnable runnable = new Runnable() {
                @Override
                public void run() {
                    endFlushingSpeechItems(null);
                }
            };
            sendMessage(Message.obtain(this, runnable));


            return TextToSpeech.SUCCESS;
        }
    }

    /** Dispatches utterance progress events back to the requesting client. */
    interface UtteranceProgressDispatcher {
        void dispatchOnStop();

        void dispatchOnSuccess();

        void dispatchOnStart();

        void dispatchOnError(int errorCode);

        void dispatchOnBeginSynthesis(int sampleRateInHz, int audioFormat, int channelCount);

        void dispatchOnAudioAvailable(byte[] audio);

        public void dispatchOnUtteranceRangeStart(int start, int end);
    }

    /** Set of parameters affecting audio output. */
    static class AudioOutputParams {
        /**
         * Audio session identifier.
         * May be used to associate audio playback with one of the
         * {@link android.media.audiofx.AudioEffect} objects. If not specified by client,
         * it should be equal to {@link AudioManager#AUDIO_SESSION_ID_GENERATE}.
         */
        public final int mSessionId;

        /**
         * Volume, in the range [0.0f, 1.0f]. The default value is
         * {@link TextToSpeech.Engine#DEFAULT_VOLUME} (1.0f).
         */
        public final float mVolume;

        /**
         * Left/right position of the audio, in the range [-1.0f, 1.0f].
         * The default value is {@link TextToSpeech.Engine#DEFAULT_PAN} (0.0f).
         */
        public final float mPan;


        /**
         * Audio attributes, set by {@link TextToSpeech#setAudioAttributes}
         * or created from the value of {@link TextToSpeech.Engine#KEY_PARAM_STREAM}.
         */
        public final AudioAttributes mAudioAttributes;

        /** Create AudioOutputParams with default values */
        AudioOutputParams() {
            mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
            mVolume = Engine.DEFAULT_VOLUME;
            mPan = Engine.DEFAULT_PAN;
            mAudioAttributes = null;
        }

        AudioOutputParams(int sessionId, float volume, float pan,
                AudioAttributes audioAttributes) {
            mSessionId = sessionId;
            mVolume = volume;
            mPan = pan;
            mAudioAttributes = audioAttributes;
        }

        /** Create AudioOutputParams from A {@link SynthesisRequest#getParams()} bundle */
        static AudioOutputParams createFromParamsBundle(Bundle paramsBundle, boolean isSpeech) {
            if (paramsBundle == null) {
                return new AudioOutputParams();
            }

            // Prefer explicitly supplied AudioAttributes; otherwise derive them
            // from the legacy stream type parameter.
            AudioAttributes audioAttributes =
                    (AudioAttributes) paramsBundle.getParcelable(
                            Engine.KEY_PARAM_AUDIO_ATTRIBUTES);
            if (audioAttributes == null) {
                int streamType = paramsBundle.getInt(
                        Engine.KEY_PARAM_STREAM, Engine.DEFAULT_STREAM);
                audioAttributes = (new AudioAttributes.Builder())
                        .setLegacyStreamType(streamType)
                        .setContentType((isSpeech ?
                                AudioAttributes.CONTENT_TYPE_SPEECH :
                                AudioAttributes.CONTENT_TYPE_SONIFICATION))
                        .build();
            }

            return new AudioOutputParams(
                    paramsBundle.getInt(
                            Engine.KEY_PARAM_SESSION_ID,
                            AudioManager.AUDIO_SESSION_ID_GENERATE),
                    paramsBundle.getFloat(
                            Engine.KEY_PARAM_VOLUME,
                            Engine.DEFAULT_VOLUME),
                    paramsBundle.getFloat(
                            Engine.KEY_PARAM_PAN,
                            Engine.DEFAULT_PAN),
                    audioAttributes);
        }
    }


    /**
     * An item in the synth thread queue.
     */
    private abstract class SpeechItem {
        // Identity of the client that enqueued this item; compared by reference
        // (see SynthHandler.maybeRemoveCurrentSpeechItem).
        private final Object mCallerIdentity;
        private final int mCallerUid;
        private final int mCallerPid;
        // Lifecycle flags; both guarded by 'this'.
        private boolean mStarted = false;
        private boolean mStopped = false;

        public SpeechItem(Object caller, int callerUid, int callerPid) {
            mCallerIdentity = caller;
            mCallerUid = callerUid;
            mCallerPid = callerPid;
        }

        public Object getCallerIdentity() {
            return mCallerIdentity;
        }

        public int getCallerUid() {
            return mCallerUid;
        }

        public int getCallerPid() {
            return mCallerPid;
        }

        /**
         * Checker whether the item is valid. If this method returns false, the item should not
         * be played.
         */
        public abstract boolean isValid();

        /**
         * Plays the speech item. Blocks until playback is finished.
         * Must not be called more than once.
         *
         * Only called on the synthesis thread.
         */
        public void play() {
            synchronized (this) {
                if (mStarted) {
                    throw new IllegalStateException("play() called twice");
                }
                mStarted = true;
            }
            playImpl();
        }

        protected abstract void playImpl();

        /**
         * Stops the speech item.
         * Must not be called more than once.
         *
         * Can be called on multiple threads, but not on the synthesis thread.
         */
        public void stop() {
            synchronized (this) {
                if (mStopped) {
                    throw new IllegalStateException("stop() called twice");
                }
                mStopped = true;
            }
            stopImpl();
        }

        protected abstract void stopImpl();

        protected synchronized boolean isStopped() {
            return mStopped;
        }

        protected synchronized boolean isStarted() {
            return mStarted;
        }
    }

    /**
     * An item in the synth thread queue that process utterance (and call back to client about
     * progress).
     */
    private abstract class UtteranceSpeechItem extends SpeechItem
            implements UtteranceProgressDispatcher {

        public UtteranceSpeechItem(Object caller, int callerUid, int callerPid) {
            super(caller, callerUid, callerPid);
        }

        // Each dispatch below is a no-op when there is no utterance id, so
        // anonymous requests generate no client callbacks.
        @Override
        public void dispatchOnSuccess() {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnSuccess(getCallerIdentity(), utteranceId);
            }
        }

        @Override
        public void dispatchOnStop() {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnStop(getCallerIdentity(), utteranceId, isStarted());
            }
        }

        @Override
        public void dispatchOnStart() {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnStart(getCallerIdentity(), utteranceId);
            }
        }

        @Override
        public void dispatchOnError(int errorCode) {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnError(getCallerIdentity(), utteranceId, errorCode);
            }
        }

        @Override
        public void dispatchOnBeginSynthesis(int sampleRateInHz, int audioFormat, int channelCount) {
            final String utteranceId = getUtteranceId();
            if (utteranceId != null) {
                mCallbacks.dispatchOnBeginSynthesis(getCallerIdentity(), utteranceId, sampleRateInHz, audioFormat, channelCount);
            }
        }

@Override 880 public void dispatchOnAudioAvailable(byte[] audio) { 881 final String utteranceId = getUtteranceId(); 882 if (utteranceId != null) { 883 mCallbacks.dispatchOnAudioAvailable(getCallerIdentity(), utteranceId, audio); 884 } 885 } 886 887 @Override 888 public void dispatchOnUtteranceRangeStart(int start, int end) { 889 final String utteranceId = getUtteranceId(); 890 if (utteranceId != null) { 891 mCallbacks.dispatchOnUtteranceRangeStart( 892 getCallerIdentity(), utteranceId, start, end); 893 } 894 } 895 896 abstract public String getUtteranceId(); 897 898 String getStringParam(Bundle params, String key, String defaultValue) { 899 return params == null ? defaultValue : params.getString(key, defaultValue); 900 } 901 902 int getIntParam(Bundle params, String key, int defaultValue) { 903 return params == null ? defaultValue : params.getInt(key, defaultValue); 904 } 905 906 float getFloatParam(Bundle params, String key, float defaultValue) { 907 return params == null ? defaultValue : params.getFloat(key, defaultValue); 908 } 909 } 910 911 /** 912 * Synthesis parameters are kept in a single Bundle passed as parameter. This class allow 913 * subclasses to access them conveniently. 
914 */ 915 private abstract class UtteranceSpeechItemWithParams extends UtteranceSpeechItem { 916 protected final Bundle mParams; 917 protected final String mUtteranceId; 918 919 UtteranceSpeechItemWithParams( 920 Object callerIdentity, 921 int callerUid, 922 int callerPid, 923 Bundle params, 924 String utteranceId) { 925 super(callerIdentity, callerUid, callerPid); 926 mParams = params; 927 mUtteranceId = utteranceId; 928 } 929 930 boolean hasLanguage() { 931 return !TextUtils.isEmpty(getStringParam(mParams, Engine.KEY_PARAM_LANGUAGE, null)); 932 } 933 934 int getSpeechRate() { 935 return getIntParam(mParams, Engine.KEY_PARAM_RATE, getDefaultSpeechRate()); 936 } 937 938 int getPitch() { 939 return getIntParam(mParams, Engine.KEY_PARAM_PITCH, Engine.DEFAULT_PITCH); 940 } 941 942 @Override 943 public String getUtteranceId() { 944 return mUtteranceId; 945 } 946 947 AudioOutputParams getAudioParams() { 948 return AudioOutputParams.createFromParamsBundle(mParams, true); 949 } 950 } 951 952 class SynthesisSpeechItem extends UtteranceSpeechItemWithParams { 953 // Never null. 954 private final CharSequence mText; 955 private final SynthesisRequest mSynthesisRequest; 956 private final String[] mDefaultLocale; 957 // Non null after synthesis has started, and all accesses 958 // guarded by 'this'. 
959 private AbstractSynthesisCallback mSynthesisCallback; 960 private final EventLogger mEventLogger; 961 private final int mCallerUid; 962 963 public SynthesisSpeechItem( 964 Object callerIdentity, 965 int callerUid, 966 int callerPid, 967 Bundle params, 968 String utteranceId, 969 CharSequence text) { 970 super(callerIdentity, callerUid, callerPid, params, utteranceId); 971 mText = text; 972 mCallerUid = callerUid; 973 mSynthesisRequest = new SynthesisRequest(mText, mParams); 974 mDefaultLocale = getSettingsLocale(); 975 setRequestParams(mSynthesisRequest); 976 mEventLogger = new EventLogger(mSynthesisRequest, callerUid, callerPid, mPackageName); 977 } 978 979 public CharSequence getText() { 980 return mText; 981 } 982 983 @Override 984 public boolean isValid() { 985 if (mText == null) { 986 Log.e(TAG, "null synthesis text"); 987 return false; 988 } 989 if (mText.length() >= TextToSpeech.getMaxSpeechInputLength()) { 990 Log.w(TAG, "Text too long: " + mText.length() + " chars"); 991 return false; 992 } 993 return true; 994 } 995 996 @Override 997 protected void playImpl() { 998 AbstractSynthesisCallback synthesisCallback; 999 mEventLogger.onRequestProcessingStart(); 1000 synchronized (this) { 1001 // stop() might have been called before we enter this 1002 // synchronized block. 
1003 if (isStopped()) { 1004 return; 1005 } 1006 mSynthesisCallback = createSynthesisCallback(); 1007 synthesisCallback = mSynthesisCallback; 1008 } 1009 1010 TextToSpeechService.this.onSynthesizeText(mSynthesisRequest, synthesisCallback); 1011 1012 // Fix for case where client called .start() & .error(), but did not called .done() 1013 if (synthesisCallback.hasStarted() && !synthesisCallback.hasFinished()) { 1014 synthesisCallback.done(); 1015 } 1016 } 1017 1018 protected AbstractSynthesisCallback createSynthesisCallback() { 1019 return new PlaybackSynthesisCallback(getAudioParams(), 1020 mAudioPlaybackHandler, this, getCallerIdentity(), mEventLogger, false); 1021 } 1022 1023 private void setRequestParams(SynthesisRequest request) { 1024 String voiceName = getVoiceName(); 1025 request.setLanguage(getLanguage(), getCountry(), getVariant()); 1026 if (!TextUtils.isEmpty(voiceName)) { 1027 request.setVoiceName(getVoiceName()); 1028 } 1029 request.setSpeechRate(getSpeechRate()); 1030 request.setCallerUid(mCallerUid); 1031 request.setPitch(getPitch()); 1032 } 1033 1034 @Override 1035 protected void stopImpl() { 1036 AbstractSynthesisCallback synthesisCallback; 1037 synchronized (this) { 1038 synthesisCallback = mSynthesisCallback; 1039 } 1040 if (synthesisCallback != null) { 1041 // If the synthesis callback is null, it implies that we haven't 1042 // entered the synchronized(this) block in playImpl which in 1043 // turn implies that synthesis would not have started. 
1044 synthesisCallback.stop(); 1045 TextToSpeechService.this.onStop(); 1046 } else { 1047 dispatchOnStop(); 1048 } 1049 } 1050 1051 private String getCountry() { 1052 if (!hasLanguage()) return mDefaultLocale[1]; 1053 return getStringParam(mParams, Engine.KEY_PARAM_COUNTRY, ""); 1054 } 1055 1056 private String getVariant() { 1057 if (!hasLanguage()) return mDefaultLocale[2]; 1058 return getStringParam(mParams, Engine.KEY_PARAM_VARIANT, ""); 1059 } 1060 1061 public String getLanguage() { 1062 return getStringParam(mParams, Engine.KEY_PARAM_LANGUAGE, mDefaultLocale[0]); 1063 } 1064 1065 public String getVoiceName() { 1066 return getStringParam(mParams, Engine.KEY_PARAM_VOICE_NAME, ""); 1067 } 1068 } 1069 1070 private class SynthesisToFileOutputStreamSpeechItem extends SynthesisSpeechItem { 1071 private final FileOutputStream mFileOutputStream; 1072 1073 public SynthesisToFileOutputStreamSpeechItem( 1074 Object callerIdentity, 1075 int callerUid, 1076 int callerPid, 1077 Bundle params, 1078 String utteranceId, 1079 CharSequence text, 1080 FileOutputStream fileOutputStream) { 1081 super(callerIdentity, callerUid, callerPid, params, utteranceId, text); 1082 mFileOutputStream = fileOutputStream; 1083 } 1084 1085 @Override 1086 protected AbstractSynthesisCallback createSynthesisCallback() { 1087 return new FileSynthesisCallback(mFileOutputStream.getChannel(), this, false); 1088 } 1089 1090 @Override 1091 protected void playImpl() { 1092 dispatchOnStart(); 1093 super.playImpl(); 1094 try { 1095 mFileOutputStream.close(); 1096 } catch(IOException e) { 1097 Log.w(TAG, "Failed to close output file", e); 1098 } 1099 } 1100 } 1101 1102 private class AudioSpeechItem extends UtteranceSpeechItemWithParams { 1103 private final AudioPlaybackQueueItem mItem; 1104 1105 public AudioSpeechItem( 1106 Object callerIdentity, 1107 int callerUid, 1108 int callerPid, 1109 Bundle params, 1110 String utteranceId, 1111 Uri uri) { 1112 super(callerIdentity, callerUid, callerPid, params, 
utteranceId); 1113 mItem = new AudioPlaybackQueueItem(this, getCallerIdentity(), 1114 TextToSpeechService.this, uri, getAudioParams()); 1115 } 1116 1117 @Override 1118 public boolean isValid() { 1119 return true; 1120 } 1121 1122 @Override 1123 protected void playImpl() { 1124 mAudioPlaybackHandler.enqueue(mItem); 1125 } 1126 1127 @Override 1128 protected void stopImpl() { 1129 // Do nothing. 1130 } 1131 1132 @Override 1133 public String getUtteranceId() { 1134 return getStringParam(mParams, Engine.KEY_PARAM_UTTERANCE_ID, null); 1135 } 1136 1137 @Override 1138 AudioOutputParams getAudioParams() { 1139 return AudioOutputParams.createFromParamsBundle(mParams, false); 1140 } 1141 } 1142 1143 private class SilenceSpeechItem extends UtteranceSpeechItem { 1144 private final long mDuration; 1145 private final String mUtteranceId; 1146 1147 public SilenceSpeechItem(Object callerIdentity, int callerUid, int callerPid, 1148 String utteranceId, long duration) { 1149 super(callerIdentity, callerUid, callerPid); 1150 mUtteranceId = utteranceId; 1151 mDuration = duration; 1152 } 1153 1154 @Override 1155 public boolean isValid() { 1156 return true; 1157 } 1158 1159 @Override 1160 protected void playImpl() { 1161 mAudioPlaybackHandler.enqueue(new SilencePlaybackQueueItem( 1162 this, getCallerIdentity(), mDuration)); 1163 } 1164 1165 @Override 1166 protected void stopImpl() { 1167 1168 } 1169 1170 @Override 1171 public String getUtteranceId() { 1172 return mUtteranceId; 1173 } 1174 } 1175 1176 /** 1177 * Call {@link TextToSpeechService#onLoadLanguage} on synth thread. 
1178 */ 1179 private class LoadLanguageItem extends SpeechItem { 1180 private final String mLanguage; 1181 private final String mCountry; 1182 private final String mVariant; 1183 1184 public LoadLanguageItem(Object callerIdentity, int callerUid, int callerPid, 1185 String language, String country, String variant) { 1186 super(callerIdentity, callerUid, callerPid); 1187 mLanguage = language; 1188 mCountry = country; 1189 mVariant = variant; 1190 } 1191 1192 @Override 1193 public boolean isValid() { 1194 return true; 1195 } 1196 1197 @Override 1198 protected void playImpl() { 1199 TextToSpeechService.this.onLoadLanguage(mLanguage, mCountry, mVariant); 1200 } 1201 1202 @Override 1203 protected void stopImpl() { 1204 // No-op 1205 } 1206 } 1207 1208 /** 1209 * Call {@link TextToSpeechService#onLoadLanguage} on synth thread. 1210 */ 1211 private class LoadVoiceItem extends SpeechItem { 1212 private final String mVoiceName; 1213 1214 public LoadVoiceItem(Object callerIdentity, int callerUid, int callerPid, 1215 String voiceName) { 1216 super(callerIdentity, callerUid, callerPid); 1217 mVoiceName = voiceName; 1218 } 1219 1220 @Override 1221 public boolean isValid() { 1222 return true; 1223 } 1224 1225 @Override 1226 protected void playImpl() { 1227 TextToSpeechService.this.onLoadVoice(mVoiceName); 1228 } 1229 1230 @Override 1231 protected void stopImpl() { 1232 // No-op 1233 } 1234 } 1235 1236 1237 @Override 1238 public IBinder onBind(Intent intent) { 1239 if (TextToSpeech.Engine.INTENT_ACTION_TTS_SERVICE.equals(intent.getAction())) { 1240 return mBinder; 1241 } 1242 return null; 1243 } 1244 1245 /** 1246 * Binder returned from {@code #onBind(Intent)}. The methods in this class can be called called 1247 * from several different threads. 1248 */ 1249 // NOTE: All calls that are passed in a calling app are interned so that 1250 // they can be used as message objects (which are tested for equality using ==). 
    private final ITextToSpeechService.Stub mBinder =
            new ITextToSpeechService.Stub() {
                @Override
                public int speak(
                        IBinder caller,
                        CharSequence text,
                        int queueMode,
                        Bundle params,
                        String utteranceId) {
                    if (!checkNonNull(caller, text, params)) {
                        return TextToSpeech.ERROR;
                    }

                    SpeechItem item =
                            new SynthesisSpeechItem(
                                    caller,
                                    Binder.getCallingUid(),
                                    Binder.getCallingPid(),
                                    params,
                                    utteranceId,
                                    text);
                    return mSynthHandler.enqueueSpeechItem(queueMode, item);
                }

                @Override
                public int synthesizeToFileDescriptor(
                        IBinder caller,
                        CharSequence text,
                        ParcelFileDescriptor fileDescriptor,
                        Bundle params,
                        String utteranceId) {
                    if (!checkNonNull(caller, text, fileDescriptor, params)) {
                        return TextToSpeech.ERROR;
                    }

                    // In test env, ParcelFileDescriptor instance may be EXACTLY the same
                    // one that is used by client. And it will be closed by a client, thus
                    // preventing us from writing anything to it.
                    final ParcelFileDescriptor sameFileDescriptor =
                            ParcelFileDescriptor.adoptFd(fileDescriptor.detachFd());

                    SpeechItem item =
                            new SynthesisToFileOutputStreamSpeechItem(
                                    caller,
                                    Binder.getCallingUid(),
                                    Binder.getCallingPid(),
                                    params,
                                    utteranceId,
                                    text,
                                    new ParcelFileDescriptor.AutoCloseOutputStream(
                                            sameFileDescriptor));
                    // File synthesis is always appended to the queue (QUEUE_ADD).
                    return mSynthHandler.enqueueSpeechItem(TextToSpeech.QUEUE_ADD, item);
                }

                @Override
                public int playAudio(
                        IBinder caller,
                        Uri audioUri,
                        int queueMode,
                        Bundle params,
                        String utteranceId) {
                    if (!checkNonNull(caller, audioUri, params)) {
                        return TextToSpeech.ERROR;
                    }

                    SpeechItem item =
                            new AudioSpeechItem(
                                    caller,
                                    Binder.getCallingUid(),
                                    Binder.getCallingPid(),
                                    params,
                                    utteranceId,
                                    audioUri);
                    return mSynthHandler.enqueueSpeechItem(queueMode, item);
                }

                @Override
                public int playSilence(
                        IBinder caller, long duration, int queueMode, String utteranceId) {
                    if (!checkNonNull(caller)) {
                        return TextToSpeech.ERROR;
                    }

                    SpeechItem item =
                            new SilenceSpeechItem(
                                    caller,
                                    Binder.getCallingUid(),
                                    Binder.getCallingPid(),
                                    utteranceId,
                                    duration);
                    return mSynthHandler.enqueueSpeechItem(queueMode, item);
                }

                @Override
                public boolean isSpeaking() {
                    // True if either synthesis or audio playback has pending work.
                    return mSynthHandler.isSpeaking() || mAudioPlaybackHandler.isSpeaking();
                }

                @Override
                public int stop(IBinder caller) {
                    if (!checkNonNull(caller)) {
                        return TextToSpeech.ERROR;
                    }

                    // Only stops items enqueued by this caller, not the whole queue.
                    return mSynthHandler.stopForApp(caller);
                }

                @Override
                public String[] getLanguage() {
                    return onGetLanguage();
                }

                @Override
                public String[] getClientDefaultLanguage() {
                    return getSettingsLocale();
                }

                /*
                 * If defaults are enforced, then no language is "available" except
                 * perhaps the default language selected by the user.
                 */
                @Override
                public int isLanguageAvailable(String lang, String country, String variant) {
                    if (!checkNonNull(lang)) {
                        return TextToSpeech.ERROR;
                    }

                    return onIsLanguageAvailable(lang, country, variant);
                }

                @Override
                public String[] getFeaturesForLanguage(
                        String lang, String country, String variant) {
                    Set<String> features = onGetFeaturesForLanguage(lang, country, variant);
                    String[] featuresArray = null;
                    if (features != null) {
                        featuresArray = new String[features.size()];
                        features.toArray(featuresArray);
                    } else {
                        // Never return null to the client; an empty array means
                        // "no features".
                        featuresArray = new String[0];
                    }
                    return featuresArray;
                }

                /*
                 * There is no point loading a non default language if defaults
                 * are enforced.
                 */
                @Override
                public int loadLanguage(
                        IBinder caller, String lang, String country, String variant) {
                    if (!checkNonNull(lang)) {
                        return TextToSpeech.ERROR;
                    }
                    int retVal = onIsLanguageAvailable(lang, country, variant);

                    // Only schedule the load when the engine reports some level
                    // of availability for the requested locale.
                    if (retVal == TextToSpeech.LANG_AVAILABLE
                            || retVal == TextToSpeech.LANG_COUNTRY_AVAILABLE
                            || retVal == TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE) {

                        SpeechItem item =
                                new LoadLanguageItem(
                                        caller,
                                        Binder.getCallingUid(),
                                        Binder.getCallingPid(),
                                        lang,
                                        country,
                                        variant);

                        if (mSynthHandler.enqueueSpeechItem(TextToSpeech.QUEUE_ADD, item)
                                != TextToSpeech.SUCCESS) {
                            return TextToSpeech.ERROR;
                        }
                    }
                    return retVal;
                }

                @Override
                public List<Voice> getVoices() {
                    return onGetVoices();
                }

                @Override
                public int loadVoice(IBinder caller, String voiceName) {
                    if (!checkNonNull(voiceName)) {
                        return TextToSpeech.ERROR;
                    }
                    int retVal = onIsValidVoiceName(voiceName);

                    if (retVal == TextToSpeech.SUCCESS) {
                        SpeechItem item =
                                new LoadVoiceItem(
                                        caller,
                                        Binder.getCallingUid(),
                                        Binder.getCallingPid(),
                                        voiceName);
                        if (mSynthHandler.enqueueSpeechItem(TextToSpeech.QUEUE_ADD, item)
                                != TextToSpeech.SUCCESS) {
                            return TextToSpeech.ERROR;
                        }
                    }
                    return retVal;
                }

                // NOTE(review): every other binder method here carries @Override;
                // confirm whether this one should as well.
                public String getDefaultVoiceNameFor(String lang, String country, String variant) {
                    if (!checkNonNull(lang)) {
                        return null;
                    }
                    int retVal = onIsLanguageAvailable(lang, country, variant);

                    if (retVal == TextToSpeech.LANG_AVAILABLE
                            || retVal == TextToSpeech.LANG_COUNTRY_AVAILABLE
                            || retVal == TextToSpeech.LANG_COUNTRY_VAR_AVAILABLE) {
                        return onGetDefaultVoiceNameFor(lang, country, variant);
                    } else {
                        return null;
                    }
                }

                @Override
                public void setCallback(IBinder caller, ITextToSpeechCallback cb) {
                    // Note that passing in a null callback is a valid use case.
                    if (!checkNonNull(caller)) {
                        return;
                    }

                    mCallbacks.setCallback(caller, cb);
                }

                // NOTE(review): not referenced anywhere in this file; candidate
                // for removal — confirm there are no callers.
                private String intern(String in) {
                    // The input parameter will be non null.
                    return in.intern();
                }

                /** Returns true only if every argument is non-null. */
                private boolean checkNonNull(Object... args) {
                    for (Object o : args) {
                        if (o == null) return false;
                    }
                    return true;
                }
            };

    /**
     * Tracks the progress callback registered by each client, keyed by the
     * client's binder token, on top of RemoteCallbackList's death tracking.
     */
    private class CallbackMap extends RemoteCallbackList<ITextToSpeechCallback> {
        // Guarded by itself in all methods below.
        private final HashMap<IBinder, ITextToSpeechCallback> mCallerToCallback
                = new HashMap<IBinder, ITextToSpeechCallback>();

        /** Registers cb for caller; a null cb unregisters the existing callback. */
        public void setCallback(IBinder caller, ITextToSpeechCallback cb) {
            synchronized (mCallerToCallback) {
                ITextToSpeechCallback old;
                if (cb != null) {
                    register(cb, caller);
                    old = mCallerToCallback.put(caller, cb);
                } else {
                    old = mCallerToCallback.remove(caller);
                }
                // A replaced callback must also be unregistered from the
                // RemoteCallbackList so it stops receiving events.
                if (old != null && old != cb) {
                    unregister(old);
                }
            }
        }

        public void dispatchOnStop(Object callerIdentity, String utteranceId, boolean started) {
            ITextToSpeechCallback cb = getCallbackFor(callerIdentity);
            if (cb == null) return;
            try {
                cb.onStop(utteranceId, started);
            } catch (RemoteException e) {
                Log.e(TAG, "Callback onStop failed: " + e);
            }
        }

        public void dispatchOnSuccess(Object callerIdentity, String utteranceId) {
            ITextToSpeechCallback cb = getCallbackFor(callerIdentity);
            if (cb == null) return;
            try {
                cb.onSuccess(utteranceId);
            } catch (RemoteException e) {
                // NOTE(review): message says "onDone" but this dispatches onSuccess.
                Log.e(TAG, "Callback onDone failed: " + e);
            }
        }

        public void dispatchOnStart(Object callerIdentity, String utteranceId) {
            ITextToSpeechCallback cb = getCallbackFor(callerIdentity);
            if (cb == null) return;
            try {
                cb.onStart(utteranceId);
            } catch (RemoteException e) {
                Log.e(TAG, "Callback onStart failed: " + e);
            }
        }

        public void dispatchOnError(Object callerIdentity, String utteranceId,
                int errorCode) {
            ITextToSpeechCallback cb = getCallbackFor(callerIdentity);
            if (cb == null) return;
            try {
                cb.onError(utteranceId, errorCode);
            } catch (RemoteException e) {
                Log.e(TAG, "Callback onError failed: " + e);
            }
        }

        public void dispatchOnBeginSynthesis(Object callerIdentity, String utteranceId,
                int sampleRateInHz, int audioFormat, int channelCount) {
            ITextToSpeechCallback cb = getCallbackFor(callerIdentity);
            if (cb == null) return;
            try {
                cb.onBeginSynthesis(utteranceId, sampleRateInHz, audioFormat, channelCount);
            } catch (RemoteException e) {
                Log.e(TAG, "Callback dispatchOnBeginSynthesis(String, int, int, int) failed: "
                        + e);
            }
        }

        public void dispatchOnAudioAvailable(Object callerIdentity, String utteranceId,
                byte[] buffer) {
            ITextToSpeechCallback cb = getCallbackFor(callerIdentity);
            if (cb == null) return;
            try {
                cb.onAudioAvailable(utteranceId, buffer);
            } catch (RemoteException e) {
                Log.e(TAG, "Callback dispatchOnAudioAvailable(String, byte[]) failed: " + e);
            }
        }

        public void dispatchOnUtteranceRangeStart(
                Object callerIdentity, String utteranceId, int start, int end) {
            ITextToSpeechCallback cb = getCallbackFor(callerIdentity);
            if (cb == null) return;
            try {
                cb.onUtteranceRangeStart(utteranceId, start, end);
            } catch (RemoteException e) {
                Log.e(TAG, "Callback dispatchOnUtteranceRangeStart(String, int, int) failed: "
                        + e);
            }
        }

        @Override
        public void onCallbackDied(ITextToSpeechCallback callback, Object cookie) {
            // The cookie passed to register() is the client's binder token.
            IBinder caller = (IBinder) cookie;
            synchronized (mCallerToCallback) {
                mCallerToCallback.remove(caller);
            }
            // NOTE(review): stopping the dead client's pending items is disabled
            // here — confirm whether leaving its queue entries alive is intended.
            //mSynthHandler.stopForApp(caller);
        }

        @Override
        public void kill() {
            synchronized (mCallerToCallback) {
                mCallerToCallback.clear();
                super.kill();
            }
        }

        /** Looks up the callback registered for the given binder token, or null. */
        private ITextToSpeechCallback getCallbackFor(Object caller) {
            ITextToSpeechCallback cb;
            IBinder asBinder = (IBinder) caller;
            synchronized (mCallerToCallback) {
                cb = mCallerToCallback.get(asBinder);
            }

            return cb;
        }
    }
}