TtsService.java revision bbd63cb28595bd6b1fa62d331d9373b5c798c267
/*
 * Copyright (C) 2009 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package android.tts;

import android.app.Service;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.PackageManager;
import android.content.pm.PackageManager.NameNotFoundException;
import android.media.MediaPlayer;
import android.media.MediaPlayer.OnCompletionListener;
import android.net.Uri;
import android.os.IBinder;
import android.os.RemoteCallbackList;
import android.os.RemoteException;
import android.preference.PreferenceManager;
import android.speech.tts.ITts.Stub;
import android.speech.tts.ITtsCallback;
import android.speech.tts.TextToSpeech;
import android.util.Log;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.concurrent.locks.ReentrantLock;

/**
 * @hide Synthesizes speech from text. This is implemented as a service so that
 *       other applications can call the TTS without needing to bundle the TTS
 *       in the build.
 */
public class TtsService extends Service implements OnCompletionListener {

    /**
     * One entry in the speech queue: a piece of text / IPA text / earcon name,
     * optional engine parameters, and (for SILENCE items) a duration.
     */
    private static class SpeechItem {
        public static final int TEXT = 0;
        public static final int IPA = 1;
        public static final int EARCON = 2;
        public static final int SILENCE = 3;
        public String mText = null;
        public ArrayList<String> mParams = null;
        public int mType = TEXT;
        // Only meaningful for SILENCE items; milliseconds to pause.
        public long mDuration = 0;

        public SpeechItem(String text, ArrayList<String> params, int itemType) {
            mText = text;
            mParams = params;
            mType = itemType;
        }

        public SpeechItem(long silenceTime) {
            mDuration = silenceTime;
        }
    }

    /**
     * Contains the information needed to access a sound resource; the name of
     * the package that contains the resource and the resID of the resource
     * within that package, OR a filesystem path to a sound file. Exactly one
     * of the two addressing forms is populated per instance.
     */
    private static class SoundResource {
        public String mSourcePackageName = null;
        public int mResId = -1;
        public String mFilename = null;

        public SoundResource(String packageName, int id) {
            mSourcePackageName = packageName;
            mResId = id;
            mFilename = null;
        }

        public SoundResource(String file) {
            mSourcePackageName = null;
            mResId = -1;
            mFilename = file;
        }
    }

    // Text longer than this is split into multiple queue items before being
    // handed to the native synthesizer (see splitCurrentTextIfNeeded).
    private static final int MAX_SPEECH_ITEM_CHAR_LENGTH = 4000;
    private static final int MAX_FILENAME_LENGTH = 250;

    private static final String ACTION = "android.intent.action.USE_TTS";
    private static final String CATEGORY = "android.intent.category.TTS";
    private static final String PKGNAME = "android.tts";

    final RemoteCallbackList<android.speech.tts.ITtsCallback> mCallbacks =
            new RemoteCallbackList<ITtsCallback>();

    private Boolean mIsSpeaking;
    private ArrayList<SpeechItem> mSpeechQueue;
    private HashMap<String, SoundResource> mEarcons;
    private HashMap<String, SoundResource> mUtterances;
    private MediaPlayer mPlayer;
    private TtsService mSelf;

    private ContentResolver mResolver;

    // Guards mSpeechQueue processing; tryLock (not lock) is used so a second
    // concurrent caller simply bails out instead of blocking.
    private final ReentrantLock speechQueueLock = new ReentrantLock();
    // Guards the native synthesizer, which is single-threaded.
    private final ReentrantLock synthesizerLock = new ReentrantLock();

    private SynthProxy nativeSynth;

    @Override
    public void onCreate() {
        super.onCreate();
        Log.i("TTS", "TTS starting");

        mResolver = getContentResolver();

        String soLibPath = "/system/lib/libttspico.so";
        nativeSynth = new SynthProxy(soLibPath);

        mSelf = this;
        mIsSpeaking = false;

        mEarcons = new HashMap<String, SoundResource>();
        mUtterances = new HashMap<String, SoundResource>();

        mSpeechQueue = new ArrayList<SpeechItem>();
        mPlayer = null;

        setDefaultSettings();
    }

    @Override
    public void onDestroy() {
        super.onDestroy();
        // Don't hog the media player
        cleanUpPlayer();

        nativeSynth.shutdown();

        // Unregister all callbacks.
        mCallbacks.kill();
    }

    /** Applies the user's stored language/country/variant and rate defaults. */
    private void setDefaultSettings() {
        setLanguage(this.getDefaultLanguage(), getDefaultCountry(),
                getDefaultLocVariant());

        // speech rate
        setSpeechRate(getDefaultRate());
    }

    /**
     * Returns true when the user has forced the system defaults to override
     * any per-request language/rate parameters.
     */
    private boolean isDefaultEnforced() {
        return (android.provider.Settings.Secure.getInt(mResolver,
                android.provider.Settings.Secure.TTS_USE_DEFAULTS,
                TextToSpeech.Engine.FALLBACK_TTS_USE_DEFAULTS)
                == 1);
    }

    /** Returns the user's default speech rate, or the engine fallback. */
    private int getDefaultRate() {
        return android.provider.Settings.Secure.getInt(mResolver,
                android.provider.Settings.Secure.TTS_DEFAULT_RATE,
                TextToSpeech.Engine.FALLBACK_TTS_DEFAULT_RATE);
    }

    /** Returns the user's default language, or the engine fallback. */
    private String getDefaultLanguage() {
        String defaultLang = android.provider.Settings.Secure.getString(mResolver,
                android.provider.Settings.Secure.TTS_DEFAULT_LANG);
        if (defaultLang == null) {
            return TextToSpeech.Engine.FALLBACK_TTS_DEFAULT_LANG;
        } else {
            return defaultLang;
        }
    }

    /** Returns the user's default country, or the engine fallback. */
    private String getDefaultCountry() {
        String defaultCountry = android.provider.Settings.Secure.getString(mResolver,
                android.provider.Settings.Secure.TTS_DEFAULT_COUNTRY);
        if (defaultCountry == null) {
            return TextToSpeech.Engine.FALLBACK_TTS_DEFAULT_COUNTRY;
        } else {
            return defaultCountry;
        }
    }

    /** Returns the user's default locale variant, or the engine fallback. */
    private String getDefaultLocVariant() {
        String defaultVar = android.provider.Settings.Secure.getString(mResolver,
                android.provider.Settings.Secure.TTS_DEFAULT_VARIANT);
        if (defaultVar == null) {
            return TextToSpeech.Engine.FALLBACK_TTS_DEFAULT_VARIANT;
        } else {
            return defaultVar;
        }
    }

    /**
     * Sets the synthesizer's speech rate; the system default wins when the
     * user has enforced defaults.
     */
    private void setSpeechRate(int rate) {
        if (isDefaultEnforced()) {
            nativeSynth.setSpeechRate(getDefaultRate());
        } else {
            nativeSynth.setSpeechRate(rate);
        }
    }

    /** Sets the synthesizer's pitch. */
    private void setPitch(int pitch) {
        nativeSynth.setPitch(pitch);
    }

    /** Asks the native engine how well it supports the given locale. */
    private int isLanguageAvailable(String lang, String country, String variant) {
        Log.v("TTS", "TtsService.isLanguageAvailable(" + lang + ", "
                + country + ", " + variant + ")");
        return nativeSynth.isLanguageAvailable(lang, country, variant);
    }

    /** Returns the engine's current {language, country, variant}. */
    private String[] getLanguage() {
        return nativeSynth.getLanguage();
    }

    /**
     * Sets the synthesizer's language; the system defaults win when the user
     * has enforced defaults.
     */
    private void setLanguage(String lang, String country, String variant) {
        Log.v("TTS", "TtsService.setLanguage(" + lang + ", " + country + ", "
                + variant + ")");
        if (isDefaultEnforced()) {
            nativeSynth.setLanguage(getDefaultLanguage(), getDefaultCountry(),
                    getDefaultLocVariant());
        } else {
            nativeSynth.setLanguage(lang, country, variant);
        }
    }

    /**
     * Adds a sound resource to the TTS.
     *
     * @param text
     *            The text that should be associated with the sound resource
     * @param packageName
     *            The name of the package which has the sound resource
     * @param resId
     *            The resource ID of the sound within its package
     */
    private void addSpeech(String text, String packageName, int resId) {
        mUtterances.put(text, new SoundResource(packageName, resId));
    }

    /**
     * Adds a sound resource to the TTS.
     *
     * @param text
     *            The text that should be associated with the sound resource
     * @param filename
     *            The filename of the sound resource. This must be a complete
     *            path like: (/sdcard/mysounds/mysoundbite.mp3).
     */
    private void addSpeech(String text, String filename) {
        mUtterances.put(text, new SoundResource(filename));
    }

    /**
     * Adds a sound resource to the TTS as an earcon.
     *
     * @param earcon
     *            The text that should be associated with the sound resource
     * @param packageName
     *            The name of the package which has the sound resource
     * @param resId
     *            The resource ID of the sound within its package
     */
    private void addEarcon(String earcon, String packageName, int resId) {
        mEarcons.put(earcon, new SoundResource(packageName, resId));
    }

    /**
     * Adds a sound resource to the TTS as an earcon.
     *
     * @param earcon
     *            The text that should be associated with the sound resource
     * @param filename
     *            The filename of the sound resource. This must be a complete
     *            path like: (/sdcard/mysounds/mysoundbite.mp3).
     */
    private void addEarcon(String earcon, String filename) {
        mEarcons.put(earcon, new SoundResource(filename));
    }

    /**
     * Speaks the given text using the specified queueing mode and parameters.
     *
     * @param text
     *            The text that should be spoken
     * @param queueMode
     *            0 for no queue (interrupts all previous utterances), 1 for
     *            queued
     * @param params
     *            An ArrayList of parameters. This is not implemented for all
     *            engines.
     */
    private void speak(String text, int queueMode, ArrayList<String> params) {
        if (queueMode == 0) {
            stop();
        }
        mSpeechQueue.add(new SpeechItem(text, params, SpeechItem.TEXT));
        if (!mIsSpeaking) {
            processSpeechQueue();
        }
    }

    /**
     * Speaks the given IPA text using the specified queueing mode and parameters.
     *
     * @param ipaText
     *            The IPA text that should be spoken
     * @param queueMode
     *            0 for no queue (interrupts all previous utterances), 1 for
     *            queued
     * @param params
     *            An ArrayList of parameters. This is not implemented for all
     *            engines.
     */
    private void speakIpa(String ipaText, int queueMode, ArrayList<String> params) {
        if (queueMode == 0) {
            stop();
        }
        mSpeechQueue.add(new SpeechItem(ipaText, params, SpeechItem.IPA));
        if (!mIsSpeaking) {
            processSpeechQueue();
        }
    }

    /**
     * Plays the earcon using the specified queueing mode and parameters.
     *
     * @param earcon
     *            The earcon that should be played
     * @param queueMode
     *            0 for no queue (interrupts all previous utterances), 1 for
     *            queued
     * @param params
     *            An ArrayList of parameters. This is not implemented for all
     *            engines.
     */
    private void playEarcon(String earcon, int queueMode,
            ArrayList<String> params) {
        if (queueMode == 0) {
            stop();
        }
        mSpeechQueue.add(new SpeechItem(earcon, params, SpeechItem.EARCON));
        if (!mIsSpeaking) {
            processSpeechQueue();
        }
    }

    /**
     * Stops all speech output and removes any utterances still in the queue.
     */
    private void stop() {
        Log.i("TTS", "Stopping");
        mSpeechQueue.clear();

        nativeSynth.stop();
        mIsSpeaking = false;
        if (mPlayer != null) {
            try {
                mPlayer.stop();
            } catch (IllegalStateException e) {
                // Do nothing, the player is already stopped.
            }
        }
        Log.i("TTS", "Stopped");
    }

    /** MediaPlayer finished a prerecorded sound; advance the queue. */
    public void onCompletion(MediaPlayer arg0) {
        processSpeechQueue();
    }

    /**
     * Enqueues a pause of the given duration (milliseconds), using the same
     * queueing-mode semantics as {@link #speak}.
     */
    private void playSilence(long duration, int queueMode,
            ArrayList<String> params) {
        if (queueMode == 0) {
            stop();
        }
        mSpeechQueue.add(new SpeechItem(duration));
        if (!mIsSpeaking) {
            processSpeechQueue();
        }
    }

    /**
     * Sleeps for the given duration on a background thread, then resumes
     * queue processing. Used to realize SILENCE queue items.
     */
    private void silence(final long duration) {
        class SilenceThread implements Runnable {
            public void run() {
                try {
                    Thread.sleep(duration);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    // Always keep the queue moving, even if interrupted.
                    processSpeechQueue();
                }
            }
        }
        Thread slnc = (new Thread(new SilenceThread()));
        slnc.setPriority(Thread.MIN_PRIORITY);
        slnc.start();
    }

    /**
     * Hands text to the native synthesizer on a background thread. If the
     * synthesizer is busy, the thread sleeps briefly and re-spawns itself
     * rather than blocking. Even-indexed entries of {@code params} are keys,
     * odd-indexed entries their values; recognized keys adjust rate and
     * language before synthesis.
     */
    private void speakInternalOnly(final String text,
            final ArrayList<String> params) {
        class SynthThread implements Runnable {
            public void run() {
                boolean synthAvailable = false;
                try {
                    synthAvailable = synthesizerLock.tryLock();
                    if (!synthAvailable) {
                        // Busy: retry shortly on a fresh thread.
                        Thread.sleep(100);
                        Thread synth = (new Thread(new SynthThread()));
                        synth.setPriority(Thread.MIN_PRIORITY);
                        synth.start();
                        return;
                    }
                    if (params != null) {
                        String language = "";
                        String country = "";
                        String variant = "";
                        for (int i = 0; i < params.size() - 1; i = i + 2) {
                            String param = params.get(i);
                            if (param.equals(TextToSpeech.Engine.TTS_KEY_PARAM_RATE)) {
                                setSpeechRate(Integer.parseInt(params.get(i + 1)));
                            } else if (param.equals(TextToSpeech.Engine.TTS_KEY_PARAM_LANGUAGE)) {
                                language = params.get(i + 1);
                            } else if (param.equals(TextToSpeech.Engine.TTS_KEY_PARAM_COUNTRY)) {
                                country = params.get(i + 1);
                            } else if (param.equals(TextToSpeech.Engine.TTS_KEY_PARAM_VARIANT)) {
                                variant = params.get(i + 1);
                            }
                        }
                        if (language.length() > 0) {
                            setLanguage(language, country, variant);
                        }
                    }
                    nativeSynth.speak(text);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    // This check is needed because finally will always run;
                    // even if the method returns somewhere in the try block.
                    if (synthAvailable) {
                        synthesizerLock.unlock();
                    }
                    processSpeechQueue();
                }
            }
        }
        Thread synth = (new Thread(new SynthThread()));
        synth.setPriority(Thread.MIN_PRIORITY);
        synth.start();
    }

    /**
     * Looks up a prerecorded sound for the given item, or null when the item
     * should be synthesized (or is silence).
     */
    private SoundResource getSoundResource(SpeechItem speechItem) {
        SoundResource sr = null;
        String text = speechItem.mText;
        if (speechItem.mType == SpeechItem.SILENCE) {
            // Do nothing if this is just silence
        } else if (speechItem.mType == SpeechItem.EARCON) {
            sr = mEarcons.get(text);
        } else {
            sr = mUtterances.get(text);
        }
        return sr;
    }

    /** Notifies interested apps that the speech queue has drained. */
    private void broadcastTtsQueueProcessingCompleted() {
        Intent i = new Intent(Intent.ACTION_TTS_QUEUE_PROCESSING_COMPLETED);
        sendBroadcast(i);
    }

    /** Tells every registered callback that the given mark was reached. */
    private void dispatchSpeechCompletedCallbacks(String mark) {
        Log.i("TTS callback", "dispatch started");
        // Broadcast to all clients the new value.
        final int N = mCallbacks.beginBroadcast();
        for (int i = 0; i < N; i++) {
            try {
                mCallbacks.getBroadcastItem(i).markReached(mark);
            } catch (RemoteException e) {
                // The RemoteCallbackList will take care of removing
                // the dead object for us.
            }
        }
        mCallbacks.finishBroadcast();
        Log.i("TTS callback", "dispatch completed to " + N);
    }

    /**
     * If the head-of-queue item's text exceeds MAX_SPEECH_ITEM_CHAR_LENGTH,
     * replaces it in the queue with a run of shorter TEXT items and returns
     * the new head; otherwise returns the item unchanged. Must be called with
     * the item still at queue position 0.
     */
    private SpeechItem splitCurrentTextIfNeeded(SpeechItem currentSpeechItem) {
        if (currentSpeechItem.mText.length() < MAX_SPEECH_ITEM_CHAR_LENGTH) {
            return currentSpeechItem;
        } else {
            ArrayList<SpeechItem> splitItems = new ArrayList<SpeechItem>();
            int start = 0;
            int end = start + MAX_SPEECH_ITEM_CHAR_LENGTH - 1;
            String splitText;
            SpeechItem splitItem;
            while (end < currentSpeechItem.mText.length()) {
                splitText = currentSpeechItem.mText.substring(start, end);
                splitItem = new SpeechItem(splitText, null, SpeechItem.TEXT);
                splitItems.add(splitItem);
                start = end;
                end = start + MAX_SPEECH_ITEM_CHAR_LENGTH - 1;
            }
            splitText = currentSpeechItem.mText.substring(start);
            splitItem = new SpeechItem(splitText, null, SpeechItem.TEXT);
            splitItems.add(splitItem);
            // Swap the oversized item for its pieces, preserving order.
            mSpeechQueue.remove(0);
            for (int i = splitItems.size() - 1; i >= 0; i--) {
                mSpeechQueue.add(0, splitItems.get(i));
            }
            return mSpeechQueue.get(0);
        }
    }

    /**
     * Pops the head of the speech queue and starts it: prerecorded sounds go
     * through MediaPlayer, text goes to the native synthesizer, silence is
     * realized with a timed sleep. Re-entered (via onCompletion / the synth
     * and silence threads) until the queue drains. Non-blocking: a concurrent
     * call simply returns when the queue lock is already held.
     */
    private void processSpeechQueue() {
        boolean speechQueueAvailable = false;
        try {
            speechQueueAvailable = speechQueueLock.tryLock();
            if (!speechQueueAvailable) {
                return;
            }
            if (mSpeechQueue.size() < 1) {
                mIsSpeaking = false;
                broadcastTtsQueueProcessingCompleted();
                return;
            }

            SpeechItem currentSpeechItem = mSpeechQueue.get(0);
            mIsSpeaking = true;
            SoundResource sr = getSoundResource(currentSpeechItem);
            // Synth speech as needed - synthesizer should call
            // processSpeechQueue to continue running the queue
            // mText is null for SILENCE items and Log rejects null messages,
            // so substitute an empty string.
            Log.i("TTS processing: ",
                    currentSpeechItem.mText == null ? "" : currentSpeechItem.mText);
            if (sr == null) {
                if (currentSpeechItem.mType == SpeechItem.TEXT) {
                    currentSpeechItem = splitCurrentTextIfNeeded(currentSpeechItem);
                    speakInternalOnly(currentSpeechItem.mText,
                            currentSpeechItem.mParams);
                } else if (currentSpeechItem.mType == SpeechItem.IPA) {
                    // TODO Implement IPA support
                } else {
                    // This is either silence or an earcon that was missing
                    silence(currentSpeechItem.mDuration);
                }
            } else {
                cleanUpPlayer();
                // Compare package names by value; the original reference
                // comparison (==) made this branch effectively unreachable.
                if (PKGNAME.equals(sr.mSourcePackageName)) {
                    // Utterance is part of the TTS library
                    mPlayer = MediaPlayer.create(this, sr.mResId);
                } else if (sr.mSourcePackageName != null) {
                    // Utterance is part of the app calling the library
                    Context ctx;
                    try {
                        ctx = this.createPackageContext(sr.mSourcePackageName,
                                0);
                    } catch (NameNotFoundException e) {
                        e.printStackTrace();
                        mSpeechQueue.remove(0); // Remove it from the queue and
                                                // move on
                        mIsSpeaking = false;
                        return;
                    }
                    mPlayer = MediaPlayer.create(ctx, sr.mResId);
                } else {
                    // Utterance is coming from a file
                    mPlayer = MediaPlayer.create(this, Uri.parse(sr.mFilename));
                }

                // Check if Media Server is dead; if it is, clear the queue and
                // give up for now - hopefully, it will recover itself.
                if (mPlayer == null) {
                    mSpeechQueue.clear();
                    mIsSpeaking = false;
                    return;
                }
                mPlayer.setOnCompletionListener(this);
                try {
                    mPlayer.start();
                } catch (IllegalStateException e) {
                    mSpeechQueue.clear();
                    mIsSpeaking = false;
                    cleanUpPlayer();
                    return;
                }
            }
            if (mSpeechQueue.size() > 0) {
                mSpeechQueue.remove(0);
            }
        } finally {
            // This check is needed because finally will always run; even if the
            // method returns somewhere in the try block.
            if (speechQueueAvailable) {
                speechQueueLock.unlock();
            }
        }
    }

    /** Releases the MediaPlayer so other apps can use it. */
    private void cleanUpPlayer() {
        if (mPlayer != null) {
            mPlayer.release();
            mPlayer = null;
        }
    }

    /**
     * Synthesizes the given text to a file using the specified parameters.
     *
     * @param text
     *            The String of text that should be synthesized
     * @param params
     *            An ArrayList of parameters. The first element of this array
     *            controls the type of voice to use.
     * @param filename
     *            The string that gives the full output filename; it should be
     *            something like "/sdcard/myappsounds/mysound.wav".
     * @param calledFromApi
     *            True when invoked by an outside app, in which case any
     *            in-progress speech is stopped first.
     * @return A boolean that indicates if the synthesis succeeded
     */
    private boolean synthesizeToFile(String text, ArrayList<String> params,
            String filename, boolean calledFromApi) {
        // Only stop everything if this is a call made by an outside app trying
        // to use the API. Do NOT stop if this is a call from within the service
        // as clearing the speech queue here would be a mistake.
        if (calledFromApi) {
            stop();
        }
        Log.i("TTS", "Synthesizing to " + filename);
        boolean synthAvailable = false;
        try {
            synthAvailable = synthesizerLock.tryLock();
            if (!synthAvailable) {
                return false;
            }
            // Don't allow a filename that is too long
            if (filename.length() > MAX_FILENAME_LENGTH) {
                return false;
            }
            nativeSynth.synthesizeToFile(text, filename);
        } finally {
            // This check is needed because finally will always run; even if the
            // method returns somewhere in the try block.
            if (synthAvailable) {
                synthesizerLock.unlock();
            }
        }
        Log.i("TTS", "Completed synthesis for " + filename);
        return true;
    }

    /**
     * Synthesizes the given IPA text to a file using the specified parameters.
     *
     * @param ipaText
     *            The String of IPA text that should be synthesized
     * @param params
     *            An ArrayList of parameters. The first element of this array
     *            controls the type of voice to use.
     * @param filename
     *            The string that gives the full output filename; it should be
     *            something like "/sdcard/myappsounds/mysound.wav".
     * @param calledFromApi
     *            True when invoked by an outside app, in which case any
     *            in-progress speech is stopped first.
     * @return A boolean that indicates if the synthesis succeeded
     */
    private boolean synthesizeIpaToFile(String ipaText, ArrayList<String> params,
            String filename, boolean calledFromApi) {
        // Only stop everything if this is a call made by an outside app trying
        // to use the API. Do NOT stop if this is a call from within the service
        // as clearing the speech queue here would be a mistake.
        if (calledFromApi) {
            stop();
        }
        Log.i("TTS", "Synthesizing IPA to " + filename);
        boolean synthAvailable = false;
        try {
            synthAvailable = synthesizerLock.tryLock();
            if (!synthAvailable) {
                return false;
            }
            // Don't allow a filename that is too long
            if (filename.length() > MAX_FILENAME_LENGTH) {
                return false;
            }
            // TODO: Add nativeSynth.synthesizeIpaToFile(text, filename);
        } finally {
            // This check is needed because finally will always run; even if the
            // method returns somewhere in the try block.
            if (synthAvailable) {
                synthesizerLock.unlock();
            }
        }
        Log.i("TTS", "Completed synthesis for " + filename);
        return true;
    }

    @Override
    public IBinder onBind(Intent intent) {
        if (ACTION.equals(intent.getAction())) {
            // getCategories() returns null when the binding intent carries no
            // categories; guard before iterating to avoid an NPE.
            if (intent.getCategories() != null) {
                for (String category : intent.getCategories()) {
                    if (category.equals(CATEGORY)) {
                        return mBinder;
                    }
                }
            }
        }
        return null;
    }

    private final android.speech.tts.ITts.Stub mBinder = new Stub() {

        public void registerCallback(ITtsCallback cb) {
            if (cb != null)
                mCallbacks.register(cb);
        }

        public void unregisterCallback(ITtsCallback cb) {
            if (cb != null)
                mCallbacks.unregister(cb);
        }

        /**
         * Speaks the given text using the specified queueing mode and
         * parameters.
         *
         * @param text
         *            The text that should be spoken
         * @param queueMode
         *            0 for no queue (interrupts all previous utterances), 1 for
         *            queued
         * @param params
         *            An ArrayList of parameters. The first element of this
         *            array controls the type of voice to use.
         */
        public void speak(String text, int queueMode, String[] params) {
            ArrayList<String> speakingParams = new ArrayList<String>();
            if (params != null) {
                speakingParams = new ArrayList<String>(Arrays.asList(params));
            }
            mSelf.speak(text, queueMode, speakingParams);
        }

        /**
         * Speaks the given IPA text using the specified queueing mode and
         * parameters.
         *
         * @param ipaText
         *            The IPA text that should be spoken
         * @param queueMode
         *            0 for no queue (interrupts all previous utterances), 1 for
         *            queued
         * @param params
         *            An ArrayList of parameters. The first element of this
         *            array controls the type of voice to use.
         */
        public void speakIpa(String ipaText, int queueMode, String[] params) {
            ArrayList<String> speakingParams = new ArrayList<String>();
            if (params != null) {
                speakingParams = new ArrayList<String>(Arrays.asList(params));
            }
            mSelf.speakIpa(ipaText, queueMode, speakingParams);
        }

        /**
         * Plays the earcon using the specified queueing mode and parameters.
         *
         * @param earcon
         *            The earcon that should be played
         * @param queueMode
         *            0 for no queue (interrupts all previous utterances), 1 for
         *            queued
         * @param params
         *            An ArrayList of parameters.
         */
        public void playEarcon(String earcon, int queueMode, String[] params) {
            ArrayList<String> speakingParams = new ArrayList<String>();
            if (params != null) {
                speakingParams = new ArrayList<String>(Arrays.asList(params));
            }
            mSelf.playEarcon(earcon, queueMode, speakingParams);
        }

        /**
         * Plays the silence using the specified queueing mode and parameters.
         *
         * @param duration
         *            The duration of the silence that should be played
         * @param queueMode
         *            0 for no queue (interrupts all previous utterances), 1 for
         *            queued
         * @param params
         *            An ArrayList of parameters.
         */
        public void playSilence(long duration, int queueMode, String[] params) {
            ArrayList<String> speakingParams = new ArrayList<String>();
            if (params != null) {
                speakingParams = new ArrayList<String>(Arrays.asList(params));
            }
            mSelf.playSilence(duration, queueMode, speakingParams);
        }

        /**
         * Stops all speech output and removes any utterances still in the
         * queue.
         */
        public void stop() {
            mSelf.stop();
        }

        /**
         * Returns whether or not the TTS is speaking.
         *
         * @return Boolean to indicate whether or not the TTS is speaking
         */
        public boolean isSpeaking() {
            // NOTE(review): this is true only while the queue is EMPTY
            // (size < 1), which looks inverted for an "is speaking" check —
            // confirm intended semantics before changing; behavior preserved
            // here as-is.
            return (mSelf.mIsSpeaking && (mSpeechQueue.size() < 1));
        }

        /**
         * Adds a sound resource to the TTS.
         *
         * @param text
         *            The text that should be associated with the sound resource
         * @param packageName
         *            The name of the package which has the sound resource
         * @param resId
         *            The resource ID of the sound within its package
         */
        public void addSpeech(String text, String packageName, int resId) {
            mSelf.addSpeech(text, packageName, resId);
        }

        /**
         * Adds a sound resource to the TTS.
         *
         * @param text
         *            The text that should be associated with the sound resource
         * @param filename
         *            The filename of the sound resource. This must be a
         *            complete path like: (/sdcard/mysounds/mysoundbite.mp3).
         */
        public void addSpeechFile(String text, String filename) {
            mSelf.addSpeech(text, filename);
        }

        /**
         * Adds a sound resource to the TTS as an earcon.
         *
         * @param earcon
         *            The text that should be associated with the sound resource
         * @param packageName
         *            The name of the package which has the sound resource
         * @param resId
         *            The resource ID of the sound within its package
         */
        public void addEarcon(String earcon, String packageName, int resId) {
            mSelf.addEarcon(earcon, packageName, resId);
        }

        /**
         * Adds a sound resource to the TTS as an earcon.
         *
         * @param earcon
         *            The text that should be associated with the sound resource
         * @param filename
         *            The filename of the sound resource. This must be a
         *            complete path like: (/sdcard/mysounds/mysoundbite.mp3).
         */
        public void addEarconFile(String earcon, String filename) {
            mSelf.addEarcon(earcon, filename);
        }

        /**
         * Sets the speech rate for the TTS. Note that this will only have an
         * effect on synthesized speech; it will not affect pre-recorded speech.
         *
         * @param speechRate
         *            The speech rate that should be used
         */
        public void setSpeechRate(int speechRate) {
            mSelf.setSpeechRate(speechRate);
        }

        /**
         * Sets the pitch for the TTS. Note that this will only have an
         * effect on synthesized speech; it will not affect pre-recorded speech.
         *
         * @param pitch
         *            The pitch that should be used for the synthesized voice
         */
        public void setPitch(int pitch) {
            mSelf.setPitch(pitch);
        }

        /**
         * Returns the level of support for the specified language.
         *
         * @param lang the three letter ISO language code.
         * @param country the three letter ISO country code.
         * @param variant the variant code associated with the country and language pair.
         * @return one of TTS_LANG_NOT_SUPPORTED, TTS_LANG_MISSING_DATA, TTS_LANG_AVAILABLE,
         *         TTS_LANG_COUNTRY_AVAILABLE, TTS_LANG_COUNTRY_VAR_AVAILABLE as defined in
         *         android.speech.tts.TextToSpeech.
         */
        public int isLanguageAvailable(String lang, String country, String variant) {
            return mSelf.isLanguageAvailable(lang, country, variant);
        }

        /**
         * Returns the currently set language / country / variant strings representing the
         * language used by the TTS engine.
         * @return null is no language is set, or an array of 3 string containing respectively
         *         the language, country and variant.
         */
        public String[] getLanguage() {
            return mSelf.getLanguage();
        }

        /**
         * Sets the speech rate for the TTS, which affects the synthesized voice.
         *
         * @param lang the three letter ISO language code.
         * @param country the three letter ISO country code.
         * @param variant the variant code associated with the country and language pair.
         */
        public void setLanguage(String lang, String country, String variant) {
            mSelf.setLanguage(lang, country, variant);
        }

        /**
         * Synthesizes the given text to a file using the specified
         * parameters.
         *
         * @param text
         *            The String of text that should be synthesized
         * @param params
         *            An ArrayList of parameters. The first element of this
         *            array controls the type of voice to use.
         * @param filename
         *            The string that gives the full output filename; it should
         *            be something like "/sdcard/myappsounds/mysound.wav".
         * @return A boolean that indicates if the synthesis succeeded
         */
        public boolean synthesizeToFile(String text, String[] params,
                String filename) {
            ArrayList<String> speakingParams = new ArrayList<String>();
            if (params != null) {
                speakingParams = new ArrayList<String>(Arrays.asList(params));
            }
            return mSelf.synthesizeToFile(text, speakingParams, filename, true);
        }

        /**
         * Synthesizes the given IPA text to a file using the specified
         * parameters.
         *
         * @param ipaText
         *            The String of IPA text that should be synthesized
         * @param params
         *            An ArrayList of parameters. The first element of this
         *            array controls the type of voice to use.
         * @param filename
         *            The string that gives the full output filename; it should
         *            be something like "/sdcard/myappsounds/mysound.wav".
         * @return A boolean that indicates if the synthesis succeeded
         */
        public boolean synthesizeIpaToFile(String ipaText, String[] params,
                String filename) {
            ArrayList<String> speakingParams = new ArrayList<String>();
            if (params != null) {
                speakingParams = new ArrayList<String>(Arrays.asList(params));
            }
            return mSelf.synthesizeIpaToFile(ipaText, speakingParams, filename, true);
        }
    };

}