spellcheck_worditerator.cc revision 868fa2fe829687343ffae624259930155e16dbd8
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5// Implements a custom word iterator used for our spellchecker.
6
7#include "chrome/renderer/spellchecker/spellcheck_worditerator.h"
8
9#include <map>
10#include <string>
11
12#include "base/basictypes.h"
13#include "base/logging.h"
14#include "base/stringprintf.h"
15#include "base/strings/utf_string_conversions.h"
16#include "chrome/renderer/spellchecker/spellcheck.h"
17#include "third_party/icu/public/common/unicode/normlzr.h"
18#include "third_party/icu/public/common/unicode/schriter.h"
19#include "third_party/icu/public/common/unicode/uscript.h"
20#include "third_party/icu/public/i18n/unicode/ulocdata.h"
21
22// SpellcheckCharAttribute implementation:
23
24SpellcheckCharAttribute::SpellcheckCharAttribute()
25    : script_code_(USCRIPT_LATIN) {
26}
27
28SpellcheckCharAttribute::~SpellcheckCharAttribute() {
29}
30
31void SpellcheckCharAttribute::SetDefaultLanguage(const std::string& language) {
32  CreateRuleSets(language);
33}
34
35string16 SpellcheckCharAttribute::GetRuleSet(bool allow_contraction) const {
36  return allow_contraction ?
37      ruleset_allow_contraction_ : ruleset_disallow_contraction_;
38}
39
void SpellcheckCharAttribute::CreateRuleSets(const std::string& language) {
  // Builds ruleset_allow_contraction_ and ruleset_disallow_contraction_ from
  // the template below, and caches the language's primary script code in
  // script_code_.
  //
  // The template for our custom rule sets, which is based on the word-break
  // rules of ICU 4.0:
  // <http://source.icu-project.org/repos/icu/icu/tags/release-4-0/source/data/brkitr/word.txt>.
  // The major differences from the original one are listed below:
  // * It discards comments in the original rules.
  // * It discards characters not needed by our spellchecker (e.g. numbers,
  //   punctuation characters, Hiraganas, Katakanas, CJK Ideographs, and so on).
  // * It allows customization of the $ALetter value (i.e. word characters).
  // * It allows customization of the $ALetterPlus value (i.e. whether or not to
  //   use the dictionary data).
  // * It allows choosing whether or not to split a text at contraction
  //   characters.
  // This template only changes the forward-iteration rules. So, calling
  // ubrk_prev() returns the same results as the original template.
  //
  // The template contains five "%s" placeholders, consumed in this order by
  // the StringPrintf() calls at the bottom of this function:
  //   1. the script name inside $ALetter,
  //   2. extra characters appended to $ALetter (digits, or empty),
  //   3. extra characters appended to $MidLetter (Hebrew quotes, or empty),
  //   4. the $ALetterPlus definition (with or without dictionary data),
  //   5. the contraction rule (present or empty).
  static const char kRuleTemplate[] =
      "!!chain;"
      "$CR           = [\\p{Word_Break = CR}];"
      "$LF           = [\\p{Word_Break = LF}];"
      "$Newline      = [\\p{Word_Break = Newline}];"
      "$Extend       = [\\p{Word_Break = Extend}];"
      "$Format       = [\\p{Word_Break = Format}];"
      "$Katakana     = [\\p{Word_Break = Katakana}];"
      // Not all the characters in a given script are ALetter.
      // For instance, U+05F4 is MidLetter. So, this may be
      // better, but it leads to an empty set error in Thai.
      // "$ALetter   = [[\\p{script=%s}] & [\\p{Word_Break = ALetter}]];"
      "$ALetter      = [\\p{script=%s}%s];"
      "$MidNumLet    = [\\p{Word_Break = MidNumLet}];"
      "$MidLetter    = [\\p{Word_Break = MidLetter}%s];"
      "$MidNum       = [\\p{Word_Break = MidNum}];"
      "$Numeric      = [\\p{Word_Break = Numeric}];"
      "$ExtendNumLet = [\\p{Word_Break = ExtendNumLet}];"

      "$Control        = [\\p{Grapheme_Cluster_Break = Control}]; "
      "%s"  // ALetterPlus

      "$KatakanaEx     = $Katakana     ($Extend |  $Format)*;"
      "$ALetterEx      = $ALetterPlus  ($Extend |  $Format)*;"
      "$MidNumLetEx    = $MidNumLet    ($Extend |  $Format)*;"
      "$MidLetterEx    = $MidLetter    ($Extend |  $Format)*;"
      "$MidNumEx       = $MidNum       ($Extend |  $Format)*;"
      "$NumericEx      = $Numeric      ($Extend |  $Format)*;"
      "$ExtendNumLetEx = $ExtendNumLet ($Extend |  $Format)*;"

      "$Hiragana       = [\\p{script=Hiragana}];"
      "$Ideographic    = [\\p{Ideographic}];"
      "$HiraganaEx     = $Hiragana     ($Extend |  $Format)*;"
      "$IdeographicEx  = $Ideographic  ($Extend |  $Format)*;"

      "!!forward;"
      "$CR $LF;"
      "[^$CR $LF $Newline]? ($Extend |  $Format)+;"
      "$ALetterEx {200};"
      "$ALetterEx $ALetterEx {200};"
      "%s"  // (Allow|Disallow) Contraction

      "!!reverse;"
      "$BackALetterEx     = ($Format | $Extend)* $ALetterPlus;"
      "$BackMidNumLetEx   = ($Format | $Extend)* $MidNumLet;"
      "$BackNumericEx     = ($Format | $Extend)* $Numeric;"
      "$BackMidNumEx      = ($Format | $Extend)* $MidNum;"
      "$BackMidLetterEx   = ($Format | $Extend)* $MidLetter;"
      "$BackKatakanaEx    = ($Format | $Extend)* $Katakana;"
      "$BackExtendNumLetEx= ($Format | $Extend)* $ExtendNumLet;"
      "$LF $CR;"
      "($Format | $Extend)*  [^$CR $LF $Newline]?;"
      "$BackALetterEx $BackALetterEx;"
      "$BackALetterEx ($BackMidLetterEx | $BackMidNumLetEx) $BackALetterEx;"
      "$BackNumericEx $BackNumericEx;"
      "$BackNumericEx $BackALetterEx;"
      "$BackALetterEx $BackNumericEx;"
      "$BackNumericEx ($BackMidNumEx | $BackMidNumLetEx) $BackNumericEx;"
      "$BackKatakanaEx $BackKatakanaEx;"
      "$BackExtendNumLetEx ($BackALetterEx | $BackNumericEx |"
      " $BackKatakanaEx | $BackExtendNumLetEx);"
      "($BackALetterEx | $BackNumericEx | $BackKatakanaEx)"
      " $BackExtendNumLetEx;"

      "!!safe_reverse;"
      "($Extend | $Format)+ .?;"
      "($MidLetter | $MidNumLet) $BackALetterEx;"
      "($MidNum | $MidNumLet) $BackNumericEx;"

      "!!safe_forward;"
      "($Extend | $Format)+ .?;"
      "($MidLetterEx | $MidNumLetEx) $ALetterEx;"
      "($MidNumEx | $MidNumLetEx) $NumericEx;";

  // Retrieve the script codes used by the given language from ICU. When the
  // given language consists of two or more scripts, we just use the first
  // script. The size of returned script codes is always < 8. Therefore, we use
  // an array of size 8 so we can include all script codes without insufficient
  // buffer errors.
  UErrorCode error = U_ZERO_ERROR;
  UScriptCode script_code[8];
  int scripts = uscript_getCode(language.c_str(), script_code,
                                arraysize(script_code), &error);
  // On failure, script_code_ keeps its previous value (USCRIPT_LATIN by
  // default), so the rest of this function still produces usable rules.
  if (U_SUCCESS(error) && scripts >= 1)
    script_code_ = script_code[0];

  // Retrieve the values for $ALetter and $ALetterPlus. We use the dictionary
  // only for the languages which need it (i.e. Korean and Thai) to prevent ICU
  // from returning dictionary words (i.e. Korean or Thai words) for languages
  // which don't need them.
  const char* aletter = uscript_getName(script_code_);
  if (!aletter)
    aletter = "Latin";

  const char kWithDictionary[] =
      "$dictionary   = [:LineBreak = Complex_Context:];"
      "$ALetterPlus  = [$ALetter [$dictionary-$Extend-$Control]];";
  const char kWithoutDictionary[] = "$ALetterPlus  = $ALetter;";
  const char* aletter_plus = kWithoutDictionary;
  if (script_code_ == USCRIPT_HANGUL || script_code_ == USCRIPT_THAI)
    aletter_plus = kWithDictionary;

  // Treat numbers as word characters except for Arabic and Hebrew.
  const char* aletter_extra = " [0123456789]";
  if (script_code_ == USCRIPT_HEBREW || script_code_ == USCRIPT_ARABIC)
    aletter_extra = "";

  const char kMidLetterExtra[] = "";
  // For Hebrew, treat single/double quotation marks as MidLetter so words
  // containing them (e.g. acronyms with gershayim) are not split.
  const char kMidLetterExtraHebrew[] = "\"'";
  const char* midletter_extra = kMidLetterExtra;
  if (script_code_ == USCRIPT_HEBREW)
    midletter_extra = kMidLetterExtraHebrew;

  // Create two custom rule-sets: one allows contraction and the other does not.
  // We save these strings in UTF-16 so we can use it without conversions. (ICU
  // needs UTF-16 strings.)
  const char kAllowContraction[] =
      "$ALetterEx ($MidLetterEx | $MidNumLetEx) $ALetterEx {200};";
  const char kDisallowContraction[] = "";

  ruleset_allow_contraction_ = ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kAllowContraction));
  ruleset_disallow_contraction_ = ASCIIToUTF16(
      base::StringPrintf(kRuleTemplate,
                         aletter,
                         aletter_extra,
                         midletter_extra,
                         aletter_plus,
                         kDisallowContraction));
}
191
192bool SpellcheckCharAttribute::OutputChar(UChar c, string16* output) const {
193  // Call the language-specific function if necessary.
194  // Otherwise, we call the default one.
195  switch (script_code_) {
196    case USCRIPT_ARABIC:
197      return OutputArabic(c, output);
198
199    case USCRIPT_HANGUL:
200      return OutputHangul(c, output);
201
202    case USCRIPT_HEBREW:
203      return OutputHebrew(c, output);
204
205    default:
206      return OutputDefault(c, output);
207  }
208}
209
210bool SpellcheckCharAttribute::OutputArabic(UChar c, string16* output) const {
211  // Discard characters not from Arabic alphabets. We also discard vowel marks
212  // of Arabic (Damma, Fatha, Kasra, etc.) to prevent our Arabic dictionary from
213  // marking an Arabic word including vowel marks as misspelled. (We need to
214  // check these vowel marks manually and filter them out since their script
215  // codes are USCRIPT_ARABIC.)
216  if (0x0621 <= c && c <= 0x064D)
217    output->push_back(c);
218  return true;
219}
220
221bool SpellcheckCharAttribute::OutputHangul(UChar c, string16* output) const {
222  // Decompose a Hangul character to a Hangul vowel and consonants used by our
223  // spellchecker. A Hangul character of Unicode is a ligature consisting of a
224  // Hangul vowel and consonants, e.g. U+AC01 "Gag" consists of U+1100 "G",
225  // U+1161 "a", and U+11A8 "g". That is, we can treat each Hangul character as
226  // a point of a cubic linear space consisting of (first consonant, vowel, last
227  // consonant). Therefore, we can compose a Hangul character from a vowel and
228  // two consonants with linear composition:
229  //   character =  0xAC00 +
230  //                (first consonant - 0x1100) * 28 * 21 +
231  //                (vowel           - 0x1161) * 28 +
232  //                (last consonant  - 0x11A7);
233  // We can also decompose a Hangul character with linear decomposition:
234  //   first consonant = (character - 0xAC00) / 28 / 21;
235  //   vowel           = (character - 0xAC00) / 28 % 21;
236  //   last consonant  = (character - 0xAC00) % 28;
237  // This code is copied from Unicode Standard Annex #15
238  // <http://unicode.org/reports/tr15> and added some comments.
239  const int kSBase = 0xAC00;  // U+AC00: the top of Hangul characters.
240  const int kLBase = 0x1100;  // U+1100: the top of Hangul first consonants.
241  const int kVBase = 0x1161;  // U+1161: the top of Hangul vowels.
242  const int kTBase = 0x11A7;  // U+11A7: the top of Hangul last consonants.
243  const int kLCount = 19;     // The number of Hangul first consonants.
244  const int kVCount = 21;     // The number of Hangul vowels.
245  const int kTCount = 28;     // The number of Hangul last consonants.
246  const int kNCount = kVCount * kTCount;
247  const int kSCount = kLCount * kNCount;
248
249  int index = c - kSBase;
250  if (index < 0 || index >= kSBase + kSCount) {
251    // This is not a Hangul syllable. Call the default output function since we
252    // should output this character when it is a Hangul syllable.
253    return OutputDefault(c, output);
254  }
255
256  // This is a Hangul character. Decompose this characters into Hangul vowels
257  // and consonants.
258  int l = kLBase + index / kNCount;
259  int v = kVBase + (index % kNCount) / kTCount;
260  int t = kTBase + index % kTCount;
261  output->push_back(l);
262  output->push_back(v);
263  if (t != kTBase)
264    output->push_back(t);
265  return true;
266}
267
268bool SpellcheckCharAttribute::OutputHebrew(UChar c, string16* output) const {
269  // Discard characters except Hebrew alphabets. We also discard Hebrew niqquds
270  // to prevent our Hebrew dictionary from marking a Hebrew word including
271  // niqquds as misspelled. (Same as Arabic vowel marks, we need to check
272  // niqquds manually and filter them out since their script codes are
273  // USCRIPT_HEBREW.)
274  // Pass through ASCII single/double quotation marks and Hebrew Geresh and
275  // Gershayim.
276  if ((0x05D0 <= c && c <= 0x05EA) || c == 0x22 || c == 0x27 ||
277      c == 0x05F4 || c == 0x05F3)
278    output->push_back(c);
279  return true;
280}
281
282bool SpellcheckCharAttribute::OutputDefault(UChar c, string16* output) const {
283  // Check the script code of this character and output only if it is the one
284  // used by the spellchecker language.
285  UErrorCode status = U_ZERO_ERROR;
286  UScriptCode script_code = uscript_getScript(c, &status);
287  if (script_code == script_code_ || script_code == USCRIPT_COMMON)
288    output->push_back(c);
289  return true;
290}
291
292// SpellcheckWordIterator implementation:
293
294SpellcheckWordIterator::SpellcheckWordIterator()
295    : text_(NULL),
296      length_(0),
297      position_(UBRK_DONE),
298      attribute_(NULL),
299      iterator_(NULL) {
300}
301
302SpellcheckWordIterator::~SpellcheckWordIterator() {
303  Reset();
304}
305
306bool SpellcheckWordIterator::Initialize(
307    const SpellcheckCharAttribute* attribute,
308    bool allow_contraction) {
309  // Create a custom ICU break iterator with empty text used in this object. (We
310  // allow setting text later so we can re-use this iterator.)
311  DCHECK(attribute);
312  UErrorCode open_status = U_ZERO_ERROR;
313  UParseError parse_status;
314  string16 rule(attribute->GetRuleSet(allow_contraction));
315
316  // If there is no rule set, the attributes were invalid.
317  if (rule.empty())
318    return false;
319
320  iterator_ = ubrk_openRules(rule.c_str(), rule.length(), NULL, 0,
321                             &parse_status, &open_status);
322  if (U_FAILURE(open_status))
323    return false;
324
325  // Set the character attributes so we can normalize the words extracted by
326  // this iterator.
327  attribute_ = attribute;
328  return true;
329}
330
331bool SpellcheckWordIterator::IsInitialized() const {
332  // Return true if we have an ICU custom iterator.
333  return !!iterator_;
334}
335
336bool SpellcheckWordIterator::SetText(const char16* text, size_t length) {
337  DCHECK(!!iterator_);
338
339  // Set the text to be split by this iterator.
340  UErrorCode status = U_ZERO_ERROR;
341  ubrk_setText(iterator_, text, length, &status);
342  if (U_FAILURE(status))
343    return false;
344
345  // Retrieve the position to the first word in this text. We return false if
346  // this text does not have any words. (For example, The input text consists
347  // only of Chinese characters while the spellchecker language is English.)
348  position_ = ubrk_first(iterator_);
349  if (position_ == UBRK_DONE)
350    return false;
351
352  text_ = text;
353  length_ = static_cast<int>(length);
354  return true;
355}
356
bool SpellcheckWordIterator::GetNextWord(string16* word_string,
                                         int* word_start,
                                         int* word_length) {
  // Scans forward from position_ for the next word worth spellchecking.
  // Returns true and fills the out-parameters (start offset and length in
  // text_, plus the normalized word) on success; returns false with cleared
  // out-parameters when the text is exhausted.
  DCHECK(!!text_ && length_ > 0);

  word_string->clear();
  *word_start = 0;
  *word_length = 0;

  // position_ == UBRK_DONE means a previous call already consumed the text.
  if (!text_ || position_ == UBRK_DONE)
    return false;

  // Find a word that can be checked for spelling. Our rule sets filter out
  // invalid words (e.g. numbers and characters not supported by the
  // spellchecker language) so this ubrk_getRuleStatus() call returns
  // UBRK_WORD_NONE when this iterator finds an invalid word. So, we skip such
  // words until we can find a valid word or reach the end of the input string.
  int next = ubrk_next(iterator_);
  while (next != UBRK_DONE) {
    if (ubrk_getRuleStatus(iterator_) != UBRK_WORD_NONE) {
      // Normalize() may still reject the token (e.g. every character was
      // filtered out); in that case fall through and keep scanning.
      if (Normalize(position_, next - position_, word_string)) {
        *word_start = position_;
        *word_length = next - position_;
        position_ = next;
        return true;
      }
    }
    position_ = next;
    next = ubrk_next(iterator_);
  }

  // There aren't any more words in the given text. Set the position to
  // UBRK_DONE to prevent from calling ubrk_next() next time when this function
  // is called.
  position_ = UBRK_DONE;
  return false;
}
394
395void SpellcheckWordIterator::Reset() {
396  if (iterator_) {
397    ubrk_close(iterator_);
398    iterator_ = NULL;
399  }
400}
401
bool SpellcheckWordIterator::Normalize(int input_start,
                                       int input_length,
                                       string16* output_string) const {
  // Normalizes the token at [input_start, input_start + input_length) of
  // text_ and appends the surviving characters to output_string. Returns
  // false when normalization fails or every character was filtered out.
  //
  // We use NFKC (Normalization Form, Compatible decomposition, followed by
  // canonical Composition) defined in Unicode Standard Annex #15 to normalize
  // this token because it is the most suitable normalization algorithm for our
  // spellchecker. Nevertheless, it is not a perfect algorithm for our
  // spellchecker and we need manual normalization as well. The normalized
  // text does not have to be NUL-terminated since its characters are copied to
  // string16, which adds a NUL character when we need.
  //
  // The FALSE argument makes the UnicodeString a read-only alias of text_
  // (no copy), which is why it need not be NUL-terminated.
  icu::UnicodeString input(FALSE, &text_[input_start], input_length);
  UErrorCode status = U_ZERO_ERROR;
  icu::UnicodeString output;
  icu::Normalizer::normalize(input, UNORM_NFKC, 0, output, status);
  // U_STRING_NOT_TERMINATED_WARNING is expected for the aliased, unterminated
  // input and is not an error.
  if (status != U_ZERO_ERROR && status != U_STRING_NOT_TERMINATED_WARNING)
    return false;

  // Copy the normalized text to the output, applying the per-script filter
  // (which may drop vowel marks, decompose Hangul, etc.).
  icu::StringCharacterIterator it(output);
  for (UChar c = it.first(); c != icu::CharacterIterator::DONE; c = it.next())
    attribute_->OutputChar(c, output_string);

  return !output_string->empty();
}
426