Lexer.cpp revision 24f0e48c0aa62f2268e061aad70f9b19a59e7b52
1//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  This file implements the Lexer and Token interfaces.
11//
12//===----------------------------------------------------------------------===//
13//
14// TODO: GCC Diagnostics emitted by the lexer:
15// PEDWARN: (form feed|vertical tab) in preprocessing directive
16//
17// Universal characters, unicode, char mapping:
18// WARNING: `%.*s' is not in NFKC
19// WARNING: `%.*s' is not in NFC
20//
21// Other:
22// TODO: Options to support:
23//    -fexec-charset,-fwide-exec-charset
24//
25//===----------------------------------------------------------------------===//
26
27#include "clang/Lex/Lexer.h"
28#include "clang/Lex/Preprocessor.h"
29#include "clang/Lex/LexDiagnostic.h"
30#include "clang/Basic/SourceManager.h"
31#include "llvm/Support/Compiler.h"
32#include "llvm/Support/MemoryBuffer.h"
33#include <cctype>
34using namespace clang;
35
36static void InitCharacterInfo();
37
38//===----------------------------------------------------------------------===//
39// Token Class Implementation
40//===----------------------------------------------------------------------===//
41
42/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
43bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
44  if (IdentifierInfo *II = getIdentifierInfo())
45    return II->getObjCKeywordID() == objcKey;
46  return false;
47}
48
49/// getObjCKeywordID - Return the ObjC keyword kind.
50tok::ObjCKeywordKind Token::getObjCKeywordID() const {
51  IdentifierInfo *specId = getIdentifierInfo();
52  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
53}
54
55
56//===----------------------------------------------------------------------===//
57// Lexer Class Implementation
58//===----------------------------------------------------------------------===//
59
/// InitLexer - Common initialization shared by every Lexer constructor.
/// BufStart/BufEnd bound the whole memory buffer (which must be
/// nul-terminated); BufPtr is where lexing actually starts, which may be in
/// the middle of the buffer.  Resets all per-lexer state to its defaults.
void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
                      const char *BufEnd) {
  InitCharacterInfo();  // Make sure the CharInfo classification table exists.

  BufferStart = BufStart;
  BufferPtr = BufPtr;
  BufferEnd = BufEnd;

  // The trailing nul doubles as the end-of-buffer sentinel, so the scanning
  // loops don't need a bounds check on every character.
  assert(BufEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  Is_PragmaLexer = false;

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode.  Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to not keeping comments.
  ExtendedTokenMode = 0;
}
92
/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process.  This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, Preprocessor &PP)
  : PreprocessorLexer(&PP, FID),
    FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
    Features(PP.getLangOptions()) {

  const llvm::MemoryBuffer *InputFile = PP.getSourceManager().getBuffer(FID);

  // Start lexing at the very beginning of the file's buffer.
  InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
            InputFile->getBufferEnd());

  // Default to keeping comments if the preprocessor wants them.
  SetCommentRetentionState(PP.getCommentRetentionState());
}
110
/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.  A raw lexer
/// has no Preprocessor, so it emits no diagnostics and never macro-expands.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
             const char *BufStart, const char *BufPtr, const char *BufEnd)
  : FileLoc(fileloc), Features(features) {

  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.
  LexingRawMode = true;
}
123
/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.  Convenience
/// form that lexes an entire FileID from its SourceManager buffer.
Lexer::Lexer(FileID FID, const SourceManager &SM, const LangOptions &features)
  : FileLoc(SM.getLocForStartOfFile(FID)), Features(features) {
  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);

  // Lex the whole buffer, starting from its beginning.
  InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(),
            FromFile->getBufferEnd());

  // We *are* in raw mode.
  LexingRawMode = true;
}
137
/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion.  This has a variety of magic semantics that this method
/// sets up.  It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// instantiation location that indicates where all lexed tokens should be
/// "expanded from".
///
/// FIXME: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by.  This would require making
/// Preprocessor::Lex virtual.  Given that, we could just dump in a magic lexer
/// interface that could handle this stuff.  This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation InstantiationLocStart,
                                 SourceLocation InstantiationLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  Lexer *L = new Lexer(SpellingFID, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want.  This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  // The scratch buffer, like every lexer buffer, must end in a nul sentinel.
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information.  This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createInstantiationLoc(SM.getLocForStartOfFile(SpellingFID),
                                         InstantiationLocStart,
                                         InstantiationLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOM token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}
186
187
188/// Stringify - Convert the specified string into a C string, with surrounding
189/// ""'s, and with escaped \ and " characters.
190std::string Lexer::Stringify(const std::string &Str, bool Charify) {
191  std::string Result = Str;
192  char Quote = Charify ? '\'' : '"';
193  for (unsigned i = 0, e = Result.size(); i != e; ++i) {
194    if (Result[i] == '\\' || Result[i] == Quote) {
195      Result.insert(Result.begin()+i, '\\');
196      ++i; ++e;
197    }
198  }
199  return Result;
200}
201
/// Stringify - Convert the specified string into a C string by escaping '\'
/// and " characters.  This does not add surrounding ""'s to the string; the
/// vector is modified in place.
void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
    if (Str[i] == '\\' || Str[i] == '"') {
      // Insert the escaping backslash, then step over both it and the
      // character it protects; the end bound grows with the insertion.
      Str.insert(Str.begin()+i, '\\');
      ++i; ++e;
    }
  }
}
212
213
/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file.  If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM,
                                   const LangOptions &LangOpts) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
  // all obviously single-char tokens.  This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getInstantiationLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  std::pair<const char *,const char *> Buffer = SM.getBufferData(LocInfo.first);
  // Pointer to the first byte of the token within the file's buffer.
  const char *StrData = Buffer.first+LocInfo.second;

  // Create a raw (diagnostic-free) lexer starting at the beginning of this
  // token, lex exactly one token, and report its length.
  Lexer TheLexer(Loc, LangOpts, Buffer.first, StrData, Buffer.second);
  Token TheTok;
  TheLexer.LexFromRawLexer(TheTok);
  return TheTok.getLength();
}
240
241//===----------------------------------------------------------------------===//
242// Character information.
243//===----------------------------------------------------------------------===//
244
// Classification table: one flag byte for each possible 'unsigned char'
// value, populated lazily by InitCharacterInfo().
static unsigned char CharInfo[256];

enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'
  CHAR_LETTER   = 0x04,  // a-z,A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20   // .
};

/// InitCharacterInfo - Fill in the CharInfo table on first call; later calls
/// are no-ops.
static void InitCharacterInfo() {
  static bool isInited = false;
  if (isInited) return;
  isInited = true;

  // Initialize the CharInfo table.
  // TODO: statically initialize this.
  CharInfo[(int)' ']  = CHAR_HORZ_WS;
  CharInfo[(int)'\t'] = CHAR_HORZ_WS;
  CharInfo[(int)'\f'] = CHAR_HORZ_WS;
  CharInfo[(int)'\v'] = CHAR_HORZ_WS;
  CharInfo[(int)'\n'] = CHAR_VERT_WS;
  CharInfo[(int)'\r'] = CHAR_VERT_WS;

  CharInfo[(int)'_'] = CHAR_UNDER;
  CharInfo[(int)'.'] = CHAR_PERIOD;

  for (unsigned Lower = 'a'; Lower <= 'z'; ++Lower) {
    CharInfo[Lower] = CHAR_LETTER;
    CharInfo[Lower + 'A' - 'a'] = CHAR_LETTER;  // Matching uppercase letter.
  }
  for (unsigned Digit = '0'; Digit <= '9'; ++Digit)
    CharInfo[Digit] = CHAR_NUMBER;
}

/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) != 0;
}

/// isHorizontalWhitespace - Return true if this character is horizontal
/// whitespace: ' ', '\t', '\f', '\v'.  Note that this returns false for '\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
  return (CharInfo[c] & CHAR_HORZ_WS) != 0;
}

/// isWhitespace - Return true if this character is horizontal or vertical
/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.  Note that this returns
/// false for '\0'.
static inline bool isWhitespace(unsigned char c) {
  return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) != 0;
}

/// isNumberBody - Return true if this is the body character of a
/// preprocessing number, which is [a-zA-Z0-9_.].
static inline bool isNumberBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) != 0;
}
300
301
302//===----------------------------------------------------------------------===//
303// Diagnostics forwarding code.
304//===----------------------------------------------------------------------===//
305
/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
/// lexer buffer was all instantiated at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method.  Do not allow it to be inlined.
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo,
                                        unsigned TokLen) DISABLE_INLINE;
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be an instantiation");

  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the instantiation location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Instantiation(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getFileLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  std::pair<SourceLocation,SourceLocation> II =
    SM.getImmediateInstantiationRange(FileLoc);

  // Stitch the spelling offset together with the _Pragma's expansion range.
  return SM.createInstantiationLoc(SpellingLoc, II.first, II.second, TokLen);
}
336
337/// getSourceLocation - Return a source location identifier for the specified
338/// offset in the current file.
339SourceLocation Lexer::getSourceLocation(const char *Loc,
340                                        unsigned TokLen) const {
341  assert(Loc >= BufferStart && Loc <= BufferEnd &&
342         "Location out of range for this buffer!");
343
344  // In the normal case, we're just lexing from a simple file buffer, return
345  // the file id from FileLoc with the offset specified.
346  unsigned CharNo = Loc-BufferStart;
347  if (FileLoc.isFileID())
348    return FileLoc.getFileLocWithOffset(CharNo);
349
350  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
351  // tokens are lexed from where the _Pragma was defined.
352  assert(PP && "This doesn't work on raw lexers");
353  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
354}
355
356/// Diag - Forwarding function for diagnostics.  This translate a source
357/// position in the current buffer into a SourceLocation object for rendering.
358DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
359  return PP->Diag(getSourceLocation(Loc), DiagID);
360}
361
362//===----------------------------------------------------------------------===//
363// Trigraph and Escaped Newline Handling Code.
364//===----------------------------------------------------------------------===//
365
/// GetTrigraphCharForLetter - Map the character that follows a "??" pair to
/// the character the trigraph denotes, or '\0' if the sequence is not a
/// legal trigraph.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  case '(':  return '[';
  case ')':  return ']';
  case '<':  return '{';
  case '>':  return '}';
  case '=':  return '#';
  case '/':  return '\\';
  case '\'': return '^';
  case '!':  return '|';
  case '-':  return '~';
  default:   return 0;
  }
}
382
383/// DecodeTrigraphChar - If the specified character is a legal trigraph when
384/// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
385/// return the result character.  Finally, emit a warning about trigraph use
386/// whether trigraphs are enabled or not.
387static char DecodeTrigraphChar(const char *CP, Lexer *L) {
388  char Res = GetTrigraphCharForLetter(*CP);
389  if (!Res || !L) return Res;
390
391  if (!L->getFeatures().Trigraphs) {
392    if (!L->isLexingRawMode())
393      L->Diag(CP-2, diag::trigraph_ignored);
394    return 0;
395  }
396
397  if (!L->isLexingRawMode())
398    L->Diag(CP-2, diag::trigraph_converted) << std::string()+Res;
399  return Res;
400}
401
/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
/// trigraph equivalent on entry to this function.
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
  unsigned Size = 0;
  // Scan forward over whitespace; other whitespace is tolerated between the
  // backslash and the newline (the caller diagnoses it when Size != 1).
  while (isWhitespace(Ptr[Size])) {
    ++Size;

    // Keep scanning until the character just consumed is an actual newline.
    if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
      continue;

    // If this is a \r\n or \n\r, skip the other half.
    if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
        Ptr[Size-1] != Ptr[Size])
      ++Size;

    // Size now covers everything from Ptr up to and including the newline.
    return Size;
  }

  // Not an escaped newline, must be a \t or something else.
  return 0;
}
424
425
/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: C++ UCN's?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// Tok being non-null means a real token is being formed: the NeedsCleaning
/// flag is set and diagnostics may be emitted; a null Tok is a pure lookahead.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
// Control also reaches this label from the trigraph case below, when "??/"
// decodes to a backslash that may itself start an escaped newline.
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (EscapedNewLineSize != 1 && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;
      // Use slow version to accumulate a correct size field; the recursion
      // also handles runs of consecutive escaped newlines.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
492
493
/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// This is the diagnostic-free twin of getCharAndSizeSlow: it needs only the
/// LangOptions (for the Trigraphs flag) rather than a full Lexer, so it never
/// warns and never sets token flags.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
// Control also reaches this label from the trigraph case below, when "??/"
// decodes to a backslash that may itself start an escaped newline.
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
540
541//===----------------------------------------------------------------------===//
542// Helper methods for lexing.
543//===----------------------------------------------------------------------===//
544
/// LexIdentifier - Lex the rest of an identifier; the first character has
/// already been consumed and CurPtr points just past it.  Forms the token
/// and, unless we are in raw mode, resolves keywords and hands the
/// identifier off to the preprocessor for possible macro expansion.
void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C)) {
    C = *CurPtr++;
  }
  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: UCNs.
  if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr, tok::identifier);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode) return;

    // Fill in Result.IdentifierInfo, looking up the identifier in the
    // identifier table.
    IdentifierInfo *II = PP->LookUpIdentifierInfo(Result, IdStart);

    // Change the kind of this identifier to the appropriate token kind, e.g.
    // turning "for" into a keyword.
    Result.setKind(II->getTokenID());

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    if (II->isHandleIdentifierCase())
      PP->HandleIdentifier(Result);
    return;
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path, which uses
  // getCharAndSize/ConsumeChar to decode trigraphs and escaped newlines.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    // Consume a maximal run of plain identifier characters before re-checking
    // for the unusual cases ($, trigraph, escaped newline) at the top.
    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: UCNs.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}
610
611
/// LexNumericConstant - Lex the remainder of a integer or floating point
/// constant. From[-1] is the first character lexed.  Return the end of the
/// constant.  This accepts the preprocessing-number spelling ([a-zA-Z0-9_.]
/// plus signs after an exponent letter); semantic validation of the literal
/// happens later.
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isNumberBody(C)) { // FIXME: UCNs?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant (sign after a 'p' exponent), continue when
  // hex floats are enabled or extensions are not disabled.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p') &&
      (Features.HexFloats || !Features.NoExtensions))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
}
639
/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L".  Wide selects the wide_string_literal token kind.  An
/// unterminated literal produces an 'unknown' token instead.
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated: diagnose (unless raw mode or assembly preprocessing)
      // and hand back what we consumed as an unknown token.
      if (!isLexingRawMode() && !Features.AsmPreprocessor)
        Diag(BufferPtr, diag::err_unterminated_string);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return;
    } else if (C == 0) {
      // Remember the embedded nul; warned about once, after the loop.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_string);

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr,
                     Wide ? tok::wide_string_literal : tok::string_literal);
  Result.setLiteralData(TokStart);
}
673
/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?
  const char *AfterLessPos = CurPtr;  // Position just past the '<'.
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // If the filename is unterminated, then it must just be a lone <
      // character.  Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return;
    } else if (C == 0) {
      // Remember the embedded nul; warned about once, after the loop.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_string);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
  Result.setLiteralData(TokStart);
}
706
707
708/// LexCharConstant - Lex the remainder of a character constant, after having
709/// lexed either ' or L'.
710void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
711  const char *NulCharacter = 0; // Does this character contain the \0 character?
712
713  // Handle the common case of 'x' and '\y' efficiently.
714  char C = getAndAdvanceChar(CurPtr, Result);
715  if (C == '\'') {
716    if (!isLexingRawMode() && !Features.AsmPreprocessor)
717      Diag(BufferPtr, diag::err_empty_character);
718    FormTokenWithChars(Result, CurPtr, tok::unknown);
719    return;
720  } else if (C == '\\') {
721    // Skip the escaped character.
722    // FIXME: UCN's.
723    C = getAndAdvanceChar(CurPtr, Result);
724  }
725
726  if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
727    ++CurPtr;
728  } else {
729    // Fall back on generic code for embedded nulls, newlines, wide chars.
730    do {
731      // Skip escaped characters.
732      if (C == '\\') {
733        // Skip the escaped character.
734        C = getAndAdvanceChar(CurPtr, Result);
735      } else if (C == '\n' || C == '\r' ||               // Newline.
736                 (C == 0 && CurPtr-1 == BufferEnd)) {    // End of file.
737        if (!isLexingRawMode() && !Features.AsmPreprocessor)
738          Diag(BufferPtr, diag::err_unterminated_char);
739        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
740        return;
741      } else if (C == 0) {
742        NulCharacter = CurPtr-1;
743      }
744      C = getAndAdvanceChar(CurPtr, Result);
745    } while (C != '\'');
746  }
747
748  if (NulCharacter && !isLexingRawMode())
749    Diag(NulCharacter, diag::null_in_char);
750
751  // Update the location of token as well as BufferPtr.
752  const char *TokStart = BufferPtr;
753  FormTokenWithChars(Result, CurPtr, tok::char_constant);
754  Result.setLiteralData(TokStart);
755}
756
/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
///
/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
///
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
  // Whitespace - Skip it, then return the token after the whitespace.
  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
  while (1) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we have something other than whitespace, we're done.
    if (Char != '\n' && Char != '\r')
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // ok, but handle newline.
    // The returned token is at the start of the line.
    Result.setFlag(Token::StartOfLine);
    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);
    Char = *++CurPtr;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  if (PrevChar != '\n' && PrevChar != '\r')
    Result.setFlag(Token::LeadingSpace);

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  BufferPtr = CurPtr;
  return false;
}
802
/// SkipBCPLComment - We have just read the // characters from input.  Skip
/// until we find the newline character that terminates the comment.  Then
/// update BufferPtr and return.  If we're in KeepCommentMode, this will form
/// the token and return true.
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment && !isLexingRawMode()) {
    Diag(BufferPtr, diag::ext_bcpl_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: Speedup BCPL comment lexing.  Just scan for a \n or \r character.
    // If we find a \n character, scan backwards, checking to see if it's an
    // escaped newline, like we do for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.  Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs.  If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If the char that we finally got was a \n, then we must have had something
    // like \<newline><newline>.  We don't want to have consumed the second
    // newline, we want CurPtr, to end up pointing to it down below.
    if (C == '\n' || C == '\r') {
      --CurPtr;
      C = 'x'; // doesn't matter what this is.
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isspace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isspace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
          break;
        }
    }

    // If getAndAdvanceChar stepped past the NUL terminating the buffer, back
    // up onto it so the end-of-file is seen on the next lex.
    if (CurPtr == BufferEnd+1) { --CurPtr; break; }
  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveBCPLComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}
909
910/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
911/// an appropriate way and return it.
912bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
913  // If we're not in a preprocessor directive, just return the // comment
914  // directly.
915  FormTokenWithChars(Result, CurPtr, tok::comment);
916
917  if (!ParsingPreprocessorDirective)
918    return true;
919
920  // If this BCPL-style comment is in a macro definition, transmogrify it into
921  // a C-style block comment.
922  std::string Spelling = PP->getSpelling(Result);
923  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
924  Spelling[1] = '*';   // Change prefix to "/*".
925  Spelling += "*/";    // add suffix.
926
927  Result.setKind(tok::comment);
928  PP->CreateString(&Spelling[0], Spelling.size(), Result,
929                   Result.getLocation());
930  return true;
931}
932
/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
/// newline character (either \n or \r) is part of an escaped newline sequence
/// sitting between the '*' and '/' that end a block comment.  Issue a
/// diagnostic if so.  We know that the newline is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the slash and newline.  Embedded NUL characters are skipped too.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    // The escaped newline only ends the comment if a '*' precedes it.
    if (CurPtr[-1] != '*') return false;
  } else {
    // It isn't a slash, is it the ?? / trigraph?
    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
        CurPtr[-3] != '*')
      return false;

    // This is the trigraph ending the comment.  Emit a stern warning!
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getFeatures().Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(CurPtr, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace && !L->isLexingRawMode())
    L->Diag(CurPtr, diag::backslash_newline_space);

  return true;
}
993
994#ifdef __SSE2__
995#include <emmintrin.h>
996#elif __ALTIVEC__
997#include <altivec.h>
998#undef bool
999#endif
1000
/// SkipBlockComment - We have just read the /* characters from input.  Read
/// until we find the */ characters that terminate the comment.  Note that we
/// don't bother decoding trigraphs or escaped newlines in block comments,
/// because they cannot cause the comment to end.  The only thing that can
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
///
/// If KeepCommentMode is enabled, this forms a token from the comment and
/// returns true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token.  Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (1) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Compare 16 bytes at a time against a vector of '/' characters; stop
      // on the first 16-byte chunk containing any slash.
      __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/',
                                     '/', '/', '/', '/', '/', '/', '/', '/');
      while (CurPtr+16 <= BufferEnd &&
             _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0)
        CurPtr += 16;
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

  FoundSlash:
    if (C == '/') {
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token.  Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    }
    C = *CurPtr++;
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.  This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr+1);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}
1147
1148//===----------------------------------------------------------------------===//
1149// Primary Lexing Entry Points
1150//===----------------------------------------------------------------------===//
1151
1152/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
1153/// uninterpreted string.  This switches the lexer out of directive mode.
1154std::string Lexer::ReadToEndOfLine() {
1155  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
1156         "Must be in a preprocessing directive!");
1157  std::string Result;
1158  Token Tmp;
1159
1160  // CurPtr - Cache BufferPtr in an automatic variable.
1161  const char *CurPtr = BufferPtr;
1162  while (1) {
1163    char Char = getAndAdvanceChar(CurPtr, Tmp);
1164    switch (Char) {
1165    default:
1166      Result += Char;
1167      break;
1168    case 0:  // Null.
1169      // Found end of file?
1170      if (CurPtr-1 != BufferEnd) {
1171        // Nope, normal character, continue.
1172        Result += Char;
1173        break;
1174      }
1175      // FALL THROUGH.
1176    case '\r':
1177    case '\n':
1178      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
1179      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
1180      BufferPtr = CurPtr-1;
1181
1182      // Next, lex the character, which should handle the EOM transition.
1183      Lex(Tmp);
1184      assert(Tmp.is(tok::eom) && "Unexpected token!");
1185
1186      // Finally, we're done, return the string we found.
1187      return Result;
1188    }
1189  }
1190}
1191
/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eom);

    // Restore comment saving mode, in case it was disabled for directive.
    SetCommentRetentionState(PP->getCommentRetentionState());
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  // Otherwise, issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error for each conditional left
  // open on the stack, draining it as we go.
  while (!ConditionalStack.empty()) {
    PP->Diag(ConditionalStack.back().IfLoc,
             diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn, with a fix-it hint suggesting the insertion of a newline.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
    Diag(BufferEnd, diag::ext_no_newline_eof)
      << CodeModificationHint::CreateInsertion(getSourceLocation(BufferEnd),
                                               "\n");

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.  NOTE: per the caller in
  // LexTokenInternal, this call will often delete 'this' lexer, so no member
  // may be touched afterwards.
  return PP->HandleEndOfFile(Result);
}
1241
1242/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
1243/// the specified lexer will return a tok::l_paren token, 0 if it is something
1244/// else and 2 if there are no more tokens in the buffer controlled by the
1245/// lexer.
1246unsigned Lexer::isNextPPTokenLParen() {
1247  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
1248
1249  // Switch to 'skipping' mode.  This will ensure that we can lex a token
1250  // without emitting diagnostics, disables macro expansion, and will cause EOF
1251  // to return an EOF token instead of popping the include stack.
1252  LexingRawMode = true;
1253
1254  // Save state that can be changed while lexing so that we can restore it.
1255  const char *TmpBufferPtr = BufferPtr;
1256
1257  Token Tok;
1258  Tok.startToken();
1259  LexTokenInternal(Tok);
1260
1261  // Restore state that may have changed.
1262  BufferPtr = TmpBufferPtr;
1263
1264  // Restore the lexer back to non-skipping mode.
1265  LexingRawMode = false;
1266
1267  if (Tok.is(tok::eof))
1268    return 2;
1269  return Tok.is(tok::l_paren);
1270}
1271
1272
1273/// LexTokenInternal - This implements a simple C family lexer.  It is an
1274/// extremely performance critical piece of code.  This assumes that the buffer
1275/// has a null character at the end of the file.  Return true if an error
1276/// occurred and compilation should terminate, false if normal.  This returns a
1277/// preprocessing token, not a normal token, as such, it is an internal
1278/// interface.  It assumes that the Flags of result have been cleared before
1279/// calling this.
1280void Lexer::LexTokenInternal(Token &Result) {
1281LexNextToken:
1282  // New token, can't need cleaning yet.
1283  Result.clearFlag(Token::NeedsCleaning);
1284  Result.setIdentifierInfo(0);
1285
1286  // CurPtr - Cache BufferPtr in an automatic variable.
1287  const char *CurPtr = BufferPtr;
1288
1289  // Small amounts of horizontal whitespace is very common between tokens.
1290  if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
1291    ++CurPtr;
1292    while ((*CurPtr == ' ') || (*CurPtr == '\t'))
1293      ++CurPtr;
1294
1295    // If we are keeping whitespace and other tokens, just return what we just
1296    // skipped.  The next lexer invocation will return the token after the
1297    // whitespace.
1298    if (isKeepWhitespaceMode()) {
1299      FormTokenWithChars(Result, CurPtr, tok::unknown);
1300      return;
1301    }
1302
1303    BufferPtr = CurPtr;
1304    Result.setFlag(Token::LeadingSpace);
1305  }
1306
1307  unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.
1308
1309  // Read a character, advancing over it.
1310  char Char = getAndAdvanceChar(CurPtr, Result);
1311  tok::TokenKind Kind;
1312
1313  switch (Char) {
1314  case 0:  // Null.
1315    // Found end of file?
1316    if (CurPtr-1 == BufferEnd) {
1317      // Read the PP instance variable into an automatic variable, because
1318      // LexEndOfFile will often delete 'this'.
1319      Preprocessor *PPCache = PP;
1320      if (LexEndOfFile(Result, CurPtr-1))  // Retreat back into the file.
1321        return;   // Got a token to return.
1322      assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
1323      return PPCache->Lex(Result);
1324    }
1325
1326    if (!isLexingRawMode())
1327      Diag(CurPtr-1, diag::null_in_file);
1328    Result.setFlag(Token::LeadingSpace);
1329    if (SkipWhitespace(Result, CurPtr))
1330      return; // KeepWhitespaceMode
1331
1332    goto LexNextToken;   // GCC isn't tail call eliminating.
1333  case '\n':
1334  case '\r':
1335    // If we are inside a preprocessor directive and we see the end of line,
1336    // we know we are done with the directive, so return an EOM token.
1337    if (ParsingPreprocessorDirective) {
1338      // Done parsing the "line".
1339      ParsingPreprocessorDirective = false;
1340
1341      // Restore comment saving mode, in case it was disabled for directive.
1342      SetCommentRetentionState(PP->getCommentRetentionState());
1343
1344      // Since we consumed a newline, we are back at the start of a line.
1345      IsAtStartOfLine = true;
1346
1347      Kind = tok::eom;
1348      break;
1349    }
1350    // The returned token is at the start of the line.
1351    Result.setFlag(Token::StartOfLine);
1352    // No leading whitespace seen so far.
1353    Result.clearFlag(Token::LeadingSpace);
1354
1355    if (SkipWhitespace(Result, CurPtr))
1356      return; // KeepWhitespaceMode
1357    goto LexNextToken;   // GCC isn't tail call eliminating.
1358  case ' ':
1359  case '\t':
1360  case '\f':
1361  case '\v':
1362  SkipHorizontalWhitespace:
1363    Result.setFlag(Token::LeadingSpace);
1364    if (SkipWhitespace(Result, CurPtr))
1365      return; // KeepWhitespaceMode
1366
1367  SkipIgnoredUnits:
1368    CurPtr = BufferPtr;
1369
1370    // If the next token is obviously a // or /* */ comment, skip it efficiently
1371    // too (without going through the big switch stmt).
1372    if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
1373        Features.BCPLComment) {
1374      SkipBCPLComment(Result, CurPtr+2);
1375      goto SkipIgnoredUnits;
1376    } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
1377      SkipBlockComment(Result, CurPtr+2);
1378      goto SkipIgnoredUnits;
1379    } else if (isHorizontalWhitespace(*CurPtr)) {
1380      goto SkipHorizontalWhitespace;
1381    }
1382    goto LexNextToken;   // GCC isn't tail call eliminating.
1383
1384  // C99 6.4.4.1: Integer Constants.
1385  // C99 6.4.4.2: Floating Constants.
1386  case '0': case '1': case '2': case '3': case '4':
1387  case '5': case '6': case '7': case '8': case '9':
1388    // Notify MIOpt that we read a non-whitespace/non-comment token.
1389    MIOpt.ReadToken();
1390    return LexNumericConstant(Result, CurPtr);
1391
1392  case 'L':   // Identifier (Loony) or wide literal (L'x' or L"xyz").
1393    // Notify MIOpt that we read a non-whitespace/non-comment token.
1394    MIOpt.ReadToken();
1395    Char = getCharAndSize(CurPtr, SizeTmp);
1396
1397    // Wide string literal.
1398    if (Char == '"')
1399      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
1400                              true);
1401
1402    // Wide character constant.
1403    if (Char == '\'')
1404      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1405    // FALL THROUGH, treating L like the start of an identifier.
1406
1407  // C99 6.4.2: Identifiers.
1408  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
1409  case 'H': case 'I': case 'J': case 'K':    /*'L'*/case 'M': case 'N':
1410  case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
1411  case 'V': case 'W': case 'X': case 'Y': case 'Z':
1412  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
1413  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
1414  case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
1415  case 'v': case 'w': case 'x': case 'y': case 'z':
1416  case '_':
1417    // Notify MIOpt that we read a non-whitespace/non-comment token.
1418    MIOpt.ReadToken();
1419    return LexIdentifier(Result, CurPtr);
1420
1421  case '$':   // $ in identifiers.
1422    if (Features.DollarIdents) {
1423      if (!isLexingRawMode())
1424        Diag(CurPtr-1, diag::ext_dollar_in_identifier);
1425      // Notify MIOpt that we read a non-whitespace/non-comment token.
1426      MIOpt.ReadToken();
1427      return LexIdentifier(Result, CurPtr);
1428    }
1429
1430    Kind = tok::unknown;
1431    break;
1432
1433  // C99 6.4.4: Character Constants.
1434  case '\'':
1435    // Notify MIOpt that we read a non-whitespace/non-comment token.
1436    MIOpt.ReadToken();
1437    return LexCharConstant(Result, CurPtr);
1438
1439  // C99 6.4.5: String Literals.
1440  case '"':
1441    // Notify MIOpt that we read a non-whitespace/non-comment token.
1442    MIOpt.ReadToken();
1443    return LexStringLiteral(Result, CurPtr, false);
1444
1445  // C99 6.4.6: Punctuators.
1446  case '?':
1447    Kind = tok::question;
1448    break;
1449  case '[':
1450    Kind = tok::l_square;
1451    break;
1452  case ']':
1453    Kind = tok::r_square;
1454    break;
1455  case '(':
1456    Kind = tok::l_paren;
1457    break;
1458  case ')':
1459    Kind = tok::r_paren;
1460    break;
1461  case '{':
1462    Kind = tok::l_brace;
1463    break;
1464  case '}':
1465    Kind = tok::r_brace;
1466    break;
1467  case '.':
1468    Char = getCharAndSize(CurPtr, SizeTmp);
1469    if (Char >= '0' && Char <= '9') {
1470      // Notify MIOpt that we read a non-whitespace/non-comment token.
1471      MIOpt.ReadToken();
1472
1473      return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1474    } else if (Features.CPlusPlus && Char == '*') {
1475      Kind = tok::periodstar;
1476      CurPtr += SizeTmp;
1477    } else if (Char == '.' &&
1478               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
1479      Kind = tok::ellipsis;
1480      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1481                           SizeTmp2, Result);
1482    } else {
1483      Kind = tok::period;
1484    }
1485    break;
1486  case '&':
1487    Char = getCharAndSize(CurPtr, SizeTmp);
1488    if (Char == '&') {
1489      Kind = tok::ampamp;
1490      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1491    } else if (Char == '=') {
1492      Kind = tok::ampequal;
1493      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1494    } else {
1495      Kind = tok::amp;
1496    }
1497    break;
1498  case '*':
1499    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1500      Kind = tok::starequal;
1501      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1502    } else {
1503      Kind = tok::star;
1504    }
1505    break;
1506  case '+':
1507    Char = getCharAndSize(CurPtr, SizeTmp);
1508    if (Char == '+') {
1509      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1510      Kind = tok::plusplus;
1511    } else if (Char == '=') {
1512      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1513      Kind = tok::plusequal;
1514    } else {
1515      Kind = tok::plus;
1516    }
1517    break;
1518  case '-':
1519    Char = getCharAndSize(CurPtr, SizeTmp);
1520    if (Char == '-') {      // --
1521      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1522      Kind = tok::minusminus;
1523    } else if (Char == '>' && Features.CPlusPlus &&
1524               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {  // C++ ->*
1525      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1526                           SizeTmp2, Result);
1527      Kind = tok::arrowstar;
1528    } else if (Char == '>') {   // ->
1529      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1530      Kind = tok::arrow;
1531    } else if (Char == '=') {   // -=
1532      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1533      Kind = tok::minusequal;
1534    } else {
1535      Kind = tok::minus;
1536    }
1537    break;
1538  case '~':
1539    Kind = tok::tilde;
1540    break;
1541  case '!':
1542    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1543      Kind = tok::exclaimequal;
1544      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1545    } else {
1546      Kind = tok::exclaim;
1547    }
1548    break;
1549  case '/':
1550    // 6.4.9: Comments
1551    Char = getCharAndSize(CurPtr, SizeTmp);
1552    if (Char == '/') {         // BCPL comment.
1553      // Even if BCPL comments are disabled (e.g. in C89 mode), we generally
1554      // want to lex this as a comment.  There is one problem with this though,
1555      // that in one particular corner case, this can change the behavior of the
1556      // resultant program.  For example, In  "foo //**/ bar", C89 would lex
1557      // this as "foo / bar" and langauges with BCPL comments would lex it as
1558      // "foo".  Check to see if the character after the second slash is a '*'.
1559      // If so, we will lex that as a "/" instead of the start of a comment.
1560      if (Features.BCPLComment ||
1561          getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') {
1562        if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
1563          return; // KeepCommentMode
1564
1565        // It is common for the tokens immediately after a // comment to be
1566        // whitespace (indentation for the next line).  Instead of going through
1567        // the big switch, handle it efficiently now.
1568        goto SkipIgnoredUnits;
1569      }
1570    }
1571
1572    if (Char == '*') {  // /**/ comment.
1573      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
1574        return; // KeepCommentMode
1575      goto LexNextToken;   // GCC isn't tail call eliminating.
1576    }
1577
1578    if (Char == '=') {
1579      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1580      Kind = tok::slashequal;
1581    } else {
1582      Kind = tok::slash;
1583    }
1584    break;
1585  case '%':
1586    Char = getCharAndSize(CurPtr, SizeTmp);
1587    if (Char == '=') {
1588      Kind = tok::percentequal;
1589      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1590    } else if (Features.Digraphs && Char == '>') {
1591      Kind = tok::r_brace;                             // '%>' -> '}'
1592      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1593    } else if (Features.Digraphs && Char == ':') {
1594      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1595      Char = getCharAndSize(CurPtr, SizeTmp);
1596      if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
1597        Kind = tok::hashhash;                          // '%:%:' -> '##'
1598        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1599                             SizeTmp2, Result);
1600      } else if (Char == '@' && Features.Microsoft) {  // %:@ -> #@ -> Charize
1601        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1602        if (!isLexingRawMode())
1603          Diag(BufferPtr, diag::charize_microsoft_ext);
1604        Kind = tok::hashat;
1605      } else {                                         // '%:' -> '#'
1606        // We parsed a # character.  If this occurs at the start of the line,
1607        // it's actually the start of a preprocessing directive.  Callback to
1608        // the preprocessor to handle it.
1609        // FIXME: -fpreprocessed mode??
1610        if (Result.isAtStartOfLine() && !LexingRawMode) {
1611          FormTokenWithChars(Result, CurPtr, tok::hash);
1612          PP->HandleDirective(Result);
1613
1614          // As an optimization, if the preprocessor didn't switch lexers, tail
1615          // recurse.
1616          if (PP->isCurrentLexer(this)) {
1617            // Start a new token. If this is a #include or something, the PP may
1618            // want us starting at the beginning of the line again.  If so, set
1619            // the StartOfLine flag.
1620            if (IsAtStartOfLine) {
1621              Result.setFlag(Token::StartOfLine);
1622              IsAtStartOfLine = false;
1623            }
1624            goto LexNextToken;   // GCC isn't tail call eliminating.
1625          }
1626
1627          return PP->Lex(Result);
1628        }
1629
1630        Kind = tok::hash;
1631      }
1632    } else {
1633      Kind = tok::percent;
1634    }
1635    break;
1636  case '<':
1637    Char = getCharAndSize(CurPtr, SizeTmp);
1638    if (ParsingFilename) {
1639      return LexAngledStringLiteral(Result, CurPtr);
1640    } else if (Char == '<' &&
1641               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1642      Kind = tok::lesslessequal;
1643      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1644                           SizeTmp2, Result);
1645    } else if (Char == '<') {
1646      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1647      Kind = tok::lessless;
1648    } else if (Char == '=') {
1649      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1650      Kind = tok::lessequal;
1651    } else if (Features.Digraphs && Char == ':') {     // '<:' -> '['
1652      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1653      Kind = tok::l_square;
1654    } else if (Features.Digraphs && Char == '%') {     // '<%' -> '{'
1655      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1656      Kind = tok::l_brace;
1657    } else {
1658      Kind = tok::less;
1659    }
1660    break;
1661  case '>':
1662    Char = getCharAndSize(CurPtr, SizeTmp);
1663    if (Char == '=') {
1664      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1665      Kind = tok::greaterequal;
1666    } else if (Char == '>' &&
1667               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1668      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1669                           SizeTmp2, Result);
1670      Kind = tok::greatergreaterequal;
1671    } else if (Char == '>') {
1672      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1673      Kind = tok::greatergreater;
1674    } else {
1675      Kind = tok::greater;
1676    }
1677    break;
1678  case '^':
1679    Char = getCharAndSize(CurPtr, SizeTmp);
1680    if (Char == '=') {
1681      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1682      Kind = tok::caretequal;
1683    } else {
1684      Kind = tok::caret;
1685    }
1686    break;
1687  case '|':
1688    Char = getCharAndSize(CurPtr, SizeTmp);
1689    if (Char == '=') {
1690      Kind = tok::pipeequal;
1691      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1692    } else if (Char == '|') {
1693      Kind = tok::pipepipe;
1694      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1695    } else {
1696      Kind = tok::pipe;
1697    }
1698    break;
1699  case ':':
1700    Char = getCharAndSize(CurPtr, SizeTmp);
1701    if (Features.Digraphs && Char == '>') {
1702      Kind = tok::r_square; // ':>' -> ']'
1703      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1704    } else if (Features.CPlusPlus && Char == ':') {
1705      Kind = tok::coloncolon;
1706      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1707    } else {
1708      Kind = tok::colon;
1709    }
1710    break;
1711  case ';':
1712    Kind = tok::semi;
1713    break;
1714  case '=':
1715    Char = getCharAndSize(CurPtr, SizeTmp);
1716    if (Char == '=') {
1717      Kind = tok::equalequal;
1718      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1719    } else {
1720      Kind = tok::equal;
1721    }
1722    break;
1723  case ',':
1724    Kind = tok::comma;
1725    break;
1726  case '#':
1727    Char = getCharAndSize(CurPtr, SizeTmp);
1728    if (Char == '#') {
1729      Kind = tok::hashhash;
1730      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1731    } else if (Char == '@' && Features.Microsoft) {  // #@ -> Charize
1732      Kind = tok::hashat;
1733      if (!isLexingRawMode())
1734        Diag(BufferPtr, diag::charize_microsoft_ext);
1735      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1736    } else {
1737      // We parsed a # character.  If this occurs at the start of the line,
1738      // it's actually the start of a preprocessing directive.  Callback to
1739      // the preprocessor to handle it.
1740      // FIXME: -fpreprocessed mode??
1741      if (Result.isAtStartOfLine() && !LexingRawMode) {
1742        FormTokenWithChars(Result, CurPtr, tok::hash);
1743        PP->HandleDirective(Result);
1744
1745        // As an optimization, if the preprocessor didn't switch lexers, tail
1746        // recurse.
1747        if (PP->isCurrentLexer(this)) {
1748          // Start a new token.  If this is a #include or something, the PP may
1749          // want us starting at the beginning of the line again.  If so, set
1750          // the StartOfLine flag.
1751          if (IsAtStartOfLine) {
1752            Result.setFlag(Token::StartOfLine);
1753            IsAtStartOfLine = false;
1754          }
1755          goto LexNextToken;   // GCC isn't tail call eliminating.
1756        }
1757        return PP->Lex(Result);
1758      }
1759
1760      Kind = tok::hash;
1761    }
1762    break;
1763
1764  case '@':
1765    // Objective C support.
1766    if (CurPtr[-1] == '@' && Features.ObjC1)
1767      Kind = tok::at;
1768    else
1769      Kind = tok::unknown;
1770    break;
1771
1772  case '\\':
1773    // FIXME: UCN's.
1774    // FALL THROUGH.
1775  default:
1776    Kind = tok::unknown;
1777    break;
1778  }
1779
1780  // Notify MIOpt that we read a non-whitespace/non-comment token.
1781  MIOpt.ReadToken();
1782
1783  // Update the location of token as well as BufferPtr.
1784  FormTokenWithChars(Result, CurPtr, Kind);
1785}
1786