Lexer.cpp revision 86d9a52c24d390631a888d4ff812e1b15445e0a0
1//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10//  This file implements the Lexer and Token interfaces.
11//
12//===----------------------------------------------------------------------===//
13//
14// TODO: GCC Diagnostics emitted by the lexer:
15// PEDWARN: (form feed|vertical tab) in preprocessing directive
16//
17// Universal characters, unicode, char mapping:
18// WARNING: `%.*s' is not in NFKC
19// WARNING: `%.*s' is not in NFC
20//
21// Other:
22// TODO: Options to support:
23//    -fexec-charset,-fwide-exec-charset
24//
25//===----------------------------------------------------------------------===//
26
27#include "clang/Lex/Lexer.h"
28#include "clang/Lex/Preprocessor.h"
29#include "clang/Lex/LexDiagnostic.h"
30#include "clang/Basic/SourceManager.h"
31#include "llvm/Support/Compiler.h"
32#include "llvm/Support/MemoryBuffer.h"
33#include <cctype>
34using namespace clang;
35
36static void InitCharacterInfo();
37
38//===----------------------------------------------------------------------===//
39// Token Class Implementation
40//===----------------------------------------------------------------------===//
41
42/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
43bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
44  if (IdentifierInfo *II = getIdentifierInfo())
45    return II->getObjCKeywordID() == objcKey;
46  return false;
47}
48
49/// getObjCKeywordID - Return the ObjC keyword kind.
50tok::ObjCKeywordKind Token::getObjCKeywordID() const {
51  IdentifierInfo *specId = getIdentifierInfo();
52  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
53}
54
55
56//===----------------------------------------------------------------------===//
57// Lexer Class Implementation
58//===----------------------------------------------------------------------===//
59
/// InitLexer - Shared initialization used by every constructor: record the
/// buffer bounds, start lexing at BufPtr, and reset all per-lexer state to
/// its defaults.  BufEnd must point at a nul terminator.
void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
                      const char *BufEnd) {
  InitCharacterInfo();

  BufferStart = BufStart;
  BufferPtr = BufPtr;
  BufferEnd = BufEnd;

  assert(BufEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Not lexing a _Pragma scratch buffer, and EOF is not a code-completion
  // point.  Create_PragmaLexer overrides the former after construction.
  Is_PragmaLexer = false;
  IsEofCodeCompletion = false;

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode.  Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to not keeping comments.
  ExtendedTokenMode = 0;
}
93
94/// Lexer constructor - Create a new lexer object for the specified buffer
95/// with the specified preprocessor managing the lexing process.  This lexer
96/// assumes that the associated file buffer and Preprocessor objects will
97/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, Preprocessor &PP)
  : PreprocessorLexer(&PP, FID),
    FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
    Features(PP.getLangOptions()) {

  const llvm::MemoryBuffer *InputFile = PP.getSourceManager().getBuffer(FID);

  // Begin lexing at the very start of the file's buffer.
  InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
            InputFile->getBufferEnd());

  // Default to keeping comments if the preprocessor wants them.
  SetCommentRetentionState(PP.getCommentRetentionState());
}
111
112/// Lexer constructor - Create a new raw lexer object.  This object is only
113/// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
114/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
             const char *BufStart, const char *BufPtr, const char *BufEnd)
  : FileLoc(fileloc), Features(features) {

  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.  A raw lexer has no Preprocessor, so it never
  // emits diagnostics and never interprets (e.g. macro-expands) tokens.
  LexingRawMode = true;
}
124
125/// Lexer constructor - Create a new raw lexer object.  This object is only
126/// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
127/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const SourceManager &SM, const LangOptions &features)
  : FileLoc(SM.getLocForStartOfFile(FID)), Features(features) {
  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);

  // Begin lexing at the very start of the file's buffer.
  InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(),
            FromFile->getBufferEnd());

  // We *are* in raw mode (no Preprocessor: no diagnostics, no expansion).
  LexingRawMode = true;
}
138
139/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
140/// _Pragma expansion.  This has a variety of magic semantics that this method
141/// sets up.  It returns a new'd Lexer that must be delete'd when done.
142///
143/// On entrance to this routine, TokStartLoc is a macro location which has a
144/// spelling loc that indicates the bytes to be lexed for the token and an
145/// instantiation location that indicates where all lexed tokens should be
146/// "expanded from".
147///
148/// FIXME: It would really be nice to make _Pragma just be a wrapper around a
149/// normal lexer that remaps tokens as they fly by.  This would require making
150/// Preprocessor::Lex virtual.  Given that, we could just dump in a magic lexer
151/// interface that could handle this stuff.  This would pull GetMappedTokenLoc
152/// out of the critical path of the lexer!
153///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation InstantiationLocStart,
                                 SourceLocation InstantiationLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  Lexer *L = new Lexer(SpellingFID, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want.  This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  // Only the TokLen bytes starting at StrData will be lexed.
  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information.  This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createInstantiationLoc(SM.getLocForStartOfFile(SpellingFID),
                                         InstantiationLocStart,
                                         InstantiationLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOM token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}
187
188
189/// Stringify - Convert the specified string into a C string, with surrounding
190/// ""'s, and with escaped \ and " characters.
191std::string Lexer::Stringify(const std::string &Str, bool Charify) {
192  std::string Result = Str;
193  char Quote = Charify ? '\'' : '"';
194  for (unsigned i = 0, e = Result.size(); i != e; ++i) {
195    if (Result[i] == '\\' || Result[i] == Quote) {
196      Result.insert(Result.begin()+i, '\\');
197      ++i; ++e;
198    }
199  }
200  return Result;
201}
202
203/// Stringify - Convert the specified string into a C string by escaping '\'
204/// and " characters.  This does not add surrounding ""'s to the string.
205void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
206  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
207    if (Str[i] == '\\' || Str[i] == '"') {
208      Str.insert(Str.begin()+i, '\\');
209      ++i; ++e;
210    }
211  }
212}
213
214
215/// MeasureTokenLength - Relex the token at the specified location and return
216/// its length in bytes in the input file.  If the token needs cleaning (e.g.
217/// includes a trigraph or an escaped newline) then this count includes bytes
218/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM,
                                   const LangOptions &LangOpts) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
  // all obviously single-char tokens.  This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getInstantiationLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  std::pair<const char *,const char *> Buffer = SM.getBufferData(LocInfo.first);
  const char *StrData = Buffer.first+LocInfo.second;

  // Create a lexer starting at the beginning of this token.  The raw-lexer
  // constructor is used, so no diagnostics are emitted while re-lexing.
  Lexer TheLexer(Loc, LangOpts, Buffer.first, StrData, Buffer.second);
  Token TheTok;
  TheLexer.LexFromRawLexer(TheTok);
  return TheTok.getLength();
}
241
242//===----------------------------------------------------------------------===//
243// Character information.
244//===----------------------------------------------------------------------===//
245
// Bitflags describing each of the 256 input characters; stored in the CharInfo
// table below and tested by the is*() predicate functions.
enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'
  CHAR_LETTER   = 0x04,  // a-z,A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20   // .
};
254
255// Statically initialize CharInfo table based on ASCII character set
256// Reference: FreeBSD 7.2 /usr/share/misc/ascii
// One flag byte per ASCII code point; indices 128-255 are implicitly
// zero-initialized.  InitCharacterInfo() asserts this table's consistency.
static const unsigned char CharInfo[256] =
{
// 0 NUL         1 SOH         2 STX         3 ETX
// 4 EOT         5 ENQ         6 ACK         7 BEL
   0           , 0           , 0           , 0           ,
   0           , 0           , 0           , 0           ,
// 8 BS          9 HT         10 NL         11 VT
//12 NP         13 CR         14 SO         15 SI
   0           , CHAR_HORZ_WS, CHAR_VERT_WS, CHAR_HORZ_WS,
   CHAR_HORZ_WS, CHAR_VERT_WS, 0           , 0           ,
//16 DLE        17 DC1        18 DC2        19 DC3
//20 DC4        21 NAK        22 SYN        23 ETB
   0           , 0           , 0           , 0           ,
   0           , 0           , 0           , 0           ,
//24 CAN        25 EM         26 SUB        27 ESC
//28 FS         29 GS         30 RS         31 US
   0           , 0           , 0           , 0           ,
   0           , 0           , 0           , 0           ,
//32 SP         33  !         34  "         35  #
//36  $         37  %         38  &         39  '
   CHAR_HORZ_WS, 0           , 0           , 0           ,
   0           , 0           , 0           , 0           ,
//40  (         41  )         42  *         43  +
//44  ,         45  -         46  .         47  /
   0           , 0           , 0           , 0           ,
   0           , 0           , CHAR_PERIOD , 0           ,
//48  0         49  1         50  2         51  3
//52  4         53  5         54  6         55  7
   CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER ,
   CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER ,
//56  8         57  9         58  :         59  ;
//60  <         61  =         62  >         63  ?
   CHAR_NUMBER , CHAR_NUMBER , 0           , 0           ,
   0           , 0           , 0           , 0           ,
//64  @         65  A         66  B         67  C
//68  D         69  E         70  F         71  G
   0           , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//72  H         73  I         74  J         75  K
//76  L         77  M         78  N         79  O
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//80  P         81  Q         82  R         83  S
//84  T         85  U         86  V         87  W
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//88  X         89  Y         90  Z         91  [
//92  \         93  ]         94  ^         95  _
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 0           ,
   0           , 0           , 0           , CHAR_UNDER  ,
//96  `         97  a         98  b         99  c
//100  d       101  e        102  f        103  g
   0           , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//104  h       105  i        106  j        107  k
//108  l       109  m        110  n        111  o
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//112  p       113  q        114  r        115  s
//116  t       117  u        118  v        119  w
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//120  x       121  y        122  z        123  {
//124  |        125  }        126  ~        127 DEL
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 0           ,
   0           , 0           , 0           , 0
};
324
325static void InitCharacterInfo() {
326  static bool isInited = false;
327  if (isInited) return;
328  // check the statically-initialized CharInfo table
329  assert(CHAR_HORZ_WS == CharInfo[(int)' ']);
330  assert(CHAR_HORZ_WS == CharInfo[(int)'\t']);
331  assert(CHAR_HORZ_WS == CharInfo[(int)'\f']);
332  assert(CHAR_HORZ_WS == CharInfo[(int)'\v']);
333  assert(CHAR_VERT_WS == CharInfo[(int)'\n']);
334  assert(CHAR_VERT_WS == CharInfo[(int)'\r']);
335  assert(CHAR_UNDER   == CharInfo[(int)'_']);
336  assert(CHAR_PERIOD  == CharInfo[(int)'.']);
337  for (unsigned i = 'a'; i <= 'z'; ++i) {
338    assert(CHAR_LETTER == CharInfo[i]);
339    assert(CHAR_LETTER == CharInfo[i+'A'-'a']);
340  }
341  for (unsigned i = '0'; i <= '9'; ++i)
342    assert(CHAR_NUMBER == CharInfo[i]);
343  isInited = true;
344}
345
346
347/// isIdentifierBody - Return true if this is the body character of an
348/// identifier, which is [a-zA-Z0-9_].
349static inline bool isIdentifierBody(unsigned char c) {
350  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false;
351}
352
353/// isHorizontalWhitespace - Return true if this character is horizontal
354/// whitespace: ' ', '\t', '\f', '\v'.  Note that this returns false for '\0'.
355static inline bool isHorizontalWhitespace(unsigned char c) {
356  return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
357}
358
359/// isWhitespace - Return true if this character is horizontal or vertical
360/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.  Note that this returns false
361/// for '\0'.
362static inline bool isWhitespace(unsigned char c) {
363  return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false;
364}
365
366/// isNumberBody - Return true if this is the body character of an
367/// preprocessing number, which is [a-zA-Z0-9_.].
368static inline bool isNumberBody(unsigned char c) {
369  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ?
370    true : false;
371}
372
373
374//===----------------------------------------------------------------------===//
375// Diagnostics forwarding code.
376//===----------------------------------------------------------------------===//
377
378/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
379/// lexer buffer was all instantiated at a single point, perform the mapping.
380/// This is currently only used for _Pragma implementation, so it is the slow
381/// path of the hot getSourceLocation method.  Do not allow it to be inlined.
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo,
                                        unsigned TokLen) DISABLE_INLINE;
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be an instantiation");

  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the instantiation location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Instantiation(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getFileLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  std::pair<SourceLocation,SourceLocation> II =
    SM.getImmediateInstantiationRange(FileLoc);

  return SM.createInstantiationLoc(SpellingLoc, II.first, II.second, TokLen);
}
408
409/// getSourceLocation - Return a source location identifier for the specified
410/// offset in the current file.
SourceLocation Lexer::getSourceLocation(const char *Loc,
                                        unsigned TokLen) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.  TokLen is only
  // needed in the mapped (_Pragma) case below.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getFileLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}
427
428/// Diag - Forwarding function for diagnostics.  This translate a source
429/// position in the current buffer into a SourceLocation object for rendering.
/// Diag - Forwarding function for diagnostics.  This translate a source
/// position in the current buffer into a SourceLocation object for rendering.
/// Requires a Preprocessor (PP); must not be called on raw lexers.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}
433
434//===----------------------------------------------------------------------===//
435// Trigraph and Escaped Newline Handling Code.
436//===----------------------------------------------------------------------===//
437
438/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
439/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  // The nine trigraph third-characters and their decoded equivalents,
  // kept in matching positions.
  static const char TrigraphLetters[] = "=)(!'>/<-";
  static const char TrigraphResults[] = "#][|^}\\{~";
  for (unsigned i = 0; TrigraphLetters[i]; ++i)
    if (TrigraphLetters[i] == Letter)
      return TrigraphResults[i];
  return 0;
}
454
455/// DecodeTrigraphChar - If the specified character is a legal trigraph when
456/// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
457/// return the result character.  Finally, emit a warning about trigraph use
458/// whether trigraphs are enabled or not.
/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
/// return the result character.  Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  // With no lexer (L is null), decode unconditionally and stay silent: the
  // caller wants the raw decoding without diagnostics or LangOptions checks.
  if (!Res || !L) return Res;

  if (!L->getFeatures().Trigraphs) {
    if (!L->isLexingRawMode())
      L->Diag(CP-2, diag::trigraph_ignored);  // CP-2 is the first '?'.
    return 0;
  }

  if (!L->isLexingRawMode())
    L->Diag(CP-2, diag::trigraph_converted) << std::string()+Res;
  return Res;
}
473
474/// getEscapedNewLineSize - Return the size of the specified escaped newline,
475/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
476/// trigraph equivalent on entry to this function.
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
  unsigned Size = 0;
  // Scan over whitespace; only if we eventually hit a newline is this an
  // escaped newline.
  while (isWhitespace(Ptr[Size])) {
    ++Size;

    // Keep scanning until the whitespace char consumed is a newline.
    if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
      continue;

    // If this is a \r\n or \n\r, skip the other half.
    if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
        Ptr[Size-1] != Ptr[Size])
      ++Size;

    return Size;
  }

  // Not an escaped newline, must be a \t or something else.
  return 0;
}
496
497/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
498/// them), skip over them and return the first non-escaped-newline found,
499/// otherwise return P.
500const char *Lexer::SkipEscapedNewLines(const char *P) {
501  while (1) {
502    const char *AfterEscape;
503    if (*P == '\\') {
504      AfterEscape = P+1;
505    } else if (*P == '?') {
506      // If not a trigraph for escape, bail out.
507      if (P[1] != '?' || P[2] != '/')
508        return P;
509      AfterEscape = P+3;
510    } else {
511      return P;
512    }
513
514    unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
515    if (NewLineSize == 0) return P;
516    P = AfterEscape+NewLineSize;
517  }
518}
519
520
521/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
522/// get its size, and return it.  This is tricky in several cases:
523///   1. If currently at the start of a trigraph, we warn about the trigraph,
524///      then either return the trigraph (skipping 3 chars) or the '?',
525///      depending on whether trigraphs are enabled or not.
526///   2. If this is an escaped newline (potentially with whitespace between
527///      the backslash and newline), implicitly skip the newline and return
528///      the char after it.
529///   3. If this is a UCN, return it.  FIXME: C++ UCN's?
530///
531/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
532/// know that we can accumulate into Size, and that we have already incremented
533/// Ptr by Size bytes.
534///
535/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
536/// be updated to match.
537///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Note: also reached via goto below when a trigraph decodes to '\'.
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;
      // Use slow version to accumulate a correct size field.  This recurses
      // to handle runs of consecutive escaped newlines.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    // A null lexer is passed when there is no Token, suppressing diagnostics.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
588
589
590/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
591/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
592/// and that we have already incremented Ptr by Size bytes.
593///
594/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
595/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Note: also reached via goto below when a trigraph decodes to '\'.
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.  This recurses
      // to handle runs of consecutive escaped newlines.
      return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.  Unlike the warning variant, trigraphs
  // are only decoded here when the language mode has them enabled.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
636
637//===----------------------------------------------------------------------===//
638// Helper methods for lexing.
639//===----------------------------------------------------------------------===//
640
void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;  // Only used by the slow path's getCharAndSize calls below.
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C)) {
    C = *CurPtr++;
  }
  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: UCNs.
  if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr, tok::identifier);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode) return;

    // Fill in Result.IdentifierInfo, looking up the identifier in the
    // identifier table.
    IdentifierInfo *II = PP->LookUpIdentifierInfo(Result, IdStart);

    // Change the kind of this identifier to the appropriate token kind, e.g.
    // turning "for" into a keyword.
    Result.setKind(II->getTokenID());

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    if (II->isHandleIdentifierCase())
      PP->HandleIdentifier(Result);
    return;
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    // Consume any further plain identifier characters before re-checking for
    // the special ($, \, ?) cases at the top of the loop.
    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: UCNs.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}
706
707
708/// LexNumericConstant - Lex the remainder of a integer or floating point
709/// constant. From[-1] is the first character lexed.  Return the end of the
710/// constant.
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  // Consume the pp-number body: [a-zA-Z0-9_.]*.
  while (isNumberBody(C)) { // FIXME: UCNs?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
}
734
735/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
736/// either " or L".
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated string: diagnose (unless raw mode or an assembler
      // preprocessor, where stray quotes are common) and form tok::unknown.
      if (!isLexingRawMode() && !Features.AsmPreprocessor)
        Diag(BufferPtr, diag::err_unterminated_string);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return;
    } else if (C == 0) {
      // An embedded nul inside the buffer; remember it to warn below.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_string);

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr,
                     Wide ? tok::wide_string_literal : tok::string_literal);
  Result.setLiteralData(TokStart);
}
768
769/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
770/// after having lexed the '<' character.  This is used for #include filenames.
771void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
772  const char *NulCharacter = 0; // Does this string contain the \0 character?
773  const char *AfterLessPos = CurPtr;
774  char C = getAndAdvanceChar(CurPtr, Result);
775  while (C != '>') {
776    // Skip escaped characters.
777    if (C == '\\') {
778      // Skip the escaped character.
779      C = getAndAdvanceChar(CurPtr, Result);
780    } else if (C == '\n' || C == '\r' ||             // Newline.
781               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
782      // If the filename is unterminated, then it must just be a lone <
783      // character.  Return this as such.
784      FormTokenWithChars(Result, AfterLessPos, tok::less);
785      return;
786    } else if (C == 0) {
787      NulCharacter = CurPtr-1;
788    }
789    C = getAndAdvanceChar(CurPtr, Result);
790  }
791
792  // If a nul character existed in the string, warn about it.
793  if (NulCharacter && !isLexingRawMode())
794    Diag(NulCharacter, diag::null_in_string);
795
796  // Update the location of token as well as BufferPtr.
797  const char *TokStart = BufferPtr;
798  FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
799  Result.setLiteralData(TokStart);
800}
801
802
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.  Forms a tok::char_constant on success, or a
/// tok::unknown token for empty or unterminated constants.
void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this character contain the \0 character?

  // Handle the common case of 'x' and '\y' efficiently.
  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    // '' is not a valid character constant; diagnose it (unless we're in raw
    // mode or preprocessing assembly) and return an unknown token.
    if (!isLexingRawMode() && !Features.AsmPreprocessor)
      Diag(BufferPtr, diag::err_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return;
  } else if (C == '\\') {
    // Skip the escaped character.
    // FIXME: UCN's.
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // Fast path: a single (possibly escaped) character immediately followed by
  // the closing quote.
  if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
    ++CurPtr;
  } else {
    // Fall back on generic code for embedded nulls, newlines, wide chars.
    do {
      // Skip escaped characters.
      if (C == '\\') {
        // Skip the escaped character.
        C = getAndAdvanceChar(CurPtr, Result);
      } else if (C == '\n' || C == '\r' ||               // Newline.
                 (C == 0 && CurPtr-1 == BufferEnd)) {    // End of file.
        // Ran off the end of the line or buffer without a closing quote.
        if (!isLexingRawMode() && !Features.AsmPreprocessor)
          Diag(BufferPtr, diag::err_unterminated_char);
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        return;
      } else if (C == 0) {
        // Remember the (last) embedded nul so we can warn about it below.
        NulCharacter = CurPtr-1;
      }
      C = getAndAdvanceChar(CurPtr, Result);
    } while (C != '\'');
  }

  // Warn about an embedded nul byte inside the constant.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::char_constant);
  Result.setLiteralData(TokStart);
}
851
852/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
853/// Update BufferPtr to point to the next non-whitespace character and return.
854///
855/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
856///
857bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
858  // Whitespace - Skip it, then return the token after the whitespace.
859  unsigned char Char = *CurPtr;  // Skip consequtive spaces efficiently.
860  while (1) {
861    // Skip horizontal whitespace very aggressively.
862    while (isHorizontalWhitespace(Char))
863      Char = *++CurPtr;
864
865    // Otherwise if we have something other than whitespace, we're done.
866    if (Char != '\n' && Char != '\r')
867      break;
868
869    if (ParsingPreprocessorDirective) {
870      // End of preprocessor directive line, let LexTokenInternal handle this.
871      BufferPtr = CurPtr;
872      return false;
873    }
874
875    // ok, but handle newline.
876    // The returned token is at the start of the line.
877    Result.setFlag(Token::StartOfLine);
878    // No leading whitespace seen so far.
879    Result.clearFlag(Token::LeadingSpace);
880    Char = *++CurPtr;
881  }
882
883  // If this isn't immediately after a newline, there is leading space.
884  char PrevChar = CurPtr[-1];
885  if (PrevChar != '\n' && PrevChar != '\r')
886    Result.setFlag(Token::LeadingSpace);
887
888  // If the client wants us to return whitespace, return it now.
889  if (isKeepWhitespaceMode()) {
890    FormTokenWithChars(Result, CurPtr, tok::unknown);
891    return true;
892  }
893
894  BufferPtr = CurPtr;
895  return false;
896}
897
/// SkipBCPLComment - We have just read the // characters from input.  Skip
/// until we find the newline character that terminates the comment.  Then
/// update BufferPtr and return.  If we're in KeepCommentMode, this will form
/// the token and return true.
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment && !isLexingRawMode()) {
    Diag(BufferPtr, diag::ext_bcpl_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: Speedup BCPL comment lexing.  Just scan for a \n or \r character.
    // If we find a \n character, scan backwards, checking to see if it's an
    // escaped newline, like we do for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.  Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs.  If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If the char that we finally got was a \n, then we must have had something
    // like \<newline><newline>.  We don't want to have consumed the second
    // newline, we want CurPtr, to end up pointing to it down below.
    if (C == '\n' || C == '\r') {
      --CurPtr;
      C = 'x'; // doesn't matter what this is.
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isspace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isspace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
          break;
        }
    }

    // If we advanced past the end-of-buffer sentinel nul, back up and stop.
    if (CurPtr == BufferEnd+1) { --CurPtr; break; }
  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.  Notify the preprocessor's comment
  // handlers of the comment's source range.
  if (PP)
    PP->HandleComment(SourceRange(getSourceLocation(BufferPtr),
                                  getSourceLocation(CurPtr)));

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveBCPLComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}
1007
1008/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
1009/// an appropriate way and return it.
1010bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
1011  // If we're not in a preprocessor directive, just return the // comment
1012  // directly.
1013  FormTokenWithChars(Result, CurPtr, tok::comment);
1014
1015  if (!ParsingPreprocessorDirective)
1016    return true;
1017
1018  // If this BCPL-style comment is in a macro definition, transmogrify it into
1019  // a C-style block comment.
1020  std::string Spelling = PP->getSpelling(Result);
1021  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
1022  Spelling[1] = '*';   // Change prefix to "/*".
1023  Spelling += "*/";    // add suffix.
1024
1025  Result.setKind(tok::comment);
1026  PP->CreateString(&Spelling[0], Spelling.size(), Result,
1027                   Result.getLocation());
1028  return true;
1029}
1030
/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified newline
/// character (either \n or \r) is part of an escaped newline sequence.  Issue a
/// diagnostic if so.  We know that the newline is inside of a block comment.
/// On entry CurPtr points at the newline; the function scans backwards looking
/// for "*\<escaped newline>" (or the "??/" trigraph form of the backslash).
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the slash and newline.  Nul bytes are skipped as well.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    // Only a '*' before the backslash makes this the end of the comment.
    if (CurPtr[-1] != '*') return false;
  } else {
    // It isn't a slash, is it the ?? / trigraph?
    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
        CurPtr[-3] != '*')
      return false;

    // This is the trigraph ending the comment.  Emit a stern warning!
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getFeatures().Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(CurPtr, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace && !L->isLexingRawMode())
    L->Diag(CurPtr, diag::backslash_newline_space);

  return true;
}
1091
1092#ifdef __SSE2__
1093#include <emmintrin.h>
1094#elif __ALTIVEC__
1095#include <altivec.h>
1096#undef bool
1097#endif
1098
/// SkipBlockComment - We have just read the /* characters from input.  Read
/// until we find the */ characters that terminate the comment.  Note that we
/// don't bother decoding trigraphs or escaped newlines in block comments,
/// because they cannot cause the comment to end.  The only thing that can
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
///
/// If KeepCommentMode is enabled, this forms a token from the comment and
/// returns true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    // Hit the end-of-buffer sentinel right after "/*": unterminated comment.
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token.  Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (1) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Scan 16 bytes at a time for a '/' using SSE2 compare/movemask.
      __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/',
                                     '/', '/', '/', '/', '/', '/', '/', '/');
      while (CurPtr+16 <= BufferEnd &&
             _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0)
        CurPtr += 16;
#elif __ALTIVEC__
      // Scan 16 bytes at a time for a '/' using AltiVec vec_any_eq.
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

  FoundSlash:
    if (C == '/') {
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token.  Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    }
    C = *CurPtr++;
  }

  // Notify the preprocessor's comment handlers of the comment's range.
  if (PP)
    PP->HandleComment(SourceRange(getSourceLocation(BufferPtr),
                                  getSourceLocation(CurPtr)));

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.  This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr+1);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}
1249
1250//===----------------------------------------------------------------------===//
1251// Primary Lexing Entry Points
1252//===----------------------------------------------------------------------===//
1253
1254/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
1255/// uninterpreted string.  This switches the lexer out of directive mode.
1256std::string Lexer::ReadToEndOfLine() {
1257  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
1258         "Must be in a preprocessing directive!");
1259  std::string Result;
1260  Token Tmp;
1261
1262  // CurPtr - Cache BufferPtr in an automatic variable.
1263  const char *CurPtr = BufferPtr;
1264  while (1) {
1265    char Char = getAndAdvanceChar(CurPtr, Tmp);
1266    switch (Char) {
1267    default:
1268      Result += Char;
1269      break;
1270    case 0:  // Null.
1271      // Found end of file?
1272      if (CurPtr-1 != BufferEnd) {
1273        // Nope, normal character, continue.
1274        Result += Char;
1275        break;
1276      }
1277      // FALL THROUGH.
1278    case '\r':
1279    case '\n':
1280      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
1281      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
1282      BufferPtr = CurPtr-1;
1283
1284      // Next, lex the character, which should handle the EOM transition.
1285      Lex(Tmp);
1286      assert(Tmp.is(tok::eom) && "Unexpected token!");
1287
1288      // Finally, we're done, return the string we found.
1289      return Result;
1290    }
1291  }
1292}
1293
/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eom);

    // Restore comment saving mode, in case it was disabled for directive.
    SetCommentRetentionState(PP->getCommentRetentionState());
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  // Otherwise, check if we are code-completing, then issue diagnostics for
  // unterminated #if and missing newline.

  if (IsEofCodeCompletion) {
    // We're at the end of the file, but we've been asked to consider the
    // end of the file to be a code-completion token. Return the
    // code-completion token.
    Result.startToken();
    FormTokenWithChars(Result, CurPtr, tok::code_completion);

    // Only do the eof -> code_completion translation once.
    IsEofCodeCompletion = false;
    return true;
  }

  // If we are in a #if directive, emit an error for each conditional still
  // open on the stack.
  while (!ConditionalStack.empty()) {
    PP->Diag(ConditionalStack.back().IfLoc,
             diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn, with a fix-it hint inserting the missing newline.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
    Diag(BufferEnd, diag::ext_no_newline_eof)
      << CodeModificationHint::CreateInsertion(getSourceLocation(BufferEnd),
                                               "\n");

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.  Note: this may pop the include
  // stack and delete 'this', per the caller's comment in LexTokenInternal.
  return PP->HandleEndOfFile(Result);
}
1356
1357/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
1358/// the specified lexer will return a tok::l_paren token, 0 if it is something
1359/// else and 2 if there are no more tokens in the buffer controlled by the
1360/// lexer.
1361unsigned Lexer::isNextPPTokenLParen() {
1362  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
1363
1364  // Switch to 'skipping' mode.  This will ensure that we can lex a token
1365  // without emitting diagnostics, disables macro expansion, and will cause EOF
1366  // to return an EOF token instead of popping the include stack.
1367  LexingRawMode = true;
1368
1369  // Save state that can be changed while lexing so that we can restore it.
1370  const char *TmpBufferPtr = BufferPtr;
1371  bool inPPDirectiveMode = ParsingPreprocessorDirective;
1372
1373  Token Tok;
1374  Tok.startToken();
1375  LexTokenInternal(Tok);
1376
1377  // Restore state that may have changed.
1378  BufferPtr = TmpBufferPtr;
1379  ParsingPreprocessorDirective = inPPDirectiveMode;
1380
1381  // Restore the lexer back to non-skipping mode.
1382  LexingRawMode = false;
1383
1384  if (Tok.is(tok::eof))
1385    return 2;
1386  return Tok.is(tok::l_paren);
1387}
1388
1389
1390/// LexTokenInternal - This implements a simple C family lexer.  It is an
1391/// extremely performance critical piece of code.  This assumes that the buffer
1392/// has a null character at the end of the file.  This returns a preprocessing
1393/// token, not a normal token, as such, it is an internal interface.  It assumes
1394/// that the Flags of result have been cleared before calling this.
1395void Lexer::LexTokenInternal(Token &Result) {
1396LexNextToken:
1397  // New token, can't need cleaning yet.
1398  Result.clearFlag(Token::NeedsCleaning);
1399  Result.setIdentifierInfo(0);
1400
1401  // CurPtr - Cache BufferPtr in an automatic variable.
1402  const char *CurPtr = BufferPtr;
1403
1404  // Small amounts of horizontal whitespace is very common between tokens.
1405  if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
1406    ++CurPtr;
1407    while ((*CurPtr == ' ') || (*CurPtr == '\t'))
1408      ++CurPtr;
1409
1410    // If we are keeping whitespace and other tokens, just return what we just
1411    // skipped.  The next lexer invocation will return the token after the
1412    // whitespace.
1413    if (isKeepWhitespaceMode()) {
1414      FormTokenWithChars(Result, CurPtr, tok::unknown);
1415      return;
1416    }
1417
1418    BufferPtr = CurPtr;
1419    Result.setFlag(Token::LeadingSpace);
1420  }
1421
1422  unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.
1423
1424  // Read a character, advancing over it.
1425  char Char = getAndAdvanceChar(CurPtr, Result);
1426  tok::TokenKind Kind;
1427
1428  switch (Char) {
1429  case 0:  // Null.
1430    // Found end of file?
1431    if (CurPtr-1 == BufferEnd) {
1432      // Read the PP instance variable into an automatic variable, because
1433      // LexEndOfFile will often delete 'this'.
1434      Preprocessor *PPCache = PP;
1435      if (LexEndOfFile(Result, CurPtr-1))  // Retreat back into the file.
1436        return;   // Got a token to return.
1437      assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
1438      return PPCache->Lex(Result);
1439    }
1440
1441    if (!isLexingRawMode())
1442      Diag(CurPtr-1, diag::null_in_file);
1443    Result.setFlag(Token::LeadingSpace);
1444    if (SkipWhitespace(Result, CurPtr))
1445      return; // KeepWhitespaceMode
1446
1447    goto LexNextToken;   // GCC isn't tail call eliminating.
1448  case '\n':
1449  case '\r':
1450    // If we are inside a preprocessor directive and we see the end of line,
1451    // we know we are done with the directive, so return an EOM token.
1452    if (ParsingPreprocessorDirective) {
1453      // Done parsing the "line".
1454      ParsingPreprocessorDirective = false;
1455
1456      // Restore comment saving mode, in case it was disabled for directive.
1457      SetCommentRetentionState(PP->getCommentRetentionState());
1458
1459      // Since we consumed a newline, we are back at the start of a line.
1460      IsAtStartOfLine = true;
1461
1462      Kind = tok::eom;
1463      break;
1464    }
1465    // The returned token is at the start of the line.
1466    Result.setFlag(Token::StartOfLine);
1467    // No leading whitespace seen so far.
1468    Result.clearFlag(Token::LeadingSpace);
1469
1470    if (SkipWhitespace(Result, CurPtr))
1471      return; // KeepWhitespaceMode
1472    goto LexNextToken;   // GCC isn't tail call eliminating.
1473  case ' ':
1474  case '\t':
1475  case '\f':
1476  case '\v':
1477  SkipHorizontalWhitespace:
1478    Result.setFlag(Token::LeadingSpace);
1479    if (SkipWhitespace(Result, CurPtr))
1480      return; // KeepWhitespaceMode
1481
1482  SkipIgnoredUnits:
1483    CurPtr = BufferPtr;
1484
1485    // If the next token is obviously a // or /* */ comment, skip it efficiently
1486    // too (without going through the big switch stmt).
1487    if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
1488        Features.BCPLComment) {
1489      SkipBCPLComment(Result, CurPtr+2);
1490      goto SkipIgnoredUnits;
1491    } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
1492      SkipBlockComment(Result, CurPtr+2);
1493      goto SkipIgnoredUnits;
1494    } else if (isHorizontalWhitespace(*CurPtr)) {
1495      goto SkipHorizontalWhitespace;
1496    }
1497    goto LexNextToken;   // GCC isn't tail call eliminating.
1498
1499  // C99 6.4.4.1: Integer Constants.
1500  // C99 6.4.4.2: Floating Constants.
1501  case '0': case '1': case '2': case '3': case '4':
1502  case '5': case '6': case '7': case '8': case '9':
1503    // Notify MIOpt that we read a non-whitespace/non-comment token.
1504    MIOpt.ReadToken();
1505    return LexNumericConstant(Result, CurPtr);
1506
1507  case 'L':   // Identifier (Loony) or wide literal (L'x' or L"xyz").
1508    // Notify MIOpt that we read a non-whitespace/non-comment token.
1509    MIOpt.ReadToken();
1510    Char = getCharAndSize(CurPtr, SizeTmp);
1511
1512    // Wide string literal.
1513    if (Char == '"')
1514      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
1515                              true);
1516
1517    // Wide character constant.
1518    if (Char == '\'')
1519      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1520    // FALL THROUGH, treating L like the start of an identifier.
1521
1522  // C99 6.4.2: Identifiers.
1523  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
1524  case 'H': case 'I': case 'J': case 'K':    /*'L'*/case 'M': case 'N':
1525  case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
1526  case 'V': case 'W': case 'X': case 'Y': case 'Z':
1527  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
1528  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
1529  case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
1530  case 'v': case 'w': case 'x': case 'y': case 'z':
1531  case '_':
1532    // Notify MIOpt that we read a non-whitespace/non-comment token.
1533    MIOpt.ReadToken();
1534    return LexIdentifier(Result, CurPtr);
1535
1536  case '$':   // $ in identifiers.
1537    if (Features.DollarIdents) {
1538      if (!isLexingRawMode())
1539        Diag(CurPtr-1, diag::ext_dollar_in_identifier);
1540      // Notify MIOpt that we read a non-whitespace/non-comment token.
1541      MIOpt.ReadToken();
1542      return LexIdentifier(Result, CurPtr);
1543    }
1544
1545    Kind = tok::unknown;
1546    break;
1547
1548  // C99 6.4.4: Character Constants.
1549  case '\'':
1550    // Notify MIOpt that we read a non-whitespace/non-comment token.
1551    MIOpt.ReadToken();
1552    return LexCharConstant(Result, CurPtr);
1553
1554  // C99 6.4.5: String Literals.
1555  case '"':
1556    // Notify MIOpt that we read a non-whitespace/non-comment token.
1557    MIOpt.ReadToken();
1558    return LexStringLiteral(Result, CurPtr, false);
1559
1560  // C99 6.4.6: Punctuators.
1561  case '?':
1562    Kind = tok::question;
1563    break;
1564  case '[':
1565    Kind = tok::l_square;
1566    break;
1567  case ']':
1568    Kind = tok::r_square;
1569    break;
1570  case '(':
1571    Kind = tok::l_paren;
1572    break;
1573  case ')':
1574    Kind = tok::r_paren;
1575    break;
1576  case '{':
1577    Kind = tok::l_brace;
1578    break;
1579  case '}':
1580    Kind = tok::r_brace;
1581    break;
1582  case '.':
1583    Char = getCharAndSize(CurPtr, SizeTmp);
1584    if (Char >= '0' && Char <= '9') {
1585      // Notify MIOpt that we read a non-whitespace/non-comment token.
1586      MIOpt.ReadToken();
1587
1588      return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1589    } else if (Features.CPlusPlus && Char == '*') {
1590      Kind = tok::periodstar;
1591      CurPtr += SizeTmp;
1592    } else if (Char == '.' &&
1593               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
1594      Kind = tok::ellipsis;
1595      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1596                           SizeTmp2, Result);
1597    } else {
1598      Kind = tok::period;
1599    }
1600    break;
1601  case '&':
1602    Char = getCharAndSize(CurPtr, SizeTmp);
1603    if (Char == '&') {
1604      Kind = tok::ampamp;
1605      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1606    } else if (Char == '=') {
1607      Kind = tok::ampequal;
1608      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1609    } else {
1610      Kind = tok::amp;
1611    }
1612    break;
1613  case '*':
1614    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1615      Kind = tok::starequal;
1616      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1617    } else {
1618      Kind = tok::star;
1619    }
1620    break;
1621  case '+':
1622    Char = getCharAndSize(CurPtr, SizeTmp);
1623    if (Char == '+') {
1624      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1625      Kind = tok::plusplus;
1626    } else if (Char == '=') {
1627      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1628      Kind = tok::plusequal;
1629    } else {
1630      Kind = tok::plus;
1631    }
1632    break;
1633  case '-':
1634    Char = getCharAndSize(CurPtr, SizeTmp);
1635    if (Char == '-') {      // --
1636      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1637      Kind = tok::minusminus;
1638    } else if (Char == '>' && Features.CPlusPlus &&
1639               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {  // C++ ->*
1640      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1641                           SizeTmp2, Result);
1642      Kind = tok::arrowstar;
1643    } else if (Char == '>') {   // ->
1644      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1645      Kind = tok::arrow;
1646    } else if (Char == '=') {   // -=
1647      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1648      Kind = tok::minusequal;
1649    } else {
1650      Kind = tok::minus;
1651    }
1652    break;
1653  case '~':
1654    Kind = tok::tilde;
1655    break;
1656  case '!':
1657    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1658      Kind = tok::exclaimequal;
1659      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1660    } else {
1661      Kind = tok::exclaim;
1662    }
1663    break;
1664  case '/':
1665    // 6.4.9: Comments
1666    Char = getCharAndSize(CurPtr, SizeTmp);
1667    if (Char == '/') {         // BCPL comment.
1668      // Even if BCPL comments are disabled (e.g. in C89 mode), we generally
1669      // want to lex this as a comment.  There is one problem with this though,
1670      // that in one particular corner case, this can change the behavior of the
      // resultant program.  For example, in "foo //**/ bar", C89 would lex
      // this as "foo / bar" and languages with BCPL comments would lex it as
1673      // "foo".  Check to see if the character after the second slash is a '*'.
1674      // If so, we will lex that as a "/" instead of the start of a comment.
1675      if (Features.BCPLComment ||
1676          getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') {
1677        if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
1678          return; // KeepCommentMode
1679
1680        // It is common for the tokens immediately after a // comment to be
1681        // whitespace (indentation for the next line).  Instead of going through
1682        // the big switch, handle it efficiently now.
1683        goto SkipIgnoredUnits;
1684      }
1685    }
1686
1687    if (Char == '*') {  // /**/ comment.
1688      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
1689        return; // KeepCommentMode
1690      goto LexNextToken;   // GCC isn't tail call eliminating.
1691    }
1692
1693    if (Char == '=') {
1694      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1695      Kind = tok::slashequal;
1696    } else {
1697      Kind = tok::slash;
1698    }
1699    break;
1700  case '%':
1701    Char = getCharAndSize(CurPtr, SizeTmp);
1702    if (Char == '=') {
1703      Kind = tok::percentequal;
1704      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1705    } else if (Features.Digraphs && Char == '>') {
1706      Kind = tok::r_brace;                             // '%>' -> '}'
1707      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1708    } else if (Features.Digraphs && Char == ':') {
1709      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1710      Char = getCharAndSize(CurPtr, SizeTmp);
1711      if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
1712        Kind = tok::hashhash;                          // '%:%:' -> '##'
1713        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1714                             SizeTmp2, Result);
1715      } else if (Char == '@' && Features.Microsoft) {  // %:@ -> #@ -> Charize
1716        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1717        if (!isLexingRawMode())
1718          Diag(BufferPtr, diag::charize_microsoft_ext);
1719        Kind = tok::hashat;
1720      } else {                                         // '%:' -> '#'
1721        // We parsed a # character.  If this occurs at the start of the line,
1722        // it's actually the start of a preprocessing directive.  Callback to
1723        // the preprocessor to handle it.
1724        // FIXME: -fpreprocessed mode??
1725        if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) {
1726          FormTokenWithChars(Result, CurPtr, tok::hash);
1727          PP->HandleDirective(Result);
1728
1729          // As an optimization, if the preprocessor didn't switch lexers, tail
1730          // recurse.
1731          if (PP->isCurrentLexer(this)) {
1732            // Start a new token. If this is a #include or something, the PP may
1733            // want us starting at the beginning of the line again.  If so, set
1734            // the StartOfLine flag.
1735            if (IsAtStartOfLine) {
1736              Result.setFlag(Token::StartOfLine);
1737              IsAtStartOfLine = false;
1738            }
1739            goto LexNextToken;   // GCC isn't tail call eliminating.
1740          }
1741
1742          return PP->Lex(Result);
1743        }
1744
1745        Kind = tok::hash;
1746      }
1747    } else {
1748      Kind = tok::percent;
1749    }
1750    break;
1751  case '<':
1752    Char = getCharAndSize(CurPtr, SizeTmp);
1753    if (ParsingFilename) {
1754      return LexAngledStringLiteral(Result, CurPtr);
1755    } else if (Char == '<' &&
1756               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1757      Kind = tok::lesslessequal;
1758      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1759                           SizeTmp2, Result);
1760    } else if (Char == '<') {
1761      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1762      Kind = tok::lessless;
1763    } else if (Char == '=') {
1764      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1765      Kind = tok::lessequal;
1766    } else if (Features.Digraphs && Char == ':') {     // '<:' -> '['
1767      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1768      Kind = tok::l_square;
1769    } else if (Features.Digraphs && Char == '%') {     // '<%' -> '{'
1770      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1771      Kind = tok::l_brace;
1772    } else {
1773      Kind = tok::less;
1774    }
1775    break;
1776  case '>':
1777    Char = getCharAndSize(CurPtr, SizeTmp);
1778    if (Char == '=') {
1779      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1780      Kind = tok::greaterequal;
1781    } else if (Char == '>' &&
1782               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1783      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1784                           SizeTmp2, Result);
1785      Kind = tok::greatergreaterequal;
1786    } else if (Char == '>') {
1787      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1788      Kind = tok::greatergreater;
1789    } else {
1790      Kind = tok::greater;
1791    }
1792    break;
1793  case '^':
1794    Char = getCharAndSize(CurPtr, SizeTmp);
1795    if (Char == '=') {
1796      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1797      Kind = tok::caretequal;
1798    } else {
1799      Kind = tok::caret;
1800    }
1801    break;
1802  case '|':
1803    Char = getCharAndSize(CurPtr, SizeTmp);
1804    if (Char == '=') {
1805      Kind = tok::pipeequal;
1806      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1807    } else if (Char == '|') {
1808      Kind = tok::pipepipe;
1809      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1810    } else {
1811      Kind = tok::pipe;
1812    }
1813    break;
1814  case ':':
1815    Char = getCharAndSize(CurPtr, SizeTmp);
1816    if (Features.Digraphs && Char == '>') {
1817      Kind = tok::r_square; // ':>' -> ']'
1818      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1819    } else if (Features.CPlusPlus && Char == ':') {
1820      Kind = tok::coloncolon;
1821      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1822    } else {
1823      Kind = tok::colon;
1824    }
1825    break;
1826  case ';':
1827    Kind = tok::semi;
1828    break;
1829  case '=':
1830    Char = getCharAndSize(CurPtr, SizeTmp);
1831    if (Char == '=') {
1832      Kind = tok::equalequal;
1833      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1834    } else {
1835      Kind = tok::equal;
1836    }
1837    break;
1838  case ',':
1839    Kind = tok::comma;
1840    break;
1841  case '#':
1842    Char = getCharAndSize(CurPtr, SizeTmp);
1843    if (Char == '#') {
1844      Kind = tok::hashhash;
1845      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1846    } else if (Char == '@' && Features.Microsoft) {  // #@ -> Charize
1847      Kind = tok::hashat;
1848      if (!isLexingRawMode())
1849        Diag(BufferPtr, diag::charize_microsoft_ext);
1850      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1851    } else {
1852      // We parsed a # character.  If this occurs at the start of the line,
1853      // it's actually the start of a preprocessing directive.  Callback to
1854      // the preprocessor to handle it.
1855      // FIXME: -fpreprocessed mode??
1856      if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) {
1857        FormTokenWithChars(Result, CurPtr, tok::hash);
1858        PP->HandleDirective(Result);
1859
1860        // As an optimization, if the preprocessor didn't switch lexers, tail
1861        // recurse.
1862        if (PP->isCurrentLexer(this)) {
1863          // Start a new token.  If this is a #include or something, the PP may
1864          // want us starting at the beginning of the line again.  If so, set
1865          // the StartOfLine flag.
1866          if (IsAtStartOfLine) {
1867            Result.setFlag(Token::StartOfLine);
1868            IsAtStartOfLine = false;
1869          }
1870          goto LexNextToken;   // GCC isn't tail call eliminating.
1871        }
1872        return PP->Lex(Result);
1873      }
1874
1875      Kind = tok::hash;
1876    }
1877    break;
1878
1879  case '@':
1880    // Objective C support.
1881    if (CurPtr[-1] == '@' && Features.ObjC1)
1882      Kind = tok::at;
1883    else
1884      Kind = tok::unknown;
1885    break;
1886
1887  case '\\':
1888    // FIXME: UCN's.
1889    // FALL THROUGH.
1890  default:
1891    Kind = tok::unknown;
1892    break;
1893  }
1894
1895  // Notify MIOpt that we read a non-whitespace/non-comment token.
1896  MIOpt.ReadToken();
1897
1898  // Update the location of token as well as BufferPtr.
1899  FormTokenWithChars(Result, CurPtr, Kind);
1900}
1901