Lexer.cpp revision 0af574270d3be2b0e73a3379dfaa633746f8fc6f
//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//
//
// TODO: GCC Diagnostics emitted by the lexer:
// PEDWARN: (form feed|vertical tab) in preprocessing directive
//
// Universal characters, unicode, char mapping:
// WARNING: `%.*s' is not in NFKC
// WARNING: `%.*s' is not in NFC
//
// Other:
// TODO: Options to support:
//    -fexec-charset,-fwide-exec-charset
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cctype>
using namespace clang;

// Forward declaration: lazily fills in the CharInfo classification table the
// first time any Lexer is constructed.
static void InitCharacterInfo();

//===----------------------------------------------------------------------===//
// Token Class Implementation
//===----------------------------------------------------------------------===//

/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
/// Only identifiers can be ObjC @-keywords; any other token kind returns
/// false without consulting the IdentifierInfo.
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
  return is(tok::identifier) &&
         getIdentifierInfo()->getObjCKeywordID() == objcKey;
}

/// getObjCKeywordID - Return the ObjC keyword kind for this token, or
/// tok::objc_not_keyword if the token has no identifier info (i.e. it is not
/// an identifier at all).
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
  IdentifierInfo *specId = getIdentifierInfo();
  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}


//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//


/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process.  This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
///
/// BufStart/BufEnd optionally restrict lexing to a sub-range of the file's
/// buffer; if null, the whole buffer is lexed.
Lexer::Lexer(SourceLocation fileloc, Preprocessor &pp,
             const char *BufStart, const char *BufEnd)
  : FileLoc(fileloc), PP(&pp), Features(pp.getLangOptions()) {

  SourceManager &SourceMgr = PP->getSourceManager();
  unsigned InputFileID = SourceMgr.getPhysicalLoc(FileLoc).getFileID();
  const llvm::MemoryBuffer *InputFile = SourceMgr.getBuffer(InputFileID);

  Is_PragmaLexer = false;
  InitCharacterInfo();

  // BufferStart must always be InputFile->getBufferStart(), so that source
  // locations computed as offsets from BufferStart stay correct even when
  // lexing a sub-range.
  BufferStart = InputFile->getBufferStart();

  // BufferPtr and BufferEnd can start out somewhere inside the current buffer.
  // If unspecified, they starts at the start/end of the buffer.
  BufferPtr = BufStart ? BufStart : BufferStart;
  BufferEnd = BufEnd ? BufEnd : InputFile->getBufferEnd();

  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode.  Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0"
  // block or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to keeping comments if requested.
  KeepCommentMode = PP->getCommentRetentionState();
}

/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
///
/// Note: with no Preprocessor (PP == 0), this lexer can never emit
/// diagnostics or do identifier lookup; it always runs in raw mode.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
             const char *BufStart, const char *BufEnd,
             const llvm::MemoryBuffer *FromFile)
  : FileLoc(fileloc), PP(0), Features(features) {
  Is_PragmaLexer = false;
  InitCharacterInfo();

  // If a MemoryBuffer was specified, use its start as BufferStart. This
  // affects the source location objects produced by this lexer.
  BufferStart = FromFile ? FromFile->getBufferStart() : BufStart;
  BufferPtr = BufStart;
  BufferEnd = BufEnd;

  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We *are* in raw mode.
  LexingRawMode = true;

  // Never keep comments in raw mode.
  KeepCommentMode = false;
}


/// Stringify - Convert the specified string into a C string, with surrounding
/// ""'s, and with escaped \ and " characters.  If Charify is true, ' is the
/// quote character escaped instead of ".  (Despite the name, this does not
/// actually add the surrounding quotes; it only escapes the contents.)
std::string Lexer::Stringify(const std::string &Str, bool Charify) {
  std::string Result = Str;
  char Quote = Charify ? '\'' : '"';
  for (unsigned i = 0, e = Result.size(); i != e; ++i) {
    if (Result[i] == '\\' || Result[i] == Quote) {
      Result.insert(Result.begin()+i, '\\');
      // Skip over the inserted backslash and grow the bound to match the new
      // string length.
      ++i; ++e;
    }
  }
  return Result;
}

/// Stringify - Convert the specified string into a C string by escaping '\'
/// and " characters.  This does not add surrounding ""'s to the string.
/// In-place variant operating on a SmallVector buffer.
void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
    if (Str[i] == '\\' || Str[i] == '"') {
      Str.insert(Str.begin()+i, '\\');
      // Skip the inserted backslash; account for the larger size.
      ++i; ++e;
    }
  }
}


/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file.  If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM) {
  // If this comes from a macro expansion, we really do want the macro name,
  // not the token this macro expanded to.
  Loc = SM.getLogicalLoc(Loc);

  const char *StrData = SM.getCharacterData(Loc);

  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
  // all obviously single-char tokens.  This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.


  const char *BufEnd = SM.getBufferData(Loc.getFileID()).second;

  // Create a langops struct and enable trigraphs.  This is sufficient for
  // measuring tokens.
  LangOptions LangOpts;
  LangOpts.Trigraphs = true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(Loc, LangOpts, StrData, BufEnd);
  Token TheTok;
  TheLexer.LexFromRawLexer(TheTok);
  return TheTok.getLength();
}

//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//

// Per-byte classification table; bits are the CHAR_* flags below.  '\0' and
// all unlisted characters classify as 0 (no flags).
static unsigned char CharInfo[256];

enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'
  CHAR_LETTER   = 0x04,  // a-z,A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20   // .
};

static void InitCharacterInfo() {
  // Only populate the table once; subsequent Lexer constructions are no-ops.
  static bool isInited = false;
  if (isInited) return;
  isInited = true;

  // Initialize the CharInfo table.
  // TODO: statically initialize this.
  CharInfo[(int)' '] = CharInfo[(int)'\t'] =
  CharInfo[(int)'\f'] = CharInfo[(int)'\v'] = CHAR_HORZ_WS;
  CharInfo[(int)'\n'] = CharInfo[(int)'\r'] = CHAR_VERT_WS;

  CharInfo[(int)'_'] = CHAR_UNDER;
  CharInfo[(int)'.'] = CHAR_PERIOD;
  // Mark both the lowercase letter and its uppercase counterpart.
  for (unsigned i = 'a'; i <= 'z'; ++i)
    CharInfo[i] = CharInfo[i+'A'-'a'] = CHAR_LETTER;
  for (unsigned i = '0'; i <= '9'; ++i)
    CharInfo[i] = CHAR_NUMBER;
}

/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false;
}

/// isHorizontalWhitespace - Return true if this character is horizontal
/// whitespace: ' ', '\t', '\f', '\v'.  Note that this returns false for '\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
  return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
}

/// isWhitespace - Return true if this character is horizontal or vertical
/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.  Note that this returns
/// false for '\0'.
static inline bool isWhitespace(unsigned char c) {
  return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false;
}

/// isNumberBody - Return true if this is the body character of a
/// preprocessing number, which is [a-zA-Z0-9_.].
static inline bool isNumberBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ?
    true : false;
}


//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//

/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend
/// the lexer buffer was all instantiated at a single point, perform the
/// mapping.  This is currently only used for _Pragma implementation, so it is
/// the slow path of the hot getSourceLocation method.  Do not allow it to be
/// inlined.
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo) DISABLE_INLINE;
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo) {
  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the instantiation location of FileLoc with the
  // physical location.
  SourceManager &SourceMgr = PP.getSourceManager();

  // Create a new SLoc which is expanded from logical(FileLoc) but whose
  // characters come from phys(FileLoc)+Offset.
  SourceLocation VirtLoc = SourceMgr.getLogicalLoc(FileLoc);
  SourceLocation PhysLoc = SourceMgr.getPhysicalLoc(FileLoc);
  PhysLoc = SourceLocation::getFileLoc(PhysLoc.getFileID(), CharNo);
  return SourceMgr.getInstantiationLoc(PhysLoc, VirtLoc);
}

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.  Loc must point into [BufferStart, BufferEnd].
SourceLocation Lexer::getSourceLocation(const char *Loc) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return SourceLocation::getFileLoc(FileLoc.getFileID(), CharNo);

  // Mapped-buffer case (_Pragma): requires a Preprocessor to do the mapping.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo);
}

/// Diag - Forwarding function for diagnostics.  This translate a source
/// position in the current buffer into a SourceLocation object for rendering.
/// In raw mode, warnings/extensions/notes are suppressed (errors still fire).
void Lexer::Diag(const char *Loc, unsigned DiagID,
                 const std::string &Msg) const {
  if (LexingRawMode && Diagnostic::isBuiltinNoteWarningOrExtension(DiagID))
    return;
  PP->Diag(getSourceLocation(Loc), DiagID, Msg);
}
/// Diag - Overload taking an already-computed SourceLocation.
void Lexer::Diag(SourceLocation Loc, unsigned DiagID,
                 const std::string &Msg) const {
  if (LexingRawMode && Diagnostic::isBuiltinNoteWarningOrExtension(DiagID))
    return;
  PP->Diag(Loc, DiagID, Msg);
}


//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}

/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
/// return the result character.  Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  // L may be null (warning-suppressed callers); only diagnose when we have a
  // lexer to report through.  CP-2 points at the leading "??".
  if (Res && L) {
    if (!L->getFeatures().Trigraphs) {
      L->Diag(CP-2, diag::trigraph_ignored);
      return 0;
    } else {
      L->Diag(CP-2, diag::trigraph_converted, std::string()+Res);
    }
  }
  return Res;
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: C++ UCN's?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
          // Remember that this token needs to be cleaned.
          if (Tok) Tok->setFlag(Token::NeedsCleaning);

          // Warn if there was whitespace between the backslash and newline.
          // SizeTmp == 1 means the newline immediately followed the backslash.
          if (SizeTmp != 1 && Tok)
            Diag(Ptr, diag::backslash_newline_space);

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;
          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlow(Ptr, Size, Tok);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // "??/" decodes to '\\', which may itself begin an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}


/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into
/// Size, and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.  This variant emits no diagnostics and sets no
/// token flags, and is static (no Lexer instance required).
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;

          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      // "??/" decodes to '\\', which may itself begin an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

/// LexIdentifier - Lex the remainder of an identifier; BufferPtr points at the
/// first character (already matched as [_A-Za-z$]) and CurPtr just past it.
void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C)) {
    C = *CurPtr++;
  }
  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: UCNs.
  if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr);
    Result.setKind(tok::identifier);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode) return;

    // Fill in Result.IdentifierInfo, looking up the identifier in the
    // identifier table.
    PP->LookUpIdentifierInfo(Result, IdStart);

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    return PP->HandleIdentifier(Result);
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: UCNs.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}


/// LexNumericConstant - Lex the remainder of a integer or floating point
/// constant.  From[-1] is the first character lexed.  Return the end of the
/// constant.  Recurses to continue past exponent signs (1e+12) and, when hex
/// floats are enabled, past binary-exponent signs (0x1p+4).
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isNumberBody(C)) { // FIXME: UCNs?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant, continue.
  if (Features.HexFloats &&
      (C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  Result.setKind(tok::numeric_constant);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr);
}

/// LexStringLiteral - Lex the remainder of a string literal, after having
/// lexed either " or L".  On an unterminated string (newline or EOF), emits a
/// diagnostic (unless in raw mode) and produces a tok::unknown token.
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide){
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
      Result.setKind(tok::unknown);
      FormTokenWithChars(Result, CurPtr-1);
      return;
    } else if (C == 0) {
      // Embedded NUL: remember it so we can warn after the literal closes.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  Result.setKind(Wide ? tok::wide_string_literal : tok::string_literal);

  // Update the location of the token as well as the BufferPtr instance var.
  FormTokenWithChars(Result, CurPtr);
}

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
      Result.setKind(tok::unknown);
      FormTokenWithChars(Result, CurPtr-1);
      return;
    } else if (C == 0) {
      // Embedded NUL: remember it so we can warn after the literal closes.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  Result.setKind(tok::angle_string_literal);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr);
}


/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.  Handles empty ('') and unterminated constants by
/// producing tok::unknown with a diagnostic (unless in raw mode).
void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this character contain the \0 character?

  // Handle the common case of 'x' and '\y' efficiently.
  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    if (!LexingRawMode) Diag(BufferPtr, diag::err_empty_character);
    Result.setKind(tok::unknown);
    FormTokenWithChars(Result, CurPtr);
    return;
  } else if (C == '\\') {
    // Skip the escaped character.
    // FIXME: UCN's.
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
    ++CurPtr;
  } else {
    // Fall back on generic code for embedded nulls, newlines, wide chars.
    do {
      // Skip escaped characters.
      if (C == '\\') {
        // Skip the escaped character.
        C = getAndAdvanceChar(CurPtr, Result);
      } else if (C == '\n' || C == '\r' ||             // Newline.
                 (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
        if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_char);
        Result.setKind(tok::unknown);
        FormTokenWithChars(Result, CurPtr-1);
        return;
      } else if (C == 0) {
        // Embedded NUL: remember it so we can warn after the constant closes.
        NulCharacter = CurPtr-1;
      }
      C = getAndAdvanceChar(CurPtr, Result);
    } while (C != '\'');
  }

  if (NulCharacter) Diag(NulCharacter, diag::null_in_char);

  Result.setKind(tok::char_constant);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr);
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
void Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
  // Whitespace - Skip it, then return the token after the whitespace.
  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
  while (1) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we something other than whitespace, we're done.
    if (Char != '\n' && Char != '\r')
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return;
    }

    // ok, but handle newline.
    // The returned token is at the start of the line.
    Result.setFlag(Token::StartOfLine);
    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);
    Char = *++CurPtr;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  if (PrevChar != '\n' && PrevChar != '\r')
    Result.setFlag(Token::LeadingSpace);

  BufferPtr = CurPtr;
}

/// SkipBCPLComment - We have just read the // characters from input.  Skip
/// until we find the newline character that terminates the comment.  Then
/// update BufferPtr and return.  Returns false if the comment was saved as a
/// token (KeepCommentMode), true otherwise.
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment) {
    Diag(BufferPtr, diag::ext_bcpl_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is
  // that the comment contains normal ascii characters with nothing interesting
  // in them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: Speedup BCPL comment lexing.  Just scan for a \n or \r character.
    // If we find a \n character, scan backwards, checking to see if it's an
    // escaped newline, like we do for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.
    const char *OldPtr = CurPtr;
    C = getAndAdvanceChar(CurPtr, Result);

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a
          // diagnostic.
          if (isspace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isspace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
          break;
        }
    }

    // Hit the end-of-buffer sentinel NUL: back up onto it and stop.
    if (CurPtr == BufferEnd+1) { --CurPtr; break; }
  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.

  // If we are returning comments as tokens, return this comment as a token.
  if (KeepCommentMode)
    return SaveBCPLComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return true;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return true;
}

/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
/// an appropriate way and return it.  Always returns false (the comment token
/// is the lexed result).
bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
  Result.setKind(tok::comment);
  FormTokenWithChars(Result, CurPtr);

  // If this BCPL-style comment is in a macro definition, transmogrify it into
  // a C-style block comment, since a // comment would otherwise swallow the
  // rest of the expansion.
  if (ParsingPreprocessorDirective) {
    std::string Spelling = PP->getSpelling(Result);
    assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
    Spelling[1] = '*';   // Change prefix to "/*".
    Spelling += "*/";    // add suffix.

    Result.setLocation(PP->CreateString(&Spelling[0], Spelling.size(),
                                        Result.getLocation()));
    Result.setLength(Spelling.size());
  }
  return false;
}

/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
/// newline character (either \n or \r) is part of an escaped newline sequence
/// inside a "*<escaped newline>/" block-comment terminator.  Issue a
/// diagnostic if so.  We know that CurPtr is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the slash and newline.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    // Only an escaped newline directly after a '*' can end the comment.
    if (CurPtr[-1] != '*') return false;
  } else {
    // It isn't a slash, is it the ?? / trigraph?
    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
        CurPtr[-3] != '*')
      return false;

    // This is the trigraph ending the comment.  Emit a stern warning!
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getFeatures().Trigraphs) {
      L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
      return false;
    }
    L->Diag(CurPtr, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  L->Diag(CurPtr, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace) L->Diag(CurPtr, diag::backslash_newline_space);

  return true;
}

// Pull in SIMD headers for the vectorized '/'-scan in SkipBlockComment.
#ifdef __SSE2__
#include <emmintrin.h>
#elif __ALTIVEC__
#include <altivec.h>
#undef bool
#endif

/// SkipBlockComment - We have just read the /* characters from input.  Read
/// until we find the */ characters that terminate the comment.  Note that we
/// don't bother decoding trigraphs or escaped newlines in block comments,
/// because they cannot cause the comment to end.  The only thing that can
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
  // Scan one character past where we should, looking for a '/' character.
  // Once we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
930 unsigned CharSize; 931 unsigned char C = getCharAndSize(CurPtr, CharSize); 932 CurPtr += CharSize; 933 if (C == 0 && CurPtr == BufferEnd+1) { 934 if (!LexingRawMode) 935 Diag(BufferPtr, diag::err_unterminated_block_comment); 936 BufferPtr = CurPtr-1; 937 return true; 938 } 939 940 // Check to see if the first character after the '/*' is another /. If so, 941 // then this slash does not end the block comment, it is part of it. 942 if (C == '/') 943 C = *CurPtr++; 944 945 while (1) { 946 // Skip over all non-interesting characters until we find end of buffer or a 947 // (probably ending) '/' character. 948 if (CurPtr + 24 < BufferEnd) { 949 // While not aligned to a 16-byte boundary. 950 while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0) 951 C = *CurPtr++; 952 953 if (C == '/') goto FoundSlash; 954 955#ifdef __SSE2__ 956 __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/', 957 '/', '/', '/', '/', '/', '/', '/', '/'); 958 while (CurPtr+16 <= BufferEnd && 959 _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0) 960 CurPtr += 16; 961#elif __ALTIVEC__ 962 __vector unsigned char Slashes = { 963 '/', '/', '/', '/', '/', '/', '/', '/', 964 '/', '/', '/', '/', '/', '/', '/', '/' 965 }; 966 while (CurPtr+16 <= BufferEnd && 967 !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes)) 968 CurPtr += 16; 969#else 970 // Scan for '/' quickly. Many block comments are very large. 971 while (CurPtr[0] != '/' && 972 CurPtr[1] != '/' && 973 CurPtr[2] != '/' && 974 CurPtr[3] != '/' && 975 CurPtr+4 < BufferEnd) { 976 CurPtr += 4; 977 } 978#endif 979 980 // It has to be one of the bytes scanned, increment to it and read one. 981 C = *CurPtr++; 982 } 983 984 // Loop to scan the remainder. 985 while (C != '/' && C != '\0') 986 C = *CurPtr++; 987 988 FoundSlash: 989 if (C == '/') { 990 if (CurPtr[-2] == '*') // We found the final */. We're done! 
991 break; 992 993 if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) { 994 if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) { 995 // We found the final */, though it had an escaped newline between the 996 // * and /. We're done! 997 break; 998 } 999 } 1000 if (CurPtr[0] == '*' && CurPtr[1] != '/') { 1001 // If this is a /* inside of the comment, emit a warning. Don't do this 1002 // if this is a /*/, which will end the comment. This misses cases with 1003 // embedded escaped newlines, but oh well. 1004 Diag(CurPtr-1, diag::warn_nested_block_comment); 1005 } 1006 } else if (C == 0 && CurPtr == BufferEnd+1) { 1007 if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_block_comment); 1008 // Note: the user probably forgot a */. We could continue immediately 1009 // after the /*, but this would involve lexing a lot of what really is the 1010 // comment, which surely would confuse the parser. 1011 BufferPtr = CurPtr-1; 1012 return true; 1013 } 1014 C = *CurPtr++; 1015 } 1016 1017 // If we are returning comments as tokens, return this comment as a token. 1018 if (KeepCommentMode) { 1019 Result.setKind(tok::comment); 1020 FormTokenWithChars(Result, CurPtr); 1021 return false; 1022 } 1023 1024 // It is common for the tokens immediately after a /**/ comment to be 1025 // whitespace. Instead of going through the big switch, handle it 1026 // efficiently now. 1027 if (isHorizontalWhitespace(*CurPtr)) { 1028 Result.setFlag(Token::LeadingSpace); 1029 SkipWhitespace(Result, CurPtr+1); 1030 return true; 1031 } 1032 1033 // Otherwise, just return so that the next character will be lexed as a token. 
1034 BufferPtr = CurPtr; 1035 Result.setFlag(Token::LeadingSpace); 1036 return true; 1037} 1038 1039//===----------------------------------------------------------------------===// 1040// Primary Lexing Entry Points 1041//===----------------------------------------------------------------------===// 1042 1043/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and 1044/// (potentially) macro expand the filename. 1045void Lexer::LexIncludeFilename(Token &FilenameTok) { 1046 assert(ParsingPreprocessorDirective && 1047 ParsingFilename == false && 1048 "Must be in a preprocessing directive!"); 1049 1050 // We are now parsing a filename! 1051 ParsingFilename = true; 1052 1053 // Lex the filename. 1054 Lex(FilenameTok); 1055 1056 // We should have obtained the filename now. 1057 ParsingFilename = false; 1058 1059 // No filename? 1060 if (FilenameTok.is(tok::eom)) 1061 Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename); 1062} 1063 1064/// ReadToEndOfLine - Read the rest of the current preprocessor line as an 1065/// uninterpreted string. This switches the lexer out of directive mode. 1066std::string Lexer::ReadToEndOfLine() { 1067 assert(ParsingPreprocessorDirective && ParsingFilename == false && 1068 "Must be in a preprocessing directive!"); 1069 std::string Result; 1070 Token Tmp; 1071 1072 // CurPtr - Cache BufferPtr in an automatic variable. 1073 const char *CurPtr = BufferPtr; 1074 while (1) { 1075 char Char = getAndAdvanceChar(CurPtr, Tmp); 1076 switch (Char) { 1077 default: 1078 Result += Char; 1079 break; 1080 case 0: // Null. 1081 // Found end of file? 1082 if (CurPtr-1 != BufferEnd) { 1083 // Nope, normal character, continue. 1084 Result += Char; 1085 break; 1086 } 1087 // FALL THROUGH. 1088 case '\r': 1089 case '\n': 1090 // Okay, we found the end of the line. First, back up past the \0, \r, \n. 
1091 assert(CurPtr[-1] == Char && "Trigraphs for newline?"); 1092 BufferPtr = CurPtr-1; 1093 1094 // Next, lex the character, which should handle the EOM transition. 1095 Lex(Tmp); 1096 assert(Tmp.is(tok::eom) && "Unexpected token!"); 1097 1098 // Finally, we're done, return the string we found. 1099 return Result; 1100 } 1101 } 1102} 1103 1104/// LexEndOfFile - CurPtr points to the end of this file. Handle this 1105/// condition, reporting diagnostics and handling other edge cases as required. 1106/// This returns true if Result contains a token, false if PP.Lex should be 1107/// called again. 1108bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) { 1109 // If we hit the end of the file while parsing a preprocessor directive, 1110 // end the preprocessor directive first. The next token returned will 1111 // then be the end of file. 1112 if (ParsingPreprocessorDirective) { 1113 // Done parsing the "line". 1114 ParsingPreprocessorDirective = false; 1115 Result.setKind(tok::eom); 1116 // Update the location of token as well as BufferPtr. 1117 FormTokenWithChars(Result, CurPtr); 1118 1119 // Restore comment saving mode, in case it was disabled for directive. 1120 KeepCommentMode = PP->getCommentRetentionState(); 1121 return true; // Have a token. 1122 } 1123 1124 // If we are in raw mode, return this event as an EOF token. Let the caller 1125 // that put us in raw mode handle the event. 1126 if (LexingRawMode) { 1127 Result.startToken(); 1128 BufferPtr = BufferEnd; 1129 FormTokenWithChars(Result, BufferEnd); 1130 Result.setKind(tok::eof); 1131 return true; 1132 } 1133 1134 // Otherwise, issue diagnostics for unterminated #if and missing newline. 1135 1136 // If we are in a #if directive, emit an error. 
1137 while (!ConditionalStack.empty()) { 1138 Diag(ConditionalStack.back().IfLoc, diag::err_pp_unterminated_conditional); 1139 ConditionalStack.pop_back(); 1140 } 1141 1142 // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue 1143 // a pedwarn. 1144 if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) 1145 Diag(BufferEnd, diag::ext_no_newline_eof); 1146 1147 BufferPtr = CurPtr; 1148 1149 // Finally, let the preprocessor handle this. 1150 return PP->HandleEndOfFile(Result); 1151} 1152 1153/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from 1154/// the specified lexer will return a tok::l_paren token, 0 if it is something 1155/// else and 2 if there are no more tokens in the buffer controlled by the 1156/// lexer. 1157unsigned Lexer::isNextPPTokenLParen() { 1158 assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?"); 1159 1160 // Switch to 'skipping' mode. This will ensure that we can lex a token 1161 // without emitting diagnostics, disables macro expansion, and will cause EOF 1162 // to return an EOF token instead of popping the include stack. 1163 LexingRawMode = true; 1164 1165 // Save state that can be changed while lexing so that we can restore it. 1166 const char *TmpBufferPtr = BufferPtr; 1167 1168 Token Tok; 1169 Tok.startToken(); 1170 LexTokenInternal(Tok); 1171 1172 // Restore state that may have changed. 1173 BufferPtr = TmpBufferPtr; 1174 1175 // Restore the lexer back to non-skipping mode. 1176 LexingRawMode = false; 1177 1178 if (Tok.is(tok::eof)) 1179 return 2; 1180 return Tok.is(tok::l_paren); 1181} 1182 1183 1184/// LexTokenInternal - This implements a simple C family lexer. It is an 1185/// extremely performance critical piece of code. This assumes that the buffer 1186/// has a null character at the end of the file. Return true if an error 1187/// occurred and compilation should terminate, false if normal. 
This returns a 1188/// preprocessing token, not a normal token, as such, it is an internal 1189/// interface. It assumes that the Flags of result have been cleared before 1190/// calling this. 1191void Lexer::LexTokenInternal(Token &Result) { 1192LexNextToken: 1193 // New token, can't need cleaning yet. 1194 Result.clearFlag(Token::NeedsCleaning); 1195 Result.setIdentifierInfo(0); 1196 1197 // CurPtr - Cache BufferPtr in an automatic variable. 1198 const char *CurPtr = BufferPtr; 1199 1200 // Small amounts of horizontal whitespace is very common between tokens. 1201 if ((*CurPtr == ' ') || (*CurPtr == '\t')) { 1202 ++CurPtr; 1203 while ((*CurPtr == ' ') || (*CurPtr == '\t')) 1204 ++CurPtr; 1205 BufferPtr = CurPtr; 1206 Result.setFlag(Token::LeadingSpace); 1207 } 1208 1209 unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below. 1210 1211 // Read a character, advancing over it. 1212 char Char = getAndAdvanceChar(CurPtr, Result); 1213 switch (Char) { 1214 case 0: // Null. 1215 // Found end of file? 1216 if (CurPtr-1 == BufferEnd) { 1217 // Read the PP instance variable into an automatic variable, because 1218 // LexEndOfFile will often delete 'this'. 1219 Preprocessor *PPCache = PP; 1220 if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file. 1221 return; // Got a token to return. 1222 assert(PPCache && "Raw buffer::LexEndOfFile should return a token"); 1223 return PPCache->Lex(Result); 1224 } 1225 1226 Diag(CurPtr-1, diag::null_in_file); 1227 Result.setFlag(Token::LeadingSpace); 1228 SkipWhitespace(Result, CurPtr); 1229 goto LexNextToken; // GCC isn't tail call eliminating. 1230 case '\n': 1231 case '\r': 1232 // If we are inside a preprocessor directive and we see the end of line, 1233 // we know we are done with the directive, so return an EOM token. 1234 if (ParsingPreprocessorDirective) { 1235 // Done parsing the "line". 
1236 ParsingPreprocessorDirective = false; 1237 1238 // Restore comment saving mode, in case it was disabled for directive. 1239 KeepCommentMode = PP->getCommentRetentionState(); 1240 1241 // Since we consumed a newline, we are back at the start of a line. 1242 IsAtStartOfLine = true; 1243 1244 Result.setKind(tok::eom); 1245 break; 1246 } 1247 // The returned token is at the start of the line. 1248 Result.setFlag(Token::StartOfLine); 1249 // No leading whitespace seen so far. 1250 Result.clearFlag(Token::LeadingSpace); 1251 SkipWhitespace(Result, CurPtr); 1252 goto LexNextToken; // GCC isn't tail call eliminating. 1253 case ' ': 1254 case '\t': 1255 case '\f': 1256 case '\v': 1257 SkipHorizontalWhitespace: 1258 Result.setFlag(Token::LeadingSpace); 1259 SkipWhitespace(Result, CurPtr); 1260 1261 SkipIgnoredUnits: 1262 CurPtr = BufferPtr; 1263 1264 // If the next token is obviously a // or /* */ comment, skip it efficiently 1265 // too (without going through the big switch stmt). 1266 if (CurPtr[0] == '/' && CurPtr[1] == '/' && !KeepCommentMode) { 1267 SkipBCPLComment(Result, CurPtr+2); 1268 goto SkipIgnoredUnits; 1269 } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !KeepCommentMode) { 1270 SkipBlockComment(Result, CurPtr+2); 1271 goto SkipIgnoredUnits; 1272 } else if (isHorizontalWhitespace(*CurPtr)) { 1273 goto SkipHorizontalWhitespace; 1274 } 1275 goto LexNextToken; // GCC isn't tail call eliminating. 1276 1277 // C99 6.4.4.1: Integer Constants. 1278 // C99 6.4.4.2: Floating Constants. 1279 case '0': case '1': case '2': case '3': case '4': 1280 case '5': case '6': case '7': case '8': case '9': 1281 // Notify MIOpt that we read a non-whitespace/non-comment token. 1282 MIOpt.ReadToken(); 1283 return LexNumericConstant(Result, CurPtr); 1284 1285 case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz"). 1286 // Notify MIOpt that we read a non-whitespace/non-comment token. 
1287 MIOpt.ReadToken(); 1288 Char = getCharAndSize(CurPtr, SizeTmp); 1289 1290 // Wide string literal. 1291 if (Char == '"') 1292 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 1293 true); 1294 1295 // Wide character constant. 1296 if (Char == '\'') 1297 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result)); 1298 // FALL THROUGH, treating L like the start of an identifier. 1299 1300 // C99 6.4.2: Identifiers. 1301 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': 1302 case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N': 1303 case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': 1304 case 'V': case 'W': case 'X': case 'Y': case 'Z': 1305 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': 1306 case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': 1307 case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': 1308 case 'v': case 'w': case 'x': case 'y': case 'z': 1309 case '_': 1310 // Notify MIOpt that we read a non-whitespace/non-comment token. 1311 MIOpt.ReadToken(); 1312 return LexIdentifier(Result, CurPtr); 1313 1314 case '$': // $ in identifiers. 1315 if (Features.DollarIdents) { 1316 Diag(CurPtr-1, diag::ext_dollar_in_identifier); 1317 // Notify MIOpt that we read a non-whitespace/non-comment token. 1318 MIOpt.ReadToken(); 1319 return LexIdentifier(Result, CurPtr); 1320 } 1321 1322 Result.setKind(tok::unknown); 1323 break; 1324 1325 // C99 6.4.4: Character Constants. 1326 case '\'': 1327 // Notify MIOpt that we read a non-whitespace/non-comment token. 1328 MIOpt.ReadToken(); 1329 return LexCharConstant(Result, CurPtr); 1330 1331 // C99 6.4.5: String Literals. 1332 case '"': 1333 // Notify MIOpt that we read a non-whitespace/non-comment token. 1334 MIOpt.ReadToken(); 1335 return LexStringLiteral(Result, CurPtr, false); 1336 1337 // C99 6.4.6: Punctuators. 
1338 case '?': 1339 Result.setKind(tok::question); 1340 break; 1341 case '[': 1342 Result.setKind(tok::l_square); 1343 break; 1344 case ']': 1345 Result.setKind(tok::r_square); 1346 break; 1347 case '(': 1348 Result.setKind(tok::l_paren); 1349 break; 1350 case ')': 1351 Result.setKind(tok::r_paren); 1352 break; 1353 case '{': 1354 Result.setKind(tok::l_brace); 1355 break; 1356 case '}': 1357 Result.setKind(tok::r_brace); 1358 break; 1359 case '.': 1360 Char = getCharAndSize(CurPtr, SizeTmp); 1361 if (Char >= '0' && Char <= '9') { 1362 // Notify MIOpt that we read a non-whitespace/non-comment token. 1363 MIOpt.ReadToken(); 1364 1365 return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result)); 1366 } else if (Features.CPlusPlus && Char == '*') { 1367 Result.setKind(tok::periodstar); 1368 CurPtr += SizeTmp; 1369 } else if (Char == '.' && 1370 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') { 1371 Result.setKind(tok::ellipsis); 1372 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1373 SizeTmp2, Result); 1374 } else { 1375 Result.setKind(tok::period); 1376 } 1377 break; 1378 case '&': 1379 Char = getCharAndSize(CurPtr, SizeTmp); 1380 if (Char == '&') { 1381 Result.setKind(tok::ampamp); 1382 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1383 } else if (Char == '=') { 1384 Result.setKind(tok::ampequal); 1385 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1386 } else { 1387 Result.setKind(tok::amp); 1388 } 1389 break; 1390 case '*': 1391 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 1392 Result.setKind(tok::starequal); 1393 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1394 } else { 1395 Result.setKind(tok::star); 1396 } 1397 break; 1398 case '+': 1399 Char = getCharAndSize(CurPtr, SizeTmp); 1400 if (Char == '+') { 1401 Result.setKind(tok::plusplus); 1402 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1403 } else if (Char == '=') { 1404 Result.setKind(tok::plusequal); 1405 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1406 } else { 1407 
Result.setKind(tok::plus); 1408 } 1409 break; 1410 case '-': 1411 Char = getCharAndSize(CurPtr, SizeTmp); 1412 if (Char == '-') { 1413 Result.setKind(tok::minusminus); 1414 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1415 } else if (Char == '>' && Features.CPlusPlus && 1416 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { 1417 Result.setKind(tok::arrowstar); // C++ ->* 1418 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1419 SizeTmp2, Result); 1420 } else if (Char == '>') { 1421 Result.setKind(tok::arrow); 1422 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1423 } else if (Char == '=') { 1424 Result.setKind(tok::minusequal); 1425 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1426 } else { 1427 Result.setKind(tok::minus); 1428 } 1429 break; 1430 case '~': 1431 Result.setKind(tok::tilde); 1432 break; 1433 case '!': 1434 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 1435 Result.setKind(tok::exclaimequal); 1436 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1437 } else { 1438 Result.setKind(tok::exclaim); 1439 } 1440 break; 1441 case '/': 1442 // 6.4.9: Comments 1443 Char = getCharAndSize(CurPtr, SizeTmp); 1444 if (Char == '/') { // BCPL comment. 1445 if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) { 1446 // It is common for the tokens immediately after a // comment to be 1447 // whitespace (indentation for the next line). Instead of going through 1448 // the big switch, handle it efficiently now. 1449 goto SkipIgnoredUnits; 1450 } 1451 return; // KeepCommentMode 1452 } else if (Char == '*') { // /**/ comment. 1453 if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) 1454 goto LexNextToken; // GCC isn't tail call eliminating. 
1455 return; // KeepCommentMode 1456 } else if (Char == '=') { 1457 Result.setKind(tok::slashequal); 1458 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1459 } else { 1460 Result.setKind(tok::slash); 1461 } 1462 break; 1463 case '%': 1464 Char = getCharAndSize(CurPtr, SizeTmp); 1465 if (Char == '=') { 1466 Result.setKind(tok::percentequal); 1467 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1468 } else if (Features.Digraphs && Char == '>') { 1469 Result.setKind(tok::r_brace); // '%>' -> '}' 1470 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1471 } else if (Features.Digraphs && Char == ':') { 1472 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1473 Char = getCharAndSize(CurPtr, SizeTmp); 1474 if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') { 1475 Result.setKind(tok::hashhash); // '%:%:' -> '##' 1476 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1477 SizeTmp2, Result); 1478 } else if (Char == '@' && Features.Microsoft) { // %:@ -> #@ -> Charize 1479 Result.setKind(tok::hashat); 1480 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1481 Diag(BufferPtr, diag::charize_microsoft_ext); 1482 } else { 1483 Result.setKind(tok::hash); // '%:' -> '#' 1484 1485 // We parsed a # character. If this occurs at the start of the line, 1486 // it's actually the start of a preprocessing directive. Callback to 1487 // the preprocessor to handle it. 1488 // FIXME: -fpreprocessed mode?? 1489 if (Result.isAtStartOfLine() && !LexingRawMode) { 1490 BufferPtr = CurPtr; 1491 PP->HandleDirective(Result); 1492 1493 // As an optimization, if the preprocessor didn't switch lexers, tail 1494 // recurse. 1495 if (PP->isCurrentLexer(this)) { 1496 // Start a new token. If this is a #include or something, the PP may 1497 // want us starting at the beginning of the line again. If so, set 1498 // the StartOfLine flag. 
1499 if (IsAtStartOfLine) { 1500 Result.setFlag(Token::StartOfLine); 1501 IsAtStartOfLine = false; 1502 } 1503 goto LexNextToken; // GCC isn't tail call eliminating. 1504 } 1505 1506 return PP->Lex(Result); 1507 } 1508 } 1509 } else { 1510 Result.setKind(tok::percent); 1511 } 1512 break; 1513 case '<': 1514 Char = getCharAndSize(CurPtr, SizeTmp); 1515 if (ParsingFilename) { 1516 return LexAngledStringLiteral(Result, CurPtr+SizeTmp); 1517 } else if (Char == '<' && 1518 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') { 1519 Result.setKind(tok::lesslessequal); 1520 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1521 SizeTmp2, Result); 1522 } else if (Char == '<') { 1523 Result.setKind(tok::lessless); 1524 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1525 } else if (Char == '=') { 1526 Result.setKind(tok::lessequal); 1527 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1528 } else if (Features.Digraphs && Char == ':') { 1529 Result.setKind(tok::l_square); // '<:' -> '[' 1530 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1531 } else if (Features.Digraphs && Char == '%') { 1532 Result.setKind(tok::l_brace); // '<%' -> '{' 1533 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1534 } else { 1535 Result.setKind(tok::less); 1536 } 1537 break; 1538 case '>': 1539 Char = getCharAndSize(CurPtr, SizeTmp); 1540 if (Char == '=') { 1541 Result.setKind(tok::greaterequal); 1542 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1543 } else if (Char == '>' && 1544 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') { 1545 Result.setKind(tok::greatergreaterequal); 1546 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1547 SizeTmp2, Result); 1548 } else if (Char == '>') { 1549 Result.setKind(tok::greatergreater); 1550 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1551 } else { 1552 Result.setKind(tok::greater); 1553 } 1554 break; 1555 case '^': 1556 Char = getCharAndSize(CurPtr, SizeTmp); 1557 if (Char == '=') { 1558 Result.setKind(tok::caretequal); 1559 CurPtr = 
ConsumeChar(CurPtr, SizeTmp, Result); 1560 } else { 1561 Result.setKind(tok::caret); 1562 } 1563 break; 1564 case '|': 1565 Char = getCharAndSize(CurPtr, SizeTmp); 1566 if (Char == '=') { 1567 Result.setKind(tok::pipeequal); 1568 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1569 } else if (Char == '|') { 1570 Result.setKind(tok::pipepipe); 1571 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1572 } else { 1573 Result.setKind(tok::pipe); 1574 } 1575 break; 1576 case ':': 1577 Char = getCharAndSize(CurPtr, SizeTmp); 1578 if (Features.Digraphs && Char == '>') { 1579 Result.setKind(tok::r_square); // ':>' -> ']' 1580 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1581 } else if (Features.CPlusPlus && Char == ':') { 1582 Result.setKind(tok::coloncolon); 1583 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1584 } else { 1585 Result.setKind(tok::colon); 1586 } 1587 break; 1588 case ';': 1589 Result.setKind(tok::semi); 1590 break; 1591 case '=': 1592 Char = getCharAndSize(CurPtr, SizeTmp); 1593 if (Char == '=') { 1594 Result.setKind(tok::equalequal); 1595 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1596 } else { 1597 Result.setKind(tok::equal); 1598 } 1599 break; 1600 case ',': 1601 Result.setKind(tok::comma); 1602 break; 1603 case '#': 1604 Char = getCharAndSize(CurPtr, SizeTmp); 1605 if (Char == '#') { 1606 Result.setKind(tok::hashhash); 1607 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1608 } else if (Char == '@' && Features.Microsoft) { // #@ -> Charize 1609 Result.setKind(tok::hashat); 1610 Diag(BufferPtr, diag::charize_microsoft_ext); 1611 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1612 } else { 1613 Result.setKind(tok::hash); 1614 // We parsed a # character. If this occurs at the start of the line, 1615 // it's actually the start of a preprocessing directive. Callback to 1616 // the preprocessor to handle it. 1617 // FIXME: -fpreprocessed mode?? 
1618 if (Result.isAtStartOfLine() && !LexingRawMode) { 1619 BufferPtr = CurPtr; 1620 PP->HandleDirective(Result); 1621 1622 // As an optimization, if the preprocessor didn't switch lexers, tail 1623 // recurse. 1624 if (PP->isCurrentLexer(this)) { 1625 // Start a new token. If this is a #include or something, the PP may 1626 // want us starting at the beginning of the line again. If so, set 1627 // the StartOfLine flag. 1628 if (IsAtStartOfLine) { 1629 Result.setFlag(Token::StartOfLine); 1630 IsAtStartOfLine = false; 1631 } 1632 goto LexNextToken; // GCC isn't tail call eliminating. 1633 } 1634 return PP->Lex(Result); 1635 } 1636 } 1637 break; 1638 1639 case '@': 1640 // Objective C support. 1641 if (CurPtr[-1] == '@' && Features.ObjC1) 1642 Result.setKind(tok::at); 1643 else 1644 Result.setKind(tok::unknown); 1645 break; 1646 1647 case '\\': 1648 // FIXME: UCN's. 1649 // FALL THROUGH. 1650 default: 1651 Result.setKind(tok::unknown); 1652 break; 1653 } 1654 1655 // Notify MIOpt that we read a non-whitespace/non-comment token. 1656 MIOpt.ReadToken(); 1657 1658 // Update the location of token as well as BufferPtr. 1659 FormTokenWithChars(Result, CurPtr); 1660} 1661