Lexer.cpp revision dd81731d82a33a242e7b088e249e56873ef52807
1//===--- Lexer.cpp - C Language Family Lexer ------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the Lexer and Token interfaces. 11// 12//===----------------------------------------------------------------------===// 13// 14// TODO: GCC Diagnostics emitted by the lexer: 15// PEDWARN: (form feed|vertical tab) in preprocessing directive 16// 17// Universal characters, unicode, char mapping: 18// WARNING: `%.*s' is not in NFKC 19// WARNING: `%.*s' is not in NFC 20// 21// Other: 22// TODO: Options to support: 23// -fexec-charset,-fwide-exec-charset 24// 25//===----------------------------------------------------------------------===// 26 27#include "clang/Lex/Lexer.h" 28#include "clang/Lex/Preprocessor.h" 29#include "clang/Lex/LexDiagnostic.h" 30#include "clang/Lex/CodeCompletionHandler.h" 31#include "clang/Basic/SourceManager.h" 32#include "llvm/ADT/StringSwitch.h" 33#include "llvm/ADT/STLExtras.h" 34#include "llvm/Support/Compiler.h" 35#include "llvm/Support/MemoryBuffer.h" 36#include <cstring> 37using namespace clang; 38 39static void InitCharacterInfo(); 40 41//===----------------------------------------------------------------------===// 42// Token Class Implementation 43//===----------------------------------------------------------------------===// 44 45/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier. 46bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const { 47 if (IdentifierInfo *II = getIdentifierInfo()) 48 return II->getObjCKeywordID() == objcKey; 49 return false; 50} 51 52/// getObjCKeywordID - Return the ObjC keyword kind. 53tok::ObjCKeywordKind Token::getObjCKeywordID() const { 54 IdentifierInfo *specId = getIdentifierInfo(); 55 return specId ? 
specId->getObjCKeywordID() : tok::objc_not_keyword; 56} 57 58 59//===----------------------------------------------------------------------===// 60// Lexer Class Implementation 61//===----------------------------------------------------------------------===// 62 63void Lexer::anchor() { } 64 65void Lexer::InitLexer(const char *BufStart, const char *BufPtr, 66 const char *BufEnd) { 67 InitCharacterInfo(); 68 69 BufferStart = BufStart; 70 BufferPtr = BufPtr; 71 BufferEnd = BufEnd; 72 73 assert(BufEnd[0] == 0 && 74 "We assume that the input buffer has a null character at the end" 75 " to simplify lexing!"); 76 77 // Check whether we have a BOM in the beginning of the buffer. If yes - act 78 // accordingly. Right now we support only UTF-8 with and without BOM, so, just 79 // skip the UTF-8 BOM if it's present. 80 if (BufferStart == BufferPtr) { 81 // Determine the size of the BOM. 82 StringRef Buf(BufferStart, BufferEnd - BufferStart); 83 size_t BOMLength = llvm::StringSwitch<size_t>(Buf) 84 .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM 85 .Default(0); 86 87 // Skip the BOM. 88 BufferPtr += BOMLength; 89 } 90 91 Is_PragmaLexer = false; 92 CurrentConflictMarkerState = CMK_None; 93 94 // Start of the file is a start of line. 95 IsAtStartOfLine = true; 96 97 // We are not after parsing a #. 98 ParsingPreprocessorDirective = false; 99 100 // We are not after parsing #include. 101 ParsingFilename = false; 102 103 // We are not in raw mode. Raw mode disables diagnostics and interpretation 104 // of tokens (e.g. identifiers, thus disabling macro expansion). It is used 105 // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block 106 // or otherwise skipping over tokens. 107 LexingRawMode = false; 108 109 // Default to not keeping comments. 110 ExtendedTokenMode = 0; 111} 112 113/// Lexer constructor - Create a new lexer object for the specified buffer 114/// with the specified preprocessor managing the lexing process. 
This lexer 115/// assumes that the associated file buffer and Preprocessor objects will 116/// outlive it, so it doesn't take ownership of either of them. 117Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP) 118 : PreprocessorLexer(&PP, FID), 119 FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)), 120 LangOpts(PP.getLangOpts()) { 121 122 InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(), 123 InputFile->getBufferEnd()); 124 125 // Default to keeping comments if the preprocessor wants them. 126 SetCommentRetentionState(PP.getCommentRetentionState()); 127} 128 129/// Lexer constructor - Create a new raw lexer object. This object is only 130/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text 131/// range will outlive it, so it doesn't take ownership of it. 132Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts, 133 const char *BufStart, const char *BufPtr, const char *BufEnd) 134 : FileLoc(fileloc), LangOpts(langOpts) { 135 136 InitLexer(BufStart, BufPtr, BufEnd); 137 138 // We *are* in raw mode. 139 LexingRawMode = true; 140} 141 142/// Lexer constructor - Create a new raw lexer object. This object is only 143/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text 144/// range will outlive it, so it doesn't take ownership of it. 145Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile, 146 const SourceManager &SM, const LangOptions &langOpts) 147 : FileLoc(SM.getLocForStartOfFile(FID)), LangOpts(langOpts) { 148 149 InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(), 150 FromFile->getBufferEnd()); 151 152 // We *are* in raw mode. 153 LexingRawMode = true; 154} 155 156/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for 157/// _Pragma expansion. This has a variety of magic semantics that this method 158/// sets up. It returns a new'd Lexer that must be delete'd when done. 
159/// 160/// On entrance to this routine, TokStartLoc is a macro location which has a 161/// spelling loc that indicates the bytes to be lexed for the token and an 162/// expansion location that indicates where all lexed tokens should be 163/// "expanded from". 164/// 165/// FIXME: It would really be nice to make _Pragma just be a wrapper around a 166/// normal lexer that remaps tokens as they fly by. This would require making 167/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer 168/// interface that could handle this stuff. This would pull GetMappedTokenLoc 169/// out of the critical path of the lexer! 170/// 171Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc, 172 SourceLocation ExpansionLocStart, 173 SourceLocation ExpansionLocEnd, 174 unsigned TokLen, Preprocessor &PP) { 175 SourceManager &SM = PP.getSourceManager(); 176 177 // Create the lexer as if we were going to lex the file normally. 178 FileID SpellingFID = SM.getFileID(SpellingLoc); 179 const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID); 180 Lexer *L = new Lexer(SpellingFID, InputFile, PP); 181 182 // Now that the lexer is created, change the start/end locations so that we 183 // just lex the subsection of the file that we want. This is lexing from a 184 // scratch buffer. 185 const char *StrData = SM.getCharacterData(SpellingLoc); 186 187 L->BufferPtr = StrData; 188 L->BufferEnd = StrData+TokLen; 189 assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!"); 190 191 // Set the SourceLocation with the remapping information. This ensures that 192 // GetMappedTokenLoc will remap the tokens as they are lexed. 193 L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID), 194 ExpansionLocStart, 195 ExpansionLocEnd, TokLen); 196 197 // Ensure that the lexer thinks it is inside a directive, so that end \n will 198 // return an EOD token. 199 L->ParsingPreprocessorDirective = true; 200 201 // This lexer really is for _Pragma. 
202 L->Is_PragmaLexer = true; 203 return L; 204} 205 206 207/// Stringify - Convert the specified string into a C string, with surrounding 208/// ""'s, and with escaped \ and " characters. 209std::string Lexer::Stringify(const std::string &Str, bool Charify) { 210 std::string Result = Str; 211 char Quote = Charify ? '\'' : '"'; 212 for (unsigned i = 0, e = Result.size(); i != e; ++i) { 213 if (Result[i] == '\\' || Result[i] == Quote) { 214 Result.insert(Result.begin()+i, '\\'); 215 ++i; ++e; 216 } 217 } 218 return Result; 219} 220 221/// Stringify - Convert the specified string into a C string by escaping '\' 222/// and " characters. This does not add surrounding ""'s to the string. 223void Lexer::Stringify(SmallVectorImpl<char> &Str) { 224 for (unsigned i = 0, e = Str.size(); i != e; ++i) { 225 if (Str[i] == '\\' || Str[i] == '"') { 226 Str.insert(Str.begin()+i, '\\'); 227 ++i; ++e; 228 } 229 } 230} 231 232//===----------------------------------------------------------------------===// 233// Token Spelling 234//===----------------------------------------------------------------------===// 235 236/// getSpelling() - Return the 'spelling' of this token. The spelling of a 237/// token are the characters used to represent the token in the source file 238/// after trigraph expansion and escaped-newline folding. In particular, this 239/// wants to get the true, uncanonicalized, spelling of things like digraphs 240/// UCNs, etc. 241StringRef Lexer::getSpelling(SourceLocation loc, 242 SmallVectorImpl<char> &buffer, 243 const SourceManager &SM, 244 const LangOptions &options, 245 bool *invalid) { 246 // Break down the source location. 247 std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc); 248 249 // Try to the load the file buffer. 
250 bool invalidTemp = false; 251 StringRef file = SM.getBufferData(locInfo.first, &invalidTemp); 252 if (invalidTemp) { 253 if (invalid) *invalid = true; 254 return StringRef(); 255 } 256 257 const char *tokenBegin = file.data() + locInfo.second; 258 259 // Lex from the start of the given location. 260 Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options, 261 file.begin(), tokenBegin, file.end()); 262 Token token; 263 lexer.LexFromRawLexer(token); 264 265 unsigned length = token.getLength(); 266 267 // Common case: no need for cleaning. 268 if (!token.needsCleaning()) 269 return StringRef(tokenBegin, length); 270 271 // Hard case, we need to relex the characters into the string. 272 buffer.clear(); 273 buffer.reserve(length); 274 275 for (const char *ti = tokenBegin, *te = ti + length; ti != te; ) { 276 unsigned charSize; 277 buffer.push_back(Lexer::getCharAndSizeNoWarn(ti, charSize, options)); 278 ti += charSize; 279 } 280 281 return StringRef(buffer.data(), buffer.size()); 282} 283 284/// getSpelling() - Return the 'spelling' of this token. The spelling of a 285/// token are the characters used to represent the token in the source file 286/// after trigraph expansion and escaped-newline folding. In particular, this 287/// wants to get the true, uncanonicalized, spelling of things like digraphs 288/// UCNs, etc. 289std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr, 290 const LangOptions &LangOpts, bool *Invalid) { 291 assert((int)Tok.getLength() >= 0 && "Token character range is bogus!"); 292 293 // If this token contains nothing interesting, return it directly. 
294 bool CharDataInvalid = false; 295 const char* TokStart = SourceMgr.getCharacterData(Tok.getLocation(), 296 &CharDataInvalid); 297 if (Invalid) 298 *Invalid = CharDataInvalid; 299 if (CharDataInvalid) 300 return std::string(); 301 302 if (!Tok.needsCleaning()) 303 return std::string(TokStart, TokStart+Tok.getLength()); 304 305 std::string Result; 306 Result.reserve(Tok.getLength()); 307 308 // Otherwise, hard case, relex the characters into the string. 309 for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength(); 310 Ptr != End; ) { 311 unsigned CharSize; 312 Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, LangOpts)); 313 Ptr += CharSize; 314 } 315 assert(Result.size() != unsigned(Tok.getLength()) && 316 "NeedsCleaning flag set on something that didn't need cleaning!"); 317 return Result; 318} 319 320/// getSpelling - This method is used to get the spelling of a token into a 321/// preallocated buffer, instead of as an std::string. The caller is required 322/// to allocate enough space for the token, which is guaranteed to be at least 323/// Tok.getLength() bytes long. The actual length of the token is returned. 324/// 325/// Note that this method may do two possible things: it may either fill in 326/// the buffer specified with characters, or it may *change the input pointer* 327/// to point to a constant buffer with the data already in it (avoiding a 328/// copy). The caller is not allowed to modify the returned buffer pointer 329/// if an internal buffer is returned. 330unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer, 331 const SourceManager &SourceMgr, 332 const LangOptions &LangOpts, bool *Invalid) { 333 assert((int)Tok.getLength() >= 0 && "Token character range is bogus!"); 334 335 const char *TokStart = 0; 336 // NOTE: this has to be checked *before* testing for an IdentifierInfo. 
337 if (Tok.is(tok::raw_identifier)) 338 TokStart = Tok.getRawIdentifierData(); 339 else if (const IdentifierInfo *II = Tok.getIdentifierInfo()) { 340 // Just return the string from the identifier table, which is very quick. 341 Buffer = II->getNameStart(); 342 return II->getLength(); 343 } 344 345 // NOTE: this can be checked even after testing for an IdentifierInfo. 346 if (Tok.isLiteral()) 347 TokStart = Tok.getLiteralData(); 348 349 if (TokStart == 0) { 350 // Compute the start of the token in the input lexer buffer. 351 bool CharDataInvalid = false; 352 TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid); 353 if (Invalid) 354 *Invalid = CharDataInvalid; 355 if (CharDataInvalid) { 356 Buffer = ""; 357 return 0; 358 } 359 } 360 361 // If this token contains nothing interesting, return it directly. 362 if (!Tok.needsCleaning()) { 363 Buffer = TokStart; 364 return Tok.getLength(); 365 } 366 367 // Otherwise, hard case, relex the characters into the string. 368 char *OutBuf = const_cast<char*>(Buffer); 369 for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength(); 370 Ptr != End; ) { 371 unsigned CharSize; 372 *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, LangOpts); 373 Ptr += CharSize; 374 } 375 assert(unsigned(OutBuf-Buffer) != Tok.getLength() && 376 "NeedsCleaning flag set on something that didn't need cleaning!"); 377 378 return OutBuf-Buffer; 379} 380 381 382 383static bool isWhitespace(unsigned char c); 384 385/// MeasureTokenLength - Relex the token at the specified location and return 386/// its length in bytes in the input file. If the token needs cleaning (e.g. 387/// includes a trigraph or an escaped newline) then this count includes bytes 388/// that are part of that. 389unsigned Lexer::MeasureTokenLength(SourceLocation Loc, 390 const SourceManager &SM, 391 const LangOptions &LangOpts) { 392 // TODO: this could be special cased for common tokens like identifiers, ')', 393 // etc to make this faster, if it mattered. 
Just look at StrData[0] to handle 394 // all obviously single-char tokens. This could use 395 // Lexer::isObviouslySimpleCharacter for example to handle identifiers or 396 // something. 397 398 // If this comes from a macro expansion, we really do want the macro name, not 399 // the token this macro expanded to. 400 Loc = SM.getExpansionLoc(Loc); 401 std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc); 402 bool Invalid = false; 403 StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid); 404 if (Invalid) 405 return 0; 406 407 const char *StrData = Buffer.data()+LocInfo.second; 408 409 if (isWhitespace(StrData[0])) 410 return 0; 411 412 // Create a lexer starting at the beginning of this token. 413 Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, 414 Buffer.begin(), StrData, Buffer.end()); 415 TheLexer.SetCommentRetentionState(true); 416 Token TheTok; 417 TheLexer.LexFromRawLexer(TheTok); 418 return TheTok.getLength(); 419} 420 421static SourceLocation getBeginningOfFileToken(SourceLocation Loc, 422 const SourceManager &SM, 423 const LangOptions &LangOpts) { 424 assert(Loc.isFileID()); 425 std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc); 426 if (LocInfo.first.isInvalid()) 427 return Loc; 428 429 bool Invalid = false; 430 StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid); 431 if (Invalid) 432 return Loc; 433 434 // Back up from the current location until we hit the beginning of a line 435 // (or the buffer). We'll relex from that point. 436 const char *BufStart = Buffer.data(); 437 if (LocInfo.second >= Buffer.size()) 438 return Loc; 439 440 const char *StrData = BufStart+LocInfo.second; 441 if (StrData[0] == '\n' || StrData[0] == '\r') 442 return Loc; 443 444 const char *LexStart = StrData; 445 while (LexStart != BufStart) { 446 if (LexStart[0] == '\n' || LexStart[0] == '\r') { 447 ++LexStart; 448 break; 449 } 450 451 --LexStart; 452 } 453 454 // Create a lexer starting at the beginning of this token. 
455 SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second); 456 Lexer TheLexer(LexerStartLoc, LangOpts, BufStart, LexStart, Buffer.end()); 457 TheLexer.SetCommentRetentionState(true); 458 459 // Lex tokens until we find the token that contains the source location. 460 Token TheTok; 461 do { 462 TheLexer.LexFromRawLexer(TheTok); 463 464 if (TheLexer.getBufferLocation() > StrData) { 465 // Lexing this token has taken the lexer past the source location we're 466 // looking for. If the current token encompasses our source location, 467 // return the beginning of that token. 468 if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData) 469 return TheTok.getLocation(); 470 471 // We ended up skipping over the source location entirely, which means 472 // that it points into whitespace. We're done here. 473 break; 474 } 475 } while (TheTok.getKind() != tok::eof); 476 477 // We've passed our source location; just return the original source location. 478 return Loc; 479} 480 481SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc, 482 const SourceManager &SM, 483 const LangOptions &LangOpts) { 484 if (Loc.isFileID()) 485 return getBeginningOfFileToken(Loc, SM, LangOpts); 486 487 if (!SM.isMacroArgExpansion(Loc)) 488 return Loc; 489 490 SourceLocation FileLoc = SM.getSpellingLoc(Loc); 491 SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts); 492 std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc); 493 std::pair<FileID, unsigned> BeginFileLocInfo 494 = SM.getDecomposedLoc(BeginFileLoc); 495 assert(FileLocInfo.first == BeginFileLocInfo.first && 496 FileLocInfo.second >= BeginFileLocInfo.second); 497 return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second); 498} 499 500namespace { 501 enum PreambleDirectiveKind { 502 PDK_Skipped, 503 PDK_StartIf, 504 PDK_EndIf, 505 PDK_Unknown 506 }; 507} 508 509std::pair<unsigned, bool> 510Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer, 511 const 
LangOptions &LangOpts, unsigned MaxLines) { 512 // Create a lexer starting at the beginning of the file. Note that we use a 513 // "fake" file source location at offset 1 so that the lexer will track our 514 // position within the file. 515 const unsigned StartOffset = 1; 516 SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset); 517 Lexer TheLexer(FileLoc, LangOpts, Buffer->getBufferStart(), 518 Buffer->getBufferStart(), Buffer->getBufferEnd()); 519 520 // StartLoc will differ from FileLoc if there is a BOM that was skipped. 521 SourceLocation StartLoc = TheLexer.getSourceLocation(); 522 523 bool InPreprocessorDirective = false; 524 Token TheTok; 525 Token IfStartTok; 526 unsigned IfCount = 0; 527 528 unsigned MaxLineOffset = 0; 529 if (MaxLines) { 530 const char *CurPtr = Buffer->getBufferStart(); 531 unsigned CurLine = 0; 532 while (CurPtr != Buffer->getBufferEnd()) { 533 char ch = *CurPtr++; 534 if (ch == '\n') { 535 ++CurLine; 536 if (CurLine == MaxLines) 537 break; 538 } 539 } 540 if (CurPtr != Buffer->getBufferEnd()) 541 MaxLineOffset = CurPtr - Buffer->getBufferStart(); 542 } 543 544 do { 545 TheLexer.LexFromRawLexer(TheTok); 546 547 if (InPreprocessorDirective) { 548 // If we've hit the end of the file, we're done. 549 if (TheTok.getKind() == tok::eof) { 550 break; 551 } 552 553 // If we haven't hit the end of the preprocessor directive, skip this 554 // token. 555 if (!TheTok.isAtStartOfLine()) 556 continue; 557 558 // We've passed the end of the preprocessor directive, and will look 559 // at this token again below. 560 InPreprocessorDirective = false; 561 } 562 563 // Keep track of the # of lines in the preamble. 564 if (TheTok.isAtStartOfLine()) { 565 unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset; 566 567 // If we were asked to limit the number of lines in the preamble, 568 // and we're about to exceed that limit, we're done. 
569 if (MaxLineOffset && TokOffset >= MaxLineOffset) 570 break; 571 } 572 573 // Comments are okay; skip over them. 574 if (TheTok.getKind() == tok::comment) 575 continue; 576 577 if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) { 578 // This is the start of a preprocessor directive. 579 Token HashTok = TheTok; 580 InPreprocessorDirective = true; 581 582 // Figure out which directive this is. Since we're lexing raw tokens, 583 // we don't have an identifier table available. Instead, just look at 584 // the raw identifier to recognize and categorize preprocessor directives. 585 TheLexer.LexFromRawLexer(TheTok); 586 if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) { 587 StringRef Keyword(TheTok.getRawIdentifierData(), 588 TheTok.getLength()); 589 PreambleDirectiveKind PDK 590 = llvm::StringSwitch<PreambleDirectiveKind>(Keyword) 591 .Case("include", PDK_Skipped) 592 .Case("__include_macros", PDK_Skipped) 593 .Case("define", PDK_Skipped) 594 .Case("undef", PDK_Skipped) 595 .Case("line", PDK_Skipped) 596 .Case("error", PDK_Skipped) 597 .Case("pragma", PDK_Skipped) 598 .Case("import", PDK_Skipped) 599 .Case("include_next", PDK_Skipped) 600 .Case("warning", PDK_Skipped) 601 .Case("ident", PDK_Skipped) 602 .Case("sccs", PDK_Skipped) 603 .Case("assert", PDK_Skipped) 604 .Case("unassert", PDK_Skipped) 605 .Case("if", PDK_StartIf) 606 .Case("ifdef", PDK_StartIf) 607 .Case("ifndef", PDK_StartIf) 608 .Case("elif", PDK_Skipped) 609 .Case("else", PDK_Skipped) 610 .Case("endif", PDK_EndIf) 611 .Default(PDK_Unknown); 612 613 switch (PDK) { 614 case PDK_Skipped: 615 continue; 616 617 case PDK_StartIf: 618 if (IfCount == 0) 619 IfStartTok = HashTok; 620 621 ++IfCount; 622 continue; 623 624 case PDK_EndIf: 625 // Mismatched #endif. The preamble ends here. 626 if (IfCount == 0) 627 break; 628 629 --IfCount; 630 continue; 631 632 case PDK_Unknown: 633 // We don't know what this directive is; stop at the '#'. 
634 break; 635 } 636 } 637 638 // We only end up here if we didn't recognize the preprocessor 639 // directive or it was one that can't occur in the preamble at this 640 // point. Roll back the current token to the location of the '#'. 641 InPreprocessorDirective = false; 642 TheTok = HashTok; 643 } 644 645 // We hit a token that we don't recognize as being in the 646 // "preprocessing only" part of the file, so we're no longer in 647 // the preamble. 648 break; 649 } while (true); 650 651 SourceLocation End = IfCount? IfStartTok.getLocation() : TheTok.getLocation(); 652 return std::make_pair(End.getRawEncoding() - StartLoc.getRawEncoding(), 653 IfCount? IfStartTok.isAtStartOfLine() 654 : TheTok.isAtStartOfLine()); 655} 656 657 658/// AdvanceToTokenCharacter - Given a location that specifies the start of a 659/// token, return a new location that specifies a character within the token. 660SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart, 661 unsigned CharNo, 662 const SourceManager &SM, 663 const LangOptions &LangOpts) { 664 // Figure out how many physical characters away the specified expansion 665 // character is. This needs to take into consideration newlines and 666 // trigraphs. 667 bool Invalid = false; 668 const char *TokPtr = SM.getCharacterData(TokStart, &Invalid); 669 670 // If they request the first char of the token, we're trivially done. 671 if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr))) 672 return TokStart; 673 674 unsigned PhysOffset = 0; 675 676 // The usual case is that tokens don't contain anything interesting. Skip 677 // over the uninteresting characters. If a token only consists of simple 678 // chars, this method is extremely fast. 
679 while (Lexer::isObviouslySimpleCharacter(*TokPtr)) { 680 if (CharNo == 0) 681 return TokStart.getLocWithOffset(PhysOffset); 682 ++TokPtr, --CharNo, ++PhysOffset; 683 } 684 685 // If we have a character that may be a trigraph or escaped newline, use a 686 // lexer to parse it correctly. 687 for (; CharNo; --CharNo) { 688 unsigned Size; 689 Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts); 690 TokPtr += Size; 691 PhysOffset += Size; 692 } 693 694 // Final detail: if we end up on an escaped newline, we want to return the 695 // location of the actual byte of the token. For example foo\<newline>bar 696 // advanced by 3 should return the location of b, not of \\. One compounding 697 // detail of this is that the escape may be made by a trigraph. 698 if (!Lexer::isObviouslySimpleCharacter(*TokPtr)) 699 PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr; 700 701 return TokStart.getLocWithOffset(PhysOffset); 702} 703 704/// \brief Computes the source location just past the end of the 705/// token at this source location. 706/// 707/// This routine can be used to produce a source location that 708/// points just past the end of the token referenced by \p Loc, and 709/// is generally used when a diagnostic needs to point just after a 710/// token where it expected something different that it received. If 711/// the returned source location would not be meaningful (e.g., if 712/// it points into a macro), this routine returns an invalid 713/// source location. 714/// 715/// \param Offset an offset from the end of the token, where the source 716/// location should refer to. The default offset (0) produces a source 717/// location pointing just past the end of the token; an offset of 1 produces 718/// a source location pointing to the last character in the token, etc. 
719SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset, 720 const SourceManager &SM, 721 const LangOptions &LangOpts) { 722 if (Loc.isInvalid()) 723 return SourceLocation(); 724 725 if (Loc.isMacroID()) { 726 if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc)) 727 return SourceLocation(); // Points inside the macro expansion. 728 } 729 730 unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts); 731 if (Len > Offset) 732 Len = Len - Offset; 733 else 734 return Loc; 735 736 return Loc.getLocWithOffset(Len); 737} 738 739/// \brief Returns true if the given MacroID location points at the first 740/// token of the macro expansion. 741bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc, 742 const SourceManager &SM, 743 const LangOptions &LangOpts, 744 SourceLocation *MacroBegin) { 745 assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc"); 746 747 std::pair<FileID, unsigned> infoLoc = SM.getDecomposedLoc(loc); 748 // FIXME: If the token comes from the macro token paste operator ('##') 749 // this function will always return false; 750 if (infoLoc.second > 0) 751 return false; // Does not point at the start of token. 752 753 SourceLocation expansionLoc = 754 SM.getSLocEntry(infoLoc.first).getExpansion().getExpansionLocStart(); 755 if (expansionLoc.isFileID()) { 756 // No other macro expansions, this is the first. 757 if (MacroBegin) 758 *MacroBegin = expansionLoc; 759 return true; 760 } 761 762 return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin); 763} 764 765/// \brief Returns true if the given MacroID location points at the last 766/// token of the macro expansion. 
767bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc, 768 const SourceManager &SM, 769 const LangOptions &LangOpts, 770 SourceLocation *MacroEnd) { 771 assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc"); 772 773 SourceLocation spellLoc = SM.getSpellingLoc(loc); 774 unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts); 775 if (tokLen == 0) 776 return false; 777 778 FileID FID = SM.getFileID(loc); 779 SourceLocation afterLoc = loc.getLocWithOffset(tokLen+1); 780 if (SM.isInFileID(afterLoc, FID)) 781 return false; // Still in the same FileID, does not point to the last token. 782 783 // FIXME: If the token comes from the macro token paste operator ('##') 784 // or the stringify operator ('#') this function will always return false; 785 786 SourceLocation expansionLoc = 787 SM.getSLocEntry(FID).getExpansion().getExpansionLocEnd(); 788 if (expansionLoc.isFileID()) { 789 // No other macro expansions. 790 if (MacroEnd) 791 *MacroEnd = expansionLoc; 792 return true; 793 } 794 795 return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd); 796} 797 798static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range, 799 const SourceManager &SM, 800 const LangOptions &LangOpts) { 801 SourceLocation Begin = Range.getBegin(); 802 SourceLocation End = Range.getEnd(); 803 assert(Begin.isFileID() && End.isFileID()); 804 if (Range.isTokenRange()) { 805 End = Lexer::getLocForEndOfToken(End, 0, SM,LangOpts); 806 if (End.isInvalid()) 807 return CharSourceRange(); 808 } 809 810 // Break down the source locations. 
  // (Continuation of the preceding function, whose signature appears earlier
  // in the file: decompose Begin, verify both endpoints are in the same
  // FileID and properly ordered, then build the resulting character range.)
  FileID FID;
  unsigned BeginOffs;
  llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
  if (FID.isInvalid())
    return CharSourceRange();

  unsigned EndOffs;
  if (!SM.isInFileID(End, FID, &EndOffs) ||
      BeginOffs > EndOffs)
    return CharSourceRange();

  return CharSourceRange::getCharRange(Begin, End);
}

/// \brief Accepts a range and attempts to return an equivalent range whose
/// endpoints are file locations.
///
/// Endpoints that are macro locations are mapped back to file locations when
/// they sit exactly at a macro-expansion boundary; as a last resort, a range
/// entirely inside a single macro-argument expansion spelled in a file is
/// mapped back to the argument's spelling.  Returns an invalid range when no
/// such mapping exists.
CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return CharSourceRange();

  // Both endpoints already in a file: nothing to map.
  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  // Begin in a macro, End in a file: Begin must be at the very start of its
  // macro expansion so it can be replaced by the expansion location.
  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return CharSourceRange();
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Begin in a file, End in a macro: for a token range End must cover the last
  // token of the expansion; for a char range it must sit at the expansion
  // start (char ranges are exclusive of the end position).
  if (Begin.isFileID() && End.isMacroID()) {
    if ((Range.isTokenRange() && !isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                          &End)) ||
        (Range.isCharRange() && !isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                           &End)))
      return CharSourceRange();
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Both endpoints are macro locations: try mapping both to the covering
  // expansion's file range.
  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // Fall back: if the whole range lives inside one macro-argument expansion
  // whose argument was spelled directly in a file, rebase onto the spelling.
  FileID FID;
  unsigned BeginOffs;
  llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
  if (FID.isInvalid())
    return CharSourceRange();

  unsigned EndOffs;
  if (!SM.isInFileID(End, FID, &EndOffs) ||
      BeginOffs > EndOffs)
    return CharSourceRange();

  const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
  const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
  if (Expansion.isMacroArgExpansion() &&
      Expansion.getSpellingLoc().isFileID()) {
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    Range.setBegin(SpellLoc.getLocWithOffset(BeginOffs));
    Range.setEnd(SpellLoc.getLocWithOffset(EndOffs));
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  return CharSourceRange();
}

/// \brief Returns the text covered by Range as a StringRef into the source
/// buffer, after mapping the range to file locations via makeFileCharRange.
/// Returns an empty StringRef (and sets *Invalid when provided) on failure.
StringRef Lexer::getSourceText(CharSourceRange Range,
                               const SourceManager &SM,
                               const LangOptions &LangOpts,
                               bool *Invalid) {
  Range = makeFileCharRange(Range, SM, LangOpts);
  if (Range.isInvalid()) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Break down the source location.
  std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
  if (beginInfo.first.isInvalid()) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Both endpoints must land in the same file buffer, in order.
  unsigned EndOffs;
  if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
      beginInfo.second > EndOffs) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (Invalid) *Invalid = true;
    return StringRef();
  }

  if (Invalid) *Invalid = false;
  return file.substr(beginInfo.second, EndOffs - beginInfo.second);
}

/// \brief Retrieve the name of the macro whose expansion immediately produced
/// the given macro location, as spelled in the source buffer.
StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonble to call this on macros");

  // Find the location of the immediate macro expansion.
  while (1) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition, move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).first;
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If spelling location resides in the same FileID as macro expansion
    // location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
    Loc = SpellLoc;
  }

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(Loc);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}

//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//

// Bit flags describing each byte's lexical class; OR-combined in CharInfo.
enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'
  CHAR_LETTER   = 0x04,  // a-z,A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20,  // .
  CHAR_RAWDEL   = 0x40   // {}[]#<>%:;?*+-/^&|~!=,"'
};

// Statically initialize CharInfo table based on ASCII character set
// Reference: FreeBSD 7.2 /usr/share/misc/ascii
// Entries 128-255 are implicitly zero (non-ASCII bytes have no flags).
static const unsigned char CharInfo[256] =
{
// 0 NUL         1 SOH         2 STX         3 ETX
// 4 EOT         5 ENQ         6 ACK         7 BEL
   0           , 0           , 0           , 0           ,
   0           , 0           , 0           , 0           ,
// 8 BS          9 HT         10 NL         11 VT
//12 NP         13 CR         14 SO         15 SI
   0           , CHAR_HORZ_WS, CHAR_VERT_WS, CHAR_HORZ_WS,
   CHAR_HORZ_WS, CHAR_VERT_WS, 0           , 0           ,
//16 DLE        17 DC1        18 DC2        19 DC3
//20 DC4        21 NAK        22 SYN        23 ETB
   0           , 0           , 0           , 0           ,
   0           , 0           , 0           , 0           ,
//24 CAN        25 EM         26 SUB        27 ESC
//28 FS         29 GS         30 RS         31 US
   0           , 0           , 0           , 0           ,
   0           , 0           , 0           , 0           ,
//32 SP         33  !         34  "         35  #
//36  $         37  %         38  &         39  '
   CHAR_HORZ_WS, CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
   0           , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
//40  (         41  )         42  *         43  +
//44  ,         45  -         46  .         47  /
   0           , 0           , CHAR_RAWDEL , CHAR_RAWDEL ,
   CHAR_RAWDEL , CHAR_RAWDEL , CHAR_PERIOD , CHAR_RAWDEL ,
//48  0         49  1         50  2         51  3
//52  4         53  5         54  6         55  7
   CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER ,
   CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER ,
//56  8         57  9         58  :         59  ;
//60  <         61  =         62  >         63  ?
   CHAR_NUMBER , CHAR_NUMBER , CHAR_RAWDEL , CHAR_RAWDEL ,
   CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
//64  @         65  A         66  B         67  C
//68  D         69  E         70  F         71  G
   0           , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//72  H         73  I         74  J         75  K
//76  L         77  M         78  N         79  O
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//80  P         81  Q         82  R         83  S
//84  T         85  U         86  V         87  W
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//88  X         89  Y         90  Z         91  [
//92  \         93  ]         94  ^         95  _
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_RAWDEL ,
   0           , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_UNDER  ,
//96  `         97  a         98  b         99  c
//100  d       101  e        102  f        103  g
   0           , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//104  h       105  i        106  j        107  k
//108  l       109  m        110  n        111  o
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//112  p       113  q        114  r        115  s
//116  t       117  u        118  v        119  w
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
//120  x       121  y        122  z        123  {
//124  |       125  }        126  ~        127 DEL
   CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_RAWDEL ,
   CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , 0
};

/// Sanity-check (in asserts builds) that the static CharInfo table agrees
/// with the flag definitions above.  Runs its checks only once.
static void InitCharacterInfo() {
  static bool isInited = false;
  if (isInited) return;
  // check the statically-initialized CharInfo table
  assert(CHAR_HORZ_WS == CharInfo[(int)' ']);
  assert(CHAR_HORZ_WS == CharInfo[(int)'\t']);
  assert(CHAR_HORZ_WS == CharInfo[(int)'\f']);
  assert(CHAR_HORZ_WS == CharInfo[(int)'\v']);
  assert(CHAR_VERT_WS == CharInfo[(int)'\n']);
  assert(CHAR_VERT_WS == CharInfo[(int)'\r']);
  assert(CHAR_UNDER   == CharInfo[(int)'_']);
  assert(CHAR_PERIOD  == CharInfo[(int)'.']);
  for (unsigned i = 'a'; i <= 'z'; ++i) {
    assert(CHAR_LETTER == CharInfo[i]);
    assert(CHAR_LETTER == CharInfo[i+'A'-'a']);
  }
  for (unsigned i = '0'; i <= '9'; ++i)
    assert(CHAR_NUMBER == CharInfo[i]);

  isInited = true;
}


/// isIdentifierHead - Return true if this is the first character of an
/// identifier, which is [a-zA-Z_].
static inline bool isIdentifierHead(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_UNDER)) ? true : false;
}

/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false;
}

/// isHorizontalWhitespace - Return true if this character is horizontal
/// whitespace: ' ', '\\t', '\\f', '\\v'.  Note that this returns false for
/// '\\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
  return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
}

/// isVerticalWhitespace - Return true if this character is vertical
/// whitespace: '\\n', '\\r'.  Note that this returns false for '\\0'.
static inline bool isVerticalWhitespace(unsigned char c) {
  return (CharInfo[c] & CHAR_VERT_WS) ? true : false;
}

/// isWhitespace - Return true if this character is horizontal or vertical
/// whitespace: ' ', '\\t', '\\f', '\\v', '\\n', '\\r'.  Note that this returns
/// false for '\\0'.
static inline bool isWhitespace(unsigned char c) {
  return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false;
}

/// isNumberBody - Return true if this is the body character of an
/// preprocessing number, which is [a-zA-Z0-9_.].
static inline bool isNumberBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ?
    true : false;
}

/// isRawStringDelimBody - Return true if this is the body character of a
/// raw string delimiter.
static inline bool isRawStringDelimBody(unsigned char c) {
  return (CharInfo[c] &
          (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD|CHAR_RAWDEL)) ?
    true : false;
}

// Allow external clients to make use of CharInfo.
bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
  // '$' is only an identifier character when the dialect permits it.
  return isIdentifierBody(c) || (c == '$' && LangOpts.DollarIdents);
}


//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//

/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
/// lexer buffer was all expanded at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method.  Do not allow it to be inlined.
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
    Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be a macro expansion");

  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the expansion location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  std::pair<SourceLocation,SourceLocation> II =
    SM.getImmediateExpansionRange(FileLoc);

  return SM.createExpansionLoc(SpellingLoc, II.first, II.second, TokLen);
}

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
SourceLocation Lexer::getSourceLocation(const char *Loc,
                                        unsigned TokLen) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}

/// Diag - Forwarding function for diagnostics.  This translate a source
/// position in the current buffer into a SourceLocation object for rendering.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}

//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
1196static char GetTrigraphCharForLetter(char Letter) { 1197 switch (Letter) { 1198 default: return 0; 1199 case '=': return '#'; 1200 case ')': return ']'; 1201 case '(': return '['; 1202 case '!': return '|'; 1203 case '\'': return '^'; 1204 case '>': return '}'; 1205 case '/': return '\\'; 1206 case '<': return '{'; 1207 case '-': return '~'; 1208 } 1209} 1210 1211/// DecodeTrigraphChar - If the specified character is a legal trigraph when 1212/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled, 1213/// return the result character. Finally, emit a warning about trigraph use 1214/// whether trigraphs are enabled or not. 1215static char DecodeTrigraphChar(const char *CP, Lexer *L) { 1216 char Res = GetTrigraphCharForLetter(*CP); 1217 if (!Res || !L) return Res; 1218 1219 if (!L->getLangOpts().Trigraphs) { 1220 if (!L->isLexingRawMode()) 1221 L->Diag(CP-2, diag::trigraph_ignored); 1222 return 0; 1223 } 1224 1225 if (!L->isLexingRawMode()) 1226 L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1); 1227 return Res; 1228} 1229 1230/// getEscapedNewLineSize - Return the size of the specified escaped newline, 1231/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a 1232/// trigraph equivalent on entry to this function. 1233unsigned Lexer::getEscapedNewLineSize(const char *Ptr) { 1234 unsigned Size = 0; 1235 while (isWhitespace(Ptr[Size])) { 1236 ++Size; 1237 1238 if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r') 1239 continue; 1240 1241 // If this is a \r\n or \n\r, skip the other half. 1242 if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') && 1243 Ptr[Size-1] != Ptr[Size]) 1244 ++Size; 1245 1246 return Size; 1247 } 1248 1249 // Not an escaped newline, must be a \t or something else. 1250 return 0; 1251} 1252 1253/// SkipEscapedNewLines - If P points to an escaped newline (or a series of 1254/// them), skip over them and return the first non-escaped-newline found, 1255/// otherwise return P. 
const char *Lexer::SkipEscapedNewLines(const char *P) {
  while (1) {
    const char *AfterEscape;
    if (*P == '\\') {
      AfterEscape = P+1;
    } else if (*P == '?') {
      // If not a trigraph for escape, bail out.
      if (P[1] != '?' || P[2] != '/')
        return P;
      // "??/" is the trigraph spelling of '\'.
      AfterEscape = P+3;
    } else {
      return P;
    }

    unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
    if (NewLineSize == 0) return P;
    P = AfterEscape+NewLineSize;
  }
}

/// \brief Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
SourceLocation Lexer::findLocationAfterToken(SourceLocation Loc,
                                        tok::TokenKind TKind,
                                        const SourceManager &SM,
                                        const LangOptions &LangOpts,
                                        bool SkipTrailingWhitespaceAndNewLine) {
  if (Loc.isMacroID()) {
    if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return SourceLocation();
  }
  Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);

  // Break down the source location.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);

  // Try to load the file buffer.
  bool InvalidTemp = false;
  llvm::StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
  if (InvalidTemp)
    return SourceLocation();

  const char *TokenBegin = File.data() + LocInfo.second;

  // Lex from the start of the given location, using a raw lexer over the
  // file's buffer.
  Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
              TokenBegin, File.end());
  // Find the token.
  Token Tok;
  lexer.LexFromRawLexer(Tok);
  if (Tok.isNot(TKind))
    return SourceLocation();
  SourceLocation TokenLoc = Tok.getLocation();

  // Calculate how much whitespace needs to be skipped if any.
  unsigned NumWhitespaceChars = 0;
  if (SkipTrailingWhitespaceAndNewLine) {
    const char *TokenEnd = SM.getCharacterData(TokenLoc) +
                           Tok.getLength();
    unsigned char C = *TokenEnd;
    while (isHorizontalWhitespace(C)) {
      C = *(++TokenEnd);
      NumWhitespaceChars++;
    }
    // Skip at most one vertical whitespace character (a single newline).
    if (isVerticalWhitespace(C))
      NumWhitespaceChars++;
  }

  return TokenLoc.getLocWithOffset(Tok.getLength() + NumWhitespaceChars);
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: C++ UCN's?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // If the char that we finally got was a \n, then we must have had
      // something like \<newline><newline>.  We don't want to consume the
      // second newline.
      if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
        return ' ';

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // A trigraph-decoded '\' may itself begin an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}


/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // If the char that we finally got was a \n, then we must have had
      // something like \<newline><newline>.  We don't want to consume the
      // second newline.
      if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
        return ' ';

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      // A trigraph-decoded '\' may itself begin an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

/// \brief Routine that indiscriminately skips bytes in the source file.
void Lexer::SkipBytes(unsigned Bytes, bool StartOfLine) {
  // Clamp the advance to the end of the buffer.
  BufferPtr += Bytes;
  if (BufferPtr > BufferEnd)
    BufferPtr = BufferEnd;
  IsAtStartOfLine = StartOfLine;
}

/// LexIdentifier - Lex the rest of an identifier token; on entry BufferPtr
/// points at the token start and CurPtr just past its first character.
void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C))
    C = *CurPtr++;

  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: UCNs.
  //
  // TODO: Could merge these checks into a CharInfo flag to make the comparison
  // cheaper
  if (C != '\\' && C != '?' && (C != '$' || !LangOpts.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
    Result.setRawIdentifierData(IdStart);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode)
      return;

    // Fill in Result.IdentifierInfo and update the token kind,
    // looking up the identifier in the identifier table.
    IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    if (II->isHandleIdentifierCase())
      PP->HandleIdentifier(Result);

    return;
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path that uses
  // getCharAndSize to decode trigraphs and escaped newlines.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!LangOpts.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: UCNs.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}

/// isHexaLiteral - Return true if Start points to a hex constant.
/// in microsoft mode (where this is supposed to be several different tokens).
bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
  unsigned Size;
  char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
  if (C1 != '0')
    return false;
  // Size now holds the width of the first character; use it to find the
  // second one (which may itself be spelled with trigraphs/escapes).
  char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
  return (C2 == 'x' || C2 == 'X');
}

/// LexNumericConstant - Lex the remainder of a integer or floating point
/// constant. From[-1] is the first character lexed.  Return the end of the
/// constant.
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isNumberBody(C)) { // FIXME: UCNs in ud-suffix.
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
    // If we are in Microsoft mode, don't continue if the constant is hex.
    // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
    if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a hex FP constant, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
    // Outside C99, we accept hexadecimal floating point numbers as a
    // not-quite-conforming extension. Only do so if this looks like it's
    // actually meant to be a hexfloat, and not if it has a ud-suffix.
    bool IsHexFloat = true;
    if (!LangOpts.C99) {
      if (!isHexaLiteral(BufferPtr, LangOpts))
        IsHexFloat = false;
      else if (std::find(BufferPtr, CurPtr, '_') != CurPtr)
        IsHexFloat = false;  // '_' suggests a ud-suffix, not an exponent.
    }
    if (IsHexFloat)
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
}

/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
/// in C++11, or warn on a ud-suffix in C++98.  Returns the position just past
/// any consumed suffix.
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr) {
  assert(getLangOpts().CPlusPlus);

  // Maximally munch an identifier. FIXME: UCNs.
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  if (isIdentifierHead(C)) {
    if (!getLangOpts().CPlusPlus0x) {
      // In C++98 mode, only warn (compat diagnostics); do not consume.
      if (!isLexingRawMode())
        Diag(CurPtr,
             C == '_' ? diag::warn_cxx11_compat_user_defined_literal
                      : diag::warn_cxx11_compat_reserved_user_defined_literal)
          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
    // that does not start with an underscore is ill-formed. As a conforming
    // extension, we treat all such suffixes as if they had whitespace before
    // them.
    if (C != '_') {
      if (!isLexingRawMode())
        Diag(CurPtr, getLangOpts().MicrosoftMode ?
                         diag::ext_ms_reserved_user_defined_literal :
                         diag::ext_reserved_user_defined_literal)
          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    Result.setFlag(Token::HasUDSuffix);
    do {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    } while (isIdentifierBody(C));
  }
  return CurPtr;
}

/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L" or u8" or u" or U".
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
                             tok::TokenKind Kind) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  if (!isLexingRawMode() &&
      (Kind == tok::utf8_string_literal ||
       Kind == tok::utf16_string_literal ||
       Kind == tok::utf32_string_literal))
    Diag(BufferPtr, diag::warn_cxx98_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.  Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_string);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return;
    }

    if (C == 0) {
      // A code-completion marker embedded in the string stops lexing here.
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        return cutOffLexing();
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr);

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_string);

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
}

/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
/// having lexed R", LR", u8R", uR", or UR".
void Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
                                tok::TokenKind Kind) {
  // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
  //  Between the initial and final double quote characters of the raw string,
  //  any transformations performed in phases 1 and 2 (trigraphs,
  //  universal-character-names, and line splicing) are reverted.

  if (!isLexingRawMode())
    Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);

  unsigned PrefixLen = 0;

  // The d-char-sequence delimiter is at most 16 characters long.
  while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
    ++PrefixLen;

  // If the last character was not a '(', then we didn't lex a valid delimiter.
  if (CurPtr[PrefixLen] != '(') {
    if (!isLexingRawMode()) {
      const char *PrefixEnd = &CurPtr[PrefixLen];
      if (PrefixLen == 16) {
        Diag(PrefixEnd, diag::err_raw_delim_too_long);
      } else {
        Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
          << StringRef(PrefixEnd, 1);
      }
    }

    // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
    // it's possible the '"' was intended to be part of the raw string, but
    // there's not much we can do about that.
    while (1) {
      char C = *CurPtr++;

      if (C == '"')
        break;
      if (C == 0 && CurPtr-1 == BufferEnd) {
        --CurPtr;
        break;
      }
    }

    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return;
  }

  // Save prefix and move CurPtr past it
  const char *Prefix = CurPtr;
  CurPtr += PrefixLen + 1; // skip over prefix and '('

  while (1) {
    char C = *CurPtr++;

    if (C == ')') {
      // Check for prefix match and closing quote.
      if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
        CurPtr += PrefixLen + 1; // skip over prefix and '"'
        break;
      }
    } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_raw_string)
          << StringRef(Prefix, PrefixLen);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return;
    }
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
}

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?
  const char *AfterLessPos = CurPtr;
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
                           isCodeCompletionPoint(CurPtr-1)))) {
      // If the filename is unterminated, then it must just be a lone <
      // character.  Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return;
    } else if (C == 0) {
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_string);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
  Result.setLiteralData(TokStart);
}


/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u' or U'.
void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
                            tok::TokenKind Kind) {
  const char *NulCharacter = 0; // Does this character contain the \0 character?

  if (!isLexingRawMode() &&
      (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant))
    Diag(BufferPtr, diag::warn_cxx98_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    // '' is not a valid character constant; diagnose and give up.
    if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
      Diag(BufferPtr, diag::ext_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return;
  }

  while (C != '\'') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return;
    } else if (C == 0) {
      // A code-completion marker embedded in the constant stops lexing here.
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        return cutOffLexing();
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (getLangOpts().CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr);

  // If a nul character existed in the character, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
///
/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
1858/// 1859bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) { 1860 // Whitespace - Skip it, then return the token after the whitespace. 1861 unsigned char Char = *CurPtr; // Skip consequtive spaces efficiently. 1862 while (1) { 1863 // Skip horizontal whitespace very aggressively. 1864 while (isHorizontalWhitespace(Char)) 1865 Char = *++CurPtr; 1866 1867 // Otherwise if we have something other than whitespace, we're done. 1868 if (Char != '\n' && Char != '\r') 1869 break; 1870 1871 if (ParsingPreprocessorDirective) { 1872 // End of preprocessor directive line, let LexTokenInternal handle this. 1873 BufferPtr = CurPtr; 1874 return false; 1875 } 1876 1877 // ok, but handle newline. 1878 // The returned token is at the start of the line. 1879 Result.setFlag(Token::StartOfLine); 1880 // No leading whitespace seen so far. 1881 Result.clearFlag(Token::LeadingSpace); 1882 Char = *++CurPtr; 1883 } 1884 1885 // If this isn't immediately after a newline, there is leading space. 1886 char PrevChar = CurPtr[-1]; 1887 if (PrevChar != '\n' && PrevChar != '\r') 1888 Result.setFlag(Token::LeadingSpace); 1889 1890 // If the client wants us to return whitespace, return it now. 1891 if (isKeepWhitespaceMode()) { 1892 FormTokenWithChars(Result, CurPtr, tok::unknown); 1893 return true; 1894 } 1895 1896 BufferPtr = CurPtr; 1897 return false; 1898} 1899 1900/// We have just read the // characters from input. Skip until we find the 1901/// newline character thats terminate the comment. Then update BufferPtr and 1902/// return. 1903/// 1904/// If we're in KeepCommentMode or any CommentHandler has inserted 1905/// some tokens, this will store the first token and return true. 1906bool Lexer::SkipLineComment(Token &Result, const char *CurPtr) { 1907 // If Line comments aren't explicitly enabled for this language, emit an 1908 // extension warning. 
1909 if (!LangOpts.LineComment && !isLexingRawMode()) { 1910 Diag(BufferPtr, diag::ext_line_comment); 1911 1912 // Mark them enabled so we only emit one warning for this translation 1913 // unit. 1914 LangOpts.LineComment = true; 1915 } 1916 1917 // Scan over the body of the comment. The common case, when scanning, is that 1918 // the comment contains normal ascii characters with nothing interesting in 1919 // them. As such, optimize for this case with the inner loop. 1920 char C; 1921 do { 1922 C = *CurPtr; 1923 // Skip over characters in the fast loop. 1924 while (C != 0 && // Potentially EOF. 1925 C != '\n' && C != '\r') // Newline or DOS-style newline. 1926 C = *++CurPtr; 1927 1928 const char *NextLine = CurPtr; 1929 if (C != 0) { 1930 // We found a newline, see if it's escaped. 1931 const char *EscapePtr = CurPtr-1; 1932 while (isHorizontalWhitespace(*EscapePtr)) // Skip whitespace. 1933 --EscapePtr; 1934 1935 if (*EscapePtr == '\\') // Escaped newline. 1936 CurPtr = EscapePtr; 1937 else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' && 1938 EscapePtr[-2] == '?') // Trigraph-escaped newline. 1939 CurPtr = EscapePtr-2; 1940 else 1941 break; // This is a newline, we're done. 1942 } 1943 1944 // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to 1945 // properly decode the character. Read it in raw mode to avoid emitting 1946 // diagnostics about things like trigraphs. If we see an escaped newline, 1947 // we'll handle it below. 1948 const char *OldPtr = CurPtr; 1949 bool OldRawMode = isLexingRawMode(); 1950 LexingRawMode = true; 1951 C = getAndAdvanceChar(CurPtr, Result); 1952 LexingRawMode = OldRawMode; 1953 1954 // If we only read only one character, then no special handling is needed. 1955 // We're done and can skip forward to the newline. 
1956 if (C != 0 && CurPtr == OldPtr+1) { 1957 CurPtr = NextLine; 1958 break; 1959 } 1960 1961 // If we read multiple characters, and one of those characters was a \r or 1962 // \n, then we had an escaped newline within the comment. Emit diagnostic 1963 // unless the next line is also a // comment. 1964 if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') { 1965 for (; OldPtr != CurPtr; ++OldPtr) 1966 if (OldPtr[0] == '\n' || OldPtr[0] == '\r') { 1967 // Okay, we found a // comment that ends in a newline, if the next 1968 // line is also a // comment, but has spaces, don't emit a diagnostic. 1969 if (isWhitespace(C)) { 1970 const char *ForwardPtr = CurPtr; 1971 while (isWhitespace(*ForwardPtr)) // Skip whitespace. 1972 ++ForwardPtr; 1973 if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/') 1974 break; 1975 } 1976 1977 if (!isLexingRawMode()) 1978 Diag(OldPtr-1, diag::ext_multi_line_line_comment); 1979 break; 1980 } 1981 } 1982 1983 if (CurPtr == BufferEnd+1) { 1984 --CurPtr; 1985 break; 1986 } 1987 1988 if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) { 1989 PP->CodeCompleteNaturalLanguage(); 1990 cutOffLexing(); 1991 return false; 1992 } 1993 1994 } while (C != '\n' && C != '\r'); 1995 1996 // Found but did not consume the newline. Notify comment handlers about the 1997 // comment unless we're in a #if 0 block. 1998 if (PP && !isLexingRawMode() && 1999 PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr), 2000 getSourceLocation(CurPtr)))) { 2001 BufferPtr = CurPtr; 2002 return true; // A token has to be returned. 2003 } 2004 2005 // If we are returning comments as tokens, return this comment as a token. 2006 if (inKeepCommentMode()) 2007 return SaveLineComment(Result, CurPtr); 2008 2009 // If we are inside a preprocessor directive and we see the end of line, 2010 // return immediately, so that the lexer can return this as an EOD token. 
2011 if (ParsingPreprocessorDirective || CurPtr == BufferEnd) { 2012 BufferPtr = CurPtr; 2013 return false; 2014 } 2015 2016 // Otherwise, eat the \n character. We don't care if this is a \n\r or 2017 // \r\n sequence. This is an efficiency hack (because we know the \n can't 2018 // contribute to another token), it isn't needed for correctness. Note that 2019 // this is ok even in KeepWhitespaceMode, because we would have returned the 2020 /// comment above in that mode. 2021 ++CurPtr; 2022 2023 // The next returned token is at the start of the line. 2024 Result.setFlag(Token::StartOfLine); 2025 // No leading whitespace seen so far. 2026 Result.clearFlag(Token::LeadingSpace); 2027 BufferPtr = CurPtr; 2028 return false; 2029} 2030 2031/// If in save-comment mode, package up this Line comment in an appropriate 2032/// way and return it. 2033bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) { 2034 // If we're not in a preprocessor directive, just return the // comment 2035 // directly. 2036 FormTokenWithChars(Result, CurPtr, tok::comment); 2037 2038 if (!ParsingPreprocessorDirective || LexingRawMode) 2039 return true; 2040 2041 // If this Line-style comment is in a macro definition, transmogrify it into 2042 // a C-style block comment. 2043 bool Invalid = false; 2044 std::string Spelling = PP->getSpelling(Result, &Invalid); 2045 if (Invalid) 2046 return true; 2047 2048 assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?"); 2049 Spelling[1] = '*'; // Change prefix to "/*". 2050 Spelling += "*/"; // add suffix. 2051 2052 Result.setKind(tok::comment); 2053 PP->CreateString(Spelling, Result, 2054 Result.getLocation(), Result.getLocation()); 2055 return true; 2056} 2057 2058/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline 2059/// character (either \\n or \\r) is part of an escaped newline sequence. Issue 2060/// a diagnostic if so. We know that the newline is inside of a block comment. 
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the slash and newline.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    // Only an escaped newline ending the comment if a '*' precedes the '\'.
    if (CurPtr[-1] != '*') return false;
  } else {
    // It isn't a slash, is it the ??/ trigraph for '\'?
    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
        CurPtr[-3] != '*')
      return false;

    // This is the trigraph ending the comment.  Emit a stern warning!
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getLangOpts().Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(CurPtr, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace && !L->isLexingRawMode())
    L->Diag(CurPtr, diag::backslash_newline_space);

  return true;
}

#ifdef __SSE2__
#include <emmintrin.h>
#elif __ALTIVEC__
#include <altivec.h>
#undef bool
#endif

/// We have just read from input the / and * characters that started a comment.
/// Read until we find the * and / characters that terminate the comment.
/// Note that we don't bother decoding trigraphs or escaped newlines in block
/// comments, because they cannot cause the comment to end.  The only thing
/// that can happen is the comment could end with an escaped newline between
/// the terminating * and /.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token.  Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (1) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd &&
        // If there is a code-completion point avoid the fast scan because it
        // doesn't check for '\0'.
        !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Vectorized scan: compare 16 bytes at a time against '/'.
      __m128i Slashes = _mm_set1_epi8('/');
      while (CurPtr+16 <= BufferEnd) {
        int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
                                    Slashes));
        if (cmp != 0) {
          // Adjust the pointer to point directly after the first slash.  It's
          // not necessary to set C here, it will be overwritten at the end of
          // the outer loop.
          CurPtr += llvm::CountTrailingZeros_32(cmp) + 1;
          goto FoundSlash;
        }
        CurPtr += 16;
      }
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

    if (C == '/') {
  FoundSlash:
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token.  Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }

    C = *CurPtr++;
  }

  // Notify comment handlers about the comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.  This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr+1);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}

//===----------------------------------------------------------------------===//
// Primary Lexing Entry Points
//===----------------------------------------------------------------------===//

/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string.  This switches the lexer out of directive mode.
void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  Token Tmp;

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (1) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      if (Result)
        Result->push_back(Char);
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        if (isCodeCompletionPoint(CurPtr-1)) {
          PP->CodeCompleteNaturalLanguage();
          cutOffLexing();
          return;
        }

        // Nope, normal character, continue.
        if (Result)
          Result->push_back(Char);
        break;
      }
      // FALL THROUGH.
    case '\r':
    case '\n':
      // Okay, we found the end of the line.  First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOD transition.
      Lex(Tmp);
      if (Tmp.is(tok::code_completion)) {
        if (PP)
          PP->CodeCompleteNaturalLanguage();
        Lex(Tmp);
      }
      assert(Tmp.is(tok::eod) && "Unexpected token!");

      // Finally, we're done;
      return;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eod);

    // Restore comment saving mode, in case it was disabled for directive.
    SetCommentRetentionState(PP->getCommentRetentionState());
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  // Issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    if (PP->getCodeCompletionFileLoc() != FileLoc)
      PP->Diag(ConditionalStack.back().IfLoc,
               diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
    Diag(BufferEnd, LangOpts.CPlusPlus0x ? // C++11 [lex.phases] 2.2 p2
         diag::warn_cxx98_compat_no_newline_eof : diag::ext_no_newline_eof)
      << FixItHint::CreateInsertion(getSourceLocation(BufferEnd), "\n");

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result, isPragmaLexer());
}

/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
/// the specified lexer will return a tok::l_paren token, 0 if it is something
/// else and 2 if there are no more tokens in the buffer controlled by the
/// lexer.
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  // Switch to 'skipping' mode.  This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
2417 const char *TmpBufferPtr = BufferPtr; 2418 bool inPPDirectiveMode = ParsingPreprocessorDirective; 2419 2420 Token Tok; 2421 Tok.startToken(); 2422 LexTokenInternal(Tok); 2423 2424 // Restore state that may have changed. 2425 BufferPtr = TmpBufferPtr; 2426 ParsingPreprocessorDirective = inPPDirectiveMode; 2427 2428 // Restore the lexer back to non-skipping mode. 2429 LexingRawMode = false; 2430 2431 if (Tok.is(tok::eof)) 2432 return 2; 2433 return Tok.is(tok::l_paren); 2434} 2435 2436/// \brief Find the end of a version control conflict marker. 2437static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd, 2438 ConflictMarkerKind CMK) { 2439 const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>"; 2440 size_t TermLen = CMK == CMK_Perforce ? 5 : 7; 2441 StringRef RestOfBuffer(CurPtr+TermLen, BufferEnd-CurPtr-TermLen); 2442 size_t Pos = RestOfBuffer.find(Terminator); 2443 while (Pos != StringRef::npos) { 2444 // Must occur at start of line. 2445 if (RestOfBuffer[Pos-1] != '\r' && 2446 RestOfBuffer[Pos-1] != '\n') { 2447 RestOfBuffer = RestOfBuffer.substr(Pos+TermLen); 2448 Pos = RestOfBuffer.find(Terminator); 2449 continue; 2450 } 2451 return RestOfBuffer.data()+Pos; 2452 } 2453 return 0; 2454} 2455 2456/// IsStartOfConflictMarker - If the specified pointer is the start of a version 2457/// control conflict marker like '<<<<<<<', recognize it as such, emit an error 2458/// and recover nicely. This returns true if it is a conflict marker and false 2459/// if not. 2460bool Lexer::IsStartOfConflictMarker(const char *CurPtr) { 2461 // Only a conflict marker if it starts at the beginning of a line. 2462 if (CurPtr != BufferStart && 2463 CurPtr[-1] != '\n' && CurPtr[-1] != '\r') 2464 return false; 2465 2466 // Check to see if we have <<<<<<< or >>>>. 
  if ((BufferEnd-CurPtr < 8 || StringRef(CurPtr, 7) != "<<<<<<<") &&
      (BufferEnd-CurPtr < 6 || StringRef(CurPtr, 5) != ">>>> "))
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // '<' starts a normal (diff3-style) marker; otherwise it is Perforce-style.
  ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;

  // Check to see if there is an ending marker somewhere in the buffer at the
  // start of a line to terminate this conflict marker.
  if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
    // We found a match.  We are really in a conflict marker.
    // Diagnose this, and ignore to the end of line.
    Diag(CurPtr, diag::err_conflict_marker);
    CurrentConflictMarkerState = Kind;

    // Skip ahead to the end of line.  We know this exists because the
    // end-of-conflict marker starts with \r or \n.
    while (*CurPtr != '\r' && *CurPtr != '\n') {
      assert(CurPtr != BufferEnd && "Didn't find end of line");
      ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // No end of conflict marker found.
  return false;
}


/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
/// is the end of a conflict marker.  Handle it by ignoring up until the end of
/// the line.  This returns true if it is a conflict marker and false if not.
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (!CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // Check to see if we have the marker (4 characters in a row).
  for (unsigned i = 1; i != 4; ++i)
    if (CurPtr[i] != CurPtr[0])
      return false;

  // If we do have it, search for the end of the conflict marker.  This could
  // fail if it got skipped with a '#if 0' or something.  Note that CurPtr might
  // be the end of conflict marker.
  if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
                                        CurrentConflictMarkerState)) {
    CurPtr = End;

    // Skip ahead to the end of line.
    while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
      ++CurPtr;

    BufferPtr = CurPtr;

    // No longer in the conflict marker.
    CurrentConflictMarkerState = CMK_None;
    return true;
  }

  return false;
}

/// Return true if the character at CurPtr is the designated code-completion
/// point for this lex.
bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
  if (PP && PP->isCodeCompletionEnabled()) {
    SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
    return Loc == PP->getCodeCompletionLoc();
  }

  return false;
}


/// LexTokenInternal - This implements a simple C family lexer.  It is an
/// extremely performance critical piece of code.  This assumes that the buffer
/// has a null character at the end of the file.  This returns a preprocessing
/// token, not a normal token, as such, it is an internal interface.  It assumes
/// that the Flags of result have been cleared before calling this.
void Lexer::LexTokenInternal(Token &Result) {
LexNextToken:
  // New token, can't need cleaning yet.
  Result.clearFlag(Token::NeedsCleaning);
  Result.setIdentifierInfo(0);

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace is very common between tokens.
2567 if ((*CurPtr == ' ') || (*CurPtr == '\t')) { 2568 ++CurPtr; 2569 while ((*CurPtr == ' ') || (*CurPtr == '\t')) 2570 ++CurPtr; 2571 2572 // If we are keeping whitespace and other tokens, just return what we just 2573 // skipped. The next lexer invocation will return the token after the 2574 // whitespace. 2575 if (isKeepWhitespaceMode()) { 2576 FormTokenWithChars(Result, CurPtr, tok::unknown); 2577 return; 2578 } 2579 2580 BufferPtr = CurPtr; 2581 Result.setFlag(Token::LeadingSpace); 2582 } 2583 2584 unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below. 2585 2586 // Read a character, advancing over it. 2587 char Char = getAndAdvanceChar(CurPtr, Result); 2588 tok::TokenKind Kind; 2589 2590 switch (Char) { 2591 case 0: // Null. 2592 // Found end of file? 2593 if (CurPtr-1 == BufferEnd) { 2594 // Read the PP instance variable into an automatic variable, because 2595 // LexEndOfFile will often delete 'this'. 2596 Preprocessor *PPCache = PP; 2597 bool EnableIncludedEOFCache = EnableIncludedEOF; 2598 if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file. 2599 return; // Got a token to return. 2600 2601 if (EnableIncludedEOFCache) { 2602 Result.setKind(tok::included_eof); 2603 return; 2604 } 2605 assert(PPCache && "Raw buffer::LexEndOfFile should return a token"); 2606 return PPCache->Lex(Result); 2607 } 2608 2609 // Check if we are performing code completion. 2610 if (isCodeCompletionPoint(CurPtr-1)) { 2611 // Return the code-completion token. 2612 Result.startToken(); 2613 FormTokenWithChars(Result, CurPtr, tok::code_completion); 2614 return; 2615 } 2616 2617 if (!isLexingRawMode()) 2618 Diag(CurPtr-1, diag::null_in_file); 2619 Result.setFlag(Token::LeadingSpace); 2620 if (SkipWhitespace(Result, CurPtr)) 2621 return; // KeepWhitespaceMode 2622 2623 goto LexNextToken; // GCC isn't tail call eliminating. 2624 2625 case 26: // DOS & CP/M EOF: "^Z". 2626 // If we're in Microsoft extensions mode, treat this as end of file. 
2627 if (LangOpts.MicrosoftExt) { 2628 // Read the PP instance variable into an automatic variable, because 2629 // LexEndOfFile will often delete 'this'. 2630 Preprocessor *PPCache = PP; 2631 if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file. 2632 return; // Got a token to return. 2633 assert(PPCache && "Raw buffer::LexEndOfFile should return a token"); 2634 return PPCache->Lex(Result); 2635 } 2636 // If Microsoft extensions are disabled, this is just random garbage. 2637 Kind = tok::unknown; 2638 break; 2639 2640 case '\n': 2641 case '\r': 2642 // If we are inside a preprocessor directive and we see the end of line, 2643 // we know we are done with the directive, so return an EOD token. 2644 if (ParsingPreprocessorDirective) { 2645 // Done parsing the "line". 2646 ParsingPreprocessorDirective = false; 2647 2648 // Restore comment saving mode, in case it was disabled for directive. 2649 if (PP) 2650 SetCommentRetentionState(PP->getCommentRetentionState()); 2651 2652 // Since we consumed a newline, we are back at the start of a line. 2653 IsAtStartOfLine = true; 2654 2655 Kind = tok::eod; 2656 break; 2657 } 2658 // The returned token is at the start of the line. 2659 Result.setFlag(Token::StartOfLine); 2660 // No leading whitespace seen so far. 2661 Result.clearFlag(Token::LeadingSpace); 2662 2663 if (SkipWhitespace(Result, CurPtr)) 2664 return; // KeepWhitespaceMode 2665 goto LexNextToken; // GCC isn't tail call eliminating. 2666 case ' ': 2667 case '\t': 2668 case '\f': 2669 case '\v': 2670 SkipHorizontalWhitespace: 2671 Result.setFlag(Token::LeadingSpace); 2672 if (SkipWhitespace(Result, CurPtr)) 2673 return; // KeepWhitespaceMode 2674 2675 SkipIgnoredUnits: 2676 CurPtr = BufferPtr; 2677 2678 // If the next token is obviously a // or /* */ comment, skip it efficiently 2679 // too (without going through the big switch stmt). 
2680 if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() && 2681 LangOpts.LineComment && !LangOpts.TraditionalCPP) { 2682 if (SkipLineComment(Result, CurPtr+2)) 2683 return; // There is a token to return. 2684 goto SkipIgnoredUnits; 2685 } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) { 2686 if (SkipBlockComment(Result, CurPtr+2)) 2687 return; // There is a token to return. 2688 goto SkipIgnoredUnits; 2689 } else if (isHorizontalWhitespace(*CurPtr)) { 2690 goto SkipHorizontalWhitespace; 2691 } 2692 goto LexNextToken; // GCC isn't tail call eliminating. 2693 2694 // C99 6.4.4.1: Integer Constants. 2695 // C99 6.4.4.2: Floating Constants. 2696 case '0': case '1': case '2': case '3': case '4': 2697 case '5': case '6': case '7': case '8': case '9': 2698 // Notify MIOpt that we read a non-whitespace/non-comment token. 2699 MIOpt.ReadToken(); 2700 return LexNumericConstant(Result, CurPtr); 2701 2702 case 'u': // Identifier (uber) or C++0x UTF-8 or UTF-16 string literal 2703 // Notify MIOpt that we read a non-whitespace/non-comment token. 
2704 MIOpt.ReadToken(); 2705 2706 if (LangOpts.CPlusPlus0x) { 2707 Char = getCharAndSize(CurPtr, SizeTmp); 2708 2709 // UTF-16 string literal 2710 if (Char == '"') 2711 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 2712 tok::utf16_string_literal); 2713 2714 // UTF-16 character constant 2715 if (Char == '\'') 2716 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 2717 tok::utf16_char_constant); 2718 2719 // UTF-16 raw string literal 2720 if (Char == 'R' && getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 2721 return LexRawStringLiteral(Result, 2722 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 2723 SizeTmp2, Result), 2724 tok::utf16_string_literal); 2725 2726 if (Char == '8') { 2727 char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2); 2728 2729 // UTF-8 string literal 2730 if (Char2 == '"') 2731 return LexStringLiteral(Result, 2732 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 2733 SizeTmp2, Result), 2734 tok::utf8_string_literal); 2735 2736 if (Char2 == 'R') { 2737 unsigned SizeTmp3; 2738 char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3); 2739 // UTF-8 raw string literal 2740 if (Char3 == '"') { 2741 return LexRawStringLiteral(Result, 2742 ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 2743 SizeTmp2, Result), 2744 SizeTmp3, Result), 2745 tok::utf8_string_literal); 2746 } 2747 } 2748 } 2749 } 2750 2751 // treat u like the start of an identifier. 2752 return LexIdentifier(Result, CurPtr); 2753 2754 case 'U': // Identifier (Uber) or C++0x UTF-32 string literal 2755 // Notify MIOpt that we read a non-whitespace/non-comment token. 
2756 MIOpt.ReadToken(); 2757 2758 if (LangOpts.CPlusPlus0x) { 2759 Char = getCharAndSize(CurPtr, SizeTmp); 2760 2761 // UTF-32 string literal 2762 if (Char == '"') 2763 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 2764 tok::utf32_string_literal); 2765 2766 // UTF-32 character constant 2767 if (Char == '\'') 2768 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 2769 tok::utf32_char_constant); 2770 2771 // UTF-32 raw string literal 2772 if (Char == 'R' && getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 2773 return LexRawStringLiteral(Result, 2774 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 2775 SizeTmp2, Result), 2776 tok::utf32_string_literal); 2777 } 2778 2779 // treat U like the start of an identifier. 2780 return LexIdentifier(Result, CurPtr); 2781 2782 case 'R': // Identifier or C++0x raw string literal 2783 // Notify MIOpt that we read a non-whitespace/non-comment token. 2784 MIOpt.ReadToken(); 2785 2786 if (LangOpts.CPlusPlus0x) { 2787 Char = getCharAndSize(CurPtr, SizeTmp); 2788 2789 if (Char == '"') 2790 return LexRawStringLiteral(Result, 2791 ConsumeChar(CurPtr, SizeTmp, Result), 2792 tok::string_literal); 2793 } 2794 2795 // treat R like the start of an identifier. 2796 return LexIdentifier(Result, CurPtr); 2797 2798 case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz"). 2799 // Notify MIOpt that we read a non-whitespace/non-comment token. 2800 MIOpt.ReadToken(); 2801 Char = getCharAndSize(CurPtr, SizeTmp); 2802 2803 // Wide string literal. 2804 if (Char == '"') 2805 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 2806 tok::wide_string_literal); 2807 2808 // Wide raw string literal. 2809 if (LangOpts.CPlusPlus0x && Char == 'R' && 2810 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 2811 return LexRawStringLiteral(Result, 2812 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 2813 SizeTmp2, Result), 2814 tok::wide_string_literal); 2815 2816 // Wide character constant. 
2817 if (Char == '\'') 2818 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 2819 tok::wide_char_constant); 2820 // FALL THROUGH, treating L like the start of an identifier. 2821 2822 // C99 6.4.2: Identifiers. 2823 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': 2824 case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N': 2825 case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/ 2826 case 'V': case 'W': case 'X': case 'Y': case 'Z': 2827 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': 2828 case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': 2829 case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/ 2830 case 'v': case 'w': case 'x': case 'y': case 'z': 2831 case '_': 2832 // Notify MIOpt that we read a non-whitespace/non-comment token. 2833 MIOpt.ReadToken(); 2834 return LexIdentifier(Result, CurPtr); 2835 2836 case '$': // $ in identifiers. 2837 if (LangOpts.DollarIdents) { 2838 if (!isLexingRawMode()) 2839 Diag(CurPtr-1, diag::ext_dollar_in_identifier); 2840 // Notify MIOpt that we read a non-whitespace/non-comment token. 2841 MIOpt.ReadToken(); 2842 return LexIdentifier(Result, CurPtr); 2843 } 2844 2845 Kind = tok::unknown; 2846 break; 2847 2848 // C99 6.4.4: Character Constants. 2849 case '\'': 2850 // Notify MIOpt that we read a non-whitespace/non-comment token. 2851 MIOpt.ReadToken(); 2852 return LexCharConstant(Result, CurPtr, tok::char_constant); 2853 2854 // C99 6.4.5: String Literals. 2855 case '"': 2856 // Notify MIOpt that we read a non-whitespace/non-comment token. 2857 MIOpt.ReadToken(); 2858 return LexStringLiteral(Result, CurPtr, tok::string_literal); 2859 2860 // C99 6.4.6: Punctuators. 
2861 case '?': 2862 Kind = tok::question; 2863 break; 2864 case '[': 2865 Kind = tok::l_square; 2866 break; 2867 case ']': 2868 Kind = tok::r_square; 2869 break; 2870 case '(': 2871 Kind = tok::l_paren; 2872 break; 2873 case ')': 2874 Kind = tok::r_paren; 2875 break; 2876 case '{': 2877 Kind = tok::l_brace; 2878 break; 2879 case '}': 2880 Kind = tok::r_brace; 2881 break; 2882 case '.': 2883 Char = getCharAndSize(CurPtr, SizeTmp); 2884 if (Char >= '0' && Char <= '9') { 2885 // Notify MIOpt that we read a non-whitespace/non-comment token. 2886 MIOpt.ReadToken(); 2887 2888 return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result)); 2889 } else if (LangOpts.CPlusPlus && Char == '*') { 2890 Kind = tok::periodstar; 2891 CurPtr += SizeTmp; 2892 } else if (Char == '.' && 2893 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') { 2894 Kind = tok::ellipsis; 2895 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 2896 SizeTmp2, Result); 2897 } else { 2898 Kind = tok::period; 2899 } 2900 break; 2901 case '&': 2902 Char = getCharAndSize(CurPtr, SizeTmp); 2903 if (Char == '&') { 2904 Kind = tok::ampamp; 2905 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2906 } else if (Char == '=') { 2907 Kind = tok::ampequal; 2908 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2909 } else { 2910 Kind = tok::amp; 2911 } 2912 break; 2913 case '*': 2914 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 2915 Kind = tok::starequal; 2916 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2917 } else { 2918 Kind = tok::star; 2919 } 2920 break; 2921 case '+': 2922 Char = getCharAndSize(CurPtr, SizeTmp); 2923 if (Char == '+') { 2924 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2925 Kind = tok::plusplus; 2926 } else if (Char == '=') { 2927 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2928 Kind = tok::plusequal; 2929 } else { 2930 Kind = tok::plus; 2931 } 2932 break; 2933 case '-': 2934 Char = getCharAndSize(CurPtr, SizeTmp); 2935 if (Char == '-') { // -- 2936 CurPtr = ConsumeChar(CurPtr, 
SizeTmp, Result); 2937 Kind = tok::minusminus; 2938 } else if (Char == '>' && LangOpts.CPlusPlus && 2939 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->* 2940 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 2941 SizeTmp2, Result); 2942 Kind = tok::arrowstar; 2943 } else if (Char == '>') { // -> 2944 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2945 Kind = tok::arrow; 2946 } else if (Char == '=') { // -= 2947 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2948 Kind = tok::minusequal; 2949 } else { 2950 Kind = tok::minus; 2951 } 2952 break; 2953 case '~': 2954 Kind = tok::tilde; 2955 break; 2956 case '!': 2957 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 2958 Kind = tok::exclaimequal; 2959 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2960 } else { 2961 Kind = tok::exclaim; 2962 } 2963 break; 2964 case '/': 2965 // 6.4.9: Comments 2966 Char = getCharAndSize(CurPtr, SizeTmp); 2967 if (Char == '/') { // Line comment. 2968 // Even if Line comments are disabled (e.g. in C89 mode), we generally 2969 // want to lex this as a comment. There is one problem with this though, 2970 // that in one particular corner case, this can change the behavior of the 2971 // resultant program. For example, In "foo //**/ bar", C89 would lex 2972 // this as "foo / bar" and languages with Line comments would lex it as 2973 // "foo". Check to see if the character after the second slash is a '*'. 2974 // If so, we will lex that as a "/" instead of the start of a comment. 2975 // However, we never do this in -traditional-cpp mode. 2976 if ((LangOpts.LineComment || 2977 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') && 2978 !LangOpts.TraditionalCPP) { 2979 if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) 2980 return; // There is a token to return. 2981 2982 // It is common for the tokens immediately after a // comment to be 2983 // whitespace (indentation for the next line). Instead of going through 2984 // the big switch, handle it efficiently now. 
2985 goto SkipIgnoredUnits; 2986 } 2987 } 2988 2989 if (Char == '*') { // /**/ comment. 2990 if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) 2991 return; // There is a token to return. 2992 goto LexNextToken; // GCC isn't tail call eliminating. 2993 } 2994 2995 if (Char == '=') { 2996 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2997 Kind = tok::slashequal; 2998 } else { 2999 Kind = tok::slash; 3000 } 3001 break; 3002 case '%': 3003 Char = getCharAndSize(CurPtr, SizeTmp); 3004 if (Char == '=') { 3005 Kind = tok::percentequal; 3006 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3007 } else if (LangOpts.Digraphs && Char == '>') { 3008 Kind = tok::r_brace; // '%>' -> '}' 3009 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3010 } else if (LangOpts.Digraphs && Char == ':') { 3011 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3012 Char = getCharAndSize(CurPtr, SizeTmp); 3013 if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') { 3014 Kind = tok::hashhash; // '%:%:' -> '##' 3015 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3016 SizeTmp2, Result); 3017 } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize 3018 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3019 if (!isLexingRawMode()) 3020 Diag(BufferPtr, diag::ext_charize_microsoft); 3021 Kind = tok::hashat; 3022 } else { // '%:' -> '#' 3023 // We parsed a # character. If this occurs at the start of the line, 3024 // it's actually the start of a preprocessing directive. Callback to 3025 // the preprocessor to handle it. 3026 // FIXME: -fpreprocessed mode?? 
3027 if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) 3028 goto HandleDirective; 3029 3030 Kind = tok::hash; 3031 } 3032 } else { 3033 Kind = tok::percent; 3034 } 3035 break; 3036 case '<': 3037 Char = getCharAndSize(CurPtr, SizeTmp); 3038 if (ParsingFilename) { 3039 return LexAngledStringLiteral(Result, CurPtr); 3040 } else if (Char == '<') { 3041 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 3042 if (After == '=') { 3043 Kind = tok::lesslessequal; 3044 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3045 SizeTmp2, Result); 3046 } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) { 3047 // If this is actually a '<<<<<<<' version control conflict marker, 3048 // recognize it as such and recover nicely. 3049 goto LexNextToken; 3050 } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) { 3051 // If this is '<<<<' and we're in a Perforce-style conflict marker, 3052 // ignore it. 3053 goto LexNextToken; 3054 } else if (LangOpts.CUDA && After == '<') { 3055 Kind = tok::lesslessless; 3056 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3057 SizeTmp2, Result); 3058 } else { 3059 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3060 Kind = tok::lessless; 3061 } 3062 } else if (Char == '=') { 3063 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3064 Kind = tok::lessequal; 3065 } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '[' 3066 if (LangOpts.CPlusPlus0x && 3067 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') { 3068 // C++0x [lex.pptoken]p3: 3069 // Otherwise, if the next three characters are <:: and the subsequent 3070 // character is neither : nor >, the < is treated as a preprocessor 3071 // token by itself and not as the first character of the alternative 3072 // token <:. 
3073 unsigned SizeTmp3; 3074 char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3); 3075 if (After != ':' && After != '>') { 3076 Kind = tok::less; 3077 if (!isLexingRawMode()) 3078 Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon); 3079 break; 3080 } 3081 } 3082 3083 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3084 Kind = tok::l_square; 3085 } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{' 3086 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3087 Kind = tok::l_brace; 3088 } else { 3089 Kind = tok::less; 3090 } 3091 break; 3092 case '>': 3093 Char = getCharAndSize(CurPtr, SizeTmp); 3094 if (Char == '=') { 3095 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3096 Kind = tok::greaterequal; 3097 } else if (Char == '>') { 3098 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 3099 if (After == '=') { 3100 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3101 SizeTmp2, Result); 3102 Kind = tok::greatergreaterequal; 3103 } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) { 3104 // If this is actually a '>>>>' conflict marker, recognize it as such 3105 // and recover nicely. 3106 goto LexNextToken; 3107 } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) { 3108 // If this is '>>>>>>>' and we're in a conflict marker, ignore it. 
3109 goto LexNextToken; 3110 } else if (LangOpts.CUDA && After == '>') { 3111 Kind = tok::greatergreatergreater; 3112 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3113 SizeTmp2, Result); 3114 } else { 3115 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3116 Kind = tok::greatergreater; 3117 } 3118 3119 } else { 3120 Kind = tok::greater; 3121 } 3122 break; 3123 case '^': 3124 Char = getCharAndSize(CurPtr, SizeTmp); 3125 if (Char == '=') { 3126 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3127 Kind = tok::caretequal; 3128 } else { 3129 Kind = tok::caret; 3130 } 3131 break; 3132 case '|': 3133 Char = getCharAndSize(CurPtr, SizeTmp); 3134 if (Char == '=') { 3135 Kind = tok::pipeequal; 3136 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3137 } else if (Char == '|') { 3138 // If this is '|||||||' and we're in a conflict marker, ignore it. 3139 if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1)) 3140 goto LexNextToken; 3141 Kind = tok::pipepipe; 3142 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3143 } else { 3144 Kind = tok::pipe; 3145 } 3146 break; 3147 case ':': 3148 Char = getCharAndSize(CurPtr, SizeTmp); 3149 if (LangOpts.Digraphs && Char == '>') { 3150 Kind = tok::r_square; // ':>' -> ']' 3151 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3152 } else if (LangOpts.CPlusPlus && Char == ':') { 3153 Kind = tok::coloncolon; 3154 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3155 } else { 3156 Kind = tok::colon; 3157 } 3158 break; 3159 case ';': 3160 Kind = tok::semi; 3161 break; 3162 case '=': 3163 Char = getCharAndSize(CurPtr, SizeTmp); 3164 if (Char == '=') { 3165 // If this is '====' and we're in a conflict marker, ignore it. 
3166 if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1)) 3167 goto LexNextToken; 3168 3169 Kind = tok::equalequal; 3170 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3171 } else { 3172 Kind = tok::equal; 3173 } 3174 break; 3175 case ',': 3176 Kind = tok::comma; 3177 break; 3178 case '#': 3179 Char = getCharAndSize(CurPtr, SizeTmp); 3180 if (Char == '#') { 3181 Kind = tok::hashhash; 3182 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3183 } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize 3184 Kind = tok::hashat; 3185 if (!isLexingRawMode()) 3186 Diag(BufferPtr, diag::ext_charize_microsoft); 3187 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3188 } else { 3189 // We parsed a # character. If this occurs at the start of the line, 3190 // it's actually the start of a preprocessing directive. Callback to 3191 // the preprocessor to handle it. 3192 // FIXME: -fpreprocessed mode?? 3193 if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) 3194 goto HandleDirective; 3195 3196 Kind = tok::hash; 3197 } 3198 break; 3199 3200 case '@': 3201 // Objective C support. 3202 if (CurPtr[-1] == '@' && LangOpts.ObjC1) 3203 Kind = tok::at; 3204 else 3205 Kind = tok::unknown; 3206 break; 3207 3208 case '\\': 3209 // FIXME: UCN's. 3210 // FALL THROUGH. 3211 default: 3212 Kind = tok::unknown; 3213 break; 3214 } 3215 3216 // Notify MIOpt that we read a non-whitespace/non-comment token. 3217 MIOpt.ReadToken(); 3218 3219 // Update the location of token as well as BufferPtr. 3220 FormTokenWithChars(Result, CurPtr, Kind); 3221 return; 3222 3223HandleDirective: 3224 // We parsed a # character and it's the start of a preprocessing directive. 3225 3226 FormTokenWithChars(Result, CurPtr, tok::hash); 3227 PP->HandleDirective(Result); 3228 3229 // As an optimization, if the preprocessor didn't switch lexers, tail 3230 // recurse. 3231 if (PP->isCurrentLexer(this)) { 3232 // Start a new token. 
If this is a #include or something, the PP may 3233 // want us starting at the beginning of the line again. If so, set 3234 // the StartOfLine flag and clear LeadingSpace. 3235 if (IsAtStartOfLine) { 3236 Result.setFlag(Token::StartOfLine); 3237 Result.clearFlag(Token::LeadingSpace); 3238 IsAtStartOfLine = false; 3239 } 3240 goto LexNextToken; // GCC isn't tail call eliminating. 3241 } 3242 3243 if (PreprocessorLexer *PPLex = PP->getCurrentLexer()) { 3244 // If we #include something that contributes no tokens at all, return with 3245 // a tok::included_eof instead of recursively continuing lexing. 3246 // This avoids a stack overflow with a sequence of many empty #includes. 3247 PPLex->setEnableIncludedEOF(true); 3248 PP->Lex(Result); 3249 if (Result.isNot(tok::included_eof)) { 3250 if (Result.isNot(tok::eof) && Result.isNot(tok::eod)) 3251 PPLex->setEnableIncludedEOF(false); 3252 return; 3253 } 3254 if (PP->isCurrentLexer(this)) 3255 goto LexNextToken; 3256 } 3257 3258 return PP->Lex(Result); 3259} 3260