Lexer.cpp revision 8527b71b1944b155a2bd60ec364d700299bc3ff7
//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//
//
// TODO: GCC Diagnostics emitted by the lexer:
// PEDWARN: (form feed|vertical tab) in preprocessing directive
//
// Universal characters, unicode, char mapping:
// WARNING: `%.*s' is not in NFKC
// WARNING: `%.*s' is not in NFC
//
// Other:
// TODO: Options to support:
//    -fexec-charset,-fwide-exec-charset
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cctype>
using namespace clang;

// Forward declaration: lazily populates the CharInfo[] classification table
// (defined in the "Character information" section below).  Called from both
// Lexer constructors so the table is ready before any lexing starts.
static void InitCharacterInfo();

//===----------------------------------------------------------------------===//
// Token Class Implementation
//===----------------------------------------------------------------------===//

/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
/// Only plain identifiers can be ObjC @-keywords; any other token kind
/// answers false without consulting the identifier table.
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
  return is(tok::identifier) &&
         getIdentifierInfo()->getObjCKeywordID() == objcKey;
}

/// getObjCKeywordID - Return the ObjC keyword kind.  Tokens without
/// identifier info (literals, punctuation, ...) map to objc_not_keyword.
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
  IdentifierInfo *specId = getIdentifierInfo();
  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}


//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//


/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process.  This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(SourceLocation fileloc, Preprocessor &pp,
             const char *BufStart, const char *BufEnd)
  : FileLoc(fileloc), PP(&pp), Features(pp.getLangOptions()) {

  SourceManager &SourceMgr = PP->getSourceManager();
  unsigned InputFileID = SourceMgr.getPhysicalLoc(FileLoc).getFileID();
  const llvm::MemoryBuffer *InputFile = SourceMgr.getBuffer(InputFileID);

  Is_PragmaLexer = false;
  InitCharacterInfo();

  // BufferStart must always be InputFile->getBufferStart().
  BufferStart = InputFile->getBufferStart();

  // BufferPtr and BufferEnd can start out somewhere inside the current buffer.
  // If unspecified, they starts at the start/end of the buffer.
  BufferPtr = BufStart ? BufStart : BufferStart;
  BufferEnd = BufEnd ? BufEnd : InputFile->getBufferEnd();

  // The fast paths throughout this file scan until they hit a NUL byte rather
  // than comparing against BufferEnd, so the buffer must be NUL-terminated.
  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode.  Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to keeping comments if requested.
  KeepCommentMode = PP->getCommentRetentionState();
}

/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
             const char *BufStart, const char *BufEnd,
             const llvm::MemoryBuffer *FromFile)
  : FileLoc(fileloc), PP(0), Features(features) {   // PP == 0: no preprocessor in raw mode.
  Is_PragmaLexer = false;
  InitCharacterInfo();

  // If a MemoryBuffer was specified, use its start as BufferStart. This affects
  // the source location objects produced by this lexer.
  BufferStart = FromFile ? FromFile->getBufferStart() : BufStart;
  BufferPtr = BufStart;
  BufferEnd = BufEnd;

  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We *are* in raw mode.
  LexingRawMode = true;

  // Never keep comments in raw mode.
  KeepCommentMode = false;
}


/// Stringify - Convert the specified string into a C string, with surrounding
/// ""'s, and with escaped \ and " characters.
/// NOTE(review): the in-place insert makes this O(n^2) worst case for strings
/// that are mostly backslashes/quotes; fine for the short strings this is
/// presumably used on — confirm against callers before optimizing.
std::string Lexer::Stringify(const std::string &Str, bool Charify) {
  std::string Result = Str;
  char Quote = Charify ? '\'' : '"';
  for (unsigned i = 0, e = Result.size(); i != e; ++i) {
    if (Result[i] == '\\' || Result[i] == Quote) {
      Result.insert(Result.begin()+i, '\\');
      ++i; ++e;   // Skip the just-inserted backslash and account for growth.
    }
  }
  return Result;
}

/// Stringify - Convert the specified string into a C string by escaping '\'
/// and " characters.
/// This does not add surrounding ""'s to the string.
void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
    if (Str[i] == '\\' || Str[i] == '"') {
      Str.insert(Str.begin()+i, '\\');
      ++i; ++e;   // Skip the just-inserted backslash and account for growth.
    }
  }
}


/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file.  If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM) {
  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getLogicalLoc(Loc);

  const char *StrData = SM.getCharacterData(Loc);

  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
  // all obviously single-char tokens.  This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.


  const char *BufEnd = SM.getBufferData(Loc.getFileID()).second;

  // Create a langops struct and enable trigraphs.  This is sufficient for
  // measuring tokens.
  LangOptions LangOpts;
  LangOpts.Trigraphs = true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(Loc, LangOpts, StrData, BufEnd);
  Token TheTok;
  TheLexer.LexFromRawLexer(TheTok);
  return TheTok.getLength();
}

//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//

// Classification table indexed by byte value; entries are bitmasks built from
// the CHAR_* flags below.  Filled in lazily by InitCharacterInfo().
static unsigned char CharInfo[256];

enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'
  CHAR_LETTER   = 0x04,  // a-z,A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20   // .
};

static void InitCharacterInfo() {
  // One-shot guard: the table only needs to be built once per process.
  static bool isInited = false;
  if (isInited) return;
  isInited = true;

  // Initialize the CharInfo table.
  // TODO: statically initialize this.
  CharInfo[(int)' '] = CharInfo[(int)'\t'] =
  CharInfo[(int)'\f'] = CharInfo[(int)'\v'] = CHAR_HORZ_WS;
  CharInfo[(int)'\n'] = CharInfo[(int)'\r'] = CHAR_VERT_WS;

  CharInfo[(int)'_'] = CHAR_UNDER;
  CharInfo[(int)'.'] = CHAR_PERIOD;
  for (unsigned i = 'a'; i <= 'z'; ++i)
    CharInfo[i] = CharInfo[i+'A'-'a'] = CHAR_LETTER;   // Lower and upper case.
  for (unsigned i = '0'; i <= '9'; ++i)
    CharInfo[i] = CHAR_NUMBER;
}

/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false;
}

/// isHorizontalWhitespace - Return true if this character is horizontal
/// whitespace: ' ', '\t', '\f', '\v'.  Note that this returns false for '\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
  return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
}

/// isWhitespace - Return true if this character is horizontal or vertical
/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.  Note that this returns false
/// for '\0'.
static inline bool isWhitespace(unsigned char c) {
  return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false;
}

/// isNumberBody - Return true if this is the body character of an
/// preprocessing number, which is [a-zA-Z0-9_.].
static inline bool isNumberBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ?
    true : false;
}


//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//

/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
/// lexer buffer was all instantiated at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method.  Do not allow it to be inlined.
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo) DISABLE_INLINE;
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo) {
  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the instantiation location of FileLoc with the
  // physical location.
  SourceManager &SourceMgr = PP.getSourceManager();

  // Create a new SLoc which is expanded from logical(FileLoc) but whose
  // characters come from phys(FileLoc)+Offset.
  SourceLocation VirtLoc = SourceMgr.getLogicalLoc(FileLoc);
  SourceLocation PhysLoc = SourceMgr.getPhysicalLoc(FileLoc);
  PhysLoc = SourceLocation::getFileLoc(PhysLoc.getFileID(), CharNo);
  return SourceMgr.getInstantiationLoc(PhysLoc, VirtLoc);
}

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
SourceLocation Lexer::getSourceLocation(const char *Loc) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return SourceLocation::getFileLoc(FileLoc.getFileID(), CharNo);

  // Mapped-buffer case (e.g. _Pragma): requires a preprocessor to consult.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo);
}

/// Diag - Forwarding function for diagnostics.  This translate a source
/// position in the current buffer into a SourceLocation object for rendering.
/// NOTE(review): in raw mode only notes/warnings/extensions are suppressed;
/// an error reaching here with PP == 0 would dereference null — presumably
/// raw-mode callers never emit errors through this path; confirm.
void Lexer::Diag(const char *Loc, unsigned DiagID,
                 const std::string &Msg) const {
  if (LexingRawMode && Diagnostic::isBuiltinNoteWarningOrExtension(DiagID))
    return;
  PP->Diag(getSourceLocation(Loc), DiagID, Msg);
}
void Lexer::Diag(SourceLocation Loc, unsigned DiagID,
                 const std::string &Msg) const {
  if (LexingRawMode && Diagnostic::isBuiltinNoteWarningOrExtension(DiagID))
    return;
  PP->Diag(Loc, DiagID, Msg);
}


//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}

/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
/// return the result character.  Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  // L may be null (no-warn callers); in that case just decode silently.
  if (Res && L) {
    if (!L->getFeatures().Trigraphs) {
      L->Diag(CP-2, diag::trigraph_ignored);   // CP-2 points at the "??".
      return 0;
    } else {
      L->Diag(CP-2, diag::trigraph_converted, std::string()+Res);
    }
  }
  return Res;
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: C++ UCN's?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
          // Remember that this token needs to be cleaned.
          if (Tok) Tok->setFlag(Token::NeedsCleaning);

          // Warn if there was whitespace between the backslash and newline.
          if (SizeTmp != 1 && Tok)
            Diag(Ptr, diag::backslash_newline_space);

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;
          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlow(Ptr, Size, Tok);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // A trigraph can decode to '\\', which may itself start an escaped
      // newline; re-enter the slash handling above.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}


/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;

          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C)) {
    C = *CurPtr++;
  }
  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: UCNs.
  if (C != '\\' && C != '?'
      && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr);
    Result.setKind(tok::identifier);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode) return;

    // Fill in Result.IdentifierInfo, looking up the identifier in the
    // identifier table.
    PP->LookUpIdentifierInfo(Result, IdStart);

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    return PP->HandleIdentifier(Result);
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    // Inner loop: consume the common run of plain identifier characters
    // before re-checking for '$' in the outer loop.
    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: UCNs.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}


/// LexNumericConstant - Lex the remainder of a integer or floating point
/// constant. From[-1] is the first character lexed.  Return the end of the
/// constant.
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isNumberBody(C)) { // FIXME: UCNs?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  // Recurses so the sign and following digits join the same pp-number.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant, continue (p exponent, e.g. 0x1.8p3).
  if (Features.HexFloats &&
      (C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  Result.setKind(tok::numeric_constant);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr);
}

/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L".
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
      Result.setKind(tok::unknown);
      FormTokenWithChars(Result, CurPtr-1);
      return;
    } else if (C == 0) {
      // Embedded NUL in the middle of the buffer (not EOF): remember it so we
      // can warn after the string is fully lexed.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  Result.setKind(Wide ? tok::wide_string_literal : tok::string_literal);

  // Update the location of the token as well as the BufferPtr instance var.
  FormTokenWithChars(Result, CurPtr);
}

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
      Result.setKind(tok::unknown);
      FormTokenWithChars(Result, CurPtr-1);
      return;
    } else if (C == 0) {
      // Embedded NUL (not EOF): remember for a post-lex warning.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  Result.setKind(tok::angle_string_literal);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr);
}


/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.
void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this character contain the \0 character?

  // Handle the common case of 'x' and '\y' efficiently.
  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    if (!LexingRawMode) Diag(BufferPtr, diag::err_empty_character);
    Result.setKind(tok::unknown);
    FormTokenWithChars(Result, CurPtr);
    return;
  } else if (C == '\\') {
    // Skip the escaped character.
    // FIXME: UCN's.
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
    // Fast path: single-character constant, closing quote follows directly.
    ++CurPtr;
  } else {
    // Fall back on generic code for embedded nulls, newlines, wide chars.
    do {
      // Skip escaped characters.
      if (C == '\\') {
        // Skip the escaped character.
        C = getAndAdvanceChar(CurPtr, Result);
      } else if (C == '\n' || C == '\r' ||             // Newline.
                 (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
        if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_char);
        Result.setKind(tok::unknown);
        FormTokenWithChars(Result, CurPtr-1);
        return;
      } else if (C == 0) {
        NulCharacter = CurPtr-1;
      }
      C = getAndAdvanceChar(CurPtr, Result);
    } while (C != '\'');
  }

  if (NulCharacter) Diag(NulCharacter, diag::null_in_char);

  Result.setKind(tok::char_constant);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr);
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
void Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
  // Whitespace - Skip it, then return the token after the whitespace.
  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
  while (1) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we something other than whitespace, we're done.
    if (Char != '\n' && Char != '\r')
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return;
    }

    // ok, but handle newline.
    // The returned token is at the start of the line.
    Result.setFlag(Token::StartOfLine);
    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);
    Char = *++CurPtr;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  if (PrevChar != '\n' && PrevChar != '\r')
    Result.setFlag(Token::LeadingSpace);

  BufferPtr = CurPtr;
}

// SkipBCPLComment - We have just read the // characters from input.  Skip
// until we find the newline character that terminates the comment.  Then
// update BufferPtr and return.
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment) {
    Diag(BufferPtr, diag::ext_bcpl_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: Speedup BCPL comment lexing.  Just scan for a \n or \r character.
    // If we find a \n character, scan backwards, checking to see if it's an
    // escaped newline, like we do for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.
    const char *OldPtr = CurPtr;
    C = getAndAdvanceChar(CurPtr, Result);

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isspace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isspace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
          break;
        }
    }

    // getAndAdvanceChar can step one past the terminating NUL at EOF; back up.
    if (CurPtr == BufferEnd+1) { --CurPtr; break; }
  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.

  // If we are returning comments as tokens, return this comment as a token.
  if (KeepCommentMode)
    return SaveBCPLComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return true;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return true;
}

/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
/// an appropriate way and return it.
bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
  Result.setKind(tok::comment);
  FormTokenWithChars(Result, CurPtr);

  // If this BCPL-style comment is in a macro definition, transmogrify it into
  // a C-style block comment.
  if (ParsingPreprocessorDirective) {
    std::string Spelling = PP->getSpelling(Result);
    assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
    Spelling[1] = '*';   // Change prefix to "/*".
    Spelling += "*/";    // add suffix.

    // Retarget the token at the scratch-buffer copy of the rewritten text.
    Result.setLocation(PP->CreateString(&Spelling[0], Spelling.size(),
                                        Result.getLocation()));
    Result.setLength(Spelling.size());
  }
  return false;
}

/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
/// character (either \n or \r) is part of an escaped newline sequence.  Issue
/// a diagnostic if so.  We know that the newline is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the slash and newline.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
879 if (*CurPtr == '\\') { 880 if (CurPtr[-1] != '*') return false; 881 } else { 882 // It isn't a slash, is it the ?? / trigraph? 883 if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' || 884 CurPtr[-3] != '*') 885 return false; 886 887 // This is the trigraph ending the comment. Emit a stern warning! 888 CurPtr -= 2; 889 890 // If no trigraphs are enabled, warn that we ignored this trigraph and 891 // ignore this * character. 892 if (!L->getFeatures().Trigraphs) { 893 L->Diag(CurPtr, diag::trigraph_ignored_block_comment); 894 return false; 895 } 896 L->Diag(CurPtr, diag::trigraph_ends_block_comment); 897 } 898 899 // Warn about having an escaped newline between the */ characters. 900 L->Diag(CurPtr, diag::escaped_newline_block_comment_end); 901 902 // If there was space between the backslash and newline, warn about it. 903 if (HasSpace) L->Diag(CurPtr, diag::backslash_newline_space); 904 905 return true; 906} 907 908#ifdef __SSE2__ 909#include <emmintrin.h> 910#elif __ALTIVEC__ 911#include <altivec.h> 912#undef bool 913#endif 914 915/// SkipBlockComment - We have just read the /* characters from input. Read 916/// until we find the */ characters that terminate the comment. Note that we 917/// don't bother decoding trigraphs or escaped newlines in block comments, 918/// because they cannot cause the comment to end. The only thing that can 919/// happen is the comment could end with an escaped newline between the */ end 920/// of comment. 921bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) { 922 // Scan one character past where we should, looking for a '/' character. Once 923 // we find it, check to see if it was preceeded by a *. This common 924 // optimization helps people who like to put a lot of * characters in their 925 // comments. 926 927 // The first character we get with newlines and trigraphs skipped to handle 928 // the degenerate /*/ case below correctly if the * has an escaped newline 929 // after it. 
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  // NUL at one-past-the-end is the EOF sentinel: /* with nothing after it.
  if (C == 0 && CurPtr == BufferEnd+1) {
    Diag(BufferPtr, diag::err_unterminated_block_comment);
    BufferPtr = CurPtr-1;
    return true;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (1) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.  Only use the vectorized scan when we
    // are far enough from the end of the buffer to read full chunks safely.
    if (CurPtr + 24 < BufferEnd) {
      // While not aligned to a 16-byte boundary.  This lets the SIMD loop
      // below use aligned loads.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Compare 16 bytes at a time against '/'; any nonzero movemask means a
      // slash is somewhere in this chunk.
      __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/',
                                     '/', '/', '/', '/', '/', '/', '/', '/');
      while (CurPtr+16 <= BufferEnd &&
             _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0)
        CurPtr += 16;
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.  '\0' stops us either at an embedded null
    // or at the EOF sentinel (disambiguated below).
    while (C != '/' && C != '\0')
      C = *CurPtr++;

  FoundSlash:
    if (C == '/') {
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        Diag(CurPtr-1, diag::nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      BufferPtr = CurPtr-1;
      return true;
    }
    C = *CurPtr++;
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (KeepCommentMode) {
    Result.setKind(tok::comment);
    FormTokenWithChars(Result, CurPtr);
    return false;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.  (SkipWhitespace updates BufferPtr itself.)
  if (isHorizontalWhitespace(*CurPtr)) {
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr+1);
    return true;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return true;
}

//===----------------------------------------------------------------------===//
// Primary Lexing Entry Points
//===----------------------------------------------------------------------===//

/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
/// (potentially) macro expand the filename.  On exit FilenameTok holds the
/// lexed filename token; an eom token means the filename was missing and a
/// diagnostic has been emitted.
void Lexer::LexIncludeFilename(Token &FilenameTok) {
  assert(ParsingPreprocessorDirective &&
         ParsingFilename == false &&
         "Must be in a preprocessing directive!");

  // We are now parsing a filename!  This changes how '<...>' is lexed (see the
  // '<' case in LexTokenInternal, which calls LexAngledStringLiteral).
  ParsingFilename = true;

  // Lex the filename.
  Lex(FilenameTok);

  // We should have obtained the filename now.
  ParsingFilename = false;

  // No filename?
  if (FilenameTok.is(tok::eom))
    Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
}

/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string.  This switches the lexer out of directive mode.
std::string Lexer::ReadToEndOfLine() {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  std::string Result;
  Token Tmp;

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (1) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      Result += Char;
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        // Nope, normal character, continue.
        Result += Char;
        break;
      }
      // FALL THROUGH.
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOM transition.
      Lex(Tmp);
      assert(Tmp.is(tok::eom) && "Unexpected token!");

      // Finally, we're done, return the string we found.
      return Result;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    Result.setKind(tok::eom);
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr);

    // Restore comment saving mode, in case it was disabled for directive.
    KeepCommentMode = PP->getCommentRetentionState();
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (LexingRawMode) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd);
    Result.setKind(tok::eof);
    return true;
  }

  // Otherwise, issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    Diag(ConditionalStack.back().IfLoc, diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
    Diag(BufferEnd, diag::ext_no_newline_eof);

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.  It may pop the include stack
  // (and delete this lexer); returns false when the caller should Lex again.
  return PP->HandleEndOfFile(Result);
}

/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
/// the specified lexer will return a tok::l_paren token, 0 if it is something
/// else and 2 if there are no more tokens in the buffer controlled by the
/// lexer.
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  // Switch to 'skipping' mode.  This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
  const char *TmpBufferPtr = BufferPtr;

  Token Tok;
  Tok.startToken();
  LexTokenInternal(Tok);

  // Restore state that may have changed.
  BufferPtr = TmpBufferPtr;

  // Restore the lexer back to non-skipping mode.
  LexingRawMode = false;

  if (Tok.is(tok::eof))
    return 2;
  return Tok.is(tok::l_paren);
}


/// LexTokenInternal - This implements a simple C family lexer.  It is an
/// extremely performance critical piece of code.  This assumes that the buffer
/// has a null character at the end of the file.  Return true if an error
/// occurred and compilation should terminate, false if normal.  This returns a
/// preprocessing token, not a normal token, as such, it is an internal
/// interface.  It assumes that the Flags of result have been cleared before
/// calling this.
void Lexer::LexTokenInternal(Token &Result) {
LexNextToken:
  // New token, can't need cleaning yet.
  Result.clearFlag(Token::NeedsCleaning);
  Result.setIdentifierInfo(0);

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace is very common between tokens.
  // Handle it inline here rather than taking the SkipWhitespace path.
  if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
    ++CurPtr;
    while ((*CurPtr == ' ') || (*CurPtr == '\t'))
      ++CurPtr;
    BufferPtr = CurPtr;
    Result.setFlag(Token::LeadingSpace);
  }

  unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.

  // Read a character, advancing over it.
  char Char = getAndAdvanceChar(CurPtr, Result);
  switch (Char) {
  case 0:  // Null.
    // Found end of file?
    if (CurPtr-1 == BufferEnd) {
      // Read the PP instance variable into an automatic variable, because
      // LexEndOfFile will often delete 'this'.
      Preprocessor *PPCache = PP;
      if (LexEndOfFile(Result, CurPtr-1))  // Retreat back into the file.
        return;   // Got a token to return.
      assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
      return PPCache->Lex(Result);
    }

    // Embedded null character: diagnose it and treat it as whitespace.
    Diag(CurPtr-1, diag::null_in_file);
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr);
    goto LexNextToken;   // GCC isn't tail call eliminating.
  case '\n':
  case '\r':
    // If we are inside a preprocessor directive and we see the end of line,
    // we know we are done with the directive, so return an EOM token.
    if (ParsingPreprocessorDirective) {
      // Done parsing the "line".
      ParsingPreprocessorDirective = false;

      // Restore comment saving mode, in case it was disabled for directive.
      KeepCommentMode = PP->getCommentRetentionState();

      // Since we consumed a newline, we are back at the start of a line.
      IsAtStartOfLine = true;

      Result.setKind(tok::eom);
      break;
    }
    // The returned token is at the start of the line.
    Result.setFlag(Token::StartOfLine);
    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr);
    goto LexNextToken;   // GCC isn't tail call eliminating.
  case ' ':
  case '\t':
  case '\f':
  case '\v':
  SkipHorizontalWhitespace:
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr);

  SkipIgnoredUnits:
    // SkipWhitespace/Skip*Comment advanced BufferPtr; resync CurPtr.
    CurPtr = BufferPtr;

    // If the next token is obviously a // or /* */ comment, skip it efficiently
    // too (without going through the big switch stmt).
    if (CurPtr[0] == '/' && CurPtr[1] == '/' && !KeepCommentMode) {
      SkipBCPLComment(Result, CurPtr+2);
      goto SkipIgnoredUnits;
    } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !KeepCommentMode) {
      SkipBlockComment(Result, CurPtr+2);
      goto SkipIgnoredUnits;
    } else if (isHorizontalWhitespace(*CurPtr)) {
      goto SkipHorizontalWhitespace;
    }
    goto LexNextToken;   // GCC isn't tail call eliminating.

  // C99 6.4.4.1: Integer Constants.
  // C99 6.4.4.2: Floating Constants.
  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexNumericConstant(Result, CurPtr);

  case 'L':   // Identifier (Loony) or wide literal (L'x' or L"xyz").
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    Char = getCharAndSize(CurPtr, SizeTmp);

    // Wide string literal.
    if (Char == '"')
      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                              true);

    // Wide character constant.
    if (Char == '\'')
      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    // FALL THROUGH, treating L like the start of an identifier.

  // C99 6.4.2: Identifiers.
  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
  case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
  case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
  case 'V': case 'W': case 'X': case 'Y': case 'Z':
  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
  case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
  case 'v': case 'w': case 'x': case 'y': case 'z':
  case '_':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexIdentifier(Result, CurPtr);

  case '$':   // $ in identifiers.
    if (Features.DollarIdents) {
      Diag(CurPtr-1, diag::ext_dollar_in_identifier);
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();
      return LexIdentifier(Result, CurPtr);
    }

    Result.setKind(tok::unknown);
    break;

  // C99 6.4.4: Character Constants.
  case '\'':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexCharConstant(Result, CurPtr);

  // C99 6.4.5: String Literals.
  case '"':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexStringLiteral(Result, CurPtr, false);

  // C99 6.4.6: Punctuators.
  case '?':
    Result.setKind(tok::question);
    break;
  case '[':
    Result.setKind(tok::l_square);
    break;
  case ']':
    Result.setKind(tok::r_square);
    break;
  case '(':
    Result.setKind(tok::l_paren);
    break;
  case ')':
    Result.setKind(tok::r_paren);
    break;
  case '{':
    Result.setKind(tok::l_brace);
    break;
  case '}':
    Result.setKind(tok::r_brace);
    break;
  case '.':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char >= '0' && Char <= '9') {
      // ".5" style floating constant.
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();

      return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    } else if (Features.CPlusPlus && Char == '*') {
      Result.setKind(tok::periodstar);
      CurPtr += SizeTmp;
    } else if (Char == '.' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
      Result.setKind(tok::ellipsis);
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else {
      Result.setKind(tok::period);
    }
    break;
  case '&':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '&') {
      Result.setKind(tok::ampamp);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.setKind(tok::ampequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::amp);
    }
    break;
  case '*':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Result.setKind(tok::starequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::star);
    }
    break;
  case '+':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '+') {
      Result.setKind(tok::plusplus);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.setKind(tok::plusequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::plus);
    }
    break;
  case '-':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '-') {
      Result.setKind(tok::minusminus);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '>' && Features.CPlusPlus &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {
      Result.setKind(tok::arrowstar);  // C++ ->*
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else if (Char == '>') {
      Result.setKind(tok::arrow);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.setKind(tok::minusequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::minus);
    }
    break;
  case '~':
    Result.setKind(tok::tilde);
    break;
  case '!':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Result.setKind(tok::exclaimequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::exclaim);
    }
    break;
  case '/':
    // 6.4.9: Comments
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '/') {         // BCPL comment.
      if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) {
        // It is common for the tokens immediately after a // comment to be
        // whitespace (indentation for the next line).  Instead of going
        // through the big switch, handle it efficiently now.
        goto SkipIgnoredUnits;
      }
      return;   // KeepCommentMode
    } else if (Char == '*') {  // /**/ comment.
      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
        goto LexNextToken;   // GCC isn't tail call eliminating.
      return;   // KeepCommentMode
    } else if (Char == '=') {
      Result.setKind(tok::slashequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::slash);
    }
    break;
  case '%':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.setKind(tok::percentequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == '>') {
      Result.setKind(tok::r_brace);    // '%>' -> '}'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == ':') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Char = getCharAndSize(CurPtr, SizeTmp);
      if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
        Result.setKind(tok::hashhash);   // '%:%:' -> '##'
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else if (Char == '@' && Features.Microsoft) {  // %:@ -> #@ -> Charize
        Result.setKind(tok::hashat);
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        Diag(BufferPtr, diag::charize_microsoft_ext);
      } else {
        Result.setKind(tok::hash);       // '%:' -> '#'

        // We parsed a # character.  If this occurs at the start of the line,
        // it's actually the start of a preprocessing directive.  Callback to
        // the preprocessor to handle it.
        // FIXME: -fpreprocessed mode??
        if (Result.isAtStartOfLine() && !LexingRawMode) {
          BufferPtr = CurPtr;
          PP->HandleDirective(Result);

          // As an optimization, if the preprocessor didn't switch lexers, tail
          // recurse.
          if (PP->isCurrentLexer(this)) {
            // Start a new token.  If this is a #include or something, the PP may
            // want us starting at the beginning of the line again.  If so, set
            // the StartOfLine flag.
            if (IsAtStartOfLine) {
              Result.setFlag(Token::StartOfLine);
              IsAtStartOfLine = false;
            }
            goto LexNextToken;   // GCC isn't tail call eliminating.
          }

          return PP->Lex(Result);
        }
      }
    } else {
      Result.setKind(tok::percent);
    }
    break;
  case '<':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (ParsingFilename) {
      // Inside a #include: lex <...> as an angled string literal.
      return LexAngledStringLiteral(Result, CurPtr+SizeTmp);
    } else if (Char == '<' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
      Result.setKind(tok::lesslessequal);
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else if (Char == '<') {
      Result.setKind(tok::lessless);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.setKind(tok::lessequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == ':') {
      Result.setKind(tok::l_square);   // '<:' -> '['
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == '%') {
      Result.setKind(tok::l_brace);    // '<%' -> '{'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::less);
    }
    break;
  case '>':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.setKind(tok::greaterequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '>' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
      Result.setKind(tok::greatergreaterequal);
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else if (Char == '>') {
      Result.setKind(tok::greatergreater);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::greater);
    }
    break;
  case '^':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.setKind(tok::caretequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::caret);
    }
    break;
  case '|':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.setKind(tok::pipeequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '|') {
      Result.setKind(tok::pipepipe);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::pipe);
    }
    break;
  case ':':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Features.Digraphs && Char == '>') {
      Result.setKind(tok::r_square);   // ':>' -> ']'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.CPlusPlus && Char == ':') {
      Result.setKind(tok::coloncolon);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::colon);
    }
    break;
  case ';':
    Result.setKind(tok::semi);
    break;
  case '=':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.setKind(tok::equalequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::equal);
    }
    break;
  case ',':
    Result.setKind(tok::comma);
    break;
  case '#':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '#') {
      Result.setKind(tok::hashhash);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '@' && Features.Microsoft) {  // #@ -> Charize
      Result.setKind(tok::hashat);
      Diag(BufferPtr, diag::charize_microsoft_ext);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.setKind(tok::hash);
      // We parsed a # character.  If this occurs at the start of the line,
      // it's actually the start of a preprocessing directive.  Callback to
      // the preprocessor to handle it.
      // FIXME: -fpreprocessed mode??
      if (Result.isAtStartOfLine() && !LexingRawMode) {
        BufferPtr = CurPtr;
        PP->HandleDirective(Result);

        // As an optimization, if the preprocessor didn't switch lexers, tail
        // recurse.
        if (PP->isCurrentLexer(this)) {
          // Start a new token.  If this is a #include or something, the PP may
          // want us starting at the beginning of the line again.  If so, set
          // the StartOfLine flag.
          if (IsAtStartOfLine) {
            Result.setFlag(Token::StartOfLine);
            IsAtStartOfLine = false;
          }
          goto LexNextToken;   // GCC isn't tail call eliminating.
        }
        return PP->Lex(Result);
      }
    }
    break;

  case '@':
    // Objective C support.  CurPtr[-1] is a literal '@' only if no trigraph or
    // escaped newline was involved in spelling it.
    if (CurPtr[-1] == '@' && Features.ObjC1)
      Result.setKind(tok::at);
    else
      Result.setKind(tok::unknown);
    break;

  case '\\':
    // FIXME: UCN's.
    // FALL THROUGH.
  default:
    Result.setKind(tok::unknown);
    break;
  }

  // Notify MIOpt that we read a non-whitespace/non-comment token.
  MIOpt.ReadToken();

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr);
}