Lexer.cpp revision f7cf85b330bedd2877e1371fb0a83e99751ae162
1//===--- Lexer.cpp - C Language Family Lexer ------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the Lexer and Token interfaces. 11// 12//===----------------------------------------------------------------------===// 13// 14// TODO: GCC Diagnostics emitted by the lexer: 15// PEDWARN: (form feed|vertical tab) in preprocessing directive 16// 17// Universal characters, unicode, char mapping: 18// WARNING: `%.*s' is not in NFKC 19// WARNING: `%.*s' is not in NFC 20// 21// Other: 22// TODO: Options to support: 23// -fexec-charset,-fwide-exec-charset 24// 25//===----------------------------------------------------------------------===// 26 27#include "clang/Lex/Lexer.h" 28#include "clang/Lex/Preprocessor.h" 29#include "clang/Basic/Diagnostic.h" 30#include "clang/Basic/SourceManager.h" 31#include "llvm/Support/Compiler.h" 32#include "llvm/Support/MemoryBuffer.h" 33#include <cctype> 34using namespace clang; 35 36static void InitCharacterInfo(); 37 38//===----------------------------------------------------------------------===// 39// Token Class Implementation 40//===----------------------------------------------------------------------===// 41 42/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier. 43bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const { 44 if (IdentifierInfo *II = getIdentifierInfo()) 45 return II->getObjCKeywordID() == objcKey; 46 return false; 47} 48 49/// getObjCKeywordID - Return the ObjC keyword kind. 50tok::ObjCKeywordKind Token::getObjCKeywordID() const { 51 IdentifierInfo *specId = getIdentifierInfo(); 52 return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword; 53} 54 55 56//===----------------------------------------------------------------------===// 57// Lexer Class Implementation 58//===----------------------------------------------------------------------===// 59 60 61/// Lexer constructor - Create a new lexer object for the specified buffer 62/// with the specified preprocessor managing the lexing process. This lexer 63/// assumes that the associated file buffer and Preprocessor objects will 64/// outlive it, so it doesn't take ownership of either of them. 65Lexer::Lexer(SourceLocation fileloc, Preprocessor &pp, 66 const char *BufStart, const char *BufEnd) 67 : PreprocessorLexer(&pp, fileloc), FileLoc(fileloc), 68 Features(pp.getLangOptions()) { 69 70 SourceManager &SourceMgr = PP->getSourceManager(); 71 unsigned InputFileID = SourceMgr.getSpellingLoc(FileLoc).getFileID(); 72 const llvm::MemoryBuffer *InputFile = SourceMgr.getBuffer(InputFileID); 73 74 Is_PragmaLexer = false; 75 InitCharacterInfo(); 76 77 // BufferStart must always be InputFile->getBufferStart(). 78 BufferStart = InputFile->getBufferStart(); 79 80 // BufferPtr and BufferEnd can start out somewhere inside the current buffer. 81 // If unspecified, they starts at the start/end of the buffer. 82 BufferPtr = BufStart ? BufStart : BufferStart; 83 BufferEnd = BufEnd ? BufEnd : InputFile->getBufferEnd(); 84 85 assert(BufferEnd[0] == 0 && 86 "We assume that the input buffer has a null character at the end" 87 " to simplify lexing!"); 88 89 // Start of the file is a start of line. 90 IsAtStartOfLine = true; 91 92 // We are not after parsing a #. 93 ParsingPreprocessorDirective = false; 94 95 // We are not after parsing #include. 96 ParsingFilename = false; 97 98 // We are not in raw mode. Raw mode disables diagnostics and interpretation 99 // of tokens (e.g. identifiers, thus disabling macro expansion). It is used 100 // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block 101 // or otherwise skipping over tokens. 102 LexingRawMode = false; 103 104 // Default to keeping comments if the preprocessor wants them. 105 ExtendedTokenMode = 0; 106 SetCommentRetentionState(PP->getCommentRetentionState()); 107} 108 109/// Lexer constructor - Create a new raw lexer object. This object is only 110/// suitable for calls to 'LexRawToken'. This lexer assumes that the text 111/// range will outlive it, so it doesn't take ownership of it. 112Lexer::Lexer(SourceLocation fileloc, const LangOptions &features, 113 const char *BufStart, const char *BufEnd, 114 const llvm::MemoryBuffer *FromFile) 115 : PreprocessorLexer(), FileLoc(fileloc), 116 Features(features) { 117 118 Is_PragmaLexer = false; 119 InitCharacterInfo(); 120 121 // If a MemoryBuffer was specified, use its start as BufferStart. This affects 122 // the source location objects produced by this lexer. 123 BufferStart = FromFile ? FromFile->getBufferStart() : BufStart; 124 BufferPtr = BufStart; 125 BufferEnd = BufEnd; 126 127 assert(BufferEnd[0] == 0 && 128 "We assume that the input buffer has a null character at the end" 129 " to simplify lexing!"); 130 131 // Start of the file is a start of line. 132 IsAtStartOfLine = true; 133 134 // We are not after parsing a #. 135 ParsingPreprocessorDirective = false; 136 137 // We are not after parsing #include. 138 ParsingFilename = false; 139 140 // We *are* in raw mode. 141 LexingRawMode = true; 142 143 // Default to not keeping comments in raw mode. 144 ExtendedTokenMode = 0; 145} 146 147 148/// Stringify - Convert the specified string into a C string, with surrounding 149/// ""'s, and with escaped \ and " characters. 150std::string Lexer::Stringify(const std::string &Str, bool Charify) { 151 std::string Result = Str; 152 char Quote = Charify ? '\'' : '"'; 153 for (unsigned i = 0, e = Result.size(); i != e; ++i) { 154 if (Result[i] == '\\' || Result[i] == Quote) { 155 Result.insert(Result.begin()+i, '\\'); 156 ++i; ++e; 157 } 158 } 159 return Result; 160} 161 162/// Stringify - Convert the specified string into a C string by escaping '\' 163/// and " characters. This does not add surrounding ""'s to the string. 164void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) { 165 for (unsigned i = 0, e = Str.size(); i != e; ++i) { 166 if (Str[i] == '\\' || Str[i] == '"') { 167 Str.insert(Str.begin()+i, '\\'); 168 ++i; ++e; 169 } 170 } 171} 172 173 174/// MeasureTokenLength - Relex the token at the specified location and return 175/// its length in bytes in the input file. If the token needs cleaning (e.g. 176/// includes a trigraph or an escaped newline) then this count includes bytes 177/// that are part of that. 178unsigned Lexer::MeasureTokenLength(SourceLocation Loc, 179 const SourceManager &SM) { 180 // If this comes from a macro expansion, we really do want the macro name, not 181 // the token this macro expanded to. 182 Loc = SM.getInstantiationLoc(Loc); 183 184 const char *StrData = SM.getCharacterData(Loc); 185 186 // TODO: this could be special cased for common tokens like identifiers, ')', 187 // etc to make this faster, if it mattered. Just look at StrData[0] to handle 188 // all obviously single-char tokens. This could use 189 // Lexer::isObviouslySimpleCharacter for example to handle identifiers or 190 // something. 191 192 193 const char *BufEnd = SM.getBufferData(Loc.getFileID()).second; 194 195 // Create a langops struct and enable trigraphs. This is sufficient for 196 // measuring tokens. 197 LangOptions LangOpts; 198 LangOpts.Trigraphs = true; 199 200 // Create a lexer starting at the beginning of this token. 201 Lexer TheLexer(Loc, LangOpts, StrData, BufEnd); 202 Token TheTok; 203 TheLexer.LexFromRawLexer(TheTok); 204 return TheTok.getLength(); 205} 206 207//===----------------------------------------------------------------------===// 208// Character information. 209//===----------------------------------------------------------------------===// 210 211static unsigned char CharInfo[256]; 212 213enum { 214 CHAR_HORZ_WS = 0x01, // ' ', '\t', '\f', '\v'. Note, no '\0' 215 CHAR_VERT_WS = 0x02, // '\r', '\n' 216 CHAR_LETTER = 0x04, // a-z,A-Z 217 CHAR_NUMBER = 0x08, // 0-9 218 CHAR_UNDER = 0x10, // _ 219 CHAR_PERIOD = 0x20 // . 220}; 221 222static void InitCharacterInfo() { 223 static bool isInited = false; 224 if (isInited) return; 225 isInited = true; 226 227 // Intiialize the CharInfo table. 228 // TODO: statically initialize this. 229 CharInfo[(int)' '] = CharInfo[(int)'\t'] = 230 CharInfo[(int)'\f'] = CharInfo[(int)'\v'] = CHAR_HORZ_WS; 231 CharInfo[(int)'\n'] = CharInfo[(int)'\r'] = CHAR_VERT_WS; 232 233 CharInfo[(int)'_'] = CHAR_UNDER; 234 CharInfo[(int)'.'] = CHAR_PERIOD; 235 for (unsigned i = 'a'; i <= 'z'; ++i) 236 CharInfo[i] = CharInfo[i+'A'-'a'] = CHAR_LETTER; 237 for (unsigned i = '0'; i <= '9'; ++i) 238 CharInfo[i] = CHAR_NUMBER; 239} 240 241/// isIdentifierBody - Return true if this is the body character of an 242/// identifier, which is [a-zA-Z0-9_]. 243static inline bool isIdentifierBody(unsigned char c) { 244 return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false; 245} 246 247/// isHorizontalWhitespace - Return true if this character is horizontal 248/// whitespace: ' ', '\t', '\f', '\v'. Note that this returns false for '\0'. 249static inline bool isHorizontalWhitespace(unsigned char c) { 250 return (CharInfo[c] & CHAR_HORZ_WS) ? true : false; 251} 252 253/// isWhitespace - Return true if this character is horizontal or vertical 254/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'. Note that this returns false 255/// for '\0'. 256static inline bool isWhitespace(unsigned char c) { 257 return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false; 258} 259 260/// isNumberBody - Return true if this is the body character of an 261/// preprocessing number, which is [a-zA-Z0-9_.]. 262static inline bool isNumberBody(unsigned char c) { 263 return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ? 264 true : false; 265} 266 267 268//===----------------------------------------------------------------------===// 269// Diagnostics forwarding code. 270//===----------------------------------------------------------------------===// 271 272/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the 273/// lexer buffer was all instantiated at a single point, perform the mapping. 274/// This is currently only used for _Pragma implementation, so it is the slow 275/// path of the hot getSourceLocation method. Do not allow it to be inlined. 276static SourceLocation GetMappedTokenLoc(Preprocessor &PP, 277 SourceLocation FileLoc, 278 unsigned CharNo) DISABLE_INLINE; 279static SourceLocation GetMappedTokenLoc(Preprocessor &PP, 280 SourceLocation FileLoc, 281 unsigned CharNo) { 282 // Otherwise, we're lexing "mapped tokens". This is used for things like 283 // _Pragma handling. Combine the instantiation location of FileLoc with the 284 // spelling location. 285 SourceManager &SourceMgr = PP.getSourceManager(); 286 287 // Create a new SLoc which is expanded from Instantiation(FileLoc) but whose 288 // characters come from spelling(FileLoc)+Offset. 289 SourceLocation InstLoc = SourceMgr.getInstantiationLoc(FileLoc); 290 SourceLocation SpellingLoc = SourceMgr.getSpellingLoc(FileLoc); 291 SpellingLoc = SourceLocation::getFileLoc(SpellingLoc.getFileID(), CharNo); 292 return SourceMgr.getInstantiationLoc(SpellingLoc, InstLoc); 293} 294 295/// getSourceLocation - Return a source location identifier for the specified 296/// offset in the current file. 297SourceLocation Lexer::getSourceLocation(const char *Loc) const { 298 assert(Loc >= BufferStart && Loc <= BufferEnd && 299 "Location out of range for this buffer!"); 300 301 // In the normal case, we're just lexing from a simple file buffer, return 302 // the file id from FileLoc with the offset specified. 303 unsigned CharNo = Loc-BufferStart; 304 if (FileLoc.isFileID()) 305 return SourceLocation::getFileLoc(FileLoc.getFileID(), CharNo); 306 307 assert(PP && "This doesn't work on raw lexers"); 308 return GetMappedTokenLoc(*PP, FileLoc, CharNo); 309} 310 311/// Diag - Forwarding function for diagnostics. This translate a source 312/// position in the current buffer into a SourceLocation object for rendering. 313DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const { 314 return PP->Diag(getSourceLocation(Loc), DiagID); 315} 316 317//===----------------------------------------------------------------------===// 318// Trigraph and Escaped Newline Handling Code. 319//===----------------------------------------------------------------------===// 320 321/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair, 322/// return the decoded trigraph letter it corresponds to, or '\0' if nothing. 323static char GetTrigraphCharForLetter(char Letter) { 324 switch (Letter) { 325 default: return 0; 326 case '=': return '#'; 327 case ')': return ']'; 328 case '(': return '['; 329 case '!': return '|'; 330 case '\'': return '^'; 331 case '>': return '}'; 332 case '/': return '\\'; 333 case '<': return '{'; 334 case '-': return '~'; 335 } 336} 337 338/// DecodeTrigraphChar - If the specified character is a legal trigraph when 339/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled, 340/// return the result character. Finally, emit a warning about trigraph use 341/// whether trigraphs are enabled or not. 342static char DecodeTrigraphChar(const char *CP, Lexer *L) { 343 char Res = GetTrigraphCharForLetter(*CP); 344 if (!Res || !L) return Res; 345 346 if (!L->getFeatures().Trigraphs) { 347 if (!L->isLexingRawMode()) 348 L->Diag(CP-2, diag::trigraph_ignored); 349 return 0; 350 } 351 352 if (!L->isLexingRawMode()) 353 L->Diag(CP-2, diag::trigraph_converted) << std::string()+Res; 354 return Res; 355} 356 357/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer, 358/// get its size, and return it. This is tricky in several cases: 359/// 1. If currently at the start of a trigraph, we warn about the trigraph, 360/// then either return the trigraph (skipping 3 chars) or the '?', 361/// depending on whether trigraphs are enabled or not. 362/// 2. If this is an escaped newline (potentially with whitespace between 363/// the backslash and newline), implicitly skip the newline and return 364/// the char after it. 365/// 3. If this is a UCN, return it. FIXME: C++ UCN's? 366/// 367/// This handles the slow/uncommon case of the getCharAndSize method. Here we 368/// know that we can accumulate into Size, and that we have already incremented 369/// Ptr by Size bytes. 370/// 371/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should 372/// be updated to match. 373/// 374char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size, 375 Token *Tok) { 376 // If we have a slash, look for an escaped newline. 377 if (Ptr[0] == '\\') { 378 ++Size; 379 ++Ptr; 380Slash: 381 // Common case, backslash-char where the char is not whitespace. 382 if (!isWhitespace(Ptr[0])) return '\\'; 383 384 // See if we have optional whitespace characters followed by a newline. 385 { 386 unsigned SizeTmp = 0; 387 do { 388 ++SizeTmp; 389 if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') { 390 // Remember that this token needs to be cleaned. 391 if (Tok) Tok->setFlag(Token::NeedsCleaning); 392 393 // Warn if there was whitespace between the backslash and newline. 394 if (SizeTmp != 1 && Tok && !isLexingRawMode()) 395 Diag(Ptr, diag::backslash_newline_space); 396 397 // If this is a \r\n or \n\r, skip the newlines. 398 if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') && 399 Ptr[SizeTmp-1] != Ptr[SizeTmp]) 400 ++SizeTmp; 401 402 // Found backslash<whitespace><newline>. Parse the char after it. 403 Size += SizeTmp; 404 Ptr += SizeTmp; 405 // Use slow version to accumulate a correct size field. 406 return getCharAndSizeSlow(Ptr, Size, Tok); 407 } 408 } while (isWhitespace(Ptr[SizeTmp])); 409 } 410 411 // Otherwise, this is not an escaped newline, just return the slash. 412 return '\\'; 413 } 414 415 // If this is a trigraph, process it. 416 if (Ptr[0] == '?' && Ptr[1] == '?') { 417 // If this is actually a legal trigraph (not something like "??x"), emit 418 // a trigraph warning. If so, and if trigraphs are enabled, return it. 419 if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) { 420 // Remember that this token needs to be cleaned. 421 if (Tok) Tok->setFlag(Token::NeedsCleaning); 422 423 Ptr += 3; 424 Size += 3; 425 if (C == '\\') goto Slash; 426 return C; 427 } 428 } 429 430 // If this is neither, return a single character. 431 ++Size; 432 return *Ptr; 433} 434 435 436/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the 437/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size, 438/// and that we have already incremented Ptr by Size bytes. 439/// 440/// NOTE: When this method is updated, getCharAndSizeSlow (above) should 441/// be updated to match. 442char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size, 443 const LangOptions &Features) { 444 // If we have a slash, look for an escaped newline. 445 if (Ptr[0] == '\\') { 446 ++Size; 447 ++Ptr; 448Slash: 449 // Common case, backslash-char where the char is not whitespace. 450 if (!isWhitespace(Ptr[0])) return '\\'; 451 452 // See if we have optional whitespace characters followed by a newline. 453 { 454 unsigned SizeTmp = 0; 455 do { 456 ++SizeTmp; 457 if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') { 458 459 // If this is a \r\n or \n\r, skip the newlines. 460 if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') && 461 Ptr[SizeTmp-1] != Ptr[SizeTmp]) 462 ++SizeTmp; 463 464 // Found backslash<whitespace><newline>. Parse the char after it. 465 Size += SizeTmp; 466 Ptr += SizeTmp; 467 468 // Use slow version to accumulate a correct size field. 469 return getCharAndSizeSlowNoWarn(Ptr, Size, Features); 470 } 471 } while (isWhitespace(Ptr[SizeTmp])); 472 } 473 474 // Otherwise, this is not an escaped newline, just return the slash. 475 return '\\'; 476 } 477 478 // If this is a trigraph, process it. 479 if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') { 480 // If this is actually a legal trigraph (not something like "??x"), return 481 // it. 482 if (char C = GetTrigraphCharForLetter(Ptr[2])) { 483 Ptr += 3; 484 Size += 3; 485 if (C == '\\') goto Slash; 486 return C; 487 } 488 } 489 490 // If this is neither, return a single character. 491 ++Size; 492 return *Ptr; 493} 494 495//===----------------------------------------------------------------------===// 496// Helper methods for lexing. 497//===----------------------------------------------------------------------===// 498 499void Lexer::LexIdentifier(Token &Result, const char *CurPtr) { 500 // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$] 501 unsigned Size; 502 unsigned char C = *CurPtr++; 503 while (isIdentifierBody(C)) { 504 C = *CurPtr++; 505 } 506 --CurPtr; // Back up over the skipped character. 507 508 // Fast path, no $,\,? in identifier found. '\' might be an escaped newline 509 // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN. 510 // FIXME: UCNs. 511 if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) { 512FinishIdentifier: 513 const char *IdStart = BufferPtr; 514 FormTokenWithChars(Result, CurPtr, tok::identifier); 515 516 // If we are in raw mode, return this identifier raw. There is no need to 517 // look up identifier information or attempt to macro expand it. 518 if (LexingRawMode) return; 519 520 // Fill in Result.IdentifierInfo, looking up the identifier in the 521 // identifier table. 522 PP->LookUpIdentifierInfo(Result, IdStart); 523 524 // Finally, now that we know we have an identifier, pass this off to the 525 // preprocessor, which may macro expand it or something. 526 return PP->HandleIdentifier(Result); 527 } 528 529 // Otherwise, $,\,? in identifier found. Enter slower path. 530 531 C = getCharAndSize(CurPtr, Size); 532 while (1) { 533 if (C == '$') { 534 // If we hit a $ and they are not supported in identifiers, we are done. 535 if (!Features.DollarIdents) goto FinishIdentifier; 536 537 // Otherwise, emit a diagnostic and continue. 538 if (!isLexingRawMode()) 539 Diag(CurPtr, diag::ext_dollar_in_identifier); 540 CurPtr = ConsumeChar(CurPtr, Size, Result); 541 C = getCharAndSize(CurPtr, Size); 542 continue; 543 } else if (!isIdentifierBody(C)) { // FIXME: UCNs. 544 // Found end of identifier. 545 goto FinishIdentifier; 546 } 547 548 // Otherwise, this character is good, consume it. 549 CurPtr = ConsumeChar(CurPtr, Size, Result); 550 551 C = getCharAndSize(CurPtr, Size); 552 while (isIdentifierBody(C)) { // FIXME: UCNs. 553 CurPtr = ConsumeChar(CurPtr, Size, Result); 554 C = getCharAndSize(CurPtr, Size); 555 } 556 } 557} 558 559 560/// LexNumericConstant - Lex the remainder of a integer or floating point 561/// constant. From[-1] is the first character lexed. Return the end of the 562/// constant. 563void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) { 564 unsigned Size; 565 char C = getCharAndSize(CurPtr, Size); 566 char PrevCh = 0; 567 while (isNumberBody(C)) { // FIXME: UCNs? 568 CurPtr = ConsumeChar(CurPtr, Size, Result); 569 PrevCh = C; 570 C = getCharAndSize(CurPtr, Size); 571 } 572 573 // If we fell out, check for a sign, due to 1e+12. If we have one, continue. 574 if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) 575 return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result)); 576 577 // If we have a hex FP constant, continue. 578 if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p') && 579 (Features.HexFloats || !Features.NoExtensions)) 580 return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result)); 581 582 // Update the location of token as well as BufferPtr. 583 FormTokenWithChars(Result, CurPtr, tok::numeric_constant); 584} 585 586/// LexStringLiteral - Lex the remainder of a string literal, after having lexed 587/// either " or L". 588void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) { 589 const char *NulCharacter = 0; // Does this string contain the \0 character? 590 591 char C = getAndAdvanceChar(CurPtr, Result); 592 while (C != '"') { 593 // Skip escaped characters. 594 if (C == '\\') { 595 // Skip the escaped character. 596 C = getAndAdvanceChar(CurPtr, Result); 597 } else if (C == '\n' || C == '\r' || // Newline. 598 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 599 if (!isLexingRawMode()) 600 Diag(BufferPtr, diag::err_unterminated_string); 601 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 602 return; 603 } else if (C == 0) { 604 NulCharacter = CurPtr-1; 605 } 606 C = getAndAdvanceChar(CurPtr, Result); 607 } 608 609 // If a nul character existed in the string, warn about it. 610 if (NulCharacter && !isLexingRawMode()) 611 Diag(NulCharacter, diag::null_in_string); 612 613 // Update the location of the token as well as the BufferPtr instance var. 614 FormTokenWithChars(Result, CurPtr, 615 Wide ? tok::wide_string_literal : tok::string_literal); 616} 617 618/// LexAngledStringLiteral - Lex the remainder of an angled string literal, 619/// after having lexed the '<' character. This is used for #include filenames. 620void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) { 621 const char *NulCharacter = 0; // Does this string contain the \0 character? 622 623 char C = getAndAdvanceChar(CurPtr, Result); 624 while (C != '>') { 625 // Skip escaped characters. 626 if (C == '\\') { 627 // Skip the escaped character. 628 C = getAndAdvanceChar(CurPtr, Result); 629 } else if (C == '\n' || C == '\r' || // Newline. 630 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 631 if (!isLexingRawMode()) 632 Diag(BufferPtr, diag::err_unterminated_string); 633 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 634 return; 635 } else if (C == 0) { 636 NulCharacter = CurPtr-1; 637 } 638 C = getAndAdvanceChar(CurPtr, Result); 639 } 640 641 // If a nul character existed in the string, warn about it. 642 if (NulCharacter && !isLexingRawMode()) 643 Diag(NulCharacter, diag::null_in_string); 644 645 // Update the location of token as well as BufferPtr. 646 FormTokenWithChars(Result, CurPtr, tok::angle_string_literal); 647} 648 649 650/// LexCharConstant - Lex the remainder of a character constant, after having 651/// lexed either ' or L'. 652void Lexer::LexCharConstant(Token &Result, const char *CurPtr) { 653 const char *NulCharacter = 0; // Does this character contain the \0 character? 654 655 // Handle the common case of 'x' and '\y' efficiently. 656 char C = getAndAdvanceChar(CurPtr, Result); 657 if (C == '\'') { 658 if (!isLexingRawMode()) 659 Diag(BufferPtr, diag::err_empty_character); 660 FormTokenWithChars(Result, CurPtr, tok::unknown); 661 return; 662 } else if (C == '\\') { 663 // Skip the escaped character. 664 // FIXME: UCN's. 665 C = getAndAdvanceChar(CurPtr, Result); 666 } 667 668 if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') { 669 ++CurPtr; 670 } else { 671 // Fall back on generic code for embedded nulls, newlines, wide chars. 672 do { 673 // Skip escaped characters. 674 if (C == '\\') { 675 // Skip the escaped character. 676 C = getAndAdvanceChar(CurPtr, Result); 677 } else if (C == '\n' || C == '\r' || // Newline. 678 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 679 if (!isLexingRawMode()) 680 Diag(BufferPtr, diag::err_unterminated_char); 681 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 682 return; 683 } else if (C == 0) { 684 NulCharacter = CurPtr-1; 685 } 686 C = getAndAdvanceChar(CurPtr, Result); 687 } while (C != '\''); 688 } 689 690 if (NulCharacter && !isLexingRawMode()) 691 Diag(NulCharacter, diag::null_in_char); 692 693 // Update the location of token as well as BufferPtr. 694 FormTokenWithChars(Result, CurPtr, tok::char_constant); 695} 696 697/// SkipWhitespace - Efficiently skip over a series of whitespace characters. 698/// Update BufferPtr to point to the next non-whitespace character and return. 699/// 700/// This method forms a token and returns true if KeepWhitespaceMode is enabled. 701/// 702bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) { 703 // Whitespace - Skip it, then return the token after the whitespace. 704 unsigned char Char = *CurPtr; // Skip consequtive spaces efficiently. 705 while (1) { 706 // Skip horizontal whitespace very aggressively. 707 while (isHorizontalWhitespace(Char)) 708 Char = *++CurPtr; 709 710 // Otherwise if we have something other than whitespace, we're done. 711 if (Char != '\n' && Char != '\r') 712 break; 713 714 if (ParsingPreprocessorDirective) { 715 // End of preprocessor directive line, let LexTokenInternal handle this. 716 BufferPtr = CurPtr; 717 return false; 718 } 719 720 // ok, but handle newline. 721 // The returned token is at the start of the line. 722 Result.setFlag(Token::StartOfLine); 723 // No leading whitespace seen so far. 724 Result.clearFlag(Token::LeadingSpace); 725 Char = *++CurPtr; 726 } 727 728 // If this isn't immediately after a newline, there is leading space. 729 char PrevChar = CurPtr[-1]; 730 if (PrevChar != '\n' && PrevChar != '\r') 731 Result.setFlag(Token::LeadingSpace); 732 733 // If the client wants us to return whitespace, return it now. 734 if (isKeepWhitespaceMode()) { 735 FormTokenWithChars(Result, CurPtr, tok::unknown); 736 return true; 737 } 738 739 BufferPtr = CurPtr; 740 return false; 741} 742 743// SkipBCPLComment - We have just read the // characters from input. Skip until 744// we find the newline character thats terminate the comment. Then update 745/// BufferPtr and return. If we're in KeepCommentMode, this will form the token 746/// and return true. 747bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) { 748 // If BCPL comments aren't explicitly enabled for this language, emit an 749 // extension warning. 750 if (!Features.BCPLComment && !isLexingRawMode()) { 751 Diag(BufferPtr, diag::ext_bcpl_comment); 752 753 // Mark them enabled so we only emit one warning for this translation 754 // unit. 755 Features.BCPLComment = true; 756 } 757 758 // Scan over the body of the comment. The common case, when scanning, is that 759 // the comment contains normal ascii characters with nothing interesting in 760 // them. As such, optimize for this case with the inner loop. 761 char C; 762 do { 763 C = *CurPtr; 764 // FIXME: Speedup BCPL comment lexing. Just scan for a \n or \r character. 765 // If we find a \n character, scan backwards, checking to see if it's an 766 // escaped newline, like we do for block comments. 767 768 // Skip over characters in the fast loop. 769 while (C != 0 && // Potentially EOF. 770 C != '\\' && // Potentially escaped newline. 771 C != '?' && // Potentially trigraph. 772 C != '\n' && C != '\r') // Newline or DOS-style newline. 773 C = *++CurPtr; 774 775 // If this is a newline, we're done. 776 if (C == '\n' || C == '\r') 777 break; // Found the newline? Break out! 778 779 // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to 780 // properly decode the character. Read it in raw mode to avoid emitting 781 // diagnostics about things like trigraphs. If we see an escaped newline, 782 // we'll handle it below. 783 const char *OldPtr = CurPtr; 784 bool OldRawMode = isLexingRawMode(); 785 LexingRawMode = true; 786 C = getAndAdvanceChar(CurPtr, Result); 787 LexingRawMode = OldRawMode; 788 789 // If we read multiple characters, and one of those characters was a \r or 790 // \n, then we had an escaped newline within the comment. Emit diagnostic 791 // unless the next line is also a // comment. 792 if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') { 793 for (; OldPtr != CurPtr; ++OldPtr) 794 if (OldPtr[0] == '\n' || OldPtr[0] == '\r') { 795 // Okay, we found a // comment that ends in a newline, if the next 796 // line is also a // comment, but has spaces, don't emit a diagnostic. 797 if (isspace(C)) { 798 const char *ForwardPtr = CurPtr; 799 while (isspace(*ForwardPtr)) // Skip whitespace. 800 ++ForwardPtr; 801 if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/') 802 break; 803 } 804 805 if (!isLexingRawMode()) 806 Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment); 807 break; 808 } 809 } 810 811 if (CurPtr == BufferEnd+1) { --CurPtr; break; } 812 } while (C != '\n' && C != '\r'); 813 814 // Found but did not consume the newline. 815 816 // If we are returning comments as tokens, return this comment as a token. 817 if (inKeepCommentMode()) 818 return SaveBCPLComment(Result, CurPtr); 819 820 // If we are inside a preprocessor directive and we see the end of line, 821 // return immediately, so that the lexer can return this as an EOM token. 822 if (ParsingPreprocessorDirective || CurPtr == BufferEnd) { 823 BufferPtr = CurPtr; 824 return false; 825 } 826 827 // Otherwise, eat the \n character. We don't care if this is a \n\r or 828 // \r\n sequence. This is an efficiency hack (because we know the \n can't 829 // contribute to another token), it isn't needed for correctness. Note that 830 // this is ok even in KeepWhitespaceMode, because we would have returned the 831 /// comment above in that mode. 832 ++CurPtr; 833 834 // The next returned token is at the start of the line. 835 Result.setFlag(Token::StartOfLine); 836 // No leading whitespace seen so far. 837 Result.clearFlag(Token::LeadingSpace); 838 BufferPtr = CurPtr; 839 return false; 840} 841 842/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in 843/// an appropriate way and return it. 844bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) { 845 // If we're not in a preprocessor directive, just return the // comment 846 // directly. 847 FormTokenWithChars(Result, CurPtr, tok::comment); 848 849 if (!ParsingPreprocessorDirective) 850 return true; 851 852 // If this BCPL-style comment is in a macro definition, transmogrify it into 853 // a C-style block comment. 854 std::string Spelling = PP->getSpelling(Result); 855 assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?"); 856 Spelling[1] = '*'; // Change prefix to "/*". 857 Spelling += "*/"; // add suffix. 858 859 Result.setKind(tok::comment); 860 Result.setLocation(PP->CreateString(&Spelling[0], Spelling.size(), 861 Result.getLocation())); 862 Result.setLength(Spelling.size()); 863 return true; 864} 865 866/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline 867/// character (either \n or \r) is part of an escaped newline sequence. Issue a 868/// diagnostic if so. We know that the newline is inside of a block comment. 869static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, 870 Lexer *L) { 871 assert(CurPtr[0] == '\n' || CurPtr[0] == '\r'); 872 873 // Back up off the newline. 874 --CurPtr; 875 876 // If this is a two-character newline sequence, skip the other character. 877 if (CurPtr[0] == '\n' || CurPtr[0] == '\r') { 878 // \n\n or \r\r -> not escaped newline. 879 if (CurPtr[0] == CurPtr[1]) 880 return false; 881 // \n\r or \r\n -> skip the newline. 882 --CurPtr; 883 } 884 885 // If we have horizontal whitespace, skip over it. We allow whitespace 886 // between the slash and newline. 887 bool HasSpace = false; 888 while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) { 889 --CurPtr; 890 HasSpace = true; 891 } 892 893 // If we have a slash, we know this is an escaped newline. 894 if (*CurPtr == '\\') { 895 if (CurPtr[-1] != '*') return false; 896 } else { 897 // It isn't a slash, is it the ?? / trigraph? 898 if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' || 899 CurPtr[-3] != '*') 900 return false; 901 902 // This is the trigraph ending the comment. Emit a stern warning! 903 CurPtr -= 2; 904 905 // If no trigraphs are enabled, warn that we ignored this trigraph and 906 // ignore this * character. 907 if (!L->getFeatures().Trigraphs) { 908 if (!L->isLexingRawMode()) 909 L->Diag(CurPtr, diag::trigraph_ignored_block_comment); 910 return false; 911 } 912 if (!L->isLexingRawMode()) 913 L->Diag(CurPtr, diag::trigraph_ends_block_comment); 914 } 915 916 // Warn about having an escaped newline between the */ characters. 917 if (!L->isLexingRawMode()) 918 L->Diag(CurPtr, diag::escaped_newline_block_comment_end); 919 920 // If there was space between the backslash and newline, warn about it. 921 if (HasSpace && !L->isLexingRawMode()) 922 L->Diag(CurPtr, diag::backslash_newline_space); 923 924 return true; 925} 926 927#ifdef __SSE2__ 928#include <emmintrin.h> 929#elif __ALTIVEC__ 930#include <altivec.h> 931#undef bool 932#endif 933 934/// SkipBlockComment - We have just read the /* characters from input. Read 935/// until we find the */ characters that terminate the comment. Note that we 936/// don't bother decoding trigraphs or escaped newlines in block comments, 937/// because they cannot cause the comment to end. The only thing that can 938/// happen is the comment could end with an escaped newline between the */ end 939/// of comment. 940/// 941/// If KeepCommentMode is enabled, this forms a token from the comment and 942/// returns true. 943bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) { 944 // Scan one character past where we should, looking for a '/' character. Once 945 // we find it, check to see if it was preceeded by a *. This common 946 // optimization helps people who like to put a lot of * characters in their 947 // comments. 948 949 // The first character we get with newlines and trigraphs skipped to handle 950 // the degenerate /*/ case below correctly if the * has an escaped newline 951 // after it. 952 unsigned CharSize; 953 unsigned char C = getCharAndSize(CurPtr, CharSize); 954 CurPtr += CharSize; 955 if (C == 0 && CurPtr == BufferEnd+1) { 956 if (!isLexingRawMode()) 957 Diag(BufferPtr, diag::err_unterminated_block_comment); 958 --CurPtr; 959 960 // KeepWhitespaceMode should return this broken comment as a token. Since 961 // it isn't a well formed comment, just return it as an 'unknown' token. 962 if (isKeepWhitespaceMode()) { 963 FormTokenWithChars(Result, CurPtr, tok::unknown); 964 return true; 965 } 966 967 BufferPtr = CurPtr; 968 return false; 969 } 970 971 // Check to see if the first character after the '/*' is another /. If so, 972 // then this slash does not end the block comment, it is part of it. 973 if (C == '/') 974 C = *CurPtr++; 975 976 while (1) { 977 // Skip over all non-interesting characters until we find end of buffer or a 978 // (probably ending) '/' character. 979 if (CurPtr + 24 < BufferEnd) { 980 // While not aligned to a 16-byte boundary. 981 while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0) 982 C = *CurPtr++; 983 984 if (C == '/') goto FoundSlash; 985 986#ifdef __SSE2__ 987 __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/', 988 '/', '/', '/', '/', '/', '/', '/', '/'); 989 while (CurPtr+16 <= BufferEnd && 990 _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0) 991 CurPtr += 16; 992#elif __ALTIVEC__ 993 __vector unsigned char Slashes = { 994 '/', '/', '/', '/', '/', '/', '/', '/', 995 '/', '/', '/', '/', '/', '/', '/', '/' 996 }; 997 while (CurPtr+16 <= BufferEnd && 998 !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes)) 999 CurPtr += 16; 1000#else 1001 // Scan for '/' quickly. Many block comments are very large. 1002 while (CurPtr[0] != '/' && 1003 CurPtr[1] != '/' && 1004 CurPtr[2] != '/' && 1005 CurPtr[3] != '/' && 1006 CurPtr+4 < BufferEnd) { 1007 CurPtr += 4; 1008 } 1009#endif 1010 1011 // It has to be one of the bytes scanned, increment to it and read one. 1012 C = *CurPtr++; 1013 } 1014 1015 // Loop to scan the remainder. 1016 while (C != '/' && C != '\0') 1017 C = *CurPtr++; 1018 1019 FoundSlash: 1020 if (C == '/') { 1021 if (CurPtr[-2] == '*') // We found the final */. We're done! 1022 break; 1023 1024 if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) { 1025 if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) { 1026 // We found the final */, though it had an escaped newline between the 1027 // * and /. We're done! 1028 break; 1029 } 1030 } 1031 if (CurPtr[0] == '*' && CurPtr[1] != '/') { 1032 // If this is a /* inside of the comment, emit a warning. Don't do this 1033 // if this is a /*/, which will end the comment. This misses cases with 1034 // embedded escaped newlines, but oh well. 1035 if (!isLexingRawMode()) 1036 Diag(CurPtr-1, diag::warn_nested_block_comment); 1037 } 1038 } else if (C == 0 && CurPtr == BufferEnd+1) { 1039 if (!isLexingRawMode()) 1040 Diag(BufferPtr, diag::err_unterminated_block_comment); 1041 // Note: the user probably forgot a */. We could continue immediately 1042 // after the /*, but this would involve lexing a lot of what really is the 1043 // comment, which surely would confuse the parser. 1044 --CurPtr; 1045 1046 // KeepWhitespaceMode should return this broken comment as a token. Since 1047 // it isn't a well formed comment, just return it as an 'unknown' token. 1048 if (isKeepWhitespaceMode()) { 1049 FormTokenWithChars(Result, CurPtr, tok::unknown); 1050 return true; 1051 } 1052 1053 BufferPtr = CurPtr; 1054 return false; 1055 } 1056 C = *CurPtr++; 1057 } 1058 1059 // If we are returning comments as tokens, return this comment as a token. 1060 if (inKeepCommentMode()) { 1061 FormTokenWithChars(Result, CurPtr, tok::comment); 1062 return true; 1063 } 1064 1065 // It is common for the tokens immediately after a /**/ comment to be 1066 // whitespace. Instead of going through the big switch, handle it 1067 // efficiently now. This is safe even in KeepWhitespaceMode because we would 1068 // have already returned above with the comment as a token. 1069 if (isHorizontalWhitespace(*CurPtr)) { 1070 Result.setFlag(Token::LeadingSpace); 1071 SkipWhitespace(Result, CurPtr+1); 1072 return false; 1073 } 1074 1075 // Otherwise, just return so that the next character will be lexed as a token. 1076 BufferPtr = CurPtr; 1077 Result.setFlag(Token::LeadingSpace); 1078 return false; 1079} 1080 1081//===----------------------------------------------------------------------===// 1082// Primary Lexing Entry Points 1083//===----------------------------------------------------------------------===// 1084 1085/// ReadToEndOfLine - Read the rest of the current preprocessor line as an 1086/// uninterpreted string. This switches the lexer out of directive mode. 1087std::string Lexer::ReadToEndOfLine() { 1088 assert(ParsingPreprocessorDirective && ParsingFilename == false && 1089 "Must be in a preprocessing directive!"); 1090 std::string Result; 1091 Token Tmp; 1092 1093 // CurPtr - Cache BufferPtr in an automatic variable. 1094 const char *CurPtr = BufferPtr; 1095 while (1) { 1096 char Char = getAndAdvanceChar(CurPtr, Tmp); 1097 switch (Char) { 1098 default: 1099 Result += Char; 1100 break; 1101 case 0: // Null. 1102 // Found end of file? 1103 if (CurPtr-1 != BufferEnd) { 1104 // Nope, normal character, continue. 1105 Result += Char; 1106 break; 1107 } 1108 // FALL THROUGH. 1109 case '\r': 1110 case '\n': 1111 // Okay, we found the end of the line. First, back up past the \0, \r, \n. 1112 assert(CurPtr[-1] == Char && "Trigraphs for newline?"); 1113 BufferPtr = CurPtr-1; 1114 1115 // Next, lex the character, which should handle the EOM transition. 1116 Lex(Tmp); 1117 assert(Tmp.is(tok::eom) && "Unexpected token!"); 1118 1119 // Finally, we're done, return the string we found. 1120 return Result; 1121 } 1122 } 1123} 1124 1125/// LexEndOfFile - CurPtr points to the end of this file. Handle this 1126/// condition, reporting diagnostics and handling other edge cases as required. 1127/// This returns true if Result contains a token, false if PP.Lex should be 1128/// called again. 1129bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) { 1130 // If we hit the end of the file while parsing a preprocessor directive, 1131 // end the preprocessor directive first. The next token returned will 1132 // then be the end of file. 1133 if (ParsingPreprocessorDirective) { 1134 // Done parsing the "line". 1135 ParsingPreprocessorDirective = false; 1136 // Update the location of token as well as BufferPtr. 1137 FormTokenWithChars(Result, CurPtr, tok::eom); 1138 1139 // Restore comment saving mode, in case it was disabled for directive. 1140 SetCommentRetentionState(PP->getCommentRetentionState()); 1141 return true; // Have a token. 1142 } 1143 1144 // If we are in raw mode, return this event as an EOF token. Let the caller 1145 // that put us in raw mode handle the event. 1146 if (isLexingRawMode()) { 1147 Result.startToken(); 1148 BufferPtr = BufferEnd; 1149 FormTokenWithChars(Result, BufferEnd, tok::eof); 1150 return true; 1151 } 1152 1153 // Otherwise, issue diagnostics for unterminated #if and missing newline. 1154 1155 // If we are in a #if directive, emit an error. 1156 while (!ConditionalStack.empty()) { 1157 PP->Diag(ConditionalStack.back().IfLoc, 1158 diag::err_pp_unterminated_conditional); 1159 ConditionalStack.pop_back(); 1160 } 1161 1162 // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue 1163 // a pedwarn. 1164 if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) 1165 Diag(BufferEnd, diag::ext_no_newline_eof); 1166 1167 BufferPtr = CurPtr; 1168 1169 // Finally, let the preprocessor handle this. 1170 return PP->HandleEndOfFile(Result); 1171} 1172 1173/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from 1174/// the specified lexer will return a tok::l_paren token, 0 if it is something 1175/// else and 2 if there are no more tokens in the buffer controlled by the 1176/// lexer. 1177unsigned Lexer::isNextPPTokenLParen() { 1178 assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?"); 1179 1180 // Switch to 'skipping' mode. This will ensure that we can lex a token 1181 // without emitting diagnostics, disables macro expansion, and will cause EOF 1182 // to return an EOF token instead of popping the include stack. 1183 LexingRawMode = true; 1184 1185 // Save state that can be changed while lexing so that we can restore it. 1186 const char *TmpBufferPtr = BufferPtr; 1187 1188 Token Tok; 1189 Tok.startToken(); 1190 LexTokenInternal(Tok); 1191 1192 // Restore state that may have changed. 1193 BufferPtr = TmpBufferPtr; 1194 1195 // Restore the lexer back to non-skipping mode. 1196 LexingRawMode = false; 1197 1198 if (Tok.is(tok::eof)) 1199 return 2; 1200 return Tok.is(tok::l_paren); 1201} 1202 1203 1204/// LexTokenInternal - This implements a simple C family lexer. It is an 1205/// extremely performance critical piece of code. This assumes that the buffer 1206/// has a null character at the end of the file. Return true if an error 1207/// occurred and compilation should terminate, false if normal. This returns a 1208/// preprocessing token, not a normal token, as such, it is an internal 1209/// interface. It assumes that the Flags of result have been cleared before 1210/// calling this. 1211void Lexer::LexTokenInternal(Token &Result) { 1212LexNextToken: 1213 // New token, can't need cleaning yet. 1214 Result.clearFlag(Token::NeedsCleaning); 1215 Result.setIdentifierInfo(0); 1216 1217 // CurPtr - Cache BufferPtr in an automatic variable. 1218 const char *CurPtr = BufferPtr; 1219 1220 // Small amounts of horizontal whitespace is very common between tokens. 1221 if ((*CurPtr == ' ') || (*CurPtr == '\t')) { 1222 ++CurPtr; 1223 while ((*CurPtr == ' ') || (*CurPtr == '\t')) 1224 ++CurPtr; 1225 1226 // If we are keeping whitespace and other tokens, just return what we just 1227 // skipped. The next lexer invocation will return the token after the 1228 // whitespace. 1229 if (isKeepWhitespaceMode()) { 1230 FormTokenWithChars(Result, CurPtr, tok::unknown); 1231 return; 1232 } 1233 1234 BufferPtr = CurPtr; 1235 Result.setFlag(Token::LeadingSpace); 1236 } 1237 1238 unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below. 1239 1240 // Read a character, advancing over it. 1241 char Char = getAndAdvanceChar(CurPtr, Result); 1242 tok::TokenKind Kind; 1243 1244 switch (Char) { 1245 case 0: // Null. 1246 // Found end of file? 1247 if (CurPtr-1 == BufferEnd) { 1248 // Read the PP instance variable into an automatic variable, because 1249 // LexEndOfFile will often delete 'this'. 1250 Preprocessor *PPCache = PP; 1251 if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file. 1252 return; // Got a token to return. 1253 assert(PPCache && "Raw buffer::LexEndOfFile should return a token"); 1254 return PPCache->Lex(Result); 1255 } 1256 1257 if (!isLexingRawMode()) 1258 Diag(CurPtr-1, diag::null_in_file); 1259 Result.setFlag(Token::LeadingSpace); 1260 if (SkipWhitespace(Result, CurPtr)) 1261 return; // KeepWhitespaceMode 1262 1263 goto LexNextToken; // GCC isn't tail call eliminating. 1264 case '\n': 1265 case '\r': 1266 // If we are inside a preprocessor directive and we see the end of line, 1267 // we know we are done with the directive, so return an EOM token. 1268 if (ParsingPreprocessorDirective) { 1269 // Done parsing the "line". 1270 ParsingPreprocessorDirective = false; 1271 1272 // Restore comment saving mode, in case it was disabled for directive. 1273 SetCommentRetentionState(PP->getCommentRetentionState()); 1274 1275 // Since we consumed a newline, we are back at the start of a line. 1276 IsAtStartOfLine = true; 1277 1278 Kind = tok::eom; 1279 break; 1280 } 1281 // The returned token is at the start of the line. 1282 Result.setFlag(Token::StartOfLine); 1283 // No leading whitespace seen so far. 1284 Result.clearFlag(Token::LeadingSpace); 1285 1286 if (SkipWhitespace(Result, CurPtr)) 1287 return; // KeepWhitespaceMode 1288 goto LexNextToken; // GCC isn't tail call eliminating. 1289 case ' ': 1290 case '\t': 1291 case '\f': 1292 case '\v': 1293 SkipHorizontalWhitespace: 1294 Result.setFlag(Token::LeadingSpace); 1295 if (SkipWhitespace(Result, CurPtr)) 1296 return; // KeepWhitespaceMode 1297 1298 SkipIgnoredUnits: 1299 CurPtr = BufferPtr; 1300 1301 // If the next token is obviously a // or /* */ comment, skip it efficiently 1302 // too (without going through the big switch stmt). 1303 if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode()) { 1304 SkipBCPLComment(Result, CurPtr+2); 1305 goto SkipIgnoredUnits; 1306 } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) { 1307 SkipBlockComment(Result, CurPtr+2); 1308 goto SkipIgnoredUnits; 1309 } else if (isHorizontalWhitespace(*CurPtr)) { 1310 goto SkipHorizontalWhitespace; 1311 } 1312 goto LexNextToken; // GCC isn't tail call eliminating. 1313 1314 // C99 6.4.4.1: Integer Constants. 1315 // C99 6.4.4.2: Floating Constants. 1316 case '0': case '1': case '2': case '3': case '4': 1317 case '5': case '6': case '7': case '8': case '9': 1318 // Notify MIOpt that we read a non-whitespace/non-comment token. 1319 MIOpt.ReadToken(); 1320 return LexNumericConstant(Result, CurPtr); 1321 1322 case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz"). 1323 // Notify MIOpt that we read a non-whitespace/non-comment token. 1324 MIOpt.ReadToken(); 1325 Char = getCharAndSize(CurPtr, SizeTmp); 1326 1327 // Wide string literal. 1328 if (Char == '"') 1329 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 1330 true); 1331 1332 // Wide character constant. 1333 if (Char == '\'') 1334 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result)); 1335 // FALL THROUGH, treating L like the start of an identifier. 1336 1337 // C99 6.4.2: Identifiers. 1338 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': 1339 case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N': 1340 case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': 1341 case 'V': case 'W': case 'X': case 'Y': case 'Z': 1342 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': 1343 case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': 1344 case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u': 1345 case 'v': case 'w': case 'x': case 'y': case 'z': 1346 case '_': 1347 // Notify MIOpt that we read a non-whitespace/non-comment token. 1348 MIOpt.ReadToken(); 1349 return LexIdentifier(Result, CurPtr); 1350 1351 case '$': // $ in identifiers. 1352 if (Features.DollarIdents) { 1353 if (!isLexingRawMode()) 1354 Diag(CurPtr-1, diag::ext_dollar_in_identifier); 1355 // Notify MIOpt that we read a non-whitespace/non-comment token. 1356 MIOpt.ReadToken(); 1357 return LexIdentifier(Result, CurPtr); 1358 } 1359 1360 Kind = tok::unknown; 1361 break; 1362 1363 // C99 6.4.4: Character Constants. 1364 case '\'': 1365 // Notify MIOpt that we read a non-whitespace/non-comment token. 1366 MIOpt.ReadToken(); 1367 return LexCharConstant(Result, CurPtr); 1368 1369 // C99 6.4.5: String Literals. 1370 case '"': 1371 // Notify MIOpt that we read a non-whitespace/non-comment token. 1372 MIOpt.ReadToken(); 1373 return LexStringLiteral(Result, CurPtr, false); 1374 1375 // C99 6.4.6: Punctuators. 1376 case '?': 1377 Kind = tok::question; 1378 break; 1379 case '[': 1380 Kind = tok::l_square; 1381 break; 1382 case ']': 1383 Kind = tok::r_square; 1384 break; 1385 case '(': 1386 Kind = tok::l_paren; 1387 break; 1388 case ')': 1389 Kind = tok::r_paren; 1390 break; 1391 case '{': 1392 Kind = tok::l_brace; 1393 break; 1394 case '}': 1395 Kind = tok::r_brace; 1396 break; 1397 case '.': 1398 Char = getCharAndSize(CurPtr, SizeTmp); 1399 if (Char >= '0' && Char <= '9') { 1400 // Notify MIOpt that we read a non-whitespace/non-comment token. 1401 MIOpt.ReadToken(); 1402 1403 return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result)); 1404 } else if (Features.CPlusPlus && Char == '*') { 1405 Kind = tok::periodstar; 1406 CurPtr += SizeTmp; 1407 } else if (Char == '.' && 1408 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') { 1409 Kind = tok::ellipsis; 1410 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1411 SizeTmp2, Result); 1412 } else { 1413 Kind = tok::period; 1414 } 1415 break; 1416 case '&': 1417 Char = getCharAndSize(CurPtr, SizeTmp); 1418 if (Char == '&') { 1419 Kind = tok::ampamp; 1420 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1421 } else if (Char == '=') { 1422 Kind = tok::ampequal; 1423 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1424 } else { 1425 Kind = tok::amp; 1426 } 1427 break; 1428 case '*': 1429 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 1430 Kind = tok::starequal; 1431 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1432 } else { 1433 Kind = tok::star; 1434 } 1435 break; 1436 case '+': 1437 Char = getCharAndSize(CurPtr, SizeTmp); 1438 if (Char == '+') { 1439 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1440 Kind = tok::plusplus; 1441 } else if (Char == '=') { 1442 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1443 Kind = tok::plusequal; 1444 } else { 1445 Kind = tok::plus; 1446 } 1447 break; 1448 case '-': 1449 Char = getCharAndSize(CurPtr, SizeTmp); 1450 if (Char == '-') { // -- 1451 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1452 Kind = tok::minusminus; 1453 } else if (Char == '>' && Features.CPlusPlus && 1454 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->* 1455 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1456 SizeTmp2, Result); 1457 Kind = tok::arrowstar; 1458 } else if (Char == '>') { // -> 1459 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1460 Kind = tok::arrow; 1461 } else if (Char == '=') { // -= 1462 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1463 Kind = tok::minusequal; 1464 } else { 1465 Kind = tok::minus; 1466 } 1467 break; 1468 case '~': 1469 Kind = tok::tilde; 1470 break; 1471 case '!': 1472 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 1473 Kind = tok::exclaimequal; 1474 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1475 } else { 1476 Kind = tok::exclaim; 1477 } 1478 break; 1479 case '/': 1480 // 6.4.9: Comments 1481 Char = getCharAndSize(CurPtr, SizeTmp); 1482 if (Char == '/') { // BCPL comment. 1483 if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) 1484 return; // KeepCommentMode 1485 1486 // It is common for the tokens immediately after a // comment to be 1487 // whitespace (indentation for the next line). Instead of going through 1488 // the big switch, handle it efficiently now. 1489 goto SkipIgnoredUnits; 1490 } else if (Char == '*') { // /**/ comment. 1491 if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) 1492 return; // KeepCommentMode 1493 goto LexNextToken; // GCC isn't tail call eliminating. 1494 } else if (Char == '=') { 1495 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1496 Kind = tok::slashequal; 1497 } else { 1498 Kind = tok::slash; 1499 } 1500 break; 1501 case '%': 1502 Char = getCharAndSize(CurPtr, SizeTmp); 1503 if (Char == '=') { 1504 Kind = tok::percentequal; 1505 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1506 } else if (Features.Digraphs && Char == '>') { 1507 Kind = tok::r_brace; // '%>' -> '}' 1508 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1509 } else if (Features.Digraphs && Char == ':') { 1510 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1511 Char = getCharAndSize(CurPtr, SizeTmp); 1512 if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') { 1513 Kind = tok::hashhash; // '%:%:' -> '##' 1514 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1515 SizeTmp2, Result); 1516 } else if (Char == '@' && Features.Microsoft) { // %:@ -> #@ -> Charize 1517 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1518 if (!isLexingRawMode()) 1519 Diag(BufferPtr, diag::charize_microsoft_ext); 1520 Kind = tok::hashat; 1521 } else { 1522 Kind = tok::hash; // '%:' -> '#' 1523 1524 // We parsed a # character. If this occurs at the start of the line, 1525 // it's actually the start of a preprocessing directive. Callback to 1526 // the preprocessor to handle it. 1527 // FIXME: -fpreprocessed mode?? 1528 if (Result.isAtStartOfLine() && !LexingRawMode) { 1529 BufferPtr = CurPtr; 1530 PP->HandleDirective(Result); 1531 1532 // As an optimization, if the preprocessor didn't switch lexers, tail 1533 // recurse. 1534 if (PP->isCurrentLexer(this)) { 1535 // Start a new token. If this is a #include or something, the PP may 1536 // want us starting at the beginning of the line again. If so, set 1537 // the StartOfLine flag. 1538 if (IsAtStartOfLine) { 1539 Result.setFlag(Token::StartOfLine); 1540 IsAtStartOfLine = false; 1541 } 1542 goto LexNextToken; // GCC isn't tail call eliminating. 1543 } 1544 1545 return PP->Lex(Result); 1546 } 1547 } 1548 } else { 1549 Kind = tok::percent; 1550 } 1551 break; 1552 case '<': 1553 Char = getCharAndSize(CurPtr, SizeTmp); 1554 if (ParsingFilename) { 1555 return LexAngledStringLiteral(Result, CurPtr+SizeTmp); 1556 } else if (Char == '<' && 1557 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') { 1558 Kind = tok::lesslessequal; 1559 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1560 SizeTmp2, Result); 1561 } else if (Char == '<') { 1562 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1563 Kind = tok::lessless; 1564 } else if (Char == '=') { 1565 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1566 Kind = tok::lessequal; 1567 } else if (Features.Digraphs && Char == ':') { // '<:' -> '[' 1568 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1569 Kind = tok::l_square; 1570 } else if (Features.Digraphs && Char == '%') { // '<%' -> '{' 1571 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1572 Kind = tok::l_brace; 1573 } else { 1574 Kind = tok::less; 1575 } 1576 break; 1577 case '>': 1578 Char = getCharAndSize(CurPtr, SizeTmp); 1579 if (Char == '=') { 1580 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1581 Kind = tok::greaterequal; 1582 } else if (Char == '>' && 1583 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') { 1584 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1585 SizeTmp2, Result); 1586 Kind = tok::greatergreaterequal; 1587 } else if (Char == '>') { 1588 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1589 Kind = tok::greatergreater; 1590 } else { 1591 Kind = tok::greater; 1592 } 1593 break; 1594 case '^': 1595 Char = getCharAndSize(CurPtr, SizeTmp); 1596 if (Char == '=') { 1597 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1598 Kind = tok::caretequal; 1599 } else { 1600 Kind = tok::caret; 1601 } 1602 break; 1603 case '|': 1604 Char = getCharAndSize(CurPtr, SizeTmp); 1605 if (Char == '=') { 1606 Kind = tok::pipeequal; 1607 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1608 } else if (Char == '|') { 1609 Kind = tok::pipepipe; 1610 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1611 } else { 1612 Kind = tok::pipe; 1613 } 1614 break; 1615 case ':': 1616 Char = getCharAndSize(CurPtr, SizeTmp); 1617 if (Features.Digraphs && Char == '>') { 1618 Kind = tok::r_square; // ':>' -> ']' 1619 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1620 } else if (Features.CPlusPlus && Char == ':') { 1621 Kind = tok::coloncolon; 1622 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1623 } else { 1624 Kind = tok::colon; 1625 } 1626 break; 1627 case ';': 1628 Kind = tok::semi; 1629 break; 1630 case '=': 1631 Char = getCharAndSize(CurPtr, SizeTmp); 1632 if (Char == '=') { 1633 Kind = tok::equalequal; 1634 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1635 } else { 1636 Kind = tok::equal; 1637 } 1638 break; 1639 case ',': 1640 Kind = tok::comma; 1641 break; 1642 case '#': 1643 Char = getCharAndSize(CurPtr, SizeTmp); 1644 if (Char == '#') { 1645 Kind = tok::hashhash; 1646 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1647 } else if (Char == '@' && Features.Microsoft) { // #@ -> Charize 1648 Kind = tok::hashat; 1649 if (!isLexingRawMode()) 1650 Diag(BufferPtr, diag::charize_microsoft_ext); 1651 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1652 } else { 1653 Kind = tok::hash; 1654 // We parsed a # character. If this occurs at the start of the line, 1655 // it's actually the start of a preprocessing directive. Callback to 1656 // the preprocessor to handle it. 1657 // FIXME: -fpreprocessed mode?? 1658 if (Result.isAtStartOfLine() && !LexingRawMode) { 1659 BufferPtr = CurPtr; 1660 PP->HandleDirective(Result); 1661 1662 // As an optimization, if the preprocessor didn't switch lexers, tail 1663 // recurse. 1664 if (PP->isCurrentLexer(this)) { 1665 // Start a new token. If this is a #include or something, the PP may 1666 // want us starting at the beginning of the line again. If so, set 1667 // the StartOfLine flag. 1668 if (IsAtStartOfLine) { 1669 Result.setFlag(Token::StartOfLine); 1670 IsAtStartOfLine = false; 1671 } 1672 goto LexNextToken; // GCC isn't tail call eliminating. 1673 } 1674 return PP->Lex(Result); 1675 } 1676 } 1677 break; 1678 1679 case '@': 1680 // Objective C support. 1681 if (CurPtr[-1] == '@' && Features.ObjC1) 1682 Kind = tok::at; 1683 else 1684 Kind = tok::unknown; 1685 break; 1686 1687 case '\\': 1688 // FIXME: UCN's. 1689 // FALL THROUGH. 1690 default: 1691 Kind = tok::unknown; 1692 break; 1693 } 1694 1695 // Notify MIOpt that we read a non-whitespace/non-comment token. 1696 MIOpt.ReadToken(); 1697 1698 // Update the location of token as well as BufferPtr. 1699 FormTokenWithChars(Result, CurPtr, Kind); 1700} 1701