Lexer.cpp revision 41938c8493b4380df738263166b746eacb33c309
//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//
//
// TODO: GCC Diagnostics emitted by the lexer:
// PEDWARN: (form feed|vertical tab) in preprocessing directive
//
// Universal characters, unicode, char mapping:
// WARNING: `%.*s' is not in NFKC
// WARNING: `%.*s' is not in NFC
//
// Other:
// TODO: Options to support:
//    -fexec-charset,-fwide-exec-charset
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cctype>
using namespace clang;

// Forward declaration; fills in the CharInfo classification table below on
// first use (constructors call this before any character is classified).
static void InitCharacterInfo();

//===----------------------------------------------------------------------===//
// Token Class Implementation
//===----------------------------------------------------------------------===//

/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
/// Only identifiers can be ObjC @-keywords, so any other token kind answers
/// false without consulting the identifier table.
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
  return is(tok::identifier) &&
         getIdentifierInfo()->getObjCKeywordID() == objcKey;
}

/// getObjCKeywordID - Return the ObjC keyword kind for this token, or
/// tok::objc_not_keyword when the token has no identifier info attached.
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
  IdentifierInfo *specId = getIdentifierInfo();
  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}


//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//


/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process.  This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(SourceLocation fileloc, Preprocessor &pp,
             const char *BufStart, const char *BufEnd)
  : PreprocessorLexer(&pp, fileloc), FileLoc(fileloc),
    Features(pp.getLangOptions()) {

  SourceManager &SourceMgr = PP->getSourceManager();
  unsigned InputFileID = SourceMgr.getPhysicalLoc(FileLoc).getFileID();
  const llvm::MemoryBuffer *InputFile = SourceMgr.getBuffer(InputFileID);

  Is_PragmaLexer = false;
  InitCharacterInfo();

  // BufferStart must always be InputFile->getBufferStart().
  BufferStart = InputFile->getBufferStart();

  // BufferPtr and BufferEnd can start out somewhere inside the current buffer.
  // If unspecified, they start at the start/end of the buffer.
  BufferPtr = BufStart ? BufStart : BufferStart;
  BufferEnd = BufEnd ? BufEnd : InputFile->getBufferEnd();

  // The lexer relies on a NUL sentinel so the scanning loops do not need a
  // separate end-of-buffer check on every character.
  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode.  Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to keeping comments if the preprocessor wants them.
  ExtendedTokenMode = 0;
  SetCommentRetentionState(PP->getCommentRetentionState());
}

/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexRawToken'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
             const char *BufStart, const char *BufEnd,
             const llvm::MemoryBuffer *FromFile)
  : PreprocessorLexer(), FileLoc(fileloc),
    Features(features) {

  Is_PragmaLexer = false;
  InitCharacterInfo();

  // If a MemoryBuffer was specified, use its start as BufferStart. This affects
  // the source location objects produced by this lexer.
  BufferStart = FromFile ? FromFile->getBufferStart() : BufStart;
  BufferPtr = BufStart;
  BufferEnd = BufEnd;

  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We *are* in raw mode.
  LexingRawMode = true;

  // Default to not keeping comments in raw mode.
  ExtendedTokenMode = 0;
}


/// Stringify - Convert the specified string into a C string, with surrounding
/// ""'s, and with escaped \ and " characters.
std::string Lexer::Stringify(const std::string &Str, bool Charify) {
  std::string Result = Str;
  char Quote = Charify ? '\'' : '"';
  // NOTE: both i and e must advance past each inserted backslash so the loop
  // does not re-examine (and re-escape) the character it just inserted.
  for (unsigned i = 0, e = Result.size(); i != e; ++i) {
    if (Result[i] == '\\' || Result[i] == Quote) {
      Result.insert(Result.begin()+i, '\\');
      ++i; ++e;
    }
  }
  return Result;
}

/// Stringify - Convert the specified string into a C string by escaping '\'
/// and " characters.  This does not add surrounding ""'s to the string.
void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
    if (Str[i] == '\\' || Str[i] == '"') {
      Str.insert(Str.begin()+i, '\\');
      ++i; ++e;
    }
  }
}


/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file.  If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM) {
  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getLogicalLoc(Loc);

  const char *StrData = SM.getCharacterData(Loc);

  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
  // all obviously single-char tokens.  This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.


  const char *BufEnd = SM.getBufferData(Loc.getFileID()).second;

  // Create a langops struct and enable trigraphs.  This is sufficient for
  // measuring tokens.
  LangOptions LangOpts;
  LangOpts.Trigraphs = true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(Loc, LangOpts, StrData, BufEnd);
  Token TheTok;
  TheLexer.LexFromRawLexer(TheTok);
  return TheTok.getLength();
}

//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//

// Per-character classification bits, indexed by the raw (unsigned) byte.
static unsigned char CharInfo[256];

enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'
  CHAR_LETTER   = 0x04,  // a-z,A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20   // .
};

static void InitCharacterInfo() {
  // Idempotent: the table only needs to be filled in once per process.
  static bool isInited = false;
  if (isInited) return;
  isInited = true;

  // Initialize the CharInfo table.
  // TODO: statically initialize this.
  CharInfo[(int)' '] = CharInfo[(int)'\t'] =
  CharInfo[(int)'\f'] = CharInfo[(int)'\v'] = CHAR_HORZ_WS;
  CharInfo[(int)'\n'] = CharInfo[(int)'\r'] = CHAR_VERT_WS;

  CharInfo[(int)'_'] = CHAR_UNDER;
  CharInfo[(int)'.'] = CHAR_PERIOD;
  // Mark both the lowercase letter and its uppercase counterpart in one pass.
  for (unsigned i = 'a'; i <= 'z'; ++i)
    CharInfo[i] = CharInfo[i+'A'-'a'] = CHAR_LETTER;
  for (unsigned i = '0'; i <= '9'; ++i)
    CharInfo[i] = CHAR_NUMBER;
}

/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false;
}

/// isHorizontalWhitespace - Return true if this character is horizontal
/// whitespace: ' ', '\t', '\f', '\v'.  Note that this returns false for '\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
  return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
}

/// isWhitespace - Return true if this character is horizontal or vertical
/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.  Note that this returns false
/// for '\0'.
255static inline bool isWhitespace(unsigned char c) { 256 return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false; 257} 258 259/// isNumberBody - Return true if this is the body character of an 260/// preprocessing number, which is [a-zA-Z0-9_.]. 261static inline bool isNumberBody(unsigned char c) { 262 return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ? 263 true : false; 264} 265 266 267//===----------------------------------------------------------------------===// 268// Diagnostics forwarding code. 269//===----------------------------------------------------------------------===// 270 271/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the 272/// lexer buffer was all instantiated at a single point, perform the mapping. 273/// This is currently only used for _Pragma implementation, so it is the slow 274/// path of the hot getSourceLocation method. Do not allow it to be inlined. 275static SourceLocation GetMappedTokenLoc(Preprocessor &PP, 276 SourceLocation FileLoc, 277 unsigned CharNo) DISABLE_INLINE; 278static SourceLocation GetMappedTokenLoc(Preprocessor &PP, 279 SourceLocation FileLoc, 280 unsigned CharNo) { 281 // Otherwise, we're lexing "mapped tokens". This is used for things like 282 // _Pragma handling. Combine the instantiation location of FileLoc with the 283 // physical location. 284 SourceManager &SourceMgr = PP.getSourceManager(); 285 286 // Create a new SLoc which is expanded from logical(FileLoc) but whose 287 // characters come from phys(FileLoc)+Offset. 288 SourceLocation VirtLoc = SourceMgr.getLogicalLoc(FileLoc); 289 SourceLocation PhysLoc = SourceMgr.getPhysicalLoc(FileLoc); 290 PhysLoc = SourceLocation::getFileLoc(PhysLoc.getFileID(), CharNo); 291 return SourceMgr.getInstantiationLoc(PhysLoc, VirtLoc); 292} 293 294/// getSourceLocation - Return a source location identifier for the specified 295/// offset in the current file. 
296SourceLocation Lexer::getSourceLocation(const char *Loc) const { 297 assert(Loc >= BufferStart && Loc <= BufferEnd && 298 "Location out of range for this buffer!"); 299 300 // In the normal case, we're just lexing from a simple file buffer, return 301 // the file id from FileLoc with the offset specified. 302 unsigned CharNo = Loc-BufferStart; 303 if (FileLoc.isFileID()) 304 return SourceLocation::getFileLoc(FileLoc.getFileID(), CharNo); 305 306 assert(PP && "This doesn't work on raw lexers"); 307 return GetMappedTokenLoc(*PP, FileLoc, CharNo); 308} 309 310/// Diag - Forwarding function for diagnostics. This translate a source 311/// position in the current buffer into a SourceLocation object for rendering. 312DiagnosticInfo Lexer::Diag(const char *Loc, unsigned DiagID) const { 313 if (LexingRawMode && Diagnostic::isBuiltinNoteWarningOrExtension(DiagID)) 314 return DiagnosticInfo(0, FullSourceLoc(), 0); 315 return PP->Diag(getSourceLocation(Loc), DiagID); 316} 317 318//===----------------------------------------------------------------------===// 319// Trigraph and Escaped Newline Handling Code. 320//===----------------------------------------------------------------------===// 321 322/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair, 323/// return the decoded trigraph letter it corresponds to, or '\0' if nothing. 324static char GetTrigraphCharForLetter(char Letter) { 325 switch (Letter) { 326 default: return 0; 327 case '=': return '#'; 328 case ')': return ']'; 329 case '(': return '['; 330 case '!': return '|'; 331 case '\'': return '^'; 332 case '>': return '}'; 333 case '/': return '\\'; 334 case '<': return '{'; 335 case '-': return '~'; 336 } 337} 338 339/// DecodeTrigraphChar - If the specified character is a legal trigraph when 340/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled, 341/// return the result character. Finally, emit a warning about trigraph use 342/// whether trigraphs are enabled or not. 
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  // A null lexer means "don't diagnose" (caller is only measuring).
  if (!Res || !L) return Res;

  if (!L->getFeatures().Trigraphs) {
    L->Diag(CP-2, diag::trigraph_ignored);
    return 0;
  }

  L->Diag(CP-2, diag::trigraph_converted) << std::string()+Res;
  return Res;
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: C++ UCN's?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
          // Remember that this token needs to be cleaned.
          if (Tok) Tok->setFlag(Token::NeedsCleaning);

          // Warn if there was whitespace between the backslash and newline.
          if (SizeTmp != 1 && Tok)
            Diag(Ptr, diag::backslash_newline_space);

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;
          // Use slow version to accumulate a correct size field, since the
          // char after an escaped newline may itself need decoding.
          return getCharAndSizeSlow(Ptr, Size, Tok);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // ??/ decodes to '\\', which may in turn start an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}


/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;

          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C)) {
    C = *CurPtr++;
  }
  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: UCNs.
  if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr, tok::identifier);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode) return;

    // Fill in Result.IdentifierInfo, looking up the identifier in the
    // identifier table.
    PP->LookUpIdentifierInfo(Result, IdStart);

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    return PP->HandleIdentifier(Result);
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: UCNs.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}


/// LexNumericConstant - Lex the remainder of a integer or floating point
/// constant.  From[-1] is the first character lexed.  Return the end of the
/// constant.
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isNumberBody(C)) { // FIXME: UCNs?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant, continue (the sign after 'p' is only part
  // of the number when hex floats are enabled).
  if (Features.HexFloats &&
      (C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
}

/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L".
void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return;
    } else if (C == 0) {
      // Embedded nul: remember it so we can warn after the literal is lexed.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  // Update the location of the token as well as the BufferPtr instance var.
  FormTokenWithChars(Result, CurPtr,
                     Wide ? tok::wide_string_literal : tok::string_literal);
}

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.
/// This is used for #include filenames.
void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return;
    } else if (C == 0) {
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
}


/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.
void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this character contain the \0 character?

  // Handle the common case of 'x' and '\y' efficiently.
  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    if (!LexingRawMode) Diag(BufferPtr, diag::err_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return;
  } else if (C == '\\') {
    // Skip the escaped character.
    // FIXME: UCN's.
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
    ++CurPtr;
  } else {
    // Fall back on generic code for embedded nulls, newlines, wide chars.
    do {
      // Skip escaped characters.
      if (C == '\\') {
        // Skip the escaped character.
        C = getAndAdvanceChar(CurPtr, Result);
      } else if (C == '\n' || C == '\r' ||             // Newline.
                 (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
        if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_char);
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        return;
      } else if (C == 0) {
        NulCharacter = CurPtr-1;
      }
      C = getAndAdvanceChar(CurPtr, Result);
    } while (C != '\'');
  }

  if (NulCharacter) Diag(NulCharacter, diag::null_in_char);

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr, tok::char_constant);
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
///
/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
///
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
  // Whitespace - Skip it, then return the token after the whitespace.
  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
  while (1) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we something other than whitespace, we're done.
    if (Char != '\n' && Char != '\r')
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // ok, but handle newline.
    // The returned token is at the start of the line.
    Result.setFlag(Token::StartOfLine);
    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);
    Char = *++CurPtr;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  if (PrevChar != '\n' && PrevChar != '\r')
    Result.setFlag(Token::LeadingSpace);

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  BufferPtr = CurPtr;
  return false;
}

// SkipBCPLComment - We have just read the // characters from input.  Skip until
// we find the newline character that terminates the comment.  Then update
/// BufferPtr and return.  If we're in KeepCommentMode, this will form the token
/// and return true.
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment) {
    Diag(BufferPtr, diag::ext_bcpl_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: Speedup BCPL comment lexing.  Just scan for a \n or \r character.
    // If we find a \n character, scan backwards, checking to see if it's an
    // escaped newline, like we do for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.
    const char *OldPtr = CurPtr;
    C = getAndAdvanceChar(CurPtr, Result);

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isspace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isspace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
          break;
        }
    }

    // NUL read past BufferEnd means we hit EOF inside the comment.
    if (CurPtr == BufferEnd+1) { --CurPtr; break; }
  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveBCPLComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}

/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
/// an appropriate way and return it.
bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
  // If we're not in a preprocessor directive, just return the // comment
  // directly.
  FormTokenWithChars(Result, CurPtr, tok::comment);

  if (!ParsingPreprocessorDirective)
    return true;

  // If this BCPL-style comment is in a macro definition, transmogrify it into
  // a C-style block comment.
  std::string Spelling = PP->getSpelling(Result);
  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
  Spelling[1] = '*';   // Change prefix to "/*".
  Spelling += "*/";    // add suffix.

  Result.setKind(tok::comment);
  Result.setLocation(PP->CreateString(&Spelling[0], Spelling.size(),
                                      Result.getLocation()));
  Result.setLength(Spelling.size());
  return true;
}

/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
/// character (either \n or \r) is part of an escaped newline sequence.  Issue a
/// diagnostic if so.  We know that the newline is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the slash and newline.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    if (CurPtr[-1] != '*') return false;
  } else {
    // It isn't a slash, is it the ?? / trigraph?
    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
        CurPtr[-3] != '*')
      return false;

    // This is the trigraph ending the comment.  Emit a stern warning!
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getFeatures().Trigraphs) {
      L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
      return false;
    }
    L->Diag(CurPtr, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  L->Diag(CurPtr, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace) L->Diag(CurPtr, diag::backslash_newline_space);

  return true;
}

// Vector intrinsics for the fast '/'-scanning loop in SkipBlockComment below.
#ifdef __SSE2__
#include <emmintrin.h>
#elif __ALTIVEC__
#include <altivec.h>
#undef bool
#endif

/// SkipBlockComment - We have just read the /* characters from input.  Read
/// until we find the */ characters that terminate the comment.  Note that we
/// don't bother decoding trigraphs or escaped newlines in block comments,
/// because they cannot cause the comment to end.  The only thing that can
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
///
/// If KeepCommentMode is enabled, this forms a token from the comment and
/// returns true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.
This common 927 // optimization helps people who like to put a lot of * characters in their 928 // comments. 929 930 // The first character we get with newlines and trigraphs skipped to handle 931 // the degenerate /*/ case below correctly if the * has an escaped newline 932 // after it. 933 unsigned CharSize; 934 unsigned char C = getCharAndSize(CurPtr, CharSize); 935 CurPtr += CharSize; 936 if (C == 0 && CurPtr == BufferEnd+1) { 937 if (!LexingRawMode) 938 Diag(BufferPtr, diag::err_unterminated_block_comment); 939 --CurPtr; 940 941 // KeepWhitespaceMode should return this broken comment as a token. Since 942 // it isn't a well formed comment, just return it as an 'unknown' token. 943 if (isKeepWhitespaceMode()) { 944 FormTokenWithChars(Result, CurPtr, tok::unknown); 945 return true; 946 } 947 948 BufferPtr = CurPtr; 949 return false; 950 } 951 952 // Check to see if the first character after the '/*' is another /. If so, 953 // then this slash does not end the block comment, it is part of it. 954 if (C == '/') 955 C = *CurPtr++; 956 957 while (1) { 958 // Skip over all non-interesting characters until we find end of buffer or a 959 // (probably ending) '/' character. 960 if (CurPtr + 24 < BufferEnd) { 961 // While not aligned to a 16-byte boundary. 962 while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0) 963 C = *CurPtr++; 964 965 if (C == '/') goto FoundSlash; 966 967#ifdef __SSE2__ 968 __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/', 969 '/', '/', '/', '/', '/', '/', '/', '/'); 970 while (CurPtr+16 <= BufferEnd && 971 _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0) 972 CurPtr += 16; 973#elif __ALTIVEC__ 974 __vector unsigned char Slashes = { 975 '/', '/', '/', '/', '/', '/', '/', '/', 976 '/', '/', '/', '/', '/', '/', '/', '/' 977 }; 978 while (CurPtr+16 <= BufferEnd && 979 !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes)) 980 CurPtr += 16; 981#else 982 // Scan for '/' quickly. Many block comments are very large. 
983 while (CurPtr[0] != '/' && 984 CurPtr[1] != '/' && 985 CurPtr[2] != '/' && 986 CurPtr[3] != '/' && 987 CurPtr+4 < BufferEnd) { 988 CurPtr += 4; 989 } 990#endif 991 992 // It has to be one of the bytes scanned, increment to it and read one. 993 C = *CurPtr++; 994 } 995 996 // Loop to scan the remainder. 997 while (C != '/' && C != '\0') 998 C = *CurPtr++; 999 1000 FoundSlash: 1001 if (C == '/') { 1002 if (CurPtr[-2] == '*') // We found the final */. We're done! 1003 break; 1004 1005 if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) { 1006 if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) { 1007 // We found the final */, though it had an escaped newline between the 1008 // * and /. We're done! 1009 break; 1010 } 1011 } 1012 if (CurPtr[0] == '*' && CurPtr[1] != '/') { 1013 // If this is a /* inside of the comment, emit a warning. Don't do this 1014 // if this is a /*/, which will end the comment. This misses cases with 1015 // embedded escaped newlines, but oh well. 1016 Diag(CurPtr-1, diag::warn_nested_block_comment); 1017 } 1018 } else if (C == 0 && CurPtr == BufferEnd+1) { 1019 if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_block_comment); 1020 // Note: the user probably forgot a */. We could continue immediately 1021 // after the /*, but this would involve lexing a lot of what really is the 1022 // comment, which surely would confuse the parser. 1023 --CurPtr; 1024 1025 // KeepWhitespaceMode should return this broken comment as a token. Since 1026 // it isn't a well formed comment, just return it as an 'unknown' token. 1027 if (isKeepWhitespaceMode()) { 1028 FormTokenWithChars(Result, CurPtr, tok::unknown); 1029 return true; 1030 } 1031 1032 BufferPtr = CurPtr; 1033 return false; 1034 } 1035 C = *CurPtr++; 1036 } 1037 1038 // If we are returning comments as tokens, return this comment as a token. 
1039 if (inKeepCommentMode()) { 1040 FormTokenWithChars(Result, CurPtr, tok::comment); 1041 return true; 1042 } 1043 1044 // It is common for the tokens immediately after a /**/ comment to be 1045 // whitespace. Instead of going through the big switch, handle it 1046 // efficiently now. This is safe even in KeepWhitespaceMode because we would 1047 // have already returned above with the comment as a token. 1048 if (isHorizontalWhitespace(*CurPtr)) { 1049 Result.setFlag(Token::LeadingSpace); 1050 SkipWhitespace(Result, CurPtr+1); 1051 return false; 1052 } 1053 1054 // Otherwise, just return so that the next character will be lexed as a token. 1055 BufferPtr = CurPtr; 1056 Result.setFlag(Token::LeadingSpace); 1057 return false; 1058} 1059 1060//===----------------------------------------------------------------------===// 1061// Primary Lexing Entry Points 1062//===----------------------------------------------------------------------===// 1063 1064/// ReadToEndOfLine - Read the rest of the current preprocessor line as an 1065/// uninterpreted string. This switches the lexer out of directive mode. 1066std::string Lexer::ReadToEndOfLine() { 1067 assert(ParsingPreprocessorDirective && ParsingFilename == false && 1068 "Must be in a preprocessing directive!"); 1069 std::string Result; 1070 Token Tmp; 1071 1072 // CurPtr - Cache BufferPtr in an automatic variable. 1073 const char *CurPtr = BufferPtr; 1074 while (1) { 1075 char Char = getAndAdvanceChar(CurPtr, Tmp); 1076 switch (Char) { 1077 default: 1078 Result += Char; 1079 break; 1080 case 0: // Null. 1081 // Found end of file? 1082 if (CurPtr-1 != BufferEnd) { 1083 // Nope, normal character, continue. 1084 Result += Char; 1085 break; 1086 } 1087 // FALL THROUGH. 1088 case '\r': 1089 case '\n': 1090 // Okay, we found the end of the line. First, back up past the \0, \r, \n. 
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOM transition.
      Lex(Tmp);
      assert(Tmp.is(tok::eom) && "Unexpected token!");

      // Finally, we're done, return the string we found.
      return Result;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eom);

    // Restore comment saving mode, in case it was disabled for directive.
    SetCommentRetentionState(PP->getCommentRetentionState());
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (LexingRawMode) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  // Otherwise, issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error for each unterminated
  // conditional on the stack.
  while (!ConditionalStack.empty()) {
    PreprocessorLexer::Diag(ConditionalStack.back().IfLoc,
                            diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
    Diag(BufferEnd, diag::ext_no_newline_eof);

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result);
}

/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
/// the specified lexer will return a tok::l_paren token, 0 if it is something
/// else and 2 if there are no more tokens in the buffer controlled by the
/// lexer.
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  // Switch to 'skipping' mode.  This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
  const char *TmpBufferPtr = BufferPtr;

  Token Tok;
  Tok.startToken();
  LexTokenInternal(Tok);

  // Restore state that may have changed.
  BufferPtr = TmpBufferPtr;

  // Restore the lexer back to non-skipping mode.
  LexingRawMode = false;

  if (Tok.is(tok::eof))
    return 2;
  return Tok.is(tok::l_paren);
}


/// LexTokenInternal - This implements a simple C family lexer.  It is an
/// extremely performance critical piece of code.  This assumes that the buffer
/// has a null character at the end of the file.  Return true if an error
/// occurred and compilation should terminate, false if normal.  This returns a
/// preprocessing token, not a normal token, as such, it is an internal
/// interface.  It assumes that the Flags of result have been cleared before
/// calling this.
void Lexer::LexTokenInternal(Token &Result) {
LexNextToken:
  // New token, can't need cleaning yet.
  Result.clearFlag(Token::NeedsCleaning);
  Result.setIdentifierInfo(0);

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace is very common between tokens.
  if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
    ++CurPtr;
    while ((*CurPtr == ' ') || (*CurPtr == '\t'))
      ++CurPtr;

    // If we are keeping whitespace and other tokens, just return what we just
    // skipped.  The next lexer invocation will return the token after the
    // whitespace.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return;
    }

    BufferPtr = CurPtr;
    Result.setFlag(Token::LeadingSpace);
  }

  unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.

  // Read a character, advancing over it.
  char Char = getAndAdvanceChar(CurPtr, Result);
  tok::TokenKind Kind;

  switch (Char) {
  case 0:  // Null.
    // Found end of file?
    if (CurPtr-1 == BufferEnd) {
      // Read the PP instance variable into an automatic variable, because
      // LexEndOfFile will often delete 'this'.
      Preprocessor *PPCache = PP;
      if (LexEndOfFile(Result, CurPtr-1))  // Retreat back into the file.
        return;   // Got a token to return.
      assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
      return PPCache->Lex(Result);
    }

    // Embedded NUL that is not EOF: diagnose and treat it as whitespace.
    Diag(CurPtr-1, diag::null_in_file);
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr))
      return; // KeepWhitespaceMode

    goto LexNextToken;   // GCC isn't tail call eliminating.
  case '\n':
  case '\r':
    // If we are inside a preprocessor directive and we see the end of line,
    // we know we are done with the directive, so return an EOM token.
    if (ParsingPreprocessorDirective) {
      // Done parsing the "line".
      ParsingPreprocessorDirective = false;

      // Restore comment saving mode, in case it was disabled for directive.
      SetCommentRetentionState(PP->getCommentRetentionState());

      // Since we consumed a newline, we are back at the start of a line.
      IsAtStartOfLine = true;

      Kind = tok::eom;
      break;
    }
    // The returned token is at the start of the line.
    Result.setFlag(Token::StartOfLine);
    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);

    if (SkipWhitespace(Result, CurPtr))
      return; // KeepWhitespaceMode
    goto LexNextToken;   // GCC isn't tail call eliminating.
  case ' ':
  case '\t':
  case '\f':
  case '\v':
  SkipHorizontalWhitespace:
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr))
      return; // KeepWhitespaceMode

  SkipIgnoredUnits:
    CurPtr = BufferPtr;

    // If the next token is obviously a // or /* */ comment, skip it efficiently
    // too (without going through the big switch stmt).
    if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode()) {
      SkipBCPLComment(Result, CurPtr+2);
      goto SkipIgnoredUnits;
    } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
      SkipBlockComment(Result, CurPtr+2);
      goto SkipIgnoredUnits;
    } else if (isHorizontalWhitespace(*CurPtr)) {
      goto SkipHorizontalWhitespace;
    }
    goto LexNextToken;   // GCC isn't tail call eliminating.

  // C99 6.4.4.1: Integer Constants.
  // C99 6.4.4.2: Floating Constants.
  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexNumericConstant(Result, CurPtr);

  case 'L':   // Identifier (Loony) or wide literal (L'x' or L"xyz").
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    Char = getCharAndSize(CurPtr, SizeTmp);

    // Wide string literal.
    if (Char == '"')
      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                              true);

    // Wide character constant.
    if (Char == '\'')
      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    // FALL THROUGH, treating L like the start of an identifier.

  // C99 6.4.2: Identifiers.
  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
  case 'H': case 'I': case 'J': case 'K':    /*'L'*/case 'M': case 'N':
  case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
  case 'V': case 'W': case 'X': case 'Y': case 'Z':
  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
  case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
  case 'v': case 'w': case 'x': case 'y': case 'z':
  case '_':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexIdentifier(Result, CurPtr);

  case '$':   // $ in identifiers.
    if (Features.DollarIdents) {
      Diag(CurPtr-1, diag::ext_dollar_in_identifier);
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();
      return LexIdentifier(Result, CurPtr);
    }

    Kind = tok::unknown;
    break;

  // C99 6.4.4: Character Constants.
  case '\'':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexCharConstant(Result, CurPtr);

  // C99 6.4.5: String Literals.
  case '"':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexStringLiteral(Result, CurPtr, false);

  // C99 6.4.6: Punctuators.
  case '?':
    Kind = tok::question;
    break;
  case '[':
    Kind = tok::l_square;
    break;
  case ']':
    Kind = tok::r_square;
    break;
  case '(':
    Kind = tok::l_paren;
    break;
  case ')':
    Kind = tok::r_paren;
    break;
  case '{':
    Kind = tok::l_brace;
    break;
  case '}':
    Kind = tok::r_brace;
    break;
  case '.':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char >= '0' && Char <= '9') {
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();

      return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    } else if (Features.CPlusPlus && Char == '*') {
      Kind = tok::periodstar;
      CurPtr += SizeTmp;
    } else if (Char == '.' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
      Kind = tok::ellipsis;
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else {
      Kind = tok::period;
    }
    break;
  case '&':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '&') {
      Kind = tok::ampamp;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Kind = tok::ampequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::amp;
    }
    break;
  case '*':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::starequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::star;
    }
    break;
  case '+':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '+') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusplus;
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusequal;
    } else {
      Kind = tok::plus;
    }
    break;
  case '-':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '-') {      // --
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusminus;
    } else if (Char == '>' && Features.CPlusPlus &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {  // C++ ->*
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
      Kind = tok::arrowstar;
    } else if (Char == '>') {   // ->
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::arrow;
    } else if (Char == '=') {   // -=
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusequal;
    } else {
      Kind = tok::minus;
    }
    break;
  case '~':
    Kind = tok::tilde;
    break;
  case '!':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::exclaimequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::exclaim;
    }
    break;
  case '/':
    // 6.4.9: Comments
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '/') {         // BCPL comment.
      if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
        return; // KeepCommentMode

      // It is common for the tokens immediately after a // comment to be
      // whitespace (indentation for the next line).  Instead of going through
      // the big switch, handle it efficiently now.
      goto SkipIgnoredUnits;
    } else if (Char == '*') {  // /**/ comment.
      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
        return; // KeepCommentMode
      goto LexNextToken;   // GCC isn't tail call eliminating.
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::slashequal;
    } else {
      Kind = tok::slash;
    }
    break;
  case '%':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Kind = tok::percentequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == '>') {
      Kind = tok::r_brace;                             // '%>' -> '}'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == ':') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Char = getCharAndSize(CurPtr, SizeTmp);
      if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
        Kind = tok::hashhash;                          // '%:%:' -> '##'
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else if (Char == '@' && Features.Microsoft) {  // %:@ -> #@ -> Charize
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        Diag(BufferPtr, diag::charize_microsoft_ext);
        Kind = tok::hashat;
      } else {
        Kind = tok::hash;       // '%:' -> '#'

        // We parsed a # character.  If this occurs at the start of the line,
        // it's actually the start of a preprocessing directive.  Callback to
        // the preprocessor to handle it.
        // FIXME: -fpreprocessed mode??
        if (Result.isAtStartOfLine() && !LexingRawMode) {
          BufferPtr = CurPtr;
          PP->HandleDirective(Result);

          // As an optimization, if the preprocessor didn't switch lexers, tail
          // recurse.
          if (PP->isCurrentLexer(this)) {
            // Start a new token.  If this is a #include or something, the PP
            // may want us starting at the beginning of the line again.  If so,
            // set the StartOfLine flag.
            if (IsAtStartOfLine) {
              Result.setFlag(Token::StartOfLine);
              IsAtStartOfLine = false;
            }
            goto LexNextToken;   // GCC isn't tail call eliminating.
          }

          return PP->Lex(Result);
        }
      }
    } else {
      Kind = tok::percent;
    }
    break;
  case '<':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (ParsingFilename) {
      return LexAngledStringLiteral(Result, CurPtr+SizeTmp);
    } else if (Char == '<' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
      Kind = tok::lesslessequal;
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else if (Char == '<') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::lessless;
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::lessequal;
    } else if (Features.Digraphs && Char == ':') {     // '<:' -> '['
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::l_square;
    } else if (Features.Digraphs && Char == '%') {     // '<%' -> '{'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::l_brace;
    } else {
      Kind = tok::less;
    }
    break;
  case '>':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::greaterequal;
    } else if (Char == '>' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
      Kind = tok::greatergreaterequal;
    } else if (Char == '>') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::greatergreater;
    } else {
      Kind = tok::greater;
    }
    break;
  case '^':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::caretequal;
    } else {
      Kind = tok::caret;
    }
    break;
  case '|':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Kind = tok::pipeequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '|') {
      Kind = tok::pipepipe;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::pipe;
    }
    break;
  case ':':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Features.Digraphs && Char == '>') {
      Kind = tok::r_square; // ':>' -> ']'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.CPlusPlus && Char == ':') {
      Kind = tok::coloncolon;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::colon;
    }
    break;
  case ';':
    Kind = tok::semi;
    break;
  case '=':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Kind = tok::equalequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::equal;
    }
    break;
  case ',':
    Kind = tok::comma;
    break;
  case '#':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '#') {
      Kind = tok::hashhash;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '@' && Features.Microsoft) {  // #@ -> Charize
      Kind = tok::hashat;
      Diag(BufferPtr, diag::charize_microsoft_ext);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::hash;
      // We parsed a # character.  If this occurs at the start of the line,
      // it's actually the start of a preprocessing directive.  Callback to
      // the preprocessor to handle it.
      // FIXME: -fpreprocessed mode??
      if (Result.isAtStartOfLine() && !LexingRawMode) {
        BufferPtr = CurPtr;
        PP->HandleDirective(Result);

        // As an optimization, if the preprocessor didn't switch lexers, tail
        // recurse.
        if (PP->isCurrentLexer(this)) {
          // Start a new token.  If this is a #include or something, the PP may
          // want us starting at the beginning of the line again.  If so, set
          // the StartOfLine flag.
          if (IsAtStartOfLine) {
            Result.setFlag(Token::StartOfLine);
            IsAtStartOfLine = false;
          }
          goto LexNextToken;   // GCC isn't tail call eliminating.
        }
        return PP->Lex(Result);
      }
    }
    break;

  case '@':
    // Objective C support.
    if (CurPtr[-1] == '@' && Features.ObjC1)
      Kind = tok::at;
    else
      Kind = tok::unknown;
    break;

  case '\\':
    // FIXME: UCN's.
    // FALL THROUGH.
  default:
    Kind = tok::unknown;
    break;
  }

  // Notify MIOpt that we read a non-whitespace/non-comment token.
  MIOpt.ReadToken();

  // Update the location of token as well as BufferPtr.
  FormTokenWithChars(Result, CurPtr, Kind);
}