Lexer.cpp revision 33611e0d5ab1372608a7649b1877cd4300621c71
1//===--- Lexer.cpp - C Language Family Lexer ------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the Lexer and Token interfaces. 11// 12//===----------------------------------------------------------------------===// 13// 14// TODO: GCC Diagnostics emitted by the lexer: 15// PEDWARN: (form feed|vertical tab) in preprocessing directive 16// 17// Universal characters, unicode, char mapping: 18// WARNING: `%.*s' is not in NFKC 19// WARNING: `%.*s' is not in NFC 20// 21// Other: 22// TODO: Options to support: 23// -fexec-charset,-fwide-exec-charset 24// 25//===----------------------------------------------------------------------===// 26 27#include "clang/Lex/Lexer.h" 28#include "clang/Lex/Preprocessor.h" 29#include "clang/Lex/LexDiagnostic.h" 30#include "clang/Basic/SourceManager.h" 31#include "llvm/Support/Compiler.h" 32#include "llvm/Support/MemoryBuffer.h" 33#include <cctype> 34using namespace clang; 35 36static void InitCharacterInfo(); 37 38//===----------------------------------------------------------------------===// 39// Token Class Implementation 40//===----------------------------------------------------------------------===// 41 42/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier. 43bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const { 44 if (IdentifierInfo *II = getIdentifierInfo()) 45 return II->getObjCKeywordID() == objcKey; 46 return false; 47} 48 49/// getObjCKeywordID - Return the ObjC keyword kind. 50tok::ObjCKeywordKind Token::getObjCKeywordID() const { 51 IdentifierInfo *specId = getIdentifierInfo(); 52 return specId ? 
specId->getObjCKeywordID() : tok::objc_not_keyword; 53} 54 55 56//===----------------------------------------------------------------------===// 57// Lexer Class Implementation 58//===----------------------------------------------------------------------===// 59 60void Lexer::InitLexer(const char *BufStart, const char *BufPtr, 61 const char *BufEnd) { 62 InitCharacterInfo(); 63 64 BufferStart = BufStart; 65 BufferPtr = BufPtr; 66 BufferEnd = BufEnd; 67 68 assert(BufEnd[0] == 0 && 69 "We assume that the input buffer has a null character at the end" 70 " to simplify lexing!"); 71 72 Is_PragmaLexer = false; 73 IsInConflictMarker = false; 74 75 // Start of the file is a start of line. 76 IsAtStartOfLine = true; 77 78 // We are not after parsing a #. 79 ParsingPreprocessorDirective = false; 80 81 // We are not after parsing #include. 82 ParsingFilename = false; 83 84 // We are not in raw mode. Raw mode disables diagnostics and interpretation 85 // of tokens (e.g. identifiers, thus disabling macro expansion). It is used 86 // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block 87 // or otherwise skipping over tokens. 88 LexingRawMode = false; 89 90 // Default to not keeping comments. 91 ExtendedTokenMode = 0; 92} 93 94/// Lexer constructor - Create a new lexer object for the specified buffer 95/// with the specified preprocessor managing the lexing process. This lexer 96/// assumes that the associated file buffer and Preprocessor objects will 97/// outlive it, so it doesn't take ownership of either of them. 98Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP) 99 : PreprocessorLexer(&PP, FID), 100 FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)), 101 Features(PP.getLangOptions()) { 102 103 InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(), 104 InputFile->getBufferEnd()); 105 106 // Default to keeping comments if the preprocessor wants them. 
107 SetCommentRetentionState(PP.getCommentRetentionState()); 108} 109 110/// Lexer constructor - Create a new raw lexer object. This object is only 111/// suitable for calls to 'LexRawToken'. This lexer assumes that the text 112/// range will outlive it, so it doesn't take ownership of it. 113Lexer::Lexer(SourceLocation fileloc, const LangOptions &features, 114 const char *BufStart, const char *BufPtr, const char *BufEnd) 115 : FileLoc(fileloc), Features(features) { 116 117 InitLexer(BufStart, BufPtr, BufEnd); 118 119 // We *are* in raw mode. 120 LexingRawMode = true; 121} 122 123/// Lexer constructor - Create a new raw lexer object. This object is only 124/// suitable for calls to 'LexRawToken'. This lexer assumes that the text 125/// range will outlive it, so it doesn't take ownership of it. 126Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile, 127 const SourceManager &SM, const LangOptions &features) 128 : FileLoc(SM.getLocForStartOfFile(FID)), Features(features) { 129 130 InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(), 131 FromFile->getBufferEnd()); 132 133 // We *are* in raw mode. 134 LexingRawMode = true; 135} 136 137/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for 138/// _Pragma expansion. This has a variety of magic semantics that this method 139/// sets up. It returns a new'd Lexer that must be delete'd when done. 140/// 141/// On entrance to this routine, TokStartLoc is a macro location which has a 142/// spelling loc that indicates the bytes to be lexed for the token and an 143/// instantiation location that indicates where all lexed tokens should be 144/// "expanded from". 145/// 146/// FIXME: It would really be nice to make _Pragma just be a wrapper around a 147/// normal lexer that remaps tokens as they fly by. This would require making 148/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer 149/// interface that could handle this stuff. 
This would pull GetMappedTokenLoc 150/// out of the critical path of the lexer! 151/// 152Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc, 153 SourceLocation InstantiationLocStart, 154 SourceLocation InstantiationLocEnd, 155 unsigned TokLen, Preprocessor &PP) { 156 SourceManager &SM = PP.getSourceManager(); 157 158 // Create the lexer as if we were going to lex the file normally. 159 FileID SpellingFID = SM.getFileID(SpellingLoc); 160 const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID); 161 Lexer *L = new Lexer(SpellingFID, InputFile, PP); 162 163 // Now that the lexer is created, change the start/end locations so that we 164 // just lex the subsection of the file that we want. This is lexing from a 165 // scratch buffer. 166 const char *StrData = SM.getCharacterData(SpellingLoc); 167 168 L->BufferPtr = StrData; 169 L->BufferEnd = StrData+TokLen; 170 assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!"); 171 172 // Set the SourceLocation with the remapping information. This ensures that 173 // GetMappedTokenLoc will remap the tokens as they are lexed. 174 L->FileLoc = SM.createInstantiationLoc(SM.getLocForStartOfFile(SpellingFID), 175 InstantiationLocStart, 176 InstantiationLocEnd, TokLen); 177 178 // Ensure that the lexer thinks it is inside a directive, so that end \n will 179 // return an EOM token. 180 L->ParsingPreprocessorDirective = true; 181 182 // This lexer really is for _Pragma. 183 L->Is_PragmaLexer = true; 184 return L; 185} 186 187 188/// Stringify - Convert the specified string into a C string, with surrounding 189/// ""'s, and with escaped \ and " characters. 190std::string Lexer::Stringify(const std::string &Str, bool Charify) { 191 std::string Result = Str; 192 char Quote = Charify ? 
'\'' : '"'; 193 for (unsigned i = 0, e = Result.size(); i != e; ++i) { 194 if (Result[i] == '\\' || Result[i] == Quote) { 195 Result.insert(Result.begin()+i, '\\'); 196 ++i; ++e; 197 } 198 } 199 return Result; 200} 201 202/// Stringify - Convert the specified string into a C string by escaping '\' 203/// and " characters. This does not add surrounding ""'s to the string. 204void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) { 205 for (unsigned i = 0, e = Str.size(); i != e; ++i) { 206 if (Str[i] == '\\' || Str[i] == '"') { 207 Str.insert(Str.begin()+i, '\\'); 208 ++i; ++e; 209 } 210 } 211} 212 213static bool isWhitespace(unsigned char c); 214 215/// MeasureTokenLength - Relex the token at the specified location and return 216/// its length in bytes in the input file. If the token needs cleaning (e.g. 217/// includes a trigraph or an escaped newline) then this count includes bytes 218/// that are part of that. 219unsigned Lexer::MeasureTokenLength(SourceLocation Loc, 220 const SourceManager &SM, 221 const LangOptions &LangOpts) { 222 // TODO: this could be special cased for common tokens like identifiers, ')', 223 // etc to make this faster, if it mattered. Just look at StrData[0] to handle 224 // all obviously single-char tokens. This could use 225 // Lexer::isObviouslySimpleCharacter for example to handle identifiers or 226 // something. 227 228 // If this comes from a macro expansion, we really do want the macro name, not 229 // the token this macro expanded to. 230 Loc = SM.getInstantiationLoc(Loc); 231 std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc); 232 bool Invalid = false; 233 llvm::StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid); 234 if (Invalid) 235 return 0; 236 237 const char *StrData = Buffer.data()+LocInfo.second; 238 239 if (isWhitespace(StrData[0])) 240 return 0; 241 242 // Create a lexer starting at the beginning of this token. 
243 Lexer TheLexer(Loc, LangOpts, Buffer.begin(), StrData, Buffer.end()); 244 TheLexer.SetCommentRetentionState(true); 245 Token TheTok; 246 TheLexer.LexFromRawLexer(TheTok); 247 return TheTok.getLength(); 248} 249 250//===----------------------------------------------------------------------===// 251// Character information. 252//===----------------------------------------------------------------------===// 253 254enum { 255 CHAR_HORZ_WS = 0x01, // ' ', '\t', '\f', '\v'. Note, no '\0' 256 CHAR_VERT_WS = 0x02, // '\r', '\n' 257 CHAR_LETTER = 0x04, // a-z,A-Z 258 CHAR_NUMBER = 0x08, // 0-9 259 CHAR_UNDER = 0x10, // _ 260 CHAR_PERIOD = 0x20 // . 261}; 262 263// Statically initialize CharInfo table based on ASCII character set 264// Reference: FreeBSD 7.2 /usr/share/misc/ascii 265static const unsigned char CharInfo[256] = 266{ 267// 0 NUL 1 SOH 2 STX 3 ETX 268// 4 EOT 5 ENQ 6 ACK 7 BEL 269 0 , 0 , 0 , 0 , 270 0 , 0 , 0 , 0 , 271// 8 BS 9 HT 10 NL 11 VT 272//12 NP 13 CR 14 SO 15 SI 273 0 , CHAR_HORZ_WS, CHAR_VERT_WS, CHAR_HORZ_WS, 274 CHAR_HORZ_WS, CHAR_VERT_WS, 0 , 0 , 275//16 DLE 17 DC1 18 DC2 19 DC3 276//20 DC4 21 NAK 22 SYN 23 ETB 277 0 , 0 , 0 , 0 , 278 0 , 0 , 0 , 0 , 279//24 CAN 25 EM 26 SUB 27 ESC 280//28 FS 29 GS 30 RS 31 US 281 0 , 0 , 0 , 0 , 282 0 , 0 , 0 , 0 , 283//32 SP 33 ! 34 " 35 # 284//36 $ 37 % 38 & 39 ' 285 CHAR_HORZ_WS, 0 , 0 , 0 , 286 0 , 0 , 0 , 0 , 287//40 ( 41 ) 42 * 43 + 288//44 , 45 - 46 . 47 / 289 0 , 0 , 0 , 0 , 290 0 , 0 , CHAR_PERIOD , 0 , 291//48 0 49 1 50 2 51 3 292//52 4 53 5 54 6 55 7 293 CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , 294 CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , 295//56 8 57 9 58 : 59 ; 296//60 < 61 = 62 > 63 ? 
297 CHAR_NUMBER , CHAR_NUMBER , 0 , 0 , 298 0 , 0 , 0 , 0 , 299//64 @ 65 A 66 B 67 C 300//68 D 69 E 70 F 71 G 301 0 , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 302 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 303//72 H 73 I 74 J 75 K 304//76 L 77 M 78 N 79 O 305 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 306 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 307//80 P 81 Q 82 R 83 S 308//84 T 85 U 86 V 87 W 309 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 310 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 311//88 X 89 Y 90 Z 91 [ 312//92 \ 93 ] 94 ^ 95 _ 313 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 0 , 314 0 , 0 , 0 , CHAR_UNDER , 315//96 ` 97 a 98 b 99 c 316//100 d 101 e 102 f 103 g 317 0 , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 318 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 319//104 h 105 i 106 j 107 k 320//108 l 109 m 110 n 111 o 321 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 322 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 323//112 p 113 q 114 r 115 s 324//116 t 117 u 118 v 119 w 325 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 326 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 327//120 x 121 y 122 z 123 { 328//124 | 125 } 126 ~ 127 DEL 329 CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , 0 , 330 0 , 0 , 0 , 0 331}; 332 333static void InitCharacterInfo() { 334 static bool isInited = false; 335 if (isInited) return; 336 // check the statically-initialized CharInfo table 337 assert(CHAR_HORZ_WS == CharInfo[(int)' ']); 338 assert(CHAR_HORZ_WS == CharInfo[(int)'\t']); 339 assert(CHAR_HORZ_WS == CharInfo[(int)'\f']); 340 assert(CHAR_HORZ_WS == CharInfo[(int)'\v']); 341 assert(CHAR_VERT_WS == CharInfo[(int)'\n']); 342 assert(CHAR_VERT_WS == CharInfo[(int)'\r']); 343 assert(CHAR_UNDER == CharInfo[(int)'_']); 344 assert(CHAR_PERIOD == CharInfo[(int)'.']); 345 for (unsigned i = 'a'; i <= 'z'; ++i) { 346 assert(CHAR_LETTER == CharInfo[i]); 347 assert(CHAR_LETTER == 
CharInfo[i+'A'-'a']); 348 } 349 for (unsigned i = '0'; i <= '9'; ++i) 350 assert(CHAR_NUMBER == CharInfo[i]); 351 352 isInited = true; 353} 354 355 356/// isIdentifierBody - Return true if this is the body character of an 357/// identifier, which is [a-zA-Z0-9_]. 358static inline bool isIdentifierBody(unsigned char c) { 359 return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false; 360} 361 362/// isHorizontalWhitespace - Return true if this character is horizontal 363/// whitespace: ' ', '\t', '\f', '\v'. Note that this returns false for '\0'. 364static inline bool isHorizontalWhitespace(unsigned char c) { 365 return (CharInfo[c] & CHAR_HORZ_WS) ? true : false; 366} 367 368/// isWhitespace - Return true if this character is horizontal or vertical 369/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'. Note that this returns false 370/// for '\0'. 371static inline bool isWhitespace(unsigned char c) { 372 return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false; 373} 374 375/// isNumberBody - Return true if this is the body character of an 376/// preprocessing number, which is [a-zA-Z0-9_.]. 377static inline bool isNumberBody(unsigned char c) { 378 return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ? 379 true : false; 380} 381 382 383//===----------------------------------------------------------------------===// 384// Diagnostics forwarding code. 385//===----------------------------------------------------------------------===// 386 387/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the 388/// lexer buffer was all instantiated at a single point, perform the mapping. 389/// This is currently only used for _Pragma implementation, so it is the slow 390/// path of the hot getSourceLocation method. Do not allow it to be inlined. 
static DISABLE_INLINE SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                                       SourceLocation FileLoc,
                                                       unsigned CharNo,
                                                       unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be an instantiation");

  // Otherwise, we're lexing "mapped tokens".  This is used for things like
  // _Pragma handling.  Combine the instantiation location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Instantiation(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getFileLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  std::pair<SourceLocation,SourceLocation> II =
    SM.getImmediateInstantiationRange(FileLoc);

  return SM.createInstantiationLoc(SpellingLoc, II.first, II.second, TokLen);
}

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.  Loc must point into [BufferStart, BufferEnd].
SourceLocation Lexer::getSourceLocation(const char *Loc,
                                        unsigned TokLen) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getFileLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.  Raw lexers have no
  // Preprocessor, so they cannot take this path.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}

/// Diag - Forwarding function for diagnostics.  This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}

//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}

/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a trigraph warning.  If trigraphs are enabled,
/// return the result character.  Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.  A null L suppresses all diagnostics
/// (used by the NoWarn path) and just reports the decoded character.
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  if (!Res || !L) return Res;

  // Trigraphs disabled: warn (unless in raw mode) and treat as not-a-trigraph.
  if (!L->getFeatures().Trigraphs) {
    if (!L->isLexingRawMode())
      L->Diag(CP-2, diag::trigraph_ignored);
    return 0;
  }

  if (!L->isLexingRawMode())
    L->Diag(CP-2, diag::trigraph_converted) << std::string()+Res;
  return Res;
}

/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline.  P[-1] is known to be a "\" or a
/// trigraph equivalent on entry to this function.
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
  unsigned Size = 0;
  // Scan over optional horizontal whitespace until a newline is found; if the
  // whitespace run ends without a newline, this was not an escaped newline.
  while (isWhitespace(Ptr[Size])) {
    ++Size;

    if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
      continue;

    // If this is a \r\n or \n\r, skip the other half.
    if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
        Ptr[Size-1] != Ptr[Size])
      ++Size;

    return Size;
  }

  // Not an escaped newline, must be a \t or something else.
  return 0;
}

/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
/// them), skip over them and return the first non-escaped-newline found,
/// otherwise return P.
const char *Lexer::SkipEscapedNewLines(const char *P) {
  while (1) {
    const char *AfterEscape;
    if (*P == '\\') {
      AfterEscape = P+1;
    } else if (*P == '?') {
      // If not a trigraph for escape (??/ == '\\'), bail out.
      if (P[1] != '?' || P[2] != '/')
        return P;
      AfterEscape = P+3;
    } else {
      return P;
    }

    unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
    if (NewLineSize == 0) return P;
    P = AfterEscape+NewLineSize;
  }
}


/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: C++ UCN's?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;
      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    // A null Lexer is passed when no Token is given so no diagnostics fire.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // A trigraph that decodes to '\\' may itself begin an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}


/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into
/// Size, and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>.  Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr  += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

/// LexIdentifier - Lex the remainder of an identifier.  On entry, BufferPtr
/// points at the first character and CurPtr just past it.
void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C))
    C = *CurPtr++;

  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: UCNs.
  //
  // TODO: Could merge these checks into a CharInfo flag to make the comparison
  // cheaper
  if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    const char *IdStart = BufferPtr;
    FormTokenWithChars(Result, CurPtr, tok::identifier);

    // If we are in raw mode, return this identifier raw.  There is no need to
    // look up identifier information or attempt to macro expand it.
    if (LexingRawMode) return;

    // Fill in Result.IdentifierInfo, looking up the identifier in the
    // identifier table.
    IdentifierInfo *II = PP->LookUpIdentifierInfo(Result, IdStart);

    // Change the kind of this identifier to the appropriate token kind, e.g.
    // turning "for" into a keyword.
    Result.setKind(II->getTokenID());

    // Finally, now that we know we have an identifier, pass this off to the
    // preprocessor, which may macro expand it or something.
    if (II->isHandleIdentifierCase())
      PP->HandleIdentifier(Result);
    return;
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path that uses
  // getCharAndSize to decode trigraphs and escaped newlines.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    // Consume any further plain identifier-body characters before rechecking
    // the special cases above.
    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: UCNs.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}


/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant.  From[-1] is the first character lexed.  Return the end of the
/// constant.
void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  // Consume the pp-number body: [a-zA-Z0-9_.]*.
  while (isNumberBody(C)) { // FIXME: UCNs?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant (p exponent), continue.
  // NOTE(review): the p-exponent sign is deliberately not consumed in C++0x
  // mode — confirm intended rationale against the language standard.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p') &&
      (!PP || !PP->getLangOptions().CPlusPlus0x))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
}

/// LexStringLiteral - Lex the remainder of a string literal, after having
/// lexed either " or L".
750void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) { 751 const char *NulCharacter = 0; // Does this string contain the \0 character? 752 753 char C = getAndAdvanceChar(CurPtr, Result); 754 while (C != '"') { 755 // Skip escaped characters. 756 bool Escaped = false; 757 if (C == '\\') { 758 // Skip the escaped character. 759 C = getAndAdvanceChar(CurPtr, Result); 760 Escaped = true; 761 } 762 763 if ((!Escaped && (C == '\n' || C == '\r')) || // Newline. 764 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 765 if (!isLexingRawMode() && !Features.AsmPreprocessor) 766 Diag(BufferPtr, diag::err_unterminated_string); 767 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 768 return; 769 } else if (C == 0) { 770 NulCharacter = CurPtr-1; 771 } 772 773 C = getAndAdvanceChar(CurPtr, Result); 774 } 775 776 // If a nul character existed in the string, warn about it. 777 if (NulCharacter && !isLexingRawMode()) 778 Diag(NulCharacter, diag::null_in_string); 779 780 // Update the location of the token as well as the BufferPtr instance var. 781 const char *TokStart = BufferPtr; 782 FormTokenWithChars(Result, CurPtr, 783 Wide ? tok::wide_string_literal : tok::string_literal); 784 Result.setLiteralData(TokStart); 785} 786 787/// LexAngledStringLiteral - Lex the remainder of an angled string literal, 788/// after having lexed the '<' character. This is used for #include filenames. 789void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) { 790 const char *NulCharacter = 0; // Does this string contain the \0 character? 791 const char *AfterLessPos = CurPtr; 792 char C = getAndAdvanceChar(CurPtr, Result); 793 while (C != '>') { 794 // Skip escaped characters. 795 if (C == '\\') { 796 // Skip the escaped character. 797 C = getAndAdvanceChar(CurPtr, Result); 798 } else if (C == '\n' || C == '\r' || // Newline. 799 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 
800 // If the filename is unterminated, then it must just be a lone < 801 // character. Return this as such. 802 FormTokenWithChars(Result, AfterLessPos, tok::less); 803 return; 804 } else if (C == 0) { 805 NulCharacter = CurPtr-1; 806 } 807 C = getAndAdvanceChar(CurPtr, Result); 808 } 809 810 // If a nul character existed in the string, warn about it. 811 if (NulCharacter && !isLexingRawMode()) 812 Diag(NulCharacter, diag::null_in_string); 813 814 // Update the location of token as well as BufferPtr. 815 const char *TokStart = BufferPtr; 816 FormTokenWithChars(Result, CurPtr, tok::angle_string_literal); 817 Result.setLiteralData(TokStart); 818} 819 820 821/// LexCharConstant - Lex the remainder of a character constant, after having 822/// lexed either ' or L'. 823void Lexer::LexCharConstant(Token &Result, const char *CurPtr) { 824 const char *NulCharacter = 0; // Does this character contain the \0 character? 825 826 // Handle the common case of 'x' and '\y' efficiently. 827 char C = getAndAdvanceChar(CurPtr, Result); 828 if (C == '\'') { 829 if (!isLexingRawMode() && !Features.AsmPreprocessor) 830 Diag(BufferPtr, diag::err_empty_character); 831 FormTokenWithChars(Result, CurPtr, tok::unknown); 832 return; 833 } else if (C == '\\') { 834 // Skip the escaped character. 835 // FIXME: UCN's. 836 C = getAndAdvanceChar(CurPtr, Result); 837 } 838 839 if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') { 840 ++CurPtr; 841 } else { 842 // Fall back on generic code for embedded nulls, newlines, wide chars. 843 do { 844 // Skip escaped characters. 845 if (C == '\\') { 846 // Skip the escaped character. 847 C = getAndAdvanceChar(CurPtr, Result); 848 } else if (C == '\n' || C == '\r' || // Newline. 849 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. 
850 if (!isLexingRawMode() && !Features.AsmPreprocessor) 851 Diag(BufferPtr, diag::err_unterminated_char); 852 FormTokenWithChars(Result, CurPtr-1, tok::unknown); 853 return; 854 } else if (C == 0) { 855 NulCharacter = CurPtr-1; 856 } 857 C = getAndAdvanceChar(CurPtr, Result); 858 } while (C != '\''); 859 } 860 861 if (NulCharacter && !isLexingRawMode()) 862 Diag(NulCharacter, diag::null_in_char); 863 864 // Update the location of token as well as BufferPtr. 865 const char *TokStart = BufferPtr; 866 FormTokenWithChars(Result, CurPtr, tok::char_constant); 867 Result.setLiteralData(TokStart); 868} 869 870/// SkipWhitespace - Efficiently skip over a series of whitespace characters. 871/// Update BufferPtr to point to the next non-whitespace character and return. 872/// 873/// This method forms a token and returns true if KeepWhitespaceMode is enabled. 874/// 875bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) { 876 // Whitespace - Skip it, then return the token after the whitespace. 877 unsigned char Char = *CurPtr; // Skip consequtive spaces efficiently. 878 while (1) { 879 // Skip horizontal whitespace very aggressively. 880 while (isHorizontalWhitespace(Char)) 881 Char = *++CurPtr; 882 883 // Otherwise if we have something other than whitespace, we're done. 884 if (Char != '\n' && Char != '\r') 885 break; 886 887 if (ParsingPreprocessorDirective) { 888 // End of preprocessor directive line, let LexTokenInternal handle this. 889 BufferPtr = CurPtr; 890 return false; 891 } 892 893 // ok, but handle newline. 894 // The returned token is at the start of the line. 895 Result.setFlag(Token::StartOfLine); 896 // No leading whitespace seen so far. 897 Result.clearFlag(Token::LeadingSpace); 898 Char = *++CurPtr; 899 } 900 901 // If this isn't immediately after a newline, there is leading space. 
  char PrevChar = CurPtr[-1];
  if (PrevChar != '\n' && PrevChar != '\r')
    Result.setFlag(Token::LeadingSpace);

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  BufferPtr = CurPtr;
  return false;
}

/// SkipBCPLComment - We have just read the // characters from input.  Skip
/// until we find the newline character that terminates the comment.  Then
/// update BufferPtr and return.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment && !isLexingRawMode()) {
    Diag(BufferPtr, diag::ext_bcpl_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: Speedup BCPL comment lexing.  Just scan for a \n or \r character.
    // If we find a \n character, scan backwards, checking to see if it's an
    // escaped newline, like we do for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.  Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs.  If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If the char that we finally got was a \n, then we must have had something
    // like \<newline><newline>.  We don't want to have consumed the second
    // newline, we want CurPtr, to end up pointing to it down below.
    if (C == '\n' || C == '\r') {
      --CurPtr;
      C = 'x'; // doesn't matter what this is.
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isspace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isspace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
          break;
        }
    }

    // Reached the end of the buffer: back off the nul and stop scanning.
    if (CurPtr == BufferEnd+1) { --CurPtr; break; }
  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.  Notify comment handlers about the
  // comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveBCPLComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}

/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
/// an appropriate way and return it.
bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
  // If we're not in a preprocessor directive, just return the // comment
  // directly.
  FormTokenWithChars(Result, CurPtr, tok::comment);

  if (!ParsingPreprocessorDirective)
    return true;

  // If this BCPL-style comment is in a macro definition, transmogrify it into
  // a C-style block comment.
  bool Invalid = false;
  std::string Spelling = PP->getSpelling(Result, &Invalid);
  if (Invalid)
    return true;

  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
  Spelling[1] = '*';   // Change prefix to "/*".
  Spelling += "*/";    // add suffix.
1052 1053 Result.setKind(tok::comment); 1054 PP->CreateString(&Spelling[0], Spelling.size(), Result, 1055 Result.getLocation()); 1056 return true; 1057} 1058 1059/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline 1060/// character (either \n or \r) is part of an escaped newline sequence. Issue a 1061/// diagnostic if so. We know that the newline is inside of a block comment. 1062static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, 1063 Lexer *L) { 1064 assert(CurPtr[0] == '\n' || CurPtr[0] == '\r'); 1065 1066 // Back up off the newline. 1067 --CurPtr; 1068 1069 // If this is a two-character newline sequence, skip the other character. 1070 if (CurPtr[0] == '\n' || CurPtr[0] == '\r') { 1071 // \n\n or \r\r -> not escaped newline. 1072 if (CurPtr[0] == CurPtr[1]) 1073 return false; 1074 // \n\r or \r\n -> skip the newline. 1075 --CurPtr; 1076 } 1077 1078 // If we have horizontal whitespace, skip over it. We allow whitespace 1079 // between the slash and newline. 1080 bool HasSpace = false; 1081 while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) { 1082 --CurPtr; 1083 HasSpace = true; 1084 } 1085 1086 // If we have a slash, we know this is an escaped newline. 1087 if (*CurPtr == '\\') { 1088 if (CurPtr[-1] != '*') return false; 1089 } else { 1090 // It isn't a slash, is it the ?? / trigraph? 1091 if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' || 1092 CurPtr[-3] != '*') 1093 return false; 1094 1095 // This is the trigraph ending the comment. Emit a stern warning! 1096 CurPtr -= 2; 1097 1098 // If no trigraphs are enabled, warn that we ignored this trigraph and 1099 // ignore this * character. 
1100 if (!L->getFeatures().Trigraphs) { 1101 if (!L->isLexingRawMode()) 1102 L->Diag(CurPtr, diag::trigraph_ignored_block_comment); 1103 return false; 1104 } 1105 if (!L->isLexingRawMode()) 1106 L->Diag(CurPtr, diag::trigraph_ends_block_comment); 1107 } 1108 1109 // Warn about having an escaped newline between the */ characters. 1110 if (!L->isLexingRawMode()) 1111 L->Diag(CurPtr, diag::escaped_newline_block_comment_end); 1112 1113 // If there was space between the backslash and newline, warn about it. 1114 if (HasSpace && !L->isLexingRawMode()) 1115 L->Diag(CurPtr, diag::backslash_newline_space); 1116 1117 return true; 1118} 1119 1120#ifdef __SSE2__ 1121#include <emmintrin.h> 1122#elif __ALTIVEC__ 1123#include <altivec.h> 1124#undef bool 1125#endif 1126 1127/// SkipBlockComment - We have just read the /* characters from input. Read 1128/// until we find the */ characters that terminate the comment. Note that we 1129/// don't bother decoding trigraphs or escaped newlines in block comments, 1130/// because they cannot cause the comment to end. The only thing that can 1131/// happen is the comment could end with an escaped newline between the */ end 1132/// of comment. 1133/// 1134/// If we're in KeepCommentMode or any CommentHandler has inserted 1135/// some tokens, this will store the first token and return true. 1136bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) { 1137 // Scan one character past where we should, looking for a '/' character. Once 1138 // we find it, check to see if it was preceeded by a *. This common 1139 // optimization helps people who like to put a lot of * characters in their 1140 // comments. 1141 1142 // The first character we get with newlines and trigraphs skipped to handle 1143 // the degenerate /*/ case below correctly if the * has an escaped newline 1144 // after it. 
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    // "/*" immediately followed by EOF: unterminated comment.
    if (!isLexingRawMode() &&
        !PP->isCodeCompletionFile(FileLoc))
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token.  Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (1) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Scan 16 bytes at a time for a '/' byte.
      __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/',
                                     '/', '/', '/', '/', '/', '/', '/', '/');
      while (CurPtr+16 <= BufferEnd &&
             _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0)
        CurPtr += 16;
#elif __ALTIVEC__
      __vector unsigned char Slashes = {
        '/', '/', '/', '/',  '/', '/', '/', '/',
        '/', '/', '/', '/',  '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

  FoundSlash:
    if (C == '/') {
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode() && !PP->isCodeCompletionFile(FileLoc))
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token.  Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    }
    C = *CurPtr++;
  }

  // Notify comment handlers about the comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.  This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr+1);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}

//===----------------------------------------------------------------------===//
// Primary Lexing Entry Points
//===----------------------------------------------------------------------===//

/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string.  This switches the lexer out of directive mode.
std::string Lexer::ReadToEndOfLine() {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  std::string Result;
  Token Tmp;

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (1) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      Result += Char;
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        // Nope, normal character, continue.
        Result += Char;
        break;
      }
      // FALL THROUGH.
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOM transition.
      Lex(Tmp);
      assert(Tmp.is(tok::eom) && "Unexpected token!");

      // Finally, we're done, return the string we found.
      return Result;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eom);

    // Restore comment saving mode, in case it was disabled for directive.
    SetCommentRetentionState(PP->getCommentRetentionState());
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  // Otherwise, check if we are code-completing, then issue diagnostics for
  // unterminated #if and missing newline.

  if (PP && PP->isCodeCompletionFile(FileLoc)) {
    // We're at the end of the file, but we've been asked to consider the
    // end of the file to be a code-completion token.  Return the
    // code-completion token.
    Result.startToken();
    FormTokenWithChars(Result, CurPtr, tok::code_completion);

    // Only do the eof -> code_completion translation once.
    PP->SetCodeCompletionPoint(0, 0, 0);

    // Silence any diagnostics that occur once we hit the code-completion point.
    PP->getDiagnostics().setSuppressAllDiagnostics(true);
    return true;
  }

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    PP->Diag(ConditionalStack.back().IfLoc,
             diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
    Diag(BufferEnd, diag::ext_no_newline_eof)
      << FixItHint::CreateInsertion(getSourceLocation(BufferEnd), "\n");

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result);
}

/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
/// the specified lexer will return a tok::l_paren token, 0 if it is something
/// else and 2 if there are no more tokens in the buffer controlled by the
/// lexer.
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  // Switch to 'skipping' mode.  This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
1405 const char *TmpBufferPtr = BufferPtr; 1406 bool inPPDirectiveMode = ParsingPreprocessorDirective; 1407 1408 Token Tok; 1409 Tok.startToken(); 1410 LexTokenInternal(Tok); 1411 1412 // Restore state that may have changed. 1413 BufferPtr = TmpBufferPtr; 1414 ParsingPreprocessorDirective = inPPDirectiveMode; 1415 1416 // Restore the lexer back to non-skipping mode. 1417 LexingRawMode = false; 1418 1419 if (Tok.is(tok::eof)) 1420 return 2; 1421 return Tok.is(tok::l_paren); 1422} 1423 1424/// FindConflictEnd - Find the end of a version control conflict marker. 1425static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd) { 1426 llvm::StringRef RestOfBuffer(CurPtr+7, BufferEnd-CurPtr-7); 1427 size_t Pos = RestOfBuffer.find(">>>>>>>"); 1428 while (Pos != llvm::StringRef::npos) { 1429 // Must occur at start of line. 1430 if (RestOfBuffer[Pos-1] != '\r' && 1431 RestOfBuffer[Pos-1] != '\n') { 1432 RestOfBuffer = RestOfBuffer.substr(Pos+7); 1433 Pos = RestOfBuffer.find(">>>>>>>"); 1434 continue; 1435 } 1436 return RestOfBuffer.data()+Pos; 1437 } 1438 return 0; 1439} 1440 1441/// IsStartOfConflictMarker - If the specified pointer is the start of a version 1442/// control conflict marker like '<<<<<<<', recognize it as such, emit an error 1443/// and recover nicely. This returns true if it is a conflict marker and false 1444/// if not. 1445bool Lexer::IsStartOfConflictMarker(const char *CurPtr) { 1446 // Only a conflict marker if it starts at the beginning of a line. 1447 if (CurPtr != BufferStart && 1448 CurPtr[-1] != '\n' && CurPtr[-1] != '\r') 1449 return false; 1450 1451 // Check to see if we have <<<<<<<. 1452 if (BufferEnd-CurPtr < 8 || 1453 llvm::StringRef(CurPtr, 7) != "<<<<<<<") 1454 return false; 1455 1456 // If we have a situation where we don't care about conflict markers, ignore 1457 // it. 
  if (IsInConflictMarker || isLexingRawMode())
    return false;

  // Check to see if there is a >>>>>>> somewhere in the buffer at the start of
  // a line to terminate this conflict marker.
  if (FindConflictEnd(CurPtr, BufferEnd)) {
    // We found a match.  We are really in a conflict marker.
    // Diagnose this, and ignore to the end of line.
    Diag(CurPtr, diag::err_conflict_marker);
    IsInConflictMarker = true;

    // Skip ahead to the end of line.  We know this exists because the
    // end-of-conflict marker starts with \r or \n.
    while (*CurPtr != '\r' && *CurPtr != '\n') {
      assert(CurPtr != BufferEnd && "Didn't find end of line");
      ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // No end of conflict marker found.
  return false;
}


/// HandleEndOfConflictMarker - If this is a '=======' or '|||||||' or '>>>>>>>'
/// marker, then it is the end of a conflict marker.  Handle it by ignoring up
/// until the end of the line.  This returns true if it is a conflict marker and
/// false if not.
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (!IsInConflictMarker || isLexingRawMode())
    return false;

  // Check to see if we have the marker (7 characters in a row).
  if (CurPtr[i] != CurPtr[0])
      return false;

  // If we do have it, search for the end of the conflict marker.  This could
  // fail if it got skipped with a '#if 0' or something.  Note that CurPtr might
  // be the end of conflict marker.
  if (const char *End = FindConflictEnd(CurPtr, BufferEnd)) {
    CurPtr = End;

    // Skip ahead to the end of line.
    while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
      ++CurPtr;

    BufferPtr = CurPtr;

    // No longer in the conflict marker.
    IsInConflictMarker = false;
    return true;
  }

  return false;
}


/// LexTokenInternal - This implements a simple C family lexer.  It is an
/// extremely performance critical piece of code.  This assumes that the buffer
/// has a null character at the end of the file.  This returns a preprocessing
/// token, not a normal token, as such, it is an internal interface.  It assumes
/// that the Flags of result have been cleared before calling this.
void Lexer::LexTokenInternal(Token &Result) {
LexNextToken:
  // New token, can't need cleaning yet.
  Result.clearFlag(Token::NeedsCleaning);
  Result.setIdentifierInfo(0);

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace is very common between tokens.
  if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
    ++CurPtr;
    while ((*CurPtr == ' ') || (*CurPtr == '\t'))
      ++CurPtr;

    // If we are keeping whitespace and other tokens, just return what we just
    // skipped.  The next lexer invocation will return the token after the
    // whitespace.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return;
    }

    BufferPtr = CurPtr;
    Result.setFlag(Token::LeadingSpace);
  }

  unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.

  // Read a character, advancing over it.
  char Char = getAndAdvanceChar(CurPtr, Result);
  tok::TokenKind Kind;

  switch (Char) {
  case 0:  // Null.
    // Found end of file?
    if (CurPtr-1 == BufferEnd) {
      // Read the PP instance variable into an automatic variable, because
      // LexEndOfFile will often delete 'this'.
      Preprocessor *PPCache = PP;
      if (LexEndOfFile(Result, CurPtr-1))  // Retreat back into the file.
        return;   // Got a token to return.
      assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
      return PPCache->Lex(Result);
    }

    // An embedded nul that is not at EOF: warn and treat it as whitespace.
    if (!isLexingRawMode())
      Diag(CurPtr-1, diag::null_in_file);
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr))
      return; // KeepWhitespaceMode

    goto LexNextToken;   // GCC isn't tail call eliminating.

  case 26:  // DOS & CP/M EOF: "^Z".
    // If we're in Microsoft extensions mode, treat this as end of file.
    if (Features.Microsoft) {
      // Read the PP instance variable into an automatic variable, because
      // LexEndOfFile will often delete 'this'.
      Preprocessor *PPCache = PP;
      if (LexEndOfFile(Result, CurPtr-1))  // Retreat back into the file.
        return;   // Got a token to return.
      assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
      return PPCache->Lex(Result);
    }
    // If Microsoft extensions are disabled, this is just random garbage.
    Kind = tok::unknown;
    break;

  case '\n':
  case '\r':
    // If we are inside a preprocessor directive and we see the end of line,
    // we know we are done with the directive, so return an EOM token.
    if (ParsingPreprocessorDirective) {
      // Done parsing the "line".
      ParsingPreprocessorDirective = false;

      // Restore comment saving mode, in case it was disabled for directive.
      SetCommentRetentionState(PP->getCommentRetentionState());

      // Since we consumed a newline, we are back at the start of a line.
      IsAtStartOfLine = true;

      Kind = tok::eom;
      break;
    }
    // The returned token is at the start of the line.
    Result.setFlag(Token::StartOfLine);
    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);

    if (SkipWhitespace(Result, CurPtr))
      return; // KeepWhitespaceMode
    goto LexNextToken;   // GCC isn't tail call eliminating.
  case ' ':
  case '\t':
  case '\f':
  case '\v':
  SkipHorizontalWhitespace:
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr))
      return; // KeepWhitespaceMode

  SkipIgnoredUnits:
    CurPtr = BufferPtr;

    // If the next token is obviously a // or /* */ comment, skip it efficiently
    // too (without going through the big switch stmt).
    if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
        Features.BCPLComment) {
      if (SkipBCPLComment(Result, CurPtr+2))
        return; // There is a token to return.
      goto SkipIgnoredUnits;
    } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
      if (SkipBlockComment(Result, CurPtr+2))
        return; // There is a token to return.
      goto SkipIgnoredUnits;
    } else if (isHorizontalWhitespace(*CurPtr)) {
      goto SkipHorizontalWhitespace;
    }
    goto LexNextToken;   // GCC isn't tail call eliminating.

  // C99 6.4.4.1: Integer Constants.
  // C99 6.4.4.2: Floating Constants.
  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexNumericConstant(Result, CurPtr);

  case 'L':   // Identifier (Loony) or wide literal (L'x' or L"xyz").
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    Char = getCharAndSize(CurPtr, SizeTmp);

    // Wide string literal.
    if (Char == '"')
      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                              true);

    // Wide character constant.
    if (Char == '\'')
      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    // FALL THROUGH, treating L like the start of an identifier.

  // C99 6.4.2: Identifiers.
  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
  case 'H': case 'I': case 'J': case 'K':    /*'L'*/case 'M': case 'N':
  case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
  case 'V': case 'W': case 'X': case 'Y': case 'Z':
  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
  case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
  case 'v': case 'w': case 'x': case 'y': case 'z':
  case '_':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexIdentifier(Result, CurPtr);

  case '$':   // $ in identifiers.
    if (Features.DollarIdents) {
      if (!isLexingRawMode())
        Diag(CurPtr-1, diag::ext_dollar_in_identifier);
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();
      return LexIdentifier(Result, CurPtr);
    }

    Kind = tok::unknown;
    break;

  // C99 6.4.4: Character Constants.
  case '\'':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexCharConstant(Result, CurPtr);

  // C99 6.4.5: String Literals.
  case '"':
    // Notify MIOpt that we read a non-whitespace/non-comment token.
    MIOpt.ReadToken();
    return LexStringLiteral(Result, CurPtr, false);

  // C99 6.4.6: Punctuators.
  case '?':
    Kind = tok::question;
    break;
  case '[':
    Kind = tok::l_square;
    break;
  case ']':
    Kind = tok::r_square;
    break;
  case '(':
    Kind = tok::l_paren;
    break;
  case ')':
    Kind = tok::r_paren;
    break;
  case '{':
    Kind = tok::l_brace;
    break;
  case '}':
    Kind = tok::r_brace;
    break;
  case '.':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char >= '0' && Char <= '9') {
      // Notify MIOpt that we read a non-whitespace/non-comment token.
      MIOpt.ReadToken();

      return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    } else if (Features.CPlusPlus && Char == '*') {
      Kind = tok::periodstar;
      CurPtr += SizeTmp;
    } else if (Char == '.' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
      Kind = tok::ellipsis;
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else {
      Kind = tok::period;
    }
    break;
  case '&':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '&') {
      Kind = tok::ampamp;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Kind = tok::ampequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::amp;
    }
    break;
  case '*':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::starequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::star;
    }
    break;
  case '+':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '+') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusplus;
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusequal;
    } else {
      Kind = tok::plus;
    }
    break;
  case '-':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '-') {      // --
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusminus;
    } else if (Char == '>' && Features.CPlusPlus &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {  // C++ ->*
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
      Kind = tok::arrowstar;
    } else if (Char == '>') {   // ->
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::arrow;
    } else if (Char == '=') {   // -=
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusequal;
    } else {
      Kind = tok::minus;
    }
    break;
  case '~':
    Kind = tok::tilde;
    break;
  case '!':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::exclaimequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::exclaim;
    }
    break;
  case '/':
    // 6.4.9: Comments
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '/') {         // BCPL comment.
      // Even if BCPL comments are disabled (e.g. in C89 mode), we generally
      // want to lex this as a comment.  There is one problem with this though,
      // that in one particular corner case, this can change the behavior of the
      // resultant program.  For example, In "foo //**/ bar", C89 would lex
      // this as "foo / bar" and languages with BCPL comments would lex it as
      // "foo".  Check to see if the character after the second slash is a '*'.
      // If so, we will lex that as a "/" instead of the start of a comment.
      if (Features.BCPLComment ||
          getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') {
        if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
          return; // There is a token to return.

        // It is common for the tokens immediately after a // comment to be
        // whitespace (indentation for the next line).  Instead of going through
        // the big switch, handle it efficiently now.
        goto SkipIgnoredUnits;
      }
    }

    if (Char == '*') {  // /**/ comment.
      if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
        return; // There is a token to return.
      goto LexNextToken;   // GCC isn't tail call eliminating.
    }

    if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::slashequal;
    } else {
      Kind = tok::slash;
    }
    break;
  case '%':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Kind = tok::percentequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == '>') {
      Kind = tok::r_brace;                             // '%>' -> '}'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == ':') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Char = getCharAndSize(CurPtr, SizeTmp);
      if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
        Kind = tok::hashhash;                          // '%:%:' -> '##'
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else if (Char == '@' && Features.Microsoft) {  // %:@ -> #@ -> Charize
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
        if (!isLexingRawMode())
          Diag(BufferPtr, diag::charize_microsoft_ext);
        Kind = tok::hashat;
      } else {                                         // '%:' -> '#'
        // We parsed a # character.  If this occurs at the start of the line,
        // it's actually the start of a preprocessing directive.  Callback to
        // the preprocessor to handle it.
        // FIXME: -fpreprocessed mode??
        if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) {
          FormTokenWithChars(Result, CurPtr, tok::hash);
          PP->HandleDirective(Result);

          // As an optimization, if the preprocessor didn't switch lexers, tail
          // recurse.
          if (PP->isCurrentLexer(this)) {
            // Start a new token.  If this is a #include or something, the PP may
            // want us starting at the beginning of the line again.  If so, set
            // the StartOfLine flag and clear LeadingSpace.
1888 if (IsAtStartOfLine) { 1889 Result.setFlag(Token::StartOfLine); 1890 Result.clearFlag(Token::LeadingSpace); 1891 IsAtStartOfLine = false; 1892 } 1893 goto LexNextToken; // GCC isn't tail call eliminating. 1894 } 1895 1896 return PP->Lex(Result); 1897 } 1898 1899 Kind = tok::hash; 1900 } 1901 } else { 1902 Kind = tok::percent; 1903 } 1904 break; 1905 case '<': 1906 Char = getCharAndSize(CurPtr, SizeTmp); 1907 if (ParsingFilename) { 1908 return LexAngledStringLiteral(Result, CurPtr); 1909 } else if (Char == '<') { 1910 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 1911 if (After == '=') { 1912 Kind = tok::lesslessequal; 1913 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1914 SizeTmp2, Result); 1915 } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) { 1916 // If this is actually a '<<<<<<<' version control conflict marker, 1917 // recognize it as such and recover nicely. 1918 goto LexNextToken; 1919 } else { 1920 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1921 Kind = tok::lessless; 1922 } 1923 } else if (Char == '=') { 1924 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1925 Kind = tok::lessequal; 1926 } else if (Features.Digraphs && Char == ':') { // '<:' -> '[' 1927 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1928 Kind = tok::l_square; 1929 } else if (Features.Digraphs && Char == '%') { // '<%' -> '{' 1930 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1931 Kind = tok::l_brace; 1932 } else { 1933 Kind = tok::less; 1934 } 1935 break; 1936 case '>': 1937 Char = getCharAndSize(CurPtr, SizeTmp); 1938 if (Char == '=') { 1939 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1940 Kind = tok::greaterequal; 1941 } else if (Char == '>') { 1942 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 1943 if (After == '=') { 1944 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 1945 SizeTmp2, Result); 1946 Kind = tok::greatergreaterequal; 1947 } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) { 1948 // If this is 
'>>>>>>>' and we're in a conflict marker, ignore it. 1949 goto LexNextToken; 1950 } else { 1951 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1952 Kind = tok::greatergreater; 1953 } 1954 1955 } else { 1956 Kind = tok::greater; 1957 } 1958 break; 1959 case '^': 1960 Char = getCharAndSize(CurPtr, SizeTmp); 1961 if (Char == '=') { 1962 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1963 Kind = tok::caretequal; 1964 } else { 1965 Kind = tok::caret; 1966 } 1967 break; 1968 case '|': 1969 Char = getCharAndSize(CurPtr, SizeTmp); 1970 if (Char == '=') { 1971 Kind = tok::pipeequal; 1972 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1973 } else if (Char == '|') { 1974 // If this is '|||||||' and we're in a conflict marker, ignore it. 1975 if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1)) 1976 goto LexNextToken; 1977 Kind = tok::pipepipe; 1978 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1979 } else { 1980 Kind = tok::pipe; 1981 } 1982 break; 1983 case ':': 1984 Char = getCharAndSize(CurPtr, SizeTmp); 1985 if (Features.Digraphs && Char == '>') { 1986 Kind = tok::r_square; // ':>' -> ']' 1987 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1988 } else if (Features.CPlusPlus && Char == ':') { 1989 Kind = tok::coloncolon; 1990 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 1991 } else { 1992 Kind = tok::colon; 1993 } 1994 break; 1995 case ';': 1996 Kind = tok::semi; 1997 break; 1998 case '=': 1999 Char = getCharAndSize(CurPtr, SizeTmp); 2000 if (Char == '=') { 2001 // If this is '=======' and we're in a conflict marker, ignore it. 
2002 if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1)) 2003 goto LexNextToken; 2004 2005 Kind = tok::equalequal; 2006 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2007 } else { 2008 Kind = tok::equal; 2009 } 2010 break; 2011 case ',': 2012 Kind = tok::comma; 2013 break; 2014 case '#': 2015 Char = getCharAndSize(CurPtr, SizeTmp); 2016 if (Char == '#') { 2017 Kind = tok::hashhash; 2018 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2019 } else if (Char == '@' && Features.Microsoft) { // #@ -> Charize 2020 Kind = tok::hashat; 2021 if (!isLexingRawMode()) 2022 Diag(BufferPtr, diag::charize_microsoft_ext); 2023 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 2024 } else { 2025 // We parsed a # character. If this occurs at the start of the line, 2026 // it's actually the start of a preprocessing directive. Callback to 2027 // the preprocessor to handle it. 2028 // FIXME: -fpreprocessed mode?? 2029 if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) { 2030 FormTokenWithChars(Result, CurPtr, tok::hash); 2031 PP->HandleDirective(Result); 2032 2033 // As an optimization, if the preprocessor didn't switch lexers, tail 2034 // recurse. 2035 if (PP->isCurrentLexer(this)) { 2036 // Start a new token. If this is a #include or something, the PP may 2037 // want us starting at the beginning of the line again. If so, set 2038 // the StartOfLine flag and clear LeadingSpace. 2039 if (IsAtStartOfLine) { 2040 Result.setFlag(Token::StartOfLine); 2041 Result.clearFlag(Token::LeadingSpace); 2042 IsAtStartOfLine = false; 2043 } 2044 goto LexNextToken; // GCC isn't tail call eliminating. 2045 } 2046 return PP->Lex(Result); 2047 } 2048 2049 Kind = tok::hash; 2050 } 2051 break; 2052 2053 case '@': 2054 // Objective C support. 2055 if (CurPtr[-1] == '@' && Features.ObjC1) 2056 Kind = tok::at; 2057 else 2058 Kind = tok::unknown; 2059 break; 2060 2061 case '\\': 2062 // FIXME: UCN's. 2063 // FALL THROUGH. 
2064 default: 2065 Kind = tok::unknown; 2066 break; 2067 } 2068 2069 // Notify MIOpt that we read a non-whitespace/non-comment token. 2070 MIOpt.ReadToken(); 2071 2072 // Update the location of token as well as BufferPtr. 2073 FormTokenWithChars(Result, CurPtr, Kind); 2074} 2075