// Lexer.h, at revision 9a6119437672f42be5f50c3fe89fe843b1bfa5b5
//===--- Lexer.h - C Language Family Lexer ----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines the Lexer interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LEXER_H
#define LLVM_CLANG_LEXER_H

#include "clang/Lex/Token.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/SmallVector.h"
#include <string>
#include <vector>
#include <cassert>

namespace clang {
class Diagnostic;
class SourceManager;
class Preprocessor;

/// Lexer - This provides a simple interface that turns a text buffer into a
/// stream of tokens.  It provides no support for file reading or buffering,
/// nor for buffering/seeking of tokens; only forward lexing is supported.  It
/// relies on the specified Preprocessor object to handle preprocessor
/// directives, etc.
class Lexer {
  //===--------------------------------------------------------------------===//
  // Constant configuration values for this lexer.
  const char *BufferStart;       // Start of the buffer.
  const char *BufferEnd;         // End of the buffer.
  SourceLocation FileLoc;        // Location for start of file.
  Preprocessor *PP;              // Preprocessor object controlling lexing.
  LangOptions Features;          // Features enabled by this language (cache).
  bool Is_PragmaLexer;           // True if lexer for _Pragma handling.

  //===--------------------------------------------------------------------===//
  // Context-specific lexing flags set by the preprocessor.
  //

  /// ParsingPreprocessorDirective - This is true when parsing #XXX.  This turns
  /// '\n' into a tok::eom token.
  bool ParsingPreprocessorDirective;

  /// ParsingFilename - True after #include: this turns <xx> into a
  /// tok::angle_string_literal token.
  bool ParsingFilename;

  /// LexingRawMode - True if in raw mode:  This flag disables interpretation of
  /// tokens and is far faster than non-raw-mode lexing.  This flag:
  ///  1. If EOF of the current lexer is found, the include stack isn't popped.
  ///  2. Identifier information is not looked up for identifier tokens.  As an
  ///     effect of this, implicit macro expansion is naturally disabled.
  ///  3. "#" tokens at the start of a line are treated as normal tokens, not
  ///     implicitly transformed by the lexer.
  ///  4. All diagnostic messages are disabled, except for unterminated /*.
  ///  5. The only callback made into the preprocessor is to report a hard error
  ///     on an unterminated '/*' comment.
  ///
  /// Note that in raw mode the PP pointer may be null.
  bool LexingRawMode;

  /// KeepCommentMode - The lexer can optionally keep C & BCPL-style comments,
  /// and return them as tokens.  This is used for -C and -CC modes.
  bool KeepCommentMode;

  //===--------------------------------------------------------------------===//
  // Context that changes as the file is lexed.
  // NOTE: any state that mutates when in raw mode must have save/restore code
  // in Lexer::isNextPPTokenLParen.

  // BufferPtr - Current pointer into the buffer.  This is the next character
  // to be lexed.
  const char *BufferPtr;

  // IsAtStartOfLine - True if the next lexed token should get the "start of
  // line" flag set on it.
  bool IsAtStartOfLine;

  /// MIOpt - This is a state machine that detects the #ifndef-wrapping-a-file
  /// idiom for the multiple-include optimization.
  MultipleIncludeOpt MIOpt;

  /// ConditionalStack - Information about the set of #if/#ifdef/#ifndef blocks
  /// we are currently in.
  std::vector<PPConditionalInfo> ConditionalStack;

  Lexer(const Lexer&);          // DO NOT IMPLEMENT
  void operator=(const Lexer&); // DO NOT IMPLEMENT
  friend class Preprocessor;
public:

  /// Lexer constructor - Create a new lexer object for the specified buffer
  /// with the specified preprocessor managing the lexing process.  This lexer
  /// assumes that the associated file buffer and Preprocessor objects will
  /// outlive it, so it doesn't take ownership of either of them.
  Lexer(SourceLocation FileLoc, Preprocessor &PP,
        const char *BufStart = 0, const char *BufEnd = 0);

  /// Lexer constructor - Create a new raw lexer object.  This object is only
  /// suitable for calls to 'LexRawToken'.  This lexer assumes that the
  /// associated file buffer will outlive it, so it doesn't take ownership of
  /// that buffer.
  Lexer(SourceLocation FileLoc, const LangOptions &Features,
        const char *BufStart, const char *BufEnd);

  /// getFeatures - Return the language features currently enabled.  NOTE: this
  /// lexer modifies features as a file is parsed!
  const LangOptions &getFeatures() const { return Features; }

  /// getFileLoc - Return the File Location for the file we are lexing out of.
  /// The physical location encodes where the characters come from; the virtual
  /// location encodes where we should *claim* the characters came from.
  /// Currently this is only used by _Pragma handling.
  SourceLocation getFileLoc() const { return FileLoc; }

  /// Lex - Return the next token in the file.  If this is the end of file, it
  /// returns the tok::eof token.  This implicitly involves the preprocessor.
  void Lex(Token &Result) {
    // Start a new token.
    Result.startToken();

    // NOTE: any changes here should also change code after calls to
    // Preprocessor::HandleDirective
    if (IsAtStartOfLine) {
      Result.setFlag(Token::StartOfLine);
      IsAtStartOfLine = false;
    }

    // Get a token.  Note that this may delete the current lexer if the end of
    // file is reached.
    LexTokenInternal(Result);
  }

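  // Illustrative example (hypothetical client code; assumes the Token kind
  // accessor from clang/Lex/Token.h): a caller that owns a Lexer pulls tokens
  // until it sees tok::eof, which Lex returns at end of file as noted above:
  //
  //   Token Tok;
  //   do {
  //     TheLexer.Lex(Tok);
  //     // ... consume Tok ...
  //   } while (Tok.getKind() != tok::eof);
  //
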
  /// LexRawToken - Switch the lexer to raw mode, lex a token into Result and
  /// switch it back.  Return true if the 'next character to read' pointer
  /// points at the end of the lexer buffer, false otherwise.
  bool LexRawToken(Token &Result) {
    assert(!LexingRawMode && "Already in raw mode!");
    LexingRawMode = true;
    Lex(Result);
    // Stay in raw mode if there is no preprocessor (raw-only lexer); otherwise
    // return to normal lexing.
    LexingRawMode = PP == 0;
    // Note that lexing to the end of the buffer doesn't implicitly delete the
    // lexer when in raw mode.
    return BufferPtr == BufferEnd;
  }

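  // Illustrative sketch (hypothetical caller): per the LexingRawMode comment
  // above, raw lexing skips identifier lookup, macro expansion, and most
  // diagnostics, so it is suited to scanning a buffer quickly:
  //
  //   Token RawTok;
  //   do {
  //     RawLexer.LexRawToken(RawTok);
  //   } while (RawTok.getKind() != tok::eof);
  //
  // The bool result reports whether the 'next character to read' pointer has
  // reached the end of the buffer.
  //
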
  /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
  /// uninterpreted string.  This switches the lexer out of directive mode.
  std::string ReadToEndOfLine();


  /// Diag - Forwarding function for diagnostics.  This translates a source
  /// position in the current buffer into a SourceLocation object for rendering.
  void Diag(const char *Loc, unsigned DiagID,
            const std::string &Msg = std::string()) const;
  void Diag(SourceLocation Loc, unsigned DiagID,
            const std::string &Msg = std::string()) const;

  /// getSourceLocation - Return a source location identifier for the specified
  /// offset in the current file.
  SourceLocation getSourceLocation(const char *Loc) const;

  /// Stringify - Convert the specified string into a C string by escaping '\'
  /// and " characters.  This does not add surrounding ""'s to the string.
  /// If Charify is true, this escapes the ' character instead of ".
  static std::string Stringify(const std::string &Str, bool Charify = false);

  /// Stringify - Convert the specified string into a C string by escaping '\'
  /// and " characters.  This does not add surrounding ""'s to the string.
  static void Stringify(llvm::SmallVectorImpl<char> &Str);

  /// MeasureTokenLength - Relex the token at the specified location and return
  /// its length in bytes in the input file.  If the token needs cleaning (e.g.
  /// includes a trigraph or an escaped newline) then this count includes bytes
  /// that are part of that.
  static unsigned MeasureTokenLength(SourceLocation Loc,
                                     const SourceManager &SM);

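  // Illustrative example (hypothetical variables TokLoc and SourceMgr): given
  // the location of a token's first character and the SourceManager, the
  // token's length in bytes can be recovered without a Preprocessor:
  //
  //   unsigned Len = Lexer::MeasureTokenLength(TokLoc, SourceMgr);
  //
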
  //===--------------------------------------------------------------------===//
  // Internal implementation interfaces.
private:

  /// LexTokenInternal - Internal interface to lex a preprocessing token. Called
  /// by Lex.
  ///
  void LexTokenInternal(Token &Result);

  /// FormTokenWithChars - When we lex a token, we have identified a span
  /// starting at BufferPtr, going to TokEnd that forms the token.  This method
  /// takes that range and assigns it to the token as its location and size.  In
  /// addition, since tokens cannot overlap, this also updates BufferPtr to be
  /// TokEnd.
  void FormTokenWithChars(Token &Result, const char *TokEnd) {
    Result.setLocation(getSourceLocation(BufferPtr));
    Result.setLength(TokEnd-BufferPtr);
    BufferPtr = TokEnd;
  }

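  // Illustrative sketch (hypothetical, simplified body for the LexIdentifier
  // helper declared below; assumes Token::setKind from clang/Lex/Token.h): a
  // Lex* routine advances a local pointer over the token's characters and then
  // hands the range [BufferPtr, CurPtr) to FormTokenWithChars:
  //
  //   void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
  //     while (isalnum(*CurPtr) || *CurPtr == '_')   // simplified scan
  //       ++CurPtr;
  //     Result.setKind(tok::identifier);
  //     FormTokenWithChars(Result, CurPtr);  // sets location/length and
  //   }                                      // advances BufferPtr to CurPtr
  //
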
  /// isNextPPTokenLParen - Return 1 if the next unexpanded token will return a
  /// tok::l_paren token, 0 if it is something else and 2 if there are no more
  /// tokens in the buffer controlled by this lexer.
  unsigned isNextPPTokenLParen();

  //===--------------------------------------------------------------------===//
  // Lexer character reading interfaces.
public:

  // This lexer is built on two interfaces for reading characters, both of which
  // automatically provide phase 1/2 translation.  getAndAdvanceChar is used
  // when we know that we will be reading a character from the input buffer and
  // that this character will be part of the result token.  This occurs in (e.g.)
  // string processing, because we know we need to read until we find the
  // closing '"' character.
  //
  // The second interface is the combination of getCharAndSize with
  // ConsumeChar.  getCharAndSize reads a phase 1/2 translated character,
  // returning it and its size.  If the lexer decides that this character is
  // part of the current token, it calls ConsumeChar on it.  This two-stage
  // approach allows us to emit diagnostics for characters (e.g. warnings about
  // trigraphs), knowing that they are only emitted if the character is
  // consumed.

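  // Illustrative sketch of the two-stage idiom (hypothetical fragment of a
  // lexing routine; the token kind name is assumed): peek first, and call
  // ConsumeChar only once the character is known to belong to the token, so
  // any trigraph or escaped-newline diagnostics are emitted exactly once:
  //
  //   unsigned Size;
  //   char C = getCharAndSize(CurPtr, Size);
  //   if (C == '=') {                    // e.g. lexing "+=" after a '+'
  //     CurPtr = ConsumeChar(CurPtr, Size, Result);
  //     Result.setKind(tok::plusequal);
  //   }
  //   FormTokenWithChars(Result, CurPtr);
  //
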
  /// isObviouslySimpleCharacter - Return true if the specified character is
  /// obviously the same in translation phase 1 and translation phase 3.  This
  /// can return false for characters that end up being the same, but it will
  /// never return true for something that needs to be mapped.
  static bool isObviouslySimpleCharacter(char C) {
    return C != '?' && C != '\\';
  }

  /// getAndAdvanceChar - Read a single 'character' from the specified buffer,
  /// advance over it, and return it.  This is tricky in several cases.  Here we
  /// just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getAndAdvanceChar(const char *&Ptr, Token &Tok) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;

    unsigned Size = 0;
    char C = getCharAndSizeSlow(Ptr, Size, &Tok);
    Ptr += Size;
    return C;
  }

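  // Illustrative sketch (hypothetical, simplified fragment): string lexing is
  // the use case named above, since every character up to the closing quote is
  // known to be part of the token:
  //
  //   char C = getAndAdvanceChar(CurPtr, Result);
  //   while (C != '"') {
  //     if (C == '\\')                         // step over an escaped character
  //       C = getAndAdvanceChar(CurPtr, Result);
  //     C = getAndAdvanceChar(CurPtr, Result); // (no EOF/newline checks here)
  //   }
  //
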
private:
  /// ConsumeChar - When a character (identified by getCharAndSize) is consumed
  /// and added to a given token, check to see if there are diagnostics that
  /// need to be emitted or flags that need to be set on the token.  If so, do
  /// it.
  const char *ConsumeChar(const char *Ptr, unsigned Size, Token &Tok) {
    // Normal case, we consumed exactly one character.  Just return it.
    if (Size == 1)
      return Ptr+Size;

    // Otherwise, re-lex the character with the current token, allowing
    // diagnostics to be emitted and flags to be set.
    Size = 0;
    getCharAndSizeSlow(Ptr, Size, &Tok);
    return Ptr+Size;
  }

  /// getCharAndSize - Peek a single 'character' from the specified buffer,
  /// get its size, and return it.  This is tricky in several cases.  Here we
  /// just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getCharAndSize(const char *Ptr, unsigned &Size) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlow(Ptr, Size);
  }

  /// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
  /// method.
  char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);

  /// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
  /// emit a warning.
  static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
                                          const LangOptions &Features) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
  }

  /// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
  /// diagnostic.
  static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                       const LangOptions &Features);

  //===--------------------------------------------------------------------===//
  // #if directive handling.

  /// pushConditionalLevel - When we enter a #if directive, this keeps track of
  /// the conditional block we are currently in, for diagnostic emission (e.g. a
  /// #if with a missing #endif).
  void pushConditionalLevel(SourceLocation DirectiveStart, bool WasSkipping,
                            bool FoundNonSkip, bool FoundElse) {
    PPConditionalInfo CI;
    CI.IfLoc = DirectiveStart;
    CI.WasSkipping = WasSkipping;
    CI.FoundNonSkip = FoundNonSkip;
    CI.FoundElse = FoundElse;
    ConditionalStack.push_back(CI);
  }
  void pushConditionalLevel(const PPConditionalInfo &CI) {
    ConditionalStack.push_back(CI);
  }

  /// popConditionalLevel - Remove an entry off the top of the conditional
  /// stack, returning information about it.  If the conditional stack is empty,
  /// this returns true and does not fill in the arguments.
  bool popConditionalLevel(PPConditionalInfo &CI) {
    if (ConditionalStack.empty()) return true;
    CI = ConditionalStack.back();
    ConditionalStack.pop_back();
    return false;
  }

  /// peekConditionalLevel - Return the top of the conditional stack.  This
  /// requires that there be a conditional active.
  PPConditionalInfo &peekConditionalLevel() {
    assert(!ConditionalStack.empty() && "No conditionals active!");
    return ConditionalStack.back();
  }

  unsigned getConditionalStackDepth() const { return ConditionalStack.size(); }

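  // Illustrative sketch (hypothetical Preprocessor-side code, enabled by the
  // friend declaration above): a directive handler pushes a level when it
  // handles #if/#ifdef/#ifndef and pops it at #endif, treating a failed pop as
  // an #endif with no matching #if:
  //
  //   CurLexer.pushConditionalLevel(IfLoc, /*WasSkipping=*/false,
  //                                 /*FoundNonSkip=*/true, /*FoundElse=*/false);
  //   ...
  //   PPConditionalInfo CI;
  //   if (CurLexer.popConditionalLevel(CI)) {
  //     // conditional stack was empty: diagnose "#endif without #if"
  //   }
  //
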
  //===--------------------------------------------------------------------===//
  // Other lexer functions.

  // Helper functions to lex the remainder of a token of a specific type.
  void LexIdentifier         (Token &Result, const char *CurPtr);
  void LexNumericConstant    (Token &Result, const char *CurPtr);
  void LexStringLiteral      (Token &Result, const char *CurPtr, bool Wide);
  void LexAngledStringLiteral(Token &Result, const char *CurPtr);
  void LexCharConstant       (Token &Result, const char *CurPtr);
  bool LexEndOfFile          (Token &Result, const char *CurPtr);

  void SkipWhitespace        (Token &Result, const char *CurPtr);
  bool SkipBCPLComment       (Token &Result, const char *CurPtr);
  bool SkipBlockComment      (Token &Result, const char *CurPtr);
  bool SaveBCPLComment       (Token &Result, const char *CurPtr);

  /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
  /// (potentially) macro expand the filename.  If the sequence parsed is not
  /// lexically legal, emit a diagnostic and return an EOM token as the result.
  void LexIncludeFilename(Token &Result);
};


}  // end namespace clang

#endif