Lexer.h revision 448cec4c1c3705f6f49ffdefb58a7329942a2dd8
//===--- Lexer.h - C Language Family Lexer ----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines the Lexer interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LEXER_H
#define LLVM_CLANG_LEXER_H

#include "clang/Lex/Token.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Basic/LangOptions.h"
#include <string>
#include <vector>
#include <cassert>

namespace clang {
class Diagnostic;
class Preprocessor;

/// Lexer - This provides a simple interface that turns a text buffer into a
/// stream of tokens.  It provides no support for file reading or buffering,
/// nor for buffering or seeking of tokens; only forward lexing is supported.
/// It relies on the specified Preprocessor object to handle preprocessor
/// directives, etc.
class Lexer {
  //===--------------------------------------------------------------------===//
  // Constant configuration values for this lexer.
  const char *BufferStart;       // Start of the buffer.
  const char *BufferEnd;         // End of the buffer.
  SourceLocation FileLoc;        // Location for start of file.
  Preprocessor &PP;              // Preprocessor object controlling lexing.
  LangOptions Features;          // Features enabled by this language (cache).
  bool Is_PragmaLexer;           // True if lexer for _Pragma handling.
  bool IsMainFile;               // True if top-level file.

  //===--------------------------------------------------------------------===//
  // Context-specific lexing flags set by the preprocessor.
  //

  /// ParsingPreprocessorDirective - This is true when parsing #XXX.  This turns
  /// '\n' into a tok::eom token.
  bool ParsingPreprocessorDirective;

  /// ParsingFilename - True after #include: this turns <xx> into a
  /// tok::angle_string_literal token.
  bool ParsingFilename;

  /// LexingRawMode - True if in raw mode.  Raw mode disables interpretation of
  /// tokens and is a far faster mode to lex in than non-raw mode.  When this
  /// flag is set:
  ///  1. If EOF of the current lexer is found, the include stack isn't popped.
  ///  2. Identifier information is not looked up for identifier tokens.  As an
  ///     effect of this, implicit macro expansion is naturally disabled.
  ///  3. "#" tokens at the start of a line are treated as normal tokens, not
  ///     implicitly transformed by the lexer.
  ///  4. All diagnostic messages are disabled, except for unterminated /*.
  ///  5. The only callback made into the preprocessor is to report a hard error
  ///     on an unterminated '/*' comment.
  bool LexingRawMode;

  /// KeepCommentMode - The lexer can optionally keep C & BCPL-style comments,
  /// and return them as tokens.  This is used for -C and -CC modes.
  bool KeepCommentMode;

  //===--------------------------------------------------------------------===//
  // Context that changes as the file is lexed.
  // NOTE: any state that mutates when in raw mode must have save/restore code
  // in Lexer::isNextPPTokenLParen.

  // BufferPtr - Current pointer into the buffer.  This is the next character
  // to be lexed.
  const char *BufferPtr;

  // IsAtStartOfLine - True if the next lexed token should get the "start of
  // line" flag set on it.
  bool IsAtStartOfLine;

  /// MIOpt - This is a state machine that detects the "#ifndef wrapping the
  /// entire file" idiom for the multiple-include optimization.
  MultipleIncludeOpt MIOpt;

  /// ConditionalStack - Information about the set of #if/#ifdef/#ifndef blocks
  /// we are currently in.
  std::vector<PPConditionalInfo> ConditionalStack;

  Lexer(const Lexer&);          // DO NOT IMPLEMENT
  void operator=(const Lexer&); // DO NOT IMPLEMENT
  friend class Preprocessor;
public:

  /// Lexer constructor - Create a new lexer object for the specified buffer
  /// with the specified preprocessor managing the lexing process.  This lexer
  /// assumes that the associated MemoryBuffer and Preprocessor objects will
  /// outlive it, so it doesn't take ownership of either of them.
  Lexer(SourceLocation FileLoc, Preprocessor &PP,
        const char *BufStart = 0, const char *BufEnd = 0);

  /// getFeatures - Return the language features currently enabled.  NOTE: this
  /// lexer modifies features as a file is parsed!
  const LangOptions &getFeatures() const { return Features; }

  /// getFileLoc - Return the File Location for the file we are lexing out of.
  /// The physical location encodes where the characters come from; the virtual
  /// location encodes where we should *claim* the characters came from.
  /// Currently this is only used by _Pragma handling.
  SourceLocation getFileLoc() const { return FileLoc; }

  /// setIsMainFile - Mark this lexer as being the lexer for the top-level
  /// source file.
  void setIsMainFile() {
    IsMainFile = true;
  }

  /// isMainFile - Return true if this is the top-level file.
  ///
  bool isMainFile() const { return IsMainFile; }

  /// Lex - Return the next token in the file.  If this is the end of file, it
  /// returns the tok::eof token.  This implicitly involves the preprocessor.
  void Lex(Token &Result) {
    // Start a new token.
    Result.startToken();

    // NOTE: any changes here should also be made to the code after calls to
    // Preprocessor::HandleDirective.
    if (IsAtStartOfLine) {
      Result.setFlag(Token::StartOfLine);
      IsAtStartOfLine = false;
    }

    // Get a token.  Note that this may delete the current lexer if the end of
    // file is reached.
    LexTokenInternal(Result);
  }

  /// LexRawToken - Switch the lexer to raw mode, lex a token into Result and
  /// switch it back.  Return true if the 'next character to read' pointer
  /// points at the end of the lexer buffer, false otherwise.
  bool LexRawToken(Token &Result) {
    assert(!LexingRawMode && "Already in raw mode!");
    LexingRawMode = true;
    Lex(Result);
    LexingRawMode = false;
    return BufferPtr == BufferEnd;
  }
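
  /// Illustration (not part of the original header): a caller that wants to
  /// scan every raw token in a buffer could drive LexRawToken in a loop like
  /// the sketch below, where 'RawLex' stands for a constructed Lexer and the
  /// Token accessors (getKind, getLocation, getLength) are assumed from
  /// clang/Lex/Token.h:
  ///
  ///   Token Tok;
  ///   bool AtBufferEnd;
  ///   do {
  ///     AtBufferEnd = RawLex.LexRawToken(Tok);
  ///     // ... inspect Tok.getKind(), Tok.getLocation(), Tok.getLength() ...
  ///   } while (!AtBufferEnd && Tok.getKind() != tok::eof);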

  /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
  /// uninterpreted string.  This switches the lexer out of directive mode.
  std::string ReadToEndOfLine();


  /// Diag - Forwarding function for diagnostics.  This translates a source
  /// position in the current buffer into a SourceLocation object for rendering.
  void Diag(const char *Loc, unsigned DiagID,
            const std::string &Msg = std::string()) const;
  void Diag(SourceLocation Loc, unsigned DiagID,
            const std::string &Msg = std::string()) const;

  /// getSourceLocation - Return a source location identifier for the specified
  /// offset in the current file.
  SourceLocation getSourceLocation(const char *Loc) const;

  /// Stringify - Convert the specified string into a C string by escaping '\'
  /// and " characters.  This does not add surrounding ""'s to the string.
  /// If Charify is true, this escapes the ' character instead of ".
  static std::string Stringify(const std::string &Str, bool Charify = false);
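
  /// Illustration (not part of the original header), derived from the
  /// description above; the comments show the characters of the result:
  ///
  ///   Stringify("a\"b\\c")      // a\"b\\c   (the " and \ are escaped)
  ///   Stringify("it's", true)   // it\'s     (the ' is escaped instead of ")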

  //===--------------------------------------------------------------------===//
  // Internal implementation interfaces.
private:

  /// LexTokenInternal - Internal interface to lex a preprocessing token. Called
  /// by Lex.
  ///
  void LexTokenInternal(Token &Result);

  /// FormTokenWithChars - When we lex a token, we have identified a span
  /// starting at BufferPtr and going to TokEnd that forms the token.  This
  /// method takes that range and assigns it to the token as its location and
  /// size.  In addition, since tokens cannot overlap, this also updates
  /// BufferPtr to be TokEnd.
  void FormTokenWithChars(Token &Result, const char *TokEnd) {
    Result.setLocation(getSourceLocation(BufferPtr));
    Result.setLength(TokEnd-BufferPtr);
    BufferPtr = TokEnd;
  }

  /// isNextPPTokenLParen - Return 1 if the next unexpanded token is a
  /// tok::l_paren token, 0 if it is something else, and 2 if there are no more
  /// tokens in the buffer controlled by this lexer.
  unsigned isNextPPTokenLParen();

  //===--------------------------------------------------------------------===//
  // Lexer character reading interfaces.
public:

  // This lexer is built on two interfaces for reading characters, both of which
  // automatically provide phase 1/2 translation.  getAndAdvanceChar is used
  // when we know that we will be reading a character from the input buffer and
  // that this character will be part of the result token.  This occurs in (e.g.)
  // string processing, because we know we need to read until we find the
  // closing '"' character.
  //
  // The second interface is the combination of getCharAndSize with
  // ConsumeChar.  getCharAndSize reads a phase 1/2 translated character,
  // returning it and its size.  If the lexer decides that this character is
  // part of the current token, it calls ConsumeChar on it.  This two-stage
  // approach allows us to emit diagnostics for characters (e.g. warnings about
  // trigraphs), knowing that they are only emitted if the character is
  // consumed.

  /// isObviouslySimpleCharacter - Return true if the specified character is
  /// obviously the same in translation phase 1 and translation phase 3.  This
  /// can return false for characters that end up being the same, but it will
  /// never return true for something that needs to be mapped.
  static bool isObviouslySimpleCharacter(char C) {
    return C != '?' && C != '\\';
  }

  /// getAndAdvanceChar - Read a single 'character' from the specified buffer,
  /// advance over it, and return it.  This is tricky in several cases.  Here we
  /// just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getAndAdvanceChar(const char *&Ptr, Token &Tok) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;

    unsigned Size = 0;
    char C = getCharAndSizeSlow(Ptr, Size, &Tok);
    Ptr += Size;
    return C;
  }
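
  /// Illustrative sketch (not part of the original header) of the
  /// getAndAdvanceChar interface described above: scanning to the closing
  /// quote of a string literal, assuming CurPtr points just past the opening
  /// '"' and Result is the token being formed (end-of-buffer handling is
  /// omitted):
  ///
  ///   char C = getAndAdvanceChar(CurPtr, Result);
  ///   while (C != '"') {
  ///     if (C == '\\')                          // skip the escaped character
  ///       C = getAndAdvanceChar(CurPtr, Result);
  ///     C = getAndAdvanceChar(CurPtr, Result);
  ///   }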

private:
  /// ConsumeChar - When a character (identified by getCharAndSize) is consumed
  /// and added to a given token, check to see if there are diagnostics that
  /// need to be emitted or flags that need to be set on the token.  If so, do
  /// it.
  const char *ConsumeChar(const char *Ptr, unsigned Size, Token &Tok) {
    // Normal case, we consumed exactly one character.  Just return it.
    if (Size == 1)
      return Ptr+Size;

    // Otherwise, re-lex the character with the current token, allowing
    // diagnostics to be emitted and flags to be set.
    Size = 0;
    getCharAndSizeSlow(Ptr, Size, &Tok);
    return Ptr+Size;
  }

  /// getCharAndSize - Peek a single 'character' from the specified buffer,
  /// get its size, and return it.  This is tricky in several cases.  Here we
  /// just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getCharAndSize(const char *Ptr, unsigned &Size) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlow(Ptr, Size);
  }
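
  /// Illustrative sketch (not part of the original header) of the two-stage
  /// peek/consume pattern described above, assuming CurPtr is the lexing
  /// cursor and Result is the token being formed:
  ///
  ///   unsigned Size;
  ///   char C = getCharAndSize(CurPtr, Size);          // peek; no diagnostics
  ///   if (C == '=') {                                  // character is wanted
  ///     CurPtr = ConsumeChar(CurPtr, Size, Result);    // diagnostics now fire
  ///     ...
  ///   }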

  /// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
  /// method.
  char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);

  /// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
  /// emit a warning.
  static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
                                          const LangOptions &Features) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
  }

  /// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
  /// diagnostic.
  static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                       const LangOptions &Features);

  //===--------------------------------------------------------------------===//
  // #if directive handling.

  /// pushConditionalLevel - When we enter a #if directive, this keeps track of
  /// what we are currently in for diagnostic emission (e.g. #if with missing
  /// #endif).
  void pushConditionalLevel(SourceLocation DirectiveStart, bool WasSkipping,
                            bool FoundNonSkip, bool FoundElse) {
    PPConditionalInfo CI;
    CI.IfLoc = DirectiveStart;
    CI.WasSkipping = WasSkipping;
    CI.FoundNonSkip = FoundNonSkip;
    CI.FoundElse = FoundElse;
    ConditionalStack.push_back(CI);
  }
  void pushConditionalLevel(const PPConditionalInfo &CI) {
    ConditionalStack.push_back(CI);
  }

  /// popConditionalLevel - Remove an entry off the top of the conditional
  /// stack, returning information about it.  If the conditional stack is empty,
  /// this returns true and does not fill in the argument.
  bool popConditionalLevel(PPConditionalInfo &CI) {
    if (ConditionalStack.empty()) return true;
    CI = ConditionalStack.back();
    ConditionalStack.pop_back();
    return false;
  }

  /// peekConditionalLevel - Return the top of the conditional stack.  This
  /// requires that there be a conditional active.
  PPConditionalInfo &peekConditionalLevel() {
    assert(!ConditionalStack.empty() && "No conditionals active!");
    return ConditionalStack.back();
  }

  unsigned getConditionalStackDepth() const { return ConditionalStack.size(); }
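
  /// Illustrative sketch (not part of the original header) of how a client
  /// such as the preprocessor might drive this stack for "#if 1 ... #endif",
  /// assuming IfLoc is the location of the #if directive:
  ///
  ///   pushConditionalLevel(IfLoc, /*WasSkipping=*/false,
  ///                        /*FoundNonSkip=*/true, /*FoundElse=*/false);
  ///   // ... lex the body of the conditional block ...
  ///   PPConditionalInfo CI;
  ///   if (popConditionalLevel(CI))
  ///     ;  // empty stack: a #endif with no matching #if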

  //===--------------------------------------------------------------------===//
  // Other lexer functions.

  // Helper functions to lex the remainder of a token of the specific type.
  void LexIdentifier         (Token &Result, const char *CurPtr);
  void LexNumericConstant    (Token &Result, const char *CurPtr);
  void LexStringLiteral      (Token &Result, const char *CurPtr, bool Wide);
  void LexAngledStringLiteral(Token &Result, const char *CurPtr);
  void LexCharConstant       (Token &Result, const char *CurPtr);
  bool LexEndOfFile          (Token &Result, const char *CurPtr);

  void SkipWhitespace        (Token &Result, const char *CurPtr);
  bool SkipBCPLComment       (Token &Result, const char *CurPtr);
  bool SkipBlockComment      (Token &Result, const char *CurPtr);
  bool SaveBCPLComment       (Token &Result, const char *CurPtr);

  /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
  /// (potentially) macro expand the filename.  If the sequence parsed is not
  /// lexically legal, emit a diagnostic and return a result EOM token.
  void LexIncludeFilename(Token &Result);
};


}  // end namespace clang

#endif