Lexer.h revision 168ae2d44a443da75ea85db5f3b5081eb0bce113
//===--- Lexer.h - C Language Family Lexer ----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file defines the Lexer interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LEXER_H
#define LLVM_CLANG_LEXER_H

#include "clang/Lex/Token.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/SmallVector.h"
#include <string>
#include <vector>
#include <cassert>

namespace clang {
class Diagnostic;
class Preprocessor;

/// Lexer - This provides a simple interface that turns a text buffer into a
/// stream of tokens.  This provides no support for file reading or buffering,
/// or buffering/seeking of tokens; only forward lexing is supported.  It relies
/// on the specified Preprocessor object to handle preprocessor directives, etc.
class Lexer {
  //===--------------------------------------------------------------------===//
  // Constant configuration values for this lexer.
  const char *BufferStart;       // Start of the buffer.
  const char *BufferEnd;         // End of the buffer.
  SourceLocation FileLoc;        // Location for start of file.
  Preprocessor *PP;              // Preprocessor object controlling lexing.
  LangOptions Features;          // Features enabled by this language (cache).
  bool Is_PragmaLexer;           // True if lexer for _Pragma handling.

  //===--------------------------------------------------------------------===//
  // Context-specific lexing flags set by the preprocessor.
  //

  /// ParsingPreprocessorDirective - This is true when parsing #XXX.  This turns
  /// '\n' into a tok::eom token.
  bool ParsingPreprocessorDirective;

  /// ParsingFilename - True after #include: this turns <xx> into a
  /// tok::angle_string_literal token.
  bool ParsingFilename;

  /// LexingRawMode - True if in raw mode:  This flag disables interpretation of
  /// tokens and makes lexing far faster than non-raw mode.  When set:
  ///  1. If EOF of the current lexer is found, the include stack isn't popped.
  ///  2. Identifier information is not looked up for identifier tokens.  As an
  ///     effect of this, implicit macro expansion is naturally disabled.
  ///  3. "#" tokens at the start of a line are treated as normal tokens, not
  ///     implicitly transformed by the lexer.
  ///  4. All diagnostic messages are disabled, except for unterminated /*.
  ///  5. The only callback made into the preprocessor is to report a hard error
  ///     on an unterminated '/*' comment.
  ///
  /// Note that in raw mode the PP pointer may be null.
  bool LexingRawMode;

  /// KeepCommentMode - The lexer can optionally keep C & BCPL-style comments,
  /// and return them as tokens.  This is used for -C and -CC modes.
  bool KeepCommentMode;

  //===--------------------------------------------------------------------===//
  // Context that changes as the file is lexed.
  // NOTE: any state that mutates when in raw mode must have save/restore code
  // in Lexer::isNextPPTokenLParen.

  // BufferPtr - Current pointer into the buffer.  This is the next character
  // to be lexed.
  const char *BufferPtr;

  // IsAtStartOfLine - True if the next lexed token should get the "start of
  // line" flag set on it.
  bool IsAtStartOfLine;

  /// MIOpt - This is a state machine that detects the #ifndef-wrapping-a-file
  /// idiom for the multiple-include optimization.
  MultipleIncludeOpt MIOpt;

  /// ConditionalStack - Information about the set of #if/#ifdef/#ifndef blocks
  /// we are currently in.
  std::vector<PPConditionalInfo> ConditionalStack;

  Lexer(const Lexer&);          // DO NOT IMPLEMENT
  void operator=(const Lexer&); // DO NOT IMPLEMENT
  friend class Preprocessor;
public:

  /// Lexer constructor - Create a new lexer object for the specified buffer
  /// with the specified preprocessor managing the lexing process.  This lexer
  /// assumes that the associated file buffer and Preprocessor objects will
  /// outlive it, so it doesn't take ownership of either of them.
  Lexer(SourceLocation FileLoc, Preprocessor &PP,
        const char *BufStart = 0, const char *BufEnd = 0);

  /// Lexer constructor - Create a new raw lexer object.  This object is only
  /// suitable for calls to 'LexRawToken'.  This lexer assumes that the
  /// associated file buffer will outlive it, so it doesn't take ownership of
  /// that buffer.
  Lexer(SourceLocation FileLoc, const LangOptions &Features,
        const char *BufStart, const char *BufEnd);

  /// getFeatures - Return the language features currently enabled.  NOTE: this
  /// lexer modifies features as a file is parsed!
  const LangOptions &getFeatures() const { return Features; }

  /// getFileLoc - Return the File Location for the file we are lexing out of.
  /// The physical location encodes the location where the characters come from,
  /// the virtual location encodes where we should *claim* the characters came
  /// from.  Currently this is only used by _Pragma handling.
  SourceLocation getFileLoc() const { return FileLoc; }

  /// Lex - Return the next token in the file.  If this is the end of file, it
  /// returns the tok::eof token.  This implicitly involves the preprocessor.
  void Lex(Token &Result) {
    // Start a new token.
    Result.startToken();

    // NOTE: any changes here should also be made to the code after calls to
    // Preprocessor::HandleDirective.
    if (IsAtStartOfLine) {
      Result.setFlag(Token::StartOfLine);
      IsAtStartOfLine = false;
    }

    // Get a token.  Note that this may delete the current lexer if the end of
    // file is reached.
    LexTokenInternal(Result);
  }
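
  // Example (illustrative sketch, not part of this interface): the stream of
  // tokens produced by Lex is conventionally consumed until a tok::eof token
  // appears.  Per the note above, in non-raw mode reaching the end of file may
  // delete the current lexer, so a loop like this is normally driven by the
  // Preprocessor rather than by arbitrary clients.
  //
  //   Token Tok;                      // 'TheLexer' is a hypothetical instance
  //   do {
  //     TheLexer.Lex(Tok);
  //     // ... examine Tok ...
  //   } while (Tok.getKind() != tok::eof);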

  /// LexRawToken - Switch the lexer to raw mode, lex a token into Result and
  /// switch it back.  Return true if the 'next character to read' pointer
  /// points at the end of the lexer buffer, false otherwise.
  bool LexRawToken(Token &Result) {
    assert(!LexingRawMode && "Already in raw mode!");
    LexingRawMode = true;
    Lex(Result);
    // Return to normal mode; a lexer with no preprocessor stays in raw mode.
    LexingRawMode = PP == 0;
    // Note that lexing to the end of the buffer doesn't implicitly delete the
    // lexer when in raw mode.
    return BufferPtr == BufferEnd;
  }
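
  // Example (illustrative sketch): raw-mode lexing over a memory buffer using
  // the LangOptions constructor declared above.  'StartLoc', 'BufStart', and
  // 'BufEnd' are hypothetical names for values obtained elsewhere (e.g. from
  // the source manager).
  //
  //   LangOptions LangOpts;
  //   Lexer RawLex(StartLoc, LangOpts, BufStart, BufEnd);
  //   Token Tok;
  //   bool AtEndOfBuffer = false;
  //   while (!AtEndOfBuffer) {
  //     AtEndOfBuffer = RawLex.LexRawToken(Tok);
  //     // ... examine Tok; no identifier lookup or macro expansion happens ...
  //   }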

  /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
  /// uninterpreted string.  This switches the lexer out of directive mode.
  std::string ReadToEndOfLine();


  /// Diag - Forwarding function for diagnostics.  This translates a source
  /// position in the current buffer into a SourceLocation object for rendering.
  void Diag(const char *Loc, unsigned DiagID,
            const std::string &Msg = std::string()) const;
  void Diag(SourceLocation Loc, unsigned DiagID,
            const std::string &Msg = std::string()) const;

  /// getSourceLocation - Return a source location identifier for the specified
  /// offset in the current file.
  SourceLocation getSourceLocation(const char *Loc) const;

  /// Stringify - Convert the specified string into a C string by escaping '\'
  /// and " characters.  This does not add surrounding ""'s to the string.
  /// If Charify is true, this escapes the ' character instead of ".
  static std::string Stringify(const std::string &Str, bool Charify = false);
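
  // For illustration: applied to the characters  foo"bar\  , Stringify
  // produces  foo\"bar\\  , which a caller can then wrap in quotes to form a
  // valid string literal.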

  /// Stringify - Convert the specified string into a C string by escaping '\'
  /// and " characters.  This does not add surrounding ""'s to the string.
  static void Stringify(llvm::SmallVectorImpl<char> &Str);

  //===--------------------------------------------------------------------===//
  // Internal implementation interfaces.
private:

  /// LexTokenInternal - Internal interface to lex a preprocessing token. Called
  /// by Lex.
  ///
  void LexTokenInternal(Token &Result);

  /// FormTokenWithChars - When we lex a token, we have identified a span
  /// starting at BufferPtr, going to TokEnd that forms the token.  This method
  /// takes that range and assigns it to the token as its location and size.  In
  /// addition, since tokens cannot overlap, this also updates BufferPtr to be
  /// TokEnd.
  void FormTokenWithChars(Token &Result, const char *TokEnd) {
    Result.setLocation(getSourceLocation(BufferPtr));
    Result.setLength(TokEnd-BufferPtr);
    BufferPtr = TokEnd;
  }
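
  // Illustrative sketch of how a LexXXX helper is expected to finish a token:
  // once CurPtr points just past the token's last character, the range is
  // handed to FormTokenWithChars.  Token::setKind() is assumed from Token.h.
  //
  //   // ... CurPtr advanced past the identifier characters ...
  //   Result.setKind(tok::identifier);
  //   FormTokenWithChars(Result, CurPtr);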

  /// isNextPPTokenLParen - Return 1 if the next unexpanded token is a
  /// tok::l_paren token, 0 if it is something else, and 2 if there are no more
  /// tokens in the buffer controlled by this lexer.
  unsigned isNextPPTokenLParen();

  //===--------------------------------------------------------------------===//
  // Lexer character reading interfaces.
public:

  // This lexer is built on two interfaces for reading characters, both of which
  // automatically provide phase 1/2 translation.  getAndAdvanceChar is used
  // when we know that we will be reading a character from the input buffer and
  // that this character will be part of the result token. This occurs in (e.g.)
  // string processing, because we know we need to read until we find the
  // closing '"' character.
  //
  // The second interface is the combination of getCharAndSize with
  // ConsumeChar.  getCharAndSize reads a phase 1/2 translated character,
  // returning it and its size.  If the lexer decides that this character is
  // part of the current token, it calls ConsumeChar on it.  This two-stage
  // approach allows us to emit diagnostics for characters (e.g. warnings about
  // trigraphs), knowing that they are only emitted if the character is
  // consumed.
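
  // Illustrative sketch of the peek-then-consume pattern described above, e.g.
  // when deciding between '+' and '+=' (the surrounding lexing context and
  // Token::setKind() are assumed):
  //
  //   unsigned Size;
  //   char C = getCharAndSize(CurPtr, Size);
  //   if (C == '=') {
  //     CurPtr = ConsumeChar(CurPtr, Size, Result);  // '=' is part of '+='
  //     Result.setKind(tok::plusequal);
  //   } else {
  //     Result.setKind(tok::plus);
  //   }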

  /// isObviouslySimpleCharacter - Return true if the specified character is
  /// obviously the same in translation phase 1 and translation phase 3.  This
  /// can return false for characters that end up being the same, but it will
  /// never return true for something that needs to be mapped.
  static bool isObviouslySimpleCharacter(char C) {
    return C != '?' && C != '\\';
  }

  /// getAndAdvanceChar - Read a single 'character' from the specified buffer,
  /// advance over it, and return it.  This is tricky in several cases.  Here we
  /// just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getAndAdvanceChar(const char *&Ptr, Token &Tok) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;

    unsigned Size = 0;
    char C = getCharAndSizeSlow(Ptr, Size, &Tok);
    Ptr += Size;
    return C;
  }
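
  // Illustrative sketch of the first interface in the string case mentioned
  // above: every character up to the closing quote is known to be part of the
  // token, so each one is read and consumed in a single step.
  //
  //   char C = getAndAdvanceChar(CurPtr, Result);
  //   while (C != '"') {
  //     // ... handle escapes, diagnose an unterminated literal, etc ...
  //     C = getAndAdvanceChar(CurPtr, Result);
  //   }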

private:
  /// ConsumeChar - When a character (identified by getCharAndSize) is consumed
  /// and added to a given token, check to see if there are diagnostics that
  /// need to be emitted or flags that need to be set on the token.  If so, do
  /// it.
  const char *ConsumeChar(const char *Ptr, unsigned Size, Token &Tok) {
    // Normal case, we consumed exactly one character.  Just return it.
    if (Size == 1)
      return Ptr+Size;

    // Otherwise, re-lex the character with the current token, allowing
    // diagnostics to be emitted and flags to be set.
    Size = 0;
    getCharAndSizeSlow(Ptr, Size, &Tok);
    return Ptr+Size;
  }

  /// getCharAndSize - Peek a single 'character' from the specified buffer,
  /// get its size, and return it.  This is tricky in several cases.  Here we
  /// just handle the trivial case and fall back to the non-inlined
  /// getCharAndSizeSlow method to handle the hard case.
  inline char getCharAndSize(const char *Ptr, unsigned &Size) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlow(Ptr, Size);
  }

  /// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
  /// method.
  char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);

  /// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
  /// emit a warning.
  static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
                                          const LangOptions &Features) {
    // If this is not a trigraph and not a UCN or escaped newline, return
    // quickly.
    if (isObviouslySimpleCharacter(Ptr[0])) {
      Size = 1;
      return *Ptr;
    }

    Size = 0;
    return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
  }

  /// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
  /// diagnostic.
  static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                       const LangOptions &Features);
  //===--------------------------------------------------------------------===//
  // #if directive handling.

  /// pushConditionalLevel - When we enter a #if directive, this keeps track of
  /// what we are currently in for diagnostic emission (e.g. #if with missing
  /// #endif).
  void pushConditionalLevel(SourceLocation DirectiveStart, bool WasSkipping,
                            bool FoundNonSkip, bool FoundElse) {
    PPConditionalInfo CI;
    CI.IfLoc = DirectiveStart;
    CI.WasSkipping = WasSkipping;
    CI.FoundNonSkip = FoundNonSkip;
    CI.FoundElse = FoundElse;
    ConditionalStack.push_back(CI);
  }
  void pushConditionalLevel(const PPConditionalInfo &CI) {
    ConditionalStack.push_back(CI);
  }

  /// popConditionalLevel - Remove an entry off the top of the conditional
  /// stack, returning information about it.  If the conditional stack is empty,
  /// this returns true and does not fill in the arguments.
  bool popConditionalLevel(PPConditionalInfo &CI) {
    if (ConditionalStack.empty()) return true;
    CI = ConditionalStack.back();
    ConditionalStack.pop_back();
    return false;
  }

  /// peekConditionalLevel - Return the top of the conditional stack.  This
  /// requires that there be a conditional active.
  PPConditionalInfo &peekConditionalLevel() {
    assert(!ConditionalStack.empty() && "No conditionals active!");
    return ConditionalStack.back();
  }

  unsigned getConditionalStackDepth() const { return ConditionalStack.size(); }
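
  // Illustrative sketch (assumed caller, not part of this file): directive
  // handling is expected to pair these calls, pushing when a #if/#ifdef/#ifndef
  // region is entered and popping at the matching #endif.
  //
  //   // On '#if' that evaluates true while not already skipping:
  //   pushConditionalLevel(IfLoc, /*WasSkipping=*/false,
  //                        /*FoundNonSkip=*/true, /*FoundElse=*/false);
  //   ...
  //   // On '#endif':
  //   PPConditionalInfo CI;
  //   if (popConditionalLevel(CI)) {
  //     // diagnose '#endif' without a matching '#if'
  //   }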

  //===--------------------------------------------------------------------===//
  // Other lexer functions.

  // Helper functions to lex the remainder of a token of the specific type.
  void LexIdentifier         (Token &Result, const char *CurPtr);
  void LexNumericConstant    (Token &Result, const char *CurPtr);
  void LexStringLiteral      (Token &Result, const char *CurPtr, bool Wide);
  void LexAngledStringLiteral(Token &Result, const char *CurPtr);
  void LexCharConstant       (Token &Result, const char *CurPtr);
  bool LexEndOfFile          (Token &Result, const char *CurPtr);

  void SkipWhitespace        (Token &Result, const char *CurPtr);
  bool SkipBCPLComment       (Token &Result, const char *CurPtr);
  bool SkipBlockComment      (Token &Result, const char *CurPtr);
  bool SaveBCPLComment       (Token &Result, const char *CurPtr);

  /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
  /// (potentially) macro expand the filename.  If the sequence parsed is not
  /// lexically legal, emit a diagnostic and return an EOM token in Result.
  void LexIncludeFilename(Token &Result);
};


}  // end namespace clang

#endif