PTHLexer.cpp revision f15674c680730c652a37a16a5d3f3ff429b0c308
//===--- PTHLexer.cpp - Lex from a token stream ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PTHLexer interface.
//
//===----------------------------------------------------------------------===//

#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Lex/PTHLexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Token.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/OwningPtr.h"
using namespace clang;

#define DISK_TOKEN_SIZE (1+1+3+4+2)
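// Each record in the on-disk token stream is DISK_TOKEN_SIZE bytes.  As
// decoded in PTHLexer::Lex() below, a record consists of (all fields
// little-endian):
//   1 byte  - the tok::TokenKind
//   1 byte  - the Token::TokenFlags
//   3 bytes - the persistent identifier ID plus one (0 means "no identifier")
//   4 bytes - the token's offset within its source file
//   2 bytes - the token's length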

//===----------------------------------------------------------------------===//
// Utility methods for reading from the mmap'ed PTH file.
//===----------------------------------------------------------------------===//
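// All multi-byte values in the PTH file are stored little-endian; each reader
// below advances the data pointer past the bytes it consumes.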

static inline uint8_t Read8(const unsigned char *&Data) {
  uint8_t V = Data[0];
  Data += 1;
  return V;
}

static inline uint16_t Read16(const unsigned char *&Data) {
// Targets that directly support unaligned little-endian 16-bit loads can just
// use them.
#if defined(__i386__) || defined(__x86_64__)
  uint16_t V = *((uint16_t*)Data);
#else
  uint16_t V = ((uint16_t)Data[0] <<  0) |
               ((uint16_t)Data[1] <<  8);
#endif
  Data += 2;
  return V;
}

static inline uint32_t Read24(const unsigned char *&Data) {
// Targets that directly support unaligned little-endian loads can read the
// low 16 bits directly and combine them with the third byte.
#if defined(__i386__) || defined(__x86_64__)
  uint32_t V = ((uint16_t*)Data)[0] |
                 ((uint32_t)Data[2] << 16);
#else
  uint32_t V = ((uint32_t)Data[0] <<  0) |
               ((uint32_t)Data[1] <<  8) |
               ((uint32_t)Data[2] << 16);
#endif

  Data += 3;
  return V;
}

static inline uint32_t Read32(const unsigned char *&Data) {
// Targets that directly support unaligned little-endian 32-bit loads can just
// use them.
#if defined(__i386__) || defined(__x86_64__)
  uint32_t V = *((uint32_t*)Data);
#else
  uint32_t V = ((uint32_t)Data[0] <<  0) |
               ((uint32_t)Data[1] <<  8) |
               ((uint32_t)Data[2] << 16) |
               ((uint32_t)Data[3] << 24);
#endif
  Data += 4;
  return V;
}


//===----------------------------------------------------------------------===//
// PTHLexer methods.
//===----------------------------------------------------------------------===//

PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
                   const unsigned char *ppcond,
                   PTHSpellingSearch &mySpellingSrch, PTHManager &PM)
  : PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), LastHashTokPtr(0),
    PPCond(ppcond), CurPPCondPtr(ppcond), MySpellingSrch(mySpellingSrch),
    PTHMgr(PM) {

  FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
}

void PTHLexer::Lex(Token& Tok) {
LexNextToken:

  //===--------------------------------------==//
  // Read the raw token data.
  //===--------------------------------------==//

  // Shadow CurPtr into an automatic variable.
  const unsigned char *CurPtrShadow = CurPtr;

  // Read in the data for the token.
  tok::TokenKind k = (tok::TokenKind) Read8(CurPtrShadow);
  Token::TokenFlags flags = (Token::TokenFlags) Read8(CurPtrShadow);
  uint32_t perID = Read24(CurPtrShadow);
  uint32_t FileOffset = Read32(CurPtrShadow);
  uint32_t Len = Read16(CurPtrShadow);
  CurPtr = CurPtrShadow;

  //===--------------------------------------==//
  // Construct the token itself.
  //===--------------------------------------==//

  Tok.startToken();
  Tok.setKind(k);
  Tok.setFlag(flags);
  assert(!LexingRawMode);
  Tok.setIdentifierInfo(perID ? PTHMgr.GetIdentifierInfo(perID-1) : 0);
  Tok.setLocation(FileStartLoc.getFileLocWithOffset(FileOffset));
  Tok.setLength(Len);

  //===--------------------------------------==//
  // Process the token.
  //===--------------------------------------==//
#if 0
  SourceManager& SM = PP->getSourceManager();
  llvm::cerr << SM.getFileEntryForID(FileID)->getName()
    << ':' << SM.getLogicalLineNumber(Tok.getLocation())
    << ':' << SM.getLogicalColumnNumber(Tok.getLocation())
    << '\n';
#endif

  if (k == tok::identifier) {
    MIOpt.ReadToken();
    return PP->HandleIdentifier(Tok);
  }

  if (k == tok::eof) {
    // Save the end-of-file token.
    EofToken = Tok;

    Preprocessor *PPCache = PP;

    assert(!ParsingPreprocessorDirective);
    assert(!LexingRawMode);

    // FIXME: Issue diagnostics similar to Lexer.
    if (PP->HandleEndOfFile(Tok, false))
      return;

    assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
    return PPCache->Lex(Tok);
  }

  if (k == tok::hash && Tok.isAtStartOfLine()) {
    LastHashTokPtr = CurPtr - DISK_TOKEN_SIZE;
    assert(!LexingRawMode);
    PP->HandleDirective(Tok);

    if (PP->isCurrentLexer(this))
      goto LexNextToken;

    return PP->Lex(Tok);
  }

  if (k == tok::eom) {
    assert(ParsingPreprocessorDirective);
    ParsingPreprocessorDirective = false;
    return;
  }

  MIOpt.ReadToken();
}

// FIXME: We can just grab the last token instead of storing a copy
// into EofToken.
void PTHLexer::getEOF(Token& Tok) {
  assert(EofToken.is(tok::eof));
  Tok = EofToken;
}

void PTHLexer::DiscardToEndOfLine() {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");

  // We assume that if the preprocessor wishes to discard to the end of
  // the line it also means to end the current preprocessor directive.
  ParsingPreprocessorDirective = false;

  // Skip tokens by peeking at only their kind and flags; we don't need to
  // reconstruct full tokens from the token buffer.  This saves some copies
  // and avoids needless IdentifierInfo* lookups.
  const unsigned char* p = CurPtr;
  while (1) {
    // Read the token kind.  Are we at the end of the file?
    tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
    if (x == tok::eof) break;

    // Read the token flags.  Are we at the start of the next line?
    Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
    if (y & Token::StartOfLine) break;

    // Skip to the next token.
    p += DISK_TOKEN_SIZE;
  }

  CurPtr = p;
}

/// SkipBlock - Used by Preprocessor to skip the current conditional block.
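///  The PP-conditional side-table (PPCond) walked here is, as read below, an
///  array of paired 32-bit values: the offset of a '#' token within the token
///  buffer and the index of the next "sibling" entry in the table (zero for a
///  #endif, which has no sibling).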
bool PTHLexer::SkipBlock() {
  assert(CurPPCondPtr && "No cached PP conditional information.");
  assert(LastHashTokPtr && "No known '#' token.");

  const unsigned char* HashEntryI = 0;
  uint32_t Offset;
  uint32_t TableIdx;

  do {
    // Read the token offset from the side-table.
    Offset = Read32(CurPPCondPtr);

    // Read the target table index from the side-table.
    TableIdx = Read32(CurPPCondPtr);

    // Compute the actual memory address of the '#' token data for this entry.
    HashEntryI = TokBuf + Offset;

    // Optimization: "sibling jumping".  #if...#else...#endif blocks can
    //  contain nested blocks.  Instead of doing a linear search through the
    //  side-table, we can jump over these nested blocks whenever the next
    //  "sibling" entry is not at a location greater than LastHashTokPtr.
    if (HashEntryI < LastHashTokPtr && TableIdx) {
      // In the side-table we are still at an entry for a '#' token that
      // is earlier than the last one we saw.  Check whether the location we
      // would jump to gets us closer.
      const unsigned char* NextPPCondPtr =
        PPCond + TableIdx*(sizeof(uint32_t)*2);
      assert(NextPPCondPtr >= CurPPCondPtr);
      // Read where we should jump to.
      uint32_t TmpOffset = Read32(NextPPCondPtr);
      const unsigned char* HashEntryJ = TokBuf + TmpOffset;

      if (HashEntryJ <= LastHashTokPtr) {
        // Jump directly to the next entry in the side table.
        HashEntryI = HashEntryJ;
        Offset = TmpOffset;
        TableIdx = Read32(NextPPCondPtr);
        CurPPCondPtr = NextPPCondPtr;
      }
    }
  }
  while (HashEntryI < LastHashTokPtr);
  assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
  assert(TableIdx && "No jumping from #endifs.");

  // Update our side-table iterator.
  const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
  assert(NextPPCondPtr >= CurPPCondPtr);
  CurPPCondPtr = NextPPCondPtr;

  // Read where we should jump to.
  HashEntryI = TokBuf + Read32(NextPPCondPtr);
  uint32_t NextIdx = Read32(NextPPCondPtr);

  // By construction NextIdx will be zero if this is a #endif.  Knowing this
  // lets us avoid lexing another token.
  bool isEndif = NextIdx == 0;

  // This case can occur when we see something like this:
  //
  //  #if ...
  //   /* a comment or nothing */
  //  #elif
  //
  // If we are skipping the first #if block it will be the case that CurPtr
  // already points at the 'elif'.  Just return.

  if (CurPtr > HashEntryI) {
    assert(CurPtr == HashEntryI + DISK_TOKEN_SIZE);
    // Did we reach a #endif?  If so, go ahead and consume that token as well.
    if (isEndif)
      CurPtr += DISK_TOKEN_SIZE*2;
    else
      LastHashTokPtr = HashEntryI;

    return isEndif;
  }

  // Otherwise, we need to advance.  Update CurPtr to point to the '#' token.
  CurPtr = HashEntryI;

  // Update the location of the last observed '#'.  This is useful if we
  // are skipping multiple blocks.
  LastHashTokPtr = CurPtr;

  // Skip the '#' token.
  assert(((tok::TokenKind)*CurPtr) == tok::hash);
  CurPtr += DISK_TOKEN_SIZE;

  // Did we reach a #endif?  If so, go ahead and consume that token as well.
  if (isEndif) { CurPtr += DISK_TOKEN_SIZE*2; }

  return isEndif;
}

SourceLocation PTHLexer::getSourceLocation() {
  // getSourceLocation is not on the hot path.  It is used to get the location
  // of the next token when transitioning back to this lexer after handling a
  // #included file.  Just read the necessary data from the token data buffer
  // to construct the SourceLocation object.
  // NOTE: This is a virtual function; hence it is defined out-of-line.
  const unsigned char *OffsetPtr = CurPtr + (1 + 1 + 3);
  uint32_t Offset = Read32(OffsetPtr);
  return FileStartLoc.getFileLocWithOffset(Offset);
}

//===----------------------------------------------------------------------===//
// getSpelling() - Use cached data in PTH files for getSpelling().
//===----------------------------------------------------------------------===//

unsigned PTHManager::getSpelling(FileID FID, unsigned FPos,
                                 const char *&Buffer) {
  llvm::DenseMap<FileID, PTHSpellingSearch*>::iterator I =
    SpellingMap.find(FID);

  if (I == SpellingMap.end())
    return 0;

  return I->second->getSpellingBinarySearch(FPos, Buffer);
}

unsigned PTHManager::getSpelling(SourceLocation Loc, const char *&Buffer) {
  SourceManager &SM = PP->getSourceManager();
  Loc = SM.getSpellingLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedFileLoc(Loc);
  return getSpelling(LocInfo.first, LocInfo.second, Buffer);
}

unsigned PTHManager::getSpellingAtPTHOffset(unsigned PTHOffset,
                                            const char *&Buffer) {
  assert(PTHOffset < Buf->getBufferSize());
  const unsigned char* Ptr =
    (const unsigned char*)Buf->getBufferStart() + PTHOffset;

  // The string is prefixed by a 16-bit length, followed by the string itself.
  unsigned Len = Read16(Ptr);
  Buffer = (const char *)Ptr;
  return Len;
}

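// The spelling tables searched below consist of fixed-size entries, each a
// pair of 32-bit values: the token's offset within its source file and the
// PTH offset of the token's cached spelling.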
unsigned PTHSpellingSearch::getSpellingLinearSearch(unsigned FPos,
                                                    const char *&Buffer) {
  const unsigned char *Ptr = LinearItr;
  unsigned Len = 0;

  if (Ptr == TableEnd)
    return getSpellingBinarySearch(FPos, Buffer);

  do {
    uint32_t TokOffset = Read32(Ptr);

    if (TokOffset > FPos)
      return getSpellingBinarySearch(FPos, Buffer);

    // Did we find a matching token offset for this spelling?
    if (TokOffset == FPos) {
      uint32_t SpellingPTHOffset = Read32(Ptr);
      Len = PTHMgr.getSpellingAtPTHOffset(SpellingPTHOffset, Buffer);
      break;
    }

    // No match yet; skip over this entry's spelling offset so the next
    // iteration reads the next token offset.
    Ptr += sizeof(uint32_t);
  } while (Ptr != TableEnd);

  LinearItr = Ptr;
  return Len;
}


unsigned PTHSpellingSearch::getSpellingBinarySearch(unsigned FPos,
                                                    const char *&Buffer) {

  assert((TableEnd - TableBeg) % SpellingEntrySize == 0);
  assert(TableEnd >= TableBeg);

  if (TableEnd == TableBeg)
    return 0;

  unsigned min = 0;
  const unsigned char *tb = TableBeg;
  unsigned max = NumSpellings;

  do {
    unsigned i = (max - min) / 2 + min;
    const unsigned char *Ptr = tb + (i * SpellingEntrySize);

    uint32_t TokOffset = Read32(Ptr);
    if (TokOffset > FPos) {
      max = i;
      assert(!(max == min) || (min == i));
      continue;
    }

    if (TokOffset < FPos) {
      if (i == min)
        break;

      min = i;
      continue;
    }

    uint32_t SpellingPTHOffset = Read32(Ptr);
    return PTHMgr.getSpellingAtPTHOffset(SpellingPTHOffset, Buffer);
  }
  while (min != max);

  return 0;
}

unsigned PTHLexer::getSpelling(SourceLocation Loc, const char *&Buffer) {
  SourceManager &SM = PP->getSourceManager();
  Loc = SM.getSpellingLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedFileLoc(Loc);

  FileID FID = LocInfo.first;
  unsigned FPos = LocInfo.second;

  if (FID == getFileID())
    return MySpellingSrch.getSpellingLinearSearch(FPos, Buffer);
  return PTHMgr.getSpelling(FID, FPos, Buffer);
}

//===----------------------------------------------------------------------===//
// Internal Data Structures for PTH file lookup and resolving identifiers.
//===----------------------------------------------------------------------===//


/// PTHFileLookup - This internal data structure is used by the PTHManager
///  to map from FileEntry objects managed by FileManager to offsets within
///  the PTH file.
namespace {
class VISIBILITY_HIDDEN PTHFileLookup {
public:
  class Val {
    uint32_t TokenOff;
    uint32_t PPCondOff;
    uint32_t SpellingOff;
  public:
    Val() : TokenOff(~0) {}
    Val(uint32_t toff, uint32_t poff, uint32_t soff)
      : TokenOff(toff), PPCondOff(poff), SpellingOff(soff) {}

    bool isValid() const { return TokenOff != ~((uint32_t)0); }

    uint32_t getTokenOffset() const {
      assert(isValid() && "PTHFileLookup entry not initialized.");
      return TokenOff;
    }

    uint32_t getPPCondOffset() const {
      assert(isValid() && "PTHFileLookup entry not initialized.");
      return PPCondOff;
    }

    uint32_t getSpellingOffset() const {
      assert(isValid() && "PTHFileLookup entry not initialized.");
      return SpellingOff;
    }
  };

private:
  llvm::StringMap<Val> FileMap;

public:
  PTHFileLookup() {}

  Val Lookup(const FileEntry* FE) {
    const char* s = FE->getName();
    unsigned size = strlen(s);
    return FileMap.GetOrCreateValue(s, s+size).getValue();
  }

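  // The on-disk file table read below begins with a 32-bit entry count; each
  // entry is a 32-bit name length, the file name bytes themselves, and three
  // 32-bit offsets (token data, PP-conditional table, spelling table).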
  void ReadTable(const unsigned char* D) {
    uint32_t N = Read32(D);     // Read the number of entries in the table.

    for ( ; N > 0; --N) {       // The rest of the data is the table itself.
      uint32_t Len = Read32(D);
      const char* s = (const char *)D;
      D += Len;

      uint32_t TokenOff = Read32(D);
      uint32_t PPCondOff = Read32(D);
      uint32_t SpellingOff = Read32(D);

      FileMap.GetOrCreateValue(s, s+Len).getValue() =
        Val(TokenOff, PPCondOff, SpellingOff);
    }
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// PTHManager methods.
//===----------------------------------------------------------------------===//

PTHManager::PTHManager(const llvm::MemoryBuffer* buf, void* fileLookup,
                       const unsigned char* idDataTable,
                       IdentifierInfo** perIDCache,
                       const unsigned char* sortedIdTable, unsigned numIds)
: Buf(buf), PerIDCache(perIDCache), FileLookup(fileLookup),
  IdDataTable(idDataTable), SortedIdTable(sortedIdTable),
  NumIds(numIds), PP(0) {}

PTHManager::~PTHManager() {
  delete Buf;
  delete (PTHFileLookup*) FileLookup;
  free(PerIDCache);
}

PTHManager* PTHManager::Create(const std::string& file) {
  // Memory map the PTH file.
  llvm::OwningPtr<llvm::MemoryBuffer>
  File(llvm::MemoryBuffer::getFile(file.c_str()));

  if (!File)
    return 0;

  // Get the buffer ranges and check if there are at least three 32-bit
  // words at the end of the file.
  const unsigned char* BufBeg = (unsigned char*)File->getBufferStart();
  const unsigned char* BufEnd = (unsigned char*)File->getBufferEnd();

  if (!(BufEnd > BufBeg + sizeof(uint32_t)*3)) {
    assert(false && "Invalid PTH file.");
    return 0; // FIXME: Proper error diagnostic?
  }

  // Compute the address of the index table at the end of the PTH file.
  // This table contains the offsets of the file lookup table, the persistent
  // ID -> identifier data table, and the sorted identifier table.
  // FIXME: We should just embed this offset in the PTH file.
  const unsigned char* EndTable = BufEnd - sizeof(uint32_t)*4;
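  // As read below, the three 32-bit words following EndTable hold the offsets
  // of those tables: identifier data at EndTable+4, the sorted persistent-ID
  // table at EndTable+8, and the file lookup table at EndTable+12.  The word
  // at EndTable itself is not read by this function.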

  // Construct the file lookup table.  This will be used for mapping from
  // FileEntry*'s to cached tokens.
  const unsigned char* FileTableOffset = EndTable + sizeof(uint32_t)*3;
  const unsigned char* FileTable = BufBeg + Read32(FileTableOffset);

  if (!(FileTable > BufBeg && FileTable < BufEnd)) {
    assert(false && "Invalid PTH file.");
    return 0; // FIXME: Proper error diagnostic?
  }

  llvm::OwningPtr<PTHFileLookup> FL(new PTHFileLookup());
  FL->ReadTable(FileTable);

  // Get the location of the table mapping from persistent ids to the
  // data needed to reconstruct identifiers.
  const unsigned char* IDTableOffset = EndTable + sizeof(uint32_t)*1;
  const unsigned char* IData = BufBeg + Read32(IDTableOffset);
  if (!(IData > BufBeg && IData < BufEnd)) {
    assert(false && "Invalid PTH file.");
    return 0; // FIXME: Proper error diagnostic?
  }

  // Get the location of the lexicographically sorted table of persistent IDs.
  const unsigned char* SortedIdTableOffset = EndTable + sizeof(uint32_t)*2;
  const unsigned char* SortedIdTable = BufBeg + Read32(SortedIdTableOffset);
  if (!(SortedIdTable > BufBeg && SortedIdTable < BufEnd)) {
    assert(false && "Invalid PTH file.");
    return 0; // FIXME: Proper error diagnostic?
  }

  // Get the number of IdentifierInfos and pre-allocate the identifier cache.
  uint32_t NumIds = Read32(IData);

  // Pre-allocate the persistent ID -> IdentifierInfo* cache.  We use calloc()
  // so that, in the best case, the memory is only zeroed once when the OS
  // hands us new pages.
  IdentifierInfo** PerIDCache =
    (IdentifierInfo**)calloc(NumIds, sizeof(*PerIDCache));

  if (!PerIDCache) {
    assert(false && "Could not allocate Persistent ID cache.");
    return 0;
  }

  // Create the new PTHManager.
  return new PTHManager(File.take(), FL.take(), IData, PerIDCache,
                        SortedIdTable, NumIds);
}

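// GetIdentifierInfo lazily materializes IdentifierInfo objects: each one is
// placement-new'd into the first half of a pair whose second half points at
// the identifier's string data inside the PTH buffer, presumably so the name
// can be recovered later without eagerly copying every identifier string.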
IdentifierInfo* PTHManager::GetIdentifierInfo(unsigned persistentID) {

  // Check if the IdentifierInfo has already been resolved.
  IdentifierInfo* II = PerIDCache[persistentID];
  if (II) return II;

  // Look in the PTH file for the string data for the IdentifierInfo object.
  const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*persistentID;
  const unsigned char* IDData =
    (const unsigned char*)Buf->getBufferStart() + Read32(TableEntry);
  assert(IDData < (const unsigned char*)Buf->getBufferEnd());

  // Allocate the object.
  std::pair<IdentifierInfo,const unsigned char*> *Mem =
    Alloc.Allocate<std::pair<IdentifierInfo,const unsigned char*> >();

  Mem->second = IDData;
  II = new ((void*) Mem) IdentifierInfo(true);

  // Store the new IdentifierInfo in the cache.
  PerIDCache[persistentID] = II;
  return II;
}

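// Each entry in SortedIdTable is a single 32-bit persistent ID (hence the
// 4-byte stride below); the table is assumed to be emitted by the PTH
// generator in an order compatible with the length-then-strncmp comparison
// used by this binary search.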
IdentifierInfo* PTHManager::get(const char *NameStart, const char *NameEnd) {
  unsigned min = 0;
  unsigned max = NumIds;
  unsigned Len = NameEnd - NameStart;

  do {
    unsigned i = (max - min) / 2 + min;
    const unsigned char *Ptr = SortedIdTable + (i * 4);

    // Read the persistentID.
    unsigned perID = Read32(Ptr);

    // Get the IdentifierInfo.
    IdentifierInfo* II = GetIdentifierInfo(perID);

    // First compare the lengths.
    unsigned IILen = II->getLength();
    if (Len < IILen) goto IsLess;
    if (Len > IILen) goto IsGreater;

    // Now compare the strings!
    {
      signed comp = strncmp(NameStart, II->getName(), Len);
      if (comp < 0) goto IsLess;
      if (comp > 0) goto IsGreater;
    }
    // We found a match!
    return II;

  IsGreater:
    if (i == min) break;
    min = i;
    continue;

  IsLess:
    max = i;
    assert(!(max == min) || (min == i));
  }
  while (min != max);

  return 0;
}


PTHLexer *PTHManager::CreateLexer(FileID FID) {
  const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
  if (!FE)
    return 0;

  // Look up the FileEntry object in our file lookup data structure.  It will
  // return a variant that indicates whether or not there is an offset within
  // the PTH file that contains cached tokens.
  PTHFileLookup::Val FileData = ((PTHFileLookup*)FileLookup)->Lookup(FE);

  if (!FileData.isValid()) // No tokens available.
    return 0;

  const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
  // Compute the offset of the token data within the buffer.
  const unsigned char* data = BufStart + FileData.getTokenOffset();

  // Get the location of the pp-conditional table.
  const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
  uint32_t Len = Read32(ppcond);
  if (Len == 0) ppcond = 0;

  // Get the location of the spelling table.
  const unsigned char* spellingTable = BufStart + FileData.getSpellingOffset();

  Len = Read32(spellingTable);
  if (Len == 0) spellingTable = 0;

  assert(data < (const unsigned char*)Buf->getBufferEnd());

  // Create the SpellingSearch object for this FileID.
  PTHSpellingSearch* ss = new PTHSpellingSearch(*this, Len, spellingTable);
  SpellingMap[FID] = ss;

  assert(PP && "No preprocessor set yet!");
  return new PTHLexer(*PP, FID, data, ppcond, *ss, *this);
}