1import os
2import os.path
3from fnmatch import fnmatch
4import targz
5
6##def DoxyfileParse(file_contents):
7##   """
8##   Parse a Doxygen source file and return a dictionary of all the values.
9##   Values will be strings and lists of strings.
10##   """
11##   data = {}
12##
13##   import shlex
14##   lex = shlex.shlex(instream = file_contents, posix = True)
15##   lex.wordchars += "*+./-:"
16##   lex.whitespace = lex.whitespace.replace("\n", "")
17##   lex.escape = ""
18##
19##   lineno = lex.lineno
20##   last_backslash_lineno = lineno
21##   token = lex.get_token()
22##   key = token   # the first token should be a key
23##   last_token = ""
24##   key_token = False
25##   next_key = False
26##   new_data = True
27##
28##   def append_data(data, key, new_data, token):
29##      if new_data or len(data[key]) == 0:
30##         data[key].append(token)
31##      else:
32##         data[key][-1] += token
33##
34##   while token:
35##      if token in ['\n']:
36##         if last_token not in ['\\']:
37##            key_token = True
38##      elif token in ['\\']:
39##         pass
40##      elif key_token:
41##         key = token
42##         key_token = False
43##      else:
44##         if token == "+=":
45##            if not data.has_key(key):
46##               data[key] = list()
47##         elif token == "=":
48##            data[key] = list()
49##         else:
50##            append_data( data, key, new_data, token )
51##            new_data = True
52##
53##      last_token = token
54##      token = lex.get_token()
55##
56##      if last_token == '\\' and token != '\n':
57##         new_data = False
58##         append_data( data, key, new_data, '\\' )
59##
60##   # compress lists of len 1 into single strings
61##   for (k, v) in data.items():
62##      if len(v) == 0:
63##         data.pop(k)
64##
65##      # items in the following list will be kept as lists and not converted to strings
66##      if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
67##         continue
68##
69##      if len(v) == 1:
70##         data[k] = v[0]
71##
72##   return data
73##
74##def DoxySourceScan(node, env, path):
75##   """
76##   Doxygen Doxyfile source scanner.  This should scan the Doxygen file and add
77##   any files used to generate docs to the list of source files.
78##   """
79##   default_file_patterns = [
80##      '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
81##      '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++',
82##      '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
83##      '*.py',
84##   ]
85##
86##   default_exclude_patterns = [
87##      '*~',
88##   ]
89##
90##   sources = []
91##
92##   data = DoxyfileParse(node.get_contents())
93##
94##   if data.get("RECURSIVE", "NO") == "YES":
95##      recursive = True
96##   else:
97##      recursive = False
98##
99##   file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
100##   exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
101##
102##   for node in data.get("INPUT", []):
103##      if os.path.isfile(node):
104##         sources.add(node)
105##      elif os.path.isdir(node):
106##         if recursive:
107##            for root, dirs, files in os.walk(node):
108##               for f in files:
109##                  filename = os.path.join(root, f)
110##
111##                  pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
112##                  exclude_check = reduce(lambda x, y: x and fnmatch(filename, y), exclude_patterns, True)
113##
114##                  if pattern_check and not exclude_check:
115##                     sources.append(filename)
116##         else:
117##            for pattern in file_patterns:
118##               sources.extend(glob.glob("/".join([node, pattern])))
119##   sources = map( lambda path: env.File(path), sources )
120##   return sources
121##
122##
123##def DoxySourceScanCheck(node, env):
124##   """Check if we should scan this file"""
125##   return os.path.isfile(node.path)
126
def srcDistEmitter(source, target, env):
   """
   SCons emitter for the SrcDist (tar.gz source distribution) builder.

   A pass-through emitter: the target and source lists are returned
   exactly as given, without adding or removing any nodes.
   """
   # NOTE(review): this emitter previously contained Doxygen-specific
   # target computation; the live behavior is a plain pass-through.
   return (target, source)
158
def generate(env):
   """
   Add builders and construction variables for the SrcDist tool.

   Registers a 'SrcDist' builder (built on top of the targz tool, with
   srcDistEmitter as its emitter) in the environment's BUILDERS map.
   Does nothing if the targz tool is not available in this environment.
   """
   if not targz.exists(env):
      return
   # Wire our pass-through emitter into the tar.gz builder.
   env['BUILDERS']['SrcDist'] = targz.makeBuilder(srcDistEmitter)
174
def exists(env):
   """
   Make sure srcdist exists.

   The SrcDist tool is usable exactly when the underlying targz tool
   is available for this environment.
   """
   return targz.exists(env)
180