/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/pgen2/

grammar.py
    10: token module; the Python tokenize module reports all operators as the
    19: from . import token, tokenize  [namespace]
    129: # Map from operator to number (since tokenize doesn't do this)
driver.py
    26: from . import grammar, parse, token, tokenize, pgen  [namespace]
    40: # XXX Move the prefix computation into a wrapper around tokenize.
    59: if type in (tokenize.COMMENT, tokenize.NL):
    88: tokens = tokenize.generate_tokens(stream.readline)
    105: tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
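The driver.py matches above show lib2to3's core tokenizing loop: build a token stream from a readline callable and skip the COMMENT and NL tokens the parser does not want. A minimal Python 2.7 sketch of that pattern (using the stdlib tokenize module, which exposes the same names as the pgen2 copy; the source string is illustrative):

    import StringIO
    import tokenize

    # Tokenize a source string the way driver.py does, dropping the
    # COMMENT and NL tokens before they reach the parser.
    source = "# header comment\nx = 1\n"
    tokens = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    for tok_type, tok_str, start, end, line in tokens:
        if tok_type in (tokenize.COMMENT, tokenize.NL):
            continue
        print tokenize.tok_name[tok_type], repr(tok_str)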
pgen.py
    5: from . import grammar, token, tokenize  [namespace]
    19: self.generator = tokenize.generate_tokens(stream.readline)
    323: while tup[0] in (tokenize.COMMENT, tokenize.NL):
tokenize.py
    23: tokenize(readline, tokeneater=printtoken)
    37: __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
    160: def tokenize(readline, tokeneater=printtoken):  [function]
    162: The tokenize() function accepts two parameters: one representing the
    163: input stream, and one providing an output mechanism for tokenize().
    256: in the same way as the tokenize() generator.
    335: # Output text will tokenize the back to the input
    427: ("<tokenize>", lnum, pos, line))
    499: if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    500: else: tokenize(sy [all...]
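The def at line 160 is the legacy callback-style entry point: tokenize() drives a tokeneater function once per token instead of yielding tokens like generate_tokens(). A small Python 2.7 sketch of that interface (the eater function is illustrative):

    import StringIO
    import tokenize

    # Called once per token with the same five fields that
    # generate_tokens() would yield as a tuple.
    def eater(tok_type, tok_str, start, end, line):
        row, col = start
        print "%d,%d %s %r" % (row, col, tokenize.tok_name[tok_type], tok_str)

    tokenize.tokenize(StringIO.StringIO("y = 2\n").readline, eater)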
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/pgen2/

grammar.py, driver.py, pgen.py, tokenize.py: identical matches to the
darwin-x86 pgen2 tree above.
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/idlelib/

ScriptBinding.py
    24: import tokenize  [namespace]
    72: tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
    73: except tokenize.TokenError, msg:
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/

pyclbr.py
    44: import tokenize  [namespace]
    153: g = tokenize.generate_tokens(f.readline)

tabnanny.py
    26: import tokenize  [namespace]
    27: if not hasattr(tokenize, 'NL'):
    28: raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
    106: process_tokens(tokenize.generate_tokens(f.readline))
    108: except tokenize.TokenError, msg:
    274: INDENT = tokenize.INDENT
    275: DEDENT = tokenize.DEDENT
    276: NEWLINE = tokenize.NEWLINE
    277: JUNK = tokenize [all...]
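tabnanny's lines 26-28 and 106-108 show a defensive pattern: guard at import time for a modern-enough tokenize module, then trap TokenError while consuming the stream. Roughly, under Python 2.7 (the file name is hypothetical):

    import tokenize

    # Same version guard tabnanny uses: the NL pseudo-token appeared
    # in newer tokenize modules than the original callback-only one.
    if not hasattr(tokenize, 'NL'):
        raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")

    f = open("some_module.py")          # hypothetical input file
    try:
        for tok in tokenize.generate_tokens(f.readline):
            pass                        # tabnanny hands these to process_tokens()
    except tokenize.TokenError, msg:    # Python 2 except syntax, as in the source
        print "Token problem:", msg
    finally:
        f.close()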
cgitb.py
    32: import tokenize  [namespace]
    84: for ttype, token, start, end, line in tokenize.generate_tokens(reader):
    85: if ttype == tokenize.NEWLINE: break
    86: if ttype == tokenize.NAME and token not in keyword.kwlist:
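cgitb's lines 84-86 pull identifiers out of a single logical line of source. A self-contained Python 2.7 sketch of the same scan (names_in is an illustrative helper, not cgitb's API):

    import keyword
    import tokenize

    def names_in(source_line):
        # Feed one line to the tokenizer and collect NAME tokens that
        # are not keywords, stopping at NEWLINE as cgitb does.
        lines = [source_line]
        found = []
        for ttype, token, start, end, line in tokenize.generate_tokens(
                lambda: lines.pop() if lines else ''):
            if ttype == tokenize.NEWLINE:
                break
            if ttype == tokenize.NAME and token not in keyword.kwlist:
                found.append(token)
        return found

    print names_in("total = price * quantity\n")   # ['total', 'price', 'quantity']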
gettext.py
    84: import token, tokenize  [namespace]
    85: tokens = tokenize.generate_tokens(StringIO(plural).readline)
    88: except tokenize.TokenError:
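gettext's lines 84-88 run the tokenizer over a Plural-Forms expression held in a string, rejecting input the tokenizer cannot finish. A minimal Python 2.7 version of that check (the plural value is an example):

    from StringIO import StringIO
    import tokenize

    plural = "(n != 1)"                 # example Plural-Forms expression
    try:
        # An unbalanced expression raises TokenError at end of input.
        tokens = list(tokenize.generate_tokens(StringIO(plural).readline))
    except tokenize.TokenError:
        print "malformed plural expression"
    else:
        print "%d tokens" % len(tokens)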
tokenize.py
    20: tokenize(readline, tokeneater=printtoken)
    34: __all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
    155: def tokenize(readline, tokeneater=printtoken):  [function]
    157: The tokenize() function accepts two parameters: one representing the
    158: input stream, and one providing an output mechanism for tokenize().
    254: # Output text will tokenize the back to the input
    351: ("<tokenize>", lnum, pos, line))
    424: tokenize(open(sys.argv[1]).readline)
    426: tokenize(sys.stdin.readline)
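Line 34 adds untokenize to __all__, and the comment at line 254 states its round-trip guarantee for (type, string) pairs. A Python 2.7 sketch of that invariant:

    from StringIO import StringIO
    import tokenize

    src = "x = 3 * 7\n"
    # Keep only (type, string) pairs; untokenize() then emits source
    # that tokenizes back to the same pairs, per the module's comment.
    t1 = [tok[:2] for tok in tokenize.generate_tokens(StringIO(src).readline)]
    newsrc = tokenize.untokenize(t1)
    t2 = [tok[:2] for tok in tokenize.generate_tokens(StringIO(newsrc).readline)]
    assert t1 == t2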
trace.py
    57: import tokenize  [namespace]
    427: for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/idlelib/

ScriptBinding.py: identical matches to the darwin-x86 copy above.

/prebuilts/python/linux-x86/2.7.5/lib/python2.7/

pyclbr.py, tabnanny.py, cgitb.py, gettext.py: identical matches to the
darwin-x86 copies above.
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/

patcomp.py
    18: from .pgen2 import driver, literals, token, tokenize, parse, grammar  [namespace]
    36: tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
refactor.py
    26: from .pgen2 import driver, tokenize, token  [namespace]
    132: gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    136: ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
    333: encoding = tokenize.detect_encoding(f.readline)[0]
    658: """Wraps a tokenize stream to systematically modify start/end."""
    659: tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
    672: """Generates lines as expected by tokenize from a list of lines.
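Line 333 relies on detect_encoding(), which this pgen2 copy of tokenize backports from Python 3 so 2to3 can honour coding cookies before decoding a file. A minimal Python 2.7 sketch (the file name is hypothetical):

    from lib2to3.pgen2 import tokenize

    # detect_encoding() reads at most two lines, looks for a BOM or a
    # coding cookie, and returns (encoding, lines_read).
    f = open("some_source.py", "rb")    # hypothetical input file
    try:
        encoding = tokenize.detect_encoding(f.readline)[0]
    finally:
        f.close()
    print "detected:", encoding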
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/

patcomp.py, refactor.py: identical matches to the darwin-x86 copies above.
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/tests/

test_parser.py
    20: from lib2to3.pgen2 import tokenize  [namespace]
    167: encoding = tokenize.detect_encoding(fp.readline)[0]