Searched defs:tokenize (results 1–25 of 87), sorted by relevance

/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/pgen2/
grammar.py
    10: token module; the Python tokenize module reports all operators as the
    19: from . import token, tokenize    [namespace]
    129: # Map from operator to number (since tokenize doesn't do this)
driver.py
    26: from . import grammar, parse, token, tokenize, pgen    [namespace]
    40: # XXX Move the prefix computation into a wrapper around tokenize.
    59: if type in (tokenize.COMMENT, tokenize.NL):
    88: tokens = tokenize.generate_tokens(stream.readline)
    105: tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
pgen.py
    5: from . import grammar, token, tokenize    [namespace]
    19: self.generator = tokenize.generate_tokens(stream.readline)
    323: while tup[0] in (tokenize.COMMENT, tokenize.NL):
tokenize.py
    23: tokenize(readline, tokeneater=printtoken)
    37: __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
    160: def tokenize(readline, tokeneater=printtoken):    [function]
    162: The tokenize() function accepts two parameters: one representing the
    163: input stream, and one providing an output mechanism for tokenize().
    256: in the same way as the tokenize() generator.
    335: # Output text will tokenize the back to the input
    427: ("<tokenize>", lnum, pos, line))
    499: if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    500: else: tokenize(sys.stdin.readline)
    [all...]
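
A minimal sketch of the callback-style entry point these pgen2 hits index, tokenize(readline, tokeneater) at line 160 above (assuming Python 2.7 with lib2to3 importable; the sample source string is invented for illustration):

    # Sketch: drive lib2to3's pgen2 tokenizer through the legacy
    # tokenize(readline, tokeneater) entry point indexed above.
    import StringIO
    from lib2to3.pgen2 import tokenize

    def eater(ttype, tstr, start, end, line):
        # start and end are (row, col) pairs into the source text.
        print tokenize.tok_name[ttype], repr(tstr), start, end

    source = "x = 1 + 2\n"   # invented example input
    tokenize.tokenize(StringIO.StringIO(source).readline, eater)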
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/pgen2/
grammar.py
    10: token module; the Python tokenize module reports all operators as the
    19: from . import token, tokenize    [namespace]
    129: # Map from operator to number (since tokenize doesn't do this)
driver.py
    26: from . import grammar, parse, token, tokenize, pgen    [namespace]
    40: # XXX Move the prefix computation into a wrapper around tokenize.
    59: if type in (tokenize.COMMENT, tokenize.NL):
    88: tokens = tokenize.generate_tokens(stream.readline)
    105: tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
pgen.py
    5: from . import grammar, token, tokenize    [namespace]
    19: self.generator = tokenize.generate_tokens(stream.readline)
    323: while tup[0] in (tokenize.COMMENT, tokenize.NL):
tokenize.py
    23: tokenize(readline, tokeneater=printtoken)
    37: __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
    160: def tokenize(readline, tokeneater=printtoken):    [function]
    162: The tokenize() function accepts two parameters: one representing the
    163: input stream, and one providing an output mechanism for tokenize().
    256: in the same way as the tokenize() generator.
    335: # Output text will tokenize the back to the input
    427: ("<tokenize>", lnum, pos, line))
    499: if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    500: else: tokenize(sys.stdin.readline)
    [all...]
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/idlelib/
ScriptBinding.py
    24: import tokenize    [namespace]
    72: tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
    73: except tokenize.TokenError, msg:
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/
pyclbr.py
    44: import tokenize    [namespace]
    153: g = tokenize.generate_tokens(f.readline)
tabnanny.py
    26: import tokenize    [namespace]
    27: if not hasattr(tokenize, 'NL'):
    28: raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
    106: process_tokens(tokenize.generate_tokens(f.readline))
    108: except tokenize.TokenError, msg:
    274: INDENT = tokenize.INDENT
    275: DEDENT = tokenize.DEDENT
    276: NEWLINE = tokenize.NEWLINE
    277: JUNK = tokenize.COMMENT, tokenize.NL
    [all...]
cgitb.py
    32: import tokenize    [namespace]
    84: for ttype, token, start, end, line in tokenize.generate_tokens(reader):
    85: if ttype == tokenize.NEWLINE: break
    86: if ttype == tokenize.NAME and token not in keyword.kwlist:
gettext.py
    84: import token, tokenize    [namespace]
    85: tokens = tokenize.generate_tokens(StringIO(plural).readline)
    88: except tokenize.TokenError:
tokenize.py
    20: tokenize(readline, tokeneater=printtoken)
    34: __all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
    155: def tokenize(readline, tokeneater=printtoken):    [function]
    157: The tokenize() function accepts two parameters: one representing the
    158: input stream, and one providing an output mechanism for tokenize().
    254: # Output text will tokenize the back to the input
    351: ("<tokenize>", lnum, pos, line))
    424: tokenize(open(sys.argv[1]).readline)
    426: tokenize(sys.stdin.readline)
trace.py
    57: import tokenize    [namespace]
    427: for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
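
The stdlib hits above (tabnanny, cgitb, gettext, trace) all share one idiom: wrap a file's readline in tokenize.generate_tokens() and iterate the resulting 5-tuples. A minimal sketch of that pattern (assuming Python 2.7; the filename is hypothetical):

    # Sketch: the generate_tokens(readline) iteration used by tabnanny,
    # cgitb, gettext, and trace above.
    import tokenize

    with open("example.py") as f:   # hypothetical input file
        try:
            for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
                if ttype == tokenize.COMMENT:
                    print "line %d: %s" % (start[0], tstr)
        except tokenize.TokenError, msg:
            print "incomplete input:", msg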
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/idlelib/
ScriptBinding.py
    24: import tokenize    [namespace]
    72: tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
    73: except tokenize.TokenError, msg:
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/
pyclbr.py
    44: import tokenize    [namespace]
    153: g = tokenize.generate_tokens(f.readline)
tabnanny.py
    26: import tokenize    [namespace]
    27: if not hasattr(tokenize, 'NL'):
    28: raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
    106: process_tokens(tokenize.generate_tokens(f.readline))
    108: except tokenize.TokenError, msg:
    274: INDENT = tokenize.INDENT
    275: DEDENT = tokenize.DEDENT
    276: NEWLINE = tokenize.NEWLINE
    277: JUNK = tokenize.COMMENT, tokenize.NL
    [all...]
cgitb.py
    32: import tokenize    [namespace]
    84: for ttype, token, start, end, line in tokenize.generate_tokens(reader):
    85: if ttype == tokenize.NEWLINE: break
    86: if ttype == tokenize.NAME and token not in keyword.kwlist:
gettext.py
    84: import token, tokenize    [namespace]
    85: tokens = tokenize.generate_tokens(StringIO(plural).readline)
    88: except tokenize.TokenError:
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/
patcomp.py
    18: from .pgen2 import driver, literals, token, tokenize, parse, grammar    [namespace]
    36: tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
refactor.py
    26: from .pgen2 import driver, tokenize, token    [namespace]
    132: gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    136: ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
    333: encoding = tokenize.detect_encoding(f.readline)[0]
    658: """Wraps a tokenize stream to systematically modify start/end."""
    659: tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
    672: """Generates lines as expected by tokenize from a list of lines.
/prebuilts/python/linux-x86/2.7.5/lib/python2.7/lib2to3/
patcomp.py
    18: from .pgen2 import driver, literals, token, tokenize, parse, grammar    [namespace]
    36: tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
refactor.py
    26: from .pgen2 import driver, tokenize, token    [namespace]
    132: gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
    136: ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
    333: encoding = tokenize.detect_encoding(f.readline)[0]
    658: """Wraps a tokenize stream to systematically modify start/end."""
    659: tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
    672: """Generates lines as expected by tokenize from a list of lines.
/prebuilts/python/darwin-x86/2.7.5/lib/python2.7/lib2to3/tests/
test_parser.py
    20: from lib2to3.pgen2 import tokenize    [namespace]
    167: encoding = tokenize.detect_encoding(fp.readline)[0]

Completed in 440 milliseconds
