/external/python/cpython3/Tools/scripts/
  untabify.py
      8: import tokenize
     30: with tokenize.open(filename) as f:

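untabify.py opens source files through tokenize.open(), which sniffs the PEP 263 coding cookie (or BOM) before decoding. A minimal sketch of the same pattern; the filename is a placeholder:

    import tokenize

    # tokenize.open() reads the coding cookie (e.g. "# -*- coding: latin-1 -*-")
    # from the first two lines and returns a text-mode file object decoded
    # with that encoding, falling back to UTF-8.
    with tokenize.open("example.py") as f:   # hypothetical path
        source = f.read()
    print(len(source), "characters")
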
  finddiv.py
     21: import tokenize
     58: g = tokenize.generate_tokens(fp.readline)

  cleanfuture.py
     42: import tokenize
    145: # Line-getter for tokenize.
    157: STRING = tokenize.STRING
    158: NL = tokenize.NL
    159: NEWLINE = tokenize.NEWLINE
    160: COMMENT = tokenize.COMMENT
    161: NAME = tokenize.NAME
    162: OP = tokenize.OP
    165: get = tokenize.generate_tokens(self.getline).__next__
    184: startline = srow - 1  # tokenize i [all...]

  fixdiv.py
     91: This really shouldn't happen. It means that the tokenize module
    136: import tokenize
    217: g = tokenize.generate_tokens(f.readline)
    367: if type == tokenize.NEWLINE:

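finddiv.py and fixdiv.py both walk the stream from tokenize.generate_tokens(), which yields (type, string, start, end, line) tuples. A rough sketch of that loop, here flagging `/` operators the way finddiv does; the path is a placeholder:

    import tokenize

    with open("example.py") as f:            # hypothetical path
        for tok_type, tok_str, start, end, line in tokenize.generate_tokens(f.readline):
            if tok_type == tokenize.OP and tok_str == "/":
                row, col = start             # (1-based row, 0-based column)
                print("division operator at %d:%d" % (row, col))
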
/external/python/cpython2/Tools/scripts/ |
  finddiv.py
     21: import tokenize
     58: g = tokenize.generate_tokens(fp.readline)

  checkappend.py
     39: import tokenize
    106: tokenize.tokenize(self.file.readline, self.tokeneater)
    107: except tokenize.TokenError, msg:
    113: NEWLINE=tokenize.NEWLINE,
    114: JUNK=(tokenize.COMMENT, tokenize.NL),
    115: OP=tokenize.OP,
    116: NAME=tokenize.NAME):

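checkappend.py uses the old Python 2 form of tokenize.tokenize(), which drives a caller-supplied "tokeneater" callback instead of returning an iterator (Python 3 dropped this signature; its tokenize.tokenize() reads bytes and yields tokens). A Python 2 sketch of the callback style; the path is a placeholder:

    # Python 2 only
    import tokenize

    def tokeneater(tok_type, tok_str, start, end, line):
        # Called once per token; start and end are (row, col) pairs.
        if tok_type == tokenize.NAME and tok_str == "append":
            print "append at line %d" % start[0]

    with open("example.py") as f:            # hypothetical path
        tokenize.tokenize(f.readline, tokeneater)
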
  cleanfuture.py
     42: import tokenize
    145: # Line-getter for tokenize.
    157: STRING = tokenize.STRING
    158: NL = tokenize.NL
    159: NEWLINE = tokenize.NEWLINE
    160: COMMENT = tokenize.COMMENT
    161: NAME = tokenize.NAME
    162: OP = tokenize.OP
    165: get = tokenize.generate_tokens(self.getline).next
    184: startline = srow - 1  # tokenize i [all...]

  fixdiv.py
     91: This really shouldn't happen. It means that the tokenize module
    136: import tokenize
    217: g = tokenize.generate_tokens(f.readline)
    367: if type == tokenize.NEWLINE:

  reindent.py
     44: import tokenize
    170: # that we can use tokenize's 1-based line numbering easily.
    179: # signal that tokenize doesn't know what to do about them;
    188: tokenize.tokenize(self.getline, self.tokeneater)
    259: # Line-getter for tokenize.
    268: # Line-eater for tokenize.
    270: INDENT=tokenize.INDENT,
    271: DEDENT=tokenize.DEDENT,
    272: NEWLINE=tokenize [all...]

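reindent.py's tokeneater watches INDENT, DEDENT, and NEWLINE tokens to recover each statement's logical depth. A small self-contained sketch of the same depth tracking, written in the Python 3 iterator style rather than the Python 2 callback style used here:

    import io
    import tokenize

    source = "def f():\n    if True:\n        return 1\n"
    depth = 0
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.INDENT:
            depth += 1
        elif tok.type == tokenize.DEDENT:
            depth -= 1
        elif tok.type == tokenize.NAME:
            print(tok.string, "at depth", depth)
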
/external/chromium-trace/catapult/common/py_utils/py_utils/refactor/ |
  offset_token.py
      8: import tokenize
     66: tokenize_tokens = tokenize.generate_tokens(f.readline)
     90: while offset_tokens[0].type == tokenize.NL:
    100: # Convert OffsetTokens to tokenize tokens.
    113: # tokenize can't handle whitespace before line continuations.
    115: return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')

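offset_token.py round-trips source code: generate_tokens() in, untokenize() out, with the final replace() patching up whitespace before line continuations, which the tokenizer mishandles. A minimal round-trip sketch:

    import io
    import tokenize

    source = "x = 1\ny = x + 2\n"
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
    # With full 5-tuples (positions included) the original spacing is preserved.
    assert tokenize.untokenize(tokens) == source
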
  snippet.py
      9: import tokenize
    216: # by the tokenize module to annotate the syntax tree with the information
    228: tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):
    232: # tokenize has 0 or 1 depending on if the file has one.

/external/python/cpython2/Lib/lib2to3/pgen2/ |
  grammar.py
     10: token module; the Python tokenize module reports all operators as the
     20: from . import token, tokenize
    152: # Map from operator to number (since tokenize doesn't do this)

  driver.py
     26: from . import grammar, parse, token, tokenize, pgen
     40: # XXX Move the prefix computation into a wrapper around tokenize.
     59: if type in (tokenize.COMMENT, tokenize.NL):
     88: tokens = tokenize.generate_tokens(stream.readline)
    105: tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)

/external/python/cpython3/Lib/lib2to3/pgen2/ |
  grammar.py
     10: token module; the Python tokenize module reports all operators as the
     20: from . import token, tokenize
    151: # Map from operator to number (since tokenize doesn't do this)

  driver.py
     26: from . import grammar, parse, token, tokenize, pgen
     40: # XXX Move the prefix computation into a wrapper around tokenize.
     59: if type in (tokenize.COMMENT, tokenize.NL):
     88: tokens = tokenize.generate_tokens(stream.readline)
    105: tokens = tokenize.generate_tokens(io.StringIO(text).readline)

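The pgen2 driver wraps this token stream to feed lib2to3's parser. A sketch of driving it from application code, assuming the stock Python grammar; note that lib2to3 is deprecated in recent CPython releases:

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("x = 1\n")         # input must end with a newline
    print(str(tree) == "x = 1\n")            # True: the tree prints back as source
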
/external/python/cpython3/Lib/ |
  linecache.py
     11: import tokenize
    136: with tokenize.open(fullname) as fp:

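linecache reads the whole file through tokenize.open(), so the source encoding is honoured, then caches the lines and serves them individually. Its public entry point, with a placeholder path:

    import linecache

    # Returns the requested line (with trailing newline), or "" if the
    # file or the 1-based line number doesn't exist. Results are cached.
    line = linecache.getline("example.py", 3)    # hypothetical path
    print(repr(line))
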
/external/python/cpython2/Lib/idlelib/ |
  ScriptBinding.py
     24: import tokenize
     72: tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
     73: except tokenize.TokenError as msg:

/external/python/cpython2/Lib/ |
  pyclbr.py
     44: import tokenize
    153: g = tokenize.generate_tokens(f.readline)

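pyclbr scans this token stream to build a module outline without importing the module. A sketch of its public API; "queue" is just a handy stdlib module to inspect:

    import pyclbr

    # Maps top-level class names to pyclbr.Class descriptors.
    classes = pyclbr.readmodule("queue")
    for name, cls in sorted(classes.items()):
        print(name, "defined at line", cls.lineno)
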
  tabnanny.py
     26: import tokenize
     27: if not hasattr(tokenize, 'NL'):
     28: raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
    106: process_tokens(tokenize.generate_tokens(f.readline))
    108: except tokenize.TokenError, msg:
    274: INDENT = tokenize.INDENT
    275: DEDENT = tokenize.DEDENT
    276: NEWLINE = tokenize.NEWLINE
    277: JUNK = tokenize [all...]

/external/python/cpython3/Lib/idlelib/ |
  runscript.py
     22: import tokenize
     69: with tokenize.open(filename) as f:
     71: tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
     72: except tokenize.TokenError as msg:

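Both IDLE bindings run tabnanny over the token stream before executing a script. The same check, sketched stand-alone; the path is a placeholder:

    import tabnanny
    import tokenize

    try:
        with tokenize.open("example.py") as f:       # hypothetical path
            tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
    except tokenize.TokenError as msg:
        print("token error:", msg)
    except tabnanny.NannyNag as nag:
        # Raised for whitespace that is ambiguous under different tab sizes.
        print("ambiguous indentation at line", nag.get_lineno())
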
/external/autotest/utils/ |
  reindent.py
     44: import tokenize
    162: # that we can use tokenize's 1-based line numbering easily.
    171: # signal that tokenize doesn't know what to do about them;
    176: tokenize.tokenize(self.getline, self.tokeneater)
    247: # Line-getter for tokenize.
    256: # Line-eater for tokenize.
    258: INDENT=tokenize.INDENT,
    259: DEDENT=tokenize.DEDENT,
    260: NEWLINE=tokenize [all...]

/external/deqp/framework/randomshaders/ |
  rsgShader.cpp
     93: void Shader::tokenize (GeneratorState& state, TokenStream& str) const    // member of rsg::Shader
    101: m_globalStatements[ndx]->tokenize(state, str);
    107: m_functions[ndx]->tokenize(state, str);
    112: m_mainFunction.tokenize(state, str);
    125: void Function::tokenize (GeneratorState& state, TokenStream& str) const    // member of rsg::Function
    147: m_functionBlock.tokenize(state, str);

/external/python/cpython2/Lib/lib2to3/ |
  patcomp.py
     18: from .pgen2 import driver, literals, token, tokenize, parse, grammar
     36: tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)

/external/python/cpython3/Lib/distutils/command/ |
  build_scripts.py
     12: import tokenize
     82: encoding, lines = tokenize.detect_encoding(f.readline)

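build_scripts rewrites script shebang lines while preserving the source encoding, which it learns from tokenize.detect_encoding(). The function takes a readline from a binary-mode file and returns the encoding plus the raw lines it consumed; a sketch with a placeholder path:

    import tokenize

    with open("example.py", "rb") as f:      # must be opened in binary mode
        encoding, consumed = tokenize.detect_encoding(f.readline)
    print(encoding)          # e.g. "utf-8"
    print(consumed)          # the byte lines read while sniffing (at most two)
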
/external/python/cpython3/Lib/lib2to3/ |
  patcomp.py
     18: from .pgen2 import driver, literals, token, tokenize, parse, grammar
     36: tokens = tokenize.generate_tokens(io.StringIO(input).readline)