Searched defs:tokenize (Results 51 - 75 of 104) sorted by relevance


/external/python/cpython2/Tools/i18n/
pygettext.py
35 Enter pygettext, which uses Python's standard tokenize module to scan
165 import tokenize namespace
380 if ttype == tokenize.STRING:
383 elif ttype not in (tokenize.COMMENT, tokenize.NL):
387 if ttype == tokenize.NAME and tstring in ('class', 'def'):
390 if ttype == tokenize.NAME and tstring in opts.keywords:
395 if ttype == tokenize.OP and tstring == ':':
400 if ttype == tokenize.STRING:
403 elif ttype not in (tokenize
[all...]
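
The pygettext matches above show a small state machine over the token stream, switching on tokenize.STRING, tokenize.NAME, and tokenize.OP. A minimal sketch of that kind of scan using the Python 3 tokenize generator (the helper name and keyword set are illustrative, not pygettext's actual logic):

    import tokenize

    def find_marked_strings(path, keywords=("_",)):
        """Collect string literals that directly follow a keyword such as _()."""
        found = []
        expecting_string = False
        with open(path, "rb") as fp:
            for tok in tokenize.tokenize(fp.readline):
                if tok.type == tokenize.NAME and tok.string in keywords:
                    expecting_string = True      # a _() style call has opened
                elif expecting_string and tok.type == tokenize.STRING:
                    found.append(tok.string)
                    expecting_string = False
                elif tok.type not in (tokenize.COMMENT, tokenize.NL, tokenize.OP):
                    expecting_string = False     # any other token resets the state
        return found
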
/external/python/cpython3/Lib/
imp.py
27 import tokenize namespace
155 # tokenize.detect_encoding() only accepts bytes.
301 encoding = tokenize.detect_encoding(file.readline)[0]
trace.py
57 import tokenize namespace
291 encoding, _ = tokenize.detect_encoding(fp.readline)
382 tok = tokenize.generate_tokens(f.readline)
396 with tokenize.open(filename) as f:
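
Both imp.py and trace.py read source files through tokenize.detect_encoding so the coding cookie is honoured, and trace.py also uses tokenize.open, which wraps the same detection. A short sketch, assuming an example.py on disk:

    import tokenize

    # detect_encoding() takes a readline callable that returns bytes.
    with open("example.py", "rb") as fp:
        encoding, first_lines = tokenize.detect_encoding(fp.readline)
    print(encoding)  # e.g. 'utf-8'

    # tokenize.open() opens the file in text mode using the detected encoding.
    with tokenize.open("example.py") as f:
        source = f.read()
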
tokenize.py
3 tokenize(readline) is a generator that breaks a stream of bytes into
41 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
320 token, which is the first token sequence output by tokenize.
330 # Output bytes will tokenize back to the input
331 t1 = [tok[:2] for tok in tokenize(f.readline)]
334 t2 = [tok[:2] for tok in tokenize(readline)]
359 in the same way as the tokenize() generator.
464 def tokenize(readline): function
466 The tokenize() generator requires one argument, readline, which
484 # built yet and tokenize i
[all...]
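
The Lib/tokenize.py matches quote the round-trip invariant its own docstring documents: the (type, string) pairs yielded by tokenize() can be passed to untokenize() and tokenized again to the same sequence. A condensed version of that check:

    import tokenize
    from io import BytesIO

    source = b"x = 1  # comment\n"

    # tokenize() is a generator driven by a bytes readline callable.
    t1 = [tok[:2] for tok in tokenize.tokenize(BytesIO(source).readline)]

    # untokenize() rebuilds bytes that tokenize back to the same pairs.
    rebuilt = tokenize.untokenize(t1)
    t2 = [tok[:2] for tok in tokenize.tokenize(BytesIO(rebuilt).readline)]
    assert t1 == t2
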
/external/python/cpython3/Lib/lib2to3/pgen2/
tokenize.py
23 tokenize(readline, tokeneater=printtoken)
37 __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
160 def tokenize(readline, tokeneater=printtoken): function
162 The tokenize() function accepts two parameters: one representing the
163 input stream, and one providing an output mechanism for tokenize().
257 in the same way as the tokenize() generator.
337 # Output text will tokenize back to the input
439 ("<tokenize>", lnum, pos, line))
575 if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
576 else: tokenize(sy
[all...]
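
Unlike the stdlib generator, the pgen2 copy keeps the older callback interface: tokenize(readline, tokeneater) invokes the tokeneater callback once per token and defaults to printtoken. A brief sketch against that interface (lib2to3 is deprecated, so this assumes a CPython version that still ships it):

    from io import StringIO
    from lib2to3.pgen2 import tokenize

    def tokeneater(toktype, tokstring, start, end, line):
        # Called once per token with the same fields the generator form yields.
        print(tokenize.tok_name[toktype], repr(tokstring))

    tokenize.tokenize(StringIO("x = 1\n").readline, tokeneater)
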
/external/python/cpython3/Lib/lib2to3/
refactor.py
24 from .pgen2 import driver, tokenize, token namespace
129 gen = tokenize.generate_tokens(io.StringIO(source).readline)
133 ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
330 encoding = tokenize.detect_encoding(f.readline)[0]
655 """Wraps a tokenize stream to systematically modify start/end."""
656 tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
669 """Generates lines as expected by tokenize from a list of lines.
/external/python/cpython3/Lib/test/test_tools/
test_unparse.py
8 import tokenize namespace
24 encoding = tokenize.detect_encoding(pyfile.readline)[0]
/external/python/cpython3/Tools/i18n/
pygettext.py
35 Enter pygettext, which uses Python's standard tokenize module to scan
166 import tokenize namespace
342 if ttype == tokenize.STRING:
345 elif ttype not in (tokenize.COMMENT, tokenize.NL):
349 if ttype == tokenize.NAME and tstring in ('class', 'def'):
352 if ttype == tokenize.NAME and tstring in opts.keywords:
357 if ttype == tokenize.OP and tstring == ':':
362 if ttype == tokenize.STRING:
365 elif ttype not in (tokenize
[all...]
/external/selinux/libsepol/src/
util.c
196 * The tokenize and tokenize_str functions may be used to
240 * line_buf - Buffer containing string to tokenize.
241 * delim - The delimiter used to tokenize line_buf. A whitespace delimiter will
249 * function will not tokenize more than num_args and the last argument will
253 int hidden tokenize(char *line_buf, char delim, int num_args, ...) function
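
The util.c comments describe the contract of the C tokenize(): split line_buf on delim (whitespace treated specially), fill at most num_args output arguments, and leave the remainder of the line in the last one. A rough Python model of that contract, not the C implementation:

    def tokenize(line_buf, delim, num_args):
        """Split into at most num_args tokens; the last one keeps the remainder."""
        if delim.isspace():
            return line_buf.split(None, num_args - 1)  # any run of whitespace
        return line_buf.split(delim, num_args - 1)

    print(tokenize("allow domain type:file read", " ", 3))
    # ['allow', 'domain', 'type:file read']
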
/external/testng/src/main/java/org/testng/remote/strprotocol/
MessageHelper.java
200 return tokenize(messagePart, PARAM_DELIMITER);
204 return tokenize(message, DELIMITER);
207 private static String[] tokenize(final String message, final char separator) { method in class:MessageHelper
/external/deqp/framework/randomshaders/
rsgBinaryOps.cpp
93 void BinaryOp<Precedence, Assoc>::tokenize (GeneratorState& state, TokenStream& str) const function in class:rsg::BinaryOp
95 m_leftValueExpr->tokenize(state, str);
97 m_rightValueExpr->tokenize(state, str);
rsgStatement.cpp
203 void BlockStatement::tokenize (GeneratorState& state, TokenStream& str) const function in class:rsg::BlockStatement
208 (*i)->tokenize(state, str);
219 void ExpressionStatement::tokenize (GeneratorState& state, TokenStream& str) const function in class:rsg::ExpressionStatement
222 m_expression->tokenize(state, str);
333 void DeclarationStatement::tokenize (GeneratorState& state, TokenStream& str) const function in class:rsg::DeclarationStatement
340 m_expression->tokenize(state, str);
456 void ConditionalStatement::tokenize (GeneratorState& state, TokenStream& str) const function in class:rsg::ConditionalStatement
462 m_condition->tokenize(state, str);
469 m_trueStatement->tokenize(state, str);
473 m_trueStatement->tokenize(stat
556 void AssignStatement::tokenize (GeneratorState& state, TokenStream& str) const function in class:rsg::AssignStatement
[all...]
/external/python/cpython2/Lib/
inspect.py
39 import tokenize namespace
650 elif type == tokenize.NEWLINE:
657 elif type == tokenize.INDENT:
660 elif type == tokenize.DEDENT:
667 elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
676 tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
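
inspect.py's BlockFinder tracks NEWLINE, INDENT, and DEDENT tokens to decide where a function or class body ends, and this cpython2 copy drives it through the old callback form tokenize.tokenize(readline, tokeneater). A hedged Python 3 analogue of the same idea using the generator API (the helper name is made up):

    import io
    import tokenize

    def block_end_line(source):
        """Return the 1-based line on which the first indented block ends."""
        depth = 0
        last_line = 0
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            if tok.type == tokenize.INDENT:
                depth += 1
            elif tok.type == tokenize.DEDENT:
                depth -= 1
                if depth == 0:               # back at top level: block finished
                    return last_line
            elif depth > 0 and tok.type not in (tokenize.COMMENT, tokenize.NL):
                last_line = tok.end[0]
        return last_line

    print(block_end_line("def f():\n    x = 1\n    return x\n\nprint('after')\n"))  # 3
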
/external/python/cpython2/Lib/lib2to3/tests/
test_parser.py
23 from lib2to3.pgen2 import tokenize namespace
283 encoding = tokenize.detect_encoding(fp.readline)[0]
/external/python/cpython2/Parser/
asdl.py
61 def tokenize(self, input): member in class:ASDLScanner
64 super(ASDLScanner, self).tokenize(input)
386 tokens = scanner.tokenize(buf)
/external/python/cpython3/Lib/lib2to3/tests/
test_parser.py
25 from lib2to3.pgen2 import tokenize namespace
384 encoding = tokenize.detect_encoding(fp.readline)[0]
/external/sqlite/android/
sqlite3_android.cpp
263 static void tokenize(sqlite3_context * context, int argc, sqlite3_value ** argv) function
265 //ALOGD("enter tokenize");
338 // Get the raw bytes for the string to tokenize
455 err = sqlite3_create_function(handle, "_TOKENIZE", 4, SQLITE_UTF16, collator, tokenize, NULL, NULL);
459 err = sqlite3_create_function(handle, "_TOKENIZE", 5, SQLITE_UTF16, collator, tokenize, NULL, NULL);
463 err = sqlite3_create_function(handle, "_TOKENIZE", 6, SQLITE_UTF16, collator, tokenize, NULL, NULL);
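
sqlite3_android.cpp registers a custom _TOKENIZE SQL function with sqlite3_create_function in several arities. The equivalent registration step in Python's sqlite3 binding looks roughly like this; the splitting logic is a toy stand-in, not the Android tokenizer, which writes tokens into an index table:

    import sqlite3

    def _tokenize(text, delimiter):
        # Toy stand-in for the real tokenizer.
        return " ".join(text.split(delimiter))

    conn = sqlite3.connect(":memory:")
    conn.create_function("_TOKENIZE", 2, _tokenize)
    print(conn.execute("SELECT _TOKENIZE('a,b,c', ',')").fetchone())  # ('a b c',)
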
/external/webrtc/webrtc/base/
stringencode.cc
557 size_t tokenize(const std::string& source, char delimiter, function in namespace:rtc
595 tokenize(source, delimiter, &new_fields);
600 size_t tokenize(const std::string& source, char delimiter, char start_mark, function in namespace:rtc
618 // We have found the matching marks. First tokenize the pre-mask. Then add
/external/libmojo/third_party/jinja2/
lexer.py
542 def tokenize(self, source, name=None, filename=None, state=None): member in class:Lexer
543 """Calls tokeniter + tokenize and wraps it in a token stream.
549 """This is called with the stream as returned by `tokenize` and wraps
595 generator. Use this method if you just want to tokenize a template.
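
Lexer.tokenize wraps the low-level tokeniter generator in a TokenStream. From user code, the documented way to see the same token sequence is Environment.lex, which yields (lineno, token_type, value) tuples; a brief sketch assuming a stock Jinja2 install:

    from jinja2 import Environment

    env = Environment()
    for lineno, token_type, value in env.lex("Hello {{ name }}!"):
        print(lineno, token_type, repr(value))
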
/external/python/cpython3/Lib/importlib/
_bootstrap_external.py
512 import tokenize # To avoid bootstrap issues. namespace
514 encoding = tokenize.detect_encoding(source_bytes_readline)
/external/python/cpython3/Lib/test/
test_tokenize.py
2 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, namespace
14 # Tests for the tokenize module.
25 for type, token, start, end, line in tokenize(f.readline):
66 for tok in tokenize(readline):
193 for toktype, token, start, end, line in tokenize(f.readline):
903 g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string
1263 with mock.patch('tokenize._builtin_open', return_value=m):
1271 import tokenize a
[all...]
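
test_tokenize.py drives the generator over in-memory byte streams and compares the resulting token types and strings against expected sequences. A condensed version of that pattern (the expected tokens are just an illustration):

    from io import BytesIO
    from tokenize import ENCODING, NAME, NUMBER, OP, tokenize

    toks = list(tokenize(BytesIO(b"answer = 42\n").readline))

    # The first token reports the detected encoding; the rest describe the source.
    assert toks[0].type == ENCODING
    assert [(t.type, t.string) for t in toks[1:4]] == [
        (NAME, "answer"), (OP, "="), (NUMBER, "42"),
    ]
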
/external/python/cpython3/Tools/parser/
unparse.py
4 import tokenize namespace
671 encoding = tokenize.detect_encoding(pyfile.readline)[0]
/external/e2fsprogs/lib/support/
dict.c
1237 static int tokenize(char *string, ...) function
1332 if (tokenize(in+1, &tok1, &tok2, (char **) 0) != 2) {
1403 if (tokenize(in+1, &tok1, &tok2, (char **) 0) != 2) {
1424 if (tokenize(in+1, &tok1, (char **) 0) != 1) {
1446 if (tokenize(in+1, &tok1, (char **) 0) != 1) {
1496 if (tokenize(in+1, &tok1, (char **) 0) != 1) {
1509 if (tokenize(in+1, &tok1, &tok2, (char **) 0) != 2) {
/external/f2fs-tools/fsck/
dict.c
1207 static int tokenize(char *string, ...) function
1302 if (tokenize(in+1, &tok1, &tok2, (char **) 0) != 2) {
1373 if (tokenize(in+1, &tok1, &tok2, (char **) 0) != 2) {
1394 if (tokenize(in+1, &tok1, (char **) 0) != 1) {
1416 if (tokenize(in+1, &tok1, (char **) 0) != 1) {
1466 if (tokenize(in+1, &tok1, (char **) 0) != 1) {
1479 if (tokenize(in+1, &tok1, &tok2, (char **) 0) != 2) {
/external/google-breakpad/src/testing/scripts/generator/cpp/
ast.py
46 from cpp import tokenize namespace
549 if parts[-1].token_type == tokenize.NAME:
579 if (type_name and type_name[-1].token_type == tokenize.NAME and
580 p.token_type == tokenize.NAME):
581 type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
738 if token.token_type == tokenize.NAME:
749 if next.token_type == tokenize.SYNTAX and next.name == '(':
754 syntax = tokenize.SYNTAX
763 new_temp = self._GetTokensUpTo(tokenize
[all...]
