Lines Matching refs:token

44 #    For each token, we determine if it is a block or continuation token.
60 """Stores information about a token.
63      token: The token this info describes.
64 is_block: Whether the token represents a block indentation.
65 is_transient: Whether the token should be automatically removed without
66 finding a matching end token.
67 overridden_by: TokenInfo for a token that overrides the indentation that
68 this token would require.
69 is_permanent_override: Whether the override on this token should persist
70 even after the overriding token is removed from the stack. For example:
76 line_number: The effective line number of this token. Will either be the
81 def __init__(self, token, is_block=False):
85      token: The token to store.
86 is_block: Whether the token represents a block indentation.
88 self.token = token
92    self.is_transient = not is_block and token.type not in (
94 self.line_number = token.line_number
97 result = '\n %s' % self.token
100 result, self.overridden_by.token.string)
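
The lines above only show the matches for "token", so the snippet below is a small standalone sketch of the TokenInfo record they describe. SimpleToken and the lowercase type names are stand-ins invented for illustration; the tuple of non-transient types is truncated in the excerpt (line 92), so its contents here are assumed.

    class SimpleToken(object):
        # Stand-in for the linter's token objects (assumed shape).
        def __init__(self, string, token_type, line_number):
            self.string = string
            self.type = token_type
            self.line_number = line_number

    class TokenInfoSketch(object):
        # Mirrors the fields documented above: token, is_block, is_transient,
        # overridden_by, is_permanent_override and line_number.
        def __init__(self, token, is_block=False):
            self.token = token
            self.is_block = is_block
            self.overridden_by = None
            self.is_permanent_override = False
            # Continuation tokens are transient: they are popped without
            # waiting for a matching end token (line 92; type names assumed).
            self.is_transient = not is_block and token.type not in (
                'start_paren', 'start_parameters')
            self.line_number = token.line_number

    paren = SimpleToken('(', 'start_paren', 12)
    info = TokenInfoSketch(paren)
    assert not info.is_transient   # an open paren waits for its ')'
    assert info.line_number == 12
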
127 def CheckToken(self, token, state):
128 """Checks a token for indentation errors.
131 token: The current token under consideration
135 An error array [error code, error string, error token] if the token is
139 token_type = token.type
142 is_first = self._IsFirstNonWhitespaceTokenInLine(token)
158 goog_scope = self._GoogScopeOrNone(start_token.token)
160 if not token.line.endswith('; // goog.scope\n'):
161 if (token.line.find('//') > -1 and
162 token.line.find('goog.scope') >
163 token.line.find('//')):
169 token,
170 Position(token.start_index, token.length)])
178 token,
179 Position(token.start_index, token.length)])
181 elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
184 elif is_first and token.string == '.':
185 # This token should have been on the previous line, so treat it as if it
187 info = TokenInfo(token)
188 info.line_number = token.line_number - 1
195 token.metadata.IsUnaryOperator())
196 not_dot = token.string != '.'
197 if is_first and not_binary_operator and not_dot and token.type not in (
200 print 'Line #%d: stack %r' % (token.line_number, stack)
208 actual = self._GetActualIndentation(token)
213 next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
227 token,
229 self._start_index_offset[token.line_number] = expected[0] - actual
233 self._Add(TokenInfo(token=token,
234 is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
236 elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
237 self._Add(TokenInfo(token=token, is_block=True))
240 self._Add(TokenInfo(token=token, is_block=False))
242 elif token_type == Type.KEYWORD and token.string == 'return':
243 self._Add(TokenInfo(token))
245 elif not token.IsLastInLine() and (
246 token.IsAssignment() or token.IsOperator('?')):
247 self._Add(TokenInfo(token=token))
250 if token.metadata.is_implied_block_close:
254 is_last = self._IsLastCodeInLine(token)
257 if token.string == ':':
258 if (stack and stack[-1].token.string == '?'):
261 if (token.line_number == stack[-1].token.line_number):
262 self._Add(TokenInfo(token))
263 elif token.metadata.context.type == Context.CASE_BLOCK:
271 self._Add(TokenInfo(token=token, is_block=True))
272 elif token.metadata.context.type == Context.LITERAL_ELEMENT:
275 self._Add(TokenInfo(token))
282 elif token.string != ',':
283 self._Add(TokenInfo(token))
285 # The token is a comma.
286 if token.metadata.context.type == Context.VAR:
287 self._Add(TokenInfo(token))
288 elif token.metadata.context.type != Context.PARAMETERS:
291 elif (token.string.endswith('.')
293 self._Add(TokenInfo(token))
294 elif token_type == Type.PARAMETERS and token.string.endswith(','):
296 self._Add(TokenInfo(token))
297 elif token.metadata.is_implied_semicolon:
299 elif token.IsAssignment():
300 self._Add(TokenInfo(token))
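
CheckToken's full body is long and only its token dispatch shows up above. The sketch below illustrates just the indentation comparison those lines build toward (lines 208 and 227-229): when the actual indentation of a line is not one of the expected columns, an error entry mirroring the appends on lines 169-170 and 178-179 (code, message, token, position) is recorded, and the offset between expected and actual is remembered so hard stops computed later for the same line are measured from the expected column. The error code, message text and position tuple are placeholders, and expected is assumed to be an ordered list of allowed columns.

    def compare_indentation_sketch(token, expected, actual,
                                   start_index_offset, errors):
        # expected: ordered list of allowed indentation columns (assumed).
        if actual not in expected:
            errors.append(['WRONG_INDENTATION',                 # placeholder code
                           'Wrong indentation: expected any of %r, got %d'
                           % (expected, actual),
                           token,
                           (actual, 0)])                        # placeholder Position
            # Line 229: remember how far off this line is so that hard stops
            # computed later on the same line can be shifted accordingly.
            start_index_offset[token.line_number] = expected[0] - actual

    class Tok(object):                                          # stand-in token
        def __init__(self, line_number):
            self.line_number = line_number

    errors, offsets = [], {}
    compare_indentation_sketch(Tok(42), expected=[4, 8], actual=6,
                               start_index_offset=offsets, errors=errors)
    assert offsets[42] == -2 and len(errors) == 1
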
321 def _IsHardStop(self, token):
322 """Determines if the given token can have a hard stop after it.
324 Hard stops are indentations defined by the position of another token as in
327 return (token.type in self._HARD_STOP_TYPES or
328 token.string in self._HARD_STOP_STRINGS or
329 token.IsAssignment())
349 token = token_info.token
352 if not token_info.overridden_by and token.string != 'return':
366 if self._IsHardStop(token):
368 self._IsHardStop(token_info.overridden_by.token))
370 start_index = token.start_index
371 if token.line_number in self._start_index_offset:
372 start_index += self._start_index_offset[token.line_number]
373 if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and
377 elif token.string == 'return' and not token_info.overridden_by:
380 elif (token.type == Type.START_BRACKET):
383 elif token.IsAssignment():
384 hard_stops.add(start_index + len(token.string) + 1)
386 elif token.IsOperator('?') and not token_info.overridden_by:
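
A worked example of the assignment hard stop added on line 384 above: the continuation of a broken assignment may be aligned one column past the '=' sign, i.e. directly under the start of the right-hand side. The string and column numbers below are made up for illustration.

    line = 'result = foo(a,'
    eq_index = line.index('=')               # column 7 in this example
    hard_stop = eq_index + len('=') + 1      # start_index + len(token.string) + 1
    assert hard_stop == 9
    assert line[hard_stop:].startswith('foo(')   # continuation aligns under the RHS

The START_PAREN, 'return' and '?' branches above add analogous positions, but their exact offsets are cut off in this listing.
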
391 def _GetActualIndentation(self, token):
392 """Gets the actual indentation of the line containing the given token.
395 token: Any token on the line.
398 The actual indentation of the line containing the given token. Returns
401 # Move to the first token in the line
402 token = tokenutil.GetFirstTokenInSameLine(token)
405 if token.type == Type.WHITESPACE:
406 if token.string.find('\t') >= 0:
409 return len(token.string)
410 elif token.type == Type.PARAMETERS:
411 return len(token.string) - len(token.string.lstrip())
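
A string-based sketch of the same computation: count the line's leading spaces, giving up when the leading whitespace contains a tab (the return value for the tab case is cut off above, so -1 here is an assumption). The real method works on WHITESPACE and PARAMETERS tokens rather than raw strings.

    def actual_indentation_sketch(line):
        stripped = line.lstrip(' \t')
        leading = line[:len(line) - len(stripped)]
        if '\t' in leading:
            return -1           # assumed sentinel for "tabs found" (line 406)
        return len(leading)     # line 409: length of the leading whitespace

    assert actual_indentation_sketch('    foo();') == 4
    assert actual_indentation_sketch('\tfoo();') == -1
    assert actual_indentation_sketch('bar();') == 0
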
415 def _IsFirstNonWhitespaceTokenInLine(self, token):
416 """Determines if the given token is the first non-space token on its line.
419 token: The token.
422 True if the token is the first non-whitespace token on its line.
424 if token.type in (Type.WHITESPACE, Type.BLANK_LINE):
426 if token.IsFirstInLine():
428 return (token.previous and token.previous.IsFirstInLine() and
429 token.previous.type == Type.WHITESPACE)
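
Read together, lines 424-429 say: whitespace and blank-line tokens never qualify, a token that literally starts its line qualifies, and otherwise a token qualifies only if the sole thing before it on the line is a leading whitespace token. A compact sketch with a stand-in token class; the early return for whitespace is truncated above and assumed to be False.

    class Tok(object):                        # stand-in token, assumed shape
        def __init__(self, type_, first_in_line, previous=None):
            self.type = type_
            self.previous = previous
            self._first = first_in_line
        def IsFirstInLine(self):
            return self._first

    def is_first_non_ws_sketch(token):
        if token.type in ('whitespace', 'blank_line'):
            return False                      # assumed (return truncated above)
        if token.IsFirstInLine():
            return True
        return bool(token.previous and token.previous.IsFirstInLine()
                    and token.previous.type == 'whitespace')

    leading_ws = Tok('whitespace', True)
    ident = Tok('identifier', False, previous=leading_ws)
    assert is_first_non_ws_sketch(ident)      # only indentation precedes it
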
431 def _IsLastCodeInLine(self, token):
432 """Determines if the given token is the last code token on its line.
435 token: The token.
438 True if the token is the last code token on its line.
440 if token.type in Type.NON_CODE_TYPES:
442 start_token = token
444 token = token.next
445 if not token or token.line_number != start_token.line_number:
447 if token.type not in Type.NON_CODE_TYPES:
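
The loop on lines 442-448 walks forward from the token: if every later token on the same line is a non-code token (whitespace, comments), the token is the last code on its line. A self-contained sketch, with the non-code type names assumed:

    NON_CODE_TYPES = ('whitespace', 'comment', 'blank_line')    # assumed set

    class Tok(object):                        # stand-in token chain
        def __init__(self, type_, line_number):
            self.type = type_
            self.line_number = line_number
            self.next = None

    def is_last_code_in_line_sketch(token):
        if token.type in NON_CODE_TYPES:
            return False                      # assumed (return truncated above)
        start = token
        while True:
            token = token.next
            if not token or token.line_number != start.line_number:
                return True
            if token.type not in NON_CODE_TYPES:
                return False

    semi = Tok('semicolon', 3)
    semi.next = Tok('comment', 3)             # "foo();  // trailing comment"
    assert is_last_code_in_line_sketch(semi)  # only a comment follows on line 3
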
450 def _GoogScopeOrNone(self, token):
454 token: A token of type START_BLOCK.
457 The goog.scope function call token, or None if such call doesn't exist.
464 maybe_goog_scope = token
472 """Adds the given token info to the stack.
475 token_info: The token information to add.
477 if self._stack and self._stack[-1].token == token_info.token:
478 # Don't add the same token twice.
481 if token_info.is_block or token_info.token.type == Type.START_PAREN:
482 token_info.overridden_by = self._GoogScopeOrNone(token_info.token)
486 stack_token = stack_info.token
492 if (token_info.token.type == Type.START_BLOCK and
500 close_block = token_info.token.metadata.context.end_token
502 close_block.line_number != token_info.token.line_number
503 elif (token_info.token.type == Type.START_BLOCK and
504 token_info.token.metadata.context.type == Context.BLOCK and
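
The excerpt of _Add above shows three steps: refuse to push the same token twice (lines 477-478), let an enclosing goog.scope call override the indentation a block or open paren would otherwise add (lines 481-482), and walk the existing stack to update overrides (lines 486-504, mostly cut off here). A dictionary-based sketch of the first two steps, with find_goog_scope as a hypothetical stand-in for _GoogScopeOrNone:

    def find_goog_scope(token):
        # Hypothetical stand-in for _GoogScopeOrNone: returns the goog.scope
        # call token, or None. Always None in this sketch.
        return None

    def add_sketch(stack, token_info):
        if stack and stack[-1]['token'] is token_info['token']:
            return                            # don't add the same token twice
        if token_info['is_block'] or token_info['token']['type'] == 'start_paren':
            token_info['overridden_by'] = find_goog_scope(token_info['token'])
        stack.append(token_info)

    brace = {'type': 'start_block'}
    info = {'token': brace, 'is_block': True, 'overridden_by': None}
    stack = []
    add_sketch(stack, info)
    add_sketch(stack, info)                   # the duplicate push is ignored
    assert len(stack) == 1
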
522 """Pops the top token from the stack.
525 The popped token info.
528 if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET):
534 token_check = token_info.token
537 if token_info.token.type == Type.START_BRACKET:
541 line_number = token_info.token.line_number
555 """Pops the stack until an implied block token is found."""
556 while not self._Pop().token.metadata.is_implied_block:
560 """Pops the stack until a token of the given type is popped.
563 stop_type: The type of token to pop to.
566 The token info of the given type that was popped.
571 if last.token.type == stop_type:
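
A sketch of the pop-to-type loop shown on lines 560-571: entries are popped until one of the requested type comes off the stack, and that entry is returned to the caller. The behaviour when no such entry exists is not visible above, so returning None is an assumption.

    def pop_to_sketch(stack, stop_type):
        while stack:
            last = stack.pop()
            if last['type'] == stop_type:
                return last
        return None                           # assumed underflow behaviour

    stack = [{'type': 'start_block'}, {'type': 'start_paren'}, {'type': 'operator'}]
    popped = pop_to_sketch(stack, 'start_paren')
    assert popped['type'] == 'start_paren'
    assert stack == [{'type': 'start_block'}]  # everything above it was discarded
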
576 """Marks any token that was overridden by this token as active again.
579 token_info: The token that is being removed from the stack.