Searched defs:important (Results 1 - 12 of 12) sorted by relevance

/external/pdfium/core/fxcrt/css/
cfx_cssstylesheet_unittest.cpp
53 bool important; local
54 RetainPtr<CFX_CSSValue> v = decl_->GetProperty(prop, &important);
63 bool important; local
64 RetainPtr<CFX_CSSValue> v = decl_->GetProperty(prop, &important);
73 bool important; local
75 decl_->GetProperty(prop, &important).As<CFX_CSSValueList>();
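
In these pdfium hits, GetProperty() on the parsed declaration (decl_) returns the value for a property and reports through a bool out-parameter whether the declaration carried the CSS "!important" flag; the unit test reads both together. The self-contained C++ sketch below only illustrates that out-parameter pattern; CssValue, CssDeclaration and the std::string-based interface are hypothetical stand-ins, not pdfium's CFX_CSS types.

    #include <map>
    #include <optional>
    #include <string>

    // Hypothetical stand-ins for pdfium's CFX_CSSValue / CFX_CSSDeclaration.
    struct CssValue {
      std::string text;
      bool important = false;  // set when the declaration ended in "!important"
    };

    class CssDeclaration {
     public:
      void Add(const std::string& prop, const std::string& text, bool important) {
        props_[prop] = {text, important};
      }
      // Mirrors the decl_->GetProperty(prop, &important) calls in the test:
      // returns the value, if any, and reports the !important flag via the
      // out-parameter.
      std::optional<std::string> GetProperty(const std::string& prop,
                                             bool* important) const {
        auto it = props_.find(prop);
        if (it == props_.end()) return std::nullopt;
        *important = it->second.important;
        return it->second.text;
      }

     private:
      std::map<std::string, CssValue> props_;
    };

    // Usage, analogous to the test lines above:
    //   bool important = false;
    //   auto v = decl.GetProperty("color", &important);
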
/external/honggfuzz/posix/
arch.c
52 bool important; member in struct:__anon7856
55 [0 ...(NSIG - 1)].important = false,
58 [SIGILL].important = true,
61 [SIGFPE].important = true,
64 [SIGSEGV].important = true,
67 [SIGBUS].important = true,
71 [SIGABRT].important = false,
75 [SIGVTALRM].important = false,
114 if (!arch_sigs[termsig].important) {
115 LOG_D("It's not that important signa
[all...]
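
The posix/arch.c hits show a per-signal table: each entry's important member marks whether honggfuzz should treat that signal as a real crash (SIGILL, SIGFPE, SIGSEGV, SIGBUS), while SIGABRT and SIGVTALRM are left unmarked here. The original is C and uses GNU range designated initializers ([0 ...(NSIG - 1)]); the sketch below reproduces the same idea in portable C++ for illustration only, with ArchSig and MakeSigTable as invented names.

    #include <array>
    #include <csignal>  // NSIG is a common POSIX extension, not standard C++

    struct ArchSig {
      bool important = false;  // treat this signal as a crash worth recording?
    };

    // One slot per signal number; every entry starts out "not important",
    // matching the [0 ...(NSIG - 1)].important = false initializer above.
    static std::array<ArchSig, NSIG> MakeSigTable() {
      std::array<ArchSig, NSIG> sigs{};
      sigs[SIGILL].important = true;
      sigs[SIGFPE].important = true;
      sigs[SIGSEGV].important = true;
      sigs[SIGBUS].important = true;
      // SIGABRT and SIGVTALRM stay false, as in the posix/ table above.
      return sigs;
    }

    static const std::array<ArchSig, NSIG> arch_sigs = MakeSigTable();
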
/external/webrtc/webrtc/test/testsupport/
perf_test.cc
29 bool important) {
38 if (important) {
53 bool important) {
55 prefix, suffix, units, important).c_str());
68 bool important) {
72 units, important);
81 bool important) {
86 "", "", units, important);
94 bool important) {
96 important);
22 ResultsToString(const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& values, const std::string& prefix, const std::string& suffix, const std::string& units, bool important) argument
46 PrintResultsImpl(const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& values, const std::string& prefix, const std::string& suffix, const std::string& units, bool important) argument
63 PrintResult(const std::string& measurement, const std::string& modifier, const std::string& trace, size_t value, const std::string& units, bool important) argument
75 AppendResult(std::string& output, const std::string& measurement, const std::string& modifier, const std::string& trace, size_t value, const std::string& units, bool important) argument
89 PrintResult(const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& value, const std::string& units, bool important) argument
99 AppendResult(std::string& output, const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& value, const std::string& units, bool important) argument
110 PrintResultMeanAndError(const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& mean_and_error, const std::string& units, bool important) argument
120 AppendResultMeanAndError(std::string& output, const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& mean_and_error, const std::string& units, bool important) argument
131 PrintResultList(const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& values, const std::string& units, bool important) argument
141 AppendResultList(std::string& output, const std::string& measurement, const std::string& modifier, const std::string& trace, const std::string& values, const std::string& units, bool important) argument
152 PrintSystemCommitCharge(const std::string& test_name, size_t charge, bool important) argument
158 PrintSystemCommitCharge(FILE* target, const std::string& test_name, size_t charge, bool important) argument
166 SystemCommitChargeToString(const std::string& test_name, size_t charge, bool important) argument
[all...]
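
Every overload in perf_test.cc threads a trailing bool important down to the formatting code (note the if (important) branch at line 38), so each reported metric can individually be flagged as one the perf tooling should emphasize. A usage sketch follows; the PrintResult signature is copied from the line-63 hit, but the webrtc::test namespace, the ReportDecodeTime wrapper and the metric names are assumptions, and the code must be linked against the WebRTC test-support target to run.

    #include <cstddef>
    #include <string>

    namespace webrtc {
    namespace test {
    // Signature as listed in the line-63 hit above; defined in perf_test.cc.
    void PrintResult(const std::string& measurement, const std::string& modifier,
                     const std::string& trace, size_t value,
                     const std::string& units, bool important);
    }  // namespace test
    }  // namespace webrtc

    void ReportDecodeTime(size_t decode_time_ms) {
      // important=true asks the helper to highlight this metric in the perf
      // output; pass false for purely informational traces.
      webrtc::test::PrintResult("video_decode", "", "per_frame",
                                decode_time_ms, "ms", /*important=*/true);
    }
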
/external/honggfuzz/mac/
arch.c
107 bool important; member in struct:__anon7854
112 for (int x = 0; x < NSIG; x++) arch_sigs[x].important = false;
114 arch_sigs[SIGILL].important = true;
116 arch_sigs[SIGFPE].important = true;
118 arch_sigs[SIGSEGV].important = true;
120 arch_sigs[SIGBUS].important = true;
124 arch_sigs[SIGABRT].important = true;
128 arch_sigs[SIGVTALRM].important = false;
215 if (!arch_sigs[termsig].important) {
216 LOG_D("It's not that important signa
[all...]
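
The mac/arch.c port fills the same table at runtime (a loop first clears all NSIG entries, then individual signals are marked; unlike the posix/ table it also sets SIGABRT to true), and both ports gate crash handling on the flag: if the child's terminating signal is not marked important, the report is skipped (the truncated LOG_D lines at 114-115 and 215-216). A small C++ sketch of that gate; ShouldSaveCrash is an invented name, and the WIFSIGNALED/WTERMSIG handling is assumed context around the one check that actually appears in the hits.

    #include <array>
    #include <csignal>
    #include <cstdio>
    #include <sys/wait.h>

    struct ArchSig { bool important = false; };

    // Decide whether the wait() status of a fuzzed child is worth a crash
    // report. Only the `!sigs[termsig].important` test mirrors the hits above;
    // the rest is illustrative scaffolding.
    static bool ShouldSaveCrash(const std::array<ArchSig, NSIG>& sigs,
                                int status) {
      if (!WIFSIGNALED(status)) {
        return false;  // clean exit or stop: nothing to report
      }
      const int termsig = WTERMSIG(status);
      if (termsig <= 0 || termsig >= NSIG || !sigs[termsig].important) {
        std::fprintf(stderr, "signal %d is not important, skipping\n", termsig);
        return false;
      }
      return true;
    }
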
/external/honggfuzz/linux/
trace.c
250 bool important; member in struct:__anon7851
252 [0 ...(_NSIG)].important = false,
255 [SIGTRAP].important = false,
258 [SIGILL].important = true,
261 [SIGFPE].important = true,
264 [SIGSEGV].important = true,
267 [SIGBUS].important = true,
271 [SIGABRT].important = false,
275 [SIGVTALRM].important = false,
279 [SIGSYS].important
[all...]
/external/python/cpython2/Lib/pydoc_data/
topics.py
6 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section Literals for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n",
23 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements. Function and class definitions are\nalso syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print" statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. 
A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. 
For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. 
Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n\n\nThe "with" statement\n====================\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." 
identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from position arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. 
The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n',
27 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). 
The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. 
However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that an object\'s hash value is immutable\n (if the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. 
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n', namespace
40 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from position arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. 
If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n',
65 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "x.__getitem__(i)" for old-style\nclasses and "type(x).__getitem__(x, i)" for new-style classes. Except\nwhere mentioned, attempts to execute an operation raise an exception\nwhen no appropriate method is defined (typically "AttributeError" or\n"TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. 
If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. 
If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. 
(Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that an object\'s hash value is immutable\n (if the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. 
it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See Special method lookup for new-style\n classes.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). 
*owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new-style\nobjects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
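As a concrete illustration of that precedence (the class and attribute names below are invented for this sketch, and new-style classes under Python 2 are assumed): a plain method is stored as a function and is therefore a non-data descriptor, so a value planted in the instance dictionary shadows it, whereas a "property()" is a data descriptor and keeps winning:

   class Widget(object):
       def render(self):                  # function -> non-data descriptor
           return 'class render'
       @property
       def colour(self):                  # property() -> data descriptor
           return 'blue'

   w = Widget()
   w.__dict__['render'] = lambda: 'instance render'
   print w.render()                       # instance render -- instance dictionary wins
   w.__dict__['colour'] = 'red'
   print w.colour                         # blue -- the data descriptor still wins
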
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (Implementing Descriptors) for each variable name. As a\n result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using "type()". A class\ndefinition is read into a separate namespace and the class name is\nbound to the result of "type(name, bases, dict)".\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of "type()". This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing\n the role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s "__new__()"\nmethod -- "type.__new__()" can then be called from this method to\ncreate a class with different properties. This example adds a new\nelement to the class dictionary before creating the class:\n\n   class metacls(type):\n       def __new__(mcs, name, bases, dict):\n           dict[\'foo\'] = \'metacls was here\'\n           return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom "__call__()" method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for "name",\n "bases", and "dict". Upon class creation, the callable is used\n instead of the built-in "type()".\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If "dict[\'__metaclass__\']" exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
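As a minimal sketch of that lookup (the names are invented, and this is far less general than "abc.ABCMeta"): the check is supplied by the metaclass, so "isinstance()" consults it even though the class itself defines nothing:

   class EvenMeta(type):
       def __instancecheck__(cls, instance):
           # Illustrative rule only: treat even integers as instances of Even.
           return isinstance(instance, (int, long)) and instance % 2 == 0

   class Even(object):
       __metaclass__ = EvenMeta

   print isinstance(4, Even), isinstance(5, Even)    # True False
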
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__nonzero__()" method and whose "__len__()"\n method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. 
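For example, a minimal read-only sequence might look like the following sketch (the class name and the decision to accept slice objects are illustrative, not required by the protocol):

   class Squares(object):
       """A read-only sequence of the first n square numbers."""
       def __init__(self, n):
           self._n = n
       def __len__(self):
           return self._n
       def __getitem__(self, index):
           if isinstance(index, slice):
               return [self[i] for i in xrange(*index.indices(self._n))]
           if index < 0:                  # emulate the usual negative-index handling
               index += self._n
           if not 0 <= index < self._n:
               raise IndexError(index)
           return index * index

   s = Squares(5)
   print len(s), s[2], s[-1], s[1:3]      # 5 4 16 [1, 4]
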
Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "iterkeys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
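As a sketch (the class and key names are invented), a mapping-like wrapper might implement the membership test in terms of its keys:

   class LowerDict(object):
       """Mapping-like wrapper with case-insensitive string keys."""
       def __init__(self, data):
           self._data = dict((k.lower(), v) for k, v in data.items())
       def __getitem__(self, key):
           return self._data[key.lower()]
       def __iter__(self):
           return iter(self._data)        # iterate over the keys, as a dict does
       def __contains__(self, key):
           return key.lower() in self._data

   d = LowerDict({'Spam': 1})
   print 'SPAM' in d, list(d)             # True ['spam']
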
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see this section in the\n language reference.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine "__getslice__()"; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the "__getitem__()" method. (However, built-in types in CPython\n currently still implement "__getslice__()". Therefore, you have to\n override it in derived classes when implementing slicing.)\n\n Called to implement evaluation of "self[i:j]". The returned object\n should be of the same type as *self*. Note that missing *i* or *j*\n in the slice expression are replaced by zero or "sys.maxsize",\n respectively. If negative indexes are used in the slice, the\n length of the sequence is added to that index. If the instance does\n not implement the "__len__()" method, an "AttributeError" is\n raised. No guarantee is made that indexes adjusted this way are not\n still negative. Indexes which are greater than the length of the\n sequence are not modified. If no "__getslice__()" is found, a slice\n object is created instead, and passed to "__getitem__()" instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to "self[i:j]". Same notes for *i*\n and *j* as for "__getslice__()".\n\n This method is deprecated. If no "__setslice__()" is found, or for\n extended slicing of the form "self[i:j:k]", a slice object is\n created, and passed to "__setitem__()", instead of "__setslice__()"\n being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of "self[i:j]". Same notes for *i* and\n *j* as for "__getslice__()". This method is deprecated. If no\n "__delslice__()" is found, or for extended slicing of the form\n "self[i:j:k]", a slice object is created, and passed to\n "__delitem__()", instead of "__delslice__()" being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, "__getitem__()", "__setitem__()" or "__delitem__()" is\ncalled with a slice object as argument.\n\nThe following example demonstrates how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n"__getitem__()", "__setitem__()" and "__delitem__()" support slice\nobjects as arguments):\n\n   class MyClass:\n       ...\n       def __getitem__(self, index):\n           ...\n       def __setitem__(self, index, value):\n           ...\n       def __delitem__(self, index):\n           ...\n\n       if sys.version_info < (2, 0):\n           # They won\'t be defined if version is at least 2.0 final\n\n           def __getslice__(self, i, j):\n               return self[max(0, i):max(0, j):]\n           def __setslice__(self, i, j, seq):\n               self[max(0, i):max(0, j):] = seq\n           def __delslice__(self, i, j):\n               del self[max(0, i):max(0, j):]\n   ...\n\nNote the calls to "max()"; these are necessary because of the handling\nof negative indices before the "__*slice__()" methods are called.\nWhen negative indexes are used, the "__*item__()" methods receive them\nas provided, but the "__*slice__()" methods get a "cooked" form of the\nindex values. For each negative index value, the length of the\nsequence is added to the index before calling the method (which may\nstill result in a negative index); this is the customary handling of\nnegative indexes by the built-in sequence types, and the "__*item__()"\nmethods are expected to do this as well. However, since they should\nalready be doing that, negative indexes cannot be passed in; they must\nbe constrained to the bounds of the sequence before being passed to\nthe "__*item__()" methods. Calling "max(0, i)" conveniently returns\nthe proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. 
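By way of illustration, a toy wrapper type (the class name and the unit are invented) might implement a few of these methods, return "NotImplemented" for operands it does not understand, and alias "__truediv__()" to "__div__()" so that it behaves the same with and without "from __future__ import division":

   class Meters(object):
       def __init__(self, value):
           self.value = float(value)
       def __add__(self, other):
           if isinstance(other, Meters):
               return Meters(self.value + other.value)
           return NotImplemented          # let the other operand try its __radd__()
       __radd__ = __add__                 # addition is symmetric here
       def __div__(self, other):          # used when true division is not in effect
           if isinstance(other, (int, long, float)):
               return Meters(self.value / other)
           return NotImplemented
       __truediv__ = __div__              # same behaviour under true division
       def __repr__(self):
           return 'Meters(%r)' % self.value

   print Meters(3) + Meters(4)            # Meters(7.0)
   print Meters(10) / 4                   # Meters(2.5)
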
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from "object") never invoke the\n "__coerce__()" method in response to a binary operator; the only\n time "__coerce__()" is invoked is when the built-in function\n "coerce()" is called.\n\n* For most intents and purposes, an operator that returns\n "NotImplemented" is treated the same as one that is not implemented\n at all.\n\n* Below, "__op__()" and "__rop__()" are used to signify the generic\n method names corresponding to an operator; "__iop__()" is used for\n the corresponding in-place operator. For example, for the operator\n \'"+"\', "__add__()" and "__radd__()" are used for the left and right\n variant of the binary operator, and "__iadd__()" for the in-place\n variant.\n\n* For objects *x* and *y*, first "x.__op__(y)" is tried. If this is\n not implemented or returns "NotImplemented", "y.__rop__(x)" is\n tried. If this is also not implemented or returns "NotImplemented",\n a "TypeError" exception is raised. But see the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s "__rop__()" method, the right operand\'s "__rop__()"\n method is tried *before* the left operand\'s "__op__()" method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s "__op__()" method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is\n called before that type\'s "__op__()" or "__rop__()" method is\n called, but no sooner. 
If the coercion returns an object of a\n different type for the operand whose coercion is invoked, part of\n the process is redone using the new object.\n\n* When an in-place operator (like \'"+="\') is used, if the left\n operand implements "__iop__()", it is invoked without any coercion.\n When the operation falls back to "__op__()" and/or "__rop__()", the\n normal coercion rules apply.\n\n* In "x + y", if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In "x * y", if one operand is a sequence that implements sequence\n repetition, and the other is an integer ("int" or "long"), sequence\n repetition is invoked.\n\n* Rich comparisons (implemented by methods "__eq__()" and so on)\n never use coercion. Three-way comparison (implemented by\n "__cmp__()") does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types "int",\n "long", "float", and "complex" do not use coercion. All these types\n implement a "__coerce__()" method, for use by the built-in\n "coerce()" function.\n\n Changed in version 2.7: The complex type no longer makes implicit\n calls to the "__coerce__()" method for mixed-type binary arithmetic\n operations.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. 
This is the case\nregardless of whether the method is being looked up explicitly as in\n"x.__getitem__(i)" or implicitly as in "x[i]".\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n',
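Looking back at the "__enter__()"/"__exit__()" protocol described under "With Statement Context Managers" above, a minimal sketch could look like the following (the class name is invented, and Python 2.6 or later is assumed so that "with" needs no "__future__" import):

   import time

   class Timed(object):
       """Report how long the managed block took; exceptions are not suppressed."""
       def __enter__(self):
           self._start = time.time()
           return self                    # bound to the "as" target, if any
       def __exit__(self, exc_type, exc_value, traceback):
           print 'block took %.3f seconds' % (time.time() - self._start)
           return False                   # a false value lets any exception propagate

   with Timed():
       sum(xrange(1000000))
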
71 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "Ellipsis". It is used to indicate the presence of the "..." syntax\n in a slice. Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception "OverflowError" is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. 
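A short interactive illustration of the integer behaviour just described (CPython is assumed; the exact value of "sys.maxint" depends on the platform):

   >>> import sys
   >>> type(sys.maxint + 1)    # results outside the plain-integer range become longs
   <type 'long'>
   >>> issubclass(bool, int)   # Booleans are a subtype of plain integers
   True
   >>> True + True
   2
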
The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these are\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex"\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions "chr()" and "ord()" convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0--127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. 
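For instance, a quick interactive check of the "chr()"/"ord()" mapping between characters and byte values mentioned above:

   >>> ord('A'), chr(65)
   (65, 'A')
   >>> [ord(c) for c in 'Hi!']
   [72, 105, 33]
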
The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions "chr()" and "ord()" implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in "sys.maxunicode", and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions "unichr()" and\n "ord()" convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method "encode()" and the built-\n in function "unicode()".\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. 
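For instance (the key and value below are invented), a frozenset can serve directly as a dictionary key, and two frozensets with equal contents select the same entry:

   >>> f = frozenset(['read', 'write'])
   >>> permissions = {f: 'editors'}
   >>> permissions[frozenset(['write', 'read'])]
   'editors'
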
As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section Dictionary displays).\n\n The extension modules "dbm", "gdbm", and "bsddb" provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section Calls) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section Function definitions). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | "__doc__" "func_doc" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__name__" "func_name" | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | "func_defaults" | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value. | |\n +-------------------------+---------------------------------+-------------+\n | "__code__" "func_code" | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | "func_globals" | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | "__dict__" "func_dict" | The namespace supporting | Writable |\n | | arbitrary function attributes. 
| |\n +-------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | "func_closure" | contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: "func_name" is now writable.\n\n Changed in version 2.6: The double-underscore attributes\n "__closure__", "__code__", "__defaults__", and "__globals__"\n were introduced as aliases for the corresponding "func_*"\n attributes for forwards compatibility with Python 3.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or "None") and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: "im_self" is the class instance\n object, "im_func" is the function object; "im_class" is the\n class of "im_self" for bound methods or the class that asked for\n the method for unbound methods; "__doc__" is the method\'s\n documentation (same as "im_func.__doc__"); "__name__" is the\n method name (same as "im_func.__name__"); "__module__" is the\n name of the module the method was defined in, or "None" if\n unavailable.\n\n Changed in version 2.2: "im_self" used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For Python 3 forward-compatibility,\n "im_func" is also available as "__func__", and "im_self" as\n "__self__".\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its "im_self"\n attribute is "None" and the method object is said to be unbound.\n When one is created by retrieving a user-defined function object\n from a class via one of its instances, its "im_self" attribute\n is the instance, and the method object is said to be bound. 
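A short interactive illustration of both cases (the class and method names are invented):

   >>> class C(object):
   ...     def f(self):
   ...         return 'hello'
   ...
   >>> C.f.im_self is None         # retrieved from the class: unbound
   True
   >>> x = C()
   >>> x.f.im_self is x            # retrieved via an instance: bound
   True
   >>> x.f.im_func is C.__dict__['f']
   True
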
In\n either case, the new method\'s "im_class" attribute is the class\n from which the retrieval takes place, and its "im_func"\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "im_func"\n attribute of the new instance is not the original method object\n but its "im_func" attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its "im_self"\n attribute is the class itself, and its "im_func" attribute is\n the function object underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function ("im_func") is called, with the restriction\n that the first argument must be an instance of the proper class\n ("im_class") or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function ("im_func") is called, inserting the class\n instance ("im_self") in front of the argument list. For\n instance, when "C" is a class which contains a definition for a\n function "f()", and "x" is an instance of "C", calling "x.f(1)"\n is equivalent to calling "C.f(x, 1)".\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in "im_self" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important t
[all...]
/external/robolectric/v3/runtime/
H A Dandroid-all-4.2.2_r1.2-robolectric-0.jarMETA-INF/ META-INF/MANIFEST.MF android/ android/accessibilityservice/ android/accessibilityservice/AccessibilityService$1.class ...
H A Dandroid-all-4.3_r2-robolectric-0.jarMETA-INF/ META-INF/MANIFEST.MF android/ android/accessibilityservice/ android/accessibilityservice/AccessibilityService$1.class ...
H A Dandroid-all-4.1.2_r1-robolectric-0.jarMETA-INF/ META-INF/MANIFEST.MF android/ android/accessibilityservice/ android/accessibilityservice/AccessibilityService$1.class ...
H A Dandroid-all-4.4_r1-robolectric-1.jarMETA-INF/ META-INF/MANIFEST.MF com/ com/google/ com/google/android/ com/google/android/collect/ ...
H A Dandroid-all-5.0.0_r2-robolectric-1.jarMETA-INF/ META-INF/MANIFEST.MF com/ com/google/ com/google/android/ com/google/android/collect/ ...
H A Dandroid-all-5.1.1_r9-robolectric-1.jarMETA-INF/ META-INF/MANIFEST.MF com/ com/google/ com/google/android/ com/google/android/collect/ ...

Completed in 396 milliseconds