/* frameobject.c revision 5a57ade58ec5bee85db41b8ce1340ff077781b65 */
/* Frame object implementation */

#include "Python.h"

#include "code.h"
#include "frameobject.h"
#include "opcode.h"
#include "structmember.h"

/* Byte offset of a PyFrameObject field, for the member table below. */
#define OFF(x) offsetof(PyFrameObject, x)

/* Read-only frame attributes served directly from PyFrameObject fields. */
static PyMemberDef frame_memberlist[] = {
    {"f_back",          T_OBJECT,       OFF(f_back),        READONLY},
    {"f_code",          T_OBJECT,       OFF(f_code),        READONLY},
    {"f_builtins",      T_OBJECT,       OFF(f_builtins),    READONLY},
    {"f_globals",       T_OBJECT,       OFF(f_globals),     READONLY},
    {"f_lasti",         T_INT,          OFF(f_lasti),       READONLY},
    {NULL}      /* Sentinel */
};

/* Getter for f_locals: merge the fast-locals array into the f_locals
   dict first, then return a new reference to that dict (NULL with an
   exception set on failure). */
static PyObject *
frame_getlocals(PyFrameObject *f, void *closure)
{
    if (PyFrame_FastToLocalsWithError(f) < 0)
        return NULL;
    Py_INCREF(f->f_locals);
    return f->f_locals;
}

/* Return the frame's current source line.  While a trace function is
   installed f_lineno is kept accurate (see frame_settrace); otherwise
   the line is computed from the current bytecode offset. */
int
PyFrame_GetLineNumber(PyFrameObject *f)
{
    if (f->f_trace)
        return f->f_lineno;
    else
        return PyCode_Addr2Line(f->f_code, f->f_lasti);
}

/* Getter for f_lineno. */
static PyObject *
frame_getlineno(PyFrameObject *f, void *closure)
{
    return PyLong_FromLong(PyFrame_GetLineNumber(f));
}

/* Setter for f_lineno - you can set f_lineno from within a trace function in
 * order to jump to a given line of code, subject to some restrictions.  Most
 * lines are OK to jump to because they don't make any assumptions about the
 * state of the stack (obvious because you could remove the line and the code
 * would still work without any stack errors), but there are some constructs
 * that limit jumping:
 *
 *  o Lines with an 'except' statement on them can't be jumped to, because
 *    they expect an exception to be on the top of the stack.
 *  o Lines that live in a 'finally' block can't be jumped from or to, since
 *    the END_FINALLY expects to clean up the stack after the 'try' block.
 *  o 'try'/'for'/'while' blocks can't be jumped into because the blockstack
 *    needs to be set up before their code runs, and for 'for' loops the
 *    iterator needs to be on the stack.
59 */ 60static int 61frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno) 62{ 63 int new_lineno = 0; /* The new value of f_lineno */ 64 long l_new_lineno; 65 int overflow; 66 int new_lasti = 0; /* The new value of f_lasti */ 67 int new_iblock = 0; /* The new value of f_iblock */ 68 unsigned char *code = NULL; /* The bytecode for the frame... */ 69 Py_ssize_t code_len = 0; /* ...and its length */ 70 unsigned char *lnotab = NULL; /* Iterating over co_lnotab */ 71 Py_ssize_t lnotab_len = 0; /* (ditto) */ 72 int offset = 0; /* (ditto) */ 73 int line = 0; /* (ditto) */ 74 int addr = 0; /* (ditto) */ 75 int min_addr = 0; /* Scanning the SETUPs and POPs */ 76 int max_addr = 0; /* (ditto) */ 77 int delta_iblock = 0; /* (ditto) */ 78 int min_delta_iblock = 0; /* (ditto) */ 79 int min_iblock = 0; /* (ditto) */ 80 int f_lasti_setup_addr = 0; /* Policing no-jump-into-finally */ 81 int new_lasti_setup_addr = 0; /* (ditto) */ 82 int blockstack[CO_MAXBLOCKS]; /* Walking the 'finally' blocks */ 83 int in_finally[CO_MAXBLOCKS]; /* (ditto) */ 84 int blockstack_top = 0; /* (ditto) */ 85 unsigned char setup_op = 0; /* (ditto) */ 86 87 /* f_lineno must be an integer. */ 88 if (!PyLong_CheckExact(p_new_lineno)) { 89 PyErr_SetString(PyExc_ValueError, 90 "lineno must be an integer"); 91 return -1; 92 } 93 94 /* You can only do this from within a trace function, not via 95 * _getframe or similar hackery. */ 96 if (!f->f_trace) 97 { 98 PyErr_Format(PyExc_ValueError, 99 "f_lineno can only be set by a" 100 " line trace function"); 101 return -1; 102 } 103 104 /* Fail if the line comes before the start of the code block. 
*/ 105 l_new_lineno = PyLong_AsLongAndOverflow(p_new_lineno, &overflow); 106 if (overflow 107#if SIZEOF_LONG > SIZEOF_INT 108 || l_new_lineno > INT_MAX 109 || l_new_lineno < INT_MIN 110#endif 111 ) { 112 PyErr_SetString(PyExc_ValueError, 113 "lineno out of range"); 114 return -1; 115 } 116 new_lineno = (int)l_new_lineno; 117 118 if (new_lineno < f->f_code->co_firstlineno) { 119 PyErr_Format(PyExc_ValueError, 120 "line %d comes before the current code block", 121 new_lineno); 122 return -1; 123 } 124 else if (new_lineno == f->f_code->co_firstlineno) { 125 new_lasti = 0; 126 new_lineno = f->f_code->co_firstlineno; 127 } 128 else { 129 /* Find the bytecode offset for the start of the given 130 * line, or the first code-owning line after it. */ 131 char *tmp; 132 PyBytes_AsStringAndSize(f->f_code->co_lnotab, 133 &tmp, &lnotab_len); 134 lnotab = (unsigned char *) tmp; 135 addr = 0; 136 line = f->f_code->co_firstlineno; 137 new_lasti = -1; 138 for (offset = 0; offset < lnotab_len; offset += 2) { 139 addr += lnotab[offset]; 140 line += lnotab[offset+1]; 141 if (line >= new_lineno) { 142 new_lasti = addr; 143 new_lineno = line; 144 break; 145 } 146 } 147 } 148 149 /* If we didn't reach the requested line, return an error. */ 150 if (new_lasti == -1) { 151 PyErr_Format(PyExc_ValueError, 152 "line %d comes after the current code block", 153 new_lineno); 154 return -1; 155 } 156 157 /* We're now ready to look at the bytecode. */ 158 PyBytes_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len); 159 min_addr = Py_MIN(new_lasti, f->f_lasti); 160 max_addr = Py_MAX(new_lasti, f->f_lasti); 161 162 /* You can't jump onto a line with an 'except' statement on it - 163 * they expect to have an exception on the top of the stack, which 164 * won't be true if you jump to them. 
They always start with code 165 * that either pops the exception using POP_TOP (plain 'except:' 166 * lines do this) or duplicates the exception on the stack using 167 * DUP_TOP (if there's an exception type specified). See compile.c, 168 * 'com_try_except' for the full details. There aren't any other 169 * cases (AFAIK) where a line's code can start with DUP_TOP or 170 * POP_TOP, but if any ever appear, they'll be subject to the same 171 * restriction (but with a different error message). */ 172 if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) { 173 PyErr_SetString(PyExc_ValueError, 174 "can't jump to 'except' line as there's no exception"); 175 return -1; 176 } 177 178 /* You can't jump into or out of a 'finally' block because the 'try' 179 * block leaves something on the stack for the END_FINALLY to clean 180 * up. So we walk the bytecode, maintaining a simulated blockstack. 181 * When we reach the old or new address and it's in a 'finally' block 182 * we note the address of the corresponding SETUP_FINALLY. The jump 183 * is only legal if neither address is in a 'finally' block or 184 * they're both in the same one. 'blockstack' is a stack of the 185 * bytecode addresses of the SETUP_X opcodes, and 'in_finally' tracks 186 * whether we're in a 'finally' block at each blockstack level. 
*/ 187 f_lasti_setup_addr = -1; 188 new_lasti_setup_addr = -1; 189 memset(blockstack, '\0', sizeof(blockstack)); 190 memset(in_finally, '\0', sizeof(in_finally)); 191 blockstack_top = 0; 192 for (addr = 0; addr < code_len; addr++) { 193 unsigned char op = code[addr]; 194 switch (op) { 195 case SETUP_LOOP: 196 case SETUP_EXCEPT: 197 case SETUP_FINALLY: 198 case SETUP_WITH: 199 case SETUP_ASYNC_WITH: 200 blockstack[blockstack_top++] = addr; 201 in_finally[blockstack_top-1] = 0; 202 break; 203 204 case POP_BLOCK: 205 assert(blockstack_top > 0); 206 setup_op = code[blockstack[blockstack_top-1]]; 207 if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH 208 || setup_op == SETUP_ASYNC_WITH) { 209 in_finally[blockstack_top-1] = 1; 210 } 211 else { 212 blockstack_top--; 213 } 214 break; 215 216 case END_FINALLY: 217 /* Ignore END_FINALLYs for SETUP_EXCEPTs - they exist 218 * in the bytecode but don't correspond to an actual 219 * 'finally' block. (If blockstack_top is 0, we must 220 * be seeing such an END_FINALLY.) */ 221 if (blockstack_top > 0) { 222 setup_op = code[blockstack[blockstack_top-1]]; 223 if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH 224 || setup_op == SETUP_ASYNC_WITH) { 225 blockstack_top--; 226 } 227 } 228 break; 229 } 230 231 /* For the addresses we're interested in, see whether they're 232 * within a 'finally' block and if so, remember the address 233 * of the SETUP_FINALLY. */ 234 if (addr == new_lasti || addr == f->f_lasti) { 235 int i = 0; 236 int setup_addr = -1; 237 for (i = blockstack_top-1; i >= 0; i--) { 238 if (in_finally[i]) { 239 setup_addr = blockstack[i]; 240 break; 241 } 242 } 243 244 if (setup_addr != -1) { 245 if (addr == new_lasti) { 246 new_lasti_setup_addr = setup_addr; 247 } 248 249 if (addr == f->f_lasti) { 250 f_lasti_setup_addr = setup_addr; 251 } 252 } 253 } 254 255 if (op >= HAVE_ARGUMENT) { 256 addr += 2; 257 } 258 } 259 260 /* Verify that the blockstack tracking code didn't get lost. 
*/ 261 assert(blockstack_top == 0); 262 263 /* After all that, are we jumping into / out of a 'finally' block? */ 264 if (new_lasti_setup_addr != f_lasti_setup_addr) { 265 PyErr_SetString(PyExc_ValueError, 266 "can't jump into or out of a 'finally' block"); 267 return -1; 268 } 269 270 271 /* Police block-jumping (you can't jump into the middle of a block) 272 * and ensure that the blockstack finishes up in a sensible state (by 273 * popping any blocks we're jumping out of). We look at all the 274 * blockstack operations between the current position and the new 275 * one, and keep track of how many blocks we drop out of on the way. 276 * By also keeping track of the lowest blockstack position we see, we 277 * can tell whether the jump goes into any blocks without coming out 278 * again - in that case we raise an exception below. */ 279 delta_iblock = 0; 280 for (addr = min_addr; addr < max_addr; addr++) { 281 unsigned char op = code[addr]; 282 switch (op) { 283 case SETUP_LOOP: 284 case SETUP_EXCEPT: 285 case SETUP_FINALLY: 286 case SETUP_WITH: 287 case SETUP_ASYNC_WITH: 288 delta_iblock++; 289 break; 290 291 case POP_BLOCK: 292 delta_iblock--; 293 break; 294 } 295 296 min_delta_iblock = Py_MIN(min_delta_iblock, delta_iblock); 297 298 if (op >= HAVE_ARGUMENT) { 299 addr += 2; 300 } 301 } 302 303 /* Derive the absolute iblock values from the deltas. */ 304 min_iblock = f->f_iblock + min_delta_iblock; 305 if (new_lasti > f->f_lasti) { 306 /* Forwards jump. */ 307 new_iblock = f->f_iblock + delta_iblock; 308 } 309 else { 310 /* Backwards jump. */ 311 new_iblock = f->f_iblock - delta_iblock; 312 } 313 314 /* Are we jumping into a block? */ 315 if (new_iblock > min_iblock) { 316 PyErr_SetString(PyExc_ValueError, 317 "can't jump into the middle of a block"); 318 return -1; 319 } 320 321 /* Pop any blocks that we're jumping out of. 
*/ 322 while (f->f_iblock > new_iblock) { 323 PyTryBlock *b = &f->f_blockstack[--f->f_iblock]; 324 while ((f->f_stacktop - f->f_valuestack) > b->b_level) { 325 PyObject *v = (*--f->f_stacktop); 326 Py_DECREF(v); 327 } 328 } 329 330 /* Finally set the new f_lineno and f_lasti and return OK. */ 331 f->f_lineno = new_lineno; 332 f->f_lasti = new_lasti; 333 return 0; 334} 335 336static PyObject * 337frame_gettrace(PyFrameObject *f, void *closure) 338{ 339 PyObject* trace = f->f_trace; 340 341 if (trace == NULL) 342 trace = Py_None; 343 344 Py_INCREF(trace); 345 346 return trace; 347} 348 349static int 350frame_settrace(PyFrameObject *f, PyObject* v, void *closure) 351{ 352 PyObject* old_value; 353 354 /* We rely on f_lineno being accurate when f_trace is set. */ 355 f->f_lineno = PyFrame_GetLineNumber(f); 356 357 old_value = f->f_trace; 358 Py_XINCREF(v); 359 f->f_trace = v; 360 Py_XDECREF(old_value); 361 362 return 0; 363} 364 365 366static PyGetSetDef frame_getsetlist[] = { 367 {"f_locals", (getter)frame_getlocals, NULL, NULL}, 368 {"f_lineno", (getter)frame_getlineno, 369 (setter)frame_setlineno, NULL}, 370 {"f_trace", (getter)frame_gettrace, (setter)frame_settrace, NULL}, 371 {0} 372}; 373 374/* Stack frames are allocated and deallocated at a considerable rate. 375 In an attempt to improve the speed of function calls, we: 376 377 1. Hold a single "zombie" frame on each code object. This retains 378 the allocated and initialised frame object from an invocation of 379 the code object. The zombie is reanimated the next time we need a 380 frame object for that code object. Doing this saves the malloc/ 381 realloc required when using a free_list frame that isn't the 382 correct size. It also saves some field initialisation. 
383 384 In zombie mode, no field of PyFrameObject holds a reference, but 385 the following fields are still valid: 386 387 * ob_type, ob_size, f_code, f_valuestack; 388 389 * f_locals, f_trace, 390 f_exc_type, f_exc_value, f_exc_traceback are NULL; 391 392 * f_localsplus does not require re-allocation and 393 the local variables in f_localsplus are NULL. 394 395 2. We also maintain a separate free list of stack frames (just like 396 floats are allocated in a special way -- see floatobject.c). When 397 a stack frame is on the free list, only the following members have 398 a meaning: 399 ob_type == &Frametype 400 f_back next item on free list, or NULL 401 f_stacksize size of value stack 402 ob_size size of localsplus 403 Note that the value and block stacks are preserved -- this can save 404 another malloc() call or two (and two free() calls as well!). 405 Also note that, unlike for integers, each frame object is a 406 malloc'ed object in its own right -- it is only the actual calls to 407 malloc() that we are trying to save here, not the administration. 408 After all, while a typical program may make millions of calls, a 409 call depth of more than 20 or 30 is probably already exceptional 410 unless the program contains run-away recursion. I hope. 411 412 Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on 413 free_list. Else programs creating lots of cyclic trash involving 414 frames could provoke free_list into growing without bound. 
*/

/* Singly linked free list of recycled frames, chained through f_back. */
static PyFrameObject *free_list = NULL;
static int numfree = 0;         /* number of frames currently in free_list */
/* max value for numfree */
#define PyFrame_MAXFREELIST 200

/* Deallocate a frame: drop every reference it holds, then recycle the
   memory as the code object's zombie frame, onto the free list, or back
   to the GC allocator, in that order of preference.  Note that f_code
   is deliberately not cleared: a zombie frame keeps f_code valid
   (without owning a reference) so PyFrame_New can reanimate it. */
static void
frame_dealloc(PyFrameObject *f)
{
    PyObject **p, **valuestack;
    PyCodeObject *co;

    PyObject_GC_UnTrack(f);
    Py_TRASHCAN_SAFE_BEGIN(f)
    /* Kill all local variables */
    valuestack = f->f_valuestack;
    for (p = f->f_localsplus; p < valuestack; p++)
        Py_CLEAR(*p);

    /* Free stack */
    if (f->f_stacktop != NULL) {
        for (p = valuestack; p < f->f_stacktop; p++)
            Py_XDECREF(*p);
    }

    Py_XDECREF(f->f_back);
    Py_DECREF(f->f_builtins);
    Py_DECREF(f->f_globals);
    Py_CLEAR(f->f_locals);
    Py_CLEAR(f->f_trace);
    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);

    co = f->f_code;
    if (co->co_zombieframe == NULL)
        co->co_zombieframe = f;
    else if (numfree < PyFrame_MAXFREELIST) {
        ++numfree;
        f->f_back = free_list;
        free_list = f;
    }
    else
        PyObject_GC_Del(f);

    /* Dropped last: the zombie/free-list paths above reuse f, and co
       must stay alive while f->f_code still points at it. */
    Py_DECREF(co);
    Py_TRASHCAN_SAFE_END(f)
}

/* GC traversal: visit every object the frame references, including the
   fast locals/cells/frees and the live portion of the value stack. */
static int
frame_traverse(PyFrameObject *f, visitproc visit, void *arg)
{
    PyObject **fastlocals, **p;
    Py_ssize_t i, slots;

    Py_VISIT(f->f_back);
    Py_VISIT(f->f_code);
    Py_VISIT(f->f_builtins);
    Py_VISIT(f->f_globals);
    Py_VISIT(f->f_locals);
    Py_VISIT(f->f_trace);
    Py_VISIT(f->f_exc_type);
    Py_VISIT(f->f_exc_value);
    Py_VISIT(f->f_exc_traceback);

    /* locals */
    slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_VISIT(*fastlocals);

    /* stack */
    if (f->f_stacktop != NULL) {
        for (p = f->f_valuestack; p < f->f_stacktop; p++)
            Py_VISIT(*p);
    }
    return 0;
}

/* Clear most references held by the frame (GC tp_clear and the
   workhorse of frame.clear()).  Leaves f_code/globals/builtins alone. */
static void
frame_tp_clear(PyFrameObject *f)
{
    PyObject **fastlocals, **p, **oldtop;
    Py_ssize_t i, slots;

    /* Before anything else, make sure that this frame is clearly marked
     * as being defunct!  Else, e.g., a generator reachable from this
     * frame may also point to this frame, believe itself to still be
     * active, and try cleaning up this frame again.
     */
    oldtop = f->f_stacktop;
    f->f_stacktop = NULL;
    f->f_executing = 0;

    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);
    Py_CLEAR(f->f_trace);

    /* locals */
    slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_CLEAR(*fastlocals);

    /* stack */
    if (oldtop != NULL) {
        for (p = f->f_valuestack; p < oldtop; p++)
            Py_CLEAR(*p);
    }
}

/* frame.clear() entry point: refuse to clear a frame that is currently
   executing; finalize a suspended generator's frame via its generator
   first, then drop the references. */
static PyObject *
frame_clear(PyFrameObject *f)
{
    if (f->f_executing) {
        PyErr_SetString(PyExc_RuntimeError,
                        "cannot clear an executing frame");
        return NULL;
    }
    if (f->f_gen) {
        _PyGen_Finalize(f->f_gen);
        assert(f->f_gen == NULL);
    }
    frame_tp_clear(f);
    Py_RETURN_NONE;
}

PyDoc_STRVAR(clear__doc__,
"F.clear(): clear most references held by the frame");

/* frame.__sizeof__(): account for the variable-size localsplus area. */
static PyObject *
frame_sizeof(PyFrameObject *f)
{
    Py_ssize_t res, extras, ncells, nfrees;

    ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars);
    nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars);
    extras = f->f_code->co_stacksize + f->f_code->co_nlocals +
        ncells + nfrees;
    /* subtract one as it is already included in PyFrameObject */
    res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *);

    return PyLong_FromSsize_t(res);
}

PyDoc_STRVAR(sizeof__doc__,
"F.__sizeof__() -> size of F in memory, in bytes");

static PyMethodDef frame_methods[] = {
    {"clear",           (PyCFunction)frame_clear,       METH_NOARGS,
     clear__doc__},
    {"__sizeof__",      (PyCFunction)frame_sizeof,      METH_NOARGS,
     sizeof__doc__},
    {NULL,              NULL}   /* sentinel */
};

PyTypeObject PyFrame_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "frame",
    sizeof(PyFrameObject),
    sizeof(PyObject *),
    (destructor)frame_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_reserved */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)frame_traverse,               /* tp_traverse */
    (inquiry)frame_tp_clear,                    /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    frame_methods,                              /* tp_methods */
    frame_memberlist,                           /* tp_members */
    frame_getsetlist,                           /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
};

_Py_IDENTIFIER(__builtins__);

int _PyFrame_Init()
{
    /* Before, PyId___builtins__ was a string created explicitly in
       this function. Now there is nothing to initialize anymore, but
       the function is kept for backward compatibility.
 */
    return 1;
}

/* Create a new frame for executing 'code' with the given globals and
   (optional) locals.  Reuses the code object's zombie frame or a
   free-list frame when possible; returns NULL with an exception set on
   failure. */
PyFrameObject *
PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
            PyObject *locals)
{
    PyFrameObject *back = tstate->frame;
    PyFrameObject *f;
    PyObject *builtins;
    Py_ssize_t i;

#ifdef Py_DEBUG
    if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
        (locals != NULL && !PyMapping_Check(locals))) {
        PyErr_BadInternalCall();
        return NULL;
    }
#endif
    if (back == NULL || back->f_globals != globals) {
        builtins = _PyDict_GetItemId(globals, &PyId___builtins__);
        if (builtins) {
            if (PyModule_Check(builtins)) {
                builtins = PyModule_GetDict(builtins);
                assert(builtins != NULL);
            }
        }
        if (builtins == NULL) {
            /* No builtins!              Make up a minimal one
               Give them 'None', at least. */
            /* NOTE(review): if PyDict_SetItemString fails here, the
               freshly created builtins dict appears to leak - confirm
               and fix upstream. */
            builtins = PyDict_New();
            if (builtins == NULL ||
                PyDict_SetItemString(
                    builtins, "None", Py_None) < 0)
                return NULL;
        }
        else
            Py_INCREF(builtins);

    }
    else {
        /* If we share the globals, we share the builtins.
           Save a lookup and a call. */
        builtins = back->f_builtins;
        assert(builtins != NULL);
        Py_INCREF(builtins);
    }
    if (code->co_zombieframe != NULL) {
        /* Reanimate the zombie: already sized and mostly initialised. */
        f = code->co_zombieframe;
        code->co_zombieframe = NULL;
        _Py_NewReference((PyObject *)f);
        assert(f->f_code == code);
    }
    else {
        Py_ssize_t extras, ncells, nfrees;
        ncells = PyTuple_GET_SIZE(code->co_cellvars);
        nfrees = PyTuple_GET_SIZE(code->co_freevars);
        extras = code->co_stacksize + code->co_nlocals + ncells +
            nfrees;
        if (free_list == NULL) {
            f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
            extras);
            if (f == NULL) {
                Py_DECREF(builtins);
                return NULL;
            }
        }
        else {
            /* Pop a frame off the free list, growing it if it is too
               small for this code object. */
            assert(numfree > 0);
            --numfree;
            f = free_list;
            free_list = free_list->f_back;
            if (Py_SIZE(f) < extras) {
                PyFrameObject *new_f = PyObject_GC_Resize(PyFrameObject, f, extras);
                if (new_f == NULL) {
                    PyObject_GC_Del(f);
                    Py_DECREF(builtins);
                    return NULL;
                }
                f = new_f;
            }
            _Py_NewReference((PyObject *)f);
        }

        f->f_code = code;
        /* Value stack begins right after the locals/cells/frees. */
        extras = code->co_nlocals + ncells + nfrees;
        f->f_valuestack = f->f_localsplus + extras;
        for (i=0; i<extras; i++)
            f->f_localsplus[i] = NULL;
        f->f_locals = NULL;
        f->f_trace = NULL;
        f->f_exc_type = f->f_exc_value = f->f_exc_traceback = NULL;
    }
    f->f_stacktop = f->f_valuestack;
    f->f_builtins = builtins;
    Py_XINCREF(back);
    f->f_back = back;
    Py_INCREF(code);
    Py_INCREF(globals);
    f->f_globals = globals;
    /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
    if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
        (CO_NEWLOCALS | CO_OPTIMIZED))
        ; /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
    else if (code->co_flags & CO_NEWLOCALS) {
        locals = PyDict_New();
        if (locals == NULL) {
            Py_DECREF(f);
            return NULL;
        }
        f->f_locals = locals;
    }
    else {
        if (locals == NULL)
            locals = globals;
        Py_INCREF(locals);
        f->f_locals = locals;
    }

    f->f_lasti = -1;
    f->f_lineno = code->co_firstlineno;
    f->f_iblock = 0;
    f->f_executing = 0;
    f->f_gen = NULL;

    _PyObject_GC_TRACK(f);
    return f;
}

/* Block management */

/* Push a try-block descriptor; fatal error on blockstack overflow
   (the compiler guarantees at most CO_MAXBLOCKS nested blocks). */
void
PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level)
{
    PyTryBlock *b;
    if (f->f_iblock >= CO_MAXBLOCKS)
        Py_FatalError("XXX block stack overflow");
    b = &f->f_blockstack[f->f_iblock++];
    b->b_type = type;
    b->b_level = level;
    b->b_handler = handler;
}

/* Pop and return the innermost try-block descriptor; fatal error on
   underflow. */
PyTryBlock *
PyFrame_BlockPop(PyFrameObject *f)
{
    PyTryBlock *b;
    if (f->f_iblock <= 0)
        Py_FatalError("XXX block stack underflow");
    b = &f->f_blockstack[--f->f_iblock];
    return b;
}

/* Convert between "fast" version of locals and dictionary version.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable with value values[i].  The function copies the first
   nmap variable from map/values into dict.  If values[i] is NULL,
   the variable is deleted from dict.

   If deref is true, then the values being copied are cell variables
   and the value is extracted from the cell variable before being put
   in dict.
779 */ 780 781static int 782map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values, 783 int deref) 784{ 785 Py_ssize_t j; 786 assert(PyTuple_Check(map)); 787 assert(PyDict_Check(dict)); 788 assert(PyTuple_Size(map) >= nmap); 789 for (j = nmap; --j >= 0; ) { 790 PyObject *key = PyTuple_GET_ITEM(map, j); 791 PyObject *value = values[j]; 792 assert(PyUnicode_Check(key)); 793 if (deref && value != NULL) { 794 assert(PyCell_Check(value)); 795 value = PyCell_GET(value); 796 } 797 if (value == NULL) { 798 if (PyObject_DelItem(dict, key) != 0) { 799 if (PyErr_ExceptionMatches(PyExc_KeyError)) 800 PyErr_Clear(); 801 else 802 return -1; 803 } 804 } 805 else { 806 if (PyObject_SetItem(dict, key, value) != 0) 807 return -1; 808 } 809 } 810 return 0; 811} 812 813/* Copy values from the "locals" dict into the fast locals. 814 815 dict is an input argument containing string keys representing 816 variables names and arbitrary PyObject* as values. 817 818 map and values are input arguments. map is a tuple of strings. 819 values is an array of PyObject*. At index i, map[i] is the name of 820 the variable with value values[i]. The function copies the first 821 nmap variable from map/values into dict. If values[i] is NULL, 822 the variable is deleted from dict. 823 824 If deref is true, then the values being copied are cell variables 825 and the value is extracted from the cell variable before being put 826 in dict. If clear is true, then variables in map but not in dict 827 are set to NULL in map; if clear is false, variables missing in 828 dict are ignored. 829 830 Exceptions raised while modifying the dict are silently ignored, 831 because there is no good way to report them. 
*/

/* Helper for PyFrame_LocalsToFast: copy dict entries back into the
   fast-locals (or cells) array per the contract described above.
   Never raises; any pending exception state is managed by the caller. */
static void
dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref, int clear)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = PyObject_GetItem(dict, key);
        assert(PyUnicode_Check(key));
        /* We only care about NULLs if clear is true. */
        if (value == NULL) {
            PyErr_Clear();
            if (!clear)
                continue;
        }
        if (deref) {
            assert(PyCell_Check(values[j]));
            if (PyCell_GET(values[j]) != value) {
                if (PyCell_Set(values[j], value) < 0)
                    PyErr_Clear();
            }
        } else if (values[j] != value) {
            Py_XINCREF(value);
            Py_SETREF(values[j], value);
        }
        Py_XDECREF(value);
    }
}

/* Merge the fast locals (and cells/frees, when appropriate) into
   f->f_locals, creating the dict on first use.  Returns 0 on success,
   -1 with an exception set on failure. */
int
PyFrame_FastToLocalsWithError(PyFrameObject *f)
{
    /* Merge fast locals into f->f_locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyCodeObject *co;
    Py_ssize_t j;
    Py_ssize_t ncells, nfreevars;

    if (f == NULL) {
        PyErr_BadInternalCall();
        return -1;
    }
    locals = f->f_locals;
    if (locals == NULL) {
        locals = f->f_locals = PyDict_New();
        if (locals == NULL)
            return -1;
    }
    co = f->f_code;
    map = co->co_varnames;
    if (!PyTuple_Check(map)) {
        PyErr_Format(PyExc_SystemError,
                     "co_varnames must be a tuple, not %s",
                     Py_TYPE(map)->tp_name);
        return -1;
    }
    fast = f->f_localsplus;
    /* Guard against co_varnames being longer than co_nlocals. */
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals) {
        if (map_to_dict(map, j, locals, fast, 0) < 0)
            return -1;
    }
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        if (map_to_dict(co->co_cellvars, ncells,
                        locals, fast + co->co_nlocals, 1))
            return -1;

        /* If the namespace is unoptimized, then one of the
           following cases applies:
           1. It does not contain free variables, because it
              uses import * or is a top-level namespace.
           2. It is a class namespace.
           We don't want to accidentally copy free variables
           into the locals dict used by the class.
        */
        if (co->co_flags & CO_OPTIMIZED) {
            if (map_to_dict(co->co_freevars, nfreevars,
                            locals, fast + co->co_nlocals + ncells, 1) < 0)
                return -1;
        }
    }
    return 0;
}

/* Legacy wrapper: same as PyFrame_FastToLocalsWithError but swallows
   any error, for callers that cannot report one. */
void
PyFrame_FastToLocals(PyFrameObject *f)
{
    int res;

    assert(!PyErr_Occurred());

    res = PyFrame_FastToLocalsWithError(f);
    if (res < 0)
        PyErr_Clear();
}

/* Merge f->f_locals back into the fast locals.  Preserves any pending
   exception across the dict lookups (which may clear errors). */
void
PyFrame_LocalsToFast(PyFrameObject *f, int clear)
{
    /* Merge f->f_locals into fast locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    Py_ssize_t ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    co = f->f_code;
    map = co->co_varnames;
    if (locals == NULL)
        return;
    if (!PyTuple_Check(map))
        return;
    /* Save the live exception state; dict_to_map clears errors freely. */
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        dict_to_map(co->co_varnames, j, locals, fast, 0, clear);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        dict_to_map(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1, clear);
        /* Same test as in PyFrame_FastToLocals() above. */
        if (co->co_flags & CO_OPTIMIZED) {
            dict_to_map(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1,
                        clear);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}

/* Clear out the free list */
int
PyFrame_ClearFreeList(void)
{
    int freelist_size = numfree;

    while (free_list != NULL) {
        PyFrameObject *f = free_list;
        free_list = free_list->f_back;
        PyObject_GC_Del(f);
        --numfree;
    }
    assert(numfree == 0);
    return freelist_size;
}

/* Interpreter-shutdown hook: release the frame free list. */
void
PyFrame_Fini(void)
{
    (void)PyFrame_ClearFreeList();
}

/* Print summary info about the state of the optimized allocator */
void
_PyFrame_DebugMallocStats(FILE *out)
{
    _PyDebugAllocatorStats(out,
                           "free PyFrameObject",
                           numfree, sizeof(PyFrameObject));
}