/* frameobject.c revision 07e9e380f93e334f8f11d3ff9f42bf7c68b27d3a */
/* Frame object implementation */

#include "Python.h"

#include "code.h"
#include "frameobject.h"
#include "opcode.h"
#include "structmember.h"

#define OFF(x) offsetof(PyFrameObject, x)

/* Frame members exposed to Python as simple read-only attributes.
   f_locals, f_lineno and f_trace need custom get/set logic and are
   handled in frame_getsetlist below instead. */
static PyMemberDef frame_memberlist[] = {
    {"f_back",          T_OBJECT,       OFF(f_back),        READONLY},
    {"f_code",          T_OBJECT,       OFF(f_code),        READONLY},
    {"f_builtins",      T_OBJECT,       OFF(f_builtins),    READONLY},
    {"f_globals",       T_OBJECT,       OFF(f_globals),     READONLY},
    {"f_lasti",         T_INT,          OFF(f_lasti),       READONLY},
    {NULL}      /* Sentinel */
};

/* Getter for f_locals: merge the fast-locals array into the f_locals
   dict so the caller sees current values, then return the dict.
   Returns a new reference, or NULL with an exception set. */
static PyObject *
frame_getlocals(PyFrameObject *f, void *closure)
{
    if (PyFrame_FastToLocalsWithError(f) < 0)
        return NULL;
    Py_INCREF(f->f_locals);
    return f->f_locals;
}

/* Return the line number the frame is currently executing.  While a
   trace function is installed f_lineno is kept accurate; otherwise it
   must be derived from f_lasti via the code object's lnotab. */
int
PyFrame_GetLineNumber(PyFrameObject *f)
{
    if (f->f_trace)
        return f->f_lineno;
    else
        return PyCode_Addr2Line(f->f_code, f->f_lasti);
}

static PyObject *
frame_getlineno(PyFrameObject *f, void *closure)
{
    return PyLong_FromLong(PyFrame_GetLineNumber(f));
}

/* Setter for f_lineno - you can set f_lineno from within a trace function in
 * order to jump to a given line of code, subject to some restrictions.  Most
 * lines are OK to jump to because they don't make any assumptions about the
 * state of the stack (obvious because you could remove the line and the code
 * would still work without any stack errors), but there are some constructs
 * that limit jumping:
 *
 *  o Lines with an 'except' statement on them can't be jumped to, because
 *    they expect an exception to be on the top of the stack.
 *  o Lines that live in a 'finally' block can't be jumped from or to, since
 *    the END_FINALLY expects to clean up the stack after the 'try' block.
 *  o 'try'/'for'/'while' blocks can't be jumped into because the blockstack
 *    needs to be set up before their code runs, and for 'for' loops the
 *    iterator needs to be on the stack.
 *
 * Returns 0 on success, -1 (with a ValueError set) on any rejected jump.
 */
static int
frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
{
    int new_lineno = 0;                 /* The new value of f_lineno */
    long l_new_lineno;
    int overflow;
    int new_lasti = 0;                  /* The new value of f_lasti */
    int new_iblock = 0;                 /* The new value of f_iblock */
    unsigned char *code = NULL;         /* The bytecode for the frame... */
    Py_ssize_t code_len = 0;            /* ...and its length */
    unsigned char *lnotab = NULL;       /* Iterating over co_lnotab */
    Py_ssize_t lnotab_len = 0;          /* (ditto) */
    int offset = 0;                     /* (ditto) */
    int line = 0;                       /* (ditto) */
    int addr = 0;                       /* (ditto) */
    int min_addr = 0;                   /* Scanning the SETUPs and POPs */
    int max_addr = 0;                   /* (ditto) */
    int delta_iblock = 0;               /* (ditto) */
    int min_delta_iblock = 0;           /* (ditto) */
    int min_iblock = 0;                 /* (ditto) */
    int f_lasti_setup_addr = 0;         /* Policing no-jump-into-finally */
    int new_lasti_setup_addr = 0;       /* (ditto) */
    int blockstack[CO_MAXBLOCKS];       /* Walking the 'finally' blocks */
    int in_finally[CO_MAXBLOCKS];       /* (ditto) */
    int blockstack_top = 0;             /* (ditto) */
    unsigned char setup_op = 0;         /* (ditto) */

    /* f_lineno must be an integer. */
    if (!PyLong_CheckExact(p_new_lineno)) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno must be an integer");
        return -1;
    }

    /* You can only do this from within a trace function, not via
     * _getframe or similar hackery. */
    if (!f->f_trace)
    {
        PyErr_Format(PyExc_ValueError,
                     "f_lineno can only be set by a"
                     " line trace function");
        return -1;
    }

    /* Fail if the line comes before the start of the code block. */
    l_new_lineno = PyLong_AsLongAndOverflow(p_new_lineno, &overflow);
    if (overflow
#if SIZEOF_LONG > SIZEOF_INT
        || l_new_lineno > INT_MAX
        || l_new_lineno < INT_MIN
#endif
       ) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno out of range");
        return -1;
    }
    new_lineno = (int)l_new_lineno;

    if (new_lineno < f->f_code->co_firstlineno) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes before the current code block",
                     new_lineno);
        return -1;
    }
    else if (new_lineno == f->f_code->co_firstlineno) {
        new_lasti = 0;
        new_lineno = f->f_code->co_firstlineno;
    }
    else {
        /* Find the bytecode offset for the start of the given
         * line, or the first code-owning line after it. */
        char *tmp;
        /* NOTE(review): return value unchecked; co_lnotab is always a
         * bytes object here so this cannot fail in practice -- confirm. */
        PyBytes_AsStringAndSize(f->f_code->co_lnotab,
                                &tmp, &lnotab_len);
        lnotab = (unsigned char *) tmp;
        addr = 0;
        line = f->f_code->co_firstlineno;
        new_lasti = -1;
        /* co_lnotab is a sequence of (addr delta, line delta) byte pairs. */
        for (offset = 0; offset < lnotab_len; offset += 2) {
            addr += lnotab[offset];
            line += lnotab[offset+1];
            if (line >= new_lineno) {
                new_lasti = addr;
                new_lineno = line;
                break;
            }
        }
    }

    /* If we didn't reach the requested line, return an error. */
    if (new_lasti == -1) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes after the current code block",
                     new_lineno);
        return -1;
    }

    /* We're now ready to look at the bytecode. */
    PyBytes_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len);
    min_addr = Py_MIN(new_lasti, f->f_lasti);
    max_addr = Py_MAX(new_lasti, f->f_lasti);

    /* You can't jump onto a line with an 'except' statement on it -
     * they expect to have an exception on the top of the stack, which
     * won't be true if you jump to them.  They always start with code
     * that either pops the exception using POP_TOP (plain 'except:'
     * lines do this) or duplicates the exception on the stack using
     * DUP_TOP (if there's an exception type specified).  See compile.c,
     * 'com_try_except' for the full details.  There aren't any other
     * cases (AFAIK) where a line's code can start with DUP_TOP or
     * POP_TOP, but if any ever appear, they'll be subject to the same
     * restriction (but with a different error message). */
    if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) {
        PyErr_SetString(PyExc_ValueError,
            "can't jump to 'except' line as there's no exception");
        return -1;
    }

    /* You can't jump into or out of a 'finally' block because the 'try'
     * block leaves something on the stack for the END_FINALLY to clean
     * up.  So we walk the bytecode, maintaining a simulated blockstack.
     * When we reach the old or new address and it's in a 'finally' block
     * we note the address of the corresponding SETUP_FINALLY.  The jump
     * is only legal if neither address is in a 'finally' block or
     * they're both in the same one.  'blockstack' is a stack of the
     * bytecode addresses of the SETUP_X opcodes, and 'in_finally' tracks
     * whether we're in a 'finally' block at each blockstack level. */
    f_lasti_setup_addr = -1;
    new_lasti_setup_addr = -1;
    memset(blockstack, '\0', sizeof(blockstack));
    memset(in_finally, '\0', sizeof(in_finally));
    blockstack_top = 0;
    for (addr = 0; addr < code_len; addr++) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
        case SETUP_WITH:
            blockstack[blockstack_top++] = addr;
            in_finally[blockstack_top-1] = 0;
            break;

        case POP_BLOCK:
            assert(blockstack_top > 0);
            setup_op = code[blockstack[blockstack_top-1]];
            if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH) {
                /* The 'finally' suite proper starts after POP_BLOCK, so
                 * keep the entry but mark it as now being a 'finally'. */
                in_finally[blockstack_top-1] = 1;
            }
            else {
                blockstack_top--;
            }
            break;

        case END_FINALLY:
            /* Ignore END_FINALLYs for SETUP_EXCEPTs - they exist
             * in the bytecode but don't correspond to an actual
             * 'finally' block.  (If blockstack_top is 0, we must
             * be seeing such an END_FINALLY.) */
            if (blockstack_top > 0) {
                setup_op = code[blockstack[blockstack_top-1]];
                if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH) {
                    blockstack_top--;
                }
            }
            break;
        }

        /* For the addresses we're interested in, see whether they're
         * within a 'finally' block and if so, remember the address
         * of the SETUP_FINALLY. */
        if (addr == new_lasti || addr == f->f_lasti) {
            int i = 0;
            int setup_addr = -1;
            for (i = blockstack_top-1; i >= 0; i--) {
                if (in_finally[i]) {
                    setup_addr = blockstack[i];
                    break;
                }
            }

            if (setup_addr != -1) {
                if (addr == new_lasti) {
                    new_lasti_setup_addr = setup_addr;
                }

                if (addr == f->f_lasti) {
                    f_lasti_setup_addr = setup_addr;
                }
            }
        }

        /* Skip the 2-byte argument of opcodes that take one. */
        if (op >= HAVE_ARGUMENT) {
            addr += 2;
        }
    }

    /* Verify that the blockstack tracking code didn't get lost. */
    assert(blockstack_top == 0);

    /* After all that, are we jumping into / out of a 'finally' block? */
    if (new_lasti_setup_addr != f_lasti_setup_addr) {
        PyErr_SetString(PyExc_ValueError,
                    "can't jump into or out of a 'finally' block");
        return -1;
    }


    /* Police block-jumping (you can't jump into the middle of a block)
     * and ensure that the blockstack finishes up in a sensible state (by
     * popping any blocks we're jumping out of).  We look at all the
     * blockstack operations between the current position and the new
     * one, and keep track of how many blocks we drop out of on the way.
     * By also keeping track of the lowest blockstack position we see, we
     * can tell whether the jump goes into any blocks without coming out
     * again - in that case we raise an exception below. */
    delta_iblock = 0;
    for (addr = min_addr; addr < max_addr; addr++) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
        case SETUP_WITH:
            delta_iblock++;
            break;

        case POP_BLOCK:
            delta_iblock--;
            break;
        }

        min_delta_iblock = Py_MIN(min_delta_iblock, delta_iblock);

        if (op >= HAVE_ARGUMENT) {
            addr += 2;
        }
    }

    /* Derive the absolute iblock values from the deltas. */
    min_iblock = f->f_iblock + min_delta_iblock;
    if (new_lasti > f->f_lasti) {
        /* Forwards jump. */
        new_iblock = f->f_iblock + delta_iblock;
    }
    else {
        /* Backwards jump. */
        new_iblock = f->f_iblock - delta_iblock;
    }

    /* Are we jumping into a block? */
    if (new_iblock > min_iblock) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump into the middle of a block");
        return -1;
    }

    /* Pop any blocks that we're jumping out of, dropping the value-stack
     * entries that belonged to them. */
    while (f->f_iblock > new_iblock) {
        PyTryBlock *b = &f->f_blockstack[--f->f_iblock];
        while ((f->f_stacktop - f->f_valuestack) > b->b_level) {
            PyObject *v = (*--f->f_stacktop);
            Py_DECREF(v);
        }
    }

    /* Finally set the new f_lineno and f_lasti and return OK. */
    f->f_lineno = new_lineno;
    f->f_lasti = new_lasti;
    return 0;
}

/* Getter for f_trace: the installed trace function, or None.
   Always returns a new reference; never fails. */
static PyObject *
frame_gettrace(PyFrameObject *f, void *closure)
{
    PyObject* trace = f->f_trace;

    if (trace == NULL)
        trace = Py_None;

    Py_INCREF(trace);

    return trace;
}

/* Setter for f_trace: install (or with NULL, remove) the per-frame
   trace function.  Always succeeds. */
static int
frame_settrace(PyFrameObject *f, PyObject* v, void *closure)
{
    PyObject* old_value;

    /* We rely on f_lineno being accurate when f_trace is set. */
    f->f_lineno = PyFrame_GetLineNumber(f);

    old_value = f->f_trace;
    Py_XINCREF(v);
    f->f_trace = v;
    Py_XDECREF(old_value);

    return 0;
}


static PyGetSetDef frame_getsetlist[] = {
    {"f_locals",        (getter)frame_getlocals, NULL, NULL},
    {"f_lineno",        (getter)frame_getlineno,
                    (setter)frame_setlineno, NULL},
    {"f_trace",         (getter)frame_gettrace, (setter)frame_settrace, NULL},
    {0}
};

/* Stack frames are allocated and deallocated at a considerable rate.
   In an attempt to improve the speed of function calls, we:

   1. Hold a single "zombie" frame on each code object. This retains
   the allocated and initialised frame object from an invocation of
   the code object. The zombie is reanimated the next time we need a
   frame object for that code object. Doing this saves the malloc/
   realloc required when using a free_list frame that isn't the
   correct size. It also saves some field initialisation.

   In zombie mode, no field of PyFrameObject holds a reference, but
   the following fields are still valid:

     * ob_type, ob_size, f_code, f_valuestack;

     * f_locals, f_trace,
       f_exc_type, f_exc_value, f_exc_traceback are NULL;

     * f_localsplus does not require re-allocation and
       the local variables in f_localsplus are NULL.

  2. We also maintain a separate free list of stack frames (just like
  floats are allocated in a special way -- see floatobject.c).  When
  a stack frame is on the free list, only the following members have
  a meaning:
    ob_type             == &Frametype
    f_back              next item on free list, or NULL
    f_stacksize         size of value stack
    ob_size             size of localsplus
  Note that the value and block stacks are preserved -- this can save
  another malloc() call or two (and two free() calls as well!).
  Also note that, unlike for integers, each frame object is a
  malloc'ed object in its own right -- it is only the actual calls to
  malloc() that we are trying to save here, not the administration.
  After all, while a typical program may make millions of calls, a
  call depth of more than 20 or 30 is probably already exceptional
  unless the program contains run-away recursion.  I hope.

  Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on
  free_list.  Else programs creating lots of cyclic trash involving
  frames could provoke free_list into growing without bound.
*/

static PyFrameObject *free_list = NULL;
static int numfree = 0;         /* number of frames currently in free_list */
/* max value for numfree */
#define PyFrame_MAXFREELIST 200

/* Deallocate a frame: drop every reference it holds, then recycle the
   memory -- preferring the code object's zombie slot, then the frame
   free list (bounded by PyFrame_MAXFREELIST), then the GC allocator. */
static void
frame_dealloc(PyFrameObject *f)
{
    PyObject **p, **valuestack;
    PyCodeObject *co;

    PyObject_GC_UnTrack(f);
    Py_TRASHCAN_SAFE_BEGIN(f)
    /* Kill all local variables */
    valuestack = f->f_valuestack;
    for (p = f->f_localsplus; p < valuestack; p++)
        Py_CLEAR(*p);

    /* Free stack */
    if (f->f_stacktop != NULL) {
        for (p = valuestack; p < f->f_stacktop; p++)
            Py_XDECREF(*p);
    }

    Py_XDECREF(f->f_back);
    Py_DECREF(f->f_builtins);
    Py_DECREF(f->f_globals);
    Py_CLEAR(f->f_locals);
    Py_CLEAR(f->f_trace);
    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);

    /* Recycle the memory; see the big comment above for the zombie /
       free-list scheme.  f_code is dropped last: the zombie slot lives
       on the code object, which must stay alive until then. */
    co = f->f_code;
    if (co->co_zombieframe == NULL)
        co->co_zombieframe = f;
    else if (numfree < PyFrame_MAXFREELIST) {
        ++numfree;
        f->f_back = free_list;
        free_list = f;
    }
    else
        PyObject_GC_Del(f);

    Py_DECREF(co);
    Py_TRASHCAN_SAFE_END(f)
}

/* GC traversal: visit every object reference the frame holds
   (back pointer, code, namespaces, exception state, fast locals
   and the live portion of the value stack). */
static int
frame_traverse(PyFrameObject *f, visitproc visit, void *arg)
{
    PyObject **fastlocals, **p;
    Py_ssize_t i, slots;

    Py_VISIT(f->f_back);
    Py_VISIT(f->f_code);
    Py_VISIT(f->f_builtins);
    Py_VISIT(f->f_globals);
    Py_VISIT(f->f_locals);
    Py_VISIT(f->f_trace);
    Py_VISIT(f->f_exc_type);
    Py_VISIT(f->f_exc_value);
    Py_VISIT(f->f_exc_traceback);

    /* locals */
    slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_VISIT(*fastlocals);

    /* stack */
    if (f->f_stacktop != NULL) {
        for (p = f->f_valuestack; p < f->f_stacktop; p++)
            Py_VISIT(*p);
    }
    return 0;
}

/* tp_clear implementation: break reference cycles by dropping the
   frame's mutable references (exception state, trace function, fast
   locals, value stack).  NOTE(review): installed in the type table via
   an (inquiry) cast although it returns void rather than int --
   confirm this matches the convention used elsewhere in the tree. */
static void
frame_tp_clear(PyFrameObject *f)
{
    PyObject **fastlocals, **p, **oldtop;
    Py_ssize_t i, slots;

    /* Before anything else, make sure that this frame is clearly marked
     * as being defunct!  Else, e.g., a generator reachable from this
     * frame may also point to this frame, believe itself to still be
     * active, and try cleaning up this frame again.
     */
    oldtop = f->f_stacktop;
    f->f_stacktop = NULL;
    f->f_executing = 0;

    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);
    Py_CLEAR(f->f_trace);

    /* locals */
    slots = f->f_code->co_nlocals + PyTuple_GET_SIZE(f->f_code->co_cellvars) + PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_CLEAR(*fastlocals);

    /* stack */
    if (oldtop != NULL) {
        for (p = f->f_valuestack; p < oldtop; p++)
            Py_CLEAR(*p);
    }
}

/* frame.clear(): release most references held by the frame.
   Refuses (RuntimeError) to clear a frame that is executing; a frame
   owned by a suspended generator is finalized first, which is expected
   to detach the generator (asserted below). */
static PyObject *
frame_clear(PyFrameObject *f)
{
    if (f->f_executing) {
        PyErr_SetString(PyExc_RuntimeError,
                        "cannot clear an executing frame");
        return NULL;
    }
    if (f->f_gen) {
        _PyGen_Finalize(f->f_gen);
        assert(f->f_gen == NULL);
    }
    frame_tp_clear(f);
    Py_RETURN_NONE;
}

PyDoc_STRVAR(clear__doc__,
"F.clear(): clear most references held by the frame");

/* frame.__sizeof__(): size of the frame in bytes, including the
   variable-length localsplus area (locals + cells + frees + stack). */
static PyObject *
frame_sizeof(PyFrameObject *f)
{
    Py_ssize_t res, extras, ncells, nfrees;

    ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars);
    nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars);
    extras = f->f_code->co_stacksize + f->f_code->co_nlocals +
             ncells + nfrees;
    /* subtract one as it is already included in PyFrameObject */
    res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *);

    return PyLong_FromSsize_t(res);
}

PyDoc_STRVAR(sizeof__doc__,
"F.__sizeof__() -> size of F in memory, in bytes");

static PyMethodDef frame_methods[] = {
    {"clear",           (PyCFunction)frame_clear,       METH_NOARGS,
     clear__doc__},
    {"__sizeof__",      (PyCFunction)frame_sizeof,      METH_NOARGS,
     sizeof__doc__},
    {NULL,              NULL}   /* sentinel */
};

PyTypeObject PyFrame_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "frame",
    sizeof(PyFrameObject),
    sizeof(PyObject *),
    (destructor)frame_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_reserved */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)frame_traverse,               /* tp_traverse */
    (inquiry)frame_tp_clear,                    /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    frame_methods,                              /* tp_methods */
    frame_memberlist,                           /* tp_members */
    frame_getsetlist,                           /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
};

_Py_IDENTIFIER(__builtins__);

int _PyFrame_Init()
{
    /* Before, PyId___builtins__ was a string created explicitly in
       this function.  Now there is nothing to initialize anymore, but
       the function is kept for backward compatibility.
    */
    return 1;
}

/* Create a new frame for running 'code' with the given globals and
   (optional) locals in thread state 'tstate'.  Reuses the code
   object's zombie frame or a free-list frame when one is available.
   Returns a new reference, or NULL with an exception set. */
PyFrameObject *
PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
            PyObject *locals)
{
    PyFrameObject *back = tstate->frame;
    PyFrameObject *f;
    PyObject *builtins;
    Py_ssize_t i;

#ifdef Py_DEBUG
    if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
        (locals != NULL && !PyMapping_Check(locals))) {
        PyErr_BadInternalCall();
        return NULL;
    }
#endif
    if (back == NULL || back->f_globals != globals) {
        builtins = _PyDict_GetItemId(globals, &PyId___builtins__);
        if (builtins) {
            if (PyModule_Check(builtins)) {
                builtins = PyModule_GetDict(builtins);
                assert(builtins != NULL);
            }
        }
        if (builtins == NULL) {
            /* No builtins!  Make up a minimal one
               Give them 'None', at least. */
            /* NOTE(review): if PyDict_New succeeds but
               PyDict_SetItemString fails, 'builtins' is leaked on this
               error return -- confirm whether that matters here. */
            builtins = PyDict_New();
            if (builtins == NULL ||
                PyDict_SetItemString(
                    builtins, "None", Py_None) < 0)
                return NULL;
        }
        else
            Py_INCREF(builtins);

    }
    else {
        /* If we share the globals, we share the builtins.
           Save a lookup and a call. */
        builtins = back->f_builtins;
        assert(builtins != NULL);
        Py_INCREF(builtins);
    }
    if (code->co_zombieframe != NULL) {
        /* Reanimate the zombie frame kept on the code object; its
           localsplus slots are already NULL (see the big comment). */
        f = code->co_zombieframe;
        code->co_zombieframe = NULL;
        _Py_NewReference((PyObject *)f);
        assert(f->f_code == code);
    }
    else {
        Py_ssize_t extras, ncells, nfrees;
        ncells = PyTuple_GET_SIZE(code->co_cellvars);
        nfrees = PyTuple_GET_SIZE(code->co_freevars);
        extras = code->co_stacksize + code->co_nlocals + ncells +
            nfrees;
        if (free_list == NULL) {
            f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
                                   extras);
            if (f == NULL) {
                Py_DECREF(builtins);
                return NULL;
            }
        }
        else {
            /* Pop a frame off the free list; grow it if the recycled
               frame's localsplus area is too small for this code. */
            assert(numfree > 0);
            --numfree;
            f = free_list;
            free_list = free_list->f_back;
            if (Py_SIZE(f) < extras) {
                PyFrameObject *new_f = PyObject_GC_Resize(PyFrameObject, f, extras);
                if (new_f == NULL) {
                    PyObject_GC_Del(f);
                    Py_DECREF(builtins);
                    return NULL;
                }
                f = new_f;
            }
            _Py_NewReference((PyObject *)f);
        }

        f->f_code = code;
        extras = code->co_nlocals + ncells + nfrees;
        f->f_valuestack = f->f_localsplus + extras;
        for (i=0; i<extras; i++)
            f->f_localsplus[i] = NULL;
        f->f_locals = NULL;
        f->f_trace = NULL;
        f->f_exc_type = f->f_exc_value = f->f_exc_traceback = NULL;
    }
    f->f_stacktop = f->f_valuestack;
    f->f_builtins = builtins;
    Py_XINCREF(back);
    f->f_back = back;
    Py_INCREF(code);
    Py_INCREF(globals);
    f->f_globals = globals;
    /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
    if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
        (CO_NEWLOCALS | CO_OPTIMIZED))
        ; /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
    else if (code->co_flags & CO_NEWLOCALS) {
        locals = PyDict_New();
        if (locals == NULL) {
            Py_DECREF(f);
            return NULL;
        }
        f->f_locals = locals;
    }
    else {
        if (locals == NULL)
            locals = globals;
        Py_INCREF(locals);
        f->f_locals = locals;
    }
    f->f_tstate = tstate;

    f->f_lasti = -1;
    f->f_lineno = code->co_firstlineno;
    f->f_iblock = 0;
    f->f_executing = 0;
    f->f_gen = NULL;

    _PyObject_GC_TRACK(f);
    return f;
}

/* Block management */

/* Push a try-block (type/handler/value-stack level) onto the frame's
   block stack.  Overflow is a fatal error: the compiler is supposed to
   guarantee at most CO_MAXBLOCKS nested blocks. */
void
PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level)
{
    PyTryBlock *b;
    if (f->f_iblock >= CO_MAXBLOCKS)
        Py_FatalError("XXX block stack overflow");
    b = &f->f_blockstack[f->f_iblock++];
    b->b_type = type;
    b->b_level = level;
    b->b_handler = handler;
}

/* Pop and return the innermost try-block.  Underflow is a fatal error.
   The returned pointer aliases the frame's own blockstack storage. */
PyTryBlock *
PyFrame_BlockPop(PyFrameObject *f)
{
    PyTryBlock *b;
    if (f->f_iblock <= 0)
        Py_FatalError("XXX block stack underflow");
    b = &f->f_blockstack[--f->f_iblock];
    return b;
}

/* Convert between "fast" version of locals and dictionary version.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable with value values[i].  The function copies the first
   nmap variable from map/values into dict.  If values[i] is NULL,
   the variable is deleted from dict.

   If deref is true, then the values being copied are cell variables
   and the value is extracted from the cell variable before being put
   in dict.
 */

static int
map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = values[j];
        assert(PyUnicode_Check(key));
        if (deref) {
            assert(PyCell_Check(value));
            value = PyCell_GET(value);
        }
        if (value == NULL) {
            /* Unbound variable: remove any stale entry from dict.
               A missing key is fine; anything else is an error. */
            if (PyObject_DelItem(dict, key) != 0) {
                if (PyErr_ExceptionMatches(PyExc_KeyError))
                    PyErr_Clear();
                else
                    return -1;
            }
        }
        else {
            if (PyObject_SetItem(dict, key, value) != 0)
                return -1;
        }
    }
    return 0;
}

/* Copy values from the "locals" dict into the fast locals.

   dict is an input argument containing string keys representing
   variables names and arbitrary PyObject* as values.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable with value values[i].  The function copies the first
   nmap variable from map/values into dict.  If values[i] is NULL,
   the variable is deleted from dict.

   If deref is true, then the values being copied are cell variables
   and the value is extracted from the cell variable before being put
   in dict.  If clear is true, then variables in map but not in dict
   are set to NULL in map; if clear is false, variables missing in
   dict are ignored.

   Exceptions raised while modifying the dict are silently ignored,
   because there is no good way to report them.
*/

static void
dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref, int clear)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = PyObject_GetItem(dict, key);
        assert(PyUnicode_Check(key));
        /* We only care about NULLs if clear is true. */
        if (value == NULL) {
            PyErr_Clear();
            if (!clear)
                continue;
        }
        if (deref) {
            assert(PyCell_Check(values[j]));
            if (PyCell_GET(values[j]) != value) {
                if (PyCell_Set(values[j], value) < 0)
                    PyErr_Clear();
            }
        } else if (values[j] != value) {
            /* Take our own reference before releasing the old value. */
            Py_XINCREF(value);
            Py_XDECREF(values[j]);
            values[j] = value;
        }
        Py_XDECREF(value);
    }
}

/* Merge the fast locals (plus cells and frees) into f->f_locals,
   creating the dict on first use.  Returns 0 on success, -1 with an
   exception set on failure. */
int
PyFrame_FastToLocalsWithError(PyFrameObject *f)
{
    /* Merge fast locals into f->f_locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyCodeObject *co;
    Py_ssize_t j;
    Py_ssize_t ncells, nfreevars;

    if (f == NULL) {
        PyErr_BadInternalCall();
        return -1;
    }
    locals = f->f_locals;
    if (locals == NULL) {
        locals = f->f_locals = PyDict_New();
        if (locals == NULL)
            return -1;
    }
    co = f->f_code;
    map = co->co_varnames;
    if (!PyTuple_Check(map)) {
        PyErr_Format(PyExc_SystemError,
                     "co_varnames must be a tuple, not %s",
                     Py_TYPE(map)->tp_name);
        return -1;
    }
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals) {
        if (map_to_dict(map, j, locals, fast, 0) < 0)
            return -1;
    }
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        if (map_to_dict(co->co_cellvars, ncells,
                        locals, fast + co->co_nlocals, 1))
            return -1;

        /* If the namespace is unoptimized, then one of the
           following cases applies:
           1. It does not contain free variables, because it
              uses import * or is a top-level namespace.
           2. It is a class namespace.
           We don't want to accidentally copy free variables
           into the locals dict used by the class.
        */
        if (co->co_flags & CO_OPTIMIZED) {
            if (map_to_dict(co->co_freevars, nfreevars,
                            locals, fast + co->co_nlocals + ncells, 1) < 0)
                return -1;
        }
    }
    return 0;
}

/* Legacy wrapper around PyFrame_FastToLocalsWithError that swallows
   any error (callers predating the WithError variant expect that). */
void
PyFrame_FastToLocals(PyFrameObject *f)
{
    int res;

    assert(!PyErr_Occurred());

    res = PyFrame_FastToLocalsWithError(f);
    if (res < 0)
        PyErr_Clear();
}

/* Write f->f_locals back into the fast locals (and cells/frees).
   If clear is true, names missing from the dict clear the fast slot.
   Any pending exception is preserved across the dict operations. */
void
PyFrame_LocalsToFast(PyFrameObject *f, int clear)
{
    /* Merge f->f_locals into fast locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    Py_ssize_t ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    co = f->f_code;
    map = co->co_varnames;
    if (locals == NULL)
        return;
    if (!PyTuple_Check(map))
        return;
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        dict_to_map(co->co_varnames, j, locals, fast, 0, clear);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        dict_to_map(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1, clear);
        /* Same test as in PyFrame_FastToLocals() above. */
        if (co->co_flags & CO_OPTIMIZED) {
            dict_to_map(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1,
                        clear);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}

/* Clear out the free list */
int
PyFrame_ClearFreeList(void)
{
    int freelist_size = numfree;

    while (free_list != NULL) {
        PyFrameObject *f = free_list;
        free_list = free_list->f_back;
        PyObject_GC_Del(f);
        --numfree;
    }
    assert(numfree == 0);
    /* Return how many frames were freed. */
    return freelist_size;
}

void
PyFrame_Fini(void)
{
    (void)PyFrame_ClearFreeList();
}

/* Print summary info about the state of the optimized allocator */
void
_PyFrame_DebugMallocStats(FILE *out)
{
    _PyDebugAllocatorStats(out,
                           "free PyFrameObject",
                           numfree, sizeof(PyFrameObject));
}