frameobject.c revision fefeca53eebe8665c08ac0c041639ada3c9f9446
/* Frame object implementation */

#include "Python.h"

#include "code.h"
#include "frameobject.h"
#include "opcode.h"
#include "structmember.h"

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define OFF(x) offsetof(PyFrameObject, x)

/* Members exposed directly on frame objects; all read-only from Python. */
static PyMemberDef frame_memberlist[] = {
    {"f_back",          T_OBJECT,       OFF(f_back),    READONLY},
    {"f_code",          T_OBJECT,       OFF(f_code),    READONLY},
    {"f_builtins",      T_OBJECT,       OFF(f_builtins),READONLY},
    {"f_globals",       T_OBJECT,       OFF(f_globals), READONLY},
    {"f_lasti",         T_INT,          OFF(f_lasti),   READONLY},
    {NULL}      /* Sentinel */
};

/* Getter for f_locals: merge the fast locals into the f_locals dict first so
 * the caller sees up-to-date values.  Returns a new reference.
 * NOTE(review): PyFrame_FastToLocals() swallows errors, so f->f_locals is
 * assumed non-NULL here afterwards — confirm against PyFrame_FastToLocals. */
static PyObject *
frame_getlocals(PyFrameObject *f, void *closure)
{
    PyFrame_FastToLocals(f);
    Py_INCREF(f->f_locals);
    return f->f_locals;
}

/* Getter for f_lineno.  While tracing, f_lineno is kept accurate by the
 * interpreter (see frame_settrace below); otherwise it is derived lazily
 * from f_lasti via the code object's line-number table. */
static PyObject *
frame_getlineno(PyFrameObject *f, void *closure)
{
    int lineno;

    if (f->f_trace)
        lineno = f->f_lineno;
    else
        lineno = PyCode_Addr2Line(f->f_code, f->f_lasti);

    return PyLong_FromLong(lineno);
}

/* Setter for f_lineno - you can set f_lineno from within a trace function in
 * order to jump to a given line of code, subject to some restrictions.  Most
 * lines are OK to jump to because they don't make any assumptions about the
 * state of the stack (obvious because you could remove the line and the code
 * would still work without any stack errors), but there are some constructs
 * that limit jumping:
 *
 *  o Lines with an 'except' statement on them can't be jumped to, because
 *    they expect an exception to be on the top of the stack.
 *  o Lines that live in a 'finally' block can't be jumped from or to, since
 *    the END_FINALLY expects to clean up the stack after the 'try' block.
 *  o 'try'/'for'/'while' blocks can't be jumped into because the blockstack
 *    needs to be set up before their code runs, and for 'for' loops the
 *    iterator needs to be on the stack.
 *
 * Returns 0 on success, -1 (with an exception set) on failure.
 */
static int
frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
{
    int new_lineno = 0;                 /* The new value of f_lineno */
    long l_new_lineno;
    int overflow;
    int new_lasti = 0;                  /* The new value of f_lasti */
    int new_iblock = 0;                 /* The new value of f_iblock */
    unsigned char *code = NULL;         /* The bytecode for the frame... */
    Py_ssize_t code_len = 0;            /* ...and its length */
    char *lnotab = NULL;                /* Iterating over co_lnotab */
    Py_ssize_t lnotab_len = 0;          /* (ditto) */
    int offset = 0;                     /* (ditto) */
    int line = 0;                       /* (ditto) */
    int addr = 0;                       /* (ditto) */
    int min_addr = 0;                   /* Scanning the SETUPs and POPs */
    int max_addr = 0;                   /* (ditto) */
    int delta_iblock = 0;               /* (ditto) */
    int min_delta_iblock = 0;           /* (ditto) */
    int min_iblock = 0;                 /* (ditto) */
    int f_lasti_setup_addr = 0;         /* Policing no-jump-into-finally */
    int new_lasti_setup_addr = 0;       /* (ditto) */
    int blockstack[CO_MAXBLOCKS];       /* Walking the 'finally' blocks */
    int in_finally[CO_MAXBLOCKS];       /* (ditto) */
    int blockstack_top = 0;             /* (ditto) */
    unsigned char setup_op = 0;         /* (ditto) */

    /* f_lineno must be an integer. */
    if (!PyLong_CheckExact(p_new_lineno)) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno must be an integer");
        return -1;
    }

    /* You can only do this from within a trace function, not via
     * _getframe or similar hackery. */
    if (!f->f_trace)
    {
        PyErr_Format(PyExc_ValueError,
                     "f_lineno can only be set by a trace function");
        return -1;
    }

    /* Fail if the line comes before the start of the code block. */
    l_new_lineno = PyLong_AsLongAndOverflow(p_new_lineno, &overflow);
    if (overflow
#if SIZEOF_LONG > SIZEOF_INT
        || l_new_lineno > INT_MAX
        || l_new_lineno < INT_MIN
#endif
       ) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno out of range");
        return -1;
    }
    new_lineno = (int)l_new_lineno;

    if (new_lineno < f->f_code->co_firstlineno) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes before the current code block",
                     new_lineno);
        return -1;
    }

    /* Find the bytecode offset for the start of the given line, or the
     * first code-owning line after it. */
    PyBytes_AsStringAndSize(f->f_code->co_lnotab, &lnotab, &lnotab_len);
    addr = 0;
    line = f->f_code->co_firstlineno;
    new_lasti = -1;
    /* co_lnotab is a sequence of (addr-delta, line-delta) byte pairs. */
    for (offset = 0; offset < lnotab_len; offset += 2) {
        addr += lnotab[offset];
        line += lnotab[offset+1];
        if (line >= new_lineno) {
            new_lasti = addr;
            new_lineno = line;
            break;
        }
    }

    /* If we didn't reach the requested line, return an error. */
    if (new_lasti == -1) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes after the current code block",
                     new_lineno);
        return -1;
    }

    /* We're now ready to look at the bytecode. */
    PyBytes_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len);
    min_addr = MIN(new_lasti, f->f_lasti);
    max_addr = MAX(new_lasti, f->f_lasti);

    /* You can't jump onto a line with an 'except' statement on it -
     * they expect to have an exception on the top of the stack, which
     * won't be true if you jump to them.  They always start with code
     * that either pops the exception using POP_TOP (plain 'except:'
     * lines do this) or duplicates the exception on the stack using
     * DUP_TOP (if there's an exception type specified).  See compile.c,
     * 'com_try_except' for the full details.  There aren't any other
     * cases (AFAIK) where a line's code can start with DUP_TOP or
     * POP_TOP, but if any ever appear, they'll be subject to the same
     * restriction (but with a different error message). */
    if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) {
        PyErr_SetString(PyExc_ValueError,
            "can't jump to 'except' line as there's no exception");
        return -1;
    }

    /* You can't jump into or out of a 'finally' block because the 'try'
     * block leaves something on the stack for the END_FINALLY to clean
     * up.  So we walk the bytecode, maintaining a simulated blockstack.
     * When we reach the old or new address and it's in a 'finally' block
     * we note the address of the corresponding SETUP_FINALLY.  The jump
     * is only legal if neither address is in a 'finally' block or
     * they're both in the same one.  'blockstack' is a stack of the
     * bytecode addresses of the SETUP_X opcodes, and 'in_finally' tracks
     * whether we're in a 'finally' block at each blockstack level. */
    f_lasti_setup_addr = -1;
    new_lasti_setup_addr = -1;
    memset(blockstack, '\0', sizeof(blockstack));
    memset(in_finally, '\0', sizeof(in_finally));
    blockstack_top = 0;
    for (addr = 0; addr < code_len; addr++) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
            blockstack[blockstack_top++] = addr;
            in_finally[blockstack_top-1] = 0;
            break;

        case POP_BLOCK:
            assert(blockstack_top > 0);
            setup_op = code[blockstack[blockstack_top-1]];
            if (setup_op == SETUP_FINALLY) {
                /* POP_BLOCK for a try/finally: the 'finally' body
                 * follows, so stay at this level but mark it. */
                in_finally[blockstack_top-1] = 1;
            }
            else {
                blockstack_top--;
            }
            break;

        case END_FINALLY:
            /* Ignore END_FINALLYs for SETUP_EXCEPTs - they exist
             * in the bytecode but don't correspond to an actual
             * 'finally' block.  (If blockstack_top is 0, we must
             * be seeing such an END_FINALLY.) */
            if (blockstack_top > 0) {
                setup_op = code[blockstack[blockstack_top-1]];
                if (setup_op == SETUP_FINALLY) {
                    blockstack_top--;
                }
            }
            break;
        }

        /* For the addresses we're interested in, see whether they're
         * within a 'finally' block and if so, remember the address
         * of the SETUP_FINALLY. */
        if (addr == new_lasti || addr == f->f_lasti) {
            int i = 0;
            int setup_addr = -1;
            for (i = blockstack_top-1; i >= 0; i--) {
                if (in_finally[i]) {
                    setup_addr = blockstack[i];
                    break;
                }
            }

            if (setup_addr != -1) {
                if (addr == new_lasti) {
                    new_lasti_setup_addr = setup_addr;
                }

                if (addr == f->f_lasti) {
                    f_lasti_setup_addr = setup_addr;
                }
            }
        }

        /* Opcodes with an argument occupy two extra bytes. */
        if (op >= HAVE_ARGUMENT) {
            addr += 2;
        }
    }

    /* Verify that the blockstack tracking code didn't get lost. */
    assert(blockstack_top == 0);

    /* After all that, are we jumping into / out of a 'finally' block? */
    if (new_lasti_setup_addr != f_lasti_setup_addr) {
        PyErr_SetString(PyExc_ValueError,
                    "can't jump into or out of a 'finally' block");
        return -1;
    }


    /* Police block-jumping (you can't jump into the middle of a block)
     * and ensure that the blockstack finishes up in a sensible state (by
     * popping any blocks we're jumping out of).  We look at all the
     * blockstack operations between the current position and the new
     * one, and keep track of how many blocks we drop out of on the way.
     * By also keeping track of the lowest blockstack position we see, we
     * can tell whether the jump goes into any blocks without coming out
     * again - in that case we raise an exception below. */
    delta_iblock = 0;
    for (addr = min_addr; addr < max_addr; addr++) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
            delta_iblock++;
            break;

        case POP_BLOCK:
            delta_iblock--;
            break;
        }

        min_delta_iblock = MIN(min_delta_iblock, delta_iblock);

        if (op >= HAVE_ARGUMENT) {
            addr += 2;
        }
    }

    /* Derive the absolute iblock values from the deltas. */
    min_iblock = f->f_iblock + min_delta_iblock;
    if (new_lasti > f->f_lasti) {
        /* Forwards jump. */
        new_iblock = f->f_iblock + delta_iblock;
    }
    else {
        /* Backwards jump. */
        new_iblock = f->f_iblock - delta_iblock;
    }

    /* Are we jumping into a block? */
    if (new_iblock > min_iblock) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump into the middle of a block");
        return -1;
    }

    /* Pop any blocks that we're jumping out of. */
    while (f->f_iblock > new_iblock) {
        PyTryBlock *b = &f->f_blockstack[--f->f_iblock];
        /* Discard any value-stack entries pushed inside the block. */
        while ((f->f_stacktop - f->f_valuestack) > b->b_level) {
            PyObject *v = (*--f->f_stacktop);
            Py_DECREF(v);
        }
    }

    /* Finally set the new f_lineno and f_lasti and return OK. */
    f->f_lineno = new_lineno;
    f->f_lasti = new_lasti;
    return 0;
}

/* Getter for f_trace; returns the trace function or None (never NULL). */
static PyObject *
frame_gettrace(PyFrameObject *f, void *closure)
{
    PyObject* trace = f->f_trace;

    if (trace == NULL)
        trace = Py_None;

    Py_INCREF(trace);

    return trace;
}

/* Setter for f_trace.  We rely on f_lineno being accurate when f_trace is
 * set, so refresh it from f_lasti whenever a trace function is installed. */
static int
frame_settrace(PyFrameObject *f, PyObject* v, void *closure)
{
    PyObject* old_value = f->f_trace;

    Py_XINCREF(v);
    f->f_trace = v;

    if (v != NULL)
        f->f_lineno = PyCode_Addr2Line(f->f_code, f->f_lasti);

    /* Decref the old value last, in case its destructor re-enters here. */
    Py_XDECREF(old_value);

    return 0;
}


static PyGetSetDef frame_getsetlist[] = {
    {"f_locals",        (getter)frame_getlocals, NULL, NULL},
    {"f_lineno",        (getter)frame_getlineno,
                    (setter)frame_setlineno, NULL},
    {"f_trace",         (getter)frame_gettrace, (setter)frame_settrace, NULL},
    {0}
};

/* Stack frames are allocated and deallocated at a considerable rate.
   In an attempt to improve the speed of function calls, we:

   1. Hold a single "zombie" frame on each code object. This retains
   the allocated and initialised frame object from an invocation of
   the code object. The zombie is reanimated the next time we need a
   frame object for that code object. Doing this saves the malloc/
   realloc required when using a free_list frame that isn't the
   correct size. It also saves some field initialisation.

   In zombie mode, no field of PyFrameObject holds a reference, but
   the following fields are still valid:

     * ob_type, ob_size, f_code, f_valuestack;

     * f_locals, f_trace,
       f_exc_type, f_exc_value, f_exc_traceback are NULL;

     * f_localsplus does not require re-allocation and
       the local variables in f_localsplus are NULL.

   2. We also maintain a separate free list of stack frames (just like
   integers are allocated in a special way -- see intobject.c).  When
   a stack frame is on the free list, only the following members have
   a meaning:
    ob_type             == &Frametype
    f_back              next item on free list, or NULL
    f_stacksize         size of value stack
    ob_size             size of localsplus
   Note that the value and block stacks are preserved -- this can save
   another malloc() call or two (and two free() calls as well!).
   Also note that, unlike for integers, each frame object is a
   malloc'ed object in its own right -- it is only the actual calls to
   malloc() that we are trying to save here, not the administration.
   After all, while a typical program may make millions of calls, a
   call depth of more than 20 or 30 is probably already exceptional
   unless the program contains run-away recursion.  I hope.

   Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on
   free_list.  Else programs creating lots of cyclic trash involving
   frames could provoke free_list into growing without bound.
*/

static PyFrameObject *free_list = NULL;
static int numfree = 0;         /* number of frames currently in free_list */
/* max value for numfree */
#define PyFrame_MAXFREELIST 200

/* Frame destructor: release all references, then recycle the frame via the
 * code object's zombie slot or the free list when possible. */
static void
frame_dealloc(PyFrameObject *f)
{
    PyObject **p, **valuestack;
    PyCodeObject *co;

    PyObject_GC_UnTrack(f);
    /* Trashcan guards against stack overflow from long f_back chains. */
    Py_TRASHCAN_SAFE_BEGIN(f)
    /* Kill all local variables */
    valuestack = f->f_valuestack;
    for (p = f->f_localsplus; p < valuestack; p++)
        Py_CLEAR(*p);

    /* Free stack */
    if (f->f_stacktop != NULL) {
        for (p = valuestack; p < f->f_stacktop; p++)
            Py_XDECREF(*p);
    }

    Py_XDECREF(f->f_back);
    Py_DECREF(f->f_builtins);
    Py_DECREF(f->f_globals);
    Py_CLEAR(f->f_locals);
    Py_CLEAR(f->f_trace);
    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);

    co = f->f_code;
    if (co->co_zombieframe == NULL)
        co->co_zombieframe = f;
    else if (numfree < PyFrame_MAXFREELIST) {
        ++numfree;
        f->f_back = free_list;
        free_list = f;
    }
    else
        PyObject_GC_Del(f);

    Py_DECREF(co);
    Py_TRASHCAN_SAFE_END(f)
}

/* GC traversal: visit every object reference held by the frame, including
 * the fast locals/cells/frees and the live portion of the value stack. */
static int
frame_traverse(PyFrameObject *f, visitproc visit, void *arg)
{
    PyObject **fastlocals, **p;
    int i, slots;

    Py_VISIT(f->f_back);
    Py_VISIT(f->f_code);
    Py_VISIT(f->f_builtins);
    Py_VISIT(f->f_globals);
    Py_VISIT(f->f_locals);
    Py_VISIT(f->f_trace);
    Py_VISIT(f->f_exc_type);
    Py_VISIT(f->f_exc_value);
    Py_VISIT(f->f_exc_traceback);

    /* locals */
    slots = f->f_code->co_nlocals +
            PyTuple_GET_SIZE(f->f_code->co_cellvars) +
            PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_VISIT(*fastlocals);

    /* stack */
    if (f->f_stacktop != NULL) {
        for (p = f->f_valuestack; p < f->f_stacktop; p++)
            Py_VISIT(*p);
    }
    return 0;
}

/* GC tp_clear: break reference cycles by dropping everything the frame can
 * safely let go of (code/globals/builtins are kept; see frame_dealloc). */
static void
frame_clear(PyFrameObject *f)
{
    PyObject **fastlocals, **p, **oldtop;
    int i, slots;

    /* Before anything else, make sure that this frame is clearly marked
     * as being defunct!  Else, e.g., a generator reachable from this
     * frame may also point to this frame, believe itself to still be
     * active, and try cleaning up this frame again. */
    oldtop = f->f_stacktop;
    f->f_stacktop = NULL;

    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);
    Py_CLEAR(f->f_trace);

    /* locals */
    slots = f->f_code->co_nlocals +
            PyTuple_GET_SIZE(f->f_code->co_cellvars) +
            PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_CLEAR(*fastlocals);

    /* stack */
    if (oldtop != NULL) {
        for (p = f->f_valuestack; p < oldtop; p++)
            Py_CLEAR(*p);
    }
}

/* __sizeof__ implementation: base struct size plus the variable-length
 * localsplus/value-stack area. */
static PyObject *
frame_sizeof(PyFrameObject *f)
{
    Py_ssize_t res, extras, ncells, nfrees;

    ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars);
    nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars);
    extras = f->f_code->co_stacksize + f->f_code->co_nlocals +
             ncells + nfrees;
    /* subtract one as it is already included in PyFrameObject */
    res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *);

    return PyLong_FromSsize_t(res);
}

PyDoc_STRVAR(sizeof__doc__,
"F.__sizeof__() -> size of F in memory, in bytes");

static PyMethodDef frame_methods[] = {
    {"__sizeof__",      (PyCFunction)frame_sizeof,      METH_NOARGS,
     sizeof__doc__},
    {NULL,              NULL}   /* sentinel */
};

PyTypeObject PyFrame_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "frame",
    sizeof(PyFrameObject),
    sizeof(PyObject *),
    (destructor)frame_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_compare */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)frame_traverse,               /* tp_traverse */
    (inquiry)frame_clear,                       /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    frame_methods,                              /* tp_methods */
    frame_memberlist,                           /* tp_members */
    frame_getsetlist,                           /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
};

/* Interned "__builtins__" string, created once at interpreter startup and
 * released in PyFrame_Fini(). */
static PyObject *builtin_object;

/* One-time module initialisation; returns true on success. */
int _PyFrame_Init()
{
    builtin_object = PyUnicode_InternFromString("__builtins__");
    return (builtin_object != NULL);
}

/* Create a new frame for executing 'code' with the given globals/locals.
 * Reuses the code object's zombie frame or a free-list frame when possible
 * (see the comment block above frame_dealloc).  Returns a new reference, or
 * NULL with an exception set. */
PyFrameObject *
PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
            PyObject *locals)
{
    PyFrameObject *back = tstate->frame;
    PyFrameObject *f;
    PyObject *builtins;
    Py_ssize_t i;

#ifdef Py_DEBUG
    if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
        (locals != NULL && !PyMapping_Check(locals))) {
        PyErr_BadInternalCall();
        return NULL;
    }
#endif
    if (back == NULL || back->f_globals != globals) {
        builtins = PyDict_GetItem(globals, builtin_object);
        if (builtins) {
            if (PyModule_Check(builtins)) {
                builtins = PyModule_GetDict(builtins);
                assert(!builtins || PyDict_Check(builtins));
            }
            else if (!PyDict_Check(builtins))
                builtins = NULL;
        }
        if (builtins == NULL) {
            /* No builtins!              Make up a minimal one
               Give them 'None', at least. */
            builtins = PyDict_New();
            /* NOTE(review): if PyDict_SetItemString fails here, the new
             * builtins dict appears to leak — confirm intended. */
            if (builtins == NULL ||
                PyDict_SetItemString(
                    builtins, "None", Py_None) < 0)
                return NULL;
        }
        else
            Py_INCREF(builtins);

    }
    else {
        /* If we share the globals, we share the builtins.
           Save a lookup and a call. */
        builtins = back->f_builtins;
        assert(builtins != NULL && PyDict_Check(builtins));
        Py_INCREF(builtins);
    }
    if (code->co_zombieframe != NULL) {
        /* Reanimate the zombie frame: it is already the right size and
         * partially initialised (see the zombie-mode invariants above). */
        f = code->co_zombieframe;
        code->co_zombieframe = NULL;
        _Py_NewReference((PyObject *)f);
        assert(f->f_code == code);
    }
    else {
        Py_ssize_t extras, ncells, nfrees;
        ncells = PyTuple_GET_SIZE(code->co_cellvars);
        nfrees = PyTuple_GET_SIZE(code->co_freevars);
        extras = code->co_stacksize + code->co_nlocals + ncells +
            nfrees;
        if (free_list == NULL) {
            f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
                                   extras);
            if (f == NULL) {
                Py_DECREF(builtins);
                return NULL;
            }
        }
        else {
            assert(numfree > 0);
            --numfree;
            f = free_list;
            free_list = free_list->f_back;
            /* A free-list frame may be too small for this code object;
             * grow it in place if so. */
            if (Py_SIZE(f) < extras) {
                f = PyObject_GC_Resize(PyFrameObject, f, extras);
                if (f == NULL) {
                    Py_DECREF(builtins);
                    return NULL;
                }
            }
            _Py_NewReference((PyObject *)f);
        }

        f->f_code = code;
        extras = code->co_nlocals + ncells + nfrees;
        f->f_valuestack = f->f_localsplus + extras;
        for (i=0; i<extras; i++)
            f->f_localsplus[i] = NULL;
        f->f_locals = NULL;
        f->f_trace = NULL;
        f->f_exc_type = f->f_exc_value = f->f_exc_traceback = NULL;
    }
    f->f_stacktop = f->f_valuestack;
    f->f_builtins = builtins;
    Py_XINCREF(back);
    f->f_back = back;
    Py_INCREF(code);
    Py_INCREF(globals);
    f->f_globals = globals;
    /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
    if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
        (CO_NEWLOCALS | CO_OPTIMIZED))
        ; /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
    else if (code->co_flags & CO_NEWLOCALS) {
        locals = PyDict_New();
        if (locals == NULL) {
            Py_DECREF(f);
            return NULL;
        }
        f->f_locals = locals;
    }
    else {
        if (locals == NULL)
            locals = globals;
        Py_INCREF(locals);
        f->f_locals = locals;
    }
    f->f_tstate = tstate;

    f->f_lasti = -1;
    f->f_lineno = code->co_firstlineno;
    f->f_iblock = 0;

    _PyObject_GC_TRACK(f);
    return f;
}

/* Block management */

/* Push a new try-block descriptor onto the frame's block stack. */
void
PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level)
{
    PyTryBlock *b;
    if (f->f_iblock >= CO_MAXBLOCKS)
        Py_FatalError("XXX block stack overflow");
    b = &f->f_blockstack[f->f_iblock++];
    b->b_type = type;
    b->b_level = level;
    b->b_handler = handler;
}

/* Pop and return the top try-block descriptor (pointer into the frame's own
 * blockstack array -- not owned by the caller). */
PyTryBlock *
PyFrame_BlockPop(PyFrameObject *f)
{
    PyTryBlock *b;
    if (f->f_iblock <= 0)
        Py_FatalError("XXX block stack underflow");
    b = &f->f_blockstack[--f->f_iblock];
    return b;
}

/* Convert between "fast" version of locals and dictionary version.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable with value values[i].  The function copies the first
   nmap variable from map/values into dict.  If values[i] is NULL,
   the variable is deleted from dict.

   If deref is true, then the values being copied are cell variables
   and the value is extracted from the cell variable before being put
   in dict.

   Exceptions raised while modifying the dict are silently ignored,
   because there is no good way to report them.
 */

static void
map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = values[j];
        assert(PyUnicode_Check(key));
        if (deref) {
            assert(PyCell_Check(value));
            value = PyCell_GET(value);
        }
        if (value == NULL) {
            if (PyObject_DelItem(dict, key) != 0)
                PyErr_Clear();
        }
        else {
            if (PyObject_SetItem(dict, key, value) != 0)
                PyErr_Clear();
        }
    }
}

/* Copy values from the "locals" dict into the fast locals.

   dict is an input argument containing string keys representing
   variables names and arbitrary PyObject* as values.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable with value values[i].  The function copies the first
   nmap variable from map/values into dict.  If values[i] is NULL,
   the variable is deleted from dict.

   If deref is true, then the values being copied are cell variables
   and the value is extracted from the cell variable before being put
   in dict.  If clear is true, then variables in map but not in dict
   are set to NULL in map; if clear is false, variables missing in
   dict are ignored.

   Exceptions raised while modifying the dict are silently ignored,
   because there is no good way to report them.
*/

static void
dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref, int clear)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = PyObject_GetItem(dict, key);
        assert(PyUnicode_Check(key));
        /* We only care about NULLs if clear is true. */
        if (value == NULL) {
            PyErr_Clear();
            if (!clear)
                continue;
        }
        if (deref) {
            assert(PyCell_Check(values[j]));
            if (PyCell_GET(values[j]) != value) {
                if (PyCell_Set(values[j], value) < 0)
                    PyErr_Clear();
            }
        } else if (values[j] != value) {
            Py_XINCREF(value);
            Py_XDECREF(values[j]);
            values[j] = value;
        }
        Py_XDECREF(value);
    }
}

/* Merge the frame's fast locals (plus cells and frees) into f->f_locals,
 * creating the dict on demand.  Errors are swallowed: this is best-effort,
 * and any pending exception is preserved across the merge. */
void
PyFrame_FastToLocals(PyFrameObject *f)
{
    /* Merge fast locals into f->f_locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    int ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    if (locals == NULL) {
        locals = f->f_locals = PyDict_New();
        if (locals == NULL) {
            PyErr_Clear(); /* Can't report it :-( */
            return;
        }
    }
    co = f->f_code;
    map = co->co_varnames;
    if (!PyTuple_Check(map))
        return;
    /* Preserve any in-flight exception around the dict operations. */
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        map_to_dict(map, j, locals, fast, 0);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        map_to_dict(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1);
        /* If the namespace is unoptimized, then one of the
           following cases applies:
           1. It does not contain free variables, because it
              uses import * or is a top-level namespace.
           2. It is a class namespace.
           We don't want to accidentally copy free variables
           into the locals dict used by the class.
        */
        if (co->co_flags & CO_OPTIMIZED) {
            map_to_dict(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}

/* Inverse of PyFrame_FastToLocals: write f->f_locals back into the fast
 * locals/cells/frees.  If 'clear' is true, names missing from the dict are
 * cleared in the fast array.  Errors are swallowed, as above. */
void
PyFrame_LocalsToFast(PyFrameObject *f, int clear)
{
    /* Merge f->f_locals into fast locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    int ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    co = f->f_code;
    map = co->co_varnames;
    if (locals == NULL)
        return;
    if (!PyTuple_Check(map))
        return;
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        dict_to_map(co->co_varnames, j, locals, fast, 0, clear);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        dict_to_map(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1, clear);
        /* Same test as in PyFrame_FastToLocals() above. */
        if (co->co_flags & CO_OPTIMIZED) {
            dict_to_map(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1,
                        clear);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}

/* Clear out the free list; returns the number of frames freed. */
int
PyFrame_ClearFreeList(void)
{
    int freelist_size = numfree;

    while (free_list != NULL) {
        PyFrameObject *f = free_list;
        free_list = free_list->f_back;
        PyObject_GC_Del(f);
        --numfree;
    }
    assert(numfree == 0);
    return freelist_size;
}

/* Interpreter-shutdown cleanup for this module's static state. */
void
PyFrame_Fini(void)
{
    (void)PyFrame_ClearFreeList();
    Py_XDECREF(builtin_object);
    builtin_object = NULL;
}