static int
_opcode_stack_effect_impl(PyModuleDef *module, int opcode, PyObject *oparg)
/*[clinic end generated code: output=9e1133f8d587bc67 input=2d0a9ee53c0418f5]*/
{
    /* Validate the opcode/oparg pairing, then delegate to the compiler's
       stack-effect table.  Returns the net stack effect of the opcode, or
       -1 with a ValueError (or the conversion error) set. */
    int arg_value = 0;

    if (!HAS_ARG(opcode)) {
        /* Argument-less opcodes must not be given an oparg. */
        if (oparg != Py_None) {
            PyErr_SetString(PyExc_ValueError,
                            "stack_effect: opcode does not permit oparg but oparg was specified");
            return -1;
        }
    }
    else {
        /* Opcodes that take an argument require a concrete integer. */
        if (oparg == Py_None) {
            PyErr_SetString(PyExc_ValueError,
                            "stack_effect: opcode requires oparg but oparg was not specified");
            return -1;
        }
        arg_value = (int)PyLong_AsLong(oparg);
        /* -1 is ambiguous: only an error if an exception is pending. */
        if (arg_value == -1 && PyErr_Occurred())
            return -1;
    }

    int effect = PyCompile_OpcodeStackEffect(opcode, arg_value);
    if (effect == PY_INVALID_STACK_EFFECT) {
        PyErr_SetString(PyExc_ValueError, "invalid opcode or oparg");
        return -1;
    }
    return effect;
}
/*
 * optimize_code -- single-pass peephole optimizer over a bytecode string
 * (CPython 2.x layout: 1 opcode byte, optional 2 little-endian argument
 * bytes).
 *
 * Rewrites performed on a scratch copy:
 *   1. LOAD_CONST trueconst; JUMP_IF_FALSE x; POP_TOP
 *        -> JUMP_FORWARD 4  (skips the dead conditional jump; only the
 *           first opcode is changed, so the others still work if they
 *           happen to be jump targets)
 *   2. A jump whose target is an unconditional jump is retargeted to the
 *      final destination.
 *
 * Returns a NEW reference: a freshly built optimized string, or the
 * original `code` (incref'd) when the input is not a plain string, memory
 * is short, or EXTENDED_ARG appears (arguments wider than 16 bits are not
 * handled).  `consts` must be the constants tuple for this code.
 *
 * NOTE(review): GETARG/SETARG/GETJUMPTGT/UNCONDITIONAL_JUMP/ABSOLUTE_JUMP
 * are project macros assumed to match CPython's peephole helpers --
 * confirm their definitions.
 */
static PyObject *
optimize_code(PyObject *code, PyObject* consts)
{
    int i, j, codelen;
    int tgt, tgttgt, opcode;
    int is_true;
    unsigned char *codestr;

    /* Make a modifiable copy of the code string */
    if (!PyString_Check(code))
        goto exitUnchanged;
    codelen = PyString_Size(code);
    codestr = PyMem_Malloc(codelen);
    if (codestr == NULL)
        goto exitUnchanged;
    memcpy(codestr, PyString_AS_STRING(code), codelen);

    assert(PyTuple_Check(consts));

    /* The -7 bound keeps the longest inspected pattern (7 bytes:
       LOAD_CONST x; JUMP_IF_FALSE y; POP_TOP) inside the buffer. */
    for (i=0 ; i<codelen-7 ; i += HAS_ARG(codestr[i]) ? 3 : 1) {
        opcode = codestr[i];

        switch (opcode) {

        /* Skip over LOAD_CONST trueconst JUMP_IF_FALSE xx POP_TOP. */
        case LOAD_CONST:
            j = GETARG(codestr, i);
            if (codestr[i+3] != JUMP_IF_FALSE ||
                codestr[i+6] != POP_TOP)
                continue;
            /* BUGFIX: PyObject_IsTrue() can fail and return -1.  The
               original code (`!PyObject_IsTrue(...)`) treated -1 as
               "true" and applied the rewrite while an exception was
               pending.  This optimizer is best-effort, so clear the
               error and leave the pattern untouched instead. */
            is_true = PyObject_IsTrue(PyTuple_GET_ITEM(consts, j));
            if (is_true < 0)
                PyErr_Clear();
            if (is_true <= 0)
                continue;
            codestr[i] = JUMP_FORWARD;
            SETARG(codestr, i, 4);
            break;

        /* Replace jumps to unconditional jumps with a direct jump to the
           final target. */
        case FOR_ITER:
        case JUMP_FORWARD:
        case JUMP_IF_FALSE:
        case JUMP_IF_TRUE:
        case JUMP_ABSOLUTE:
        case CONTINUE_LOOP:
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
            tgt = GETJUMPTGT(codestr, i);
            if (!UNCONDITIONAL_JUMP(codestr[tgt]))
                continue;
            tgttgt = GETJUMPTGT(codestr, tgt);
            if (opcode == JUMP_FORWARD)   /* JMP_ABS can go backwards */
                opcode = JUMP_ABSOLUTE;
            if (!ABSOLUTE_JUMP(opcode))
                tgttgt -= i + 3;          /* Calc relative jump addr */
            if (tgttgt < 0)               /* No backward relative jumps */
                continue;
            codestr[i] = opcode;
            SETARG(codestr, i, tgttgt);
            break;

        /* Arguments wider than 16 bits are not handled; bail out. */
        case EXTENDED_ARG:
            PyMem_Free(codestr);
            goto exitUnchanged;
        }
    }

    code = PyString_FromStringAndSize((char *)codestr, codelen);
    PyMem_Free(codestr);
    return code;

exitUnchanged:
    Py_INCREF(code);
    return code;
}
/*
 * eval_frame -- main bytecode interpreter loop for one frame, modelled on
 * CPython 2.3's ceval.c, reduced to a subset of opcodes.
 *
 * Contract (as established by the code below):
 *  - Pushes `f` as tstate->frame and bumps the recursion depth; both are
 *    restored before every return.
 *  - Returns a new reference to the frame's result (set by RETURN_VALUE),
 *    or NULL when `f` is NULL, the recursion limit is hit, or an opcode
 *    fails (why ends up neither WHY_RETURN nor WHY_YIELD).
 *  - Error protocol inside the switch: a failing operation sets x to
 *    NULL, err to nonzero, or why to anything but WHY_NOT; a succeeding
 *    operation never does (see the BEWARE comment).
 *
 * NOTE(review): opcode constants, LOG(), recursion_limit, things_to_do,
 * _Py_Ticker/_Py_CheckInterval, assign_slice() and call_function() are
 * declared elsewhere in this project; their semantics are assumed to
 * match CPython 2.3 -- confirm.
 * NOTE(review): the helper macros below are never #undef'd; a second
 * eval_frame later in this file redefines them with what look like
 * identical bodies (benign redefinition) -- confirm they stay in sync.
 */
static PyObject *
eval_frame(PyFrameObject *f)
{
    LOG("> eval_frame\n");
    {
    PyObject **stack_pointer;     /* Next free slot in value stack */
    register unsigned char *next_instr;
    register int opcode=0;        /* Current opcode */
    register int oparg=0;         /* Current opcode argument, if any */
    register enum why_code why;   /* Reason for block stack unwind */
    register int err;             /* Error status -- nonzero if error */
    register PyObject *x;         /* Result object -- NULL if error */
    register PyObject *t, *u, *v; /* Temporary objects popped off stack */
    register PyObject *w;
    register PyObject **fastlocals, **freevars;
    PyObject *retval = NULL;      /* Return value */
    PyThreadState *tstate = PyThreadState_GET();
    PyCodeObject *co;
    unsigned char *first_instr;
    PyObject *names;
    PyObject *consts;

/* Tuple access macros */
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))

/* Code access macros */
#define INSTR_OFFSET() (next_instr - first_instr)
#define NEXTOP() (*next_instr++)
#define NEXTARG() (next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
#define JUMPTO(x) (next_instr = first_instr + (x))
#define JUMPBY(x) (next_instr += (x))

/* OpCode prediction macros
   Some opcodes tend to come in pairs thus making it possible to predict
   the second code when the first is run.  For example, COMPARE_OP is
   often followed by JUMP_IF_FALSE or JUMP_IF_TRUE.  And, those opcodes
   are often followed by a POP_TOP.
   Verifying the prediction costs a single high-speed test of register
   variable against a constant.  If the pairing was good, then the
   processor has a high likelihood of making its own successful branch
   prediction which results in a nearly zero overhead transition to the
   next opcode.
   A successful prediction saves a trip through the eval-loop including
   its two unpredictable branches, the HASARG test and the switch-case. */
#define PREDICT(op) if (*next_instr == op) goto PRED_##op
#define PREDICTED(op) PRED_##op: next_instr++
#define PREDICTED_WITH_ARG(op) PRED_##op: oparg = (next_instr[2]<<8) + \
                next_instr[1]; next_instr += 3

/* Stack manipulation macros */
#define STACK_LEVEL() (stack_pointer - f->f_valuestack)
#define EMPTY() (STACK_LEVEL() == 0)
#define TOP() (stack_pointer[-1])
#define SECOND() (stack_pointer[-2])
#define THIRD() (stack_pointer[-3])
#define FOURTH() (stack_pointer[-4])
#define SET_TOP(v) (stack_pointer[-1] = (v))
#define SET_SECOND(v) (stack_pointer[-2] = (v))
#define SET_THIRD(v) (stack_pointer[-3] = (v))
#define SET_FOURTH(v) (stack_pointer[-4] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v) (*stack_pointer++ = (v))
#define BASIC_POP() (*--stack_pointer)
#define PUSH(v) BASIC_PUSH(v)
#define POP() BASIC_POP()
#define STACKADJ(n) BASIC_STACKADJ(n)

/* Local variable macros */
#define GETLOCAL(i) (fastlocals[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value) do { PyObject *tmp = GETLOCAL(i); \
                                GETLOCAL(i) = value; \
                                Py_XDECREF(tmp); } while (0)

/* Start of code */
    if (f == NULL)
        return NULL;

    /* push frame */
    if (++tstate->recursion_depth > recursion_limit) {
        --tstate->recursion_depth;
        /* ERROR */
        tstate->frame = f->f_back;
        return NULL;
    }
    tstate->frame = f;

    /* tracing elided */
    co = f->f_code;
    names = co->co_names;
    consts = co->co_consts;
    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + f->f_nlocals;
    _PyCode_GETCODEPTR(co, &first_instr);
    /* An explanation is in order for the next line.
       f->f_lasti now refers to the index of the last instruction
       executed.  You might think this was obvious from the name, but
       this wasn't always true before 2.3!  PyFrame_New now sets
       f->f_lasti to -1 (i.e. the index *before* the first instruction)
       and YIELD_VALUE doesn't fiddle with f_lasti any more.  So this
       does work.  Promise. */
    next_instr = first_instr + f->f_lasti + 1;
    stack_pointer = f->f_stacktop;
    f->f_stacktop = NULL;  /* remains NULL unless yield suspends frame */

    why = WHY_NOT;
    err = 0;
    x = Py_None;  /* Not a reference, just anything non-NULL */
    w = NULL;

    for (;;) {
        /* Do periodic things.  Doing this every time through the loop
           would add too much overhead, so we do it only every Nth
           instruction.  We also do it if ``things_to_do'' is set, i.e.
           when an asynchronous event needs attention (e.g. a signal
           handler or async I/O handler); see Py_AddPendingCall() and
           Py_MakePendingCalls() above. */
        if (--_Py_Ticker < 0) {
            /* @@@ check for SETUP_FINALLY elided */
            _Py_Ticker = _Py_CheckInterval;
            tstate->tick_counter++;
            if (things_to_do) {
                if (Py_MakePendingCalls() < 0) {
                    why = WHY_EXCEPTION;
                    goto on_error;
                }
            }
        }

    fast_next_opcode:
        f->f_lasti = INSTR_OFFSET();

        /* Extract opcode and argument */
        opcode = NEXTOP();
        if (HAS_ARG(opcode))
            oparg = NEXTARG();

        /* Main switch on opcode */
        switch (opcode) {

        /* BEWARE!  It is essential that any operation that fails sets
           either x to NULL, err to nonzero, or why to anything but
           WHY_NOT, and that no operation that succeeds does this! */

        /* case STOP_CODE: this is an error! */

        case LOAD_FAST:
            x = GETLOCAL(oparg);
            if (x != NULL) {
                Py_INCREF(x);
                PUSH(x);
                goto fast_next_opcode;
            }
            /* ERROR?
               NOTE(review): a NULL fastlocal means an unbound local;
               CPython raises UnboundLocalError here, this port just
               falls through to the generic error path -- confirm
               intended. */
            break;

        case STORE_FAST:
            v = POP();
            SETLOCAL(oparg, v);
            continue;

        case LOAD_CONST:
            x = GETITEM(consts, oparg);
            Py_INCREF(x);
            PUSH(x);
            goto fast_next_opcode;

        PREDICTED(POP_TOP);
        case POP_TOP:
            v = POP();
            Py_DECREF(v);
            goto fast_next_opcode;

        case UNARY_NOT:
            v = TOP();
            err = PyObject_IsTrue(v);
            Py_DECREF(v);
            if (err == 0) {
                Py_INCREF(Py_True);
                SET_TOP(Py_True);
                continue;
            }
            else if (err > 0) {
                Py_INCREF(Py_False);
                SET_TOP(Py_False);
                err = 0;
                continue;
            }
            /* err < 0: IsTrue failed; drop the slot and take the error
               path with err still nonzero. */
            STACKADJ(-1);
            break;

        case BINARY_MODULO:
            w = POP();
            v = TOP();
            x = PyNumber_Remainder(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case BINARY_ADD:
            w = POP();
            v = TOP();
            if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
                /* INLINE: int + int */
                register long a, b, i;
                a = PyInt_AS_LONG(v);
                b = PyInt_AS_LONG(w);
                i = a + b;
                /* signed-overflow check: result sign differs from both
                   operands' signs */
                if ((i^a) < 0 && (i^b) < 0)
                    goto slow_add;
                x = PyInt_FromLong(i);
            }
            else {
            slow_add:
                /* non-int operands / overflow are unsupported here */
                Py_FatalError("slow add not supported.");
            }
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case STORE_SLICE+0:
        case STORE_SLICE+1:
        case STORE_SLICE+2:
        case STORE_SLICE+3:
            /* low two opcode bits encode which bounds are present */
            if ((opcode-STORE_SLICE) & 2)
                w = POP();
            else
                w = NULL;
            if ((opcode-STORE_SLICE) & 1)
                v = POP();
            else
                v = NULL;
            u = POP();
            t = POP();
            err = assign_slice(u, v, w, t); /* u[v:w] = t */
            Py_DECREF(t);
            Py_DECREF(u);
            Py_XDECREF(v);
            Py_XDECREF(w);
            if (err == 0) continue;
            break;

        case STORE_SUBSCR:
            w = POP();
            v = POP();
            u = POP();
            /* v[w] = u */
            err = PyObject_SetItem(v, w, u);
            Py_DECREF(u);
            Py_DECREF(v);
            Py_DECREF(w);
            if (err == 0) continue;
            break;

        case BINARY_SUBSCR:
            w = POP();
            v = TOP();
            if (PyList_CheckExact(v) && PyInt_CheckExact(w)) {
                /* INLINE: list[int] */
                long i = PyInt_AsLong(w);
                if (i < 0)
                    i += PyList_GET_SIZE(v);
                if (i < 0 || i >= PyList_GET_SIZE(v)) {
                    /* ERROR */
                    printf("list index out of range\n");
                    x = NULL;
                }
                else {
                    x = PyList_GET_ITEM(v, i);
                    Py_INCREF(x);
                }
            }
            else
                x = PyObject_GetItem(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case BINARY_AND:
            w = POP();
            v = TOP();
            x = PyNumber_And(v, w);
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case PRINT_ITEM:
            v = POP();
            PyObject_Print(v);
            Py_DECREF(v);
            break;

        case PRINT_NEWLINE:
            printf("\n");
            break;

        case RETURN_VALUE:
            retval = POP();
            why = WHY_RETURN;
            break;

        case POP_BLOCK:
            {
                PyTryBlock *b = PyFrame_BlockPop(f);
                /* unwind value stack to the block's recorded level */
                while (STACK_LEVEL() > b->b_level) {
                    v = POP();
                    Py_DECREF(v);
                }
            }
            break;

        case STORE_NAME:
            w = GETITEM(names, oparg);
            v = POP();
            if ((x = f->f_locals) == NULL) {
                /* ERROR */
                printf("STORE_NAME ERROR\n");
                break;
            }
            err = PyDict_SetItem(x, w, v);
            Py_DECREF(v);
            break;

        case LOAD_NAME:
            w = GETITEM(names, oparg);
            if ((x = f->f_locals) == NULL) {
                /* ERROR */
                printf("LOAD_NAME ERROR\n");
                break;
            }
            /* locals -> globals -> builtins lookup chain */
            x = PyDict_GetItem(x, w);
            if (x == NULL) {
                x = PyDict_GetItem(f->f_globals, w);
                if (x == NULL) {
                    x = PyDict_GetItem(f->f_builtins, w);
                    if (x == NULL) {
                        printf("can't find %s\n", ((PyStringObject *)w)->ob_sval);
                        /* format_exc_check_arg */
                        break;
                    }
                }
            }
            Py_INCREF(x);
            PUSH(x);
            break;

        case LOAD_GLOBAL:
            w = GETITEM(names, oparg);
            if (PyString_CheckExact(w)) {
                /* Inline the PyDict_GetItem() calls.
                   WARNING: this is an extreme speed hack.
                   Do not try this at home.
                   NOTE(review): assumes the name's ob_shash is already
                   computed and that ma_lookup never returns NULL
                   (Python 2.x dict internals) -- confirm. */
                long hash = ((PyStringObject *)w)->ob_shash;
                if (hash != -1) {
                    PyDictObject *d;
                    d = (PyDictObject *)(f->f_globals);
                    x = d->ma_lookup(d, w, hash)->me_value;
                    if (x != NULL) {
                        Py_INCREF(x);
                        PUSH(x);
                        continue;
                    }
                    d = (PyDictObject *)(f->f_builtins);
                    x = d->ma_lookup(d, w, hash)->me_value;
                    if (x != NULL) {
                        Py_INCREF(x);
                        PUSH(x);
                        continue;
                    }
                    goto load_global_error;
                }
            }
            /* This is the un-inlined version of the code above */
            x = PyDict_GetItem(f->f_globals, w);
            if (x == NULL) {
                x = PyDict_GetItem(f->f_builtins, w);
                if (x == NULL) {
                load_global_error:
                    printf("LOAD_GLOBAL ERROR %s", ((PyStringObject *)w)->ob_sval);
                    break;
                }
            }
            Py_INCREF(x);
            PUSH(x);
            break;

        case LOAD_ATTR:
            w = GETITEM(names, oparg);
            v = TOP();
            x = PyObject_GetAttr(v, w);
            Py_DECREF(v);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case IMPORT_NAME:
            w = GETITEM(names, oparg);
            x = PyDict_GetItemString(f->f_builtins, "__import__");
            if (x == NULL) {
                printf("__import__ not found");
                break;
            }
            u = TOP();
            w = Py_BuildValue("(O)", w);
            Py_DECREF(u);
            if (w == NULL) {
                u = POP();
                x = NULL;
                break;
            }
            x = PyEval_CallObject(x, w);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case JUMP_FORWARD:
            JUMPBY(oparg);
            goto fast_next_opcode;

        PREDICTED_WITH_ARG(JUMP_IF_FALSE);
        case JUMP_IF_FALSE:
            /* NOTE: operand is peeked (TOP), not popped; a following
               POP_TOP removes it. */
            w = TOP();
            if (w == Py_True) {
                PREDICT(POP_TOP);
                goto fast_next_opcode;
            }
            if (w == Py_False) {
                JUMPBY(oparg);
                goto fast_next_opcode;
            }
            err = PyObject_IsTrue(w);
            if (err > 0)
                err = 0;
            else if (err == 0)
                JUMPBY(oparg);
            else
                break;  /* err < 0: error path */
            continue;

        case JUMP_ABSOLUTE:
            JUMPTO(oparg);
            continue;

        case SETUP_LOOP:
            PyFrame_BlockSetup(f, opcode, INSTR_OFFSET() + oparg, STACK_LEVEL());
            continue;

        case CALL_FUNCTION:
            x = call_function(&stack_pointer, oparg);
            PUSH(x);
            if (x != NULL) continue;
            break;

        case MAKE_FUNCTION:
            v = POP();  /* code object */
            x = PyFunction_New(v, f->f_globals);
            Py_DECREF(v);
            /* XXX Maybe this should be a separate opcode?
               (oparg > 0 means that many default values are on the
               stack below the code object.) */
            if (x != NULL && oparg > 0) {
                v = PyTuple_New(oparg);
                if (v == NULL) {
                    Py_DECREF(x);
                    x = NULL;
                    break;
                }
                while (--oparg >= 0) {
                    w = POP();
                    PyTuple_SET_ITEM(v, oparg, w);
                }
                err = PyFunction_SetDefaults(x, v);
                Py_DECREF(v);
            }
            PUSH(x);
            break;

        case SET_LINENO:
            break;

        default:
            printf("opcode: %d\n", opcode);
            Py_FatalError("unknown opcode");

        } /* switch */

    on_error:
        if (why == WHY_NOT) {
            if (err == 0 && x != NULL) {
                continue;  /* Normal, fast path */
            }
            why = WHY_EXCEPTION;
            x = Py_None;
            err = 0;
        }

        /* End the loop if we still have an error (or return) */
        if (why != WHY_NOT)
            break;

    } /* main loop */

    if (why != WHY_YIELD) {
        /* Pop remaining stack entries -- but when yielding */
        while (!EMPTY()) {
            v = POP();
            Py_XDECREF(v);
        }
    }

    if (why != WHY_RETURN && why != WHY_YIELD)
        retval = NULL;

    /* pop frame */
    --tstate->recursion_depth;
    tstate->frame = f->f_back;
    return retval;
    }
}
//=============================================================================
// METHOD : SPELLbytecode::analyze
//=============================================================================
// Walks the bytecode of m_code exactly once and fills in member state:
//  - m_lines     : one BLine per source line (offset, lineno, opcode,
//                  executable / returnConst / keepWithNext flags)
//  - m_tryBlocks : line-number boundaries of try/except/finally blocks
//  - m_lastAddr / m_lastLine : offset and line of the final RETURN_VALUE
//
// NOTE(review): INSTR_OFFSET/NEXTOP/NEXTARG/HAS_ARG and the opcode
// constants are declared elsewhere; they are assumed to mirror CPython's
// ceval macros over first_instr/next_instr -- confirm.
// NOTE(review): `storeit` and `finally_offset` are assigned but never
// read in this method -- possibly leftovers; confirm before removing.
void SPELLbytecode::analyze()
{
    assert( m_code != NULL );

    // Pointer to initial instruction (used by macros)
    unsigned char* first_instr = (unsigned char*) PyString_AS_STRING(m_code->co_code);
    // Pointer to current instruction (used by macros)
    register unsigned char* next_instr = first_instr;
    // Opcode argument
    unsigned int oparg;
    // Stores the previous line
    unsigned int prevLine = 0;
    // Stores the last opcode
    unsigned int prevOpCode = 0;
    // Opcode count in line
    unsigned short opcodeCount = 0;
    // Will be true when there is no more bytecode to process
    bool finished = false;
    // Heuristic nesting counter: incremented on LOAD_* opcodes,
    // decremented on CALL_FUNCTION (see PHASE 2 switch below)
    unsigned int callDepth = 0;
    // Try block structure being accumulated (all linenos 0 = inactive)
    TryBlock tb;
    tb.try_lineno = 0;
    tb.end_try_lineno = 0;
    tb.except_lineno = 0;
    tb.end_except_lineno = 0;
    tb.end_lineno = 0;
    // Holds the bytecode offset for except and finally statements
    unsigned int except_offset = 0;
    unsigned int finally_offset = 0;

    while(not finished)
    {
        // Create one BLine info structure per bytecode instruction
        BLine info;
        // Get the instruction offset
        info.offset = INSTR_OFFSET();
        // Get the corresponding script line
        info.lineno = PyCode_Addr2Line(m_code, info.offset);
        // Obtain the opcode
        info.opcode = NEXTOP();
        // Track the number of opcodes per line in the lnotab.
        if ((prevLine>0) && (info.lineno != prevLine))
        {
            opcodeCount = 0;
        }
        else
        {
            opcodeCount++;
        }

        //////////////////////////////////////////////////////////////////////
        // PHASE 1 - UPDATE PREVIOUS LINE INFORMATION (ALREADY STORED) IF NEEDED
        //////////////////////////////////////////////////////////////////////

        // #1 Detect binary add.  This helps us detect lines that shall be
        // executed together, like when statements spread over several
        // lines with binary add (+\).  Marks the previously stored line
        // as keepWithNext.
        if ((opcodeCount == 0)&&(prevOpCode == BINARY_ADD)&&(info.opcode==LOAD_CONST))
        {
            LineList::iterator it = m_lines.end();
            it--;
            BLine prev = *it;
            m_lines.pop_back();
            prev.keepWithNext = true;
            m_lines.push_back(prev);
        }

        // #2 Special checks for return statements: we may need to update
        // the previous line (a RETURN_VALUE fed by a plain LOAD_FAST or
        // LOAD_CONST means the stored line returns a constant/local).
        if (info.opcode == RETURN_VALUE && ((prevOpCode == LOAD_FAST || prevOpCode == LOAD_CONST)))
        {
            LineList::iterator it = m_lines.end();
            it--;
            BLine prev = *it;
            m_lines.pop_back();
            prev.returnConst = true;
            m_lines.push_back(prev);
        }

        // We will ignore this, for the moment
        oparg = 0;
        // To decide whether to store the bline information or not
        // NOTE(review): never used below -- confirm it can go.
        bool storeit = false;
        if (HAS_ARG(info.opcode)) oparg = NEXTARG();

        //////////////////////////////////////////////////////////////////////
        // PHASE 2 - BUILD AND STORE NEXT BLINE INFORMATION
        //////////////////////////////////////////////////////////////////////
        // Only the first instruction of each new source line is stored.
        if (( prevLine > 0 ) && ( info.lineno != prevLine ))
        {
            // Default values
            info.executable = false;
            info.returnConst = false;
            info.keepWithNext = false;
            /** \todo
             * Depending on the bytecode, either set the block as active,
             * or finish the loop (RETURN_VALUE is found at the end of the
             * script).  This is maybe wrong, need to check if
             * RETURN_VALUE is found in function code objects, probably
             * yes... */
            switch(info.opcode)
            {
            case LOAD_NAME:
            case LOAD_GLOBAL:
            case LOAD_CONST:
                callDepth++;
                info.executable = true;
                break;
            case CALL_FUNCTION:
                callDepth--;
                info.executable = true;
                break;
            case STORE_NAME:
            case IMPORT_NAME:
            case JUMP_FORWARD:
            case JUMP_IF_FALSE:
            case JUMP_IF_TRUE:
            case JUMP_ABSOLUTE:
            case RETURN_VALUE:
            {
                // Only executable when not inside a (heuristically
                // tracked) call argument sequence.
                info.executable = (callDepth==0);
                break;
            }
            default:
                info.executable = true;
                break;
            }
            // Store the info
            m_lines.push_back(info);
        }
        // The very first line is always executable, and needs to be
        // stored explicitly
        else if (prevLine == 0)
        {
            info.executable = true;
            // Store the info
            m_lines.push_back(info);
        }

        //////////////////////////////////////////////////////////////////////
        // PHASE 3 - ADDITIONAL INFORMATION FOR TRY BLOCKS, LAST ADDRESS,
        //           LAST LINE
        //////////////////////////////////////////////////////////////////////
        switch(info.opcode)
        {
        case RETURN_VALUE:
            // End of the scanned code object
            m_lastAddr = info.offset;
            finished = true;
            break;
        case SETUP_EXCEPT:
            tb.try_lineno = info.lineno;
            except_offset = info.offset + oparg + 3; // This is the real destination offset
            break;
        case SETUP_FINALLY:
            finally_offset = info.offset + oparg + 3; // This is the 'finally' destination offset
            break;
        case END_FINALLY:
            if (tb.try_lineno != 0 && tb.end_lineno == 0)
            {
                // Close the try/except block being accumulated and reset
                // the accumulator.
                tb.end_lineno = info.lineno;
                tb.end_except_lineno = info.lineno;
                m_tryBlocks.push_back( tb );
                tb.try_lineno = 0;
                tb.end_try_lineno = 0;
                tb.except_lineno = 0;
                tb.end_except_lineno = 0;
                tb.end_lineno = 0;
            }
            else
            {
                // Update the last try block to update it with the
                // finally block
                TryBlockList::iterator it = m_tryBlocks.end();
                it--;
                TryBlock prev = *it;
                m_tryBlocks.pop_back();
                prev.end_except_lineno = prev.end_lineno;
                prev.end_lineno = info.lineno;
                m_tryBlocks.push_back( prev );
            }
            break;
        }

        // This should always happen between a SETUP_EXCEPT and an
        // END_FINALLY: we reached the except handler's destination.
        if ( (tb.try_lineno >0) && (except_offset == info.offset))
        {
            // The last line before the except
            tb.end_try_lineno = prevLine;
            // The line of the except
            tb.except_lineno = info.lineno;
        }

        prevLine = info.lineno;
        prevOpCode = info.opcode;
        m_lastLine = info.lineno;
    }
}
/*
 * eval_frame -- reduced bytecode interpreter loop: same skeleton as the
 * fuller eval_frame earlier in this file, but supporting only
 * LOAD_CONST, BINARY_ADD (exact-int fast path only), PRINT_ITEM,
 * PRINT_NEWLINE and RETURN_VALUE; any other opcode is fatal.  Periodic
 * pending-call processing is elided.
 *
 * Returns a new reference to the frame's result (set by RETURN_VALUE),
 * or NULL on NULL frame / recursion overflow / error.
 *
 * NOTE(review): this is a second definition of eval_frame with the same
 * signature as the one above -- presumably alternate builds compile only
 * one of them; confirm, since both in one translation unit would clash.
 * NOTE(review): the macros below redefine the earlier ones with what
 * look like identical bodies (benign redefinition under C rules) --
 * confirm they stay in sync.
 */
static PyObject *
eval_frame(PyFrameObject *f)
{
    LOG("> eval_frame\n");
    PyObject **stack_pointer;   /* Next free slot in value stack */
    register unsigned char *next_instr;
    register int opcode=0;      /* Current opcode */
    register int oparg=0;       /* Current opcode argument, if any */
    register enum why_code why; /* Reason for block stack unwind */
    register int err;           /* Error status -- nonzero if error */
    register PyObject *x;       /* Result object -- NULL if error */
    register PyObject *v;       /* Temporary objects popped off stack */
    register PyObject *w;
    register PyObject **fastlocals, **freevars;
    PyObject *retval = NULL;    /* Return value */
    PyThreadState *tstate = PyThreadState_GET();
    PyCodeObject *co;
    unsigned char *first_instr;
    PyObject *names;
    PyObject *consts;

/* Tuple access macros */
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))

/* Code access macros */
#define INSTR_OFFSET() (next_instr - first_instr)
#define NEXTOP() (*next_instr++)
#define NEXTARG() (next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
#define JUMPTO(x) (next_instr = first_instr + (x))
#define JUMPBY(x) (next_instr += (x))

/* OpCode prediction macros
   Some opcodes tend to come in pairs thus making it possible to predict
   the second code when the first is run.  For example, COMPARE_OP is
   often followed by JUMP_IF_FALSE or JUMP_IF_TRUE.  And, those opcodes
   are often followed by a POP_TOP.
   Verifying the prediction costs a single high-speed test of register
   variable against a constant.  If the pairing was good, then the
   processor has a high likelihood of making its own successful branch
   prediction which results in a nearly zero overhead transition to the
   next opcode.
   A successful prediction saves a trip through the eval-loop including
   its two unpredictable branches, the HASARG test and the switch-case. */
#define PREDICT(op) if (*next_instr == op) goto PRED_##op
#define PREDICTED(op) PRED_##op: next_instr++
#define PREDICTED_WITH_ARG(op) PRED_##op: oparg = (next_instr[2]<<8) + \
                next_instr[1]; next_instr += 3

/* Stack manipulation macros */
#define STACK_LEVEL() (stack_pointer - f->f_valuestack)
#define EMPTY() (STACK_LEVEL() == 0)
#define TOP() (stack_pointer[-1])
#define SECOND() (stack_pointer[-2])
#define THIRD() (stack_pointer[-3])
#define FOURTH() (stack_pointer[-4])
#define SET_TOP(v) (stack_pointer[-1] = (v))
#define SET_SECOND(v) (stack_pointer[-2] = (v))
#define SET_THIRD(v) (stack_pointer[-3] = (v))
#define SET_FOURTH(v) (stack_pointer[-4] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v) (*stack_pointer++ = (v))
#define BASIC_POP() (*--stack_pointer)
#define PUSH(v) BASIC_PUSH(v)
#define POP() BASIC_POP()
#define STACKADJ(n) BASIC_STACKADJ(n)

/* Local variable macros */
#define GETLOCAL(i) (fastlocals[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value) do { PyObject *tmp = GETLOCAL(i); \
                                GETLOCAL(i) = value; \
                                Py_XDECREF(tmp); } while (0)

/* Start of code */
    if (f == NULL)
        return NULL;

    /* push frame */
    if (++tstate->recursion_depth > recursion_limit) {
        --tstate->recursion_depth;
        /* ERROR */
        tstate->frame = f->f_back;
        return NULL;
    }
    tstate->frame = f;

    /* tracing elided */
    co = f->f_code;
    names = co->co_names;
    consts = co->co_consts;
    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + f->f_nlocals;
    _PyCode_GETCODEPTR(co, &first_instr);
    /* An explanation is in order for the next line.
       f->f_lasti now refers to the index of the last instruction
       executed.  You might think this was obvious from the name, but
       this wasn't always true before 2.3!  PyFrame_New now sets
       f->f_lasti to -1 (i.e. the index *before* the first instruction)
       and YIELD_VALUE doesn't fiddle with f_lasti any more.  So this
       does work.  Promise. */
    next_instr = first_instr + f->f_lasti + 1;
    stack_pointer = f->f_stacktop;
    f->f_stacktop = NULL;  /* remains NULL unless yield suspends frame */

    why = WHY_NOT;
    err = 0;
    x = Py_None;  /* Not a reference, just anything non-NULL */
    w = NULL;

    for (;;) {
        /* @@@ pending calls elided */

    fast_next_opcode:
        f->f_lasti = INSTR_OFFSET();

        /* Extract opcode and argument */
        opcode = NEXTOP();
        if (HAS_ARG(opcode))
            oparg = NEXTARG();

        /* Main switch on opcode */
        switch (opcode) {

        /* BEWARE!  It is essential that any operation that fails sets
           either x to NULL, err to nonzero, or why to anything but
           WHY_NOT, and that no operation that succeeds does this! */

        /* case STOP_CODE: this is an error! */

        case LOAD_CONST:
            x = GETITEM(consts, oparg);
            Py_INCREF(x);
            PUSH(x);
            goto fast_next_opcode;

        case BINARY_ADD:
            w = POP();
            v = TOP();
            if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
                /* INLINE: int + int */
                register long a, b, i;
                a = PyInt_AS_LONG(v);
                b = PyInt_AS_LONG(w);
                i = a + b;
                /* signed-overflow check: result sign differs from both
                   operands' signs */
                if ((i^a) < 0 && (i^b) < 0)
                    goto slow_add;
                x = PyInt_FromLong(i);
            }
            else {
            slow_add:
                /* non-int operands / overflow are unsupported here */
                Py_FatalError("slow add not supported.");
            }
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case PRINT_ITEM:
            v = POP();
            PyObject_Print(v);
            Py_DECREF(v);
            break;

        case PRINT_NEWLINE:
            printf("\n");
            break;

        case RETURN_VALUE:
            retval = POP();
            why = WHY_RETURN;
            break;

        default:
            Py_FatalError("unknown opcode");

        } /* switch */

        if (why == WHY_NOT) {
            if (err == 0 && x != NULL) {
                continue;  /* Normal, fast path */
            }
            why = WHY_EXCEPTION;
            x = Py_None;
            err = 0;
        }

        /* End the loop if we still have an error (or return) */
        if (why != WHY_NOT)
            break;

    } /* main loop */

    if (why != WHY_YIELD) {
        /* Pop remaining stack entries -- but when yielding */
        while (!EMPTY()) {
            v = POP();
            Py_XDECREF(v);
        }
    }

    if (why != WHY_RETURN && why != WHY_YIELD)
        retval = NULL;

    /* pop frame */
    --tstate->recursion_depth;
    tstate->frame = f->f_back;
    return retval;
}