int StudyPattern(WORD *lhs) { GETIDENTITY WORD *fullproto, *pat, *p, *p1, *p2, *pstop, *info, f, nn; int numfun = 0, numsym = 0, allwilds = 0, i, j, k, nc; FUN_INFO *finf, *fmin, *f1, *f2, funscratch; fullproto = lhs + IDHEAD; /* if ( *lhs == TYPEIF ) fullproto--; */ pat = fullproto + fullproto[1]; info = pat + *pat; p = pat + 1; while ( p < info ) { if ( *p >= FUNCTION ) { numfun++; nn = *p - FUNCTION; if ( nn >= WILDOFFSET ) nn -= WILDOFFSET; /* We check here for cases that are not allowed like ?a inside symmetric functions or tensors. */ if ( ( functions[nn].symmetric == SYMMETRIC ) || ( functions[nn].symmetric == ANTISYMMETRIC ) ) { p2 = p+p[1]; p1 = p+FUNHEAD; if ( functions[nn].spec ) { while ( p1 < p2 ) { if ( *p1 == FUNNYWILD ) { MesPrint("&Argument field wildcards are not allowed inside (anti)symmetric functions or tensors"); return(1); } p1++; } } else { while ( p1 < p2 ) { if ( *p1 == -ARGWILD ) { MesPrint("&Argument field wildcards are not allowed inside (anti)symmetric functions or tensors"); return(1); } NEXTARG(p1); } } } } p += p[1]; } if ( numfun == 0 ) return(0); if ( ( lhs[2] & SUBMASK ) == SUBALL ) { p = pat + 1; while ( p < info ) { if ( *p == SYMBOL || *p == VECTOR || *p == DOTPRODUCT || *p == INDEX ) { MesPrint("&id,all can have only functions and/or tensors in the lhs."); return(1); } p += p[1]; } } /* We need now some room for the information about the functions */ if ( numfun > AN.numfuninfo ) { if ( AN.FunInfo ) M_free(AN.FunInfo,"funinfo"); AN.numfuninfo = numfun + 10; AN.FunInfo = (FUN_INFO *)Malloc1(AN.numfuninfo*sizeof(FUN_INFO),"funinfo"); } /* Now collect the information. First the locations. 
*/ p = pat + 1; i = 0; while ( p < info ) { if ( *p >= FUNCTION ) AN.FunInfo[i++].location = p; p += p[1]; } for ( i = 0, finf = AN.FunInfo; i < numfun; i++, finf++ ) { p = finf->location; pstop = p + p[1]; f = *p; if ( f > FUNCTION+WILDOFFSET ) f -= WILDOFFSET; finf->numargs = finf->numfunnies = finf->numwildcards = 0; finf->symmet = functions[f-FUNCTION].symmetric; finf->tensor = functions[f-FUNCTION].spec; finf->commute = functions[f-FUNCTION].commute; if ( finf->tensor >= TENSORFUNCTION ) { p += FUNHEAD; while ( p < pstop ) { if ( *p == FUNNYWILD ) { finf->numfunnies++; p+= 2; continue; } else if ( *p < 0 ) { if ( *p >= AM.OffsetVector + WILDOFFSET && *p < MINSPEC ) { finf->numwildcards++; } } else { if ( *p >= AM.OffsetIndex + WILDOFFSET && *p <= AM.OffsetIndex + 2*WILDOFFSET ) finf->numwildcards++; } finf->numargs++; p++; } } else { p += FUNHEAD; while ( p < pstop ) { if ( *p > 0 ) { finf->numargs++; p += *p; continue; } if ( *p <= -FUNCTION ) { if ( *p <= -FUNCTION - WILDOFFSET ) finf->numwildcards++; p++; } else if ( *p == -SYMBOL ) { if ( p[1] >= 2*MAXPOWER ) finf->numwildcards++; p += 2; } else if ( *p == -INDEX ) { if ( p[1] >= AM.OffsetIndex + WILDOFFSET && p[1] <= AM.OffsetIndex + 2*WILDOFFSET ) finf->numwildcards++; p += 2; } else if ( *p == -VECTOR || *p == -MINVECTOR ) { if ( p[1] >= AM.OffsetVector + WILDOFFSET && p[1] < MINSPEC ) { finf->numwildcards++; } p += 2; } else if ( *p == -ARGWILD ) { finf->numfunnies++; p += 2; } else { p += 2; } finf->numargs++; } } if ( finf->symmet ) { numsym++; allwilds += finf->numwildcards + finf->numfunnies; } } if ( numsym == 0 ) return(0); if ( allwilds == 0 ) return(0); /* We have the information in the array AN.FunInfo. We sort things and then write the sorted pattern. Of course we may not play with the order of the noncommuting functions. Of course we have to become even smarter in the future and look during the sorting which wildcards are asigned when. But for now this should do. 
*/ for ( nc = numfun-1; nc >= 0; nc-- ) { if ( AN.FunInfo[nc].commute ) break; } finf = AN.FunInfo; for ( i = nc+2; i < numfun; i++ ) { fmin = finf; finf++; if ( ( finf->symmet < fmin->symmet ) || ( ( finf->symmet == fmin->symmet ) && ( ( finf->numwildcards+finf->numfunnies < fmin->numwildcards+fmin->numfunnies ) || ( ( finf->numwildcards+finf->numfunnies == fmin->numwildcards+fmin->numfunnies ) && ( finf->numwildcards < fmin->numfunnies ) ) ) ) ) { funscratch = AN.FunInfo[i]; AN.FunInfo[i] = AN.FunInfo[i-1]; AN.FunInfo[i-1] = funscratch; for ( j = i-1; j > nc && j > 0; j-- ) { f1 = AN.FunInfo+j; f2 = f1-1; if ( ( f1->symmet < f2->symmet ) || ( ( f1->symmet == f2->symmet ) && ( ( f1->numwildcards+f1->numfunnies < f2->numwildcards+f2->numfunnies ) || ( ( f1->numwildcards+f1->numfunnies == f2->numwildcards+f2->numfunnies ) && ( f1->numwildcards < f2->numfunnies ) ) ) ) ) { funscratch = AN.FunInfo[j]; AN.FunInfo[j] = AN.FunInfo[j-1]; AN.FunInfo[j-1] = funscratch; } else break; } } } /* Now we rewrite the pattern. First into the space after it and then we copy it back. Be careful with the non-commutative functions. There the worst one should decide. */ p = pat + 1; p2 = info; for ( i = 0; i < numfun; i++ ) { if ( i == nc ) { for ( k = 0; k <= nc; k++ ) { if ( AN.FunInfo[k].commute ) { p1 = AN.FunInfo[k].location; j = p1[1]; NCOPY(p2,p1,j) } } } else if ( AN.FunInfo[i].commute == 0 ) {
/*
 * Evaluate a function object into a object.
 *
 * Bytecode interpreter main loop for the CO virtual machine.  Pushes a new
 * frame for `func`, then dispatches one-byte opcodes (each optionally
 * followed by a two-byte little-endian argument) until the outermost frame
 * returns.  Calls into CO-level functions re-enter via the `new_frame`
 * label instead of recursing in C.
 *
 * Returns: the result object of the outermost frame, or NULL on error /
 * after the frame stack has been torn down (see `vm_exit`).
 */
COObject *
vm_eval(COObject *func, COObject *globals)
{
/* Instruction-stream access macros. */
#define JUMPBY(offset) next_code += offset
#define JUMPTO(offset) next_code = first_code + offset
#define NEXTOP() (*next_code++)
#define NEXTARG() (next_code += 2, (next_code[-1]<<8) + next_code[-2])
#define GETITEM(v, i) COTuple_GET_ITEM((COTupleObject *)(v), i)
/* Fast-local access.  SETLOCAL stores the new value before releasing the
   old one, so a destructor triggered by the DECREF never sees a stale slot. */
#define GETLOCAL(i) (fastlocals[i])
#define SETLOCAL(i, v) \
    do { \
        COObject *tmp = GETLOCAL(i); \
        GETLOCAL(i) = v; \
        CO_XDECREF(tmp); \
    } while (0);
/* Value-stack manipulation; stack_top points at the next free slot. */
#define PUSH(o) (*stack_top++ = (o))
#define POP() (*--stack_top)
#define TOP() (stack_top[-1])
#define SET_TOP(o) (stack_top[-1] = (o))
#define SECOND() (stack_top[-2])
#define THIRD() (stack_top[-3])
#define FOURTH() (stack_top[-4])
#define PEEK(n) (stack_top[-(n)])
#define STACK_ADJ(n) (stack_top += n)
#define STACK_LEVEL() ((int)(stack_top - TS(frame)->f_stack))
/* Pop and release every stack slot above block b's saved level. */
#define UNWIND_BLOCK(b) \
    do { \
        while (STACK_LEVEL() > (b)->fb_level) { \
            COObject *o = POP(); \
            CO_XDECREF(o); \
        } \
    } while (0)

    COCodeObject *code;
    COObject *names;
    COObject *consts;
    COObject *localnames;
    COObject *funcargs = COList_New(0); /* pending arguments for a CO-level call */
    COObject **fastlocals;
    COObject **stack_top;       /* Stack top, points to next free slot in stack */
    unsigned char *next_code;
    unsigned char *first_code;
    unsigned char opcode;       /* Current opcode */
    int oparg;                  /* Current opcode argument, if any */
    COObject *x;                /* Result object -- NULL if error */
    COObject *o1, *o2, *o3;     /* Temporary objects popped of stack */
    int status;                 /* VM status */
    int err;                    /* C function error code */

    status = STATUS_NONE;
    TS(frame) = (COFrameObject *)COFrame_New((COObject *)TS(frame), func, globals);

new_frame:                      /* reentry point when function call/return */
    /* (Re)load per-frame state from the current thread-state frame. */
    code = (COCodeObject *)((COFunctionObject *)TS(frame)->f_func)->func_code;
    stack_top = TS(frame)->f_stacktop;
    names = code->co_names;
    localnames = code->co_localnames;
    consts = code->co_consts;
    first_code = (unsigned char *)COBytes_AsString(code->co_code);
    next_code = first_code + TS(frame)->f_lasti;
    fastlocals = TS(frame)->f_extraplus;

    /* Parse arguments.  `funcargs` was filled (in reverse) by
       OP_CALL_FUNCTION; drain it into the fast-local slots. */
    if (COList_GET_SIZE(funcargs)) {
        // check arguments count
        if (code->co_argcount != COList_GET_SIZE(funcargs)) {
            COErr_Format(COException_ValueError,
                         "takes exactly %d arguments (%d given)",
                         code->co_argcount, COList_Size(funcargs));
            status = STATUS_EXCEPTION;
            goto fast_end;
        }
        size_t n = COList_Size(funcargs);
        for (int i = 0; i < n; i++) {
            x = COList_GetItem(funcargs, 0);
            CO_INCREF(x);
            SETLOCAL(n - i - 1, x);
            COList_DelItem(funcargs, 0);
        }
    }

    /* Main dispatch loop. */
    for (;;) {
        opcode = NEXTOP();
        switch (opcode) {
        case OP_BINARY_ADD:
            o1 = POP();
            o2 = TOP();
            if (COStr_Check(o1) && COStr_Check(o2)) {
                /* String + string: concatenation consumes o2's reference. */
                COStr_Concat(&o2, o1);
                x = o2;
                goto skip_decref_o2;
            } else {
                x = COInt_Type.tp_int_interface->int_add(o1, o2);
            }
            CO_DECREF(o2);
skip_decref_o2:
            CO_DECREF(o1);
            SET_TOP(x);
            if (!x) {
                status = STATUS_EXCEPTION;
                goto fast_end;
            }
            break;
        case OP_BINARY_SUB:
            /* Operand order: result is o2 - o1 (o2 was pushed first). */
            o1 = POP();
            o2 = TOP();
            x = COInt_Type.tp_int_interface->int_sub(o2, o1);
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_BINARY_MUL:
            o1 = POP();
            o2 = TOP();
            x = COInt_Type.tp_int_interface->int_mul(o2, o1);
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_BINARY_DIV:
            o1 = POP();
            o2 = TOP();
            x = COInt_Type.tp_int_interface->int_div(o2, o1);
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_BINARY_MOD:
            o1 = POP();
            o2 = TOP();
            x = COInt_Type.tp_int_interface->int_mod(o2, o1);
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_BINARY_SL:
            o1 = POP();
            o2 = TOP();
            x = COInt_Type.tp_int_interface->int_lshift(o2, o1);
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_BINARY_SR:
            o1 = POP();
            o2 = TOP();
            x = COInt_Type.tp_int_interface->int_rshift(o2, o1);
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_BINARY_SUBSCRIPT:
            o1 = POP();         /* key/index */
            o2 = TOP();         /* container */
            if (!CO_TYPE(o2)->tp_mapping_interface) {
                COErr_Format(COException_TypeError,
                             "'%.200s' object is not subscriptable",
                             CO_TYPE(o2)->tp_name);
                status = STATUS_EXCEPTION;
                /* NOTE(review): this error path falls through to SET_TOP(x)
                   below with `x` still holding a stale value — looks
                   unintended; confirm against the block-unwind handling. */
            } else {
                x = CO_TYPE(o2)->tp_mapping_interface->mp_subscript(o2, o1);
                if (!x) {
                    status = STATUS_EXCEPTION;
                    goto fast_end;
                }
            }
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_CMP:
            o1 = POP();
            o2 = TOP();
            oparg = NEXTARG();  /* comparison operator code */
            x = vm_cmp(oparg, o1, o2);
            if (!x) {
                status = STATUS_EXCEPTION;
                goto fast_end;
            }
            CO_DECREF(o1);
            CO_DECREF(o2);
            SET_TOP(x);
            break;
        case OP_UNARY_NEGATE:
            o1 = TOP();
            x = COInt_Type.tp_int_interface->int_neg(o1);
            CO_DECREF(o1);
            SET_TOP(x);
            break;
        case OP_UNARY_INVERT:
            o1 = TOP();
            x = COInt_Type.tp_int_interface->int_invert(o1);
            CO_DECREF(o1);
            SET_TOP(x);
            break;
        case OP_LOAD_LOCAL:
            oparg = NEXTARG();
            x = GETLOCAL(oparg);
            CO_INCREF(x);
            PUSH(x);
            break;
        case OP_LOAD_NAME:
            oparg = NEXTARG();
            o1 = GETITEM(names, oparg);
            x = COObject_get(o1);
            if (!x) {
                COErr_Format(COException_NameError,
                             "name '%s' is not defined", COStr_AsString(o1));
                status = STATUS_EXCEPTION;
                goto fast_end;
            }
            CO_INCREF(x);
            PUSH(x);
            break;
        case OP_LOAD_UPVAL:
            /* Upvalues live in cells on the function object. */
            oparg = NEXTARG();
            o1 = COTuple_GET_ITEM(((COFunctionObject *)func)->func_upvalues, oparg);
            o2 = COCell_Get(o1);
            PUSH(o2);
            break;
        case OP_LOAD_CONST:
            oparg = NEXTARG();
            x = GETITEM(consts, oparg);
            CO_INCREF(x);
            PUSH(x);
            break;
        case OP_BUILD_TUPLE:
            /* Pop oparg items (top of stack becomes the last element). */
            oparg = NEXTARG();
            x = COTuple_New(oparg);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    o1 = POP();
                    COTuple_SetItem(x, oparg, o1);
                    CO_DECREF(o1);
                }
                PUSH(x);
            }
            break;
        case OP_BUILD_LIST:
            oparg = NEXTARG();
            x = COList_New(oparg);
            if (x != NULL) {
                for (; --oparg >= 0;) {
                    o1 = POP();
                    COList_SetItem(x, oparg, o1);
                    CO_DECREF(o1);
                }
                PUSH(x);
            }
            break;
        case OP_DICT_BUILD:
            oparg = NEXTARG();
            x = CODict_New();
            PUSH(x);
            break;
        case OP_DICT_ADD:
            /* Stack: ... dict key value  ->  ... dict */
            o1 = POP();
            o2 = POP();
            o3 = POP();
            CODict_SetItem(o3, o2, o1);
            x = o3;
            CO_DECREF(o1);
            CO_DECREF(o2);
            PUSH(x);
            break;
        case OP_STORE_NAME:
            oparg = NEXTARG();
            o1 = GETITEM(names, oparg);
            o2 = POP();
            COObject_set(o1, o2);
            CO_DECREF(o2);
            break;
        case OP_STORE_UPVAL:
            oparg = NEXTARG();
            o1 = COTuple_GET_ITEM(((COFunctionObject *)func)->func_upvalues, oparg);
            o2 = POP();
            COCell_Set(o1, o2);
            CO_DECREF(o2);
            break;
        case OP_STORE_LOCAL:
            oparg = NEXTARG();
            o1 = POP();
            SETLOCAL(oparg, o1);
            break;
        case OP_JMPZ:
            /* Jump (absolute) when the popped value is false-ish. */
            oparg = NEXTARG();
            o1 = POP();
            if (o1 == CO_True) {
            } else if (o1 == CO_False) {
                JUMPTO(oparg);
            } else {
                err = COObject_IsTrue(o1);
                if (err > 0)
                    err = 0;
                else if (err == 0)
                    JUMPTO(oparg);
            }
            CO_DECREF(o1);
            break;
        case OP_JMP:
            /* Relative jump — contrast with OP_JMPX below (absolute). */
            oparg = NEXTARG();
            JUMPBY(oparg);
            break;
        case OP_JMPX:
            oparg = NEXTARG();
            JUMPTO(oparg);
            break;
        case OP_DECLARE_FUNCTION:
            /* TOS is a code object; wrap it in a function and capture each
               upvalue into a fresh cell. */
            o1 = POP();
            x = COFunction_New(o1);
            COCodeObject *c = (COCodeObject *)o1;
            for (int i = 0; i < CO_SIZE(c->co_upvals); i++) {
                COObject *name = COTuple_GET_ITEM(c->co_upvals, i);
                COObject *upvalue = COObject_get(name);
                if (!upvalue) {
                    // local variables
                    for (int j = 0; j < COTuple_Size(localnames); j++) {
                        if (COObject_CompareBool
                            (COTuple_GET_ITEM(localnames, j), name, Cmp_EQ)) {
                            upvalue = GETLOCAL(j);
                        }
                    }
                }
                COObject *cell = COCell_New(upvalue);
                COTuple_SET_ITEM(((COFunctionObject *)x)->func_upvalues, i, cell);
            }
            CO_DECREF(o1);
            PUSH(x);
            break;
        case OP_CALL_FUNCTION:
            o1 = POP();         /* callable */
            oparg = NEXTARG();  /* argument count */
            COObject *args = COTuple_New(oparg);
            while (--oparg >= 0) {
                o2 = POP();
                COTuple_SetItem(args, oparg, o2);
                CO_DECREF(o2);
            }
            if (COCFunction_Check(o1)) {
                /* Built-in C function: call directly. */
                COCFunction cfunc = COCFunction_GET_FUNCTION(o1);
                x = cfunc(NULL, args);
                CO_DECREF(o1);
                CO_DECREF(args);
                PUSH(x);
            } else if (COFunction_Check(o1)) {
                /* CO-level function: stash args in `funcargs` (reversed),
                   save this frame's state, push a new frame and re-enter.
                   NOTE(review): `func = o1` after CO_DECREF(o1) relies on
                   the new frame holding a reference to o1 — confirm
                   COFrame_New's ownership contract. */
                ssize_t i = CO_SIZE(args);
                while (--i >= 0) {
                    COList_Append(funcargs, COTuple_GET_ITEM(args, i));
                }
                CO_DECREF(args);
                TS(frame)->f_stacktop = stack_top;
                TS(frame)->f_lasti = (int)(next_code - first_code);
                TS(frame) = (COFrameObject *)COFrame_New((COObject *)TS(frame), o1, globals);
                CO_DECREF(o1);
                func = o1;
                goto new_frame;
            } else {
                x = COObject_Call(o1, args);
                CO_DECREF(args);
                CO_DECREF(o1);
                PUSH(x);
            }
            break;
        case OP_RETURN:
            /* Pop the caller frame back in; deliver the return value onto
               the caller's stack, or leave the VM if this was the root. */
            o1 = POP();
            TS(frame)->f_stacktop = stack_top;
            TS(frame)->f_lasti = (int)(next_code - first_code);
            COFrameObject *old_frame = (COFrameObject *)TS(frame);
            TS(frame) = (COFrameObject *)old_frame->f_prev;
            CO_DECREF(old_frame);
            if (!TS(frame)) {
                CO_DECREF(o1);
                goto vm_exit;
            }
            // init function return
            *(TS(frame)->f_stacktop++) = o1;
            goto new_frame;
            break;
        case OP_SETUP_LOOP:
            oparg = NEXTARG();
            COFrameBlock_Setup(TS(frame), opcode, oparg, STACK_LEVEL());
            break;
        case OP_SETUP_TRY:
            oparg = NEXTARG();
            COFrameBlock_Setup(TS(frame), opcode, oparg, STACK_LEVEL());
            break;
        case OP_POP_BLOCK:
            {
                COFrameBlock *fb = COFrameBlock_Pop(TS(frame));
                UNWIND_BLOCK(fb);
            }
            break;
        case OP_POP_TRY:
            {
                COFrameBlock *fb = COFrameBlock_Pop(TS(frame));
                UNWIND_BLOCK(fb);
            }
            break;
        case OP_BREAK_LOOP:
            status = STATUS_BREAK;      /* handled by the unwind loop below */
            break;
        case OP_CONTINUE_LOOP:
            oparg = NEXTARG();
            status = STATUS_CONTINUE;
            break;
        case OP_THROW:
            oparg = NEXTARG();
            if (oparg == 1) {
                o1 = POP();
            } else if (oparg == 0) {
                o1 = CO_None;
            } else {
                error("error oparg");
            }
            status = STATUS_EXCEPTION;
            COErr_SetObject(COException_SystemError, o1);
            break;
        case OP_DUP_TOP:
            o1 = TOP();
            CO_INCREF(o1);
            PUSH(o1);
            break;
        case OP_POP_TOP:
            o1 = POP();
            CO_DECREF(o1);
            break;
        case OP_END_TRY:
            /* Re-raise: TOS is the pending exception value. */
            o1 = POP();
            COErr_SetString(COException_SystemError, COStr_AsString(o1));
            status = STATUS_EXCEPTION;
            CO_DECREF(o1);
            break;
        case OP_SETUP_FINALLY:
            oparg = NEXTARG();
            COFrameBlock_Setup(TS(frame), opcode, oparg, STACK_LEVEL());
            break;
        case OP_END_FINALLY:
            o1 = POP();
            if (o1 != CO_None) {
                COErr_SetString(COException_SystemError, COStr_AsString(o1));
                status = STATUS_EXCEPTION;
            }
            CO_DECREF(o1);
            break;
        case OP_STORE_SUBSCRIPT:
            /* Stack: ... container key value */
            o1 = TOP();
            o2 = SECOND();
            o3 = THIRD();
            STACK_ADJ(-3);
            if (COList_Check(o3)) {
                err = COList_SetItem(o3, COInt_AsSsize_t(o2), o1);
            } else if (CODict_Check(o3)) {
                CODict_SetItem(o3, o2, o1);
            } else {
                error("wrong store subscript");
            }
            CO_DECREF(o1);
            CO_DECREF(o2);
            CO_DECREF(o3);
            break;
        case OP_GET_ITER:
            o1 = TOP();
            x = COObject_GetIter(o1);
            CO_DECREF(o1);
            SET_TOP(x);
            break;
        case OP_FOR_ITER:
            /* Push the next item, or on exhaustion pop the iterator and
               jump to the loop exit. */
            oparg = NEXTARG();
            o1 = TOP();
            x = (*o1->co_type->tp_iternext) (o1);
            if (x) {
                PUSH(x);
                break;
            }
            o1 = POP();
            CO_DECREF(o1);
            JUMPTO(oparg);
            break;
        default:
            error("unknown handle for opcode(%ld)\n", opcode);
        }

fast_end:
        /* Unwind frame blocks until one of them handles the pending
           break/continue/exception status. */
        while (status != STATUS_NONE && TS(frame)->f_iblock > 0) {
            COFrameBlock *fb = &TS(frame)->f_blockstack[TS(frame)->f_iblock - 1];
            if (fb->fb_type == OP_SETUP_LOOP && status == STATUS_CONTINUE) {
                /* NOTE(review): continue jumps via `oparg` (set by
                   OP_CONTINUE_LOOP) and keeps the block — confirm. */
                status = STATUS_NONE;
                JUMPTO(oparg);
                break;
            }
            TS(frame)->f_iblock--;
            UNWIND_BLOCK(fb);
            if (fb->fb_type == OP_SETUP_LOOP && status == STATUS_BREAK) {
                status = STATUS_NONE;
                JUMPTO(fb->fb_handler);
                break;
            }
            if (fb->fb_type == OP_SETUP_TRY && status == STATUS_EXCEPTION) {
                status = STATUS_NONE;
                COObject *exc, *val, *tb;
                COErr_Fetch(&exc, &val, &tb);
                PUSH(val);
                JUMPTO(fb->fb_handler);
                break;
            }
        }
        /* End the loop if we still have an error (or return) */
        x = NULL;
        if (status != STATUS_NONE)
            break;
    }

vm_exit:
    /* Clear frame stack. */
    while (TS(frame)) {
        COFrameObject *tmp_frame = (COFrameObject *)TS(frame)->f_prev;
        CO_DECREF(TS(frame));
        TS(frame) = tmp_frame;
    }
    return x;
}
/*
 * Stripped-down CPython 2.x-style frame evaluator: executes the bytecode of
 * frame `f` and returns the frame's result (new reference), or NULL on
 * error / recursion overflow.  Unsupported operations call Py_FatalError.
 */
static PyObject *
eval_frame(PyFrameObject *f)
{
    LOG("> eval_frame\n");
    {
        PyObject **stack_pointer;           /* Next free slot in value stack */
        register unsigned char *next_instr;
        register int opcode=0;              /* Current opcode */
        register int oparg=0;               /* Current opcode argument, if any */
        register enum why_code why;         /* Reason for block stack unwind */
        register int err;                   /* Error status -- nonzero if error */
        register PyObject *x;               /* Result object -- NULL if error */
        register PyObject *t, *u, *v;       /* Temporary objects popped off stack */
        register PyObject *w;
        register PyObject **fastlocals, **freevars;
        PyObject *retval = NULL;            /* Return value */
        PyThreadState *tstate = PyThreadState_GET();
        PyCodeObject *co;
        unsigned char *first_instr;
        PyObject *names;
        PyObject *consts;

/* Tuple access macros */
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))

/* Code access macros */
#define INSTR_OFFSET()  (next_instr - first_instr)
#define NEXTOP()        (*next_instr++)
#define NEXTARG()       (next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
#define JUMPTO(x)       (next_instr = first_instr + (x))
#define JUMPBY(x)       (next_instr += (x))

/* OpCode prediction macros.  Some opcodes tend to come in pairs (e.g.
   COMPARE_OP then JUMP_IF_FALSE, often followed by POP_TOP), so the first
   handler can jump straight to the predicted second handler.  Verifying the
   prediction is a single test of *next_instr against a constant; a correct
   prediction skips the whole eval-loop preamble (ticker check, HAS_ARG test
   and the switch dispatch). */
#define PREDICT(op)     if (*next_instr == op) goto PRED_##op
#define PREDICTED(op)   PRED_##op: next_instr++
#define PREDICTED_WITH_ARG(op) PRED_##op: oparg = (next_instr[2]<<8) + \
                next_instr[1]; next_instr += 3

/* Stack manipulation macros */
#define STACK_LEVEL()   (stack_pointer - f->f_valuestack)
#define EMPTY()         (STACK_LEVEL() == 0)
#define TOP()           (stack_pointer[-1])
#define SECOND()        (stack_pointer[-2])
#define THIRD()         (stack_pointer[-3])
#define FOURTH()        (stack_pointer[-4])
#define SET_TOP(v)      (stack_pointer[-1] = (v))
#define SET_SECOND(v)   (stack_pointer[-2] = (v))
#define SET_THIRD(v)    (stack_pointer[-3] = (v))
#define SET_FOURTH(v)   (stack_pointer[-4] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v)   (*stack_pointer++ = (v))
#define BASIC_POP()     (*--stack_pointer)
#define PUSH(v)         BASIC_PUSH(v)
#define POP()           BASIC_POP()
#define STACKADJ(n)     BASIC_STACKADJ(n)

/* Local variable macros */
#define GETLOCAL(i)     (fastlocals[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value)      do { PyObject *tmp = GETLOCAL(i); \
                                     GETLOCAL(i) = value; \
                                     Py_XDECREF(tmp); } while (0)

        /* Start of code */
        if (f == NULL)
            return NULL;

        /* push frame */
        if (++tstate->recursion_depth > recursion_limit) {
            --tstate->recursion_depth;
            /* ERROR */
            tstate->frame = f->f_back;
            return NULL;
        }
        tstate->frame = f;
        /* tracing elided */

        co = f->f_code;
        names = co->co_names;
        consts = co->co_consts;
        fastlocals = f->f_localsplus;
        freevars = f->f_localsplus + f->f_nlocals;
        _PyCode_GETCODEPTR(co, &first_instr);
        /* f->f_lasti refers to the index of the LAST instruction executed
           (PyFrame_New initializes it to -1, i.e. the index before the
           first instruction), so resuming at f_lasti + 1 is correct. */
        next_instr = first_instr + f->f_lasti + 1;
        stack_pointer = f->f_stacktop;
        f->f_stacktop = NULL;   /* remains NULL unless yield suspends frame */

        why = WHY_NOT;
        err = 0;
        x = Py_None;            /* Not a reference, just anything non-NULL */
        w = NULL;

        for (;;) {
            /* Do periodic things.  Doing this every time through the loop
               would add too much overhead, so we do it only every Nth
               instruction, and also when ``things_to_do`` is set (an
               asynchronous event needs attention, e.g. a signal handler);
               see Py_AddPendingCall() and Py_MakePendingCalls(). */
            if (--_Py_Ticker < 0) {
                /* @@@ check for SETUP_FINALLY elided */
                _Py_Ticker = _Py_CheckInterval;
                tstate->tick_counter++;
                if (things_to_do) {
                    if (Py_MakePendingCalls() < 0) {
                        why = WHY_EXCEPTION;
                        goto on_error;
                    }
                }
            }

fast_next_opcode:
            f->f_lasti = INSTR_OFFSET();

            /* Extract opcode and argument */
            opcode = NEXTOP();
            if (HAS_ARG(opcode))
                oparg = NEXTARG();

            /* Main switch on opcode */
            switch (opcode) {
            /* BEWARE!  It is essential that any operation that fails sets
               either x to NULL, err to nonzero, or why to anything but
               WHY_NOT, and that no operation that succeeds does this! */

            /* case STOP_CODE: this is an error! */

            case LOAD_FAST:
                x = GETLOCAL(oparg);
                if (x != NULL) {
                    Py_INCREF(x);
                    PUSH(x);
                    goto fast_next_opcode;
                }
                /* ERROR? unbound local falls through with x == NULL */
                break;

            case STORE_FAST:
                v = POP();
                SETLOCAL(oparg, v);
                continue;

            case LOAD_CONST:
                x = GETITEM(consts, oparg);
                Py_INCREF(x);
                PUSH(x);
                goto fast_next_opcode;

            PREDICTED(POP_TOP);
            case POP_TOP:
                v = POP();
                Py_DECREF(v);
                goto fast_next_opcode;

            case UNARY_NOT:
                v = TOP();
                err = PyObject_IsTrue(v);
                Py_DECREF(v);
                if (err == 0) {
                    Py_INCREF(Py_True);
                    SET_TOP(Py_True);
                    continue;
                }
                else if (err > 0) {
                    Py_INCREF(Py_False);
                    SET_TOP(Py_False);
                    err = 0;
                    continue;
                }
                STACKADJ(-1);   /* IsTrue failed: drop the slot, report error */
                break;

            case BINARY_MODULO:
                w = POP();
                v = TOP();
                x = PyNumber_Remainder(v, w);
                Py_DECREF(v);
                Py_DECREF(w);
                SET_TOP(x);
                if (x != NULL) continue;
                break;

            case BINARY_ADD:
                w = POP();
                v = TOP();
                if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
                    /* INLINE: int + int (overflow check via sign bits) */
                    register long a, b, i;
                    a = PyInt_AS_LONG(v);
                    b = PyInt_AS_LONG(w);
                    i = a + b;
                    if ((i^a) < 0 && (i^b) < 0)
                        goto slow_add;
                    x = PyInt_FromLong(i);
                }
                else {
                  slow_add:
                    Py_FatalError("slow add not supported.");
                }
                Py_DECREF(v);
                Py_DECREF(w);
                SET_TOP(x);
                if (x != NULL) continue;
                break;

            case STORE_SLICE+0:
            case STORE_SLICE+1:
            case STORE_SLICE+2:
            case STORE_SLICE+3:
                /* Low two opcode bits encode which slice bounds are present. */
                if ((opcode-STORE_SLICE) & 2) w = POP();
                else w = NULL;
                if ((opcode-STORE_SLICE) & 1) v = POP();
                else v = NULL;
                u = POP();
                t = POP();
                err = assign_slice(u, v, w, t); /* u[v:w] = t */
                Py_DECREF(t);
                Py_DECREF(u);
                Py_XDECREF(v);
                Py_XDECREF(w);
                if (err == 0) continue;
                break;

            case STORE_SUBSCR:
                w = POP();
                v = POP();
                u = POP();
                /* v[w] = u */
                err = PyObject_SetItem(v, w, u);
                Py_DECREF(u);
                Py_DECREF(v);
                Py_DECREF(w);
                if (err == 0) continue;
                break;

            case BINARY_SUBSCR:
                w = POP();
                v = TOP();
                if (PyList_CheckExact(v) && PyInt_CheckExact(w)) {
                    /* INLINE: list[int] */
                    long i = PyInt_AsLong(w);
                    if (i < 0)
                        i += PyList_GET_SIZE(v);
                    if (i < 0 || i >= PyList_GET_SIZE(v)) {
                        /* ERROR */
                        printf("list index out of range\n");
                        x = NULL;
                    }
                    else {
                        x = PyList_GET_ITEM(v, i);
                        Py_INCREF(x);
                    }
                }
                else
                    x = PyObject_GetItem(v, w);
                Py_DECREF(v);
                Py_DECREF(w);
                SET_TOP(x);
                if (x != NULL) continue;
                break;

            case BINARY_AND:
                w = POP();
                v = TOP();
                x = PyNumber_And(v, w);
                Py_DECREF(v);
                Py_DECREF(w);
                SET_TOP(x);
                if (x != NULL) continue;
                break;

            case PRINT_ITEM:
                v = POP();
                PyObject_Print(v);
                Py_DECREF(v);
                break;

            case PRINT_NEWLINE:
                printf("\n");
                break;

            case RETURN_VALUE:
                retval = POP();
                why = WHY_RETURN;       /* unwinds via the loop exit below */
                break;

            case POP_BLOCK:
                {
                    PyTryBlock *b = PyFrame_BlockPop(f);
                    while (STACK_LEVEL() > b->b_level) {
                        v = POP();
                        Py_DECREF(v);
                    }
                }
                break;

            case STORE_NAME:
                w = GETITEM(names, oparg);
                v = POP();
                if ((x = f->f_locals) == NULL) {
                    /* ERROR */
                    printf("STORE_NAME ERROR\n");
                    break;
                }
                err = PyDict_SetItem(x, w, v);
                Py_DECREF(v);
                break;

            case LOAD_NAME:
                /* Lookup order: locals, globals, builtins. */
                w = GETITEM(names, oparg);
                if ((x = f->f_locals) == NULL) {
                    /* ERROR */
                    printf("LOAD_NAME ERROR\n");
                    break;
                }
                x = PyDict_GetItem(x, w);
                if (x == NULL) {
                    x = PyDict_GetItem(f->f_globals, w);
                    if (x == NULL) {
                        x = PyDict_GetItem(f->f_builtins, w);
                        if (x == NULL) {
                            printf("can't find %s\n", ((PyStringObject *)w)->ob_sval);
                            /* format_exc_check_arg */
                            break;
                        }
                    }
                }
                Py_INCREF(x);
                PUSH(x);
                break;

            case LOAD_GLOBAL:
                w = GETITEM(names, oparg);
                if (PyString_CheckExact(w)) {
                    /* Inline the PyDict_GetItem() calls using the interned
                       string's cached hash.  WARNING: extreme speed hack —
                       do not try this at home. */
                    long hash = ((PyStringObject *)w)->ob_shash;
                    if (hash != -1) {
                        PyDictObject *d;
                        d = (PyDictObject *)(f->f_globals);
                        x = d->ma_lookup(d, w, hash)->me_value;
                        if (x != NULL) {
                            Py_INCREF(x);
                            PUSH(x);
                            continue;
                        }
                        d = (PyDictObject *)(f->f_builtins);
                        x = d->ma_lookup(d, w, hash)->me_value;
                        if (x != NULL) {
                            Py_INCREF(x);
                            PUSH(x);
                            continue;
                        }
                        goto load_global_error;
                    }
                }
                /* This is the un-inlined version of the code above */
                x = PyDict_GetItem(f->f_globals, w);
                if (x == NULL) {
                    x = PyDict_GetItem(f->f_builtins, w);
                    if (x == NULL) {
                      load_global_error:
                        printf("LOAD_GLOBAL ERROR %s", ((PyStringObject *)w)->ob_sval);
                        break;
                    }
                }
                Py_INCREF(x);
                PUSH(x);
                break;

            case LOAD_ATTR:
                w = GETITEM(names, oparg);
                v = TOP();
                x = PyObject_GetAttr(v, w);
                Py_DECREF(v);
                SET_TOP(x);
                if (x != NULL) continue;
                break;

            case IMPORT_NAME:
                /* Delegates to the __import__ builtin. */
                w = GETITEM(names, oparg);
                x = PyDict_GetItemString(f->f_builtins, "__import__");
                if (x == NULL) {
                    printf("__import__ not found");
                    break;
                }
                u = TOP();
                w = Py_BuildValue("(O)", w);
                Py_DECREF(u);
                if (w == NULL) {
                    u = POP();
                    x = NULL;
                    break;
                }
                x = PyEval_CallObject(x, w);
                Py_DECREF(w);
                SET_TOP(x);
                if (x != NULL) continue;
                break;

            case JUMP_FORWARD:
                JUMPBY(oparg);
                goto fast_next_opcode;

            PREDICTED_WITH_ARG(JUMP_IF_FALSE);
            case JUMP_IF_FALSE:
                /* Note: peeks at TOP without popping; a following POP_TOP
                   is expected (and predicted). */
                w = TOP();
                if (w == Py_True) {
                    PREDICT(POP_TOP);
                    goto fast_next_opcode;
                }
                if (w == Py_False) {
                    JUMPBY(oparg);
                    goto fast_next_opcode;
                }
                err = PyObject_IsTrue(w);
                if (err > 0)
                    err = 0;
                else if (err == 0)
                    JUMPBY(oparg);
                else
                    break;
                continue;

            case JUMP_ABSOLUTE:
                JUMPTO(oparg);
                continue;

            case SETUP_LOOP:
                PyFrame_BlockSetup(f, opcode, INSTR_OFFSET() + oparg, STACK_LEVEL());
                continue;

            case CALL_FUNCTION:
                x = call_function(&stack_pointer, oparg);
                PUSH(x);
                if (x != NULL) continue;
                break;

            case MAKE_FUNCTION:
                v = POP();      /* code object */
                x = PyFunction_New(v, f->f_globals);
                Py_DECREF(v);
                /* XXX Maybe this should be a separate opcode? */
                if (x != NULL && oparg > 0) {
                    /* oparg default values follow on the stack */
                    v = PyTuple_New(oparg);
                    if (v == NULL) {
                        Py_DECREF(x);
                        x = NULL;
                        break;
                    }
                    while (--oparg >= 0) {
                        w = POP();
                        PyTuple_SET_ITEM(v, oparg, w);
                    }
                    err = PyFunction_SetDefaults(x, v);
                    Py_DECREF(v);
                }
                PUSH(x);
                break;

            case SET_LINENO:
                break;

            default:
                printf("opcode: %d\n", opcode);
                Py_FatalError("unknown opcode");
            } /* switch */

on_error:
            if (why == WHY_NOT) {
                if (err == 0 && x != NULL) {
                    continue;   /* Normal, fast path */
                }
                why = WHY_EXCEPTION;
                x = Py_None;
                err = 0;
            }

            /* End the loop if we still have an error (or return) */
            if (why != WHY_NOT)
                break;
        } /* main loop */

        if (why != WHY_YIELD) {
            /* Pop remaining stack entries -- but when yielding */
            while (!EMPTY()) {
                v = POP();
                Py_XDECREF(v);
            }
        }

        if (why != WHY_RETURN && why != WHY_YIELD)
            retval = NULL;

        /* pop frame */
        --tstate->recursion_depth;
        tstate->frame = f->f_back;
        return retval;
    }
}
//=============================================================================
// METHOD    : SPELLbytecode::analyze
//=============================================================================
// Single-pass scan over m_code's bytecode.  Builds one BLine record per
// instruction-at-a-new-source-line (m_lines), detects try/except/finally
// structure (m_tryBlocks), and records the last address/line reached.
// Uses the NEXTOP/NEXTARG/INSTR_OFFSET macros defined elsewhere in this
// translation unit (they operate on first_instr/next_instr).
void SPELLbytecode::analyze()
{
    assert( m_code != NULL );
    // Pointer to initial instruction (used by macros)
    unsigned char* first_instr = (unsigned char*) PyString_AS_STRING(m_code->co_code);
    // Pointer to current instruction (used by macros)
    register unsigned char* next_instr = first_instr;
    // Opcode argument
    unsigned int oparg;
    // Stores the previous line
    unsigned int prevLine = 0;
    // Stores the last opcode
    unsigned int prevOpCode = 0;
    // Opcode count in line
    unsigned short opcodeCount = 0;
    // Will be true when there is no more bytecode to process
    bool finished = false;
    // Nesting balance: incremented on value loads, decremented on calls.
    // Lines are considered non-executable while inside a pending call.
    unsigned int callDepth = 0;
    // Try block structure (accumulated, then pushed to m_tryBlocks)
    TryBlock tb;
    tb.try_lineno = 0;
    tb.end_try_lineno = 0;
    tb.except_lineno = 0;
    tb.end_except_lineno = 0;
    tb.end_lineno = 0;
    // Holds the bytecode offset for except and finally statements
    unsigned int except_offset = 0;
    unsigned int finally_offset = 0;  // NOTE(review): written below but never read — confirm intent

    while(not finished)
    {
        // Create one BLine info structure per bytecode instruction
        BLine info;
        // Get the instruction offset
        info.offset = INSTR_OFFSET();
        // Get the corresponding script line
        info.lineno = PyCode_Addr2Line(m_code, info.offset);
        // Obtain the opcode
        info.opcode = NEXTOP();
        // Track the number of opcodes per line in the lnotab.
        if ((prevLine>0) && (info.lineno != prevLine))
        {
            opcodeCount = 0;
        }
        else
        {
            opcodeCount++;
        }

        //////////////////////////////////////////////////////////////////////
        // PHASE 1 - UPDATE PREVIOUS LINE INFORMATION (ALREADY STORED) IF NEEDED
        //////////////////////////////////////////////////////////////////////
        // #1 Detect binary add.  This helps us detect lines that shall be
        // executed together, like statements spread over several lines with
        // binary add (+\): mark the previously stored line keepWithNext.
        if ((opcodeCount == 0)&&(prevOpCode == BINARY_ADD)&&(info.opcode==LOAD_CONST))
        {
            LineList::iterator it = m_lines.end();
            it--;
            BLine prev = *it;
            m_lines.pop_back();
            prev.keepWithNext = true;
            m_lines.push_back(prev);
        }
        //////////////////////////////////////////////////////////////////////
        // #2 Special checks for return statements: we may need to update the
        // previous line (it returns a constant / fast local).
        if (info.opcode == RETURN_VALUE && ((prevOpCode == LOAD_FAST || prevOpCode == LOAD_CONST)))
        {
            LineList::iterator it = m_lines.end();
            it--;
            BLine prev = *it;
            m_lines.pop_back();
            prev.returnConst = true;
            m_lines.push_back(prev);
        }

        // We will ignore this, for the moment
        oparg = 0;
        // To decide wether store the bline information or not
        bool storeit = false;  // NOTE(review): declared but never used — confirm
        if (HAS_ARG(info.opcode)) oparg = NEXTARG();

        //////////////////////////////////////////////////////////////////////
        // PHASE 2 - BUILD AND STORE NEXT BLINE INFORMATION
        //////////////////////////////////////////////////////////////////////
        if (( prevLine > 0 ) && ( info.lineno != prevLine ))
        {
            // Default values
            info.executable = false;
            info.returnConst = false;
            info.keepWithNext = false;
            /** \todo
            // Depending on the bytecode, either set the block as active,
            // or finish the loop (RETURN_VALUE is found at the end of the script)
            // This is maybe wrong, need to check if RETURN_VALUE is found
            // in function code objects, probably yes...
            */
            switch(info.opcode)
            {
            case LOAD_NAME:
            case LOAD_GLOBAL:
            case LOAD_CONST:
                callDepth++;
                info.executable = true;
                break;
            case CALL_FUNCTION:
                callDepth--;
                info.executable = true;
                break;
            case STORE_NAME:
            case IMPORT_NAME:
            case JUMP_FORWARD:
            case JUMP_IF_FALSE:
            case JUMP_IF_TRUE:
            case JUMP_ABSOLUTE:
            case RETURN_VALUE:
                {
                    // Only executable when not inside a pending call chain.
                    info.executable = (callDepth==0);
                    break;
                }
            default:
                info.executable = true;
                break;
            }
            // Store the info
            m_lines.push_back(info);
        }
        // The very first line is always executable, and needs to be stored explicitly
        else if (prevLine == 0)
        {
            info.executable = true;
            // Store the info
            m_lines.push_back(info);
        }

        //////////////////////////////////////////////////////////////////////
        // PHASE 3 - ADDITIONAL INFORMATION FOR TRY BLOCKS, LAST ADDRESS, LAST LINE
        //////////////////////////////////////////////////////////////////////
        switch(info.opcode)
        {
        case RETURN_VALUE:
            // End of the scanned code object.
            m_lastAddr = info.offset;
            finished = true;
            break;
        case SETUP_EXCEPT:
            tb.try_lineno = info.lineno;
            except_offset = info.offset + oparg + 3; // This is the real destination offset
            break;
        case SETUP_FINALLY:
            finally_offset = info.offset + oparg + 3; // This is the 'finally' destination offset
            break;
        case END_FINALLY:
            if (tb.try_lineno != 0 && tb.end_lineno == 0)
            {
                // First END_FINALLY for this try: close and store the block.
                tb.end_lineno = info.lineno;
                tb.end_except_lineno = info.lineno;
                m_tryBlocks.push_back( tb );
                tb.try_lineno = 0;
                tb.end_try_lineno = 0;
                tb.except_lineno = 0;
                tb.end_except_lineno = 0;
                tb.end_lineno = 0;
            }
            else
            {
                // Update the last try block to update it with the finally block
                TryBlockList::iterator it = m_tryBlocks.end();
                it--;
                TryBlock prev = *it;
                m_tryBlocks.pop_back();
                prev.end_except_lineno = prev.end_lineno;
                prev.end_lineno = info.lineno;
                m_tryBlocks.push_back( prev );
            }
            break;
        }
        // This should always happen between a SETUP_EXCEPT and an END_FINALLY
        if ( (tb.try_lineno >0) && (except_offset == info.offset))
        {
            // The last line before the except
            tb.end_try_lineno = prevLine;
            // The line of the except
            tb.except_lineno = info.lineno;
        }
        prevLine = info.lineno;
        prevOpCode = info.opcode;
        m_lastLine = info.lineno;
    }
}
//USAGE: imgcmp -c [-otheropts] img1 img2 // imgcmp // imgcmp [-d] [workdir] [outdir] void ParseCmdLine(int argc, char *argv[]) { int i, j; for (i = 1; i != argc; i++) { if (*argv[i] != '-') { if (!workdir[0]) strlcpy(workdir, argv[i], sizeof(workdir)); else fprintf(stderr, "WARNING: ignored parameter '%s'\n", argv[i]); continue; } switch (argv[i][1]) { case 'a': //Add image // TODO: this is currently the default option break; case 'c': //Cache control NEXTARG(); for (j = 0; j != ARRAYLEN(cache_cmd_strs) && strcmp(argv[i], cache_cmd_strs[j]); j++); switch (j) { case CACHE_CMD_SETINDEX: NEXTARG(); strlcpy(thumb_btree_fn, argv[i], sizeof(thumb_cache_fn)); break; case CACHE_CMD_SETDATA: NEXTARG(); strlcpy(thumb_cache_fn, argv[i], sizeof(thumb_cache_fn)); break; case CACHE_CMD_DUMPALL: cache_dump = TC_DUMP_IMGS; break; case CACHE_CMD_DUMPINFO: cache_dump = TC_DUMP_INFO; break; case CACHE_CMD_DISABLE: cache_dont_use = 1; break; case CACHE_CMD_NOUPDATE: cache_no_update = 1; break; default: USAGE(); } break; case 'd': //Deduplicate deduplicate_dir = 1; break; case 'h': //Help case '?': USAGE(); case 'm': //coMpare <also takes method as option> switch (argv[i][2]) { //comparison method case 'a': comparison = IMG_CMP_ABS; break; case 'r': comparison = IMG_CMP_RANGE; break; case 'h': //histogram comparison type if (argv[i][3] == 'r') comparison = IMG_CMP_HISTRGB; else comparison = IMG_CMP_HISTHSV; break; case 'p': comparison = IMG_CMP_PHASH; break; default: fprintf(stderr, "WARNING: unrecognized comparison " "option '%c'\n", argv[i][2]); } NEXTARG(); strlcpy(imgpath1, argv[i], sizeof(imgpath1)); NEXTARG(); strlcpy(imgpath2, argv[i], sizeof(imgpath2)); break; case 'o': //Output filepath NEXTARG(); strlcpy(outpath, argv[i], sizeof(outpath)); break; case 'p': //percent difference of images (number of pixels) NEXTARG(); npixels_diff = atoi(argv[i]); break; case 'r': //Recursive scan scan_recursive = 1; break; case 't': //pixel difference Tolerance NEXTARG(); pixel_tolerance = 
atoi(argv[i]); break; case 'v': //verbose verbose++; break; case 'V': //version puts(TEXT_VERSION); exit(0); case '-': //full-string flags, for less commonly used options. break; default: fprintf(stderr, "WARNING: unrecognized option " "'%c', ignoring", argv[i][1]); } } }
/*
 * eval_frame -- a stripped-down CPython-style bytecode interpreter loop.
 *
 * Executes the code object attached to frame `f` and returns the frame's
 * result as a new reference, or NULL on error (including overflow of the
 * recursion limit).  Only a handful of opcodes are handled here
 * (LOAD_CONST, BINARY_ADD, PRINT_ITEM, PRINT_NEWLINE, RETURN_VALUE);
 * any other opcode aborts via Py_FatalError.
 */
static PyObject *
eval_frame(PyFrameObject *f)
{
    LOG("> eval_frame\n");
    PyObject **stack_pointer;        /* Next free slot in value stack */
    register unsigned char *next_instr;
    register int opcode=0;           /* Current opcode */
    register int oparg=0;            /* Current opcode argument, if any */
    register enum why_code why;      /* Reason for block stack unwind */
    register int err;                /* Error status -- nonzero if error */
    register PyObject *x;            /* Result object -- NULL if error */
    register PyObject *v;            /* Temporary objects popped off stack */
    register PyObject *w;
    register PyObject **fastlocals, **freevars;
    PyObject *retval = NULL;         /* Return value */
    PyThreadState *tstate = PyThreadState_GET();
    PyCodeObject *co;
    unsigned char *first_instr;
    PyObject *names;
    PyObject *consts;

/* Tuple access macros */
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))

/* Code access macros */
#define INSTR_OFFSET() (next_instr - first_instr)
#define NEXTOP() (*next_instr++)
/* Opcode arguments are 16-bit, stored low byte first. */
#define NEXTARG() (next_instr += 2, (next_instr[-1]<<8) + next_instr[-2])
#define JUMPTO(x) (next_instr = first_instr + (x))
#define JUMPBY(x) (next_instr += (x))

/* OpCode prediction macros
   Some opcodes tend to come in pairs thus making it possible to predict
   the second code when the first is run.  For example, COMPARE_OP is
   often followed by JUMP_IF_FALSE or JUMP_IF_TRUE.  And, those opcodes
   are often followed by a POP_TOP.
   Verifying the prediction costs a single high-speed test of register
   variable against a constant.  If the pairing was good, then the
   processor has a high likelihood of making its own successful branch
   prediction which results in a nearly zero overhead transition to the
   next opcode.
   A successful prediction saves a trip through the eval-loop including
   its two unpredictable branches, the HASARG test and the switch-case.
*/
#define PREDICT(op) if (*next_instr == op) goto PRED_##op
#define PREDICTED(op) PRED_##op: next_instr++
#define PREDICTED_WITH_ARG(op) PRED_##op: oparg = (next_instr[2]<<8) + \
                next_instr[1]; next_instr += 3

/* Stack manipulation macros */
#define STACK_LEVEL() (stack_pointer - f->f_valuestack)
#define EMPTY() (STACK_LEVEL() == 0)
#define TOP() (stack_pointer[-1])
#define SECOND() (stack_pointer[-2])
#define THIRD() (stack_pointer[-3])
#define FOURTH() (stack_pointer[-4])
#define SET_TOP(v) (stack_pointer[-1] = (v))
#define SET_SECOND(v) (stack_pointer[-2] = (v))
#define SET_THIRD(v) (stack_pointer[-3] = (v))
#define SET_FOURTH(v) (stack_pointer[-4] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v) (*stack_pointer++ = (v))
#define BASIC_POP() (*--stack_pointer)
#define PUSH(v) BASIC_PUSH(v)
#define POP() BASIC_POP()
#define STACKADJ(n) BASIC_STACKADJ(n)

/* Local variable macros */
#define GETLOCAL(i) (fastlocals[i])

/* The SETLOCAL() macro must not DECREF the local variable in-place and
   then store the new value; it must copy the old value to a temporary
   value, then store the new value, and then DECREF the temporary value.
   This is because it is possible that during the DECREF the frame is
   accessed by other code (e.g. a __del__ method or gc.collect()) and the
   variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value) do { PyObject *tmp = GETLOCAL(i); \
                GETLOCAL(i) = value; \
                Py_XDECREF(tmp); } while (0)

/* Start of code */

    if (f == NULL)
        return NULL;

    /* push frame -- refuse to run if the recursion limit is exceeded */
    if (++tstate->recursion_depth > recursion_limit) {
        --tstate->recursion_depth;
        /* ERROR */
        tstate->frame = f->f_back;
        return NULL;
    }
    tstate->frame = f;
    /* tracing elided */

    co = f->f_code;
    names = co->co_names;
    consts = co->co_consts;
    fastlocals = f->f_localsplus;
    freevars = f->f_localsplus + f->f_nlocals;
    _PyCode_GETCODEPTR(co, &first_instr);
    /* An explanation is in order for the next line.
       f->f_lasti now refers to the index of the last instruction
       executed.  You might think this was obvious from the name, but
       this wasn't always true before 2.3!  PyFrame_New now sets
       f->f_lasti to -1 (i.e. the index *before* the first instruction)
       and YIELD_VALUE doesn't fiddle with f_lasti any more.  So this
       does work.  Promise. */
    next_instr = first_instr + f->f_lasti + 1;
    stack_pointer = f->f_stacktop;
    f->f_stacktop = NULL;   /* remains NULL unless yield suspends frame */

    why = WHY_NOT;
    err = 0;
    x = Py_None;            /* Not a reference, just anything non-NULL */
    w = NULL;

    for (;;) {
        /* @@@ pending calls elided */

    fast_next_opcode:
        f->f_lasti = INSTR_OFFSET();

        /* Extract opcode and argument */
        opcode = NEXTOP();
        if (HAS_ARG(opcode))
            oparg = NEXTARG();

        /* Main switch on opcode */
        switch (opcode) {
        /* BEWARE!
           It is essential that any operation that fails sets either x
           to NULL, err to nonzero, or why to anything but WHY_NOT, and
           that no operation that succeeds does this! */

        /* case STOP_CODE: this is an error! */

        case LOAD_CONST:
            x = GETITEM(consts, oparg);
            Py_INCREF(x);
            PUSH(x);
            goto fast_next_opcode;

        case BINARY_ADD:
            w = POP();
            v = TOP();
            if (PyInt_CheckExact(v) && PyInt_CheckExact(w)) {
                /* INLINE: int + int */
                register long a, b, i;
                a = PyInt_AS_LONG(v);
                b = PyInt_AS_LONG(w);
                i = a + b;
                /* Overflow check: the sum's sign differs from both
                   operands' signs only on overflow. */
                if ((i^a) < 0 && (i^b) < 0)
                    goto slow_add;
                x = PyInt_FromLong(i);
            }
            else {
            slow_add:
                Py_FatalError("slow add not supported.");
            }
            Py_DECREF(v);
            Py_DECREF(w);
            SET_TOP(x);
            if (x != NULL) continue;
            break;

        case PRINT_ITEM:
            v = POP();
            /* NOTE(review): one-argument PyObject_Print differs from the
               standard CPython API (obj, FILE *, flags) -- presumably a
               local simplified variant; confirm its declaration. */
            PyObject_Print(v);
            Py_DECREF(v);
            break;

        case PRINT_NEWLINE:
            printf("\n");
            break;

        case RETURN_VALUE:
            retval = POP();
            why = WHY_RETURN;
            break;

        default:
            Py_FatalError("unknown opcode");
        } /* switch */

        if (why == WHY_NOT) {
            if (err == 0 && x != NULL) {
                continue;   /* Normal, fast path */
            }
            /* Something failed without setting `why`; convert the
               x/err signal into an exception unwind. */
            why = WHY_EXCEPTION;
            x = Py_None;
            err = 0;
        }

        /* End the loop if we still have an error (or return) */
        if (why != WHY_NOT)
            break;
    } /* main loop */

    if (why != WHY_YIELD) {
        /* Pop remaining stack entries -- but not when yielding */
        while (!EMPTY()) {
            v = POP();
            Py_XDECREF(v);
        }
    }

    if (why != WHY_RETURN && why != WHY_YIELD)
        retval = NULL;

    /* pop frame */
    --tstate->recursion_depth;
    tstate->frame = f->f_back;
    return retval;
}