static PyObject *
function_call(PyObject *func, PyObject *arg, PyObject *kw)
{
    PyObject *result;
    PyObject *argdefs;
    PyObject *kwtuple = NULL;
    PyObject **d, **k;
    Py_ssize_t nk, nd;

    argdefs = PyFunction_GET_DEFAULTS(func);
    if (argdefs != NULL && PyTuple_Check(argdefs)) {
        d = &PyTuple_GET_ITEM((PyTupleObject *)argdefs, 0);
        nd = PyTuple_GET_SIZE(argdefs);
    }
    else {
        d = NULL;
        nd = 0;
    }

    if (kw != NULL && PyDict_Check(kw)) {
        Py_ssize_t pos, i;

        nk = PyDict_GET_SIZE(kw);
        kwtuple = PyTuple_New(2*nk);
        if (kwtuple == NULL)
            return NULL;
        k = &PyTuple_GET_ITEM(kwtuple, 0);
        pos = i = 0;
        while (PyDict_Next(kw, &pos, &k[i], &k[i+1])) {
            Py_INCREF(k[i]);
            Py_INCREF(k[i+1]);
            i += 2;
        }
        nk = i/2;
    }
    else {
        k = NULL;
        nk = 0;
    }

    result = PyEval_EvalCodeEx(
        PyFunction_GET_CODE(func),
        PyFunction_GET_GLOBALS(func), (PyObject *)NULL,
        &PyTuple_GET_ITEM(arg, 0), PyTuple_GET_SIZE(arg),
        k, nk, d, nd,
        PyFunction_GET_KW_DEFAULTS(func),
        PyFunction_GET_CLOSURE(func));

    Py_XDECREF(kwtuple);

    return result;
}
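The notable cost in CPython's function_call() (Objects/funcobject.c) is the keyword handling: the dict is flattened into a plain array of alternating key/value pointers, backed by a throwaway tuple, before everything is handed to PyEval_EvalCodeEx(). Below is a minimal standalone sketch of just that flattening step, assuming an embedded CPython interpreter and a hypothetical keyword dict, with error checks omitted for brevity:

#include <Python.h>
#include <stdio.h>

int main(void)
{
    Py_Initialize();

    /* Hypothetical keyword dict, standing in for the "kw" argument. */
    PyObject *kw = Py_BuildValue("{s:i,s:i}", "a", 1, "b", 2);

    /* Same flattening scheme as function_call(): a tuple of size 2*nk
       whose item array is used as alternating key/value slots. */
    Py_ssize_t nk = PyDict_Size(kw);
    PyObject *kwtuple = PyTuple_New(2 * nk);
    PyObject **k = &PyTuple_GET_ITEM(kwtuple, 0);

    Py_ssize_t pos = 0, i = 0;
    while (PyDict_Next(kw, &pos, &k[i], &k[i + 1])) {
        Py_INCREF(k[i]);        /* the tuple now owns key ...  */
        Py_INCREF(k[i + 1]);    /* ... and value references    */
        i += 2;
    }

    /* k[0], k[2], ... are the keys; k[1], k[3], ... the values. */
    printf("flattened %zd keyword pairs\n", i / 2);

    Py_DECREF(kwtuple);
    Py_DECREF(kw);
    Py_Finalize();
    return 0;
}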
static PyObject *
fast_function(PyObject *func, PyObject ***pp_stack, int n, int na, int nk)
{
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
    PyObject *globals = PyFunction_GET_GLOBALS(func);
    PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
    PyObject **d = NULL;
    int nd = 0;

    if (argdefs == NULL && co->co_argcount == n && nk == 0 &&
        co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
        PyFrameObject *f;
        PyObject *retval = NULL;
        PyThreadState *tstate = PyThreadState_GET();
        PyObject **fastlocals, **stack;
        int i;

        /* XXX Perhaps we should create a specialized
           PyFrame_New() that doesn't take locals, but does
           take builtins without sanity checking them. */
        f = PyFrame_New(tstate, co, globals, NULL);
        if (f == NULL)
            return NULL;

        fastlocals = f->f_localsplus;
        stack = (*pp_stack) - n;

        for (i = 0; i < n; i++) {
            Py_INCREF(*stack);
            fastlocals[i] = *stack++;
        }
        retval = eval_frame(f);
        ++tstate->recursion_depth;
        Py_DECREF(f);
        --tstate->recursion_depth;
        return retval;
    }
    if (argdefs != NULL) {
        d = &PyTuple_GET_ITEM(argdefs, 0);
        nd = ((PyTupleObject *)argdefs)->ob_size;
    }
    return PyEval_EvalCodeEx(co, globals,
                             (PyObject *)NULL, (*pp_stack)-n, na,
                             (*pp_stack)-2*nk, nk, d, nd,
                             PyFunction_GET_CLOSURE(func));
}
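Both this fast_function() from CPython's ceval.c and the Nuitka variant below hinge on the same guard: the fast path only fires when there are no defaults, the argument count matches exactly, and the code object's flags are exactly CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE, i.e. a plain function without closure variables, *args/**kwargs or generator semantics. As a sketch of that guard (the helper name is made up):

/* Sketch of the fast-path guard used above; the helper name is
   hypothetical. CO_VARARGS, CO_VARKEYWORDS or CO_GENERATOR would set
   additional flag bits, so the equality test rejects them all at once. */
static int qualifies_for_fast_path(PyObject *func, int n)
{
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);

    return PyFunction_GET_DEFAULTS(func) == NULL
        && co->co_argcount == n
        && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE);
}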
NUITKA_MAY_BE_UNUSED static PyObject *fast_python_call( PyObject *func, PyObject **args, int count )
{
    PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE( func );
    PyObject *globals = PyFunction_GET_GLOBALS( func );
    PyObject *argdefs = PyFunction_GET_DEFAULTS( func );

#if PYTHON_VERSION >= 300
    PyObject *kwdefs = PyFunction_GET_KW_DEFAULTS( func );

    if ( kwdefs == NULL && argdefs == NULL && co->co_argcount == count &&
         co->co_flags == ( CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE ))
#else
    if ( argdefs == NULL && co->co_argcount == count &&
         co->co_flags == ( CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE ))
#endif
    {
        PyThreadState *tstate = PyThreadState_GET();

        assertObject( globals );

        PyFrameObject *frame = PyFrame_New( tstate, co, globals, NULL );

        if (unlikely( frame == NULL ))
        {
            return NULL;
        }

        for ( int i = 0; i < count; i++ )
        {
            frame->f_localsplus[i] = INCREASE_REFCOUNT( args[i] );
        }

        PyObject *result = PyEval_EvalFrameEx( frame, 0 );

        // Frame release protects against recursion as it may lead to variable
        // destruction.
        ++tstate->recursion_depth;
        Py_DECREF( frame );
        --tstate->recursion_depth;

        return result;
    }

    PyObject **defaults = NULL;
    int nd = 0;

    if ( argdefs != NULL )
    {
        defaults = &PyTuple_GET_ITEM( argdefs, 0 );
        nd = int( Py_SIZE( argdefs ) );
    }

    PyObject *result = PyEval_EvalCodeEx(
#if PYTHON_VERSION >= 300
        (PyObject *)co,
#else
        co,        // code object
#endif
        globals,   // globals
        NULL,      // no locals
        args,      // args
        count,     // argcount
        NULL,      // kwds
        0,         // kwcount
        defaults,  // defaults
        nd,        // defcount
#if PYTHON_VERSION >= 300
        kwdefs,
#endif
        PyFunction_GET_CLOSURE( func )
    );

    return result;
}
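From the caller's side, the point of this helper is that positional arguments travel straight from C locals into the new frame, with no intermediate argument tuple being built. A hedged usage sketch, where the wrapper and argument names are hypothetical:

// Hypothetical caller: forward two positional arguments that already
// live in C variables, without packing them into an argument tuple.
static PyObject *call_with_two_args( PyObject *func, PyObject *arg0, PyObject *arg1 )
{
    PyObject *args[] = { arg0, arg1 };

    return fast_python_call( func, args, 2 );
}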