// Parse a Python source file into an AST module.  Uses the in-process PyPA
// parser when enabled; otherwise shells out to CPython's parser and
// deserializes the pickled AST it writes to the pipe.
AST_Module* parse_file(const char* fn) {
    STAT_TIMER(t0, "us_timer_cpyton_parsing");
    Timer timer("parsing");

    if (ENABLE_PYPA_PARSER) {
        AST_Module* module = pypa_parse(fn);
        assert(module);
        return module;
    }

    // Fall back to piping the file through the external parser process.
    FILE* parser_pipe = popen(getParserCommandLine(fn).c_str(), "r");
    BufferedReader* buffered = new BufferedReader(parser_pipe);

    AST* parsed = readASTMisc(buffered);

    // Everything the subprocess produced must have been consumed.
    buffered->fill();
    ASSERT(buffered->bytesBuffered() == 0, "%d", buffered->bytesBuffered());
    delete buffered;

    int status = pclose(parser_pipe);
    assert(status == 0);

    assert(parsed->type == AST_TYPE::Module);

    long elapsed_us = timer.end();
    static StatCounter us_parsing("us_parsing");
    us_parsing.log(elapsed_us);

    return ast_cast<AST_Module>(parsed);
}
// Invoke the slot wrapper bound in `self` with a positional-args tuple and a
// kwargs dict, dispatching on the wrapper's flag word.  Raises (via
// checkAndThrowCAPIException) if the underlying C function set an error.
Box* BoxedWrapperObject::__call__(BoxedWrapperObject* self, Box* args, Box* kwds) {
    STAT_TIMER(t0, "us_timer_boxedwrapperobject__call__", (self->cls->is_user_defined ? 1 : 2));

    assert(self->cls == wrapperobject_cls);
    assert(args->cls == tuple_cls);
    assert(kwds->cls == dict_cls);

    int wrapper_flags = self->descr->wrapper->flags;
    wrapperfunc func = self->descr->wrapper->wrapper;
    assert(self->descr->wrapper->offset > 0);

    Box* result;
    if (wrapper_flags == PyWrapperFlag_KEYWORDS) {
        // Keyword-aware wrappers take the kwargs dict as an extra trailing argument.
        wrapperfunc_kwds kw_func = (wrapperfunc_kwds)func;
        result = (*kw_func)(self->obj, args, self->descr->wrapped, kwds);
    } else if (wrapper_flags == PyWrapperFlag_PYSTON || wrapper_flags == 0) {
        result = (*func)(self->obj, args, self->descr->wrapped);
    } else {
        RELEASE_ASSERT(0, "%d", wrapper_flags);
    }

    // A NULL return means the callee set a C-API exception; convert and throw it.
    checkAndThrowCAPIException();
    assert(result && "should have set + thrown an exception!");
    return result;
}
// called from both generatorHasNext and generatorSend/generatorNext (but only if generatorHasNext hasn't been called) static void generatorSendInternal(BoxedGenerator* self, Box* v) { STAT_TIMER(t0, "us_timer_generator_switching", 0); if (self->running) raiseExcHelper(ValueError, "generator already executing"); // check if the generator already exited if (self->entryExited) { freeGeneratorStack(self); raiseExcHelper(StopIteration, (const char*)nullptr); } self->returnValue = v; self->running = true; #if STAT_TIMERS if (!self->prev_stack) self->prev_stack = StatTimer::createStack(self->my_timer); else self->prev_stack = StatTimer::swapStack(self->prev_stack); #endif swapContext(&self->returnContext, self->context, (intptr_t)self); #if STAT_TIMERS self->prev_stack = StatTimer::swapStack(self->prev_stack); if (self->entryExited) { assert(self->prev_stack == &self->my_timer); assert(self->my_timer.isPaused()); } #endif self->running = false; // propagate exception to the caller if (self->exception.type) { assert(self->entryExited); freeGeneratorStack(self); // don't raise StopIteration exceptions because those are handled specially. if (!self->exception.matches(StopIteration)) throw self->exception; return; } if (self->entryExited) { freeGeneratorStack(self); // Reset the current exception. // We could directly create the StopIteration exception but we delay creating it because often the caller is not // interested in the exception (=generatorHasnext). If we really need it we will create it inside generatorSend. self->exception = ExcInfo(NULL, NULL, NULL); return; } }
// Call a C method descriptor with an explicit receiver `obj`, a varargs tuple,
// and a kwargs dict passed via _args[0].  Validates the receiver against the
// descriptor's METH_CLASS flag / defining type, then dispatches on the
// remaining METH_* calling-convention flags.
Box* BoxedMethodDescriptor::__call__(BoxedMethodDescriptor* self, Box* obj, BoxedTuple* varargs, Box** _args) {
    STAT_TIMER(t0, "us_timer_boxedmethoddescriptor__call__", 10);

    // The rest of the "oargs" come packed in the _args array; slot 0 is the kwargs dict.
    BoxedDict* kwargs = static_cast<BoxedDict*>(_args[0]);

    assert(self->cls == method_cls);
    assert(varargs->cls == tuple_cls);
    assert(kwargs->cls == dict_cls);

    int ml_flags = self->method->ml_flags;
    int call_flags;
    if (ml_flags & METH_CLASS) {
        // Class methods require the receiver to be a type object.
        if (!isSubclass(obj->cls, type_cls))
            raiseExcHelper(TypeError, "descriptor '%s' requires a type but received a '%s'", self->method->ml_name,
                           getFullTypeName(obj).c_str());
        // Strip METH_CLASS so the switch below sees only the calling convention.
        call_flags = ml_flags & (~METH_CLASS);
    } else {
        // Instance methods require the receiver to be an instance of the defining type.
        if (!isSubclass(obj->cls, self->type))
            raiseExcHelper(TypeError, "descriptor '%s' requires a '%s' object but received a '%s'",
                           self->method->ml_name, getFullNameOfClass(self->type).c_str(),
                           getFullTypeName(obj).c_str());
        call_flags = ml_flags;
    }

    // Extension code may not be GL-safe; hold the GIL for the duration of the C call.
    threading::GLPromoteRegion _gil_lock;

    Box* rtn;
    if (call_flags == METH_NOARGS) {
        RELEASE_ASSERT(varargs->size() == 0, "");
        RELEASE_ASSERT(kwargs->d.size() == 0, "");
        rtn = (Box*)self->method->ml_meth(obj, NULL);
    } else if (call_flags == METH_VARARGS) {
        RELEASE_ASSERT(kwargs->d.size() == 0, "");
        rtn = (Box*)self->method->ml_meth(obj, varargs);
    } else if (call_flags == (METH_VARARGS | METH_KEYWORDS)) {
        rtn = (Box*)((PyCFunctionWithKeywords)self->method->ml_meth)(obj, varargs, kwargs);
    } else if (call_flags == METH_O) {
        RELEASE_ASSERT(kwargs->d.size() == 0, "");
        RELEASE_ASSERT(varargs->size() == 1, "");
        rtn = (Box*)self->method->ml_meth(obj, varargs->elts[0]);
    } else {
        RELEASE_ASSERT(0, "0x%x", call_flags);
    }

    // NULL return means the C function set an exception; convert and throw it.
    checkAndThrowCAPIException();
    assert(rtn && "should have set + thrown an exception!");
    return rtn;
}
// Suspend the currently-running generator: store the value being yielded,
// switch back to the caller's context, and on resumption either rethrow an
// exception the caller injected (e.g. via generator.throw) or return the
// value that was sent into the generator.
extern "C" Box* yield(BoxedGenerator* obj, Box* value) {
    STAT_TIMER(t0, "us_timer_generator_switching", 0);

    assert(obj->cls == generator_cls);
    BoxedGenerator* self = static_cast<BoxedGenerator*>(obj);
    // Hand the yielded value to the resuming side via the shared slot.
    self->returnValue = value;

    // While suspended, this generator is no longer the active one on this thread.
    threading::popGenerator();
    swapContext(&self->context, self->returnContext, 0);
    // We are running again: re-register as the active generator.
    threading::pushGenerator(obj, obj->stack_begin, obj->returnContext);

    // if the generator receives a exception from the caller we have to throw it
    if (self->exception.type) {
        ExcInfo e = self->exception;
        self->exception = ExcInfo(NULL, NULL, NULL);
        throw e;
    }
    // Otherwise returnValue now holds the value sent by generatorSend.
    return self->returnValue;
}
Box* wrapperObjectTppCall(Box* _self, CallRewriteArgs* rewrite_args, ArgPassSpec argspec, Box* arg1, Box* arg2, Box* arg3, Box** args, const std::vector<BoxedString*>* keyword_names) noexcept(S == CAPI) { STAT_TIMER(t0, "us_timer_boxedwrapperobject_call", (_self->cls->is_user_defined ? 10 : 20)); assert(_self->cls == &wrappertype); wrapperobject* self = reinterpret_cast<wrapperobject*>(_self); RewriterVar* r_obj = NULL; Box** new_args = NULL; if (argspec.totalPassed() >= 3) new_args = (Box**)alloca(sizeof(Box*) * (argspec.totalPassed() + 1 - 3)); if (rewrite_args) { r_obj = rewrite_args->obj->getAttr(offsetof(wrapperobject, self), Location::forArg(0)); } ArgPassSpec new_argspec = bindObjIntoArgs(self->self, r_obj, rewrite_args, argspec, arg1, arg2, arg3, args, new_args); return wrapperDescrTppCall<S>((Box*)self->descr, rewrite_args, new_argspec, arg1, arg2, arg3, new_args, keyword_names); }
// called from both generatorHasNext and generatorSend/generatorNext (but only if generatorHasNext hasn't been called)
// Exception-style-templated resume: returns true when a C-API-style error has
// been set (S == CAPI only); CXX-style errors are thrown instead.  Returns
// false on a normal yield or clean exit.
template <ExceptionStyle S> static bool generatorSendInternal(BoxedGenerator* self, Box* v) noexcept(S == CAPI) {
    STAT_TIMER(t0, "us_timer_generator_switching", 0);

    // A generator that has never run (no saved return context yet) may only be
    // started with None, mirroring CPython's rule.
    if (!self->returnContext && v != Py_None) {
        if (S == CAPI) {
            PyErr_SetString(TypeError, "can't send non-None value to a just-started generator");
            return true;
        } else
            raiseExcHelper(TypeError, "can't send non-None value to a just-started generator");
    }

    if (self->running) {
        if (S == CAPI) {
            PyErr_SetString(ValueError, "generator already executing");
            return true;
        } else
            raiseExcHelper(ValueError, "generator already executing");
    }

    // check if the generator already exited
    if (self->entryExited) {
        freeGeneratorStack(self);
        if (S == CAPI) {
            PyErr_SetObject(StopIteration, Py_None);
            return true;
        } else
            raiseExcHelper(StopIteration, (const char*)nullptr);
    }

    // Hand the sent value to the generator; it takes ownership of the new ref.
    assert(!self->returnValue);
    self->returnValue = incref(v);
    self->running = true;

#if STAT_TIMERS
    // Swap in the generator's own stat-timer stack for the duration of the switch.
    if (!self->prev_stack)
        self->prev_stack = StatTimer::createStack(self->my_timer);
    else
        self->prev_stack = StatTimer::swapStack(self->prev_stack);
#endif

    auto* top_caller_frame_info = (FrameInfo*)cur_thread_state.frame_info;
    // Transfer control onto the generator's stack; returns when it yields or exits.
    swapContext(&self->returnContext, self->context, (intptr_t)self);
    assert(cur_thread_state.frame_info == top_caller_frame_info
           && "the generator should reset the frame info before the swapContext");

#if STAT_TIMERS
    self->prev_stack = StatTimer::swapStack(self->prev_stack);
    if (self->entryExited) {
        assert(self->prev_stack == &self->my_timer);
        assert(self->my_timer.isPaused());
    }
#endif

    self->running = false;

    // propagate exception to the caller
    if (self->exception.type) {
        freeGeneratorStack(self);
        // don't raise StopIteration exceptions because those are handled specially.
        if (!self->exception.matches(StopIteration)) {
            if (S == CAPI) {
                setCAPIException(self->exception);
                self->exception = ExcInfo(NULL, NULL, NULL);
                return true;
            } else {
                auto exc = self->exception;
                self->exception = ExcInfo(NULL, NULL, NULL);
                throw exc;
            }
        }
        return false;
    }

    if (self->entryExited) {
        freeGeneratorStack(self);
        // Reset the current exception.
        // We could directly create the StopIteration exception but we delay creating it because often the caller is not
        // interested in the exception (=generatorHasnext). If we really need it we will create it inside generatorSend.
        assert(!self->exception.type && "need to decref existing exception");
        self->exception = ExcInfo(NULL, NULL, NULL);
        return false;
    }
    return false;
}
Box* wrapperDescrTppCall(Box* _self, CallRewriteArgs* rewrite_args, ArgPassSpec argspec, Box* arg1, Box* arg2, Box* arg3, Box** args, const std::vector<BoxedString*>* keyword_names) noexcept(S == CAPI) { if (S == CAPI) { try { return wrapperDescrTppCall<CXX>(_self, NULL, argspec, arg1, arg2, arg3, args, keyword_names); } catch (ExcInfo e) { setCAPIException(e); return NULL; } } if (rewrite_args) { // We are going to embed references to _self->d_base->wrapper and _self->d_wrapped rewrite_args->obj->addGuard((intptr_t)_self); rewrite_args->rewriter->addGCReference(_self); } STAT_TIMER(t0, "us_timer_boxedwrapperdecsriptor_call", (_self->cls->is_user_defined ? 10 : 20)); assert(_self->cls == &PyWrapperDescr_Type); PyWrapperDescrObject* self = reinterpret_cast<PyWrapperDescrObject*>(_self); int flags = self->d_base->flags; wrapperfunc wrapper = self->d_base->wrapper; ParamReceiveSpec paramspec(1, 0, true, false); if (flags == PyWrapperFlag_KEYWORDS) { paramspec = ParamReceiveSpec(1, 0, true, true); } else if (flags == PyWrapperFlag_PYSTON || flags == 0) { paramspec = ParamReceiveSpec(1, 0, true, false); } else if (flags == PyWrapperFlag_1ARG) { paramspec = ParamReceiveSpec(1, 0, false, false); } else if (flags == PyWrapperFlag_2ARG) { paramspec = ParamReceiveSpec(2, 0, false, false); } else { RELEASE_ASSERT(0, "%d", flags); } auto continuation = [=](CallRewriteArgs* rewrite_args, Box* arg1, Box* arg2, Box* arg3, Box** args) { #ifndef NDEBUG if (paramspec.takes_varargs) assert(arg2 && arg2->cls == tuple_cls); #endif Box* rtn; if (flags == PyWrapperFlag_KEYWORDS) { wrapperfunc_kwds wk = (wrapperfunc_kwds)wrapper; rtn = (*wk)(arg1, arg2, self->d_wrapped, arg3); if (rewrite_args) { auto rewriter = rewrite_args->rewriter; rewrite_args->out_rtn = rewriter->call(true, (void*)wk, rewrite_args->arg1, rewrite_args->arg2, rewriter->loadConst((intptr_t)self->d_wrapped, Location::forArg(2)), rewrite_args->arg3)->setType(RefType::OWNED); 
rewrite_args->rewriter->checkAndThrowCAPIException(rewrite_args->out_rtn); rewrite_args->out_success = true; } } else if (flags == PyWrapperFlag_PYSTON || flags == 0) { rtn = (*wrapper)(arg1, arg2, self->d_wrapped); if (rewrite_args) { auto rewriter = rewrite_args->rewriter; rewrite_args->out_rtn = rewriter->call(true, (void*)wrapper, rewrite_args->arg1, rewrite_args->arg2, rewriter->loadConst((intptr_t)self->d_wrapped, Location::forArg(2))) ->setType(RefType::OWNED); rewrite_args->rewriter->checkAndThrowCAPIException(rewrite_args->out_rtn); rewrite_args->out_success = true; } } else if (flags == PyWrapperFlag_1ARG) { wrapperfunc_1arg wrapper_1arg = (wrapperfunc_1arg)wrapper; rtn = (*wrapper_1arg)(arg1, self->d_wrapped); if (rewrite_args) { auto rewriter = rewrite_args->rewriter; rewrite_args->out_rtn = rewriter->call(true, (void*)wrapper, rewrite_args->arg1, rewriter->loadConst((intptr_t)self->d_wrapped, Location::forArg(1))) ->setType(RefType::OWNED); rewrite_args->rewriter->checkAndThrowCAPIException(rewrite_args->out_rtn); rewrite_args->out_success = true; } } else if (flags == PyWrapperFlag_2ARG) { rtn = (*wrapper)(arg1, arg2, self->d_wrapped); if (rewrite_args) { auto rewriter = rewrite_args->rewriter; rewrite_args->out_rtn = rewriter->call(true, (void*)wrapper, rewrite_args->arg1, rewrite_args->arg2, rewriter->loadConst((intptr_t)self->d_wrapped, Location::forArg(2))) ->setType(RefType::OWNED); rewrite_args->rewriter->checkAndThrowCAPIException(rewrite_args->out_rtn); rewrite_args->out_success = true; } } else { RELEASE_ASSERT(0, "%d", flags); } if (S == CXX && !rtn) throwCAPIException(); return rtn; }; return callCXXFromStyle<S>([&]() { return rearrangeArgumentsAndCall(paramspec, NULL, self->d_base->name, NULL, rewrite_args, argspec, arg1, arg2, arg3, args, keyword_names, continuation); }); }
Box* methodDescrTppCall(Box* _self, CallRewriteArgs* rewrite_args, ArgPassSpec argspec, Box* arg1, Box* arg2, Box* arg3, Box** args, const std::vector<BoxedString*>* keyword_names) noexcept(S == CAPI) { if (S == CAPI) { try { return methodDescrTppCall<CXX>(_self, NULL, argspec, arg1, arg2, arg3, args, keyword_names); } catch (ExcInfo e) { setCAPIException(e); return NULL; } } STAT_TIMER(t0, "us_timer_boxedmethoddescriptor__call__", 10); assert(_self->cls == &PyMethodDescr_Type || _self->cls == &PyClassMethodDescr_Type); PyMethodDescrObject* self = reinterpret_cast<PyMethodDescrObject*>(_self); bool is_classmethod = (_self->cls == &PyClassMethodDescr_Type); int ml_flags = self->d_method->ml_flags; int call_flags = ml_flags & ~(METH_CLASS | METH_COEXIST | METH_STATIC); if (rewrite_args && !rewrite_args->func_guarded) { rewrite_args->obj->addAttrGuard(offsetof(PyMethodDescrObject, d_method), (intptr_t)self->d_method); } ParamReceiveSpec paramspec(0, 0, false, false); Box** defaults = NULL; if (call_flags == METH_NOARGS) { paramspec = ParamReceiveSpec(1, 0, false, false); } else if (call_flags == METH_VARARGS) { paramspec = ParamReceiveSpec(1, 0, true, false); } else if (call_flags == (METH_VARARGS | METH_KEYWORDS)) { paramspec = ParamReceiveSpec(1, 0, true, true); } else if (call_flags == METH_O) { paramspec = ParamReceiveSpec(2, 0, false, false); } else if ((call_flags & ~(METH_O3 | METH_D3)) == 0) { int num_args = 0; if (call_flags & METH_O) num_args++; if (call_flags & METH_O2) num_args += 2; int num_defaults = 0; if (call_flags & METH_D1) num_defaults++; if (call_flags & METH_D2) num_defaults += 2; paramspec = ParamReceiveSpec(1 + num_args, num_defaults, false, false); if (num_defaults) { static Box* _defaults[] = { NULL, NULL, NULL }; assert(num_defaults <= 3); defaults = _defaults; } } else { RELEASE_ASSERT(0, "0x%x", call_flags); } bool arg1_class_guarded = false; if (rewrite_args && argspec.num_args >= 1) { // Try to do the guard before rearrangeArguments if 
possible: rewrite_args->arg1->addAttrGuard(offsetof(Box, cls), (intptr_t)arg1->cls); arg1_class_guarded = true; } auto continuation = [=](CallRewriteArgs* rewrite_args, Box* arg1, Box* arg2, Box* arg3, Box** args) { if (is_classmethod) { rewrite_args = NULL; if (!PyType_Check(arg1)) raiseExcHelper(TypeError, "descriptor '%s' requires a type but received a '%s'", self->d_method->ml_name, getFullTypeName(arg1).c_str()); } else { if (!isSubclass(arg1->cls, self->d_type)) raiseExcHelper(TypeError, "descriptor '%s' requires a '%s' arg1 but received a '%s'", self->d_method->ml_name, self->d_type->tp_name, getFullTypeName(arg1).c_str()); } if (rewrite_args && !arg1_class_guarded) { rewrite_args->arg1->addAttrGuard(offsetof(Box, cls), (intptr_t)arg1->cls); } Box* rtn; if (call_flags == METH_NOARGS) { { UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins"); rtn = (Box*)self->d_method->ml_meth(arg1, NULL); } if (rewrite_args) rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->d_method->ml_meth, rewrite_args->arg1, rewrite_args->rewriter->loadConst(0, Location::forArg(1))) ->setType(RefType::OWNED); } else if (call_flags == METH_VARARGS) { { UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins"); rtn = (Box*)self->d_method->ml_meth(arg1, arg2); } if (rewrite_args) rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->d_method->ml_meth, rewrite_args->arg1, rewrite_args->arg2)->setType(RefType::OWNED); } else if (call_flags == (METH_VARARGS | METH_KEYWORDS)) { { UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins"); rtn = (Box*)((PyCFunctionWithKeywords)self->d_method->ml_meth)(arg1, arg2, arg3); } if (rewrite_args) rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->d_method->ml_meth, rewrite_args->arg1, rewrite_args->arg2, rewrite_args->arg3)->setType(RefType::OWNED); } else if (call_flags == METH_O) { { UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins"); rtn = (Box*)self->d_method->ml_meth(arg1, arg2); } if (rewrite_args) 
rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->d_method->ml_meth, rewrite_args->arg1, rewrite_args->arg2)->setType(RefType::OWNED); } else if ((call_flags & ~(METH_O3 | METH_D3)) == 0) { { UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins"); rtn = ((Box * (*)(Box*, Box*, Box*, Box**))self->d_method->ml_meth)(arg1, arg2, arg3, args); } if (rewrite_args) { if (paramspec.totalReceived() == 2) rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->d_method->ml_meth, rewrite_args->arg1, rewrite_args->arg2)->setType(RefType::OWNED); else if (paramspec.totalReceived() == 3) rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->d_method->ml_meth, rewrite_args->arg1, rewrite_args->arg2, rewrite_args->arg3)->setType(RefType::OWNED); else if (paramspec.totalReceived() > 3) rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->d_method->ml_meth, rewrite_args->arg1, rewrite_args->arg2, rewrite_args->arg3, rewrite_args->args)->setType(RefType::OWNED); else abort(); } } else { RELEASE_ASSERT(0, "0x%x", call_flags); } if (!rtn) throwCAPIException(); if (rewrite_args) { rewrite_args->rewriter->checkAndThrowCAPIException(rewrite_args->out_rtn); rewrite_args->out_success = true; } return rtn; }; return rearrangeArgumentsAndCall(paramspec, NULL, self->d_method->ml_name, defaults, rewrite_args, argspec, arg1, arg2, arg3, args, keyword_names, continuation); }
/*
 * DDD_JoinEnd: completes a Join phase.  Runs three communication phases:
 *   1. senders of JoinObj-commands notify target processors,
 *   2. notified processors send AddCpl-requests to all object copies,
 *   3. notified processors send AddCpl-requests back to the Join issuers,
 * then frees temporary storage and rebuilds all interfaces.
 * (NOTE(review): the matching #if for the #endif below precedes this chunk —
 * presumably an alternative frontend signature; kept verbatim.)
 */
DDD_RET DDD_JoinEnd (void)
#endif
{
  JIJoinPtrArray   *arrayJIJoin = NULL;
  JIAddCplPtrArray *arrayJIAddCpl2 = NULL;
  JIAddCplPtrArray *arrayJIAddCpl3 = NULL;
  int obsolete, nRecvMsgs1, nRecvMsgs2, nRecvMsgs3, nSendMsgs;
  JOINMSG1         *sendMsgs1=NULL, *sm1=NULL;
  JOINMSG2         *sendMsgs2=NULL, *sm2=NULL;
  JOINMSG3         *sendMsgs3=NULL, *sm3=NULL;
  LC_MSGHANDLE     *recvMsgs1=NULL, *recvMsgs2=NULL, *recvMsgs3=NULL;
  DDD_HDR          *localCplObjs=NULL;
  size_t sendMem=0, recvMem=0;
  JIPartner        *joinObjs = NULL;
  int nJoinObjs;

#ifdef JoinMemFromHeap
  /* route lowcomm allocations through the heap for this phase */
  MarkHeap();
  LC_SetMemMgr(memmgr_AllocTMEM, memmgr_FreeTMEM, memmgr_AllocHMEM, NULL);
#endif

  STAT_SET_MODULE(DDD_MODULE_JOIN);
  STAT_ZEROALL;

  /* step mode and check whether call to JoinEnd is valid */
  if (!JoinStepMode(JMODE_CMDS))
  {
    DDD_PrintError('E', 7011, "DDD_JoinEnd() aborted");
    HARD_EXIT;
  }


  /*
          PREPARATION PHASE
   */
  /* get sorted array of JIJoin-items */
  arrayJIJoin = JIJoinSet_GetArray(joinGlobals.setJIJoin);
  obsolete = JIJoinSet_GetNDiscarded(joinGlobals.setJIJoin);


  /*
          COMMUNICATION PHASE 1
          all processors, where JoinObj-commands have been issued,
          send information about these commands to the target
          processors together with the GID of the objects on the
          target procs and the local priority.
   */
  STAT_RESET;
  /* prepare msgs for JIJoin-items */
  nSendMsgs = PreparePhase1Msgs(arrayJIJoin, &sendMsgs1, &sendMem);
  /* DisplayMemResources(); */

  /* init communication topology */
  nRecvMsgs1 = LC_Connect(joinGlobals.phase1msg_t);
  STAT_TIMER(T_JOIN_PREP_MSGS);

  STAT_RESET;
  /* build phase1 msgs on sender side and start send */
  PackPhase1Msgs(sendMsgs1);
  STAT_TIMER(T_JOIN_PACK_SEND);


  /*
          now messages are in the net, use spare time
   */
  STAT_RESET;
  /* get sorted list of local objects with couplings */
  localCplObjs = LocalCoupledObjectsList();
  if (localCplObjs==NULL && ddd_nCpls>0)
  {
    DDD_PrintError('E', 7020, "Cannot get list of coupled objects in DDD_JoinEnd(). Aborted");
    HARD_EXIT;
  }

  if (obsolete>0)
  {
    if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_OBSOLETE)
    {
      int all = JIJoinSet_GetNItems(joinGlobals.setJIJoin);

      sprintf(cBuffer, "DDD MESG [%03d]: %4d from %4d join-cmds obsolete.\n",
              me, obsolete, all);
      DDD_PrintLine(cBuffer);
    }
  }
  STAT_TIMER(T_JOIN);

  /*
          nothing more to do until incoming messages arrive
   */

  /* display information about send-messages on lowcomm-level */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MSGSALL)
  {
    DDD_SyncAll();
    if (me==master)
      DDD_PrintLine("DDD JOIN_SHOW_MSGSALL: Phase1Msg.Send\n");
    LC_PrintSendMsgs();
  }

  /* wait for communication-completion (send AND receive) */
  STAT_RESET;
  recvMsgs1 = LC_Communicate();
  STAT_TIMER(T_JOIN_WAIT_RECV);

  /* display information about message buffer sizes */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MEMUSAGE)
  {
    int k;

    /* sum up sizes of receive mesg buffers */
    for(k=0; k<nRecvMsgs1; k++)
    {
      recvMem += LC_GetBufferSize(recvMsgs1[k]);
    }

    sprintf(cBuffer,
            "DDD MESG [%03d]: SHOW_MEM "
            "msgs  send=%010ld recv=%010ld all=%010ld\n",
            me, (long)sendMem, (long)recvMem, (long)(sendMem+recvMem));
    DDD_PrintLine(cBuffer);
  }

  /* display information about recv-messages on lowcomm-level */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MSGSALL)
  {
    DDD_SyncAll();
    if (me==master)
      DDD_PrintLine("DDD JOIN_SHOW_MSGSALL: Phase1Msg.Recv\n");
    LC_PrintRecvMsgs();
  }

  /* unpack messages */
  STAT_RESET;
  UnpackPhase1Msgs(recvMsgs1, nRecvMsgs1, localCplObjs, NCpl_Get,
                   &joinObjs, &nJoinObjs);
  LC_Cleanup();
  STAT_TIMER(T_JOIN_UNPACK);


  /*
          COMMUNICATION PHASE 2
          all processors which received notification of JoinObj-commands
          during phase 1 send AddCpl-requests to all copies of DDD objects,
          for which Joins had been issued remotely.
   */
  /* get sorted array of JIAddCpl-items */
  arrayJIAddCpl2 = JIAddCplSet_GetArray(joinGlobals.setJIAddCpl2);

  STAT_RESET;
  /* prepare msgs for JIAddCpl-items */
  nSendMsgs = PreparePhase2Msgs(arrayJIAddCpl2, &sendMsgs2, &sendMem);
  /* DisplayMemResources(); */

  /* init communication topology */
  nRecvMsgs2 = LC_Connect(joinGlobals.phase2msg_t);
  STAT_TIMER(T_JOIN_PREP_MSGS);

  STAT_RESET;
  /* build phase2 msgs on sender side and start send */
  PackPhase2Msgs(sendMsgs2);
  STAT_TIMER(T_JOIN_PACK_SEND);

  /*
          now messages are in the net, use spare time
   */

  /* reorder Join-commands according to new_gid */
  /* this ordering is needed in UnpackPhase3 */
  if (JIJoinPtrArray_GetSize(arrayJIJoin) > 1)
  {
    qsort(
      JIJoinPtrArray_GetData(arrayJIJoin),
      JIJoinPtrArray_GetSize(arrayJIJoin),
      sizeof(JIJoin *), sort_NewGid);
  }

  /*
          nothing more to do until incoming messages arrive
   */

  /* display information about send-messages on lowcomm-level */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MSGSALL)
  {
    DDD_SyncAll();
    if (me==master)
      DDD_PrintLine("DDD JOIN_SHOW_MSGSALL: Phase2Msg.Send\n");
    LC_PrintSendMsgs();
  }

  /* wait for communication-completion (send AND receive) */
  STAT_RESET;
  recvMsgs2 = LC_Communicate();
  STAT_TIMER(T_JOIN_WAIT_RECV);

  /* display information about message buffer sizes */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MEMUSAGE)
  {
    int k;

    /* sum up sizes of receive mesg buffers */
    for(k=0; k<nRecvMsgs2; k++)
    {
      recvMem += LC_GetBufferSize(recvMsgs2[k]);
    }

    sprintf(cBuffer,
            "DDD MESG [%03d]: SHOW_MEM "
            "msgs  send=%010ld recv=%010ld all=%010ld\n",
            me, (long)sendMem, (long)recvMem, (long)(sendMem+recvMem));
    DDD_PrintLine(cBuffer);
  }

  /* display information about recv-messages on lowcomm-level */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MSGSALL)
  {
    DDD_SyncAll();
    if (me==master)
      DDD_PrintLine("DDD JOIN_SHOW_MSGSALL: Phase2Msg.Recv\n");
    LC_PrintRecvMsgs();
  }

  /* unpack messages */
  STAT_RESET;
  UnpackPhase2Msgs(recvMsgs2, nRecvMsgs2, joinObjs, nJoinObjs,
                   localCplObjs, NCpl_Get);
  LC_Cleanup();
  STAT_TIMER(T_JOIN_UNPACK);

  /* phase-2 send messages are no longer needed */
  for(; sendMsgs2!=NULL; sendMsgs2=sm2)
  {
    sm2 = sendMsgs2->next;
    FreeTmp(sendMsgs2, 0);
  }


  /*
          COMMUNICATION PHASE 3
          all processors which received notification of JoinObj-commands
          during phase 1 send AddCpl-requests to the procs where the
          JoinObj-commands have been issued. One AddCpl-request is sent
          for each cpl in the local objects coupling list. One AddCpl-request
          is sent for each AddCpl-request received during phase 2.
          (i.e., two kinds of AddCpl-requests are send to the processors
          on which the JoinObj-commands have been issued.
   */
  /* get sorted array of JIAddCpl-items */
  arrayJIAddCpl3 = JIAddCplSet_GetArray(joinGlobals.setJIAddCpl3);

  STAT_RESET;
  /* prepare msgs for JIAddCpl-items */
  nSendMsgs = PreparePhase3Msgs(arrayJIAddCpl3, &sendMsgs3, &sendMem);
  /* DisplayMemResources(); */

  /* init communication topology */
  nRecvMsgs3 = LC_Connect(joinGlobals.phase3msg_t);
  STAT_TIMER(T_JOIN_PREP_MSGS);

  STAT_RESET;
  /* build phase3 msgs on sender side and start send */
  PackPhase3Msgs(sendMsgs3);
  STAT_TIMER(T_JOIN_PACK_SEND);

  /*
          now messages are in the net, use spare time
   */
  /* ... */

  /*
          nothing more to do until incoming messages arrive
   */

  /* display information about send-messages on lowcomm-level */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MSGSALL)
  {
    DDD_SyncAll();
    if (me==master)
      DDD_PrintLine("DDD JOIN_SHOW_MSGSALL: Phase3Msg.Send\n");
    LC_PrintSendMsgs();
  }

  /* wait for communication-completion (send AND receive) */
  STAT_RESET;
  recvMsgs3 = LC_Communicate();
  STAT_TIMER(T_JOIN_WAIT_RECV);

  /* display information about message buffer sizes */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MEMUSAGE)
  {
    int k;

    /* sum up sizes of receive mesg buffers */
    for(k=0; k<nRecvMsgs3; k++)
    {
      recvMem += LC_GetBufferSize(recvMsgs3[k]);
    }

    sprintf(cBuffer,
            "DDD MESG [%03d]: SHOW_MEM "
            "msgs  send=%010ld recv=%010ld all=%010ld\n",
            me, (long)sendMem, (long)recvMem, (long)(sendMem+recvMem));
    DDD_PrintLine(cBuffer);
  }

  /* display information about recv-messages on lowcomm-level */
  if (DDD_GetOption(OPT_INFO_JOIN) & JOIN_SHOW_MSGSALL)
  {
    DDD_SyncAll();
    if (me==master)
      DDD_PrintLine("DDD JOIN_SHOW_MSGSALL: Phase3Msg.Recv\n");
    LC_PrintRecvMsgs();
  }

  /* unpack messages */
  STAT_RESET;
  UnpackPhase3Msgs(recvMsgs3, nRecvMsgs3, arrayJIJoin);
  LC_Cleanup();
  STAT_TIMER(T_JOIN_UNPACK);

  /* phase-3 send messages are no longer needed */
  for(; sendMsgs3!=NULL; sendMsgs3=sm3)
  {
    sm3 = sendMsgs3->next;
    FreeTmp(sendMsgs3, 0);
  }


  /*
          free temporary storage
   */
  JIJoinPtrArray_Free(arrayJIJoin);
  JIJoinSet_Reset(joinGlobals.setJIJoin);

  JIAddCplPtrArray_Free(arrayJIAddCpl2);
  JIAddCplSet_Reset(joinGlobals.setJIAddCpl2);

  JIAddCplPtrArray_Free(arrayJIAddCpl3);
  JIAddCplSet_Reset(joinGlobals.setJIAddCpl3);

  if (localCplObjs!=NULL) FreeTmp(localCplObjs, 0);

  if (joinObjs!=NULL) FreeTmp(joinObjs, 0);

  for(; sendMsgs1!=NULL; sendMsgs1=sm1)
  {
    sm1 = sendMsgs1->next;
    FreeTmp(sendMsgs1, 0);
  }

#ifdef JoinMemFromHeap
  /* restore the default lowcomm memory manager and release the heap mark */
  ReleaseHeap();
  LC_SetMemMgr(memmgr_AllocTMEM, memmgr_FreeTMEM,
               memmgr_AllocTMEM, memmgr_FreeTMEM);
#endif

# if DebugJoin<=4
  sprintf(cBuffer,"%4d: JoinEnd, before IFAllFromScratch().\n", me);
  DDD_PrintDebug(cBuffer);
# endif

  /*
          re-create all interfaces and step JMODE
   */
  STAT_RESET;
  IFAllFromScratch();
  STAT_TIMER(T_JOIN_BUILD_IF);

  JoinStepMode(JMODE_BUSY);

  return(DDD_RET_OK);
}
// Parsing the file is somewhat expensive since we have to shell out to cpython;
// it's not a huge deal right now, but this caching version can significantly cut down
// on the startup time (40ms -> 10ms).
// Validates the cached .pyc (magic string, checksum/length, mtime vs. source)
// and reparses the source when the cache is missing, stale, or corrupt.
AST_Module* caching_parse_file(const char* fn) {
    STAT_TIMER(t0, "us_timer_caching_parse_file");
    static StatCounter us_parsing("us_parsing");
    Timer _t("parsing");
    _t.setExitCallback([](uint64_t t) { us_parsing.log(t); });

    int code;

    // Cache file is the source path with a 'c' appended (foo.py -> foo.pyc).
    std::string cache_fn = std::string(fn) + "c";

    struct stat source_stat, cache_stat;
    code = stat(fn, &source_stat);
    assert(code == 0);
    code = stat(cache_fn.c_str(), &cache_stat);
    // Reparse when the cache is missing or older than the source
    // (seconds first, then nanoseconds as a tie-break).
    if (code != 0 || cache_stat.st_mtime < source_stat.st_mtime
        || (cache_stat.st_mtime == source_stat.st_mtime && cache_stat.st_mtim.tv_nsec < source_stat.st_mtim.tv_nsec)) {
        AST_Module* mod = 0;
        auto result = _reparse(fn, cache_fn, mod);
        if (mod)
            return mod;
        if (result == ParseResult::FAILURE)
            return NULL;
        // Cache not writable: fall back to a plain (uncached) parse.
        if (result == ParseResult::PYC_UNWRITABLE)
            return parse_file(fn);
        code = stat(cache_fn.c_str(), &cache_stat);
        assert(code == 0);
    }

    FILE* fp = fopen(cache_fn.c_str(), "r");
    assert(fp);

    // Validate the cache header; on any corruption, reparse and retry.
    while (true) {
        bool good = true;

        if (good) {
            // Header starts with a magic string identifying the pyc format version.
            char buf[MAGIC_STRING_LENGTH];
            int read = fread(buf, 1, MAGIC_STRING_LENGTH, fp);
            if (read != MAGIC_STRING_LENGTH || strncmp(buf, getMagic(), MAGIC_STRING_LENGTH) != 0) {
                if (VERBOSITY()) {
                    printf("Warning: corrupt or non-Pyston .pyc file found; ignoring\n");
                }
                good = false;
            }
        }

        if (good) {
            // After the magic comes a payload-length field; the total file size
            // must match exactly, otherwise the cache was truncated.
            int length = 0;
            fseek(fp, MAGIC_STRING_LENGTH, SEEK_SET);
            static_assert(sizeof(length) >= CHECKSUM_LENGTH, "");
            int read = fread(&length, 1, CHECKSUM_LENGTH, fp);

            int expected_total_length = MAGIC_STRING_LENGTH + CHECKSUM_LENGTH + length;
            if (read != CHECKSUM_LENGTH || expected_total_length != cache_stat.st_size) {
                if (VERBOSITY()) {
                    printf("Warning: truncated .pyc file found; ignoring\n");
                }
                good = false;
            }
        }

        if (!good) {
            // Bad cache: close, regenerate it, and loop to re-validate.
            fclose(fp);
            AST_Module* mod = 0;
            auto result = _reparse(fn, cache_fn, mod);
            if (mod)
                return mod;
            if (result == ParseResult::FAILURE)
                return NULL;
            if (result == ParseResult::PYC_UNWRITABLE)
                return parse_file(fn);

            code = stat(cache_fn.c_str(), &cache_stat);
            assert(code == 0);

            fp = fopen(cache_fn.c_str(), "r");
            assert(fp);
        } else {
            break;
        }
    }

    // Header validated: deserialize the AST from the remainder of the file.
    BufferedReader* reader = new BufferedReader(fp);
    AST* rtn = readASTMisc(reader);
    reader->fill();
    assert(reader->bytesBuffered() == 0);
    delete reader;
    fclose(fp);

    assert(rtn->type == AST_TYPE::Module);
    return ast_cast<AST_Module>(rtn);
}
// tp_call entry point for BoxedMethodDescriptor: maps the METH_* calling
// convention onto a ParamReceiveSpec, rearranges the incoming arguments into
// oarg1..oarg3/oargs, validates the receiver, and dispatches to ml_meth,
// optionally emitting a rewrite of the call.
//
// Fix: rearrangeArguments was passed `args` (the caller's input array) as its
// output-array parameter while the declared `oargs` went unused, so rearranged
// overflow arguments would clobber the caller's array; pass `oargs` instead.
Box* BoxedMethodDescriptor::tppCall(Box* _self, CallRewriteArgs* rewrite_args, ArgPassSpec argspec, Box* arg1,
                                    Box* arg2, Box* arg3, Box** args, const std::vector<BoxedString*>* keyword_names) {
    STAT_TIMER(t0, "us_timer_boxedmethoddescriptor__call__", 10);

    assert(_self->cls == method_cls);
    BoxedMethodDescriptor* self = static_cast<BoxedMethodDescriptor*>(_self);

    int ml_flags = self->method->ml_flags;
    // METH_CLASS is handled separately below; strip it for the dispatch.
    int call_flags = ml_flags & (~METH_CLASS);

    if (rewrite_args && !rewrite_args->func_guarded) {
        rewrite_args->obj->addAttrGuard(offsetof(BoxedMethodDescriptor, method), (intptr_t)self->method);
    }

    // Translate the calling-convention flags into the argument shape we need.
    ParamReceiveSpec paramspec(0, 0, false, false);
    if (call_flags == METH_NOARGS) {
        paramspec = ParamReceiveSpec(1, 0, false, false);
    } else if (call_flags == METH_VARARGS) {
        paramspec = ParamReceiveSpec(1, 0, true, false);
    } else if (call_flags == (METH_VARARGS | METH_KEYWORDS)) {
        paramspec = ParamReceiveSpec(1, 0, true, true);
    } else if (call_flags == METH_O) {
        paramspec = ParamReceiveSpec(2, 0, false, false);
    } else {
        RELEASE_ASSERT(0, "0x%x", call_flags);
    }

    Box* oarg1 = NULL;
    Box* oarg2 = NULL;
    Box* oarg3 = NULL;
    Box** oargs = NULL;

    bool rewrite_success = false;
    // All conventions above receive at most 3 args, so oargs (== NULL) is
    // never written to; previously `args` was passed here by mistake.
    rearrangeArguments(paramspec, NULL, self->method->ml_name, NULL, rewrite_args, rewrite_success, argspec, arg1,
                       arg2, arg3, args, keyword_names, oarg1, oarg2, oarg3, oargs);

    if (!rewrite_success)
        rewrite_args = NULL;

    if (ml_flags & METH_CLASS) {
        rewrite_args = NULL;
        // Class methods require the receiver to be a type object.
        if (!isSubclass(oarg1->cls, type_cls))
            raiseExcHelper(TypeError, "descriptor '%s' requires a type but received a '%s'", self->method->ml_name,
                           getFullTypeName(oarg1).c_str());
    } else {
        if (!isSubclass(oarg1->cls, self->type))
            raiseExcHelper(TypeError, "descriptor '%s' requires a '%s' oarg1 but received a '%s'",
                           self->method->ml_name, getFullNameOfClass(self->type).c_str(),
                           getFullTypeName(oarg1).c_str());
    }

    if (rewrite_args) {
        rewrite_args->arg1->addAttrGuard(offsetof(Box, cls), (intptr_t)oarg1->cls);
    }

    Box* rtn;
    if (call_flags == METH_NOARGS) {
        {
            UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins");
            rtn = (Box*)self->method->ml_meth(oarg1, NULL);
        }
        if (rewrite_args)
            rewrite_args->out_rtn
                = rewrite_args->rewriter->call(true, (void*)self->method->ml_meth, rewrite_args->arg1,
                                               rewrite_args->rewriter->loadConst(0, Location::forArg(1)));
    } else if (call_flags == METH_VARARGS) {
        {
            UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins");
            rtn = (Box*)self->method->ml_meth(oarg1, oarg2);
        }
        if (rewrite_args)
            rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->method->ml_meth,
                                                                 rewrite_args->arg1, rewrite_args->arg2);
    } else if (call_flags == (METH_VARARGS | METH_KEYWORDS)) {
        {
            UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins");
            rtn = (Box*)((PyCFunctionWithKeywords)self->method->ml_meth)(oarg1, oarg2, oarg3);
        }
        if (rewrite_args)
            rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->method->ml_meth,
                                                                 rewrite_args->arg1, rewrite_args->arg2,
                                                                 rewrite_args->arg3);
    } else if (call_flags == METH_O) {
        {
            UNAVOIDABLE_STAT_TIMER(t0, "us_timer_in_builtins");
            rtn = (Box*)self->method->ml_meth(oarg1, oarg2);
        }
        if (rewrite_args)
            rewrite_args->out_rtn = rewrite_args->rewriter->call(true, (void*)self->method->ml_meth,
                                                                 rewrite_args->arg1, rewrite_args->arg2);
    } else {
        RELEASE_ASSERT(0, "0x%x", call_flags);
    }

    if (!rtn)
        throwCAPIException();

    if (rewrite_args) {
        rewrite_args->rewriter->call(false, (void*)checkAndThrowCAPIException);
        rewrite_args->out_success = true;
    }

    return rtn;
}