// =========================================================================== // method definitions // =========================================================================== GUIParameterTableWindow::GUIParameterTableWindow(GUIMainWindow& app, GUIGlObject& o, int noRows) : FXMainWindow(app.getApp(), (o.getFullName() + " Parameter").c_str(), NULL, NULL, DECOR_ALL, 20, 20, 500, (FXint)((noRows + numParams(&o)) * 20 + 60)), myObject(&o), myApplication(&app), myCurrentPos(0) { noRows += numParams(&o); myTable = new FXTable(this, this, MID_TABLE, TABLE_COL_SIZABLE | TABLE_ROW_SIZABLE | LAYOUT_FILL_X | LAYOUT_FILL_Y); myTable->setVisibleRows((FXint)(noRows + 1)); myTable->setVisibleColumns(3); myTable->setTableSize((FXint)(noRows + 1), 3); myTable->setBackColor(FXRGB(255, 255, 255)); myTable->setColumnText(0, "Name"); myTable->setColumnText(1, "Value"); myTable->setColumnText(2, "Dynamic"); myTable->getRowHeader()->setWidth(0); FXHeader* header = myTable->getColumnHeader(); header->setItemJustify(0, JUSTIFY_CENTER_X); header->setItemSize(0, 240); header->setItemJustify(1, JUSTIFY_CENTER_X); header->setItemSize(1, 120); header->setItemJustify(2, JUSTIFY_CENTER_X); header->setItemSize(2, 60); setIcon(GUIIconSubSys::getIcon(ICON_APP_TABLE)); myLock.lock(); myObject->addParameterTable(this); myLock.unlock(); AbstractMutex::ScopedLocker locker(myGlobalContainerLock); myContainer.push_back(this); // Table cannot be editable myTable->setEditable(FALSE); }
// Build the debug-info array for a closure: its captured 'use' variables,
// its declared parameters (tagged optional/required), and the bound $this.
Array c_Closure::t___debuginfo() {
  Array ret = Array::Create();

  // Serialize 'use' parameters.
  if (auto propValues = propVec()) {
    Array use;

    auto propsInfo = getVMClass()->declProperties();
    for (size_t i = 0; i < getVMClass()->numDeclProperties(); ++i) {
      auto value = &propValues[i];
      // setWithRef preserves by-reference captures instead of unboxing them.
      use.setWithRef(Variant(StrNR(propsInfo[i].name)), tvAsCVarRef(value));
    }

    if (!use.empty()) {
      ret.set(s_static, use);
    }
  }

  auto const func = getInvokeFunc();

  // Serialize function parameters.
  if (func->numParams()) {
    Array params;
    for (int i = 0; i < func->numParams(); ++i) {
      // Parameter display name: "$" prefix + local name.
      auto str = String::attach(
        StringData::Make(s_varprefix.get(), func->localNames()[i])
      );

      // A param is optional if it carries a default-value expression
      // (phpCode), or — for builtins with methInfo — a default value text.
      bool optional = func->params()[i].phpCode;
      if (auto mi = func->methInfo()) {
        optional = optional || mi->parameters[i]->valueText;
      }

      params.set(str, optional ? s_optional : s_required);
    }

    ret.set(s_parameter, params);
  }

  // Serialize 'this' object.
  if (hasThis()) {
    ret.set(s_this, Object(getThis()));
  }

  return ret;
}
// Entry point invoked for a native function/method declared in HNI but
// never implemented: raises a fatal, then (for completeness) tears down the
// frame and returns a null TypedValue slot.
TypedValue* unimplementedWrapper(ActRec* ar) {
  auto const f = ar->m_func;
  auto const nparams = f->numParams();
  auto const cls = f->cls();

  if (cls != nullptr) {
    // Unimplemented native *method*.
    raise_error("Call to unimplemented native method %s::%s()",
                cls->name()->data(), f->name()->data());
    if (f->isStatic()) {
      frame_free_locals_no_this_inl(ar, nparams, nullptr);
    } else {
      frame_free_locals_inl(ar, nparams, nullptr);
    }
  } else {
    // Unimplemented native *function*.
    raise_error("Call to unimplemented native function %s()",
                f->name()->data());
    frame_free_locals_no_this_inl(ar, nparams, nullptr);
  }

  ar->m_r.m_type = KindOfNull;
  return &ar->m_r;
}
// Number of arguments the callee requires, i.e. the declared param count
// minus the trailing run of parameters that have DV-init funclets
// (defaulted parameters).
static inline int32_t minNumArgs(ActRec *ar) {
  auto const f = ar->m_func;
  const Func::ParamInfoVec& info = f->params();
  int32_t required = f->numParams();
  // Walk backwards over trailing defaulted params; stop at the first param
  // with no funclet (i.e. a mandatory one).
  for (; required > 0; --required) {
    if (info[required - 1].funcletOff() == InvalidAbsoluteOffset) break;
  }
  return required;
}
// Collect (param-index, funclet-offset) pairs for every parameter that has
// a default-value init funclet, in declaration order.
DVFuncletsVec Func::getDVFunclets() const {
  DVFuncletsVec result;
  int total = numParams();
  for (int idx = 0; idx < total; ++idx) {
    const ParamInfo& info = params()[idx];
    if (info.hasDefaultValue()) {
      result.emplace_back(idx, info.funcletOff());
    }
  }
  return result;
}
// Closure::__debugInfo (HNI): reports captured use-vars, declared parameters
// (required vs. optional), and the bound $this, if any.
static Array HHVM_METHOD(Closure, __debugInfo) {
  auto closure = c_Closure::fromObject(this_);

  Array ret = Array::Create();

  // Serialize 'use' parameters.
  if (auto useVars = closure->getUseVars()) {
    Array use;

    auto cls = this_->getVMClass();
    auto propsInfo = cls->declProperties();
    auto nProps = cls->numDeclProperties();
    for (size_t i = 0; i < nProps; ++i) {
      auto value = &useVars[i];
      // setWithRef preserves by-reference captures.
      use.setWithRef(Variant(StrNR(propsInfo[i].name)), tvAsCVarRef(value));
    }

    if (!use.empty()) {
      ret.set(s_static, use);
    }
  }

  auto const func = closure->getInvokeFunc();

  // Serialize function parameters.
  if (auto nParams = func->numParams()) {
    Array params;

    auto lNames = func->localNames();
    for (int i = 0; i < nParams; ++i) {
      // Display name: "$" prefix + parameter's local name.
      auto str = String::attach(
        StringData::Make(s_varprefix.get(), lNames[i])
      );

      // Optional iff the param has a default expression (phpCode) or, when
      // methInfo is present, a default value text.
      bool optional = func->params()[i].phpCode;
      if (auto mi = func->methInfo()) {
        optional = optional || mi->parameters[i]->valueText;
      }

      params.set(str, optional ? s_optional : s_required);
    }

    ret.set(s_parameter, params);
  }

  // Serialize 'this' object.
  if (closure->hasThis()) {
    ret.set(s_this, Object(closure->getThis()));
  }

  return ret;
}
// Native wrapper for HNI methods: validates the arg count, coerces the
// arguments, prepends the $this/class context argument, calls the native
// implementation, and tears down the frame.
TypedValue* methodWrapper(ActRec* ar) {
  auto func = ar->m_func;
  auto numArgs = func->numParams();
  auto numNonDefault = ar->numArgs();
  bool isStatic = func->isStatic();
  assert(!func->hasVariadicCaptureParam());

  // Arguments live immediately below the ActRec in memory.
  TypedValue* args = ((TypedValue*)ar) - 1;
  TypedValue rv;
  rv.m_type = KindOfNull;

  // Only invoke when the arg count is exact, or the check for
  // missing/surplus args passes, and per-arg coercion succeeds.
  if (LIKELY(numNonDefault == numArgs) ||
      LIKELY(nativeWrapperCheckArgs(ar))) {
    if (coerceFCallArgs(args, numArgs, numNonDefault, func)) {
      // Prepend a context arg for methods
      // KindOfClass when it's being called statically Foo::bar()
      // KindOfObject when it's being called on an instance $foo->bar()
      TypedValue ctx;
      if (ar->hasThis()) {
        if (isStatic) {
          // Static method invoked with an instance context: fatal.
          throw_instance_method_fatal(getInvokeName(ar)->data());
        }
        ctx.m_type = KindOfObject;
        ctx.m_data.pobj = ar->getThis();
      } else {
        if (!isStatic) {
          // Instance method invoked without an instance: fatal.
          throw_instance_method_fatal(getInvokeName(ar)->data());
        }
        ctx.m_type = KindOfClass;
        ctx.m_data.pcls = const_cast<Class*>(ar->getClass());
      }

      callFunc(func, &ctx, args, numArgs, rv);
    } else if (func->attrs() & AttrParamCoerceModeFalse) {
      // Coercion failure under "coerce to false" mode: return bool(false).
      rv.m_type = KindOfBoolean;
      rv.m_data.num = 0;
    }
  }

  assert(rv.m_type != KindOfUninit);
  if (isStatic) {
    frame_free_locals_no_this_inl(ar, func->numLocals(), &rv);
  } else {
    frame_free_locals_inl(ar, func->numLocals(), &rv);
  }
  tvCopy(rv, ar->m_r);
  return &ar->m_r;
}
// Initialize (at most once) a static local belonging to a closure body and
// return its backing RefData, with the refcount bumped for the caller.
// `val` is only consumed on first initialization.
RefData* closureStaticLocInit(StringData* name, ActRec* fp, TypedValue val) {
  auto const func = fp->m_func;
  assert(func->isClosureBody() || func->isGeneratorFromClosure());

  // The closure object sits in the local slot just past the declared params;
  // for a generator-from-closure, use the originating function's layout.
  auto const closureLoc =
    LIKELY(func->isClosureBody())
      ? frame_local(fp, func->numParams())
      : frame_local(fp, frame_continuation(fp)->m_origFunc->numParams());

  bool inited;
  auto const refData = lookupStaticFromClosure(
    closureLoc->m_data.pobj, name, inited);
  // Copy the initial value in only the first time this static is touched.
  if (!inited) {
    cellCopy(val, *refData->tv());
  }
  refData->incRefCount();
  return refData;
}
Func::~Func() {
  // Drop any intercept registration keyed on this func's full name.
  if (m_fullName != nullptr && m_maybeIntercepted != -1) {
    unregister_intercept_flag(fullNameRef(), &m_maybeIntercepted);
  }
  // Release this func's slot in the global func vector.
  if (m_funcId != InvalidFuncId) {
    DEBUG_ONLY auto oldVal = s_funcVec.exchange(m_funcId, nullptr);
    assert(oldVal == this);
  }
  // Smash the JIT prologue guards so stale translations cannot call into a
  // destroyed Func; at least kNumFixedPrologues slots always exist.
  int maxNumPrologues = getMaxNumPrologues(numParams());
  int numPrologues =
    maxNumPrologues > kNumFixedPrologues ? maxNumPrologues
                                         : kNumFixedPrologues;
  TranslatorX64::Get()->smashPrologueGuards((TCA *)m_prologueTable,
                                            numPrologues, this);
#ifdef DEBUG
  // Poison the magic value to catch use-after-free in debug builds.
  validate();
  m_magic = ~m_magic;
#endif
}
// Render a function's parameter list for disassembly output:
// "&$name = LABEL(\"php code\")" per defaulted by-ref param, comma-separated.
std::string func_param_list(const FuncInfo& finfo) {
  std::string out;
  auto const func = finfo.func;
  auto const nparams = func->numParams();

  for (uint32_t idx = 0; idx < nparams; ++idx) {
    if (idx) out += ", ";

    if (func->byRef(idx)) out += "&";
    out += folly::format("${}", loc_name(finfo, idx)).str();

    auto const& param = func->params()[idx];
    if (!param.hasDefaultValue()) continue;

    // Defaulted param: emit the DV-funclet jump label, plus the original
    // PHP source text of the default expression when available.
    out += folly::format(" = {}", jmp_label(finfo, param.funcletOff())).str();
    if (auto const code = param.phpCode()) {
      out += folly::format("({})", escaped_long(code)).str();
    }
  }
  return out;
}
//=========================================================================== void Intersector::setHighPriSing(double* par) //=========================================================================== { // Purpose: Instruct the intersector about known singular points int nmbpar = numParams(); if (!hasSingularityInfo()) { if (prev_intersector_ && prev_intersector_->hasSingularityInfo() && (prev_intersector_->numParams() == nmbpar)) { singularity_info_ = (shared_ptr<SingularityInfo>) (new SingularityInfo(prev_intersector_ ->getSingularityInfo())); } else { // Make empty singularity info instance singularity_info_ = (shared_ptr<SingularityInfo>) (new SingularityInfo()); } } singularity_info_->setHighPriSing(par, nmbpar); }
// Native wrapper for free (non-method) HNI functions: validates the arg
// count, coerces the arguments, calls the native implementation with a null
// context, and tears down the frame.
TypedValue* functionWrapper(ActRec* ar) {
  auto func = ar->m_func;
  auto numArgs = func->numParams();
  auto numNonDefault = ar->numArgs();
  assert(!func->hasVariadicCaptureParam());

  // Arguments live immediately below the ActRec in memory.
  TypedValue* args = ((TypedValue*)ar) - 1;
  TypedValue rv;
  rv.m_type = KindOfNull;

  // Only invoke when the arg count is exact, or the check for
  // missing/surplus args passes, and per-arg coercion succeeds.
  if (LIKELY(numNonDefault == numArgs) ||
      LIKELY(nativeWrapperCheckArgs(ar))) {
    if (coerceFCallArgs(args, numArgs, numNonDefault, func)) {
      callFunc(func, nullptr, args, numArgs, rv);
    } else if (func->attrs() & AttrParamCoerceModeFalse) {
      // Coercion failure under "coerce to false" mode: return bool(false).
      rv.m_type = KindOfBoolean;
      rv.m_data.num = 0;
    }
  }

  assert(rv.m_type != KindOfUninit);
  frame_free_locals_no_this_inl(ar, func->numLocals(), &rv);
  tvCopy(rv, ar->m_r);
  return &ar->m_r;
}
// Validate the number of arguments passed to a native function against its
// declared parameter list. Raises the appropriate warning and returns false
// when a required parameter is missing or surplus args were supplied.
static inline bool nativeWrapperCheckArgs(ActRec* ar) {
  auto const f = ar->m_func;
  auto const expected = f->numParams();
  auto const given = ar->numArgs();

  if (given > expected) {
    // Too many arguments passed, raise a warning ourselves this time
    throw_wrong_arguments_nr(getInvokeName(ar)->data(),
                             given, minNumArgs(ar), expected, 1);
    return false;
  }

  if (given < expected) {
    const Func::ParamInfoVec& paramInfo = f->params();
    for (auto i = given; i < expected; ++i) {
      if (InvalidAbsoluteOffset == paramInfo[i].funcletOff()) {
        // There's at least one non-default param which wasn't passed
        throw_wrong_arguments_nr(getInvokeName(ar)->data(),
                                 given, minNumArgs(ar), expected, 1);
        return false;
      }
    }
  }

  // Looks good
  return true;
}
// Decide whether the callee at the current FCall can be inlined into the
// region being formed. On success, sets instrSize to the size of the
// selected callee region and returns true; otherwise returns false (with a
// traced reason via `refuse`).
bool RegionFormer::tryInline(uint32_t& instrSize) {
  assertx(m_inst.source == m_sk);
  assertx(m_inst.func() == curFunc());
  assertx(m_sk.resumed() == resumed());

  instrSize = 0;

  if (!m_inl.canInlineAt(m_inst.source, m_inst.funcd, *m_region)) {
    return false;
  }

  auto refuse = [this](const std::string& str) {
    FTRACE(2, "selectTracelet not inlining {}: {}\n",
           m_inst.toString(), str);
    return false;
  };

  auto callee = m_inst.funcd;

  // Make sure the FPushOp wasn't interpreted.
  if (m_irgs.fpiStack.empty()) {
    return refuse("fpistack empty; fpush was in a different region");
  }
  auto spillFrame = m_irgs.fpiStack.top().spillFrame;
  if (!spillFrame) {
    return refuse("couldn't find SpillFrame for FPushOp");
  }

  auto numArgs = m_inst.imm[0].u_IVA;
  auto numParams = callee->numParams();

  // Set up the region context, mapping stack slots in the caller to locals in
  // the callee.
  RegionContext ctx;
  ctx.func = callee;
  ctx.bcOffset = callee->getEntryForNumArgs(numArgs);
  ctx.spOffset = FPInvOffset{safe_cast<int32_t>(callee->numSlotsInFrame())};
  ctx.resumed = false;
  // Args are on the eval stack in reverse: the topmost slot is the last arg.
  for (int i = 0; i < numArgs; ++i) {
    auto type = irgen::publicTopType(m_irgs, BCSPOffset{i});
    uint32_t paramIdx = numArgs - 1 - i;
    ctx.liveTypes.push_back({RegionDesc::Location::Local{paramIdx}, type});
  }

  for (unsigned i = numArgs; i < numParams; ++i) {
    // These locals will be populated by DV init funclets but they'll start
    // out as Uninit.
    ctx.liveTypes.push_back({RegionDesc::Location::Local{i}, TUninit});
  }

  FTRACE(1, "selectTracelet analyzing callee {} with context:\n{}",
         callee->fullName()->data(), show(ctx));
  auto region = selectTracelet(ctx, m_profiling, false /* noinline */);
  if (!region) {
    return refuse("failed to select region in callee");
  }

  // Enforce the global instruction budget for the merged region.
  instrSize = region->instrSize();
  auto newInstrSize = instrSize + m_numBCInstrs + m_pendingInlinedInstrs;
  if (newInstrSize > RuntimeOption::EvalJitMaxRegionInstrs) {
    return refuse("new region would be too large");
  }

  if (!m_inl.shouldInline(callee, *region)) {
    return refuse("shouldIRInline failed");
  }

  return true;
}
//===========================================================================
void Intersector::compute(bool compute_at_boundary)
//===========================================================================
{
  // Purpose: Compute the topology of the current intersection

  // Make sure that no "dead intersection points" exist in the pool,
  // i.e. points that have been removed when compute() has been run
  // on sibling subintersectors.
  int_results_->synchronizePool();

  // Make sure that all intersection points at the
  // boundary/boundaries of the current object are already computed
  if (compute_at_boundary)
    getBoundaryIntersections();

  // Remove inner points in constant parameter intersection
  // links
  // (vsk, 0609) and isolated points identical to the existing ones
  int_results_->cleanUpPool();
  int nmb_orig = int_results_->numIntersectionPoints();

  // Best-effort debug dump; never let diagnostics abort the computation.
  if (getenv("DEBUG") && *(getenv("DEBUG")) == '1') {
    try {
      printDebugInfo();
    } catch (...) {
      MESSAGE("Failed printing debug info, continuing.");
    }
  }

  // Check if any intersections are possible in the inner of the
  // objects
  int status_intercept = performInterception();

  // Branch on the outcome of the interseption test
  if (status_intercept == 0) {
    // No intersection is possible
  } else if (status_intercept == 2) {
    // Both objects are too small for further processing.
    // Handle micro case
    microCase();
  } else if (degTriangleSimple()) {
    // This situation is currently relevant only for intersections
    // between two parametric surfaces. It will probably at some
    // stage be relevant for two-parametric functions.
    // All the necessary connections are made
  } else if (checkCoincidence()) {
    // The two objects coincide. The representation is already
    // updated according to this situation
  } else {
    // status_intercept == 1
    // Intersections might exist. Check for simple case. 0 = Maybe
    // simple case; 1 = Confirmed simple case.
    int status_simplecase = simpleCase();
    if (status_simplecase == 1) {
      // Confirmed simple case.
      // Compute intersection points or curves according to the
      // properties of this particular intersection
      updateIntersections();
    } else if (isLinear()) {
      // Linearity is a simple case, but it is important to
      // check for coincidence before trying to find/connect
      // intersections as the simple case criteria is not
      // satisfied
      updateIntersections();
    } else if (complexIntercept()) {
      // Interception by more complex algorithms is performed
      // (implicitization). No further intersectsions are found
      // to be possible
    } else if (complexSimpleCase()) {
      // Simple case test by more complex algorithms is performed
      // (implicitization). A simple case is found.
      updateIntersections();
    } else if (!complexityReduced()) {
      // For the time being, write documentation of the
      // situation to a file
      handleComplexity();
    } else {
      // It is necessary to subdivide the current objects
      doSubdivide();

      // Recurse into each subintersector.
      int nsubint = int(sub_intersectors_.size());
      for (int ki = 0; ki < nsubint; ki++) {
	sub_intersectors_[ki]->getIntPool()
	  ->includeCoveredNeighbourPoints();
	sub_intersectors_[ki]->compute();
      }
    }
  }

  //     // Write intersection point diagnostics
  //     if (numParams() == 4) {
  //       writeIntersectionPoints();
  //     }

  if (prev_intersector_ && prev_intersector_->numParams() > numParams()) {
    // Remove inner points in constant parameter intersection
    // links
    // (vsk, 0609) and isolated points identical to the existing ones
    int_results_->cleanUpPool(nmb_orig);

    // No more recursion at this level.
    // Post iterate the intersection points
    doPostIterate();
  }

  // Prepare output intersection results
  if (prev_intersector_ == 0 || prev_intersector_->isSelfIntersection()) {
    /*if (getenv("DEBUG_FINISH") && *(getenv("DEBUG_FINISH")) == '1') {
      cout << "Status after cleaning up pool:" << endl;
      writeIntersectionPoints();
      }*/

    // Remove loose ends of intersection links in the inner
    //int_results_->weedOutClutterPoints();

    if (getenv("DEBUG_FINISH") && *(getenv("DEBUG_FINISH")) == '1') {
      cout << "Status after removing clutter points:" << endl;
      writeIntersectionPoints();
      int_results_->writeDebug();
    }

    // Remove loose ends of intersection links in the inner
    //int_results_->weedOutClutterPoints();
    int_results_->cleanUpPool(0);

    if (true /*getenv("DO_REPAIR") && *(getenv("DO_REPAIR")) == '1'*/) {
      if (getenv("DEBUG_FINISH") && *(getenv("DEBUG_FINISH")) == '1') {
	cout << "Starting repair" << endl;
      }
      repairIntersections();

      if (getenv("DEBUG_FINISH") && *(getenv("DEBUG_FINISH")) == '1') {
	cout << "Status after repairing intersections:" << endl;
	writeIntersectionPoints();
      }
    }
  }

  if (prev_intersector_ == 0) {
    // Top level intersector
    /*if (getenv("DEBUG_FINISH") && *(getenv("DEBUG_FINISH")) == '1') {
      cout << "Status after removing clutter points:" << endl;
      writeIntersectionPoints();
      }*/

    //       if (/*true */getenv("DO_REPAIR") && *(getenv("DO_REPAIR")) == '1') {
    // 	  repairIntersections();
    // 	  if (getenv("DEBUG_FINISH") && *(getenv("DEBUG_FINISH")) == '1') {
    // 	      cout << "Status after repairing intersections:" << endl;
    // 	      writeIntersectionPoints();
    // 	  }
    //       }

    if (getenv("DEBUG_FINISH") && *(getenv("DEBUG_FINISH")) == '1') {
      int_results_->writeDebug();
    }

    // Assemble the final intersection curves from the point pool.
    int_results_->makeIntersectionCurves();
  }
}
// Codegen for the Call IR instruction: sets up the callee ActRec, then
// either invokes a NativeImpl-only builtin directly or emits a smashable
// PHP call through a bind-call service-request stub.
void cgCall(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<Call>();
  auto const callee = extra->callee;
  auto const argc = extra->numParams;

  auto& v = vmain(env);
  auto& vc = vcold(env);
  auto const catchBlock = label(env, inst->taken());

  // The callee's ActRec sits just above its argc cells of arguments.
  auto const calleeSP = sp[cellsToBytes(extra->spOffset.offset)];
  auto const calleeAR = calleeSP + cellsToBytes(argc);

  v << store{fp, calleeAR + AROFF(m_sfp)};
  v << storeli{safe_cast<int32_t>(extra->after), calleeAR + AROFF(m_soff)};

  if (extra->fcallAwait) {
    // This clobbers any flags that might have already been set on the callee
    // AR (e.g., by SpillFrame), but this is okay because there should never be
    // any conflicts; see the documentation in act-rec.h.
    auto const imm = static_cast<int32_t>(
      ActRec::encodeNumArgsAndFlags(argc, ActRec::Flags::IsFCallAwait)
    );
    v << storeli{imm, calleeAR + AROFF(m_numArgsAndFlags)};
  }

  auto const isNativeImplCall = callee &&
                                callee->builtinFuncPtr() &&
                                !callee->nativeFuncPtr() &&
                                argc == callee->numParams();
  if (isNativeImplCall) {
    // The assumption here is that for builtins, the generated func contains
    // only a single opcode (NativeImpl), and there are no non-argument locals.
    if (do_assert) {
      assertx(argc == callee->numLocals());
      assertx(callee->numIterators() == 0);
      auto addr = callee->getEntry();
      // Skip over any leading AssertRATL ops before the NativeImpl.
      while (peek_op(addr) == Op::AssertRATL) {
        addr += instrLen(addr);
      }
      assertx(peek_op(addr) == Op::NativeImpl);
      assertx(addr + instrLen(addr) ==
              callee->unit()->entry() + callee->past());
    }

    v << store{v.cns(mcg->ustubs().retHelper), calleeAR + AROFF(m_savedRip)};
    if (callee->attrs() & AttrMayUseVV) {
      v << storeqi{0, calleeAR + AROFF(m_invName)};
    }
    v << lea{calleeAR, rvmfp()};
    emitCheckSurpriseFlagsEnter(v, vc, fp, Fixup(0, argc), catchBlock);

    auto const builtinFuncPtr = callee->builtinFuncPtr();
    TRACE(2, "Calling builtin preClass %p func %p\n",
          callee->preClass(), builtinFuncPtr);

    // We sometimes call this while curFunc() isn't really the builtin, so make
    // sure to record the sync point as if we are inside the builtin.
    if (FixupMap::eagerRecord(callee)) {
      auto const syncSP = v.makeReg();
      v << lea{calleeSP, syncSP};
      emitEagerSyncPoint(v, callee->getEntry(), rvmtl(), rvmfp(), syncSP);
    }

    // Call the native implementation. This will free the locals for us in the
    // normal case. In the case where an exception is thrown, the VM unwinder
    // will handle it for us.
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::direct(builtinFuncPtr),
                 v.makeVcallArgs({{rvmfp()}}),
                 v.makeTuple({}), {done, catchBlock}, Fixup(0, argc)};
    env.catch_calls[inst->taken()] = CatchCall::CPP;

    v = done;
    // The native implementation already put the return value on the stack for
    // us, and handled cleaning up the arguments. We have to update the frame
    // pointer and the stack pointer, and load the return value into the return
    // register so the trace we are returning to has it where it expects.
    // TODO(#1273094): We should probably modify the actual builtins to return
    // values via registers using the C ABI and do a reg-to-reg move.
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmfp()[AROFF(m_r)], true);
    v << load{rvmfp()[AROFF(m_sfp)], rvmfp()};
    emitRB(v, Trace::RBTypeFuncExit, callee->fullName()->data());
    return;
  }

  v << lea{calleeAR, rvmfp()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    // Poison the vmsp and saved RIP so bugs surface loudly in debug runs.
    v << syncvmsp{v.cns(0x42)};

    constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;
    emitImmStoreq(v, kUninitializedRIP, rvmfp()[AROFF(m_savedRip)]);
  }

  // Emit a smashable call that initially calls a recyclable service request
  // stub. The stub and the eventual targets take rvmfp() as an argument,
  // pointing to the callee ActRec.
  auto const target = callee
    ? mcg->ustubs().immutableBindCallStub
    : mcg->ustubs().bindCallStub;

  auto const done = v.makeBlock();
  v << callphp{target, php_call_regs(), {{done, catchBlock}}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
// Codegen for CallBuiltin: marshals arguments for the C++ builtin's ABI
// (by-value vs. indirect/by-ref return, context arg, func_num_args),
// performs the call, then normalizes the returned value/type regs.
void cgCallBuiltin(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CallBuiltin>();
  auto const callee = extra->callee;
  auto const returnType = inst->typeParam();
  auto const funcReturnType = callee->returnType();
  auto const returnByValue = callee->isReturnByValue();

  auto const dstData = dstLoc(env, inst, 0).reg(0);
  auto const dstType = dstLoc(env, inst, 0).reg(1);

  auto& v = vmain(env);

  // Whether `t' is passed in/out of C++ as String&/Array&/Object&.
  auto const isReqPtrRef = [] (MaybeDataType t) {
    return isStringType(t) || isArrayLikeType(t) ||
           t == KindOfObject || t == KindOfResource;
  };

  if (FixupMap::eagerRecord(callee)) {
    auto const sp = srcLoc(env, inst, 1).reg();
    auto const spOffset = cellsToBytes(extra->spOffset.offset);
    auto const& marker = inst->marker();
    auto const pc = marker.fixupSk().unit()->entry() + marker.fixupBcOff();

    auto const synced_sp = v.makeReg();
    v << lea{sp[spOffset], synced_sp};
    emitEagerSyncPoint(v, pc, rvmtl(), srcLoc(env, inst, 0).reg(), synced_sp);
  }

  // Scratch slot in RDS used for indirect (by-ref) returns.
  int returnOffset = rds::kVmMInstrStateOff +
                     offsetof(MInstrState, tvBuiltinReturn);
  auto args = argGroup(env, inst);

  if (!returnByValue) {
    if (isBuiltinByRef(funcReturnType)) {
      if (isReqPtrRef(funcReturnType)) {
        returnOffset += TVOFF(m_data);
      }
      // Pass the address of tvBuiltinReturn to the native function as the
      // location where it can construct the return Array, String, Object, or
      // Variant.
      args.addr(rvmtl(), returnOffset);
      args.indirect();
    }
  }

  // The srcs past the first two (sp and fp) are the arguments to the callee.
  auto srcNum = uint32_t{2};

  // Add the this_ or self_ argument for HNI builtins.
  if (callee->isMethod()) {
    if (callee->isStatic()) {
      args.ssa(srcNum);
      ++srcNum;
    } else {
      // Note that we don't support objects with vtables here (if they may need
      // a $this pointer adjustment). This should be filtered out during irgen
      // or before.
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the func_num_args() value if needed.
  if (callee->attrs() & AttrNumArgs) {
    // If `numNonDefault' is negative, this is passed as an src.
    if (extra->numNonDefault >= 0) {
      args.imm((int64_t)extra->numNonDefault);
    } else {
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the positional arguments.
  for (uint32_t i = 0; i < callee->numParams(); ++i, ++srcNum) {
    auto const& pi = callee->params()[i];

    // Non-pointer and NativeArg args are passed by value. String, Array,
    // Object, and Variant are passed by const&, i.e. a pointer to stack memory
    // holding the value, so we expect PtrToT types for these. Pointers to
    // req::ptr types (String, Array, Object) need adjusting to point to
    // &ptr->m_data.
    if (TVOFF(m_data) && !pi.nativeArg && isReqPtrRef(pi.builtinType)) {
      assertx(inst->src(srcNum)->type() <= TPtrToGen);
      args.addr(srcLoc(env, inst, srcNum).reg(), TVOFF(m_data));
    } else if (pi.nativeArg && !pi.builtinType && !callee->byRef(i)) {
      // This condition indicates a MixedTV (i.e., TypedValue-by-value) arg.
      args.typedValue(srcNum);
    } else {
      args.ssa(srcNum, pi.builtinType == KindOfDouble);
    }
  }

  // Pick the call destination by return-ABI shape.
  auto dest = [&] () -> CallDest {
    if (isBuiltinByRef(funcReturnType)) {
      if (!returnByValue) return kVoidDest; // indirect return
      return funcReturnType
        ? callDest(dstData) // String, Array, or Object
        : callDest(dstData, dstType); // Variant
    }
    return funcReturnType == KindOfDouble
      ? callDestDbl(env, inst)
      : callDest(env, inst);
  }();

  cgCallHelper(v, env, CallSpec::direct(callee->nativeFuncPtr()),
               dest, SyncOptions::Sync, args);

  // For primitive return types (int, bool, double) and returnByValue, the
  // return value is already in dstData/dstType.
  if (returnType.isSimpleType() || returnByValue) return;

  // For return by reference (String, Object, Array, Variant), the builtin
  // writes the return value into MInstrState::tvBuiltinReturn, from where it
  // has to be tested and copied.
  if (returnType.isReferenceType()) {
    // The return type is String, Array, or Object; fold nullptr to KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && isReqPtrRef(funcReturnType));

    v << load{rvmtl()[returnOffset], dstData};

    if (dstType.isValid()) {
      auto const sf = v.makeReg();
      auto const rtype = v.cns(returnType.toDataType());
      auto const nulltype = v.cns(KindOfNull);
      v << testq{dstData, dstData, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  if (returnType <= TCell || returnType <= TBoxedCell) {
    // The return type is Variant; fold KindOfUninit to KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && !isReqPtrRef(funcReturnType));
    static_assert(KindOfUninit == 0, "KindOfUninit must be 0 for test");

    v << load{rvmtl()[returnOffset + TVOFF(m_data)], dstData};

    if (dstType.isValid()) {
      auto const rtype = v.makeReg();
      v << loadb{rvmtl()[returnOffset + TVOFF(m_type)], rtype};

      auto const sf = v.makeReg();
      auto const nulltype = v.cns(KindOfNull);
      v << testb{rtype, rtype, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  not_reached();
}
void Sinful::regenerateV1String() { if(! m_valid) { // The empty list. m_v1String = "{}"; return; } std::vector< SourceRoute > v; std::vector< SourceRoute > publics; // // We need to preserve the primary address to permit round-trips from // original serialization to v1 serialization and back again. If we're // clever, we can also use the special primary-address entry to handle // some troublesome backwards-compability concerns: original Sinful // did no input validation, and an empty original Sinful is considered // valid. We should also be able to maintain the invariant that all // addresses are protocol literals (and therefore require no lookup). // SourceRoute sr( CP_PRIMARY, m_host, getPortNum(), PUBLIC_NETWORK_NAME ); v.push_back( sr ); // // Presently, // each element of the list must be of one of the following forms: // // a = primary, port = port, p = IPv4, n = "internet" // a = primary, port = port, p = IPv6, n = "internet" // a = addrs[], port = port, p = IPv4, n = "internet" // a = addrs[], port = port, p = IPv6, n = "internet" // // a = primary, port = port, p = IPv4, n = "private" // a = primary, port = port, p = IPv6, n = "private" // a = private, port = privport, p = IPv4, n = "private" // a = private, port = privport, p = IPv6, n = "private" // // a = CCB[], port = ccbport, p = IPv4, n = "internet" // a = CCB[], port = ccbport, p = IPv6, n = "internet" // a = CCB[], port = ccbport, p = IPv4, n = "internet", ccbsharedport // a = CCB[], port = ccbport, p = IPv6, n = "internet", ccbsharedport // // Additionally, each of the above may also include sp; if any // address includes sp, all must include (the same) sp. // // Start by generating our list of public addresses. 
if( numParams() == 0 ) { condor_sockaddr sa; if( sa.from_ip_string( m_host ) ) { SourceRoute * sr = simpleRouteFromSinful( * this ); if( sr != NULL ) { publics.push_back( * sr ); delete sr; } } } else if( hasAddrs() ) { for( unsigned i = 0; i < addrs.size(); ++i ) { condor_sockaddr sa = addrs[i]; SourceRoute sr( sa, PUBLIC_NETWORK_NAME ); publics.push_back( sr ); } } // If we have a private network, either: // * add its private network address, if it exists // or // * add each of its public addresses. // In both cases, the network name for the routes being added is the // private network name. if( getPrivateNetworkName() != NULL ) { if( getPrivateAddr() == NULL ) { for( unsigned i = 0; i < publics.size(); ++i ) { SourceRoute sr( publics[i], getPrivateNetworkName() ); v.push_back( sr ); } } else { // The private address is defined to be a simple original Sinful, // just and ip-and-port string surrounded by brackets. This is // overkill, but it's less ugly than stripping the brackets. Sinful s( getPrivateAddr() ); if(! s.valid()) { m_valid = false; return; } SourceRoute * sr = simpleRouteFromSinful( s, getPrivateNetworkName() ); if( sr == NULL ) { m_valid = false; return; } v.push_back( * sr ); free( sr ); } } // If we have a CCB address, add all CCB addresses. Otherwise, add all // of our public addresses. if( getCCBContact() != NULL ) { unsigned brokerIndex = 0; StringList brokers( getCCBContact(), " " ); brokers.rewind(); char * contact = NULL; while( (contact = brokers.next()) != NULL ) { MyString ccbAddr, ccbID; MyString peer( "er, constructing v1 Sinful string" ); bool contactOK = CCBClient::SplitCCBContact( contact, ccbAddr, ccbID, peer, NULL ); if(! contactOK ) { m_valid = false; return; } // // A ccbAddr is an original Sinful without the <brackets>. It // may have "PrivNet", "sock", "noUDP", and "alias" set. 
What // we want to do is add copy ccbAddr's source routes to this // Sinful, adding the ccbID and setting the brokerIndex, so // that we know how to merge them back together when regenerating // this Sinful's original Sinful string. // std::string ccbSinfulString; formatstr( ccbSinfulString, "<%s>", ccbAddr.c_str() ); Sinful s( ccbSinfulString.c_str() ); if(! s.valid()) { m_valid = false; return; } std::vector< SourceRoute > w; if(! s.getSourceRoutes( w )) { m_valid = false; return; } for( unsigned j = 0; j < w.size(); ++j ) { SourceRoute sr = w[j]; sr.setBrokerIndex( brokerIndex ); sr.setCCBID( ccbID.c_str() ); sr.setSharedPortID( "" ); if( s.getSharedPortID() != NULL ) { sr.setCCBSharedPortID( s.getSharedPortID() ); } v.push_back( sr ); } ++brokerIndex; } } // We'll never use these addresses -- the CCB address will supersede // them -- but we need to record them to properly recreate addrs. for( unsigned i = 0; i < publics.size(); ++i ) { v.push_back( publics[i] ); } // Set the host alias, if present, on all addresses. if( getAlias() != NULL ) { std::string alias( getAlias() ); for( unsigned i = 0; i < v.size(); ++i ) { v[i].setAlias( alias ); } } // Set the shared port ID, if present, on all addresses. if( getSharedPortID() != NULL ) { std::string spid( getSharedPortID() ); for( unsigned i = 0; i < v.size(); ++i ) { v[i].setSharedPortID( spid ); } } // Set noUDP, if true, on all addresses. (We don't have to set // noUDP on public non-CCB addresses, or on the private address, // unless WANT_UDP_COMMAND_SOCKET is false. However, we can't // distinguish that case from the former two unless both CCB and // SP are disabled.) if( noUDP() ) { for( unsigned i = 0; i < v.size(); ++i ) { v[i].setNoUDP( true ); } } // // Now that we've generated a list of source routes, convert it into // a nested ClassAd list. The correct way to do this is to faff // about with ClassAds, but they make it uneccessarily hard; for now, // I'll just generated the appropriate string directly. 
// m_v1String.erase(); m_v1String += "{"; m_v1String += v[0].serialize(); for( unsigned i = 1; i < v.size(); ++i ) { m_v1String += ", "; m_v1String += v[i].serialize(); } m_v1String += "}"; }
// Relocate a batch of translations into `dest`, fix up all metadata and
// code references (cold paths, prologues, reusable stubs), and rewrite the
// on-disk reloc map. Stops early (DataBlockFull) when `dest` runs out.
void relocate(std::vector<TransRelocInfo>& relocs, CodeBlock& dest,
              CGMeta& fixups) {
  assertOwnsCodeLock();
  assert(!Func::s_treadmill);

  // Build the replacement reloc map in a temp file; swap it in at the end.
  auto newRelocMapName = Debug::DebugInfo::Get()->getRelocMapName() + ".tmp";
  auto newRelocMap = fopen(newRelocMapName.c_str(), "w+");
  if (!newRelocMap) return;

  SCOPE_EXIT {
    if (newRelocMap) fclose(newRelocMap);
  };

  // Hold off Func destruction for the duration of the relocation.
  Func::s_treadmill = true;
  SCOPE_EXIT {
    Func::s_treadmill = false;
  };

  auto ignoreEntry = [](const SrcKey& sk) {
    // We can have entries such as UniqueStubs with no SrcKey
    // These are ok to process.
    if (!sk.valid()) return false;
    // But if the func has been removed from the AtomicHashMap,
    // we don't want to process it.
    return !Func::isFuncIdValid(sk.funcID());
  };

  RelocationInfo rel;
  size_t num = 0;
  assert(fixups.alignments.empty());
  // Pass 1: copy each translation into `dest`, recording the address map.
  for (size_t sz = relocs.size(); num < sz; num++) {
    auto& reloc = relocs[num];
    if (ignoreEntry(reloc.sk)) continue;
    auto start DEBUG_ONLY = dest.frontier();
    try {
      x64::relocate(rel, dest,
                    reloc.start, reloc.end, reloc.fixups, nullptr);
    } catch (const DataBlockFull& dbf) {
      break;
    }
    if (Trace::moduleEnabledRelease(Trace::mcg, 1)) {
      Trace::traceRelease(
        folly::sformat("Relocate: 0x{:08x}+0x{:04x} => 0x{:08x}+0x{:04x}\n",
                       (uintptr_t)reloc.start, reloc.end - reloc.start,
                       (uintptr_t)start, dest.frontier() - start));
    }
  }
  swap_trick(fixups.alignments);
  assert(fixups.empty());

  x64::adjustForRelocation(rel);

  // Pass 2: rewrite recorded metadata, then process the fixups.
  for (size_t i = 0; i < num; i++) {
    if (!ignoreEntry(relocs[i].sk)) {
      x64::adjustMetaDataForRelocation(rel, nullptr, relocs[i].fixups);
    }
  }

  for (size_t i = 0; i < num; i++) {
    if (!ignoreEntry(relocs[i].sk)) {
      relocs[i].fixups.process_only(nullptr);
    }
  }

  // At this point, all the relocated code should be correct, and runable.
  // But eg if it has unlikely paths into cold code that has not been relocated,
  // then the cold code will still point back to the original, not the relocated
  // versions. Similarly reusable stubs will still point to the old code.
  // Since we can now execute the relocated code, its ok to start fixing these
  // things now.
  for (auto& it : srcDB()) {
    it.second->relocate(rel);
  }

  std::unordered_set<Func*> visitedFuncs;
  CodeSmasher s;
  for (size_t i = 0; i < num; i++) {
    auto& reloc = relocs[i];
    if (ignoreEntry(reloc.sk)) continue;
    for (auto& ib : reloc.incomingBranches) {
      ib.relocate(rel);
    }
    if (!reloc.sk.valid()) continue;
    auto f = const_cast<Func*>(reloc.sk.func());
    x64::adjustCodeForRelocation(rel, reloc.fixups);
    reloc.fixups.clear();

    // fixup code references in the corresponding cold block to point
    // to the new code
    x64::adjustForRelocation(rel, reloc.coldStart, reloc.coldEnd);

    if (visitedFuncs.insert(f).second) {
      if (auto adjusted = rel.adjustedAddressAfter(f->getFuncBody())) {
        f->setFuncBody(adjusted);
      }
      // NOTE(review): this inner `num` shadows the outer loop bound `num`
      // above; it is only used as the prologue counter here.
      int num = Func::getMaxNumPrologues(f->numParams());
      if (num < kNumFixedPrologues) num = kNumFixedPrologues;
      while (num--) {
        auto addr = f->getPrologue(num);
        if (auto adjusted = rel.adjustedAddressAfter(addr)) {
          f->setPrologue(num, adjusted);
        }
      }
    }
    if (reloc.end != reloc.start) {
      s.entries.emplace_back(reloc.start, reloc.end);
    }
  }

  auto relocMap = Debug::DebugInfo::Get()->getRelocMap();
  always_assert(relocMap);
  fseek(relocMap, 0, SEEK_SET);

  auto deadStubs = getFreeTCStubs();
  auto param = PostProcessParam { rel, deadStubs, newRelocMap };
  std::set<TCA> liveStubs;
  readRelocations(relocMap, &liveStubs, postProcess, &param);

  // ensure that any reusable stubs are updated for the relocated code
  for (auto stub : liveStubs) {
    FTRACE(1, "Stub: 0x{:08x}\n", (uintptr_t)stub);
    fixups.reusedStubs.emplace_back(stub);
    always_assert(!rel.adjustedAddressAfter(stub));
    fprintf(newRelocMap, "%" PRIxPTR " 0 %s\n",
            uintptr_t(stub), "NewStub");
  }
  x64::adjustCodeForRelocation(rel, fixups);

  // Atomically replace the old reloc map with the freshly-written one, then
  // reopen the (now renamed) file through the existing handle.
  unlink(Debug::DebugInfo::Get()->getRelocMapName().c_str());
  rename(newRelocMapName.c_str(),
         Debug::DebugInfo::Get()->getRelocMapName().c_str());
  fclose(newRelocMap);
  newRelocMap = nullptr;
  freopen(Debug::DebugInfo::Get()->getRelocMapName().c_str(), "r+", relocMap);
  fseek(relocMap, 0, SEEK_END);

  okToRelocate = false;
  Treadmill::enqueue(std::move(s));
}
// Total entry count: one for the node itself, one per parameter, plus one
// continuation entry unless this node is terminal.
uint numEntries() const {
  const uint continuation = mTerminal ? 0 : 1;
  return 1 + numParams() + continuation;
}