void storeTV(Vout& v, Vptr dst, Vloc srcLoc, const SSATmp* src) {
  auto const type = src->type();

  if (srcLoc.isFullSIMD()) {
    // The whole TV is stored in a single SIMD reg.
    assertx(RuntimeOption::EvalHHIRAllocSIMDRegs);
    v << storeups{srcLoc.reg(), dst};
    return;
  }

  if (type.needsReg()) {
    assertx(srcLoc.hasReg(1));
    v << storeb{srcLoc.reg(1), dst + TVOFF(m_type)};
  } else {
    v << storeb{v.cns(type.toDataType()), dst + TVOFF(m_type)};
  }

  // We ignore the values of statically nullish types.
  if (src->isA(TNull) || src->isA(TNullptr)) return;

  // Store the value.
  if (src->hasConstVal()) {
    // Skip potential zero-extend if we know the value.
    v << store{v.cns(src->rawVal()), dst + TVOFF(m_data)};
  } else {
    assertx(srcLoc.hasReg(0));
    auto const extended = zeroExtendIfBool(v, src->type(), srcLoc.reg(0));
    v << store{extended, dst + TVOFF(m_data)};
  }
}
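// A minimal usage sketch (hypothetical helper, not part of the original
// source): spilling a TV to a frame slot reuses storeTV for both the type
// tag and the data word. Assumes `fp` is a valid Vptr base and `offset` is
// a byte offset to a TV-sized slot.
void spillTV(Vout& v, Vptr fp, ptrdiff_t offset, Vloc srcLoc,
             const SSATmp* src) {
  storeTV(v, fp + offset, srcLoc, src);
}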
void emitRB(Vout& v, Trace::RingBufferType t, const char* msg) {
  if (!Trace::moduleEnabled(Trace::ringbuffer, 1)) {
    return;
  }
  v << vcall{CppCall::direct(Trace::ringbufferMsg),
             v.makeVcallArgs({{v.cns(msg), v.cns(strlen(msg)), v.cns(t)}}),
             v.makeTuple({})};
}
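// Usage sketch (hypothetical call site): because emitRB checks the trace
// module itself, callers can drop markers unconditionally; nothing is
// emitted unless the ringbuffer module is enabled at level 1. RBTypeGeneric
// is assumed to be one of the available Trace::RingBufferType values.
void emitTraceMarker(Vout& v) {
  emitRB(v, Trace::RBTypeGeneric, "enter translation");
}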
void cmpLowPtrImpl(Vout& v, Vreg sf, const void* ptr, Vptr mem, size_t size) {
  if (size == 8) {
    v << cmpqm{v.cns(ptr), mem, sf};
  } else if (size == 4) {
    auto const ptrImm = safe_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr));
    v << cmplm{v.cns(ptrImm), mem, sf};
  } else {
    not_implemented();
  }
}
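// Hedged sketch (hypothetical wrapper, mirroring emitCmpClass below): any
// LowPtr-sized pointer comparison can delegate to cmpLowPtrImpl, with the
// operand width picked at compile time from the LowPtr specialization.
template <typename T>
void emitCmpLowPtr(Vout& v, Vreg sf, const T* ptr, Vptr mem) {
  cmpLowPtrImpl(v, sf, ptr, mem, sizeof(LowPtr<T>));
}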
void emitCmpClass(Vout& v, Vreg sf, const Class* cls, Vptr mem) {
  auto size = sizeof(LowPtr<Class>);
  if (size == 8) {
    v << cmpqm{v.cns(cls), mem, sf};
  } else if (size == 4) {
    auto const clsImm = safe_cast<uint32_t>(reinterpret_cast<intptr_t>(cls));
    v << cmplm{v.cns(clsImm), mem, sf};
  } else {
    not_implemented();
  }
}
Vreg check_subcls(Vout& v, Vreg sf, Vreg d, Vreg lhs, Cls rhs, Len rhsVecLen) {
  return cond(
    v, CC_NB, sf, d,
    [&] (Vout& v) {
      return check_clsvec(v, v.makeReg(), lhs, rhs, rhsVecLen);
    },
    [&] (Vout& v) { return v.cns(false); }
  );
}
void emitImmStoreq(Vout& v, Immed64 imm, Vptr ref) {
  if (imm.fits(sz::dword)) {
    v << storeqi{imm.l(), ref};
  } else {
    // An alternative is two 32-bit immediate stores, but that's little-endian
    // specific and generates larger code on x64 (24 bytes vs. 18 bytes).
    v << store{v.cns(imm.q()), ref};
  }
}
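// For illustration only (not the emitted code): the little-endian
// alternative rejected in the comment above would split the 64-bit
// immediate into two 32-bit stores. Sketch, assuming Immed64::l() yields
// the low word, Immed64::q() the full 64-bit value, and storeli the 32-bit
// immediate-store op.
void emitImmStoreqSplit(Vout& v, Immed64 imm, Vptr ref) {
  v << storeli{imm.l(), ref};                                 // low 32 bits
  v << storeli{safe_cast<int32_t>(imm.q() >> 32), ref + 4};   // high 32 bits
}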
void copyTV(Vout& v, Vreg data, Vreg type, Vloc srcLoc, const SSATmp* src) {
  // SIMD registers are not supported here.
  assertx(!srcLoc.isFullSIMD());

  if (src->type().needsReg()) {
    assertx(srcLoc.hasReg(1));
    v << copy{srcLoc.reg(1), type};
  } else {
    v << copy{v.cns(src->type().toDataType()), type};
  }

  // Ignore the value for nulls.
  if (src->isA(TNull)) return;

  if (src->hasConstVal()) {
    // Skip potential zero-extend if we know the value.
    v << copy{v.cns(src->rawVal()), data};
  } else {
    assertx(srcLoc.hasReg(0));
    auto const extended = zeroExtendIfBool(v, src->type(), srcLoc.reg(0));
    v << copy{extended, data};
  }
}
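// Sketch (hypothetical caller): materializing a TV into a fresh data/type
// register pair, e.g. before handing both halves to code that consumes them
// separately.
std::pair<Vreg, Vreg> copyTVToRegs(Vout& v, Vloc srcLoc, const SSATmp* src) {
  auto const data = v.makeReg();
  auto const type = v.makeReg();
  copyTV(v, data, type, srcLoc, src);
  return {data, type};
}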
Vptr lookupDestructor(Vout& v, Vreg type) {
  auto const table = reinterpret_cast<intptr_t>(g_destructors);

  auto const typel = v.makeReg();
  auto const index = v.makeReg();
  auto const indexl = v.makeReg();

  // This movzbl is only needed because callers aren't required to
  // zero-extend the type.
  v << movzbl{type, typel};
  v << shrli{kShiftDataTypeToDestrIndex, typel, indexl, v.makeReg()};
  v << movzlq{indexl, index};

  // The baseless form is more compact, but isn't supported for 64-bit
  // displacements.
  if (table <= std::numeric_limits<int>::max()) {
    return baseless(index * 8 + safe_cast<int>(table));
  }
  return v.cns(table)[index * 8];
}
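// Hedged usage sketch (hypothetical caller): the returned Vptr addresses an
// 8-byte table slot holding a destructor, so a caller can pass the data
// pointer as the first argument and call through the slot. Assumes callm
// takes a memory target plus an argument RegSet, in the style of the call
// helpers used elsewhere in this file.
void callDestructor(Vout& v, Vreg type, Vreg data) {
  v << copy{data, PhysReg{argReg(0)}};
  v << callm{lookupDestructor(v, type), argSet(1)};
}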
void emitCallNativeImpl(Vout& v, Vout& vc, SrcKey srcKey,
                        const Func* func, int numArgs) {
  assert(isNativeImplCall(func, numArgs));

  // We need to store the return address into the AR, but we don't know it
  // yet. Use ldpoint, and point{} below, to get the address.
  PhysReg sp{rVmSp}, fp{rVmFp}, rds{rVmTl};
  auto ret_point = v.makePoint();
  auto ret_addr = v.makeReg();
  v << ldpoint{ret_point, ret_addr};
  v << store{ret_addr, sp[cellsToBytes(numArgs) + AROFF(m_savedRip)]};
  v << lea{sp[cellsToBytes(numArgs)], fp};

  emitCheckSurpriseFlagsEnter(v, vc, Fixup(0, numArgs));

  // rVmSp is already correctly adjusted, because there are no locals other
  // than the arguments passed.
  BuiltinFunction builtinFuncPtr = func->builtinFuncPtr();
  v << copy{fp, PhysReg{argReg(0)}};

  if (mcg->fixupMap().eagerRecord(func)) {
    v << store{v.cns(func->getEntry()), rds[RDS::kVmpcOff]};
    v << store{fp, rds[RDS::kVmfpOff]};
    v << store{sp, rds[RDS::kVmspOff]};
  }

  auto syncPoint = emitCall(v, CppCall::direct(builtinFuncPtr), argSet(1));

  Offset pcOffset = 0;
  Offset stackOff = func->numLocals();
  v << hcsync{Fixup{pcOffset, stackOff}, syncPoint};

  int nLocalCells = func->numSlotsInFrame();
  v << load{fp[AROFF(m_sfp)], fp};
  v << point{ret_point};

  // Pop the ActRec and the locals, leaving one cell for the return value.
  int adjust = sizeof(ActRec) + cellsToBytes(nLocalCells - 1);
  if (adjust != 0) {
    v << addqi{adjust, sp, sp, v.makeReg()};
  }
}
void CodeGenerator::emitStore(Vout& v, Vreg base, ptrdiff_t offset,
                              SSATmp* src, Vloc srcLoc,
                              bool genStoreType /* = true */) {
  auto type = src->type();
  if (type.needsReg()) {
    return emitStoreTypedValue(v, base, offset, srcLoc);
  }
  if (genStoreType) {
    auto dt = type.toDataType();
    v << storeb{v.cns(dt), base[offset + TVOFF(m_type)]};
  }
  if (type <= Type::Null) {
    return;
  }

  auto data = srcLoc.reg();
  if (src->isA(Type::Bool)) {
    // Zero-extend the bool so the full data word is well-defined.
    auto extended = v.makeReg();
    v << movzbl{data, extended};
    data = extended;
  }
  v << store{data, base[offset + TVOFF(m_data)]};
}
void emitTransCounterInc(Vout& v) {
  if (!mcg->tx().isTransDBEnabled()) return;

  auto t = v.cns(mcg->tx().getTransCounterAddr());
  v << incqmlock{*t, v.makeReg()};
}
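// Sketch of the same pattern for an arbitrary counter (hypothetical helper):
// incqmlock emits a locked 64-bit memory increment, so any profiling counter
// with a stable address can be bumped without a load/modify/store sequence.
void emitCounterInc(Vout& v, uint64_t* counter) {
  auto const addr = v.cns(counter);
  v << incqmlock{*addr, v.makeReg()};
}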