/*
 * Emit the chain of "free locals" helper stubs into `cb', recording them in
 * `us'.  Returns the address of the shared decref-release helper.
 *
 * Layout emitted (in order): the release helper, the many-locals loop, the
 * unrolled per-local decref bodies (which fall through into one shared ret),
 * and finally the public entrypoints that set up frame linkage and jump into
 * the bodies.  The exact ordering matters: the unrolled bodies fall through
 * into each other and into the shared ret.
 */
TCA emitFreeLocalsHelpers(CodeBlock& cb, DataBlock& data, UniqueStubs& us) {
  // The address of the first local is passed in the second argument register.
  // We use the third and fourth as scratch registers.
  auto const local = rarg(1);
  auto const last = rarg(2);
  auto const type = rarg(3);
  CGMeta fixups;
  TCA freeLocalsHelpers[kNumFreeLocalsHelpers];
  TCA freeManyLocalsHelper;

  // This stub is very hot; keep it cache-aligned.
  align(cb, &fixups, Alignment::CacheLine, AlignContext::Dead);
  // `local | last' tells the release helper which registers are live across it.
  auto const release =
    emitDecRefHelper(cb, data, fixups, local, type, local | last);

  // Emit code to decref the TypedValue at `local' if its refcount drops.
  auto const decref_local = [&] (Vout& v) {
    auto const sf = v.makeReg();

    // We can't use emitLoadTVType() here because it does a byte load, and we
    // need to sign-extend since we use `type' as a 32-bit array index to the
    // destructor table.
    // NOTE(review): loadzbl zero-extends rather than sign-extends; this only
    // matches the comment if DataType values here are non-negative — confirm.
    v << loadzbl{local[TVOFF(m_type)], type};
    emitCmpTVType(v, sf, KindOfRefCountThreshold, type);

    ifThen(v, CC_G, sf, [&] (Vout& v) {
      v << call{release, local | type};
    });
  };

  // Advance `local' to the next TypedValue slot.
  auto const next_local = [&] (Vout& v) {
    v << addqi{static_cast<int>(sizeof(TypedValue)),
               local, local, v.makeReg()};
  };

  alignJmpTarget(cb);

  freeManyLocalsHelper = vwrap(cb, data, [&] (Vout& v) {
    // We always unroll the final `kNumFreeLocalsHelpers' decrefs, so only loop
    // until we hit that point.
    v << lea{rvmfp()[localOffset(kNumFreeLocalsHelpers - 1)], last};

    // Set up frame linkage to avoid an indirect fixup.
    v << copy{rsp(), rfp()};

    // Loop: decref one local, advance, and stop when `local' reaches `last';
    // then fall through into the unrolled helpers below.
    doWhile(v, CC_NZ, {},
      [&](const VregList& /*in*/, const VregList& /*out*/) {
        auto const sf = v.makeReg();
        decref_local(v);
        next_local(v);
        v << cmpq{ local, last, sf };
        return sf;
      }
    );
  });

  // Emit the unrolled bodies in reverse so that helper i falls through into
  // helper i-1, and the last one (i == 0) falls through into the shared ret.
  for (auto i = kNumFreeLocalsHelpers - 1; i >= 0; --i) {
    freeLocalsHelpers[i] = vwrap(cb, data, [&] (Vout& v) {
      decref_local(v);
      if (i != 0) next_local(v);
    });
  }

  // All the stub entrypoints share the same ret.
  vwrap(cb, data, fixups, [] (Vout& v) {
    v << popp{rfp(), rlr()};
    v << ret{};
  });

  // Create a table of branches
  us.freeManyLocalsHelper = vwrap(cb, data, [&] (Vout& v) {
    v << pushp{rlr(), rfp()};

    // rvmfp() is needed by the freeManyLocalsHelper stub above, so frame
    // linkage setup is deferred until after its use in freeManyLocalsHelper.
    v << jmpi{freeManyLocalsHelper};
  });

  for (auto i = kNumFreeLocalsHelpers - 1; i >= 0; --i) {
    us.freeLocalsHelpers[i] = vwrap(cb, data, [&] (Vout& v) {
      // We set up frame linkage to avoid an indirect fixup.
      v << pushp{rlr(), rfp()};
      v << copy{rsp(), rfp()};
      v << jmpi{freeLocalsHelpers[i]};
    });
  }

  // FIXME: This stub is hot, so make sure to keep it small.
#if 0
  always_assert(Stats::enabled() ||
                (cb.frontier() - release <= 4 * x64::cache_line_size()));
#endif

  fixups.process(nullptr);
  return release;
}
Variant c_Closure::t___invoke(int _argc, CArrRef _argv) {
  // Unreachable stub: closure invocation is handled elsewhere, so reaching
  // this native entry point is a hard bug.  The return exists only to keep
  // the signature well-formed.
  always_assert(false);
  return uninit_null();
}
/*
 * Codegen for CheckType: verify at runtime that src(0) has the instruction's
 * type parameter, branching to the taken edge on failure and defining dst(0)
 * (a copy of the source) on success.  The cases below are ordered from
 * statically-decidable to register-level to specialized checks; each ends in
 * doMov() + return.
 */
void cgCheckType(IRLS& env, const IRInstruction* inst) {
  // Note: If you add new supported type checks, you should update
  // negativeCheckType() to indicate whether it is precise or not.
  auto const src = inst->src(0);
  auto const dst = inst->dst();
  auto const srcData = srcLoc(env, inst, 0).reg(0);
  auto const srcType = srcLoc(env, inst, 0).reg(1);
  auto& v = vmain(env);

  // On a failed check (negated condition), forward-jump to the taken branch.
  auto const doJcc = [&] (ConditionCode cc, Vreg sf) {
    fwdJcc(v, env, ccNegate(cc), sf, inst->taken());
  };

  // Copy src's data (and type, if tracked) into dst's registers.
  auto const doMov = [&] {
    auto const dstData = dstLoc(env, inst, 0).reg(0);
    auto const dstType = dstLoc(env, inst, 0).reg(1);

    if (dst->isA(TBool) && !src->isA(TBool)) {
      // Narrow to a byte when the destination is known Bool.
      v << movtqb{srcData, dstData};
    } else {
      v << copy{srcData, dstData};
    }
    if (dstType == InvalidReg) return;
    if (srcType != InvalidReg) {
      v << copy{srcType, dstType};
    } else {
      // No runtime type register: materialize the statically-known DataType.
      v << ldimmq{src->type().toDataType(), dstType};
    }
  };

  auto const typeParam = inst->typeParam();

  if (src->isA(typeParam)) {
    // src is the target type or better. Just define our dst.
    doMov();
    return;
  }
  if (!src->type().maybe(typeParam)) {
    // src is definitely not the target type. Always jump.
    v << jmp{label(env, inst->taken())};
    return;
  }

  if (srcType != InvalidReg) {
    // We have a runtime type register: do a generic type test on it.
    emitTypeTest(v, env, typeParam, srcType, srcData, v.makeReg(), doJcc);
    doMov();
    return;
  }

  if (src->type() <= TBoxedCell && typeParam <= TBoxedCell) {
    // We should never have specific known Boxed types; those should only be
    // used for hints and predictions.
    always_assert(!(typeParam < TBoxedInitCell));
    doMov();
    return;
  }

  /*
   * See if we're just checking the array kind or object class of a value with
   * a mostly-known type.
   *
   * Important: We don't support typeParam being something like
   * StaticArr=kPackedKind unless the src->type() also already knows its
   * staticness. We do allow things like CheckType<Arr=Packed> t1:StaticArr,
   * though. This is why we have to check that the unspecialized type is at
   * least as big as the src->type().
   */
  if (typeParam.isSpecialized() &&
      typeParam.unspecialize() >= src->type()) {
    detail::emitSpecializedTypeTest(v, env, typeParam, srcData,
                                    v.makeReg(), doJcc);
    doMov();
    return;
  }

  /*
   * Since not all of our unions carry a type register, there are some
   * situations with strings and arrays that are neither constantly-foldable
   * nor in the emitTypeTest() code path.
   *
   * We currently actually check their persistent bit here, which will let
   * both static and uncounted strings through. Also note that
   * CheckType<Uncounted> t1:{Null|Str} doesn't get this treatment currently---
   * the emitTypeTest() path above will only check the type register.
   */
  if (!typeParam.isSpecialized() &&
      typeParam <= TUncounted &&
      src->type().subtypeOfAny(TStr, TArr) &&
      src->type().maybe(typeParam)) {
    assertx(src->type().maybe(TPersistent));

    // Negative refcount means persistent: jump away if the count is >= 0.
    auto const sf = v.makeReg();
    v << cmplim{0, srcData[FAST_REFCOUNT_OFFSET], sf};
    doJcc(CC_L, sf);
    doMov();
    return;
  }

  // No supported lowering for this src/typeParam combination.
  always_assert_flog(
    false,
    "Bad src: {} and dst: {} types in '{}'",
    src->type(), typeParam, *inst
  );
}
Object c_WaitHandle::t_getwaithandle() {
  // Unreachable stub: this native entry point must never actually run.
  always_assert(false);
  // Unreachable dummy return.  Without it, control flows off the end of a
  // non-void function (UB / -Wreturn-type) unless the assert macro is
  // provably noreturn; this also matches c_Closure::t___invoke, which
  // returns a dummy value after its always_assert(false).
  return Object();
}
/*
 * Scan the bytecode-to-x86 mapping starting at index `currBC' for the entry
 * whose translated-code range begins at `ip', and if found, pretty-print the
 * corresponding bytecode instruction to stdout.
 *
 * Returns the mapping index to resume from on the next call (advanced past
 * the printed entry, or at the candidate entry if nothing was printed).
 */
size_t OfflineX86Code::printBCMapping(BCMappingInfo bcMappingInfo,
                                      size_t currBC,
                                      TCA ip) {
  TransBCMapping curr, next;
  TCA x86Start, x86Stop;
  auto const& bcMap = bcMappingInfo.bcMapping;

  curr = next = TransBCMapping { MD5(), 0, 0, 0, 0 };
  x86Start = x86Stop = 0;

  // Account for the sentinel.
  size_t mappingSize = bcMap.size() - 1;

  // Starting from currBC, find the next bytecode with a non-empty x86 range
  // that could potentially correspond to instruction ip.
  for (; currBC < mappingSize; ++currBC) {
    curr = bcMap[currBC];
    next = bcMap[currBC + 1];

    // Each TC region stores its code range in a different field pair; the
    // entry's range ends where the next entry's begins.
    switch (bcMappingInfo.tcRegion) {
      case TCRHot:
      case TCRMain:
      case TCRProfile:
        x86Start = curr.aStart;
        x86Stop = next.aStart;
        break;
      case TCRCold:
        x86Start = curr.acoldStart;
        x86Stop = next.acoldStart;
        break;
      case TCRFrozen:
        x86Start = curr.afrozenStart;
        x86Stop = next.afrozenStart;
        break;
      default:
        error("printBCMapping: unexpected TCRegion");
    }

    always_assert(x86Start <= x86Stop);
    // Stop at the first entry whose (non-empty) range starts at or past ip.
    if (x86Start >= ip && x86Start < x86Stop) break;
  }

  // Print the bytecode only when ip is exactly the start of the entry's range.
  if (currBC < mappingSize && x86Start == ip) {
    if (auto currUnit = g_repo->getUnit(curr.md5)) {
      auto bcPast = curr.bcStart + instrLen(currUnit->at(curr.bcStart));
      currUnit->prettyPrint(std::cout,
                            Unit::PrintOpts().range(curr.bcStart, bcPast));
    } else {
      // Repo lookup failed; emit a placeholder so the listing stays readable.
      std::cout << folly::format(
        "<<< couldn't find unit {} to print bytecode at offset {} >>>\n",
        curr.md5, curr.bcStart);
    }
    currBC++;
  }

  return currBC;
}
/*
 * Read `codeLen' bytes of translated code from `file' (located via
 * codeStartAddr - fileStartAddr) and print a disassembly listing to stdout,
 * annotated with bytecode mappings, optional addresses/raw bytes, perf-event
 * stats, and best-effort call-target symbol names.
 *
 * Decoding stops early (silently) on the first instruction XED cannot decode.
 */
void OfflineX86Code::disasm(FILE* file,
                            TCA fileStartAddr,
                            TCA codeStartAddr,
                            uint64_t codeLen,
                            const PerfEventsMap<TCA>& perfEvents,
                            BCMappingInfo bcMappingInfo,
                            bool printAddr /* =true */,
                            bool printBinary /* =false */) {
  char codeStr[MAX_INSTR_ASM_LEN];
  // NOTE(review): stack allocation sized by the caller-supplied codeLen —
  // presumably callers keep this to one translation's worth of code; confirm.
  xed_uint8_t* code = (xed_uint8_t*) alloca(codeLen);
  xed_uint8_t* frontier;
  TCA ip;
  // Tracks the last immediate moved into r10, for the "mov r10, imm; call
  // r10" call pattern below.
  TCA r10val = 0;
  size_t currBC = 0;

  if (codeLen == 0) return;

  auto const offset = codeStartAddr - fileStartAddr;
  if (fseek(file, offset, SEEK_SET)) {
    error("disasm error: seeking file");
  }

  size_t readLen = fread(code, codeLen, 1, file);
  if (readLen != 1) {
    error("Failed to read {} bytes at offset {} from code file due to {}",
          codeLen, offset, feof(file) ? "EOF" : "read error");
  }

  xed_decoded_inst_t xedd;

  // Decode and print each instruction
  for (frontier = code, ip = codeStartAddr; frontier < code + codeLen; ) {
    xed_decoded_inst_zero_set_mode(&xedd, &xed_state);
    xed_decoded_inst_set_input_chip(&xedd, XED_CHIP_INVALID);
    // 15 is the maximum x86 instruction length.
    xed_error_enum_t xed_error = xed_decode(&xedd, frontier, 15);
    if (xed_error != XED_ERROR_NONE) break;

    // Get disassembled instruction in codeStr
    if (!xed_format_context(xed_syntax, &xedd, codeStr, MAX_INSTR_ASM_LEN,
                            (uint64_t)ip, nullptr
#if XED_ENCODE_ORDER_MAX_ENTRIES != 28 // Newer version of XED library
                            , 0
#endif
                           )) {
      error("disasm error: xed_format_context failed");
    }

    // Annotate the x86 with its bytecode.
    currBC = printBCMapping(bcMappingInfo, currBC, (TCA)ip);

    if (printAddr) printf("%14p: ", ip);

    uint32_t instrLen = xed_decoded_inst_get_length(&xedd);

    if (printBinary) {
      // Dump the raw instruction bytes, padded out to a fixed-width column.
      uint32_t i;
      for (i = 0; i < instrLen; i++) {
        printf("%02X", frontier[i]);
      }
      for (; i < 16; i++) {
        printf("  ");
      }
    }

    // For calls, we try to figure out the destination symbol name.
    // We look both at relative branches and the pattern:
    //    move r10, IMMEDIATE
    //    call r10
    xed_iclass_enum_t iclass = xed_decoded_inst_get_iclass(&xedd);
    string callDest = "";

    if (iclass == XED_ICLASS_CALL_NEAR || iclass == XED_ICLASS_CALL_FAR) {
      const xed_inst_t *xi = xed_decoded_inst_inst(&xedd);
      always_assert(xed_inst_noperands(xi) >= 1);
      const xed_operand_t *opnd = xed_inst_operand(xi, 0);
      xed_operand_enum_t opndName = xed_operand_name(opnd);
      if (opndName == XED_OPERAND_RELBR) {
        if (xed_decoded_inst_get_branch_displacement_width(&xedd)) {
          // Relative call: target = end of this instruction + displacement.
          xed_int32_t disp = xed_decoded_inst_get_branch_displacement(&xedd);
          TCA addr = ip + instrLen + disp;
          callDest = getSymbolName(addr);
        }
      } else if (opndName == XED_OPERAND_REG0) {
        if (xed_decoded_inst_get_reg(&xedd, opndName) == XED_REG_R10) {
          // Indirect call through r10: use the last tracked immediate.
          callDest = getSymbolName(r10val);
        }
      }
    } else if (iclass == XED_ICLASS_MOV) {
      // Look for moves into r10 and keep r10val updated
      const xed_inst_t* xi = xed_decoded_inst_inst(&xedd);
      always_assert(xed_inst_noperands(xi) >= 2);
      const xed_operand_t *destOpnd = xed_inst_operand(xi, 0);
      xed_operand_enum_t destOpndName = xed_operand_name(destOpnd);
      if (destOpndName == XED_OPERAND_REG0 &&
          xed_decoded_inst_get_reg(&xedd, destOpndName) == XED_REG_R10) {
        const xed_operand_t *srcOpnd = xed_inst_operand(xi, 1);
        xed_operand_enum_t srcOpndName = xed_operand_name(srcOpnd);
        if (srcOpndName == XED_OPERAND_IMM0) {
          TCA addr = (TCA)xed_decoded_inst_get_unsigned_immediate(&xedd);
          r10val = addr;
        }
      }
    }

    if (!perfEvents.empty()) {
      printEventStats((TCA)ip, instrLen, perfEvents);
    } else {
      // Keep columns aligned when there are no perf events to show.
      printf("%48s", "");
    }

    printf("%s%s\n", codeStr, callDest.c_str());
    frontier += instrLen;
    ip += instrLen;
  }
}
TCA OfflineX86Code::collectJmpTargets(FILE *file,
                                      TCA fileStartAddr,
                                      TCA codeStartAddr,
                                      uint64_t codeLen,
                                      vector<TCA> *jmpTargets) {
  // Decode `codeLen' bytes of code read from `file' and append the target of
  // every conditional branch to *jmpTargets.  If the sequence can fall
  // through past its end, the address of the next instruction is also added
  // and returned; otherwise returns 0.
  if (codeLen == 0) return 0;

  xed_uint8_t* buf = (xed_uint8_t*) alloca(codeLen);

  if (fseek(file, codeStartAddr - fileStartAddr, SEEK_SET)) {
    error("collectJmpTargets error: seeking file");
  }
  if (fread(buf, codeLen, 1, file) != 1) {
    error("collectJmpTargets error: reading file");
  }

  xed_decoded_inst_t xedd;
  xed_iclass_enum_t lastClass = XED_ICLASS_NOP;

  xed_uint8_t* cursor = buf;
  TCA addr = codeStartAddr;

  // Walk the buffer one decoded instruction at a time.
  while (cursor < buf + codeLen) {
    xed_decoded_inst_zero_set_mode(&xedd, &xed_state);
    xed_decoded_inst_set_input_chip(&xedd, XED_CHIP_INVALID);
    if (xed_decode(&xedd, cursor, 15) != XED_ERROR_NONE) break;

    uint32_t len = xed_decoded_inst_get_length(&xedd);
    lastClass = xed_decoded_inst_get_iclass(&xedd);

    // Conditional branches occupy the [JB, JZ] iclass range.
    if (lastClass >= XED_ICLASS_JB && lastClass <= XED_ICLASS_JZ) {
      const xed_inst_t* xi = xed_decoded_inst_inst(&xedd);
      always_assert(xed_inst_noperands(xi) >= 1);
      if (xed_operand_name(xed_inst_operand(xi, 0)) == XED_OPERAND_RELBR) {
        always_assert(xed_decoded_inst_get_branch_displacement_width(&xedd));
        xed_int32_t disp = xed_decoded_inst_get_branch_displacement(&xedd);
        // Target is relative to the end of the branch instruction.
        jmpTargets->push_back(addr + len + disp);
      }
    }

    cursor += len;
    addr += len;
  }

  // If the code sequence falls thru, then add the next instruction as a
  // possible target.
  switch (lastClass) {
    case XED_ICLASS_JMP:
    case XED_ICLASS_JMP_FAR:
    case XED_ICLASS_RET_NEAR:
    case XED_ICLASS_RET_FAR:
      // Unconditional transfer: no fall-through.
      return 0;
    default:
      jmpTargets->push_back(addr);
      return addr;
  }
}
// Look up `key' in the map `m' and return a reference to its mapped value.
// The key must be present; a missing key is a fatal error (always_assert).
//
// Fix: the template parameter list was missing, leaving `M' undeclared; the
// helper could not compile as written.
template <typename M>
const typename M::mapped_type& get_required(const M& m,
                                            typename M::key_type key) {
  auto it = m.find(key);
  always_assert(it != m.end());
  return it->second;
}