Example #1
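// cgContEnter: store the caller's frame pointer and return bytecode offset into the
// generator's ActRec, make the generator frame the current frame, sync the VM stack
// pointer, and transfer control to the resume target via contenter.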
void cgContEnter(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const genFP = srcLoc(env, inst, 2).reg();
  auto const target = srcLoc(env, inst, 3).reg();

  auto const extra = inst->extra<ContEnter>();
  auto const spOff = extra->spOffset;
  auto const returnOff = extra->returnBCOffset;

  auto& v = vmain(env);

  auto const next = v.makeBlock();

  v << store{fp, genFP[AROFF(m_sfp)]};
  v << storeli{returnOff, genFP[AROFF(m_soff)]};

  v << copy{genFP, fp};
  auto const sync_sp = v.makeReg();
  v << lea{sp[cellsToBytes(spOff.offset)], sync_sp};
  v << syncvmsp{sync_sp};

  v << contenter{fp, target, cross_trace_regs_resumed(),
                 {next, label(env, inst->taken())}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = next;
}
Example #2
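// Another revision of cgContEnter that, after control returns from the generator,
// also defines the VM return-value registers (defvmretdata/defvmrettype) as needed
// by the destination's type.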
void cgContEnter(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const genFP = srcLoc(env, inst, 2).reg();
  auto const target = srcLoc(env, inst, 3).reg();

  auto const extra = inst->extra<ContEnter>();
  auto const spOff = extra->spOffset;
  auto const returnOff = extra->returnBCOffset;

  auto& v = vmain(env);

  auto const next = v.makeBlock();

  v << store{fp, genFP[AROFF(m_sfp)]};
  v << storeli{returnOff, genFP[AROFF(m_soff)]};

  v << copy{genFP, fp};
  auto const sync_sp = v.makeReg();
  v << lea{sp[cellsToBytes(spOff.offset)], sync_sp};
  v << syncvmsp{sync_sp};

  v << contenter{fp, target, cross_trace_regs_resumed(),
                 {next, label(env, inst->taken())}};
  v = next;

  auto const dst = dstLoc(env, inst, 0);
  auto const type = inst->dst()->type();
  if (!type.admitsSingleVal()) {
    v << defvmretdata{dst.reg(0)};
  }
  if (type.needsReg()) {
    v << defvmrettype{dst.reg(1)};
  }
}
Example #3
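// emitBindCall: for a builtin call that needs no argument munging, set up the frame
// inline and emit the native implementation directly; otherwise poison the saved RIP
// in debug builds, stash the callee ActRec pointer in rStashedAR, and emit a
// bind-call helper.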
int32_t emitBindCall(CodeBlock& mainCode, CodeBlock& stubsCode,
                     SrcKey srcKey, const Func* funcd, int numArgs) {
  // If this is a call to a builtin and we don't need any argument
  // munging, we can skip the prologue system and do it inline.
  if (isNativeImplCall(funcd, numArgs)) {
    StoreImmPatcher patchIP(mainCode, (uint64_t)mainCode.frontier(), reg::rax,
                            cellsToBytes(numArgs) + AROFF(m_savedRip),
                            rVmSp);
    assert(funcd->numLocals() == funcd->numParams());
    assert(funcd->numIterators() == 0);
    Asm a { mainCode };
    emitLea(a, rVmSp[cellsToBytes(numArgs)], rVmFp);
    emitCheckSurpriseFlagsEnter(mainCode, stubsCode, true, mcg->fixupMap(),
                                Fixup(0, numArgs));
    // rVmSp is already correctly adjusted, because there's no locals
    // other than the arguments passed.
    auto retval = emitNativeImpl(mainCode, funcd);
    patchIP.patch(uint64_t(mainCode.frontier()));
    return retval;
  }

  Asm a { mainCode };
  if (debug) {
    auto off = cellsToBytes(numArgs) + AROFF(m_savedRip);
    emitImmStoreq(a, kUninitializedRIP, rVmSp[off]);
  }
  // Stash callee's rVmFp into rStashedAR for the callee's prologue
  emitLea(a, rVmSp[cellsToBytes(numArgs)], rStashedAR);
  emitBindCallHelper(mainCode, stubsCode, srcKey, funcd, numArgs);
  return 0;
}
Example #4
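// cgSyncReturnBC: write the return bytecode offset and the saved frame pointer into
// the ActRec being materialized at the given stack offset.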
void cgSyncReturnBC(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<SyncReturnBC>();
  auto const spOffset = cellsToBytes(extra->spOffset.offset);
  auto const bcOffset = extra->bcOffset;
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();

  auto& v = vmain(env);
  v << storeli{safe_cast<int32_t>(bcOffset), sp[spOffset + AROFF(m_soff)]};
  v << store{fp, sp[spOffset + AROFF(m_sfp)]};
}
Example #5
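// offsetToReturnSlot: compute the offset, relative to the IR stack pointer, of the
// return-value slot (m_r) in the current ActRec, measured in cells.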
IRSPOffset offsetToReturnSlot(IRGS& env) {
  return offsetFromIRSP(
    env,
    BCSPOffset{
      logicalStackDepth(env).offset +
        AROFF(m_r) / int32_t{sizeof(Cell)}
    }
  );
}
Example #6
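// cgDefInlineFP: set up the ActRec for an inlined call frame (saved FP, return-helper
// RIP, return bytecode offset, cleared m_invName when the callee may use VarEnv) and
// define the new frame pointer.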
void cgDefInlineFP(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<DefInlineFP>();
  auto const callerSP = srcLoc(env, inst, 0).reg();
  auto const callerFP = srcLoc(env, inst, 1).reg();
  auto& v = vmain(env);

  auto const ar = callerSP[cellsToBytes(extra->spOffset.offset)];

  // Do roughly the same work as an HHIR Call.
  v << store{callerFP, ar + AROFF(m_sfp)};
  emitImmStoreq(v, uintptr_t(tc::ustubs().retInlHelper),
                ar + AROFF(m_savedRip));
  v << storeli{extra->retBCOff, ar + AROFF(m_soff)};
  if (extra->target->attrs() & AttrMayUseVV) {
    v << storeqi{0, ar + AROFF(m_invName)};
  }

  v << lea{ar, dstLoc(env, inst, 0).reg()};
}
Example #7
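// cgDbgAssertARFunc: debug check that the Func* stored in the ActRec on the stack
// matches the expected function; trap with ud2 on mismatch.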
void cgDbgAssertARFunc(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const func = srcLoc(env, inst, 1).reg(0);
  auto const off = cellsToBytes(inst->extra<DbgAssertARFunc>()->offset.offset);

  auto& v = vmain(env);

  auto const sf = v.makeReg();
  v << cmpqm{func, sp[off + AROFF(m_func)], sf};

  ifThen(v, CC_NE, sf, [&](Vout& v) { v << ud2{}; });
}
Example #8
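// emitCallArrayPrologue (arm64): load the argument count from the ActRec, bind-jump
// to the matching default-value funclet, and fall through to the function base.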
TCA emitCallArrayPrologue(Func* func, DVFuncletsVec& dvs) {
  auto& mainCode = mcg->code.main();
  auto& frozenCode = mcg->code.frozen();
  vixl::MacroAssembler a { mainCode };
  vixl::MacroAssembler afrozen { frozenCode };
  TCA start = mainCode.frontier();
  a.   Ldr   (rAsm.W(), rVmFp[AROFF(m_numArgsAndFlags)]);
  for (auto i = 0; i < dvs.size(); ++i) {
    a. Cmp   (rAsm.W(), dvs[i].first);
    emitBindJ(mainCode, frozenCode, CC_LE, SrcKey(func, dvs[i].second, false));
  }
  emitBindJ(mainCode, frozenCode, CC_None, SrcKey(func, func->base(), false));
  mcg->cgFixups().process(nullptr);
  return start;
}
Example #9
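// emitFuncGuard (ppc64): compare the expected Func* against the Func* stored in the
// current ActRec and branch to the prologue-redispatch stub on mismatch.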
void emitFuncGuard(const Func* func, CodeBlock& cb, CGMeta& fixups) {
  ppc64_asm::Assembler a { cb };

  const auto tmp1 = ppc64_asm::reg::r3;
  const auto tmp2 = ppc64_asm::reg::r4;

  assertx(ppc64::abi(CodeKind::CrossTrace).gpUnreserved.contains(tmp1));
  assertx(ppc64::abi(CodeKind::CrossTrace).gpUnreserved.contains(tmp2));

  emitSmashableMovq(a.code(), fixups, uint64_t(func), tmp1);
  a.  ld     (tmp2, rvmfp()[AROFF(m_func)]);
  a.  cmpd   (tmp1, tmp2);

  a.  branchFar(tc::ustubs().funcPrologueRedispatch,
                  ppc64_asm::BranchConditions::NotEqual);

  DEBUG_ONLY auto guard = funcGuardFromPrologue(a.frontier(), func);
  assertx(funcGuardMatches(guard, func));
}
Example #10
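// emitFuncGuard (arm64): the same guard, loading the redispatch stub's address from
// an inline 32-bit literal and branching to it when the Func* comparison fails.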
void emitFuncGuard(const Func* func, CodeBlock& cb, CGMeta& fixups) {
  vixl::MacroAssembler a { cb };
  vixl::Label after_data;
  vixl::Label target_data;
  auto const begin = cb.frontier();

  assertx(arm::abi(CodeKind::CrossTrace).gpUnreserved.contains(vixl::x0));

  emitSmashableMovq(cb, fixups, uint64_t(func), vixl::x0);
  a.  Ldr   (rAsm, M(rvmfp()[AROFF(m_func)]));
  a.  Cmp   (vixl::x0, rAsm);
  a.  B     (&after_data, convertCC(CC_Z));

  a.  Ldr   (rAsm_w, &target_data);
  a.  Br    (rAsm);

  a.  bind  (&target_data);
  a.  dc32  (makeTarget32(tc::ustubs().funcPrologueRedispatch));
  a.  bind  (&after_data);

  cb.sync(begin);
}
Example #11
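// An alternative formulation of offsetToReturnSlot: express the return slot as an
// FP-relative cell offset and convert it to an IR-SP-relative offset.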
IRSPRelOffset offsetToReturnSlot(IRGS& env) {
  auto const retOff = FPRelOffset { AROFF(m_r) / int32_t{sizeof(Cell)} };
  return retOff.to<IRSPRelOffset>(env.irb->fs().irSPOff());
}
Example #12
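// cgCall: materialize the callee ActRec (saved FP, return offset, optional FCallAwait
// flags), then either invoke a builtin's native implementation directly or emit a
// smashable call through the bind-call service-request stubs.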
void cgCall(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<Call>();
  auto const callee = extra->callee;
  auto const argc = extra->numParams;

  auto& v = vmain(env);
  auto& vc = vcold(env);
  auto const catchBlock = label(env, inst->taken());

  auto const calleeSP = sp[cellsToBytes(extra->spOffset.offset)];
  auto const calleeAR = calleeSP + cellsToBytes(argc);

  v << store{fp, calleeAR + AROFF(m_sfp)};
  v << storeli{safe_cast<int32_t>(extra->after), calleeAR + AROFF(m_soff)};

  if (extra->fcallAwait) {
    // This clobbers any flags that might have already been set on the callee
    // AR (e.g., by SpillFrame), but this is okay because there should never be
    // any conflicts; see the documentation in act-rec.h.
    auto const imm = static_cast<int32_t>(
      ActRec::encodeNumArgsAndFlags(argc, ActRec::Flags::IsFCallAwait)
    );
    v << storeli{imm, calleeAR + AROFF(m_numArgsAndFlags)};
  }

  auto const isNativeImplCall = callee &&
                                callee->builtinFuncPtr() &&
                                !callee->nativeFuncPtr() &&
                                argc == callee->numParams();
  if (isNativeImplCall) {
    // The assumption here is that for builtins, the generated func contains
    // only a single opcode (NativeImpl), and there are no non-argument locals.
    if (do_assert) {
      assertx(argc == callee->numLocals());
      assertx(callee->numIterators() == 0);

      auto addr = callee->getEntry();
      while (peek_op(addr) == Op::AssertRATL) {
        addr += instrLen(addr);
      }
      assertx(peek_op(addr) == Op::NativeImpl);
      assertx(addr + instrLen(addr) ==
              callee->unit()->entry() + callee->past());
    }

    v << store{v.cns(mcg->ustubs().retHelper), calleeAR + AROFF(m_savedRip)};
    if (callee->attrs() & AttrMayUseVV) {
      v << storeqi{0, calleeAR + AROFF(m_invName)};
    }
    v << lea{calleeAR, rvmfp()};

    emitCheckSurpriseFlagsEnter(v, vc, fp, Fixup(0, argc), catchBlock);

    auto const builtinFuncPtr = callee->builtinFuncPtr();
    TRACE(2, "Calling builtin preClass %p func %p\n",
          callee->preClass(), builtinFuncPtr);

    // We sometimes call this while curFunc() isn't really the builtin, so make
    // sure to record the sync point as if we are inside the builtin.
    if (FixupMap::eagerRecord(callee)) {
      auto const syncSP = v.makeReg();
      v << lea{calleeSP, syncSP};
      emitEagerSyncPoint(v, callee->getEntry(), rvmtl(), rvmfp(), syncSP);
    }

    // Call the native implementation.  This will free the locals for us in the
    // normal case.  In the case where an exception is thrown, the VM unwinder
    // will handle it for us.
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::direct(builtinFuncPtr), v.makeVcallArgs({{rvmfp()}}),
                 v.makeTuple({}), {done, catchBlock}, Fixup(0, argc)};
    env.catch_calls[inst->taken()] = CatchCall::CPP;

    v = done;
    // The native implementation already put the return value on the stack for
    // us, and handled cleaning up the arguments.  We have to update the frame
    // pointer and the stack pointer, and load the return value into the return
    // register so the trace we are returning to has it where it expects.
    // TODO(#1273094): We should probably modify the actual builtins to return
    // values via registers using the C ABI and do a reg-to-reg move.
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmfp()[AROFF(m_r)], true);
    v << load{rvmfp()[AROFF(m_sfp)], rvmfp()};
    emitRB(v, Trace::RBTypeFuncExit, callee->fullName()->data());
    return;
  }

  v << lea{calleeAR, rvmfp()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    v << syncvmsp{v.cns(0x42)};

    constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;
    emitImmStoreq(v, kUninitializedRIP, rvmfp()[AROFF(m_savedRip)]);
  }

  // Emit a smashable call that initially calls a recyclable service request
  // stub.  The stub and the eventual targets take rvmfp() as an argument,
  // pointing to the callee ActRec.
  auto const target = callee
    ? mcg->ustubs().immutableBindCallStub
    : mcg->ustubs().bindCallStub;

  auto const done = v.makeBlock();
  v << callphp{target, php_call_regs(), {{done, catchBlock}}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
Example #13
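// cgLdARFuncPtr (arm64): load the Func* field out of an ActRec addressed by a base
// register plus offset.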
void CodeGenerator::cgLdARFuncPtr(IRInstruction* inst) {
  auto dstReg  = x2a(curOpd(inst->dst()).reg());
  auto baseReg = x2a(curOpd(inst->src(0)).reg());
  auto offset  = inst->src(1)->getValInt();
  m_as.  Ldr  (dstReg, baseReg[offset + AROFF(m_func)]);
}
Example #14
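// emitFunctionEnterHelper: unique stub that invokes the function-enter event hook.
// It saves the ActRec's saved RIP and FP first so that, if the hook intercepts the
// call (fb_intercept), the stub can unwind straight back to the original caller.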
TCA emitFunctionEnterHelper(CodeBlock& cb, UniqueStubs& us) {
  alignJmpTarget(cb);

  auto const start = vwrap(cb, [&] (Vout& v) {
    auto const ar = v.makeReg();

    v << copy{rvmfp(), ar};

    // Fully set up the call frame for the stub.  We can't skip this like we do
    // in other stubs because we need the return IP for this frame in the %rbp
    // chain, in order to find the proper fixup for the VMRegAnchor in the
    // intercept handler.
    v << stublogue{true};
    v << copy{rsp(), rvmfp()};

    // When we call the event hook, it might tell us to skip the callee
    // (because of fb_intercept).  If that happens, we need to return to the
    // caller, but the handler will have already popped the callee's frame.
    // So, we need to save these values for later.
    v << pushm{ar[AROFF(m_savedRip)]};
    v << pushm{ar[AROFF(m_sfp)]};

    v << copy2{ar, v.cns(EventHook::NormalFunc), rarg(0), rarg(1)};

    bool (*hook)(const ActRec*, int) = &EventHook::onFunctionCall;
    v << call{TCA(hook)};
  });

  us.functionEnterHelperReturn = vwrap2(cb, [&] (Vout& v, Vout& vcold) {
    auto const sf = v.makeReg();
    v << testb{rret(), rret(), sf};

    unlikelyIfThen(v, vcold, CC_Z, sf, [&] (Vout& v) {
      auto const saved_rip = v.makeReg();

      // The event hook has already cleaned up the stack and popped the
      // callee's frame, so we're ready to continue from the original call
      // site.  We just need to grab the fp/rip of the original frame that we
      // saved earlier, and sync rvmsp().
      v << pop{rvmfp()};
      v << pop{saved_rip};

      // Drop our call frame; the stublogue{} instruction guarantees that this
      // is exactly 16 bytes.
      v << lea{rsp()[16], rsp()};

      // Sync vmsp and return to the caller.  This unbalances the return stack
      // buffer, but if we're intercepting, we probably don't care.
      v << load{rvmtl()[rds::kVmspOff], rvmsp()};
      v << jmpr{saved_rip};
    });

    // Skip past the stuff we saved for the intercept case.
    v << lea{rsp()[16], rsp()};

    // Restore rvmfp() and return to the callee's func prologue.
    v << stubret{RegSet(), true};
  });

  return start;
}
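Every example above uses the AROFF macro to address a field of HHVM's ActRec (the bytecode-level call frame) as a byte offset from a stack or frame pointer. Below is a minimal, self-contained sketch of that pattern, assuming AROFF is essentially int(offsetof(ActRec, field)) as in HHVM; the ActRec struct here is a simplified stand-in, not the real one.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for HHVM's ActRec; field names mirror the ones used above.
struct ActRec {
  ActRec*     m_sfp;              // caller's frame pointer
  uint64_t    m_savedRip;         // return address into the caller
  const void* m_func;             // callee Func*
  int32_t     m_soff;             // return bytecode offset in the caller
  uint32_t    m_numArgsAndFlags;  // argument count plus call flags
};

// AROFF(field): byte offset of `field` within ActRec, so a code generator can emit
// loads and stores such as fp[AROFF(m_sfp)] relative to a frame or stack pointer.
#define AROFF(nm) int(offsetof(ActRec, nm))

int main() {
  std::printf("m_sfp at %d, m_savedRip at %d, m_soff at %d\n",
              AROFF(m_sfp), AROFF(m_savedRip), AROFF(m_soff));
  return 0;
}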