Example #1
void emitAssertRefCount(Asm& as, PhysReg base) {
  as.cmpl(HPHP::StaticValue, base[FAST_REFCOUNT_OFFSET]);
  ifThen(as, CC_NBE, [&] {
      as.cmpl(HPHP::RefCountMaxRealistic, base[FAST_REFCOUNT_OFFSET]);
      ifThen(as, CC_NBE, [&] { as.ud2(); });
  });
}
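The ifThen helper used here wraps the body in a forward branch: it jumps past the lambda's code when the negated condition holds. A minimal sketch of such a helper, assuming the X64Assembler-style jcc8/ccNegate/asm_label interface seen throughout these examples (the real HHVM helper may differ in detail):

// Sketch only: run `then()' iff cc holds, by branching over the body
// on the negated condition.
template <class Then>
void ifThen(Asm& a, ConditionCode cc, Then then) {
  Label done;
  a.jcc8(ccNegate(cc), done);
  then();
asm_label(a, done);
}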
Example #2
TCA emitCallToExit(CodeBlock& cb) {
  Asm a { cb };

  // Emit a byte of padding. This is a kind of hacky way to avoid
  // hitting an assert in recordGdbStub when we call it with stub - 1
  // as the start address.
  a.emitNop(1);
  auto const start = a.frontier();
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    Label ok;
    a.emitImmReg(uintptr_t(enterTCExit), rax);
    a.cmpq(rax, *rsp());
    a.je8 (ok);
    a.ud2();
  asm_label(a, ok);
  }

  // Emulate a ret to enterTCExit without actually doing one to avoid
  // unbalancing the return stack buffer. The call from enterTCHelper() that
  // got us into the TC was popped off the RSB by the ret that got us to this
  // stub.
  a.addq(8, rsp());
  a.jmp(TCA(enterTCExit));

  // On a backtrace, gdb tries to locate the calling frame at address
  // returnRIP-1. However, for the first VM frame, there is no code at
  // returnRIP-1, since the AR was set up manually. For this frame,
  // record the tracelet address as starting from this callToExit-1,
  // so gdb does not barf.
  return start;
}
Example #3
void emitTraceCall(CodeBlock& cb, int64_t pcOff) {
  // TODO(2967396) implement properly, move function
  if (arch() == Arch::ARM) return;

  Asm as { cb };
  // call to a trace function
  as.mov_imm64_reg((int64_t)as.frontier(), reg::rcx);
  as.mov_reg64_reg64(rVmFp, reg::rdi);
  as.mov_reg64_reg64(rVmSp, reg::rsi);
  as.mov_imm64_reg(pcOff, reg::rdx);
  // do the call; may use a trampoline
  emitCall(as, (TCA)traceCallback);
}
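The four moves load arguments per the SysV AMD64 convention, where rdi, rsi, rdx, and rcx carry the first four integer arguments. That implies a callback signature along the following lines; the exact declaration is an assumption made for illustration:

// Hedged sketch: fp arrives in rdi, sp in rsi, the bytecode offset in
// rdx, and the tracelet's start address in rcx.
void traceCallback(ActRec* fp, Cell* sp, int64_t pcOff, void* tracelet);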
Example #4
#include <iostream>

#include "Asm.h"  // assumed header for this standalone assembler driver

using std::cout;

int main(int argc, const char *argv[])
{
  Asm a;
  /*
  cout << a.getCurrentLine() << endl
       << a.getInFileName() << endl
       << a.getOutFileName() << endl
       << a.getFileExt() << endl;
  */
  a.init();
  a.run("tst.pre.s");
  cout << a.getLogger();
  return 0;
}
Example #5
void emitCall(Asm& a, TCA dest, RegSet args) {
  // NB: Keep this in sync with Vgen::emit(call) in vasm-x64.cpp.
  if (a.jmpDeltaFits(dest)) {
    a.call(dest);
  } else {
    // Can't do a near call, so store the address in the data section and
    // call it via rip-relative addressing.  This assumes the data section
    // is near the current code section.  Since this sequence is directly
    // inline, rip-relative addressing like this is more compact than
    // loading a 64-bit immediate.
    auto addr = mcg->allocLiteral((uint64_t)dest);
    a.call(rip[(intptr_t)addr]);
  }
}
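jmpDeltaFits gates the choice between the two forms: a near call encodes a signed 32-bit displacement, so it only reaches targets within roughly ±2GB of the instruction. A plausible sketch of the check, assuming the displacement is measured from the end of the 5-byte call/jmp (an illustration, not HHVM's exact code):

// rel32 reachability: the delta from the end of a 5-byte near call/jmp
// to the target must round-trip through a signed 32-bit immediate.
bool jmpDeltaFits(TCA frontier, TCA dest) {
  int64_t delta = dest - (frontier + 5);
  return delta == static_cast<int32_t>(delta);
}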
Example #6
void emitCall(Asm& a, TCA dest) {
  if (a.jmpDeltaFits(dest) && !Stats::enabled()) {
    a.    call(dest);
  } else {
    a.    call(tx64->getNativeTrampoline(dest));
  }
}
Example #7
void emitCall(Asm& a, TCA dest) {
  if (a.jmpDeltaFits(dest) && !Stats::enabled()) {
    a.    call(dest);
  } else {
    a.    call(TranslatorX64::Get()->getNativeTrampoline(dest));
  }
}
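The trampoline fallback in Examples 6 and 7 works because a far destination only needs to be reachable from a stub that sits near the TC; every call site can then use a 5-byte near call to that stub. A hypothetical sketch of what such a trampoline might contain (names and layout assumed, not HHVM's actual emitter):

// Hypothetical: 10-byte movabs plus 2-byte indirect jmp. rax is
// caller-saved and not an argument register, so clobbering it on the
// way into a call is safe.
TCA emitNativeTrampoline(Asm& a, TCA dest) {
  auto const tramp = a.frontier();
  a.emitImmReg(uintptr_t(dest), reg::rax);  // movabs $dest, %rax
  a.jmp(reg::rax);                          // jmp *%rax
  return tramp;
}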
Example #8
void emitTraceCall(CodeBlock& cb, int64_t pcOff) {
    switch (arch()) {
    case Arch::X64: {
        Asm a { cb };
        // call to a trace function
        a.    movq   (a.frontier(), rcx);
        a.    movq   (rVmFp, rdi);
        a.    movq   (rVmSp, rsi);
        a.    movq   (pcOff, rdx);
        // do the call; may use a trampoline
        emitCall(a, reinterpret_cast<TCA>(traceCallback));
        break;
    }
    case Arch::ARM:
        // TODO(2967396) implement properly, move function
        break;
    }
}
Example #9
void emitIncRef(Asm& as, PhysReg base) {
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    emitAssertRefCount(as, base);
  }
  // emit incref
  as.incl(base[FAST_REFCOUNT_OFFSET]);
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    // Assert that the ref count is greater than zero
    emitAssertFlagsNonNegative(as);
  }
}
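The incl targets the 32-bit refcount field that refcounted HHVM types keep at a common offset, which is what lets one emitter serve strings, arrays, and objects alike. In C++ terms, a sketch of the equivalent memory operation, assuming only that FAST_REFCOUNT_OFFSET names that field's byte offset:

// What `as.incl(base[FAST_REFCOUNT_OFFSET])' does to the object in
// memory, written out by hand.  Sketch, not an HHVM helper.
inline void incRefEquivalent(void* base) {
  auto rc = reinterpret_cast<int32_t*>(
    static_cast<char*>(base) + FAST_REFCOUNT_OFFSET);
  ++*rc;
}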
Example #10
template <class Arg>
void dotest(const char* opName, Asm& a, void (Asm::*memFn)(Arg)) {
  std::vector<std::string> expecteds;

  auto& args = Gen<Arg>::gen();
  for (auto& ar : args) {
    expecteds.push_back(expected_str(ar));
    (a.*memFn)(ar);
  }

  auto const dump = dump_disasm(a);
  compare(opName, dump, expecteds);
  a.clear();
}
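A hypothetical invocation of this harness: Arg is deduced from the member-function pointer, and the operands come from the Gen<Arg>::gen() specialization. An overloaded mnemonic needs an explicit cast before deduction can succeed:

// Sketch: exercise the Reg64 form of an overloaded one-operand op.
dotest("incq", a, static_cast<void (Asm::*)(Reg64)>(&Asm::incq));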
Example #11
TCA emitFunctionEnterHelper(CodeBlock& cb, UniqueStubs& us) {
  bool (*helper)(const ActRec*, int) = &EventHook::onFunctionCall;
  Asm a { cb };

  alignJmpTarget(cb);
  auto const start = a.frontier();

  Label skip;

  PhysReg ar = rarg(0);

  a.   movq    (rvmfp(), ar);
  a.   push    (rvmfp());
  a.   movq    (rsp(), rvmfp());
  a.   push    (ar[AROFF(m_savedRip)]);
  a.   push    (ar[AROFF(m_sfp)]);
  a.   movq    (EventHook::NormalFunc, rarg(1));
  emitCall(a, CppCall::direct(helper), arg_regs(2));
  us.functionEnterHelperReturn = a.frontier();
  a.   testb   (al, al);
  a.   je8     (skip);
  a.   addq    (16, rsp());
  a.   pop     (rvmfp());
  a.   ret     ();

asm_label(a, skip);
  // The event hook has already cleaned up the stack/actrec so that we're ready
  // to continue from the original call site.  Just need to grab the fp/rip
  // from the original frame, and sync rvmsp() to the execution-context's copy.
  a.   pop     (rvmfp());
  a.   pop     (rsi);
  a.   addq    (16, rsp()); // drop our call frame
  a.   loadq   (rvmtl()[rds::kVmspOff], rvmsp());
  a.   jmp     (rsi);
  a.   ud2     ();

  return start;
}
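The testb/je8 pair dispatches on the helper's boolean return: al is nonzero when the event hook wants the call to proceed. The stub's two continuations, as a C-like sketch rather than additional HHVM code:

// Sketch of the control flow implemented above.
if (EventHook::onFunctionCall(ar, EventHook::NormalFunc)) {
  // true: drop the two saved-AR slots, restore rvmfp, and return into
  // the function as if the stub were never here.
} else {
  // false (the hook intercepted the call): the frame is already torn
  // down, so recover the caller's fp/rip, resync rvmsp() from the
  // execution context, and jump back to the original call site.
}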
Example #12
TCA emitEndCatchHelper(CodeBlock& cb, UniqueStubs& us) {
  Asm a { cb };
  alignJmpTarget(cb);
  Label debuggerReturn;
  Label resumeCppUnwind;

  auto const start = a.frontier();
  a.    cmpq (0, rvmtl()[unwinderDebuggerReturnSPOff()]);
  a.    jne8 (debuggerReturn);

  // Normal endCatch situation: call back to tc_unwind_resume, which returns
  // the catch trace (or null) in rax and the new vmfp in rdx.
  a.    movq (rvmfp(), rarg(0));
  a.    call (TCA(tc_unwind_resume));
  a.    movq (rdx, rvmfp());
  a.    testq(rax, rax);
  a.    jz8  (resumeCppUnwind);
  a.    jmp  (rax);  // rdx is still live if we're going to code from llvm

asm_label(a, resumeCppUnwind);
  static_assert(sizeof(tl_regState) == 1,
                "The following store must match the size of tl_regState");
  auto vptr = emitTLSAddr(a, tl_regState, rax);
  Vasm::prefix(a, vptr).
        storeb(static_cast<int32_t>(VMRegState::CLEAN), vptr.mr());
  a.    loadq(rvmtl()[unwinderExnOff()], rarg(0));
  emitCall(a, TCA(_Unwind_Resume), arg_regs(1));
  us.endCatchHelperPast = a.frontier();
  a.    ud2();

asm_label(a, debuggerReturn);
  a.    loadq (rvmtl()[unwinderDebuggerReturnSPOff()], rvmsp());
  a.    storeq(0, rvmtl()[unwinderDebuggerReturnSPOff()]);
  svcreq::emit_persistent(a.code(), folly::none, REQ_POST_DEBUGGER_RET);

  return start;
}
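The comment pins down the return convention: catch trace in rax, new vmfp in rdx. That matches the SysV ABI's two-register struct return, so the C++ side presumably looks something like the following (shape inferred from the stub; details are assumptions):

// Sketch: a 16-byte trivially-copyable struct comes back in rax:rdx.
struct TCUnwindInfo {
  TCA catchTrace;  // rax: next catch trace, or null to keep unwinding
  ActRec* fp;      // rdx: the vmfp to resume with
};
TCUnwindInfo tc_unwind_resume(ActRec* fp);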
Example #13
template <class Arg1, class Arg2>
void dotest(const char* opName, Asm& a, void (Asm::*memFn)(Arg1, Arg2),
            const std::vector<Arg1>& args1, const std::vector<Arg2>& args2) {
  std::vector<std::string> expecteds;

  for (auto& ar1 : args1) {
    for (auto& ar2 : args2) {
      expecteds.push_back(
        folly::format("{}, {}", expected_str(ar1), expected_str(ar2)).str()
      );
      (a.*memFn)(ar1, ar2);
    }
  }

  auto const dump = dump_disasm(a);
  compare(opName, dump, expecteds);
  a.clear();
}
Example #14
template <class Arg1, class Arg2>
void dotest(const char* opName, Asm& a, void (Asm::*memFn)(Arg1, Arg2),
            const std::vector<Arg1>& args1, const std::vector<Arg2>& args2) {
  std::vector<std::string> expecteds;

  for (auto& ar1 : args1) {
    for (auto& ar2 : args2) {
      expecteds.push_back(str(
        boost::format("%s,%s") % expected_str(ar1)
                               % expected_str(ar2)
      ));
      (a.*memFn)(ar1, ar2);
    }
  }

  std::ifstream dump;
  dump_disasm(a, dump);
  compare(opName, dump, expecteds);
  a.clear();
}
Example #15
TCA
emitServiceReqWork(CodeBlock& cb, TCA start, bool persist, SRFlags flags,
                   ServiceRequest req, const ServiceReqArgVec& argv) {
  assert(start);
  const bool align = flags & SRFlags::Align;
  Asm as { cb };

  /*
   * Remember previous state of the code cache.
   */
  boost::optional<CodeCursor> maybeCc = boost::none;
  if (start != as.frontier()) {
    maybeCc = boost::in_place<CodeCursor>(boost::ref(as), start);
  }

  /* max space for moving to align, saving VM regs plus emitting args */
  static const int
    kVMRegSpace = 0x14,
    kMovSize = 0xa,
    kNumServiceRegs = sizeof(serviceReqArgRegs) / sizeof(PhysReg),
    kMaxStubSpace = kJmpTargetAlign - 1 + kVMRegSpace +
      kNumServiceRegs * kMovSize;
  if (align) {
    moveToAlign(cb);
  }
  TCA retval = as.frontier();
  TRACE(3, "Emit Service Req @%p %s(", start, serviceReqName(req));
  /*
   * Move args into appropriate regs. Eager VMReg save may bash flags,
   * so set the CondCode arguments first.
   */
  for (int i = 0; i < argv.size(); ++i) {
    assert(i < kNumServiceReqArgRegs);
    auto reg = serviceReqArgRegs[i];
    const auto& argInfo = argv[i];
    switch (argInfo.m_kind) {
      case ServiceReqArgInfo::Immediate: {
        TRACE(3, "%" PRIx64 ", ", argInfo.m_imm);
        as.    emitImmReg(argInfo.m_imm, reg);
      } break;
      case ServiceReqArgInfo::CondCode: {
        // Already set before VM reg save.
        DEBUG_ONLY TCA start = as.frontier();
        as.    setcc(argInfo.m_cc, rbyte(reg));
        assert(as.frontier() - start <= kMovSize);
        TRACE(3, "cc(%x), ", argInfo.m_cc);
      } break;
      default: not_reached();
    }
  }
  emitEagerVMRegSave(as, RegSaveFlags::SaveFP);
  if (persist) {
    as.  emitImmReg(0, Transl::reg::rAsm);
  } else {
    as.  emitImmReg((uint64_t)start, Transl::reg::rAsm);
  }
  TRACE(3, ")\n");
  as.    emitImmReg(req, Transl::reg::rdi);

  /*
   * Weird hand-shaking with enterTC: reverse-call a service routine.
   *
   * In the case of some special stubs (m_callToExit, m_retHelper), we
   * have already unbalanced the return stack by doing a ret to
   * something other than enterTCHelper.  In that case
   * SRJmpInsteadOfRet indicates to fake the return.
   */
  if (flags & SRFlags::JmpInsteadOfRet) {
    as.  pop(Transl::reg::rax);
    as.  jmp(Transl::reg::rax);
  } else {
    as.  ret();
  }

  // TODO(2796856): we should record an OpServiceRequest pseudo-bytecode here.

  translator_not_reached(as);
  if (!persist) {
    /*
     * Recycled stubs need to be uniformly sized. Make space for the
     * maximal possible service requests.
     */
    assert(as.frontier() - start <= kMaxStubSpace);
    as.emitNop(start + kMaxStubSpace - as.frontier());
    assert(as.frontier() - start == kMaxStubSpace);
  }
  return retval;
}
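The argument loop only touches a small part of the descriptor type; a sketch of the shape the switch implies (the real ServiceReqArgInfo may carry more):

// Inferred shape: a kind tag plus either an immediate or a condition
// code, matching the two cases handled above.
struct ServiceReqArgInfo {
  enum Kind { Immediate, CondCode } m_kind;
  union {
    uint64_t m_imm;      // Immediate: loaded via emitImmReg
    ConditionCode m_cc;  // CondCode: materialized via setcc
  };
};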
Example #16
SrcKey emitMagicFuncPrologue(Func* func, uint32_t nPassed, TCA& start) {
  assert(func->isMagic());
  assert(func->numParams() == 2);
  assert(!func->hasVariadicCaptureParam());
  using namespace reg;
  using MkPacked = ArrayData* (*)(uint32_t, const TypedValue*);

  Asm a { mcg->code.main() };
  Label not_magic_call;
  auto const rInvName = r13;
  assert(!kSpecialCrossTraceRegs.contains(r13));

  auto skFuncBody = SrcKey {};
  auto callFixup  = TCA { nullptr };

  /*
   * If nPassed is not 2, we need to generate a non-magic prologue
   * that can be used if there is no invName on the ActRec.
   * (I.e. someone called __call directly.)  In the case where nPassed
   * is 2, whether it's magic or not the prologue we generate at the
   * end will work.
   *
   * This is placed ahead of the actual prologue entry point, but only
   * because emitPrologueWork can't easily go to astubs right now.
   */
  if (nPassed != 2) {
    asm_label(a, not_magic_call);
    skFuncBody = emitPrologueWork(func, nPassed);
    // There is a REQ_BIND_JMP at the end of emitPrologueWork.
  }

  // Main prologue entry point is here.
  start = emitFuncGuard(a, func);
  if (RuntimeOption::EvalJitTransCounters) emitTransCounterInc(a);
  a.    pop    (rStashedAR[AROFF(m_savedRip)]);
  maybeEmitStackCheck(a, func);

  /*
   * Detect if this was actually a magic call (i.e. the ActRec has an
   * invName), and shuffle the magic call arguments into a packed
   * array.
   *
   * If it's not a magic call, we jump backward to a normal function
   * prologue (see above) for nPassed.  Except if nPassed is 2, we'll
   * be jumping over the magic call shuffle, to the prologue for 2
   * args below.
   */
  a.    loadq  (rStashedAR[AROFF(m_invName)], rInvName);
  a.    testb  (1, rbyte(rInvName));
  if (nPassed == 2) {
    a.  jz8    (not_magic_call);
  } else {
    not_magic_call.jccAuto(a, CC_Z);
  }
  a.    decq   (rInvName);
  a.    storeq (0, rStashedAR[AROFF(m_varEnv)]);
  if (nPassed != 0) { // for zero args, we use the empty array
    a.  movq   (rStashedAR, argNumToRegName[0]);
    a.  subq   (rVmSp, argNumToRegName[0]);
    a.  shrq   (0x4, argNumToRegName[0]); // bytes -> cells; sizeof(TypedValue) == 16
    a.  movq   (rVmSp, argNumToRegName[1]);
    emitCall(a, reinterpret_cast<CodeAddress>(
      MkPacked{MixedArray::MakePacked}));
    callFixup = a.frontier();
  }
  if (nPassed != 2) {
    a.  storel (2, rStashedAR[AROFF(m_numArgsAndGenCtorFlags)]);
  }
  if (debug) { // "assertion": the emitPrologueWork path fixes up rVmSp.
    a.  movq   (0, rVmSp);
  }

  // Magic calls expect two arguments: first the name of the called
  // function, then a packed array of the arguments to it.  The
  // following addresses are where those two TVs will live.
  auto const strTV   = rStashedAR - cellsToBytes(1);
  auto const arrayTV = rStashedAR - cellsToBytes(2);

  // Store the two arguments for the magic call.
  emitStoreTVType(a, KindOfString, strTV[TVOFF(m_type)]);
  a.    storeq (rInvName, strTV[TVOFF(m_data)]);
  emitStoreTVType(a, KindOfArray, arrayTV[TVOFF(m_type)]);
  if (nPassed == 0) {
    emitImmStoreq(a, staticEmptyArray(), arrayTV[TVOFF(m_data)]);
  } else {
    a.  storeq (rax, arrayTV[TVOFF(m_data)]);
  }

  // Every magic call prologue has a case for nPassed == 2, because
  // this is how it works when the call is actually magic.
  if (nPassed == 2) asm_label(a, not_magic_call);
  auto const skFor2Args = emitPrologueWork(func, 2);
  if (nPassed == 2) skFuncBody = skFor2Args;

  if (RuntimeOption::HHProfServerEnabled && callFixup) {
    mcg->fixupMap().recordFixup(
      callFixup,
      Fixup { skFuncBody.offset() - func->base(), func->numSlotsInFrame() }
    );
  }

  return skFuncBody;
}
Example #17
void emitTransCounterInc(Asm& a) {
  emitTransCounterInc(Vauto(a.code()).main());
}
Example #18
void emitCall(Asm& a, CppCall call, RegSet args) {
  emitCall(Vauto(a.code()).main(), call, args);
}
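Examples 17 and 18 show the same bridge pattern: wrap the legacy assembler's code block in a Vauto unit, emit vasm into its main stream, and rely on lowering to x64 when the Vauto goes out of scope. A sketch of the pattern, assuming that scope-exit behavior:

// Sketch: route a legacy Asm-based entry point through vasm.
void emitViaVasm(Asm& a) {
  Vauto vasm(a.code());   // vasm unit over the same code block
  Vout& v = vasm.main();  // main instruction stream
  // ... emit vasm instructions into v ...
}                         // ~Vauto lowers and emits into a.code()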
Example #19
TCA emitFreeLocalsHelpers(CodeBlock& cb, UniqueStubs& us) {
  Label doRelease;
  Label release;
  Label loopHead;

  auto const rData     = rarg(0); // not live coming in, but used
                                  // for destructor calls
  auto const rIter     = rarg(1); // live coming in
  auto const rFinished = rdx;
  auto const rType     = ecx;
  int const tvSize     = sizeof(TypedValue);

  Asm a { cb };
  align(cb, Alignment::CacheLine, AlignContext::Dead);
  auto const start = a.frontier();

asm_label(a, release);
  a.    loadq  (rIter[TVOFF(m_data)], rData);
  a.    cmpl   (1, rData[FAST_REFCOUNT_OFFSET]);
  jccBlock<CC_L>(a, [&] {
    a.  jz8    (doRelease);
    a.  decl   (rData[FAST_REFCOUNT_OFFSET]);
  });
  a.    ret    ();
asm_label(a, doRelease);
  a.    push    (rIter);
  a.    push    (rFinished);
  a.    call    (lookupDestructor(a, PhysReg(rType)));
  // Three quads between where %rsp is now and the saved RIP of the call into
  // the stub: two from the pushes above, and one for the saved RIP of the call
  // to `release' done below (e.g., in emitDecLocal).
  mcg->fixupMap().recordFixup(a.frontier(), makeIndirectFixup(3));
  a.    pop     (rFinished);
  a.    pop     (rIter);
  a.    ret     ();

  auto emitDecLocal = [&]() {
    Label skipDecRef;

    // Zero-extend the type while loading so it can be used as an array index
    // to lookupDestructor() above.
    emitLoadTVType(a, rIter[TVOFF(m_type)], rType);
    emitCmpTVType(a, KindOfRefCountThreshold, rbyte(rType));
    a.  jle8   (skipDecRef);
    a.  call   (release);
  asm_label(a, skipDecRef);
  };

  alignJmpTarget(cb);
  us.freeManyLocalsHelper = a.frontier();
  a.    lea    (rvmfp()[-(jit::kNumFreeLocalsHelpers * sizeof(Cell))],
                rFinished);

  // Loop for the first few locals, but unroll the final kNumFreeLocalsHelpers.
asm_label(a, loopHead);
  emitDecLocal();
  a.    addq   (tvSize, rIter);
  a.    cmpq   (rIter, rFinished);
  a.    jnz8   (loopHead);

  for (int i = 0; i < kNumFreeLocalsHelpers; ++i) {
    us.freeLocalsHelpers[kNumFreeLocalsHelpers - i - 1] = a.frontier();
    emitDecLocal();
    if (i != kNumFreeLocalsHelpers - 1) {
      a.addq   (tvSize, rIter);
    }
  }

  a.    ret    ();

  // Keep me small!
  always_assert(Stats::enabled() ||
                (a.frontier() - start <= 4 * kX64CacheLineSize));

  return start;
}
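In C++ terms, the shared release path implements a guarded decref: static and uncounted values never reach the decrement because their sentinel counts compare below 1 under CC_L. A sketch of the per-value logic (refCount() and destruct() are hypothetical stand-ins for the emitted instructions):

// Sketch of what the emitted `release' block does for one TypedValue.
void releaseEquivalent(TypedValue* tv) {
  auto data = tv->m_data.pstr;   // loadq rIter[TVOFF(m_data)]
  auto& count = refCount(data);  // cmpl at FAST_REFCOUNT_OFFSET
  if (count < 1) return;         // jccBlock<CC_L>: skip statics
  if (count == 1) {
    destruct(data);              // doRelease: run the destructor
  } else {
    --count;                     // decl FAST_REFCOUNT_OFFSET
  }
}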
Example #20
void emitAssertFlagsNonNegative(Asm& as) {
  ifThen(as, CC_NGE, [&] { as.ud2(); });
}