Example #1
void fixupWork(ExecutionContext* ec, ActRec* nextRbp) {
  assertx(RuntimeOption::EvalJit);

  TRACE(1, "fixup(begin):\n");

  while (true) {
    auto const rbp = nextRbp;
    nextRbp = rbp->m_sfp;
    assertx(nextRbp && nextRbp != rbp && "Missing fixup for native call");
    TRACE(2, "considering frame %p, %p\n", rbp, (void*)rbp->m_savedRip);

    if (isVMFrame(nextRbp)) {
      TRACE(2, "fixup checking vm frame %s\n",
               nextRbp->m_func->name()->data());
      VMRegs regs;
      if (getFrameRegs(rbp, &regs)) {
        TRACE(2, "fixup(end): func %s fp %p sp %p pc %p\n",
              regs.fp->m_func->name()->data(),
              regs.fp, regs.sp, regs.pc);
        auto& vmRegs = vmRegsUnsafe();
        vmRegs.fp = const_cast<ActRec*>(regs.fp);
        vmRegs.pc = reinterpret_cast<PC>(regs.pc);
        vmRegs.stack.top() = regs.sp;
        return;
      }
    }
  }
}
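
fixupWork delegates the actual register reconstruction to getFrameRegs. A
minimal sketch of what it is assumed to do, based on the lookup pattern
visible in Example #3 below (the table handle s_fixups and the record layout
are hypothetical):

// Hedged sketch, not the actual implementation: look up the frame's saved
// return address in the fixup table; if a Fixup record exists for it, rebuild
// the VM registers from the frame pointer and report success.
bool getFrameRegs(const ActRec* rbp, VMRegs* regs) {
  auto const tca = reinterpret_cast<TCA>(rbp->m_savedRip);
  auto const* ent = s_fixups.find(tca);  // hypothetical table handle
  if (!ent) return false;                // not a fixup site; keep walking
  regsFromActRec(tca, rbp, ent->fixup, regs);
  return true;
}
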
void enterTCImpl(TCA start, ActRec* stashedAR) {
  // We have to force C++ to spill anything that might be in a callee-saved
  // register (aside from %rbp), since enterTCHelper does not save them.
  CALLEE_SAVED_BARRIER();
  auto& regs = vmRegsUnsafe();
  jit::enterTCHelper(regs.stack.top(), regs.fp, start,
                     vmFirstAR(), rds::tl_base, stashedAR);
  CALLEE_SAVED_BARRIER();
}
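
The CALLEE_SAVED_BARRIER macro is what forces the spill described in the
comment above. A plausible x86-64 definition under a GCC/Clang toolchain (a
sketch of the idea, not necessarily the exact macro):

// Sketch: an empty inline-asm statement whose clobber list names every
// callee-saved GPR except %rbp, so the compiler must assume they are
// destroyed and spill any live values before enterTCHelper runs.
#define CALLEE_SAVED_BARRIER() \
  asm volatile("" : : : "rbx", "r12", "r13", "r14", "r15")
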
Example #3
void FixupMap::fixupWorkSimulated(ExecutionContext* ec) const {
  TRACE(1, "fixup(begin):\n");

  auto isVMFrame = [] (ActRec* ar, const vixl::Simulator* sim) {
    // If this assert is failing, you may have forgotten a sync point somewhere
    assertx(ar);
    bool ret =
      uintptr_t(ar) - s_stackLimit >= s_stackSize &&
      !sim->is_on_stack(ar);
    assertx(!ret || isValidVMStackAddress(ar) || ar->resumed());
    return ret;
  };

  // For each nested simulator (corresponding to nested VM invocations), look at
  // its PC to find a potential fixup key.
  //
  // Call-stack walking is necessary because we may get called from a
  // uniqueStub.
  for (int i = ec->m_activeSims.size() - 1; i >= 0; --i) {
    auto const* sim = ec->m_activeSims[i];
    auto const fp = arm::x2a(arm::rvmfp());
    auto* rbp = reinterpret_cast<ActRec*>(sim->xreg(fp.code()));
    auto tca = reinterpret_cast<TCA>(sim->pc());
    TRACE(2, "considering frame %p, %p\n", rbp, tca);

    while (rbp && !isVMFrame(rbp, sim)) {
      tca = reinterpret_cast<TCA>(rbp->m_savedRip);
      rbp = rbp->m_sfp;
    }

    if (!rbp) continue;

    auto* ent = m_fixups.find(tca);
    if (!ent) {
      continue;
    }

    if (ent->isIndirect()) {
      not_implemented();
    }

    VMRegs regs;
    regsFromActRec(tca, rbp, ent->fixup, &regs);
    TRACE(2, "fixup(end): func %s fp %p sp %p pc %p\n",
          regs.fp->m_func->name()->data(),
          regs.fp, regs.sp, regs.pc);
    auto& vmRegs = vmRegsUnsafe();
    vmRegs.fp = const_cast<ActRec*>(regs.fp);
    vmRegs.pc = reinterpret_cast<PC>(regs.pc);
    vmRegs.stack.top() = regs.sp;
    return;
  }

  // This shouldn't be reached.
  always_assert(false);
}
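
regsFromActRec is where a (TCA, ActRec, Fixup) triple becomes concrete VM
registers. A minimal sketch, assuming a non-indirect Fixup stores a bytecode
offset relative to the function's base plus an eval-stack depth in cells (the
field names are assumptions):

// Hedged sketch of the Fixup record and how it is applied.
struct Fixup {
  int32_t pcOffset;  // bytecode offset, relative to the function's base
  int32_t spOffset;  // eval-stack depth at the fixup site, in cells
};

void regsFromActRec(TCA /*tca*/, const ActRec* ar, const Fixup& fixup,
                    VMRegs* regs) {
  auto const func = ar->m_func;
  regs->fp = ar;
  regs->pc = func->unit()->at(func->base() + fixup.pcOffset);
  regs->sp = reinterpret_cast<TypedValue*>(const_cast<ActRec*>(ar)) -
             fixup.spOffset;
}
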
Example #4
void handleStackOverflow(ActRec* calleeAR) {
  /*
   * First synchronize registers.
   *
   * We're called in two situations: either this is the first frame after a
   * re-entry, in which case calleeAR->m_sfp is enterTCHelper's native stack,
   * or we're called in the middle of one VM entry (from a func prologue).  We
   * want to raise the exception from the caller's FCall instruction in the
   * second case, and in the first case we have to raise in a special way
   * inside this re-entry.
   *
   * Either way the stack depth is below the calleeAR by numArgs, because we
   * haven't run func prologue duties yet.
   */
  auto& unsafeRegs = vmRegsUnsafe();
  auto const isReentry = calleeAR == vmFirstAR();
  auto const arToSync = isReentry ? calleeAR : calleeAR->m_sfp;
  unsafeRegs.fp = arToSync;
  unsafeRegs.stack.top() =
    reinterpret_cast<Cell*>(calleeAR) - calleeAR->numArgs();
  auto const func_base = arToSync->func()->base();
  // calleeAR->m_soff is 0 in the re-entry case, so we'll set pc to the func
  // base.  That's fine: in that case we throw a special
  // VMReenterStackOverflow, so the unwinder won't look at pc anyway.
  unsafeRegs.pc = arToSync->func()->unit()->at(func_base + calleeAR->m_soff);
  tl_regState = VMRegState::CLEAN;

  if (!isReentry) {
    /*
     * The normal case - we were called via FCall, or FCallArray.  We need to
     * construct the pc of the fcall from the return address (which will be
     * after the fcall). Because fcall is a variable length instruction, and
     * because we sometimes delete instructions from the instruction stream, we
     * need to use fpi regions to find the fcall.
     */
    const FPIEnt* fe = liveFunc()->findPrecedingFPI(
      liveUnit()->offsetOf(vmpc()));
    vmpc() = liveUnit()->at(fe->m_fcallOff);
    assertx(isFCallStar(peek_op(vmpc())));
    raise_error("Stack overflow");
  } else {
    /*
     * We were called via re-entry.  Leak the params and the ActRec, and tell
     * the unwinder that there's nothing left to do in this "entry".
     *
     * Also, the caller hasn't set up the m_invName area on the ActRec (unless
     * it was a magic call), since it's the prologue's responsibility if it's a
     * non-magic call.  We can just null it out since we're fatalling.
     */
    vmsp() = reinterpret_cast<Cell*>(calleeAR + 1);
    calleeAR->setVarEnv(nullptr);
    throw VMReenterStackOverflow();
  }
  not_reached();
}
Example #5
bool checkCalleeStackOverflow(const ActRec* calleeAR) {
  auto const func = calleeAR->func();
  auto const limit = func->maxStackCells() + kStackCheckPadding;

  const void* const needed_top =
    reinterpret_cast<const TypedValue*>(calleeAR) - limit;

  const void* const lower_limit =
    static_cast<char*>(vmRegsUnsafe().stack.getStackLowAddress()) +
    Stack::sSurprisePageSize;

  return needed_top < lower_limit;
}
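
A hedged usage sketch of how this predicate is presumed to pair with
handleStackOverflow from Example #4 in a function prologue (the wrapper name
is hypothetical):

// Hypothetical prologue-side glue: check first, then raise.
void checkStackAndRaise(ActRec* calleeAR) {
  if (checkCalleeStackOverflow(calleeAR)) {
    handleStackOverflow(calleeAR);  // raises; never returns
  }
}
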
TCA fcallHelper(ActRec* ar, void* sp) {
  try {
    assert(!ar->resumed());
    TCA tca =
      mcg->getFuncPrologue(const_cast<Func*>(ar->m_func), ar->numArgs(), ar);
    if (tca) {
      return tca;
    }
    if (!ar->m_func->isClonedClosure()) {
      /*
       * If the func is a cloned closure, then the original closure has
       * already run the prologue, and the prologues array is just being
       * used as entry points for the DV funclets.  Don't run the prologue
       * again.
       */
      VMRegAnchor _(ar);
      if (g_context->doFCall(ar, vmpc())) {
        return mcg->tx().uniqueStubs.resumeHelperRet;
      }
      // We've been asked to skip the function body (fb_intercept).  The
      // frame, stack, and pc have already been fixed; flag that with a
      // negative return address.
      return (TCA)-ar->m_savedRip;
    }
    setupAfterPrologue(ar, sp);
    assert(ar == vmRegsUnsafe().fp);
    return mcg->tx().uniqueStubs.resumeHelper;
  } catch (...) {
    /*
      The return address is set to __fcallHelperThunk, which has no
      unwind information.  It's "logically" part of the TC, but the C++
      unwinder won't know that.  So point our return address at the
      called function's return address (which will be in the TC).

      Note that the registers really are clean (we cleaned them in the
      try above), so we just have to tell the unwinder that.
    */
    DECLARE_FRAME_POINTER(framePtr);
    tl_regState = VMRegState::CLEAN;
    framePtr->m_savedRip = ar->m_savedRip;
    throw;
  }
}
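
The catch block above relies on DECLARE_FRAME_POINTER to reach the current
native frame. A sketch of what such a macro presumably looks like with
GCC/Clang builtins (the exact definition may differ):

// Sketch: bind `name` to the current function's frame pointer, viewed as an
// ActRec* so that its m_savedRip slot can be overwritten before rethrowing.
#define DECLARE_FRAME_POINTER(name) \
  ActRec* name = reinterpret_cast<ActRec*>(__builtin_frame_address(0))
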
static void setupAfterPrologue(ActRec* fp, void* sp) {
  auto& regs = vmRegsUnsafe();
  regs.fp = fp;
  regs.stack.top() = (Cell*)sp;
  int nargs = fp->numArgs();
  int nparams = fp->m_func->numNonVariadicParams();
  Offset firstDVInitializer = InvalidAbsoluteOffset;
  if (nargs < nparams) {
    const Func::ParamInfoVec& paramInfo = fp->m_func->params();
    for (int i = nargs; i < nparams; ++i) {
      Offset dvInitializer = paramInfo[i].funcletOff;
      if (dvInitializer != InvalidAbsoluteOffset) {
        firstDVInitializer = dvInitializer;
        break;
      }
    }
  }
  if (firstDVInitializer != InvalidAbsoluteOffset) {
    regs.pc = fp->m_func->unit()->entry() + firstDVInitializer;
  } else {
    regs.pc = fp->m_func->getEntry();
  }
}
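
As a worked illustration (hypothetical function, invented offsets): for
function f($a, $b = 1, $c = 2) {} called with one argument, nargs is 1 and
nparams is 3, so the loop finds $b's DV funclet first. Execution resumes at
that funclet, which initializes $b, falls through into $c's funclet, and then
enters the main body. Only when no missing parameter has a default does pc
fall back to the function's plain entry point.
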
Example #8
void
FixupMap::fixupWork(ExecutionContext* ec, ActRec* rbp) const {
  assert(RuntimeOption::EvalJit);

  TRACE(1, "fixup(begin):\n");

  auto* nextRbp = rbp;
  rbp = nullptr;
  do {
    auto* prevRbp = rbp;
    rbp = nextRbp;
    assert(rbp && "Missing fixup for native call");
    nextRbp = rbp->m_sfp;
    TRACE(2, "considering frame %p, %p\n", rbp, (void*)rbp->m_savedRip);

    if (isVMFrame(ec, nextRbp)) {
      TRACE(2, "fixup checking vm frame %s\n",
               nextRbp->m_func->name()->data());
      VMRegs regs;
      if (getFrameRegs(rbp, prevRbp, &regs)) {
        TRACE(2, "fixup(end): func %s fp %p sp %p pc %p\n",
              regs.m_fp->m_func->name()->data(),
              regs.m_fp, regs.m_sp, regs.m_pc);
        auto& vmRegs = vmRegsUnsafe();
        vmRegs.fp = const_cast<ActRec*>(regs.m_fp);
        vmRegs.pc = reinterpret_cast<PC>(regs.m_pc);
        vmRegs.stack.top() = regs.m_sp;
        return;
      }
    }
  } while (rbp && rbp != nextRbp);

  // OK, we've exhausted the entire ActRec chain.  We are only
  // invoking ::fixup() from contexts that were known to be called out
  // of the TC, so this cannot happen.
  always_assert(false);
}
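
For completeness, the isVMFrame predicate used by the non-simulated walkers
above is presumed to mirror the lambda from Example #3: a frame counts as a
VM frame when it does not lie on the native C++ stack. A hedged sketch,
assuming the same s_stackLimit/s_stackSize bounds:

// Sketch: anything outside the native stack's [s_stackLimit, s_stackLimit +
// s_stackSize) range is taken to be a VM frame.
bool isVMFrame(const ActRec* ar) {
  assertx(ar);
  auto const ret = uintptr_t(ar) - s_stackLimit >= s_stackSize;
  assertx(!ret || isValidVMStackAddress(ar) || ar->resumed());
  return ret;
}
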