Example 1

TCA emitServiceReqWork(CodeBlock& cb, TCA start, bool persist, SRFlags flags,
                       ServiceRequest req, const ServiceReqArgVec& argv) {
  MacroAssembler a { cb };

  folly::Optional<CodeCursor> maybeCc = folly::none;
  if (start != cb.frontier()) {
    maybeCc.emplace(cb, start);
  }

  // There are 6 instructions after the argument-shuffling, and they're all
  // single instructions (i.e. not macros). There are up to 4 instructions per
  // argument (it may take up to 4 instructions to move a 64-bit immediate into
  // a register).
  constexpr auto kMaxStubSpace = 6 * vixl::kInstructionSize +
    (4 * maxArgReg()) * vixl::kInstructionSize;

  for (auto i = 0; i < argv.size(); ++i) {
    auto reg = serviceReqArgReg(i);
    auto const& arg = argv[i];
    switch (arg.m_kind) {
      case ServiceReqArgInfo::Immediate:
        a.   Mov  (reg, arg.m_imm);
        break;
      case ServiceReqArgInfo::CondCode:
        not_implemented();
        break;
      default: not_reached();
    }
  }

  // Save VM regs
  a.     Str   (rVmFp, rGContextReg[offsetof(VMExecutionContext, m_fp)]);
  a.     Str   (rVmSp, rGContextReg[offsetof(VMExecutionContext, m_stack) +
                                    Stack::topOfStackOffset()]);

  if (persist) {
    a.   Mov   (rAsm, 0);
  } else {
    a.   Mov   (rAsm, reinterpret_cast<intptr_t>(start));
  }
  a.     Mov   (argReg(0), req);

  // The x64 equivalent loads to rax. I knew this was a trap.
  if (flags & SRFlags::JmpInsteadOfRet) {
    a.   Ldr   (rAsm, MemOperand(sp, 8, PostIndex));
    a.   Br    (rAsm);
  } else {
    a.   Ret   ();
  }
  a.     Brk   (0);

  if (!persist) {
    assert(cb.frontier() - start <= kMaxStubSpace);
    while (cb.frontier() - start < kMaxStubSpace) {
      a. Nop   ();
    }
  }

  return start;
}
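
The size bound computed at the top of the example can be made concrete. Below is a minimal standalone sketch, assuming vixl::kInstructionSize == 4 (the fixed A64 instruction width) and substituting a hypothetical kNumArgRegs constant for maxArgReg(); the names and the resulting number are illustrative only, not taken from the source.

#include <cstddef>

// Every A64 instruction is 4 bytes wide.
constexpr std::size_t kInstructionSize = 4;
// Placeholder for maxArgReg(): assume four service-request argument registers.
constexpr std::size_t kNumArgRegs = 4;

// Six single-instruction tail emits, plus at most four instructions per
// argument (the MOVZ/MOVK worst case for materializing a 64-bit immediate).
constexpr std::size_t kMaxStubSpace =
    6 * kInstructionSize + (4 * kNumArgRegs) * kInstructionSize;

static_assert(kMaxStubSpace == 88,
              "24 bytes of tail + 64 bytes of argument moves under these assumptions");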
Example 2

TCA emitServiceReqWork(CodeBlock& cb, TCA start, SRFlags flags,
                       ServiceRequest req, const ServiceReqArgVec& argv) {
    MacroAssembler a { cb };

    const bool persist = flags & SRFlags::Persist;

    folly::Optional<CodeCursor> maybeCc = folly::none;
    if (start != cb.frontier()) {
        maybeCc.emplace(cb, start);
    }

    // There are 4 instructions after the argument-shuffling, and they're all
    // single instructions (i.e. not macros). There are up to 4 instructions per
    // argument (it may take up to 4 instructions to move a 64-bit immediate into
    // a register).
    constexpr auto kMaxStubSpace = 4 * vixl::kInstructionSize +
                                   (4 * maxArgReg()) * vixl::kInstructionSize;

    for (auto i = 0; i < argv.size(); ++i) {
        auto reg = serviceReqArgReg(i);
        auto const& arg = argv[i];
        switch (arg.m_kind) {
        case ServiceReqArgInfo::Immediate:
            a.   Mov  (reg, arg.m_imm);
            break;
        case ServiceReqArgInfo::CondCode:
            not_implemented();
            break;
        default:
            not_reached();
        }
    }

    if (persist) {
        a.   Mov   (rAsm, 0);
    } else {
        a.   Mov   (rAsm, reinterpret_cast<intptr_t>(start));
    }
    a.     Mov   (argReg(0), req);

    // Pop the saved link register from the stack (post-indexed load; sp advances by 16).
    a.     Ldr   (rLinkReg, MemOperand(sp, 16, PostIndex));
    if (flags & SRFlags::JmpInsteadOfRet) {
        a.   Br    (rLinkReg);
    } else {
        a.   Ret   ();
    }
    a.     Brk   (0);

    if (!persist) {
        assert(cb.frontier() - start <= kMaxStubSpace);
        while (cb.frontier() - start < kMaxStubSpace) {
            a. Nop   ();
        }
    }

    return start;
}
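
The MemOperand(sp, 16, PostIndex) used in the epilogue load behaves like a pop: the value is read at the current stack pointer and sp is advanced afterwards. A minimal C++ sketch of that effect, using hypothetical names (the real emit is a single ldr instruction):

#include <cstdint>

// Models "ldr xN, [sp], #16": read *sp first, then advance sp by 16 bytes
// (two 8-byte slots, preserving the AArch64 16-byte stack alignment).
uint64_t popSavedLinkReg(uint64_t*& sp) {
  uint64_t saved = *sp;
  sp += 2;
  return saved;
}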
Example 3

TCA emitServiceReqWork(CodeBlock& cb,
                       TCA start,
                       SRFlags flags,
                       folly::Optional<FPInvOffset> spOff,
                       ServiceRequest req,
                       const ServiceReqArgVec& argv) {
  MacroAssembler a { cb };

  const bool persist = flags & SRFlags::Persist;

  folly::Optional<CodeCursor> maybeCc = folly::none;
  if (start != cb.frontier()) {
    maybeCc.emplace(cb, start);
  }

  const auto kMaxStubSpace = reusableStubSize();

  for (auto i = 0; i < argv.size(); ++i) {
    auto reg = serviceReqArgReg(i);
    auto const& arg = argv[i];
    switch (arg.m_kind) {
      case ServiceReqArgInfo::Immediate:
        a.   Mov  (reg, arg.m_imm);
        break;
      case ServiceReqArgInfo::CondCode:
        not_implemented();
        break;
      default: not_reached();
    }
  }

  if (persist) {
    a.   Mov   (rAsm, 0);
  } else {
    a.   Mov   (rAsm, reinterpret_cast<intptr_t>(start));
  }
  a.     Mov   (argReg(0), req);

  // Pop the saved link register from the stack (post-indexed load; sp advances by 16).
  a.     Ldr   (rLinkReg, MemOperand(sp, 16, PostIndex));
  a.     Ret   ();
  a.     Brk   (0);

  if (!persist) {
    assertx(cb.frontier() - start <= kMaxStubSpace);
    while (cb.frontier() - start < kMaxStubSpace) {
      a. Nop   ();
    }
  }

  return start;
}
Example 4

TCA
emitServiceReqWork(CodeBlock& cb, TCA start, bool persist, SRFlags flags,
                   ServiceRequest req, const ServiceReqArgVec& argv) {
  assert(start);
  const bool align = flags & SRFlags::Align;
  Asm as { cb };

  /*
   * Remember previous state of the code cache.
   */
  boost::optional<CodeCursor> maybeCc = boost::none;
  if (start != as.frontier()) {
    maybeCc = boost::in_place<CodeCursor>(boost::ref(as), start);
  }

  /* max space for moving to align, saving VM regs plus emitting args */
  static const int
    kVMRegSpace = 0x14,
    kMovSize = 0xa,
    kNumServiceRegs = sizeof(serviceReqArgRegs) / sizeof(PhysReg),
    kMaxStubSpace = kJmpTargetAlign - 1 + kVMRegSpace +
      kNumServiceRegs * kMovSize;
  if (align) {
    moveToAlign(cb);
  }
  TCA retval = as.frontier();
  TRACE(3, "Emit Service Req @%p %s(", start, serviceReqName(req));
  /*
   * Move args into appropriate regs. Eager VMReg save may bash flags,
   * so set the CondCode arguments first.
   */
  for (int i = 0; i < argv.size(); ++i) {
    assert(i < kNumServiceReqArgRegs);
    auto reg = serviceReqArgRegs[i];
    const auto& argInfo = argv[i];
    switch(argv[i].m_kind) {
      case ServiceReqArgInfo::Immediate: {
        TRACE(3, "%" PRIx64 ", ", argInfo.m_imm);
        as.    emitImmReg(argInfo.m_imm, reg);
      } break;
      case ServiceReqArgInfo::CondCode: {
        // Already set before VM reg save.
        DEBUG_ONLY TCA start = as.frontier();
        as.    setcc(argInfo.m_cc, rbyte(reg));
        assert(as.frontier() - start <= kMovSize);
        TRACE(3, "cc(%x), ", argInfo.m_cc);
      } break;
      default: not_reached();
    }
  }
  emitEagerVMRegSave(as, RegSaveFlags::SaveFP);
  if (persist) {
    as.  emitImmReg(0, Transl::reg::rAsm);
  } else {
    as.  emitImmReg((uint64_t)start, Transl::reg::rAsm);
  }
  TRACE(3, ")\n");
  as.    emitImmReg(req, Transl::reg::rdi);

  /*
   * Weird hand-shaking with enterTC: reverse-call a service routine.
   *
   * In the case of some special stubs (m_callToExit, m_retHelper), we
   * have already unbalanced the return stack by doing a ret to
   * something other than enterTCHelper.  In that case
   * SRJmpInsteadOfRet indicates to fake the return.
   */
  if (flags & SRFlags::JmpInsteadOfRet) {
    as.  pop(Transl::reg::rax);
    as.  jmp(Transl::reg::rax);
  } else {
    as.  ret();
  }

  // TODO(2796856): we should record an OpServiceRequest pseudo-bytecode here.

  translator_not_reached(as);
  if (!persist) {
    /*
     * Recycled stubs need to be uniformly sized. Make space for the
     * maximal possible service requests.
     */
    assert(as.frontier() - start <= kMaxStubSpace);
    as.emitNop(start + kMaxStubSpace - as.frontier());
    assert(as.frontier() - start == kMaxStubSpace);
  }
  return retval;
}
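
The x64 version states the recycling rationale explicitly: non-persistent stubs are NOP-padded up to kMaxStubSpace so any freed stub slot can later be reused for any request. A toy sketch of that idea, with entirely hypothetical names (HHVM's real stub recycling is more involved):

#include <cstddef>
#include <cstdint>
#include <vector>

using TCA = uint8_t*;

// Because every non-persistent stub occupies exactly the same number of
// bytes, freed slots are interchangeable and no per-stub size bookkeeping
// is needed.
struct StubSlotPool {
  static constexpr std::size_t kStubSize = 88;  // stand-in for kMaxStubSpace
  std::vector<TCA> freeSlots;

  // Returns a recycled slot, or nullptr if the caller must carve a fresh one
  // from the code block's frontier.
  TCA acquire() {
    if (freeSlots.empty()) return nullptr;
    TCA slot = freeSlots.back();
    freeSlots.pop_back();
    return slot;
  }

  void release(TCA start) { freeSlots.push_back(start); }
};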