Example #1
Vreg checkRDSHandleInitialized(Vout& v, rds::Handle ch) {
  assertx(rds::isNormalHandle(ch));
  auto const gen = v.makeReg();
  auto const sf = v.makeReg();
  v << loadb{rvmtl()[rds::genNumberHandleFrom(ch)], gen};
  v << cmpbm{gen, rvmtl()[rds::currentGenNumberHandle()], sf};
  return sf;
}
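The helper above encodes the RDS generation-number protocol: a normal handle counts as initialized only while its per-handle generation byte matches the process-wide current generation. A minimal model of that scheme, with illustrative names rather than the real rds:: internals (the inverse operation, markRDSHandleInitialized, appears near the end of this listing):

#include <cstdint>

// Illustrative model only; real handles live at rvmtl()-relative
// offsets, as the vasm above shows.
struct NormalHandleModel {
  uint8_t gen;          // byte at rds::genNumberHandleFrom(ch)
};
uint8_t g_currentGen;   // byte at rds::currentGenNumberHandle()

// checkRDSHandleInitialized emits loadb + cmpbm to compute this test;
// callers then branch on CC_NE for the uninitialized case.
bool isInitialized(const NormalHandleModel& h) {
  return h.gen == g_currentGen;
}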
Example #2
void cgStMBase(IRLS& env, const IRInstruction* inst) {
  auto const off = rds::kVmMInstrStateOff + offsetof(MInstrState, base);
  auto const srcLoc = irlower::srcLoc(env, inst, 0);
  vmain(env) << store{srcLoc.reg(0), rvmtl()[off]};
  if (wide_tv_val) {
    vmain(env) << store{srcLoc.reg(1), rvmtl()[off + sizeof(intptr_t)]};
  }
}
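Both cgStMBase above and cgLdMBase below guard a second store/load behind wide_tv_val. The layout this assumes (a sketch; the exact member names inside tv_val are an implementation detail) is that wide builds keep the member base as two consecutive machine words in MInstrState, while non-wide builds use only the first:

#include <cstdint>

// Illustrative layout only: word0 sits at rvmtl()[off], word1 at
// rvmtl()[off + sizeof(intptr_t)], matching the offsets used above.
struct MInstrBaseModel {
  intptr_t word0;
  intptr_t word1;  // present only in wide_tv_val builds
};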
Example #3
void cgLdMBase(IRLS& env, const IRInstruction* inst) {
  auto const off = rds::kVmMInstrStateOff + offsetof(MInstrState, base);
  auto const dstLoc = irlower::dstLoc(env, inst, 0);
  vmain(env) << load{rvmtl()[off], dstLoc.reg(0)};
  if (wide_tv_val) {
    vmain(env) << load{rvmtl()[off + sizeof(intptr_t)], dstLoc.reg(1)};
  }
}
Example #4
TCA emitEndCatchHelper(CodeBlock& cb, UniqueStubs& us) {
  auto const udrspo = rvmtl()[unwinderDebuggerReturnSPOff()];

  auto const debuggerReturn = vwrap(cb, [&] (Vout& v) {
    v << load{udrspo, rvmsp()};
    v << storeqi{0, udrspo};
  });
  svcreq::emit_persistent(cb, folly::none, REQ_POST_DEBUGGER_RET);

  auto const resumeCPPUnwind = vwrap(cb, [] (Vout& v) {
    static_assert(sizeof(tl_regState) == 1,
                  "The following store must match the size of tl_regState.");
    auto const regstate = emitTLSAddr(v, tls_datum(tl_regState));
    v << storebi{static_cast<int32_t>(VMRegState::CLEAN), regstate};

    v << load{rvmtl()[unwinderExnOff()], rarg(0)};
    v << call{TCA(_Unwind_Resume), arg_regs(1)};
  });
  us.endCatchHelperPast = cb.frontier();
  vwrap(cb, [] (Vout& v) { v << ud2{}; });

  alignJmpTarget(cb);

  return vwrap(cb, [&] (Vout& v) {
    auto const done1 = v.makeBlock();
    auto const sf1 = v.makeReg();

    v << cmpqim{0, udrspo, sf1};
    v << jcci{CC_NE, sf1, done1, debuggerReturn};
    v = done1;

    // Normal end catch situation: call back to tc_unwind_resume, which returns
    // the catch trace (or null) in %rax, and the new vmfp in %rdx.
    v << copy{rvmfp(), rarg(0)};
    v << call{TCA(tc_unwind_resume)};
    v << copy{reg::rdx, rvmfp()};

    auto const done2 = v.makeBlock();
    auto const sf2 = v.makeReg();

    v << testq{reg::rax, reg::rax, sf2};
    v << jcci{CC_Z, sf2, done2, resumeCPPUnwind};
    v = done2;

    // We need to do a syncForLLVMCatch(), but vmfp is already in rdx.
    v << jmpr{reg::rax};
  });
}
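The normal-end-catch path relies on the SysV x86-64 convention that a small two-pointer struct is returned in %rax and %rdx, which is why the stub can copy %rdx straight into rvmfp() and jump through %rax. A sketch of the contract assumed for tc_unwind_resume (field names are illustrative; the real declaration lives in HHVM's translator headers):

// Returned in registers: catchTrace in %rax (null means "resume C++
// unwinding"), newVmfp in %rdx.
struct UnwindResumeModel {
  unsigned char* catchTrace;
  void* newVmfp;
};
UnwindResumeModel tc_unwind_resume_model(void* fp);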
Example #5
void cgGetCtxFwdCallDyn(IRLS& env, const IRInstruction* inst) {
  implGetCtxFwdCall(env, inst, [&] (Vout& v, Vreg rthis, Vreg dst) {
    auto const extra = inst->extra<ClsMethodData>();
    auto const ch = StaticMethodFCache::alloc(
      extra->clsName,
      extra->methodName,
      ctxName(inst->marker())
    );

    // The StaticMethodFCache here is guaranteed to already be initialized in
    // RDS by the pre-conditions of this instruction.
    auto const sf = v.makeReg();
    v << cmplim{1, rvmtl()[ch + offsetof(StaticMethodFCache, m_static)], sf};

    return cond(v, CC_E, sf, dst,
      [&] (Vout& v) {
        // Load (this->m_cls | 0x1) into `dst'.
        auto cls = v.makeReg();
        auto tmp = v.makeReg();
        emitLdObjClass(v, rthis, cls);
        v << orqi{1, cls, tmp, v.makeReg()};
        return tmp;
      },
      [&] (Vout& v) {
        // Just incref $this.
        emitIncRef(v, rthis);
        return rthis;
      }
    );
  });
}
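The orqi{1, cls, ...} above (and the matching cmplim on m_static) implement the forwarded-context convention: the result is either a $this pointer with the low bit clear, or a Class* with the low bit set to mark a static context. A minimal sketch of that tagging scheme, with hypothetical helper names; it relies on Class* alignment keeping bit 0 free:

#include <cstdint>

inline uintptr_t tagStaticCtx(const void* cls) {
  return reinterpret_cast<uintptr_t>(cls) | 0x1;   // mark static context
}
inline bool ctxIsStatic(uintptr_t ctx) { return (ctx & 0x1) != 0; }
inline const void* ctxClass(uintptr_t ctx) {
  return reinterpret_cast<const void*>(ctx & ~uintptr_t{0x1});
}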
Example #6
void cgLdStaticLoc(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<LdStaticLoc>();
  auto const link = rds::bindStaticLocal(extra->func, extra->name);
  auto const dst = dstLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  v << lea{rvmtl()[link.handle() + rds::StaticLocalData::ref_offset()], dst};
}
Example #7
void cgProfileType(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<RDSHandleData>();

  auto const args = argGroup(env, inst)
    .addr(rvmtl(), safe_cast<int32_t>(extra->handle))
    .typedValue(0);

  cgCallHelper(vmain(env), env, CallSpec::method(&TypeProfile::report),
               kVoidDest, SyncOptions::None, args);
}
Example #8
void cgLdClsCns(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<LdClsCns>();
  auto const link = rds::bindClassConstant(extra->clsName, extra->cnsName);
  auto const dst = dstLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const sf = checkRDSHandleInitialized(v, link.handle());
  fwdJcc(v, env, CC_NE, sf, inst->taken());
  v << lea{rvmtl()[link.handle()], dst};
}
Example #9
void cgStMIPropState(IRLS& env, const IRInstruction* inst) {
  auto const cls = srcLoc(env, inst, 0).reg();
  auto const slot = srcLoc(env, inst, 1).reg();
  auto const isStatic = inst->src(2)->boolVal();
  auto const off = rds::kVmMInstrStateOff + offsetof(MInstrState, propState);
  auto& v = vmain(env);

  using M = MInstrPropState;

  static_assert(M::slotSize() == 4, "");
  static_assert(M::clsSize() == 4 || M::clsSize() == 8, "");

  if (inst->src(0)->isA(TNullptr)) {
    // If the Class* field is null, none of the other fields matter.
    emitStLowPtr(v, v.cns(0), rvmtl()[off + M::clsOff()], M::clsSize());
    return;
  }

  if (inst->src(0)->hasConstVal(TCls) &&
      inst->src(1)->hasConstVal(TInt)) {
    // If everything is a constant, and this is a LowPtr build, we can store the
    // values with a single 64-bit immediate.
    if (M::clsOff() + M::clsSize() == M::slotOff() && M::clsSize() == 4) {
      auto const clsVal = inst->src(0)->clsVal();
      auto const slotVal = inst->src(1)->intVal();
      auto raw = reinterpret_cast<uint64_t>(clsVal);
      raw |= (static_cast<uint64_t>(slotVal) << 32);
      if (isStatic) raw |= 0x1;
      emitImmStoreq(v, raw, rvmtl()[off + M::clsOff()]);
      return;
    }
  }

  auto markedCls = cls;
  if (isStatic) {
    markedCls = v.makeReg();
    v << orqi{0x1, cls, markedCls, v.makeReg()};
  }
  emitStLowPtr(v, markedCls, rvmtl()[off + M::clsOff()], M::clsSize());
  v << storel{slot, rvmtl()[off + M::slotOff()]};
}
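The constant fast path in cgStMIPropState packs all three fields into one 64-bit immediate: the LowPtr class pointer in the low 32 bits, the slot in the high 32, and the static flag in bit 0 (free because Class pointers are aligned). A worked example with made-up values:

#include <cstdint>

// Hypothetical clsVal = 0x00c0ffe0, slotVal = 5, isStatic = true:
uint64_t raw = 0x00c0ffe0ull;   // Class* occupies the low 32 bits
raw |= uint64_t{5} << 32;       // slot occupies the high 32 bits
raw |= 0x1;                     // static flag tags bit 0
// raw == 0x0000000500c0ffe1, written with a single emitImmStoreq.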
Example #10
void cgProfileMethod(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ProfileMethodData>();
  auto const sp = srcLoc(env, inst, 0).reg();

  auto const args = argGroup(env, inst)
    .addr(rvmtl(), safe_cast<int32_t>(extra->handle))
    .addr(sp, cellsToBytes(extra->bcSPOff.offset))
    .ssa(1);

  cgCallHelper(vmain(env), env, CallSpec::method(&MethProfile::reportMeth),
               kVoidDest, SyncOptions::None, args);
}
Example #11
void cgProfileSubClsCns(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ProfileSubClsCns>();

  auto const args = argGroup(env, inst)
    .addr(rvmtl(), safe_cast<int32_t>(extra->handle))
    .ssa(0)
    .imm(extra->cnsName);

  auto const dst = dstLoc(env, inst, 0).reg();

  cgCallHelper(vmain(env), env, CallSpec::method(&ClsCnsProfile::reportClsCns),
               callDest(dst), SyncOptions::None, args);
}
Example #12
void cgProfileSwitchDest(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ProfileSwitchDest>();
  auto const idx = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const rcase = v.makeReg();
  auto const sf = v.makeReg();
  v << subq{v.cns(extra->base), idx, rcase, v.makeReg()};
  v << cmpqi{extra->cases - 2, rcase, sf};

  ifThenElse(
    v, CC_AE, sf,
    [&] (Vout& v) {
      // Last vector element is the default case.
      v << inclm{rvmtl()[extra->handle + (extra->cases - 1) * sizeof(int32_t)],
                 v.makeReg()};
    },
    [&] (Vout& v) {
      v << inclm{Vreg{rvmtl()}[rcase * 4 + extra->handle], v.makeReg()};
    }
  );
}
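The subq/cmpqi pair in cgProfileSwitchDest is the standard unsigned range-check trick: once the base is subtracted, an index below base wraps to a huge unsigned value, so the single CC_AE test routes both undershoot and overshoot to the default counter. A scalar restatement of the emitted check (a sketch, assuming vasm's cmpqi{imm, reg} sets flags from reg - imm):

#include <cstdint>

bool takesDefaultCounter(int64_t idx, int64_t base, int64_t cases) {
  auto const rcase = static_cast<uint64_t>(idx - base);
  // CC_AE is unsigned >=; idx < base also lands here via wraparound.
  return rcase >= static_cast<uint64_t>(cases - 2);
}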
Example #13
void cgLdClsInitData(IRLS& env, const IRInstruction* inst) {
  auto const dst = dstLoc(env, inst, 0).reg();
  auto const cls = srcLoc(env, inst, 0).reg();
  auto const offset = Class::propDataCacheOff() +
                      rds::Link<Class::PropInitVec*>::handleOff();
  auto& v = vmain(env);

  auto const handle = v.makeReg();
  auto const vec = v.makeReg();
  v << loadzlq{cls[offset], handle};
  v << load{Vreg(rvmtl())[handle], vec};
  v << load{vec[Class::PropInitVec::dataOff()], dst};
}
Example #14
void cgInitStaticLoc(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<InitStaticLoc>();
  auto const link = rds::bindStaticLocal(extra->func, extra->name);
  auto& v = vmain(env);

  // Initialize the RefData by storing the new value into its TypedValue and
  // setting the RefData's reference count to one.
  auto mem = rvmtl()[link.handle() + rds::StaticLocalData::ref_offset()];
  storeTV(v, mem + RefData::tvOffset(), srcLoc(env, inst, 0), inst->src(0));
  emitStoreRefCount(v, OneReference, mem);
  v << storebi{uint8_t(HeaderKind::Ref), mem + (int)HeaderKindOffset};
  markRDSHandleInitialized(v, link.handle());

  static_assert(sizeof(HeaderKind) == 1, "");
}
Example #15
void cgLdClsMethodCacheCls(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ClsMethodData>();
  auto const dst = dstLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const ch = StaticMethodCache::alloc(
    extra->clsName,
    extra->methodName,
    ctxName(inst->marker())
  );

  // The StaticMethodCache here is guaranteed to already be initialized in RDS
  // by the pre-conditions of this instruction.
  emitLdLowPtr(v, rvmtl()[ch + offsetof(StaticMethodCache, m_cls)],
               dst, sizeof(LowPtr<const Class>));
}
Example #16
void cgLdClsMethodFCacheFunc(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ClsMethodData>();
  auto const dst = dstLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const ch = StaticMethodFCache::alloc(
    extra->clsName,
    extra->methodName,
    ctxName(inst->marker())
  );

  auto const sf = checkRDSHandleInitialized(v, ch);
  fwdJcc(v, env, CC_NE, sf, inst->taken());
  emitLdLowPtr(v, rvmtl()[ch + offsetof(StaticMethodFCache, m_func)],
               dst, sizeof(LowPtr<const Func>));
}
Example #17
void cgInitClsCns(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<InitClsCns>();
  auto const link = rds::bindClassConstant(extra->clsName, extra->cnsName);
  auto& v = vmain(env);

  auto const args = argGroup(env, inst)
    .addr(rvmtl(), safe_cast<int32_t>(link.handle()))
    .immPtr(NamedEntity::get(extra->clsName))
    .immPtr(extra->clsName)
    .immPtr(extra->cnsName);

  cgCallHelper(v, env, CallSpec::direct(lookupClsCnsHelper),
               callDestTV(env, inst), SyncOptions::Sync, args);

  markRDSHandleInitialized(v, link.handle());
}
Example #18
void cgInitObjProps(IRLS& env, const IRInstruction* inst) {
  auto const cls = inst->extra<InitObjProps>()->cls;
  auto const obj = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  // Set the attributes, if any.
  auto const odAttrs = cls->getODAttrs();
  if (odAttrs) {
    static_assert(sizeof(ObjectData::Attribute) == 2,
                  "Codegen expects 2-byte ObjectData attributes");
    assertx(!(odAttrs & 0xffff0000));
    v << orwim{odAttrs, obj[ObjectData::attributeOff()], v.makeReg()};
  }

  // Initialize the properties.
  auto const nprops = cls->numDeclProperties();
  if (nprops > 0) {
    if (cls->pinitVec().size() == 0) {
      // If the Class has no 86pinit property-initializer functions, we can
      // just copy the initial values from a data member on the Class.
      implInitObjPropsFast(v, env, inst, obj, cls, nprops);
    } else {
      // Load the Class's propInitVec from the target cache.  We know it's
      // already been initialized as a pre-condition on this op.
      auto const propHandle = cls->propHandle();
      assertx(rds::isNormalHandle(propHandle));

      auto const propInitVec = v.makeReg();
      auto const propData = v.makeReg();
      v << load{Vreg(rvmtl())[propHandle], propInitVec};
      v << load{propInitVec[Class::PropInitVec::dataOff()], propData};

      auto const propsOff = sizeof(ObjectData) + cls->builtinODTailSize();
      auto args = argGroup(env, inst)
        .addr(obj, safe_cast<int32_t>(propsOff))
        .reg(propData);

      if (!cls->hasDeepInitProps()) {
        cgCallHelper(v, env, CallSpec::direct(memcpy), kVoidDest,
                     SyncOptions::None, args.imm(cellsToBytes(nprops)));
      } else {
        cgCallHelper(v, env, CallSpec::direct(deepInitHelper),
                     kVoidDest, SyncOptions::None, args.imm(nprops));
      }
    }
  }
}
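Note the two argument shapes on the slow paths above: the memcpy branch passes a byte count (cellsToBytes(nprops)) while the deep-init branch passes a property count, since that helper must walk cell by cell to clone any deep-init values. A sketch of the implied contracts (illustrative signatures, not the real helpers):

#include <cstddef>
#include <cstring>

struct CellModel { long data; int type; };  // stand-in for a TypedValue

// memcpy branch: bitwise copy, sized in bytes.
void shallowInitModel(void* props, const void* init, size_t nbytes) {
  std::memcpy(props, init, nbytes);
}

// deepInitHelper branch: counted in cells so each one can be inspected
// and deep-init values (e.g. per-instance arrays) cloned.
void deepInitModel(CellModel* props, const CellModel* init, size_t nprops);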
Example #19
void cgNativeImpl(IRLS& env, const IRInstruction* inst) {
  auto fp = srcLoc(env, inst, 0).reg();
  auto sp = srcLoc(env, inst, 1).reg();
  auto& v = vmain(env);

  auto const func = inst->marker().func();

  if (FixupMap::eagerRecord(func)) {
    emitEagerSyncPoint(v, func->getEntry(), rvmtl(), fp, sp);
  }
  v << vinvoke{
    CallSpec::direct(func->builtinFuncPtr()),
    v.makeVcallArgs({{fp}}),
    v.makeTuple({}),
    {label(env, inst->next()), label(env, inst->taken())},
    makeFixup(inst->marker(), SyncOptions::Sync)
  };
  env.catch_calls[inst->taken()] = CatchCall::CPP;
}
Example #20
void cgCheckSurpriseAndStack(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const extra = inst->extra<CheckSurpriseAndStack>();
  auto const func = extra->func;

  auto const off = func->getEntryForNumArgs(extra->argc) - func->base();
  auto const fixup = Fixup(off, func->numSlotsInFrame());
  auto& v = vmain(env);

  auto const sf = v.makeReg();
  auto const needed_top = v.makeReg();
  v << lea{fp[-cellsToBytes(func->maxStackCells())], needed_top};
  v << cmpqm{needed_top, rvmtl()[rds::kSurpriseFlagsOff], sf};

  unlikelyIfThen(v, vcold(env), CC_AE, sf, [&] (Vout& v) {
    auto const stub = tc::ustubs().functionSurprisedOrStackOverflow;
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::stub(stub), v.makeVcallArgs({}), v.makeTuple({}),
                 {done, label(env, inst->taken())}, fixup };
    v = done;
  });
}
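The single cmpqm above performs two checks at once. It relies on the surprise flags sharing one RDS word with the stack limit (rds::header()->stackLimitAndSurprise): any set flag lives in the high bits and makes the "limit" compare above every plausible stack address, so one unsigned comparison catches both a stack overflow and a pending surprise. A scalar sketch:

#include <cstdint>

uint64_t stackLimitAndSurprise;  // low bits: limit; high bits: flags

bool needsSlowPath(uint64_t neededTop) {
  // Mirrors cmpqm + CC_AE: take the unlikely path when the combined
  // word is >= the stack top this frame needs.
  return stackLimitAndSurprise >= neededTop;
}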
Example #21
void cgLdCns(IRLS& env, const IRInstruction* inst) {
  auto const cnsName = inst->src(0)->strVal();
  auto const ch = makeCnsHandle(cnsName, false);
  auto const dst = dstLoc(env, inst, 0);
  auto& v = vmain(env);
  assertx(inst->taken());

  if (rds::isNormalHandle(ch)) {
    auto const sf = checkRDSHandleInitialized(v, ch);
    fwdJcc(v, env, CC_NE, sf, inst->taken());
    loadTV(v, inst->dst(), dst, rvmtl()[ch]);
    return;
  }
  assertx(rds::isPersistentHandle(ch));

  auto const& cns = rds::handleToRef<TypedValue>(ch);

  if (cns.m_type == KindOfUninit) {
    loadTV(v, inst->dst(), dst, rvmtl()[ch]);
    auto const sf = v.makeReg();
    irlower::emitTypeTest(
      v, env, TUninit, dst.reg(1), dst.reg(0), sf,
      [&] (ConditionCode cc, Vreg sf) {
        fwdJcc(v, env, cc, sf, inst->taken());
      }
    );
  } else {
    // Statically known constant.
    assertx(!dst.isFullSIMD());
    switch (cns.m_type) {
      case KindOfNull:
        v << copy{v.cns(nullptr), dst.reg(0)};
        break;
      case KindOfBoolean:
        v << copy{v.cns(!!cns.m_data.num), dst.reg(0)};
        break;
      case KindOfInt64:
      case KindOfPersistentString:
      case KindOfPersistentVec:
      case KindOfPersistentDict:
      case KindOfPersistentKeyset:
      case KindOfPersistentArray:
      case KindOfString:
      case KindOfVec:
      case KindOfDict:
      case KindOfKeyset:
      case KindOfArray:
      case KindOfObject:
      case KindOfResource:
      case KindOfRef:
        v << copy{v.cns(cns.m_data.num), dst.reg(0)};
        break;
      case KindOfDouble:
        v << copy{v.cns(cns.m_data.dbl), dst.reg(0)};
        break;
      case KindOfUninit:
      case KindOfClass:
        not_reached();
    }
    v << copy{v.cns(cns.m_type), dst.reg(1)};
  }
}
Example #22
void cgLdMIStateAddr(IRLS& env, const IRInstruction* inst) {
  auto const off = rds::kVmMInstrStateOff + inst->src(0)->intVal();
  vmain(env) << lea{rvmtl()[off], dstLoc(env, inst, 0).reg()};
}
Example #23
void cgLdMIPropStateAddr(IRLS& env, const IRInstruction* inst) {
  auto const off = rds::kVmMInstrStateOff + offsetof(MInstrState, propState);
  vmain(env) << lea{rvmtl()[off], dstLoc(env, inst, 0).reg()};
}
Example #24
void cgCall(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<Call>();
  auto const callee = extra->callee;
  auto const argc = extra->numParams;

  auto& v = vmain(env);
  auto& vc = vcold(env);
  auto const catchBlock = label(env, inst->taken());

  auto const calleeSP = sp[cellsToBytes(extra->spOffset.offset)];
  auto const calleeAR = calleeSP + cellsToBytes(argc);

  v << store{fp, calleeAR + AROFF(m_sfp)};
  v << storeli{safe_cast<int32_t>(extra->after), calleeAR + AROFF(m_soff)};

  if (extra->fcallAwait) {
    // This clobbers any flags that might have already been set on the callee
    // AR (e.g., by SpillFrame), but this is okay because there should never be
    // any conflicts; see the documentation in act-rec.h.
    auto const imm = static_cast<int32_t>(
      ActRec::encodeNumArgsAndFlags(argc, ActRec::Flags::IsFCallAwait)
    );
    v << storeli{imm, calleeAR + AROFF(m_numArgsAndFlags)};
  }

  auto const isNativeImplCall = callee &&
                                callee->builtinFuncPtr() &&
                                !callee->nativeFuncPtr() &&
                                argc == callee->numParams();
  if (isNativeImplCall) {
    // The assumption here is that for builtins, the generated func contains
    // only a single opcode (NativeImpl), and there are no non-argument locals.
    if (do_assert) {
      assertx(argc == callee->numLocals());
      assertx(callee->numIterators() == 0);

      auto addr = callee->getEntry();
      while (peek_op(addr) == Op::AssertRATL) {
        addr += instrLen(addr);
      }
      assertx(peek_op(addr) == Op::NativeImpl);
      assertx(addr + instrLen(addr) ==
              callee->unit()->entry() + callee->past());
    }

    v << store{v.cns(mcg->ustubs().retHelper), calleeAR + AROFF(m_savedRip)};
    if (callee->attrs() & AttrMayUseVV) {
      v << storeqi{0, calleeAR + AROFF(m_invName)};
    }
    v << lea{calleeAR, rvmfp()};

    emitCheckSurpriseFlagsEnter(v, vc, fp, Fixup(0, argc), catchBlock);

    auto const builtinFuncPtr = callee->builtinFuncPtr();
    TRACE(2, "Calling builtin preClass %p func %p\n",
          callee->preClass(), builtinFuncPtr);

    // We sometimes call this while curFunc() isn't really the builtin, so make
    // sure to record the sync point as if we are inside the builtin.
    if (FixupMap::eagerRecord(callee)) {
      auto const syncSP = v.makeReg();
      v << lea{calleeSP, syncSP};
      emitEagerSyncPoint(v, callee->getEntry(), rvmtl(), rvmfp(), syncSP);
    }

    // Call the native implementation.  This will free the locals for us in the
    // normal case.  In the case where an exception is thrown, the VM unwinder
    // will handle it for us.
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::direct(builtinFuncPtr), v.makeVcallArgs({{rvmfp()}}),
                 v.makeTuple({}), {done, catchBlock}, Fixup(0, argc)};
    env.catch_calls[inst->taken()] = CatchCall::CPP;

    v = done;
    // The native implementation already put the return value on the stack for
    // us, and handled cleaning up the arguments.  We have to update the frame
    // pointer and the stack pointer, and load the return value into the return
    // register so the trace we are returning to has it where it expects.
    // TODO(#1273094): We should probably modify the actual builtins to return
    // values via registers using the C ABI and do a reg-to-reg move.
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmfp()[AROFF(m_r)], true);
    v << load{rvmfp()[AROFF(m_sfp)], rvmfp()};
    emitRB(v, Trace::RBTypeFuncExit, callee->fullName()->data());
    return;
  }

  v << lea{calleeAR, rvmfp()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    v << syncvmsp{v.cns(0x42)};

    constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;
    emitImmStoreq(v, kUninitializedRIP, rvmfp()[AROFF(m_savedRip)]);
  }

  // Emit a smashable call that initially calls a recyclable service request
  // stub.  The stub and the eventual targets take rvmfp() as an argument,
  // pointing to the callee ActRec.
  auto const target = callee
    ? mcg->ustubs().immutableBindCallStub
    : mcg->ustubs().bindCallStub;

  auto const done = v.makeBlock();
  v << callphp{target, php_call_regs(), {{done, catchBlock}}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
Example #25
void cgCallBuiltin(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CallBuiltin>();
  auto const callee = extra->callee;
  auto const returnType = inst->typeParam();
  auto const funcReturnType = callee->returnType();
  auto const returnByValue = callee->isReturnByValue();

  auto const dstData = dstLoc(env, inst, 0).reg(0);
  auto const dstType = dstLoc(env, inst, 0).reg(1);

  auto& v = vmain(env);

  // Whether `t' is passed in/out of C++ as String&/Array&/Object&.
  auto const isReqPtrRef = [] (MaybeDataType t) {
    return isStringType(t) || isArrayLikeType(t) ||
           t == KindOfObject || t == KindOfResource;
  };

  if (FixupMap::eagerRecord(callee)) {
    auto const sp = srcLoc(env, inst, 1).reg();
    auto const spOffset = cellsToBytes(extra->spOffset.offset);
    auto const& marker = inst->marker();
    auto const pc = marker.fixupSk().unit()->entry() + marker.fixupBcOff();

    auto const synced_sp = v.makeReg();
    v << lea{sp[spOffset], synced_sp};
    emitEagerSyncPoint(v, pc, rvmtl(), srcLoc(env, inst, 0).reg(), synced_sp);
  }

  int returnOffset = rds::kVmMInstrStateOff +
                     offsetof(MInstrState, tvBuiltinReturn);
  auto args = argGroup(env, inst);

  if (!returnByValue) {
    if (isBuiltinByRef(funcReturnType)) {
      if (isReqPtrRef(funcReturnType)) {
        returnOffset += TVOFF(m_data);
      }
      // Pass the address of tvBuiltinReturn to the native function as the
      // location where it can construct the return Array, String, Object, or
      // Variant.
      args.addr(rvmtl(), returnOffset);
      args.indirect();
    }
  }

  // The srcs past the first two (sp and fp) are the arguments to the callee.
  auto srcNum = uint32_t{2};

  // Add the this_ or self_ argument for HNI builtins.
  if (callee->isMethod()) {
    if (callee->isStatic()) {
      args.ssa(srcNum);
      ++srcNum;
    } else {
      // Note that we don't support objects with vtables here (if they may need
      // a $this pointer adjustment).  This should be filtered out during irgen
      // or before.
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the func_num_args() value if needed.
  if (callee->attrs() & AttrNumArgs) {
    // If `numNonDefault' is negative, it is passed as a src instead.
    if (extra->numNonDefault >= 0) {
      args.imm((int64_t)extra->numNonDefault);
    } else {
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the positional arguments.
  for (uint32_t i = 0; i < callee->numParams(); ++i, ++srcNum) {
    auto const& pi = callee->params()[i];

    // Non-pointer and NativeArg args are passed by value.  String, Array,
    // Object, and Variant are passed by const&, i.e. a pointer to stack memory
    // holding the value, so we expect PtrToT types for these.  Pointers to
    // req::ptr types (String, Array, Object) need adjusting to point to
    // &ptr->m_data.
    if (TVOFF(m_data) && !pi.nativeArg && isReqPtrRef(pi.builtinType)) {
      assertx(inst->src(srcNum)->type() <= TPtrToGen);
      args.addr(srcLoc(env, inst, srcNum).reg(), TVOFF(m_data));
    } else if (pi.nativeArg && !pi.builtinType && !callee->byRef(i)) {
      // This condition indicates a MixedTV (i.e., TypedValue-by-value) arg.
      args.typedValue(srcNum);
    } else {
      args.ssa(srcNum, pi.builtinType == KindOfDouble);
    }
  }

  auto dest = [&] () -> CallDest {
    if (isBuiltinByRef(funcReturnType)) {
      if (!returnByValue) return kVoidDest; // indirect return
      return funcReturnType
        ? callDest(dstData) // String, Array, or Object
        : callDest(dstData, dstType); // Variant
    }
    return funcReturnType == KindOfDouble
      ? callDestDbl(env, inst)
      : callDest(env, inst);
  }();

  cgCallHelper(v, env, CallSpec::direct(callee->nativeFuncPtr()),
               dest, SyncOptions::Sync, args);

  // For primitive return types (int, bool, double) and returnByValue, the
  // return value is already in dstData/dstType.
  if (returnType.isSimpleType() || returnByValue) return;

  // For return by reference (String, Object, Array, Variant), the builtin
  // writes the return value into MInstrState::tvBuiltinReturn, from where it
  // has to be tested and copied.

  if (returnType.isReferenceType()) {
    // The return type is String, Array, or Object; fold nullptr to KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && isReqPtrRef(funcReturnType));

    v << load{rvmtl()[returnOffset], dstData};

    if (dstType.isValid()) {
      auto const sf = v.makeReg();
      auto const rtype = v.cns(returnType.toDataType());
      auto const nulltype = v.cns(KindOfNull);
      v << testq{dstData, dstData, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  if (returnType <= TCell || returnType <= TBoxedCell) {
    // The return type is Variant; fold KindOfUninit to KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && !isReqPtrRef(funcReturnType));
    static_assert(KindOfUninit == 0, "KindOfUninit must be 0 for test");

    v << load{rvmtl()[returnOffset + TVOFF(m_data)], dstData};

    if (dstType.isValid()) {
      auto const rtype = v.makeReg();
      v << loadb{rvmtl()[returnOffset + TVOFF(m_type)], rtype};

      auto const sf = v.makeReg();
      auto const nulltype = v.cns(KindOfNull);
      v << testb{rtype, rtype, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  not_reached();
}
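Both result tails in cgCallBuiltin normalize the type register without branching, using test + cmovb. A scalar restatement of the reference-type case, where a null pointer coming back from the builtin is folded to KindOfNull (DataTypeModel is a stand-in for HHVM's DataType):

#include <cstdint>

enum class DataTypeModel : int8_t { Null, Obj /* ... */ };

DataTypeModel foldNull(const void* data, DataTypeModel declared) {
  // cmovb{CC_Z, ...}: keep the declared type unless the builtin
  // produced nothing.
  return data == nullptr ? DataTypeModel::Null : declared;
}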
Example #26
void cgLdUnwinderValue(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmtl()[unwinderTVOff()]);
}
Example #27
void markRDSHandleInitialized(Vout& v, rds::Handle ch) {
  assertx(rds::isNormalHandle(ch));
  auto const gen = v.makeReg();
  v << loadb{rvmtl()[rds::currentGenNumberHandle()], gen};
  v << storeb{gen, rvmtl()[rds::genNumberHandleFrom(ch)]};
}
Example #28
TCA emitFunctionEnterHelper(CodeBlock& cb, UniqueStubs& us) {
  alignJmpTarget(cb);

  auto const start = vwrap(cb, [&] (Vout& v) {
    auto const ar = v.makeReg();

    v << copy{rvmfp(), ar};

    // Fully set up the call frame for the stub.  We can't skip this like we do
    // in other stubs because we need the return IP for this frame in the %rbp
    // chain, in order to find the proper fixup for the VMRegAnchor in the
    // intercept handler.
    v << stublogue{true};
    v << copy{rsp(), rvmfp()};

    // When we call the event hook, it might tell us to skip the callee
    // (because of fb_intercept).  If that happens, we need to return to the
    // caller, but the handler will have already popped the callee's frame.
    // So, we need to save these values for later.
    v << pushm{ar[AROFF(m_savedRip)]};
    v << pushm{ar[AROFF(m_sfp)]};

    v << copy2{ar, v.cns(EventHook::NormalFunc), rarg(0), rarg(1)};

    bool (*hook)(const ActRec*, int) = &EventHook::onFunctionCall;
    v << call{TCA(hook)};
  });

  us.functionEnterHelperReturn = vwrap2(cb, [&] (Vout& v, Vout& vcold) {
    auto const sf = v.makeReg();
    v << testb{rret(), rret(), sf};

    unlikelyIfThen(v, vcold, CC_Z, sf, [&] (Vout& v) {
      auto const saved_rip = v.makeReg();

      // The event hook has already cleaned up the stack and popped the
      // callee's frame, so we're ready to continue from the original call
      // site.  We just need to grab the fp/rip of the original frame that we
      // saved earlier, and sync rvmsp().
      v << pop{rvmfp()};
      v << pop{saved_rip};

      // Drop our call frame; the stublogue{} instruction guarantees that this
      // is exactly 16 bytes.
      v << lea{rsp()[16], rsp()};

      // Sync vmsp and return to the caller.  This unbalances the return stack
      // buffer, but if we're intercepting, we probably don't care.
      v << load{rvmtl()[rds::kVmspOff], rvmsp()};
      v << jmpr{saved_rip};
    });

    // Skip past the stuff we saved for the intercept case.
    v << lea{rsp()[16], rsp()};

    // Restore rvmfp() and return to the callee's func prologue.
    v << stubret{RegSet(), true};
  });

  return start;
}
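For orientation, here is the stack picture this helper maintains between the prologue and its two exits (a sketch; it assumes, as the comments above state, that the call's return address plus stublogue's saved frame pointer total exactly 16 bytes):

// Stack after the two pushm{} instructions, growing downward:
//
//   [ return address into the func prologue  ]  \ stublogue frame,
//   [ caller frame ptr saved by stublogue    ]  / exactly 16 bytes
//   [ ar->m_savedRip                         ]
//   [ ar->m_sfp                              ]  <- rsp()
//
// Intercept path: pop rvmfp() (gets m_sfp), pop saved_rip (gets
// m_savedRip), then lea{rsp()[16]} drops the stublogue frame before
// jmpr{saved_rip}. Normal path: lea{rsp()[16]} skips the two saved
// words, and stubret{..., true} unwinds the stublogue frame itself.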