Esempio n. 1
0
// Codegen for OrdStrIdx: produce the byte value of the character of string
// src0 at index src1.  If the index is out of bounds, call the slow-path
// helper and yield 0.
void cgOrdStrIdx(IRLS& env, const IRInstruction* inst) {
  auto const sd = srcLoc(env, inst, 0).reg();   // StringData*
  auto const idx = srcLoc(env, inst, 1).reg();  // index to probe
  auto& v = vmain(env);

  auto const sf = v.makeReg();
  auto const length = v.makeReg();

  // Zero-extend the 32-bit string length and compare it against the index.
  v << loadzlq{sd[StringData::sizeOff()], length};
  v << cmpq{idx, length, sf};

  // Bounds check: CC_B routes the out-of-bounds case to the cold path.
  unlikelyCond(v, vcold(env), CC_B, sf, dstLoc(env, inst, 0).reg(),
    [&] (Vout& v) {
      // Slow path: let the runtime helper deal with the bad index, then
      // produce 0 as the instruction's result.
      auto const args = argGroup(env, inst).ssa(0).ssa(1);
      cgCallHelper(v, env, CallSpec::direct(MInstrHelpers::stringGetI),
                   kVoidDest, SyncOptions::Sync, args);
      return v.cns(0);
    },
    [&] (Vout& v) {
      auto const dst = v.makeReg();
      auto const data = v.makeReg();
      // Locate the character data: inline after the StringData header when
      // NO_M_DATA, otherwise through the data pointer field.
#ifdef NO_M_DATA
      v << lea{sd[sizeof(StringData)], data};
#else
      v << load{sd[StringData::dataOff()], data};
#endif
      // Zero-extended single-byte load of the character.
      v << loadzbq{data[idx], dst};
      return dst;
    }
  );
}
Esempio n. 2
0
// Codegen for StLocRange: store the same value into every local slot in
// [range->start, range->end).
void cgStLocRange(IRLS& env, const IRInstruction* inst) {
  auto const range = inst->extra<StLocRange>();

  // Empty range: nothing to emit.
  if (range->start >= range->end) return;

  auto const fp = srcLoc(env, inst, 0).reg();
  auto const loc = srcLoc(env, inst, 1);
  auto const val = inst->src(1);
  auto& v = vmain(env);

  auto ireg = v.makeReg();  // cursor: address of the current local slot
  auto nreg = v.makeReg();  // sentinel: address of the one-past-end slot

  v << lea{fp[localOffset(range->start)], ireg};
  v << lea{fp[localOffset(range->end)], nreg};

  // Store into each slot, then step the cursor by one cell toward the
  // sentinel; loop while the two still differ.  Note the cursor moves by
  // subtraction — locals are laid out at decreasing addresses.
  doWhile(v, CC_NE, {ireg},
    [&] (const VregList& in, const VregList& out) {
      auto const i = in[0];
      auto const res = out[0];
      auto const sf = v.makeReg();

      storeTV(v, i[0], loc, val);
      v << subqi{int32_t{sizeof(Cell)}, i, res, v.makeReg()};
      v << cmpq{res, nreg, sf};
      return sf;
    }
  );
}
Esempio n. 3
0
// Codegen for ContEnter: transfer control into a generator frame.  Links the
// generator's ActRec back to the caller, makes the generator frame active,
// syncs the VM stack pointer, and enters at `target'.  After the contenter
// returns, defines the VM return data/type registers as the dst type needs.
void cgContEnter(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const genFP = srcLoc(env, inst, 2).reg();
  auto const target = srcLoc(env, inst, 3).reg();

  auto const extra = inst->extra<ContEnter>();
  auto const spOff = extra->spOffset;
  auto const returnOff = extra->returnBCOffset;

  auto& v = vmain(env);

  auto const next = v.makeBlock();

  // Record the return linkage in the generator's ActRec: saved frame
  // pointer and return bytecode offset.
  v << store{fp, genFP[AROFF(m_sfp)]};
  v << storeli{returnOff, genFP[AROFF(m_soff)]};

  // Switch to the generator's frame and publish the synced stack pointer.
  v << copy{genFP, fp};
  auto const sync_sp = v.makeReg();
  v << lea{sp[cellsToBytes(spOff.offset)], sync_sp};
  v << syncvmsp{sync_sp};

  // Enter the generator; normal return falls through to `next', the taken
  // edge handles unwinding.
  v << contenter{fp, target, cross_trace_regs_resumed(),
                 {next, label(env, inst->taken())}};
  v = next;

  // Only define the return registers the dst type actually requires.
  auto const dst = dstLoc(env, inst, 0);
  auto const type = inst->dst()->type();
  if (!type.admitsSingleVal()) {
    v << defvmretdata{dst.reg(0)};
  }
  if (type.needsReg()) {
    v << defvmrettype{dst.reg(1)};
  }
}
Esempio n. 4
0
/*
 * Codegen for StRef: store the TypedValue src1 into the inner cell of the
 * RefData pointed to by src0.
 *
 * The value must not live in a full SIMD register — storeTV would then do
 * a 16-byte store (NOTE(review): presumed rationale — confirm against
 * storeTV's wide-store path).
 */
void cgStRef(IRLS& env, const IRInstruction* inst) {
  auto const ptr = srcLoc(env, inst, 0).reg();
  auto const valLoc = srcLoc(env, inst, 1);
  // Use the location we already fetched instead of re-querying srcLoc.
  always_assert(!valLoc.isFullSIMD());

  storeTV(vmain(env), ptr[RefData::tvOffset()], valLoc, inst->src(1));
}
Esempio n. 5
0
/*
 * Codegen for InitClosureStaticLoc: write the TypedValue src1 into the
 * cell of the static-local RefData in src0.
 */
void cgInitClosureStaticLoc(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const ref = srcLoc(env, inst, 0).reg();
  auto const valLoc = srcLoc(env, inst, 1);

  // The value must not occupy a full SIMD register.
  always_assert(!valLoc.isFullSIMD());
  storeTV(v, ref[RefData::tvOffset()], valLoc, inst->src(1));
}
Esempio n. 6
0
// Codegen for AFWHBlockOn: mark the parent AsyncFunctionWaitHandle (whose
// ActRec is src0) as blocked on `child' (src1), and splice the parent's
// blockable node onto the front of the child's parent chain.  All parent
// field offsets are taken relative to the ActRec via ar_rel().
void cgAFWHBlockOn(IRLS& env, const IRInstruction* inst) {
  auto parentAR = srcLoc(env, inst, 0).reg();
  auto child = srcLoc(env, inst, 1).reg();
  auto& v = vmain(env);

  // parent->setState(STATE_BLOCKED);
  v << storebi{
    AFWH::toKindState(WH::Kind::AsyncFunction, AFWH::STATE_BLOCKED),
    parentAR[ar_rel(AFWH::stateOff())]
  };

  auto const blockableOff = AFWH::childrenOff() + AFWH::Node::blockableOff();

  // parent->m_blockable.m_bits = child->m_parentChain.m_firstParent|Kind::AFWH;
  // Copying the raw pointer bits is only valid because the AFWH kind tag is
  // zero, which the static_assert below pins down.
  static_assert(
    uint8_t(AsioBlockable::Kind::AsyncFunctionWaitHandleNode) == 0,
    "AFWH kind must be 0."
  );
  auto const firstParentOff = c_WaitableWaitHandle::parentChainOff() +
                              AsioBlockableChain::firstParentOff();
  auto const nextParentOff = blockableOff + AsioBlockable::bitsOff();

  // Link the chain's old head in as our "next parent".
  auto const firstParent = v.makeReg();
  v << load{child[firstParentOff], firstParent};
  v << store{firstParent, parentAR[ar_rel(nextParentOff)]};

  // child->m_parentChain.m_firstParent = &parent->m_blockable;
  auto objToAR = v.makeReg();
  v << lea{parentAR[ar_rel(blockableOff)], objToAR};
  v << store{objToAR, child[firstParentOff]};

  // parent->m_child = child;
  auto const childOff = AFWH::childrenOff() + AFWH::Node::childOff();
  v << store{child, parentAR[ar_rel(childOff)]};
}
Esempio n. 7
0
/*
 * Codegen for StOutValue: store the TypedValue src1 into the out-value
 * slot `index' cells past the ActRec at fp.
 */
void cgStOutValue(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const slotIndex = inst->extra<StOutValue>()->index;
  auto const byteOff = cellsToBytes(slotIndex + kNumActRecCells);
  storeTV(vmain(env), fp[byteOff], srcLoc(env, inst, 1), inst->src(1));
}
Esempio n. 8
0
// Codegen for ContEnter (variant without return-register definition):
// link the generator's ActRec to the caller, make the generator's frame
// active, sync the VM stack pointer, and enter at `target'.  The taken
// edge is registered as a PHP-level catch.
void cgContEnter(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const genFP = srcLoc(env, inst, 2).reg();
  auto const target = srcLoc(env, inst, 3).reg();

  auto const extra = inst->extra<ContEnter>();
  auto const spOff = extra->spOffset;
  auto const returnOff = extra->returnBCOffset;

  auto& v = vmain(env);

  auto const next = v.makeBlock();

  // Record the return linkage (saved fp + return bytecode offset) in the
  // generator's ActRec.
  v << store{fp, genFP[AROFF(m_sfp)]};
  v << storeli{returnOff, genFP[AROFF(m_soff)]};

  // Activate the generator frame and publish the synced stack pointer.
  v << copy{genFP, fp};
  auto const sync_sp = v.makeReg();
  v << lea{sp[cellsToBytes(spOff.offset)], sync_sp};
  v << syncvmsp{sync_sp};

  // Enter the generator; fall through to `next' on normal return.
  v << contenter{fp, target, cross_trace_regs_resumed(),
                 {next, label(env, inst->taken())}};
  // Mark the catch target as a PHP call catch for the unwinder.
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = next;
}
Esempio n. 9
0
/*
 * Codegen for JmpSSwitchDest: sync the VM stack pointer if required, then
 * jump indirectly through the target address in src0.
 */
void cgJmpSSwitchDest(IRLS& env, const IRInstruction* inst) {
  auto const marker = inst->marker();
  auto const extra = inst->extra<JmpSSwitchDest>();
  auto& v = vmain(env);

  auto const targetAddr = srcLoc(env, inst, 0).reg();
  auto const spReg = srcLoc(env, inst, 1).reg();

  maybe_syncsp(v, marker, spReg, extra->offset);
  v << jmpr{targetAddr, cross_trace_args(marker)};
}
Esempio n. 10
0
/*
 * Codegen for Mod: signed integer remainder, dst = src0 % src1.
 */
void cgMod(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const numer = srcLoc(env, inst, 0).reg();
  auto const denom = srcLoc(env, inst, 1).reg();
  v << srem{numer, denom, dstLoc(env, inst, 0).reg()};
}
Esempio n. 11
0
/*
 * Codegen for DbgAssertFunc: compare the two function values and emit a
 * trap (ud2) if they differ.
 */
void cgDbgAssertFunc(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const lhs = srcLoc(env, inst, 0).reg(0);
  auto const rhs = srcLoc(env, inst, 1).reg(0);

  auto const sf = v.makeReg();
  v << cmpq{lhs, rhs, sf};
  ifThen(v, CC_NE, sf, [&] (Vout& v) { v << ud2{}; });
}
Esempio n. 12
0
/*
 * Codegen for StClsRef: store the class pointer src1 into the frame's
 * class-ref slot identified by the instruction's extra data.
 */
void cgStClsRef(IRLS& env, const IRInstruction* inst) {
  auto const slot = inst->extra<ClsRefSlotData>()->slot;
  auto const slotOff = frame_clsref_offset(funcFromFp(inst->src(0)), slot);
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const cls = srcLoc(env, inst, 1).reg();
  emitStLowPtr(vmain(env), cls, fp[slotOff], sizeof(LowPtr<Class>));
}
Esempio n. 13
0
/*
 * Codegen for SyncReturnBC: write the return bytecode offset and the saved
 * frame pointer into the ActRec located `spOffset' cells above sp.
 */
void cgSyncReturnBC(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<SyncReturnBC>();
  auto const arBase = cellsToBytes(extra->spOffset.offset);

  auto& v = vmain(env);
  v << storeli{safe_cast<int32_t>(extra->bcOffset),
               sp[arBase + AROFF(m_soff)]};
  v << store{fp, sp[arBase + AROFF(m_sfp)]};
}
Esempio n. 14
0
/*
 * Codegen for StElem: store the TypedValue src2 at base + index.  A
 * constant index that fits in a 32-bit displacement is folded into the
 * addressing mode; otherwise the index register is used.
 */
void cgStElem(IRLS& env, const IRInstruction* inst) {
  auto const base = srcLoc(env, inst, 0).reg();
  auto const key = inst->src(1);
  auto& v = vmain(env);

  auto const valLoc = srcLoc(env, inst, 2);
  auto const val = inst->src(2);

  if (key->hasConstVal() && deltaFits(key->intVal(), sz::dword)) {
    storeTV(v, base[key->intVal()], valLoc, val);
    return;
  }
  storeTV(v, base[srcLoc(env, inst, 1).reg()], valLoc, val);
}
Esempio n. 15
0
/*
 * Codegen for LdElem: load the TypedValue at base + index into dst.  A
 * constant index that fits in a 32-bit displacement is folded into the
 * addressing mode; otherwise the index register is used.
 */
void cgLdElem(IRLS& env, const IRInstruction* inst) {
  auto const base = srcLoc(env, inst, 0).reg();
  auto const key = inst->src(1);
  auto& v = vmain(env);

  auto const dstSSA = inst->dst();
  auto const dLoc = dstLoc(env, inst, 0);

  if (key->hasConstVal() && deltaFits(key->intVal(), sz::dword)) {
    loadTV(v, dstSSA, dLoc, base[key->intVal()]);
    return;
  }
  loadTV(v, dstSSA, dLoc, base[srcLoc(env, inst, 1).reg()]);
}
Esempio n. 16
0
/*
 * Codegen for DbgAssertARFunc: compare the expected Func value against the
 * m_func field of the ActRec at the given stack offset, trapping (ud2) on
 * mismatch.
 */
void cgDbgAssertARFunc(IRLS& env, const IRInstruction* inst) {
  auto const arOffset =
    cellsToBytes(inst->extra<DbgAssertARFunc>()->offset.offset);
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const func = srcLoc(env, inst, 1).reg(0);
  auto& v = vmain(env);

  auto const sf = v.makeReg();
  v << cmpqm{func, sp[arOffset + AROFF(m_func)], sf};
  ifThen(v, CC_NE, sf, [&] (Vout& v) { v << ud2{}; });
}
Esempio n. 17
0
/*
 * Codegen for StringIsset: dst = (idx is a valid index into string sd).
 * The unsigned "not below or equal" test makes this true exactly when
 * length > idx.
 */
void cgStringIsset(IRLS& env, const IRInstruction* inst) {
  auto const sd = srcLoc(env, inst, 0).reg();
  auto const idx = srcLoc(env, inst, 1).reg();
  auto const dst = dstLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  // Zero-extend the 32-bit length, compare, and materialize the flag.
  auto const len = v.makeReg();
  v << loadzlq{sd[StringData::sizeOff()], len};
  auto const sf = v.makeReg();
  v << cmpq{idx, len, sf};
  v << setcc{CC_NBE, sf, dst};
}
Esempio n. 18
0
/*
 * Codegen for ContArUpdateIdx: set the generator's m_index to the larger
 * of its current value and newIdx (branchless, via cmov).
 */
void cgContArUpdateIdx(IRLS& env, const IRInstruction* inst) {
  auto const contAR = srcLoc(env, inst, 0).reg();
  auto const newIdx = srcLoc(env, inst, 1).reg();
  auto& v = vmain(env);

  auto const idxOff = GENDATAOFF(m_index) - Generator::arOff();

  // Load the current index.
  auto const curIdx = v.makeReg();
  v << load{contAR[idxOff], curIdx};

  // maxIdx = (newIdx > curIdx) ? newIdx : curIdx, then write it back.
  auto const sf = v.makeReg();
  auto const maxIdx = v.makeReg();
  v << cmpq{curIdx, newIdx, sf};
  v << cmovq{CC_G, sf, curIdx, newIdx, maxIdx};
  v << store{maxIdx, contAR[idxOff]};
}
Esempio n. 19
0
/*
 * Codegen for DbgTrashFrame: overwrite each of the ActRec's cells at the
 * given frame offset with a trash pattern, to make stale reads of dead
 * frame data visible in debug builds.
 */
void cgDbgTrashFrame(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<DbgTrashFrame>()->offset.offset);
  // Hoisted out of the loop: vmain(env) is loop-invariant, and every other
  // codegen function in this file fetches it exactly once.
  auto& v = vmain(env);
  for (auto i = 0; i < kNumActRecCells; ++i) {
    trashTV(v, fp, off + cellsToBytes(i), kTVTrashJITFrame);
  }
}
Esempio n. 20
0
/*
 * Codegen for InlineReturn: point rvmfp back at the caller's frame and
 * close the inlined region.
 */
void cgInlineReturn(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const callerOff = inst->extra<InlineReturn>()->offset;
  auto& v = vmain(env);

  v << lea{fp[cellsToBytes(callerOff.offset)], rvmfp()};
  v << inlineend{};
}
Esempio n. 21
0
/*
 * Codegen for ContArIncKey: increment the generator's key's m_data field
 * in place.
 */
void cgContArIncKey(IRLS& env, const IRInstruction* inst) {
  auto& v = vmain(env);
  auto const contAR = srcLoc(env, inst, 0).reg();

  auto const keyOff = GENDATAOFF(m_key) - Generator::arOff();
  v << incqm{contAR[keyOff + TVOFF(m_data)], v.makeReg()};
}
Esempio n. 22
0
// Codegen for ContPreNext: check that the generator is in a resumable state
// (side-exiting to taken otherwise) and advance its state machine by adding
// 2 — Created -> Priming, Started -> Running.
void cgContPreNext(IRLS& env, const IRInstruction* inst) {
  auto const cont = srcLoc(env, inst, 0).reg();
  auto const checkStarted = inst->src(1)->boolVal();
  auto const isAsync = inst->extra<IsAsyncData>()->isAsync;
  auto& v = vmain(env);

  auto const sf = v.makeReg();

  // These asserts make sure that the started check below works.
  static_assert(uint8_t(BaseGenerator::State::Created) == 0, "used below");
  static_assert(uint8_t(BaseGenerator::State::Started) == 1, "used below");
  static_assert(uint8_t(BaseGenerator::State::Done) > 3, "");

  // These asserts ensure that the state transition works.  If we're in the
  // Created state we want to transition to Priming, and if we're in the
  // Started state we want to transition to Running.  By laying out the enum
  // this way we can avoid the branch and just transition by adding 2 to the
  // current state.
  static_assert(uint8_t(BaseGenerator::State::Priming) ==
                uint8_t(BaseGenerator::State::Created) + 2, "used below");
  static_assert(uint8_t(BaseGenerator::State::Running) ==
                uint8_t(BaseGenerator::State::Started) + 2, "used below");

  // Take exit if state != 1 (checkStarted) or if state > 1 (!checkStarted).
  auto stateOff = BaseGenerator::stateOff() - genOffset(isAsync);
  v << cmpbim{int8_t(BaseGenerator::State::Started), cont[stateOff], sf};
  fwdJcc(v, env, checkStarted ? CC_NE : CC_A, sf, inst->taken());

  // Transition the generator into either the Priming state (if we were just
  // created) or the Running state (if we were started).  Due to the way the
  // enum is layed out, we can model this by just adding 2.
  v << addlim{int8_t(2), cont[stateOff], v.makeReg()};
}
Esempio n. 23
0
/*
 * Codegen for LdContResumeAddr: load the generator's resume address into
 * dst, adjusting the offset for async vs non-async generator layout.
 */
void cgLdContResumeAddr(IRLS& env, const IRInstruction* inst) {
  auto const isAsync = inst->extra<IsAsyncData>()->isAsync;
  auto const resumeOff = BaseGenerator::resumeAddrOff() - genOffset(isAsync);
  auto const cont = srcLoc(env, inst, 0).reg();
  vmain(env) << load{cont[resumeOff], dstLoc(env, inst, 0).reg()};
}
Esempio n. 24
0
/*
 * Codegen for CoerceStk: coerce the stack cell at the given offset to the
 * callee's expected parameter type, via the shared implCoerce helper.
 */
void cgCoerceStk(IRLS& env, const IRInstruction *inst) {
  auto const extra = inst->extra<CoerceStk>();
  auto const base = srcLoc(env, inst, 0).reg();
  implCoerce(env, inst, base, cellsToBytes(extra->offset.offset),
             extra->callee, extra->argNum);
}
Esempio n. 25
0
/*
 * Codegen for KillClsRef: in debug builds, overwrite a dead class-ref slot
 * with a poison pattern so stale reads are detectable.  No-op otherwise.
 */
void cgKillClsRef(IRLS& env, const IRInstruction* inst) {
  if (!debug) return;

  auto& v = vmain(env);
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const slotOff = frame_clsref_offset(
    funcFromFp(inst->src(0)),
    inst->extra<ClsRefSlotData>()->slot
  );

  // Build the poison LowPtr bit pattern.
  LowPtr<Class> poison;
  memset(&poison, kTrashClsRef, sizeof(poison));
  Immed64 imm(poison.get());

  // Pick a store form matching the LowPtr width.
  switch (sizeof(poison)) {
    case 4:
      v << storeli{imm.l(), fp[slotOff]};
      break;
    case 8:
      if (imm.fits(sz::dword)) {
        v << storeqi{imm.l(), fp[slotOff]};
      } else {
        v << store{v.cns(imm.q()), fp[slotOff]};
      }
      break;
    default:
      not_implemented();
  }
}
Esempio n. 26
0
// Codegen for InstanceOfIfaceVtable: test whether the class in src0
// implements `iface' by probing the class's interface vtable vector at the
// interface's precomputed slot.
void cgInstanceOfIfaceVtable(IRLS& env, const IRInstruction* inst) {
  auto const iface = inst->extra<InstanceOfIfaceVtable>()->cls;
  auto const slot = iface->preClass()->ifaceVtableSlot();

  auto const dst = dstLoc(env, inst, 0).reg();
  auto const rcls = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  // First make sure the class's vtable vector is long enough to even
  // contain the slot; if not, the class can't implement the interface.
  auto const sf = v.makeReg();
  emitCmpVecLen(v, sf, static_cast<int32_t>(slot),
                rcls[Class::vtableVecLenOff()]);
  cond(
    v, CC_A, sf, dst,
    [&] (Vout& v) {
      // Slot is in range: load the vtable vector and compare the interface
      // recorded at the slot against `iface'.
      auto const vtableVec = v.makeReg();
      emitLdLowPtr(v, rcls[Class::vtableVecOff()], vtableVec,
                   sizeof(LowPtr<Class::VtableVecSlot>));

      auto const ifaceOff = slot * sizeof(Class::VtableVecSlot) +
                            offsetof(Class::VtableVecSlot, iface);
      auto const sf = v.makeReg();
      emitCmpLowPtr<Class>(v, sf, iface, vtableVec[ifaceOff]);

      auto tmp = v.makeReg();
      v << setcc{CC_E, sf, tmp};
      return tmp;
    },
    // Slot out of range: definitely not an instance.
    [&] (Vout& v) { return v.cns(false); }
  );
}
// Codegen for FwdCtxStaticCall: forward the current calling context to a
// static call.  A Cctx (class-tagged context) is passed through unchanged;
// a $this object is converted to its class with the kHasClassBit tag set.
// If the source type doesn't pin down which case we have, test the tag bit
// at runtime.
void cgFwdCtxStaticCall(IRLS& env, const IRInstruction* inst) {
  auto const dstCtx = dstLoc(env, inst, 0).reg();
  auto const srcCtx = srcLoc(env, inst, 0).reg();
  auto const ty = inst->src(0)->type();

  auto& v = vmain(env);

  auto ctx_from_this =  [] (Vout& v, Vreg rthis, Vreg dst) {
    // Load (this->m_cls | 0x1) into `dst'.
    auto const cls = emitLdObjClass(v, rthis, v.makeReg());
    v << orqi{ActRec::kHasClassBit, cls, dst, v.makeReg()};
    return dst;
  };

  if (ty <= TCctx) {
    // Already a class context: forward as-is.
    v << copy{srcCtx, dstCtx};
  } else if (ty <= TObj) {
    // Known $this: convert to a tagged class pointer.
    ctx_from_this(v, srcCtx, dstCtx);
  } else {
    // If we don't know whether we have a $this, we need to check dynamically.
    auto const sf = v.makeReg();
    v << testqi{ActRec::kHasClassBit, srcCtx, sf};
    unlikelyCond(v, vcold(env), CC_NZ, sf, dstCtx,
         [&] (Vout& v) { return srcCtx; },
         [&] (Vout& v) { return ctx_from_this(v, srcCtx, v.makeReg()); }
        );
  }
}
/*
 * Codegen for LookupClsMethodFCache: call StaticMethodFCache::lookup with
 * the RDS handle allocated for this (class, method, context) triple, the
 * known class, the method name, and the frame pointer.
 */
void cgLookupClsMethodFCache(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ClsMethodData>();
  auto const cls = inst->src(0)->clsVal();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const dst = dstLoc(env, inst, 0).reg(0);
  auto& v = vmain(env);

  // Allocate (or reuse) the normal RDS handle for this lookup.
  auto const ch = StaticMethodFCache::alloc(
    cls->name(), extra->methodName, ctxName(inst->marker())
  );
  assertx(rds::isNormalHandle(ch));

  // Spell out the function-pointer type to pin the intended overload.
  using LookupFn = const Func* (*)(rds::Handle, const Class*,
                                   const StringData*, TypedValue*);
  LookupFn lookup = StaticMethodFCache::lookup;

  cgCallHelper(
    v, env, CallSpec::direct(lookup), callDest(dst), SyncOptions::Sync,
    argGroup(env, inst).imm(ch).immPtr(cls).immPtr(extra->methodName).reg(fp)
  );
}
// Codegen for LookupClsMethodCache: call StaticMethodCache::lookup with the
// RDS handle allocated for this (class name, method name, context) triple.
void cgLookupClsMethodCache(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ClsMethodData>();
  auto const dst = dstLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const ch = StaticMethodCache::alloc(
    extra->clsName,
    extra->methodName,
    ctxName(inst->marker())
  );

  // Never executed; exists only so the compiler checks that the argGroup
  // below matches StaticMethodCache::lookup's actual signature.
  if (false) { // typecheck
    UNUSED TypedValue* fake_fp = nullptr;
    const UNUSED Func* f = StaticMethodCache::lookup(
      ch,
      extra->namedEntity,
      extra->clsName,
      extra->methodName,
      fake_fp
    );
  }

  auto const args = argGroup(env, inst)
    .imm(ch)
    .immPtr(extra->namedEntity)
    .immPtr(extra->clsName)
    .immPtr(extra->methodName)
    .reg(fp);

  // May raise an error if the class is undefined.
  cgCallHelper(v, env, CallSpec::direct(StaticMethodCache::lookup),
               callDest(dst), SyncOptions::Sync, args);
}
Esempio n. 30
0
// Codegen for CallArray: sync the VM stack pointer and call the
// fcallArray/fcallUnpack unique stub via vcallarray.  Execution resumes at
// `done' on success; the taken edge is registered as a PHP-level catch.
void cgCallArray(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CallArray>();
  auto const sp = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  // Publish the synced stack pointer for the callee.
  auto const syncSP = v.makeReg();
  v << lea{sp[cellsToBytes(extra->spOffset.offset)], syncSP};
  v << syncvmsp{syncSP};

  // With no extra params use the plain fcallArray helper; otherwise the
  // unpack helper, which additionally takes the param count.
  auto const target = extra->numParams == 0
    ? mcg->ustubs().fcallArrayHelper
    : mcg->ustubs().fcallUnpackHelper;

  auto const pc = v.cns(extra->pc);
  auto const after = v.cns(extra->after);
  auto const args = extra->numParams == 0
    ? v.makeTuple({pc, after})
    : v.makeTuple({pc, after, v.cns(extra->numParams)});

  auto const done = v.makeBlock();
  v << vcallarray{target, fcall_array_regs(), args,
                  {done, label(env, inst->taken())}};
  // Mark the catch target as a PHP call catch for the unwinder.
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  // The call's result arrives in the VM return registers.
  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}