Example #1

void cgCmpBool(IRLS& env, const IRInstruction* inst) {
  auto const s0 = srcLoc(env, inst, 0).reg();
  auto const s1 = srcLoc(env, inst, 1).reg();
  auto const d  = dstLoc(env, inst, 0).reg();

  auto& v = vmain(env);
  auto const sf = v.makeReg();
  auto const extended0 = v.makeReg();
  auto const extended1 = v.makeReg();

  assert(inst->src(0)->type() <= TBool);
  assert(inst->src(1)->type() <= TBool);

  v << movzbq{s0, extended0};
  v << movzbq{s1, extended1};
  v << subq{extended1, extended0, d, sf};
}
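
For reference, a minimal C++ sketch of what this sequence computes (cmp_bool
is a hypothetical name; the subq above follows vasm's AT&T-style operand
order, so d ends up as s0 - s1):

#include <cstdint>

int64_t cmp_bool(bool a, bool b) {
  // Zero-extended subtraction yields the three-way comparison directly.
  return static_cast<int64_t>(a) - static_cast<int64_t>(b);  // -1, 0, or 1
}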
Example #2
void cgLookupCnsU(IRLS& env, const IRInstruction* inst) {
  auto const cnsName = inst->src(0)->strVal();
  auto const fallbackName = inst->src(1)->strVal();

  auto const fallbackCh = makeCnsHandle(fallbackName);
  assertx(rds::isHandleBound(fallbackCh));

  auto const args = argGroup(env, inst)
    .imm(safe_cast<int32_t>(fallbackCh))
    .immPtr(cnsName)
    .immPtr(fallbackName);

  // A constant bound to a normal (request-local) handle must be checked for
  // initialization at runtime, so it takes a different helper than one bound
  // to a persistent handle.
  cgCallHelper(
    vmain(env), env,
    rds::isNormalHandle(fallbackCh)
      ? CallSpec::direct(lookupCnsUHelperNormal)
      : CallSpec::direct(lookupCnsUHelperPersistent),
    callDestTV(env, inst),
    SyncOptions::Sync,
    args
  );
}
Example #3
void cgCheckRange(IRLS& env, const IRInstruction* inst) {
  auto valTmp = inst->src(0);
  auto dst = dstLoc(env, inst, 0).reg();
  auto val = srcLoc(env, inst, 0).reg();
  auto limit = srcLoc(env, inst, 1).reg();
  auto& v = vmain(env);

  ConditionCode cc;
  auto const sf = v.makeReg();

  if (valTmp->hasConstVal()) {
    // Try to put the constant in a position that can get imm-folded.  A
    // sufficiently smart imm-folder could handle this for us.  Note that this
    // leaks arch-specific knowledge into an arch-agnostic API.
    v << cmpq{val, limit, sf};
    cc = CC_A;
  } else {
    v << cmpq{limit, val, sf};
    cc = CC_B;
  }
  v << setcc{cc, sf, dst};
}
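
The unsigned comparison checks 0 <= val < limit in one test; a minimal C++
sketch (check_range is a hypothetical name):

#include <cstdint>

bool check_range(int64_t val, int64_t limit) {
  // A negative val wraps around to a huge unsigned value and fails the test.
  return static_cast<uint64_t>(val) < static_cast<uint64_t>(limit);
}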
Example #4
void cgCheckCold(IRLS& env, const IRInstruction* inst) {
  auto const transID = inst->extra<CheckCold>()->transId;
  auto const counterAddr = profData()->transCounterAddr(transID);
  auto& v = vmain(env);

  auto const sf = v.makeReg();
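  // Atomically decrement this translation's profiling counter; once it drops
  // to zero or below (CC_LE), the code is no longer considered cold and we
  // branch to taken, optionally gated by the lease filter below.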
  v << decqmlock{v.cns(counterAddr)[0], sf};
  if (RuntimeOption::EvalJitFilterLease) {
    auto filter = v.makeBlock();
    v << jcc{CC_LE, sf, {label(env, inst->next()), filter}};
    v = filter;
    auto const res = v.makeReg();
    cgCallHelper(v, env, CallSpec::direct(couldAcquireOptimizeLease),
                 callDest(res), SyncOptions::None,
                 argGroup(env, inst).immPtr(inst->func()));
    auto const sf2 = v.makeReg();
    v << testb{res, res, sf2};
    v << jcc{CC_NZ, sf2, {label(env, inst->next()), label(env, inst->taken())}};
  } else {
    v << jcc{CC_LE, sf, {label(env, inst->next()), label(env, inst->taken())}};
  }
}
Example #5
void cgLdSSwitchDestSlow(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<LdSSwitchDestSlow>();
  auto& v = vmain(env);

  auto strtab = v.allocData<const StringData*>(extra->numCases);
  auto jmptab = v.allocData<TCA>(extra->numCases + 1);

  // Fill two parallel tables: the case strings, and lazily-bound jump
  // targets (with one extra slot at the end for the default destination).
  for (int64_t i = 0; i < extra->numCases; ++i) {
    strtab[i] = extra->cases[i].str;
    v << bindaddr{&jmptab[i], extra->cases[i].dest, extra->bcSPOff};
  }
  v << bindaddr{&jmptab[extra->numCases], extra->defaultSk, extra->bcSPOff};

  auto const args = argGroup(env, inst)
    .typedValue(0)
    .dataPtr(strtab)
    .imm(extra->numCases)
    .dataPtr(jmptab);

  cgCallHelper(v, env, CallSpec::direct(sswitchHelperSlow),
               callDest(env, inst), SyncOptions::Sync, args);
}
Example #6
void cgProfileSwitchDest(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<ProfileSwitchDest>();
  auto const idx = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);

  auto const rcase = v.makeReg();
  auto const sf = v.makeReg();
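  // Rebase the switch value to a zero-based case index and bounds-check it;
  // out-of-range values are attributed to the table's last slot, which
  // counts the default case.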
  v << subq{v.cns(extra->base), idx, rcase, v.makeReg()};
  v << cmpqi{extra->cases - 2, rcase, sf};

  ifThenElse(
    v, CC_AE, sf,
    [&] (Vout& v) {
      // Last vector element is the default case.
      v << inclm{rvmtl()[extra->handle + (extra->cases - 1) * sizeof(int32_t)],
                 v.makeReg()};
    },
    [&] (Vout& v) {
      // Bump this case's counter; the scale factor of 4 is sizeof(int32_t).
      v << inclm{Vreg{rvmtl()}[rcase * 4 + extra->handle], v.makeReg()};
    }
  );
}
Example #7
void cgCheckSurpriseAndStack(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const extra = inst->extra<CheckSurpriseAndStack>();
  auto const func = extra->func;

  auto const off = func->getEntryForNumArgs(extra->argc) - func->base();
  auto const fixup = Fixup(off, func->numSlotsInFrame());
  auto& v = vmain(env);

  auto const sf = v.makeReg();
  auto const needed_top = v.makeReg();
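  // A single comparison performs both checks: the word at
  // rds::kSurpriseFlagsOff combines the stack limit with the surprise flags,
  // so a pending surprise flag forces the stack-limit check to fail.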
  v << lea{fp[-cellsToBytes(func->maxStackCells())], needed_top};
  v << cmpqm{needed_top, rvmtl()[rds::kSurpriseFlagsOff], sf};

  unlikelyIfThen(v, vcold(env), CC_AE, sf, [&] (Vout& v) {
    auto const stub = tc::ustubs().functionSurprisedOrStackOverflow;
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::stub(stub), v.makeVcallArgs({}), v.makeTuple({}),
                 {done, label(env, inst->taken())}, fixup };
    v = done;
  });
}
Example #8
void cgIsScalarType(IRLS& env, const IRInstruction* inst) {
  auto rtype = srcLoc(env, inst, 0).reg(1);
  auto dst = dstLoc(env, inst, 0).reg(0);

  // Static asserts for KindOfBoolean <= scalar type <= KindOfString.
  static_assert(KindOfUninit < KindOfBoolean, "fix checks for IsScalar");
  static_assert(KindOfNull < KindOfBoolean,   "fix checks for IsScalar");
  static_assert(KindOfInt64 > KindOfBoolean,  "fix checks for IsScalar");
  static_assert(KindOfDouble > KindOfBoolean, "fix checks for IsScalar");
  static_assert(KindOfPersistentString > KindOfBoolean,
                "fix checks for IsScalar");
  static_assert(KindOfString > KindOfBoolean, "fix checks for IsScalar");

  static_assert(KindOfInt64 < KindOfString,   "fix checks for IsScalar");
  static_assert(KindOfDouble < KindOfString,  "fix checks for IsScalar");
  static_assert(KindOfPersistentString < KindOfString,
                "fix checks for IsScalar");
  static_assert(KindOfArray > KindOfString,   "fix checks for IsScalar");
  static_assert(KindOfObject > KindOfString,  "fix checks for IsScalar");
  static_assert(KindOfResource > KindOfString, "fix checks for IsScalar");

  static_assert(sizeof(DataType) == 1, "");

  auto& v = vmain(env);

  if (rtype == InvalidReg) {
    auto const type = inst->src(0)->type();
    auto const imm = type <= (TBool | TInt | TDbl | TStr);
    v << copy{v.cns(imm), dst};
    return;
  }

  auto const diff = v.makeReg();
  v << subbi{KindOfBoolean, rtype, diff, v.makeReg()};
  auto const sf = v.makeReg();
  v << cmpbi{KindOfString - KindOfBoolean, diff, sf};
  v << setcc{CC_BE, sf, dst};
}
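
The subbi/cmpbi pair is the classic branchless range check; in plain C++ (a
minimal sketch with hypothetical names, generalized over the bounds):

#include <cstdint>

// In the code above, lo = KindOfBoolean and hi = KindOfString.
bool in_type_range(uint8_t t, uint8_t lo, uint8_t hi) {
  // One unsigned comparison tests lo <= t <= hi.
  return static_cast<uint8_t>(t - lo) <= static_cast<uint8_t>(hi - lo);
}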
Example #9

void cgCmpDbl(IRLS& env, const IRInstruction* inst) {
  auto const s0 = srcLoc(env, inst, 0).reg();
  auto const s1 = srcLoc(env, inst, 1).reg();
  auto const d  = dstLoc(env, inst, 0).reg();

  auto& v = vmain(env);
  auto const sf = v.makeReg();
  auto const tmp1 = v.makeReg();
  auto const tmp2 = v.makeReg();

  assert(inst->src(0)->type() <= TDbl);
  assert(inst->src(1)->type() <= TDbl);

  v << ucomisd{s0, s1, sf};
  v << cmovq{CC_A, sf, v.cns(-1), v.cns(1), tmp1};
  v << cmovq{CC_NE, sf, v.cns(0), tmp1, tmp2};
  v << cmovq{CC_P, sf, tmp2, v.cns(-1), d};
}
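
The cmov chain materializes a branchless three-way comparison; roughly, in
C++ (cmp_dbl is a hypothetical name, and which source maps to which operand
depends on vasm's operand conventions):

#include <cmath>
#include <cstdint>

int64_t cmp_dbl(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return -1;  // unordered: CC_P cmov
  if (lhs == rhs) return 0;                           // equal: CC_NE cmov
  return lhs > rhs ? 1 : -1;                          // ordered: CC_A cmov
}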
Example #10
void cgLdTVAux(IRLS& env, const IRInstruction* inst) {
  auto const dst = dstLoc(env, inst, 0).reg();

  auto const tv = srcLoc(env, inst, 0);
  assertx(tv.hasReg(1));
  auto const type = tv.reg(1);

  auto& v = vmain(env);
  v << shrqi{32, type, dst, v.makeReg()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    auto const extra = inst->extra<LdTVAux>();
    // By the two's complement identity -x - 1 == ~x, this is ~extra->valid:
    // the set of aux bits that should never be present.
    auto const mask = -extra->valid - 1;

    if (mask) {
      auto const sf = v.makeReg();
      v << testqi{mask, dst, sf};
      ifThen(v, CC_NZ, sf, [](Vout& v) {
        v << trap{TRAP_REASON};
      });
    }
  }
}
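
The shift works because a TypedValue's m_aux field occupies the upper half of
the 8-byte type word; a sketch (ld_tv_aux is a hypothetical name):

#include <cstdint>

uint32_t ld_tv_aux(uint64_t typeWord) {
  // The aux bits sit above the low 32 bits that hold the type tag.
  return static_cast<uint32_t>(typeWord >> 32);
}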
Example #11
void cgLdContArKey(IRLS& env, const IRInstruction* inst) {
  auto const contAR = srcLoc(env, inst, 0).reg();
  auto const keyOff = GENDATAOFF(m_key) - Generator::arOff();
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0), contAR[keyOff]);
}
Example #12
void cgLdContField(IRLS& env, const IRInstruction* inst) {
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0),
         srcLoc(env, inst, 0).reg()[inst->src(1)->intVal()]);
}
Example #13
void cgLdStk(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<LdStk>()->offset.offset);
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0), sp[off]);
}
Example #14
void cgLdRef(IRLS& env, const IRInstruction* inst) {
  auto const ptr = srcLoc(env, inst, 0).reg();
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0),
         ptr[RefData::tvOffset()]);
}
Example #15
void cgLdAsyncArParentChain(IRLS& env, const IRInstruction* inst) {
  auto dst = dstLoc(env, inst, 0).reg();
  auto ar = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);
  v << load{ar[ar_rel(AFWH::parentChainOff())], dst};
}
Example #16
void cgStAsyncArResult(IRLS& env, const IRInstruction* inst) {
  auto const ar = srcLoc(env, inst, 0).reg();
  storeTV(vmain(env), ar[ar_rel(AFWH::resultOff())],
          srcLoc(env, inst, 1), inst->src(1));
}
Example #17
void cgEnterFrame(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  // phplogue records the native return address into the ActRec's saved-RIP
  // slot as part of entering the PHP frame.
  vmain(env) << phplogue{fp};
}
Example #18
void cgDbgTrashStk(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<DbgTrashStk>()->offset.offset);
  trashTV(vmain(env), sp, off, kTVTrashJITStk);
}
Example #19
oop()
{
	register char *ic;
#ifndef u370
	char atube[TUBESIZE + LBSIZE];
#endif
	ttymode f;	/* mjm: was register */
	int resize;

	if (resize = setjmp(venv)) {
		setsize();
		initev = (char *)0;
		inopen = 0;
		addr1 = addr2 = dot;
	}
	(void)signal(SIGWINCH, winch);
	ovbeg();
	if (peekchar() == '/') {
		ignore(compile(getchar(), 1));
		savere(scanre);
		if (execute(0, dot) == 0)
			error("Fail|Pattern not found on addressed line");
		ic = loc1;
		if (ic > linebuf && *ic == 0)
			ic--;
	} else {
		getDOT();
		ic = vskipwh(linebuf);
	}
	newline();

	/*
	 * If overstrike then have to HARDOPEN
	 * else if can move cursor up off current line can use CRTOPEN (~~vi1)
	 * otherwise (ugh) have to use ONEOPEN (like adm3)
	 */
	if (OS && !EO)
		bastate = HARDOPEN;
	else if (CA || UP)
		bastate = CRTOPEN;
	else
		bastate = ONEOPEN;
	setwind();

	/*
	 * To avoid bombing on glass-crt's when the line is too long
	 * pretend that such terminals are 160 columns wide.
	 * If a line is too wide for display, we will dynamically
	 * switch to hardcopy open mode.
	 */
	if (state != CRTOPEN)
		WCOLS = TUBECOLS;
	if (!inglobal)
		savevis();
	vok(atube);
	if (state != CRTOPEN)
		COLUMNS = WCOLS;
	Outchar = vputchar;
	f = ostart();
	if (state == CRTOPEN) {
		if (outcol == UKCOL)
			outcol = 0;
		vmoveitup(1, 1);
	} else
		outline = destline = WBOT;
	vshow(dot, NOLINE);
	vnline(ic);
	vmain();
	if (state != CRTOPEN)
		vclean();
	Command = "open";
	ovend(f);
	(void)signal(SIGWINCH, SIG_DFL);
}
Example #20
void cgMarkRDSInitialized(IRLS& env, const IRInstruction* inst) {
  auto const handle = inst->extra<MarkRDSInitialized>()->handle;
  // Only normal (request-local) handles carry an initialization bit;
  // persistent handles are always considered initialized.
  if (rds::isNormalHandle(handle)) markRDSHandleInitialized(vmain(env), handle);
}
Example #21
void cgLdMIPropStateAddr(IRLS& env, const IRInstruction* inst) {
  auto const off = rds::kVmMInstrStateOff + offsetof(MInstrState, propState);
  vmain(env) << lea{rvmtl()[off], dstLoc(env, inst, 0).reg()};
}
Example #22
void cgLdMIStateAddr(IRLS& env, const IRInstruction* inst) {
  auto const off = rds::kVmMInstrStateOff + inst->src(0)->intVal();
  vmain(env) << lea{rvmtl()[off], dstLoc(env, inst, 0).reg()};
}
Example #23
void cgStContArKey(IRLS& env, const IRInstruction* inst) {
  auto const contAR = srcLoc(env, inst, 0).reg();
  auto const keyOff = GENDATAOFF(m_key) - Generator::arOff();
  storeTV(vmain(env), contAR[keyOff], srcLoc(env, inst, 1), inst->src(1));
}
Example #24
void cgCall(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const fp = srcLoc(env, inst, 1).reg();
  auto const extra = inst->extra<Call>();
  auto const callee = extra->callee;
  auto const argc = extra->numParams;

  auto& v = vmain(env);
  auto& vc = vcold(env);
  auto const catchBlock = label(env, inst->taken());

  auto const calleeSP = sp[cellsToBytes(extra->spOffset.offset)];
  auto const calleeAR = calleeSP + cellsToBytes(argc);

  // Set up the callee's ActRec: link the saved frame pointer and record the
  // bytecode offset to return to.
  v << store{fp, calleeAR + AROFF(m_sfp)};
  v << storeli{safe_cast<int32_t>(extra->after), calleeAR + AROFF(m_soff)};

  if (extra->fcallAwait) {
    // This clobbers any flags that might have already been set on the callee
    // AR (e.g., by SpillFrame), but this is okay because there should never be
    // any conflicts; see the documentation in act-rec.h.
    auto const imm = static_cast<int32_t>(
      ActRec::encodeNumArgsAndFlags(argc, ActRec::Flags::IsFCallAwait)
    );
    v << storeli{imm, calleeAR + AROFF(m_numArgsAndFlags)};
  }

  auto const isNativeImplCall = callee &&
                                callee->builtinFuncPtr() &&
                                !callee->nativeFuncPtr() &&
                                argc == callee->numParams();
  if (isNativeImplCall) {
    // The assumption here is that for builtins, the generated func contains
    // only a single opcode (NativeImpl), and there are no non-argument locals.
    if (do_assert) {
      assertx(argc == callee->numLocals());
      assertx(callee->numIterators() == 0);

      auto addr = callee->getEntry();
      while (peek_op(addr) == Op::AssertRATL) {
        addr += instrLen(addr);
      }
      assertx(peek_op(addr) == Op::NativeImpl);
      assertx(addr + instrLen(addr) ==
              callee->unit()->entry() + callee->past());
    }

    v << store{v.cns(mcg->ustubs().retHelper), calleeAR + AROFF(m_savedRip)};
    if (callee->attrs() & AttrMayUseVV) {
      v << storeqi{0, calleeAR + AROFF(m_invName)};
    }
    v << lea{calleeAR, rvmfp()};

    emitCheckSurpriseFlagsEnter(v, vc, fp, Fixup(0, argc), catchBlock);

    auto const builtinFuncPtr = callee->builtinFuncPtr();
    TRACE(2, "Calling builtin preClass %p func %p\n",
          callee->preClass(), builtinFuncPtr);

    // We sometimes call this while curFunc() isn't really the builtin, so make
    // sure to record the sync point as if we are inside the builtin.
    if (FixupMap::eagerRecord(callee)) {
      auto const syncSP = v.makeReg();
      v << lea{calleeSP, syncSP};
      emitEagerSyncPoint(v, callee->getEntry(), rvmtl(), rvmfp(), syncSP);
    }

    // Call the native implementation.  This will free the locals for us in the
    // normal case.  In the case where an exception is thrown, the VM unwinder
    // will handle it for us.
    auto const done = v.makeBlock();
    v << vinvoke{CallSpec::direct(builtinFuncPtr), v.makeVcallArgs({{rvmfp()}}),
                 v.makeTuple({}), {done, catchBlock}, Fixup(0, argc)};
    env.catch_calls[inst->taken()] = CatchCall::CPP;

    v = done;
    // The native implementation already put the return value on the stack for
    // us, and handled cleaning up the arguments.  We have to update the frame
    // pointer and the stack pointer, and load the return value into the return
    // register so the trace we are returning to has it where it expects.
    // TODO(#1273094): We should probably modify the actual builtins to return
    // values via registers using the C ABI and do a reg-to-reg move.
    loadTV(v, inst->dst(), dstLoc(env, inst, 0), rvmfp()[AROFF(m_r)], true);
    v << load{rvmfp()[AROFF(m_sfp)], rvmfp()};
    emitRB(v, Trace::RBTypeFuncExit, callee->fullName()->data());
    return;
  }

  // Point rvmfp() at the callee's ActRec before entering the prologue.
  v << lea{calleeAR, rvmfp()};

  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    v << syncvmsp{v.cns(0x42)};

    constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;
    emitImmStoreq(v, kUninitializedRIP, rvmfp()[AROFF(m_savedRip)]);
  }

  // Emit a smashable call that initially calls a recyclable service request
  // stub.  The stub and the eventual targets take rvmfp() as an argument,
  // pointing to the callee ActRec.
  auto const target = callee
    ? mcg->ustubs().immutableBindCallStub
    : mcg->ustubs().bindCallStub;

  auto const done = v.makeBlock();
  v << callphp{target, php_call_regs(), {{done, catchBlock}}};
  env.catch_calls[inst->taken()] = CatchCall::PHP;
  v = done;

  auto const dst = dstLoc(env, inst, 0);
  v << defvmret{dst.reg(0), dst.reg(1)};
}
Example #25
void cgLdWHResult(IRLS& env, const IRInstruction* inst) {
  auto const obj = srcLoc(env, inst, 0).reg();
  loadTV(vmain(env), inst->dst(), dstLoc(env, inst, 0), obj[WH::resultOff()]);
}
Example #26
void cgStStk(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<StStk>()->offset.offset);
  storeTV(vmain(env), sp[off], srcLoc(env, inst, 1), inst->src(1));
}
Example #27
void cgLdAFWHActRec(IRLS& env, const IRInstruction* inst) {
  auto const dst = dstLoc(env, inst, 0).reg();
  auto const obj = srcLoc(env, inst, 0).reg();
  auto& v = vmain(env);
  v << lea{obj[AFWH::arOff()], dst};
}
Example #28
void cgLdStkAddr(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const off = cellsToBytes(inst->extra<LdStkAddr>()->offset.offset);
  vmain(env) << lea{sp[off], dstLoc(env, inst, 0).reg()};
}
Example #29
void cgCallBuiltin(IRLS& env, const IRInstruction* inst) {
  auto const extra = inst->extra<CallBuiltin>();
  auto const callee = extra->callee;
  auto const returnType = inst->typeParam();
  auto const funcReturnType = callee->returnType();
  auto const returnByValue = callee->isReturnByValue();

  auto const dstData = dstLoc(env, inst, 0).reg(0);
  auto const dstType = dstLoc(env, inst, 0).reg(1);

  auto& v = vmain(env);

  // Whether `t' is passed in/out of C++ as String&/Array&/Object&.
  auto const isReqPtrRef = [] (MaybeDataType t) {
    return isStringType(t) || isArrayLikeType(t) ||
           t == KindOfObject || t == KindOfResource;
  };

  if (FixupMap::eagerRecord(callee)) {
    auto const sp = srcLoc(env, inst, 1).reg();
    auto const spOffset = cellsToBytes(extra->spOffset.offset);
    auto const& marker = inst->marker();
    auto const pc = marker.fixupSk().unit()->entry() + marker.fixupBcOff();

    auto const synced_sp = v.makeReg();
    v << lea{sp[spOffset], synced_sp};
    emitEagerSyncPoint(v, pc, rvmtl(), srcLoc(env, inst, 0).reg(), synced_sp);
  }

  int returnOffset = rds::kVmMInstrStateOff +
                     offsetof(MInstrState, tvBuiltinReturn);
  auto args = argGroup(env, inst);

  if (!returnByValue) {
    if (isBuiltinByRef(funcReturnType)) {
      if (isReqPtrRef(funcReturnType)) {
        returnOffset += TVOFF(m_data);
      }
      // Pass the address of tvBuiltinReturn to the native function as the
      // location where it can construct the return Array, String, Object, or
      // Variant.
      args.addr(rvmtl(), returnOffset);
      args.indirect();
    }
  }

  // The srcs past the first two (sp and fp) are the arguments to the callee.
  auto srcNum = uint32_t{2};

  // Add the this_ or self_ argument for HNI builtins.
  if (callee->isMethod()) {
    if (callee->isStatic()) {
      args.ssa(srcNum);
      ++srcNum;
    } else {
      // Note that we don't support objects with vtables here (if they may need
      // a $this pointer adjustment).  This should be filtered out during irgen
      // or before.
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the func_num_args() value if needed.
  if (callee->attrs() & AttrNumArgs) {
    // If `numNonDefault' is negative, it is passed as a src instead.
    if (extra->numNonDefault >= 0) {
      args.imm((int64_t)extra->numNonDefault);
    } else {
      args.ssa(srcNum);
      ++srcNum;
    }
  }

  // Add the positional arguments.
  for (uint32_t i = 0; i < callee->numParams(); ++i, ++srcNum) {
    auto const& pi = callee->params()[i];

    // Non-pointer and NativeArg args are passed by value.  String, Array,
    // Object, and Variant are passed by const&, i.e. a pointer to stack memory
    // holding the value, so we expect PtrToT types for these.  Pointers to
    // req::ptr types (String, Array, Object) need adjusting to point to
    // &ptr->m_data.
    // TVOFF(m_data) is a compile-time constant; when it's zero, no pointer
    // adjustment is needed and the plain ssa path below is equivalent.
    if (TVOFF(m_data) && !pi.nativeArg && isReqPtrRef(pi.builtinType)) {
      assertx(inst->src(srcNum)->type() <= TPtrToGen);
      args.addr(srcLoc(env, inst, srcNum).reg(), TVOFF(m_data));
    } else if (pi.nativeArg && !pi.builtinType && !callee->byRef(i)) {
      // This condition indicates a MixedTV (i.e., TypedValue-by-value) arg.
      args.typedValue(srcNum);
    } else {
      args.ssa(srcNum, pi.builtinType == KindOfDouble);
    }
  }

  auto dest = [&] () -> CallDest {
    if (isBuiltinByRef(funcReturnType)) {
      if (!returnByValue) return kVoidDest; // indirect return
      return funcReturnType
        ? callDest(dstData) // String, Array, or Object
        : callDest(dstData, dstType); // Variant
    }
    return funcReturnType == KindOfDouble
      ? callDestDbl(env, inst)
      : callDest(env, inst);
  }();

  cgCallHelper(v, env, CallSpec::direct(callee->nativeFuncPtr()),
               dest, SyncOptions::Sync, args);

  // For primitive return types (int, bool, double) and returnByValue, the
  // return value is already in dstData/dstType.
  if (returnType.isSimpleType() || returnByValue) return;

  // For return by reference (String, Object, Array, Variant), the builtin
  // writes the return value into MInstrState::tvBuiltinReturn, from where it
  // has to be tested and copied.

  if (returnType.isReferenceType()) {
    // The return type is String, Array, or Object; fold nullptr to KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && isReqPtrRef(funcReturnType));

    v << load{rvmtl()[returnOffset], dstData};

    if (dstType.isValid()) {
      auto const sf = v.makeReg();
      auto const rtype = v.cns(returnType.toDataType());
      auto const nulltype = v.cns(KindOfNull);
      v << testq{dstData, dstData, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  if (returnType <= TCell || returnType <= TBoxedCell) {
    // The return type is Variant; fold KindOfUninit to KindOfNull.
    assertx(isBuiltinByRef(funcReturnType) && !isReqPtrRef(funcReturnType));
    static_assert(KindOfUninit == 0, "KindOfUninit must be 0 for test");

    v << load{rvmtl()[returnOffset + TVOFF(m_data)], dstData};

    if (dstType.isValid()) {
      auto const rtype = v.makeReg();
      v << loadb{rvmtl()[returnOffset + TVOFF(m_type)], rtype};

      auto const sf = v.makeReg();
      auto const nulltype = v.cns(KindOfNull);
      v << testb{rtype, rtype, sf};
      v << cmovb{CC_Z, sf, rtype, nulltype, dstType};
    }
    return;
  }

  not_reached();
}
Example #30
void cgDbgTrashMem(IRLS& env, const IRInstruction* inst) {
  auto const ptr = srcLoc(env, inst, 0).reg();
  trashTV(vmain(env), ptr, 0, kTVTrashJITHeap);
}