// Try to eliminate a GuardType instruction.  Returns the guarded source
// when the guard is provably redundant or can be folded into the defining
// load; returns nullptr when the guard must stay.
SSATmp* Simplifier::simplifyGuardType(IRInstruction* inst) {
  Type type = inst->getTypeParam();
  SSATmp* src = inst->getSrc(0);
  Type srcType = src->getType();
  // subtypeOf covers both "equal" and "strict subtype", so this single
  // check replaces the redundant (srcType == type ||
  // srcType.strictSubtypeOf(type)) form: the src is already at least as
  // refined as the guarded type, making the guard unnecessary.
  if (srcType.subtypeOf(type)) {
    return src;
  }
  if (type.strictSubtypeOf(srcType)) {
    // The guard genuinely narrows the type; try to push the check into
    // the instruction that produced the value.
    if (hoistGuardToLoad(src, type)) {
      return src;
    }
  } else {
    /*
     * incompatible types! We should just generate a jump here and
     * return null.
     *
     * For now, this case should currently be impossible, but it may
     * come up later due to other optimizations. The assert is so
     * we'll remember this spot ...
     */
    assert(0);
  }
  return nullptr;
}
// Walk every instruction in <trace>; whenever one defines a refcounted
// value of a statically known type, insert a DbgAssertRefCount check
// immediately after the defining instruction.
static void insertRefCountAssertsAux(Trace* trace, IRFactory* factory) {
  IRInstruction::List& insts = trace->getInstructionList();
  IRInstruction::Iterator cur = insts.begin();
  while (cur != insts.end()) {
    IRInstruction* defInst = *cur;
    // Advance before inserting so the new assert lands right after defInst
    // and is not itself visited.
    ++cur;
    SSATmp* defined = defInst->getDst();
    if (!defined) continue;
    if (Type::isStaticallyKnown(defined->getType()) &&
        Type::isRefCounted(defined->getType())) {
      auto* check = factory->gen(DbgAssertRefCount, defined);
      check->setParent(trace);
      insts.insert(cur, check);
    }
  }
}
// Returns true when <inst> is a load that produces a fully generic value
// (Gen/Cell) without a type guard, i.e. one of the load opcodes whose
// destination type shows no refinement beyond the generic case.
bool isUnguardedLoad(IRInstruction* inst) {
  SSATmp* result = inst->getDst();
  if (!result) return false;
  Type::Tag resultTy = result->getType();
  switch (inst->getOpcode()) {
    case LdStack:
      return resultTy == Type::Gen || resultTy == Type::Cell;
    case LdLoc:
      return resultTy == Type::Gen;
    case LdRefNR:
      return resultTy == Type::Cell;
    case LdMemNR:
      // Also require the address operand to be a pointer to a cell.
      return resultTy == Type::Cell &&
             inst->getSrc(0)->getType() == Type::PtrToCell;
    default:
      return false;
  }
}
// Simplify LdClsCtx when the context's flavor is statically known:
// a class context becomes LdClsCctx, a $this object becomes LdObjClass.
// Returns nullptr when the context type is not known precisely enough.
SSATmp* Simplifier::simplifyLdClsCtx(IRInstruction* inst) {
  SSATmp* context = inst->getSrc(0);
  Type contextTy = context->getType();
  if (contextTy.equals(Type::Cctx)) {
    // Already a class context: extract the Class* directly.
    return m_tb->gen(LdClsCctx, context);
  }
  if (contextTy.equals(Type::Obj)) {
    // this pointer... load its class ptr
    return m_tb->gen(LdObjClass, context);
  }
  return nullptr;
}
// Attempt to optimize away a load whose source memory location holds a
// value already tracked by this MemMap.  Either the load becomes a Mov of
// the known value, or — when the load carries a type guard that can never
// pass — it becomes an unconditional jump to the guard's exit label.
// Mutates <inst> in place.
void MemMap::optimizeLoad(IRInstruction* inst, int offset) {
  // check if we still know the value at this memory location. if we do,
  // then replace the load with a Mov
  SSATmp* value = getValue(inst->getSrc(0), offset);
  if (value == NULL) {
    return;
  }

  Type::Tag instTy = inst->getDst()->getType();
  Type::Tag valTy = value->getType();

  // check for loads that have a guard and will fail it: the types differ,
  // both are statically known, and they aren't merely two string flavors
  // (string subtypes pass each other's guards).  The guard can never
  // succeed, so rewrite the load as Jmp_; the exit label is left in place
  // as the jump target.
  if (inst->getLabel() != NULL && valTy != instTy) {
    if (!(Type::isString(valTy) && Type::isString(instTy)) &&
        Type::isStaticallyKnown(valTy) && Type::isStaticallyKnown(instTy)) {
      inst->setOpcode(Jmp_);
      inst->setNumSrcs(0);
      inst->setDst(NULL);
      return;
    }
  }

  Opcode op = inst->getOpcode();
  // fix the instruction's arguments and rip off its label if it had one
  inst->setSrc(0, value);
  if (op == LdProp) {
    // LdProp carries a second source operand; drop it since Mov takes
    // only the known value.
    inst->setSrc(1, NULL);
    inst->setNumSrcs(1);
  } else {
    assert(inst->getNumSrcs() == 1);
  }
  inst->setLabel(NULL);
  // convert the instruction into a Mov with the known value
  inst->setOpcode(Mov);
}
// Try to eliminate or refine a GuardType.  Returns the guarded source when
// the guard is redundant or can be folded into the defining load; may
// instead narrow the guard's type parameter in place.  Returns nullptr
// when the (possibly refined) guard must remain.
SSATmp* Simplifier::simplifyGuardType(IRInstruction* inst) {
  Type guardTy = inst->getTypeParam();
  SSATmp* val = inst->getSrc(0);
  Type valTy = val->getType();

  // The value is already at least as refined as the guard: no check needed.
  if (valTy.subtypeOf(guardTy)) {
    return val;
  }

  // The guard genuinely narrows the type; see whether the check can be
  // pushed into the instruction that produced the value.
  if (guardTy.strictSubtypeOf(valTy)) {
    return hoistGuardToLoad(val, guardTy) ? val : nullptr;
  }

  if (guardTy.equals(Type::Str) && valTy.maybe(Type::Str)) {
    // If we're guarding against Str and srcType has StaticStr or CountedStr
    // in it, refine the output type. This can happen when we have a
    // KindOfString guard from Translator but internally we know a more
    // specific subtype of Str.
    FTRACE(1, "Guarding {} to {}\n", valTy.toString(), guardTy.toString());
    inst->setTypeParam(guardTy & valTy);
    return nullptr;
  }

  /*
   * incompatible types! We should just generate a jump here and
   * return null.
   *
   * For now, this case should currently be impossible, but it may
   * come up later due to other optimizations. The assert is so
   * we'll remember this spot ...
   */
  not_implemented();
  return nullptr;
}
// Walk <trace>, replacing each Reload with a cheaper rematerialization of
// the spilled value when one is available: either a clone of the original
// defining instruction (when it is rematerializable, or an LdStack from
// the current stack pointer), or an LdLoc from a local known to hold the
// same value.  <curSp>/<curFp> track the SSATmps currently bound to
// rVmSp/rVmFp; <localValues> maps local ids to the canonical SSATmp last
// stored there.  <localValues> is passed by value so recursion into exit
// traces sees a private snapshot of the state at the branch point.
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      // These opcodes (re)define the frame pointer.
      curFp = dst;
      ASSERT(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize those rematerializable instructions (i.e.,
        // isRematerializable returns true) and LdStack.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is always
        // dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(), localValues.end(),
                    canonicalize(spilledTmp));
        // Search for a local that stores the value of <spilledTmp>.
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          ASSERT(curFp != NULL);
          // Build a "home" operand (frame pointer + local id) and load
          // the value from that local.
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->ldLoc(m_irFactory->getSSATmp(ldHomeInst),
                                       dst->getType(), NULL);
        }
      }
      if (newInst) {
        // Splice the rematerialized instruction in place of the Reload,
        // keeping the Reload's destination SSATmp.
        newInst->setDst(dst);
        newInst->getDst()->setInstruction(newInst);
        *it = newInst;
        newInst->setParent(trace);
      }
    }
    // Updating <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }
    if (inst->isControlFlowInstruction()) {
      // Recurse into an exit trace whose label directly follows this
      // instruction, carrying the current state into the exit.
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getTrace(), curSp, curFp, localValues);
      }
    }
  }
}
// XXX: to be refactored
// This function repeats the logic in cg to pre-color tmps that are
// going to be used in next native.
// For each opcode it mirrors the argument layout the code generator will
// use, hinting tmps into the argument registers they will end up in.
// add(tmp, index, argNum) hints word <index> of <tmp> into argument
// position <argNum> — presumably; verify against PreColoringHint::add.
void LinearScan::computePreColoringHint() {
  m_preColoringHint.clear();
  IRInstruction* nextNative = getNextNative();
  if (nextNative == NULL) {
    return;
  }
  // Hint <count> consecutive sources (starting at srcBase) into <count>
  // consecutive argument positions (starting at argBase).
  auto normalHint = [&](int count, int srcBase = 0, int argBase = 0) {
    for (int i = 0; i < count; ++i) {
      m_preColoringHint.add(nextNative->getSrc(i + srcBase), 0,
                            i + argBase);
    }
  };
  switch (nextNative->getOpcode()) {
    case Box:
      // A Cell source occupies two words; hint its second word too.
      if (nextNative->getSrc(0)->getType() == Type::Cell) {
        m_preColoringHint.add(nextNative->getSrc(0), 1, 0);
      }
      m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
      break;
    case LdObjMethod:
      m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
      m_preColoringHint.add(nextNative->getSrc(0), 0, 2);
      break;
    case LdFunc:
      m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
      break;
    case NativeImpl:
      m_preColoringHint.add(nextNative->getSrc(1), 0, 0);
      break;
    case Print:
      m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
      break;
    case AddElem:
      // Int/Int keys take the simple 3-arg form; otherwise the value is
      // passed as a two-word typed value in args 2 and 3.
      if (nextNative->getSrc(1)->getType() == Type::Int &&
          nextNative->getSrc(2)->getType() == Type::Int) {
        normalHint(3, 0, 1);
      } else {
        m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
        m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
        m_preColoringHint.add(nextNative->getSrc(2), 0, 2);
        m_preColoringHint.add(nextNative->getSrc(2), 1, 3);
      }
      break;
    case AddNewElem:
      m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
      m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
      m_preColoringHint.add(nextNative->getSrc(1), 1, 2);
      break;
    case Concat:
      {
        Type::Tag lType = nextNative->getSrc(0)->getType();
        Type::Tag rType = nextNative->getSrc(1)->getType();
        // str/str, str/int and int/str have specialized helpers taking
        // the operands in args 0 and 1; the generic path takes typed
        // values in args 1 and 3.
        if ((Type::isString(lType) && Type::isString(rType)) ||
            (Type::isString(lType) && rType == Type::Int) ||
            (lType == Type::Int && Type::isString(rType))) {
          m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
          m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
        } else {
          m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
          m_preColoringHint.add(nextNative->getSrc(1), 0, 3);
        }
      }
      break;
    case ArrayAdd:
      normalHint(2);
      break;
    case DefFunc:
      normalHint(1);
      break;
    case CreateCont:
      normalHint(4);
      break;
    case FillContLocals:
      normalHint(4);
      break;
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame:
      {
        auto src1 = nextNative->getSrc(0);
        auto src2 = nextNative->getSrc(1);
        auto type1 = src1->getType();
        auto type2 = src2->getType();
        // Only these comparison shapes reach a native helper call.
        if ((type1 == Type::Arr && type2 == Type::Arr) ||
            (Type::isString(type1) && Type::isString(type2)) ||
            (Type::isString(type1) && !src1->isConst()) ||
            (type1 == Type::Obj && type2 == Type::Obj)) {
          m_preColoringHint.add(src1, 0, 0);
          m_preColoringHint.add(src2, 0, 1);
        }
      }
      break;
    case Conv:
      {
        SSATmp* src = nextNative->getSrc(0);
        Type::Tag toType = nextNative->getType();
        Type::Tag fromType = src->getType();
        if (toType == Type::Bool) {
          switch (fromType) {
            case Type::Cell:
              // Generic cells are passed as two words.
              m_preColoringHint.add(src, 0, 0);
              m_preColoringHint.add(src, 1, 1);
              break;
            case Type::Str:
            case Type::StaticStr:
            case Type::Arr:
            case Type::Obj:
              m_preColoringHint.add(src, 0, 0);
              break;
            default:
              break;
          }
        } else if (Type::isString(toType)) {
          if (fromType == Type::Int) {
            m_preColoringHint.add(src, 0, 0);
          }
        } else if (Type::isString(fromType) && toType == Type::Int) {
          m_preColoringHint.add(src, 0, 0);
        }
        break;
      }
    default:
      break;
  }
}
// Block-based rematerialization pass.  Walks all blocks in layout order,
// replacing each Reload with a cheaper recomputation of the spilled value
// when one is available (a clone of the original rematerializable /
// LdStack instruction, or an LdLoc from a local known to hold the value).
// Per-block dataflow state (current sp/fp tmps and the local-id -> value
// map) is snapshotted at branches and conservatively merged when a block
// has multiple predecessors.
void LinearScan::rematerializeAux() {
  // Dataflow state carried across block boundaries.
  struct State {
    SSATmp *sp, *fp;
    std::vector<SSATmp*> values;
  };
  StateVector<Block, State*> states(m_irFactory, nullptr);
  SCOPE_EXIT { for (State* s : states) delete s; };
  SSATmp* curSp = nullptr;
  SSATmp* curFp = nullptr;
  std::vector<SSATmp*> localValues;
  // Invalidate the tracked value of the local named by source <src>
  // (a constant local id), if present.
  auto killLocal = [&](IRInstruction& inst, unsigned src) {
    if (src < inst.getNumSrcs()) {
      unsigned loc = inst.getSrc(src)->getValInt();
      if (loc < localValues.size()) localValues[loc] = nullptr;
    }
  };
  // Record that local <loc> now holds (the canonical form of) <value>.
  auto setLocal = [&](unsigned loc, SSATmp* value) {
    // Note that when we implement inlining, we will need to deal
    // with the new local id space of the inlined function.
    if (loc >= localValues.size()) localValues.resize(loc + 1);
    localValues[loc] = canonicalize(value);
  };
  // Search for a local that stores <value>; returns its id or -1.
  auto findLocal = [&](SSATmp* value) -> int {
    auto pos = std::find(localValues.begin(), localValues.end(),
                         canonicalize(value));
    return pos != localValues.end() ? pos - localValues.begin() : -1;
  };
  // save the current state for future use by block; merge if necessary.
  // Merging is conservative: any entry that differs between predecessors
  // is dropped (set to nullptr).
  auto saveState = [&](Block* block) {
    if (State* state = states[block]) {
      // merge with saved state
      assert(curFp == state->fp);
      if (curSp != state->sp) state->sp = nullptr;
      for (unsigned i = 0; i < state->values.size(); ++i) {
        if (i >= localValues.size() || localValues[i] != state->values[i]) {
          state->values[i] = nullptr;
        }
      }
    } else {
      // snapshot state for use at target.
      state = states[block] = new State;
      state->sp = curSp;
      state->fp = curFp;
      state->values = localValues;
    }
  };
  for (Block* block : m_blocks) {
    // Restore (and consume) the state saved by this block's predecessors.
    if (State* state = states[block]) {
      states[block] = nullptr;
      localValues = state->values;
      curSp = state->sp;
      curFp = state->fp;
      delete state;
    }
    for (auto it = block->begin(); it != block->end(); ++it) {
      IRInstruction& inst = *it;
      Opcode opc = inst.getOpcode();
      if (opc == DefFP || opc == FreeActRec) {
        // These opcodes (re)define the frame pointer.
        assert(inst.getDst()->getReg() == rVmFp);
        curFp = inst.getDst();
      }
      else if (opc == Reload) {
        // s = Spill t0
        // t = Reload s
        SSATmp* dst = inst.getDst();
        SSATmp* spilledTmp = getSpilledTmp(dst);
        IRInstruction* spilledInst = spilledTmp->getInstruction();
        IRInstruction* newInst = NULL;
        if (spilledInst->isRematerializable() ||
            (spilledInst->getOpcode() == LdStack &&
             spilledInst->getSrc(0) == curSp)) {
          // XXX: could change <newInst> to the non-check version.
          // Rematerialize those rematerializable instructions (i.e.,
          // isRematerializable returns true) and LdStack.
          newInst = spilledInst->clone(m_irFactory);
          // The new instruction needn't have an exit label; it must always
          // be dominated by the original instruction because reloads are
          // inserted just before uses, which must be dominated by the
          // original (spilled) def.
          newInst->setTaken(nullptr);
        } else if (curFp) {
          // Rematerialize LdLoc.
          int loc = findLocal(spilledTmp);
          if (loc != -1) {
            LocalId localId(loc);
            newInst = m_irFactory->gen(LdLoc, dst->getType(), &localId,
                                       curFp);
          }
        }
        if (newInst) {
          // Splice the rematerialized instruction in place of the Reload,
          // keeping the Reload's destination SSATmp, and repoint the
          // iterator at the new instruction.
          UNUSED Type oldType = dst->getType();
          newInst->setDst(dst);
          dst->setInstruction(newInst);
          assert(outputType(newInst) == oldType);
          auto* block = inst.getBlock();
          auto newIt = block->insert(it, newInst);
          block->erase(it);
          it = newIt;
        }
      }
      // Updating curSp and localValues
      if (inst.hasDst() && inst.getDst()->getReg() == rVmSp) {
        // inst modifies the stack pointer.
        curSp = inst.getDst();
      }
      if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
        setLocal(inst.getExtra<LocalId>()->locId,
                 opc == LdLoc ? inst.getDst() : inst.getSrc(1));
      }
      // Other instructions that may have side effects on locals must
      // kill the local variable values.
      else if (opc == IterInit) {
        killLocal(inst, 3);
      } else if (opc == IterInitK) {
        killLocal(inst, 3);
        killLocal(inst, 4);
      } else if (opc == IterNext) {
        killLocal(inst, 2);
      } else if (opc == IterNextK) {
        killLocal(inst, 2);
        killLocal(inst, 3);
      }
    }
    // Propagate the end-of-block state to both successors.
    if (Block* taken = block->getTaken()) saveState(taken);
    if (Block* next = block->getNext()) saveState(next);
  }
}
// Pre-color tmps used by the next native call so they land directly in
// the argument registers the code generator will use.  Opcodes described
// in the NativeCalls::CallMap are handled generically from their argument
// descriptors; the switch below mirrors cg's hand-written argument setup
// for the remaining opcodes.  add(tmp, index, argNum) hints word <index>
// of <tmp> into argument position <argNum> — presumably; verify against
// PreColoringHint::add.
void LinearScan::computePreColoringHint() {
  m_preColoringHint.clear();
  IRInstruction* inst = getNextNative();
  if (inst == nullptr) {
    return;
  }
  Opcode opc = inst->getOpcode();
  using namespace NativeCalls;
  if (CallMap::hasInfo(opc)) {
    // Generic path: walk the call's argument descriptors in order.
    unsigned reg = 0;
    for (auto const& arg : CallMap::getInfo(opc).args) {
      switch (arg.type) {
        case SSA:
          m_preColoringHint.add(inst->getSrc(arg.srcIdx), 0, reg++);
          break;
        case TV:
        case VecKeyS:
        case VecKeyIS:
          // Typed values occupy two consecutive argument positions.
          m_preColoringHint.add(inst->getSrc(arg.srcIdx), 0, reg++);
          m_preColoringHint.add(inst->getSrc(arg.srcIdx), 1, reg++);
          break;
      }
    }
    return;
  }
  // For instructions that want to hint a continuous increasing range
  // of sources to a continuous increasing range of argument
  // registers.
  auto normalHint = [&](int count, int srcBase = 0, int argBase = 0) {
    for (int i = 0; i < count; ++i) {
      m_preColoringHint.add(inst->getSrc(i + srcBase), 0, i + argBase);
    }
  };
  switch (opc) {
    case LdFunc:
      m_preColoringHint.add(inst->getSrc(0), 0, 1);
      break;
    case NativeImpl:
      m_preColoringHint.add(inst->getSrc(1), 0, 0);
      break;
    case Concat:
      {
        Type lType = inst->getSrc(0)->getType();
        Type rType = inst->getSrc(1)->getType();
        // str/str, str/int and int/str have specialized helpers taking
        // the operands in args 0 and 1; the generic path takes typed
        // values in args 1 and 3.
        if ((lType.isString() && rType.isString()) ||
            (lType.isString() && rType == Type::Int) ||
            (lType == Type::Int && rType.isString())) {
          m_preColoringHint.add(inst->getSrc(0), 0, 0);
          m_preColoringHint.add(inst->getSrc(1), 0, 1);
        } else {
          m_preColoringHint.add(inst->getSrc(0), 0, 1);
          m_preColoringHint.add(inst->getSrc(1), 0, 3);
        }
      }
      break;
    case AKExists:
      normalHint(2);
      break;
    case DefFunc:
      normalHint(1);
      break;
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame:
      {
        auto src1 = inst->getSrc(0);
        auto src2 = inst->getSrc(1);
        auto type1 = src1->getType();
        auto type2 = src2->getType();
        // Only these comparison shapes reach a native helper call.
        if ((type1.isArray() && type2.isArray()) ||
            (type1.isString() && type2.isString()) ||
            (type1.isString() && !src1->isConst()) ||
            (type1 == Type::Obj && type2 == Type::Obj)) {
          m_preColoringHint.add(src1, 0, 0);
          m_preColoringHint.add(src2, 0, 1);
        }
      }
      break;
    case IterInit:
      {
        m_preColoringHint.add(inst->getSrc(0), 0, 1);
      }
      break;
    case ConvToArr:
      break;
    case ConvToBool:
      {
        SSATmp* src = inst->getSrc(0);
        Type fromType = src->getType();
        if (fromType == Type::Cell) {
          // Generic cells are passed as two words.
          m_preColoringHint.add(src, 0, 0);
          m_preColoringHint.add(src, 1, 1);
        } else if (fromType == Type::Str ||
                   fromType == Type::StaticStr ||
                   fromType.isArray() ||
                   fromType == Type::Obj) {
          m_preColoringHint.add(src, 0, 0);
        }
        break;
      }
    case ConvToDbl:
      break;
    case ConvToInt:
      {
        SSATmp* src = inst->getSrc(0);
        Type fromType = src->getType();
        if (fromType.isString()) {
          m_preColoringHint.add(src, 0, 0);
        }
        break;
      }
    case ConvToObj:
      break;
    case ConvToStr:
      break;
    case InstanceOf:
    case NInstanceOf:
    case JmpInstanceOf:
    case JmpNInstanceOf:
      normalHint(2);
      break;
    case LdSSwitchDestFast:
      normalHint(1);
      break;
    case LdSSwitchDestSlow:
      normalHint(1);
      break;
    case LdGblAddr:
    case LdGblAddrDef:
      normalHint(1);
      break;
    case LdClsPropAddr:
      normalHint(3);
      break;
    case LdCls:
      m_preColoringHint.add(inst->getSrc(0), 0, 1);
      break;
    case BoxPtr:
      normalHint(1);
      break;
    default:
      break;
  }
}
void LinearScan::rematerializeAux(Trace* trace, SSATmp* curSp, SSATmp* curFp, std::vector<SSATmp*> localValues) { IRInstruction::List& instList = trace->getInstructionList(); for (IRInstruction::Iterator it = instList.begin(); it != instList.end(); ++it) { IRInstruction* inst = *it; Opcode opc = inst->getOpcode(); SSATmp* dst = inst->getDst(); if (opc == DefFP || opc == FreeActRec) { curFp = dst; assert(dst && dst->getReg() == rVmFp); } if (opc == Reload) { // s = Spill t0 // t = Reload s SSATmp* spilledTmp = getSpilledTmp(dst); IRInstruction* spilledInst = spilledTmp->getInstruction(); IRInstruction* newInst = NULL; if (spilledInst->isRematerializable() || (spilledInst->getOpcode() == LdStack && spilledInst->getSrc(0) == curSp)) { // XXX: could change <newInst> to the non-check version. // Rematerialize those rematerializable instructions (i.e., // isRematerializable returns true) and LdStack. newInst = spilledInst->clone(m_irFactory); // The new instruction needn't have an exit label, because it is always // dominated by the original instruction. newInst->setLabel(NULL); } else { // Rematerialize LdLoc. std::vector<SSATmp*>::iterator pos = std::find(localValues.begin(), localValues.end(), canonicalize(spilledTmp)); // Search for a local that stores the value of <spilledTmp>. if (pos != localValues.end()) { size_t locId = pos - localValues.begin(); assert(curFp != NULL); ConstInstruction constInst(curFp, Local(locId)); IRInstruction* ldHomeInst = m_irFactory->cloneInstruction(&constInst); newInst = m_irFactory->gen(LdLoc, dst->getType(), m_irFactory->getSSATmp(ldHomeInst)); } } if (newInst) { UNUSED Type::Tag oldType = dst->getType(); newInst->setDst(dst); dst->setInstruction(newInst); assert(outputType(newInst) == oldType); *it = newInst; newInst->setParent(trace); } } // Updating <curSp> and <localValues>. if (dst && dst->getReg() == rVmSp) { // <inst> modifies the stack pointer. 
curSp = dst; } if (opc == LdLoc || opc == StLoc || opc == StLocNT) { // dst = LdLoc home // StLoc/StLocNT home, src int locId = getLocalIdFromHomeOpnd(inst->getSrc(0)); // Note that when we implement inlining, we will need to deal // with the new local id space of the inlined function. SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1)); if (int(localValues.size()) < locId + 1) { localValues.resize(locId + 1); } localValues[locId] = canonicalize(localValue); } // Other instructions that may have side effects on locals must // kill the local variable values. else if (opc == IterInit) { int valLocId = inst->getSrc(3)->getConstValAsInt(); localValues[valLocId] = NULL; if (inst->getNumSrcs() == 5) { int keyLocId = inst->getSrc(4)->getConstValAsInt(); localValues[keyLocId] = NULL; } } else if (opc == IterNext) { int valLocId = inst->getSrc(2)->getConstValAsInt(); localValues[valLocId] = NULL; if (inst->getNumSrcs() == 4) { int keyLocId = inst->getSrc(3)->getConstValAsInt(); localValues[keyLocId] = NULL; } } if (inst->isControlFlowInstruction()) { LabelInstruction* label = inst->getLabel(); if (label != NULL && label->getId() == inst->getId() + 1) { rematerializeAux(label->getParent(), curSp, curFp, localValues); } } } }