bool
RegisterAllocator::init()
{
    if (!insData.init(mir, graph.numInstructions()))
        return false;

    if (!entryPositions.reserve(graph.numBlocks()) || !exitPositions.reserve(graph.numBlocks()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
            insData[ins->id()] = *ins;
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            insData[phi->id()] = phi;
        }

        CodePosition entry = block->numPhis() != 0
                             ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
                             : inputOf(block->firstInstructionWithId());
        CodePosition exit = outputOf(block->lastInstructionWithId());

        MOZ_ASSERT(block->mir()->id() == i);
        entryPositions.infallibleAppend(entry);
        exitPositions.infallibleAppend(exit);
    }

    return true;
}
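// A minimal standalone sketch (an assumption, not the SpiderMonkey
// implementation) of how a CodePosition-style value can pack an instruction
// id together with an INPUT/OUTPUT subposition in one word, with the
// subposition in the low bit. The class name `Pos` is hypothetical.
#include <cassert>
#include <cstdint>

class Pos {
    uint32_t bits_;
  public:
    enum SubPosition { INPUT = 0, OUTPUT = 1 };
    Pos(uint32_t id, SubPosition sub) : bits_((id << 1) | sub) {}
    uint32_t bits() const { return bits_; }
    uint32_t ins() const { return bits_ >> 1; }          // recover the instruction id
    Pos next() const {                                   // the immediately following position
        Pos p(0, INPUT);
        p.bits_ = bits_ + 1;
        return p;
    }
};

int main() {
    Pos in(7, Pos::INPUT), out(7, Pos::OUTPUT);
    assert(in.bits() < out.bits());        // an instruction's input precedes its output
    assert(in.next().bits() == out.bits());
    assert(out.ins() == 7);
    return 0;
}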
void
LIRGeneratorShared::lowerTypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block,
                                       size_t lirIndex)
{
    MDefinition *operand = phi->getOperand(inputPosition);
    LPhi *lir = block->getPhi(lirIndex);
    lir->setOperand(inputPosition, LUse(operand->virtualRegister(), LUse::ANY));
}
LPhi *
LPhi::New(MIRGenerator *gen, MPhi *ins)
{
    LPhi *phi = new(gen->alloc()) LPhi(ins);
    if (!phi->init(gen))
        return nullptr;
    return phi;
}
LPhi *
LPhi::New(MIRGenerator *gen, MPhi *ins)
{
    LPhi *phi = new LPhi(ins);
    if (!phi->init(gen))
        return NULL;
    return phi;
}
void
LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
                                      size_t lirIndex)
{
    MDefinition* operand = phi->getOperand(inputPosition);
    LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
    LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
    type->setOperand(inputPosition,
                     LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
    payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}
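// Hedged sketch of the addressing convention the function above relies on: on
// 32-bit targets a boxed Value occupies two consecutive virtual registers,
// reached with fixed offsets from a base vreg. The constants and helpers
// below are illustrative stand-ins for VREG_TYPE_OFFSET/VREG_DATA_OFFSET, not
// the real definitions.
#include <cassert>
#include <cstdint>

static const uint32_t TYPE_OFFSET = 0;  // assumed value of VREG_TYPE_OFFSET
static const uint32_t DATA_OFFSET = 1;  // assumed value of VREG_DATA_OFFSET

static uint32_t typeVregOf(uint32_t base) { return base + TYPE_OFFSET; }
static uint32_t payloadVregOf(uint32_t base) { return base + DATA_OFFSET; }

int main() {
    uint32_t base = 42;  // base vreg assigned to the MIR phi (hypothetical)
    // The type and payload halves are adjacent, in a fixed order.
    assert(typeVregOf(base) + 1 == payloadVregOf(base));
    return 0;
}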
void
LIRGeneratorARM::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
                                    size_t lirIndex)
{
    MDefinition* operand = phi->getOperand(inputPosition);
    LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
    LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
    low->setOperand(inputPosition,
                    LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
    high->setOperand(inputPosition,
                     LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
}
template <typename VREG>
bool
LiveRangeAllocator<VREG>::init()
{
    if (!RegisterAllocator::init())
        return false;

    liveIn = lir->mir()->allocate<BitSet*>(graph.numBlockIds());
    if (!liveIn)
        return false;

    // Initialize fixed intervals.
    for (size_t i = 0; i < AnyRegister::Total; i++) {
        AnyRegister reg = AnyRegister::FromCode(i);
        LiveInterval *interval = new LiveInterval(0);
        if (!interval)
            return false;
        interval->setAllocation(LAllocation(reg));
        fixedIntervals[i] = interval;
    }

    fixedIntervalsUnion = new LiveInterval(0);
    if (!fixedIntervalsUnion)
        return false;

    if (!vregs.init(lir->mir(), graph.numVirtualRegisters()))
        return false;

    // Build virtual register objects.
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        if (mir->shouldCancel("LSRA create data structures (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition *def = ins->getDef(j);
                if (def->policy() != LDefinition::PASSTHROUGH) {
                    uint32_t reg = def->virtualRegister();
                    if (!vregs[reg].init(reg, block, *ins, def, /* isTemp */ false))
                        return false;
                }
            }
            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition *def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                if (!vregs[def].init(def->virtualRegister(), block, *ins, def, /* isTemp */ true))
                    return false;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            LDefinition *def = phi->getDef(0);
            if (!vregs[def].init(phi->id(), block, phi, def, /* isTemp */ false))
                return false;
        }
    }

    return true;
}
void
LIRGeneratorShared::defineTypedPhi(MPhi* phi, size_t lirIndex)
{
    LPhi* lir = current->getPhi(lirIndex);

    uint32_t vreg = getVirtualRegister();

    phi->setVirtualRegister(vreg);
    lir->setDef(0, LDefinition(vreg, LDefinition::TypeFrom(phi->type())));
    annotate(lir);
}
bool
LIRGeneratorShared::defineTypedPhi(MPhi *phi, size_t lirIndex)
{
    LPhi *lir = current->getPhi(lirIndex);

    uint32_t vreg = getVirtualRegister();
    if (vreg >= MAX_VIRTUAL_REGISTERS)
        return false;

    phi->setVirtualRegister(vreg);
    lir->setDef(0, LDefinition(vreg, LDefinition::TypeFrom(phi->type())));
    annotate(lir);
    return true;
}
bool
StupidAllocator::init()
{
    if (!RegisterAllocator::init())
        return false;

    if (!virtualRegisters.reserve(graph.numVirtualRegisters()))
        return false;
    for (size_t i = 0; i < graph.numVirtualRegisters(); i++)
        virtualRegisters.infallibleAppend(NULL);

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock *block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition *def = ins->getDef(j);
                if (def->policy() != LDefinition::PASSTHROUGH)
                    virtualRegisters[def->virtualRegister()] = def;
            }
            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition *def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                virtualRegisters[def->virtualRegister()] = def;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            LDefinition *def = phi->getDef(0);
            uint32 vreg = def->virtualRegister();
            virtualRegisters[vreg] = def;
        }
    }

    // Assign physical registers to the tracked allocation.
    {
        registerCount = 0;
        RegisterSet remainingRegisters(allRegisters_);
        while (!remainingRegisters.empty(/* float = */ false))
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeGeneral());
        while (!remainingRegisters.empty(/* float = */ true))
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeFloat());
        JS_ASSERT(registerCount <= MAX_REGISTERS);
    }

    return true;
}
bool
StupidAllocator::init()
{
    if (!RegisterAllocator::init())
        return false;

    if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition* def = ins->getDef(j);
                virtualRegisters[def->virtualRegister()] = def;
            }
            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition* def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                virtualRegisters[def->virtualRegister()] = def;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            LDefinition* def = phi->getDef(0);
            uint32_t vreg = def->virtualRegister();
            virtualRegisters[vreg] = def;
        }
    }

    // Assign physical registers to the tracked allocation.
    {
        registerCount = 0;
        LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
        while (!remainingRegisters.emptyGeneral())
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyGeneral());
        while (!remainingRegisters.emptyFloat())
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyFloat());
        MOZ_ASSERT(registerCount <= MAX_REGISTERS);
    }

    return true;
}
void
StupidAllocator::syncForBlockEnd(LBlock *block, LInstruction *ins)
{
    // Sync any dirty registers, and update the synced state for phi nodes at
    // each successor of a block. We cannot conflate the storage for phis with
    // that of their inputs, as we cannot prove the live ranges of the phi and
    // its input do not overlap. The values for the two may additionally be
    // different, as the phi could be for the value of the input in a previous
    // loop iteration.

    for (size_t i = 0; i < registerCount; i++)
        syncRegister(ins, i);

    LMoveGroup *group = nullptr;

    MBasicBlock *successor = block->mir()->successorWithPhis();
    if (successor) {
        uint32_t position = block->mir()->positionInPhiSuccessor();
        LBlock *lirsuccessor = graph.getBlock(successor->id());
        for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
            LPhi *phi = lirsuccessor->getPhi(i);

            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
            uint32_t destvreg = phi->getDef(0)->virtualRegister();

            if (sourcevreg == destvreg)
                continue;

            LAllocation *source = stackLocation(sourcevreg);
            LAllocation *dest = stackLocation(destvreg);

            if (!group) {
                // The moves we insert here need to happen simultaneously with
                // each other, yet after any existing moves before the instruction.
                LMoveGroup *input = getInputMoveGroup(ins->id());
                if (input->numMoves() == 0) {
                    group = input;
                } else {
                    group = new LMoveGroup(alloc());
                    block->insertAfter(input, group);
                }
            }

            group->add(source, dest);
        }
    }
}
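// Hedged sketch of the parallel-move semantics the comment above relies on:
// every move in a group reads its source before any destination is written,
// so a swap { a->b, b->a } cannot be lowered as two sequential copies. This
// toy resolver (snapshot-then-write) is illustrative only; real allocators
// serialize move groups without the extra copy.
#include <cassert>
#include <utility>
#include <vector>

static void performParallelMoves(std::vector<int>& slots,
                                 const std::vector<std::pair<int, int>>& moves) {
    std::vector<int> snapshot = slots;        // read all sources first
    for (const auto& m : moves)
        slots[m.second] = snapshot[m.first];  // then write all destinations
}

int main() {
    std::vector<int> slots = {1, 2};
    performParallelMoves(slots, {{0, 1}, {1, 0}});  // simultaneous swap
    assert(slots[0] == 2 && slots[1] == 1);
    return 0;
}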
void
LIRGeneratorARM::defineUntypedPhi(MPhi* phi, size_t lirIndex)
{
    LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
    LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);

    uint32_t typeVreg = getVirtualRegister();
    phi->setVirtualRegister(typeVreg);

    uint32_t payloadVreg = getVirtualRegister();
    MOZ_ASSERT(typeVreg + 1 == payloadVreg);

    type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
    payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
    annotate(type);
    annotate(payload);
}
bool
LBlock::init(TempAllocator& alloc)
{
    // Count the number of LPhis we'll need.
    size_t numLPhis = 0;
    for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
        MPhi* phi = *i;
        switch (phi->type()) {
          case MIRType::Value: numLPhis += BOX_PIECES; break;
          case MIRType::Int64: numLPhis += INT64_PIECES; break;
          default: numLPhis += 1; break;
        }
    }

    // Allocate space for the LPhis.
    if (!phis_.init(alloc, numLPhis))
        return false;

    // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
    // operands on each incoming edge, and set their definitions at the start of
    // their defining block.
    size_t phiIndex = 0;
    size_t numPreds = block_->numPredecessors();
    for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
        MPhi* phi = *i;
        MOZ_ASSERT(phi->numOperands() == numPreds);

        int numPhis;
        switch (phi->type()) {
          case MIRType::Value: numPhis = BOX_PIECES; break;
          case MIRType::Int64: numPhis = INT64_PIECES; break;
          default: numPhis = 1; break;
        }
        for (int i = 0; i < numPhis; i++) {
            LAllocation* inputs = alloc.allocateArray<LAllocation>(numPreds);
            if (!inputs)
                return false;

            void* addr = &phis_[phiIndex++];
            LPhi* lphi = new (addr) LPhi(phi, inputs);
            lphi->setBlock(this);
        }
    }
    return true;
}
bool
RegisterAllocator::init()
{
    if (!insData.init(mir, graph.numInstructions()))
        return false;

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
            insData[ins->id()] = *ins;
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            insData[phi->id()] = phi;
        }
    }

    return true;
}
void
LIRGeneratorARM::defineInt64Phi(MPhi* phi, size_t lirIndex)
{
    LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
    LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);

    uint32_t lowVreg = getVirtualRegister();
    phi->setVirtualRegister(lowVreg);

    uint32_t highVreg = getVirtualRegister();
    MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);

    low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
    high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
    annotate(high);
    annotate(low);
}
bool
GreedyAllocator::buildPhiMoves(LBlock *block)
{
    IonSpew(IonSpew_RegAlloc, " Merging phi state.");

    phiMoves = Mover();

    MBasicBlock *mblock = block->mir();
    if (!mblock->successorWithPhis())
        return true;

    // Insert moves from our state into our successor's phi.
    uint32 pos = mblock->positionInPhiSuccessor();
    LBlock *successor = mblock->successorWithPhis()->lir();
    for (size_t i = 0; i < successor->numPhis(); i++) {
        LPhi *phi = successor->getPhi(i);
        JS_ASSERT(phi->numDefs() == 1);

        VirtualRegister *phiReg = getVirtualRegister(phi->getDef(0));
        allocateStack(phiReg);

        LAllocation *in = phi->getOperand(pos);
        VirtualRegister *inReg = getVirtualRegister(in->toUse());
        allocateStack(inReg);

        // Try to get a register for the input.
        if (!inReg->hasRegister() && !allocatableRegs().empty(inReg->isDouble())) {
            if (!allocateReg(inReg))
                return false;
        }

        // Add a move from the input to the phi.
        if (inReg->hasRegister()) {
            if (!phiMoves.move(inReg->reg(), phiReg->backingStack()))
                return false;
        } else {
            if (!phiMoves.move(inReg->backingStack(), phiReg->backingStack()))
                return false;
        }
    }

    return true;
}
bool
LBlock::init(TempAllocator& alloc)
{
    // Count the number of LPhis we'll need.
    size_t numLPhis = 0;
    for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
        MPhi* phi = *i;
        numLPhis += (phi->type() == MIRType_Value) ? BOX_PIECES : 1;
    }

    // Allocate space for the LPhis.
    if (!phis_.init(alloc, numLPhis))
        return false;

    // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
    // operands on each incoming edge, and set their definitions at the start of
    // their defining block.
    size_t phiIndex = 0;
    size_t numPreds = block_->numPredecessors();
    for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
        MPhi* phi = *i;
        MOZ_ASSERT(phi->numOperands() == numPreds);

        int numPhis = (phi->type() == MIRType_Value) ? BOX_PIECES : 1;
        for (int i = 0; i < numPhis; i++) {
            void* array = alloc.allocateArray<sizeof(LAllocation)>(numPreds);
            LAllocation* inputs = static_cast<LAllocation*>(array);
            if (!inputs)
                return false;

            // MSVC 2015 cannot handle "new (&phis_[phiIndex++])"
            void* addr = &phis_[phiIndex++];
            LPhi* lphi = new (addr) LPhi(phi, inputs);
            lphi->setBlock(this);
        }
    }
    return true;
}
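// Hedged sketch of the LPhi-count computation in the two LBlock::init
// variants above: a Value phi expands to BOX_PIECES LIR phis and anything
// else to one. The BOX_PIECES value below assumes a NUNBOX32-style two-piece
// box; on 64-bit targets it would be 1. Types and names are toy stand-ins.
#include <cassert>
#include <cstddef>
#include <vector>

enum class MType { Value, Int32, Double };
static const size_t BOX_PIECES = 2;  // assumption: 32-bit-style boxed Value

static size_t countLPhis(const std::vector<MType>& phiTypes) {
    size_t n = 0;
    for (MType t : phiTypes)
        n += (t == MType::Value) ? BOX_PIECES : 1;  // mirrors the loop above
    return n;
}

int main() {
    // One boxed phi (2 pieces) plus one Int32 phi (1 piece).
    assert(countLPhis({MType::Value, MType::Int32}) == 3);
    return 0;
}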
bool
LIRGeneratorARM::defineUntypedPhi(MPhi *phi, size_t lirIndex)
{
    LPhi *type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
    LPhi *payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);

    uint32_t typeVreg = getVirtualRegister();
    if (typeVreg >= MAX_VIRTUAL_REGISTERS)
        return false;

    phi->setVirtualRegister(typeVreg);

    uint32_t payloadVreg = getVirtualRegister();
    if (payloadVreg >= MAX_VIRTUAL_REGISTERS)
        return false;
    JS_ASSERT(typeVreg + 1 == payloadVreg);

    type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
    payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
    annotate(type);
    annotate(payload);
    return true;
}
void
RegisterAllocator::dumpInstructions()
{
#ifdef DEBUG
    fprintf(stderr, "Instructions:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi* phi = block->getPhi(i);

            fprintf(stderr, "[%u,%u Phi] [def %s]",
                    inputOf(phi).bits(), outputOf(phi).bits(),
                    phi->getDef(0)->toString());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", phi->getOperand(j)->toString());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

            fprintf(stderr, "[");
            if (ins->id() != 0)
                fprintf(stderr, "%u,%u ", inputOf(ins).bits(), outputOf(ins).bits());
            fprintf(stderr, "%s]", ins->opName());

            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    // Use two printfs, as LAllocation::toString is not reentrant.
                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp %s]", temp->toString());
            }

            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                if (!alloc->isBogus())
                    fprintf(stderr, " [use %s]", alloc->toString());
            }

            fprintf(stderr, "\n");
        }
    }
    fprintf(stderr, "\n");
#endif // DEBUG
}
void
AllocationIntegrityState::dump()
{
#ifdef DEBUG
    fprintf(stderr, "Register Allocation Integrity State:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            const InstructionInfo& info = blocks[blockIndex].phis[i];
            LPhi* phi = block->getPhi(i);
            CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
            CodePosition output(block->getPhi(block->numPhis() - 1)->id(),
                                CodePosition::OUTPUT);

            fprintf(stderr, "[%u,%u Phi] [def %s] ",
                    input.bits(), output.bits(), phi->getDef(0)->toString());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", info.inputs[j].toString());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            CodePosition input(ins->id(), CodePosition::INPUT);
            CodePosition output(ins->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[");
            if (input != CodePosition::MIN)
                fprintf(stderr, "%u,%u ", input.bits(), output.bits());
            fprintf(stderr, "%s]", ins->opName());

            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    // Use two printfs, as LAllocation::toString is not reentrant.
                    fprintf(stderr, " [%s", group->getMove(i).from()->toString());
                    fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(),
                            temp->toString());
            }

            size_t index = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                fprintf(stderr, " [use %s", info.inputs[index++].toString());
                if (!alloc->isConstant())
                    fprintf(stderr, " %s", alloc->toString());
                fprintf(stderr, "]");
            }

            fprintf(stderr, "\n");
        }
    }

    // Print discovered allocations at the ends of blocks, in the order they
    // were discovered.
    Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
    if (!seenOrdered.appendN(IntegrityItem(), seen.count()))
        return;

    for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
        IntegrityItem item = iter.front();
        seenOrdered[item.index] = item;
    }

    if (!seenOrdered.empty()) {
        fprintf(stderr, "Intermediate Allocations:\n");

        for (size_t i = 0; i < seenOrdered.length(); i++) {
            IntegrityItem item = seenOrdered[i];
            fprintf(stderr, "  block %u reg v%u alloc %s\n",
                    item.block->mir()->id(), item.vreg, item.alloc.toString());
        }
    }

    fprintf(stderr, "\n");
#endif
}
bool
AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins, uint32_t vreg,
                                         LAllocation alloc, bool populateSafepoints)
{
    for (LInstructionReverseIterator iter(block->rbegin(ins)); iter != block->rend(); iter++) {
        ins = *iter;

        // Follow values through assignments in move groups. All assignments in
        // a move group are considered to happen simultaneously, so stop after
        // the first matching move is found.
        if (ins->isMoveGroup()) {
            LMoveGroup* group = ins->toMoveGroup();
            for (int i = group->numMoves() - 1; i >= 0; i--) {
                if (*group->getMove(i).to() == alloc) {
                    alloc = *group->getMove(i).from();
                    break;
                }
            }
        }

        const InstructionInfo& info = instructions[ins->id()];

        // Make sure the physical location being tracked is not clobbered by
        // another instruction, and that if the originating vreg definition is
        // found that it is writing to the tracked location.
        for (size_t i = 0; i < ins->numDefs(); i++) {
            LDefinition* def = ins->getDef(i);
            if (def->isBogusTemp())
                continue;
            if (info.outputs[i].virtualRegister() == vreg) {
                MOZ_ASSERT(*def->output() == alloc);

                // Found the original definition, done scanning.
                return true;
            } else {
                MOZ_ASSERT(*def->output() != alloc);
            }
        }

        for (size_t i = 0; i < ins->numTemps(); i++) {
            LDefinition* temp = ins->getTemp(i);
            if (!temp->isBogusTemp())
                MOZ_ASSERT(*temp->output() != alloc);
        }

        if (ins->safepoint()) {
            if (!checkSafepointAllocation(ins, vreg, alloc, populateSafepoints))
                return false;
        }
    }

    // Phis are effectless, but change the vreg we are tracking. Check if there
    // is one which produced this vreg. We need to follow back through the phi
    // inputs as it is not guaranteed the register allocator filled in physical
    // allocations for the inputs and outputs of the phis.
    for (size_t i = 0; i < block->numPhis(); i++) {
        const InstructionInfo& info = blocks[block->mir()->id()].phis[i];
        LPhi* phi = block->getPhi(i);
        if (info.outputs[0].virtualRegister() == vreg) {
            for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
                uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
                LBlock* predecessor = block->mir()->getPredecessor(j)->lir();
                if (!addPredecessor(predecessor, newvreg, alloc))
                    return false;
            }
            return true;
        }
    }

    // No phi which defined the vreg we are tracking, follow back through all
    // predecessors with the existing vreg.
    for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
        LBlock* predecessor = block->mir()->getPredecessor(i)->lir();
        if (!addPredecessor(predecessor, vreg, alloc))
            return false;
    }

    return true;
}
bool
AllocationIntegrityState::record()
{
    // Ignore repeated record() calls.
    if (!instructions.empty())
        return true;

    if (!instructions.appendN(InstructionInfo(), graph.numInstructions()))
        return false;

    if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
        return false;

    if (!blocks.reserve(graph.numBlocks()))
        return false;
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        blocks.infallibleAppend(BlockInfo());
        LBlock* block = graph.getBlock(i);
        MOZ_ASSERT(block->mir()->id() == i);

        BlockInfo& blockInfo = blocks[i];
        if (!blockInfo.phis.reserve(block->numPhis()))
            return false;
        for (size_t j = 0; j < block->numPhis(); j++) {
            blockInfo.phis.infallibleAppend(InstructionInfo());
            InstructionInfo& info = blockInfo.phis[j];
            LPhi* phi = block->getPhi(j);
            MOZ_ASSERT(phi->numDefs() == 1);
            uint32_t vreg = phi->getDef(0)->virtualRegister();
            virtualRegisters[vreg] = phi->getDef(0);
            if (!info.outputs.append(*phi->getDef(0)))
                return false;
            for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
                if (!info.inputs.append(*phi->getOperand(k)))
                    return false;
            }
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            InstructionInfo& info = instructions[ins->id()];

            for (size_t k = 0; k < ins->numTemps(); k++) {
                if (!ins->getTemp(k)->isBogusTemp()) {
                    uint32_t vreg = ins->getTemp(k)->virtualRegister();
                    virtualRegisters[vreg] = ins->getTemp(k);
                }
                if (!info.temps.append(*ins->getTemp(k)))
                    return false;
            }
            for (size_t k = 0; k < ins->numDefs(); k++) {
                if (!ins->getDef(k)->isBogusTemp()) {
                    uint32_t vreg = ins->getDef(k)->virtualRegister();
                    virtualRegisters[vreg] = ins->getDef(k);
                }
                if (!info.outputs.append(*ins->getDef(k)))
                    return false;
            }
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                if (!info.inputs.append(**alloc))
                    return false;
            }
        }
    }

    return seen.init();
}
template <typename VREG>
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
    if (!init())
        return false;

    Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
    BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
    if (!loopDone)
        return false;

    for (size_t i = graph.numBlocks(); i > 0; i--) {
        if (mir->shouldCancel("Build Liveness Info (main loop)"))
            return false;

        LBlock *block = graph.getBlock(i - 1);
        MBasicBlock *mblock = block->mir();

        BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
        if (!live)
            return false;
        liveIn[mblock->id()] = live;

        // Propagate liveIn from our successors to us.
        for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
            MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
            // Skip backedges, as we fix them up at the loop header.
            if (mblock->id() < successor->id())
                live->insertAll(liveIn[successor->id()]);
        }

        // Add successor phis.
        if (mblock->successorWithPhis()) {
            LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
            for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
                LPhi *phi = phiSuccessor->getPhi(j);
                LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
                uint32_t reg = use->toUse()->virtualRegister();
                live->insert(reg);
            }
        }

        // Variables are assumed alive for the entire block; a define shortens
        // the interval to the point of definition.
        for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
            if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                                  outputOf(block->lastId()).next()))
            {
                return false;
            }
        }

        // Shorten the front end of live intervals for live variables to their
        // point of definition, if found.
        for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
            // Calls may clobber registers, so force a spill and reload around the callsite.
            if (ins->isCall()) {
                for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
                    if (forLSRA) {
                        if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
                            return false;
                    } else {
                        bool found = false;
                        for (size_t i = 0; i < ins->numDefs(); i++) {
                            if (ins->getDef(i)->isPreset() &&
                                *ins->getDef(i)->output() == LAllocation(*iter))
                            {
                                found = true;
                                break;
                            }
                        }
                        if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
                            return false;
                    }
                }
            }

            for (size_t i = 0; i < ins->numDefs(); i++) {
                if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
                    LDefinition *def = ins->getDef(i);

                    CodePosition from;
                    if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
                        // The fixed range covers the current instruction so the
                        // interval for the virtual register starts at the next
                        // instruction. If the next instruction has a fixed use,
                        // this can lead to unnecessary register moves. To avoid
                        // special handling for this, assert the next instruction
                        // has no fixed uses. defineFixed guarantees this by inserting
                        // an LNop.
                        JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
                        AnyRegister reg = def->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
                            return false;
                        from = outputOf(*ins).next();
                    } else {
                        from = forLSRA ? inputOf(*ins) : outputOf(*ins);
                    }

                    if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
                        // MUST_REUSE_INPUT is implemented by allocating an output
                        // register and moving the input to it. Register hints are
                        // used to avoid unnecessary moves. We give the input an
                        // LUse::ANY policy to avoid allocating a register for the
                        // input.
                        LUse *inputUse = ins->getOperand(def->getReusedInput())->toUse();
                        JS_ASSERT(inputUse->policy() == LUse::REGISTER);
                        JS_ASSERT(inputUse->usedAtStart());
                        *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
                    }

                    LiveInterval *interval = vregs[def].getInterval(0);
                    interval->setFrom(from);

                    // Ensure that if there aren't any uses, there's at least
                    // some interval for the output to go into.
                    if (interval->numRanges() == 0) {
                        if (!interval->addRangeAtHead(from, from.next()))
                            return false;
                    }
                    live->remove(def->virtualRegister());
                }
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition *temp = ins->getTemp(i);
                if (temp->isBogusTemp())
                    continue;

                if (forLSRA) {
                    if (temp->policy() == LDefinition::PRESET) {
                        if (ins->isCall())
                            continue;
                        AnyRegister reg = temp->output()->toRegister();
                        if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                            return false;

                        // Fixed intervals are not added to safepoints, so do it
                        // here.
                        if (LSafepoint *safepoint = ins->safepoint())
                            AddRegisterToSafepoint(safepoint, reg, *temp);
                    } else {
                        JS_ASSERT(!ins->isCall());
                        if (!vregs[temp].getInterval(0)->addRangeAtHead(inputOf(*ins), outputOf(*ins)))
                            return false;
                    }
                } else {
                    // Normally temps are considered to cover both the input
                    // and output of the associated instruction. In some cases
                    // though we want to use a fixed register as both an input
                    // and clobbered register in the instruction, so watch for
                    // this and shorten the temp to cover only the output.
                    CodePosition from = inputOf(*ins);
                    if (temp->policy() == LDefinition::PRESET) {
                        AnyRegister reg = temp->output()->toRegister();
                        for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
                            if (alloc->isUse()) {
                                LUse *use = alloc->toUse();
                                if (use->isFixedRegister()) {
                                    if (GetFixedRegister(vregs[use].def(), use) == reg)
                                        from = outputOf(*ins);
                                }
                            }
                        }
                    }

                    CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
                    if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
                        return false;
                }
            }

            DebugOnly<bool> hasUseRegister = false;
            DebugOnly<bool> hasUseRegisterAtStart = false;

            for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
                if (inputAlloc->isUse()) {
                    LUse *use = inputAlloc->toUse();

                    // The first instruction, LLabel, has no uses.
                    JS_ASSERT(inputOf(*ins) > outputOf(block->firstId()));

                    // Call uses should always be at-start or fixed, since the fixed intervals
                    // use all registers.
                    JS_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                                 use->isFixedRegister() || use->usedAtStart());

#ifdef DEBUG
                    // Don't allow at-start call uses if there are temps of the same kind,
                    // so that we don't assign the same register.
                    if (ins->isCall() && use->usedAtStart()) {
                        for (size_t i = 0; i < ins->numTemps(); i++)
                            JS_ASSERT(vregs[ins->getTemp(i)].isDouble() != vregs[use].isDouble());
                    }

                    // If there are both useRegisterAtStart(x) and useRegister(y)
                    // uses, we may assign the same register to both operands due to
                    // interval splitting (bug 772830). Don't allow this for now.
                    if (use->policy() == LUse::REGISTER) {
                        if (use->usedAtStart()) {
                            if (!IsInputReused(*ins, use))
                                hasUseRegisterAtStart = true;
                        } else {
                            hasUseRegister = true;
                        }
                    }
                    JS_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

                    // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
                    if (use->policy() == LUse::RECOVERED_INPUT)
                        continue;

                    CodePosition to;
                    if (forLSRA) {
                        if (use->isFixedRegister()) {
                            AnyRegister reg = GetFixedRegister(vregs[use].def(), use);
                            if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins)))
                                return false;
                            to = inputOf(*ins);

                            // Fixed intervals are not added to safepoints, so do it
                            // here.
                            LSafepoint *safepoint = ins->safepoint();
                            if (!ins->isCall() && safepoint)
                                AddRegisterToSafepoint(safepoint, reg, *vregs[use].def());
                        } else {
                            to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
                        }
                    } else {
                        to = (use->usedAtStart() || ins->isCall()) ? inputOf(*ins) : outputOf(*ins);
                        if (use->isFixedRegister()) {
                            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
                            for (size_t i = 0; i < ins->numDefs(); i++) {
                                LDefinition *def = ins->getDef(i);
                                if (def->policy() == LDefinition::PRESET && *def->output() == reg)
                                    to = inputOf(*ins);
                            }
                        }
                    }

                    LiveInterval *interval = vregs[use].getInterval(0);
                    if (!interval->addRangeAtHead(inputOf(block->firstId()), forLSRA ? to : to.next()))
                        return false;
                    interval->addUse(new(alloc()) UsePosition(use, to));

                    live->insert(use->virtualRegister());
                }
            }
        }

        // Phis have simultaneous assignment semantics at block begin, so at
        // the beginning of the block we can be sure that liveIn does not
        // contain any phi outputs.
        for (unsigned int i = 0; i < block->numPhis(); i++) {
            LDefinition *def = block->getPhi(i)->getDef(0);
            if (live->contains(def->virtualRegister())) {
                live->remove(def->virtualRegister());
            } else {
                // This is a dead phi, so add a dummy range over all phis. This
                // can go away if we have an earlier dead code elimination pass.
                if (!vregs[def].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
                                                               outputOf(block->firstId())))
                {
                    return false;
                }
            }
        }

        if (mblock->isLoopHeader()) {
            // A divergence from the published algorithm is required here, as
            // our block order does not guarantee that blocks of a loop are
            // contiguous. As a result, a single live interval spanning the
            // loop is not possible. Additionally, we require liveIn in a later
            // pass for resolution, so that must also be fixed up here.
            MBasicBlock *loopBlock = mblock->backedge();
            while (true) {
                // Blocks must already have been visited to have a liveIn set.
                JS_ASSERT(loopBlock->id() >= mblock->id());

                // Add an interval for this entire loop block.
                CodePosition from = inputOf(loopBlock->lir()->firstId());
                CodePosition to = outputOf(loopBlock->lir()->lastId()).next();

                for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
                    if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
                        return false;
                }

                // Fix up the liveIn set to account for the new interval.
                liveIn[loopBlock->id()]->insertAll(live);

                // Make sure we don't visit this node again.
                loopDone->insert(loopBlock->id());

                // If this is the loop header, any predecessors are either the
                // backedge or out of the loop, so skip any predecessors of
                // this block.
                if (loopBlock != mblock) {
                    for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
                        MBasicBlock *pred = loopBlock->getPredecessor(i);
                        if (loopDone->contains(pred->id()))
                            continue;
                        if (!loopWorkList.append(pred))
                            return false;
                    }
                }

                // Terminate loop if out of work.
                if (loopWorkList.empty())
                    break;

                // Grab the next block off the work list, skipping any OSR block.
                while (!loopWorkList.empty()) {
                    loopBlock = loopWorkList.popCopy();
                    if (loopBlock->lir() != graph.osrBlock())
                        break;
                }

                // If end is reached without finding a non-OSR block, then no
                // more work items were found.
                if (loopBlock->lir() == graph.osrBlock()) {
                    JS_ASSERT(loopWorkList.empty());
                    break;
                }
            }

            // Clear the done set for other loops.
            loopDone->clear();
        }

        JS_ASSERT_IF(!mblock->numPredecessors(), live->empty());
    }

    validateVirtualRegisters();

    // If the script has an infinite loop, there may be no MReturn and therefore
    // no fixed intervals. Add a small range to fixedIntervalsUnion so that the
    // rest of the allocator can assume it has at least one range.
    if (fixedIntervalsUnion->numRanges() == 0) {
        if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
                                                 CodePosition(0, CodePosition::OUTPUT)))
        {
            return false;
        }
    }

    return true;
}
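// Hedged sketch of the backward per-block liveness scheme buildLivenessInfo
// follows: live-out is seeded from the successors' live-in, a definition
// kills a vreg, and a use gates it back in while scanning instructions in
// reverse. Toy data structures only; real vregs, phis, and loop fixups are
// omitted.
#include <cassert>
#include <set>
#include <vector>

struct Ins {
    std::vector<int> uses;
    int def;  // -1 if the instruction defines nothing
};

// Compute live-in for one block from its live-out set, scanning backwards.
static std::set<int> liveIn(const std::vector<Ins>& block, std::set<int> live) {
    for (auto it = block.rbegin(); it != block.rend(); ++it) {
        if (it->def >= 0)
            live.erase(it->def);   // a define shortens the range upwards
        for (int u : it->uses)
            live.insert(u);        // a use extends the range upwards
    }
    return live;
}

int main() {
    // v1 = use(v0); use(v1) -- with nothing live out of the block.
    std::vector<Ins> block = {{{0}, 1}, {{1}, -1}};
    std::set<int> in = liveIn(block, {});
    assert(in.count(0) == 1);  // v0 is live into the block
    assert(in.count(1) == 0);  // v1 is defined locally, so not live in
    return 0;
}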
bool
GreedyAllocator::allocateRegisters()
{
    // Allocate registers bottom-up, such that we see all uses before their
    // definitions.
    for (size_t i = graph.numBlocks() - 1; i < graph.numBlocks(); i--) {
        LBlock *block = graph.getBlock(i);

        IonSpew(IonSpew_RegAlloc, "Allocating block %d", (uint32)i);

        // All registers should be free.
        JS_ASSERT(state.free == RegisterSet::All());

        // Allocate stack for any phis.
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            VirtualRegister *vreg = getVirtualRegister(phi->getDef(0));
            allocateStack(vreg);
        }

        // Allocate registers.
        if (!allocateRegistersInBlock(block))
            return false;

        LMoveGroup *entrySpills = block->getEntryMoveGroup();

        // We've reached the top of the block. Spill all registers by inserting
        // moves from their stack locations.
        for (AnyRegisterIterator iter(RegisterSet::All()); iter.more(); iter++) {
            VirtualRegister *vreg = state[*iter];
            if (!vreg) {
                JS_ASSERT(state.free.has(*iter));
                continue;
            }

            JS_ASSERT(vreg->reg() == *iter);
            JS_ASSERT(!state.free.has(vreg->reg()));
            allocateStack(vreg);

            LAllocation *from = LAllocation::New(vreg->backingStack());
            LAllocation *to = LAllocation::New(vreg->reg());
            if (!entrySpills->add(from, to))
                return false;

            killReg(vreg);
            vreg->unsetRegister();
        }

        // Before killing phis, ensure that each phi input has its own stack
        // allocation. This ensures we won't allocate the same slot for any phi
        // as its input, which technically may be legal (since the phi becomes
        // the last use of the slot), but we avoid for sanity.
        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi *phi = block->getPhi(i);
            for (size_t j = 0; j < phi->numOperands(); j++) {
                VirtualRegister *in = getVirtualRegister(phi->getOperand(j)->toUse());
                allocateStack(in);
            }
        }

        // Kill phis.
        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi *phi = block->getPhi(i);
            VirtualRegister *vr = getVirtualRegister(phi->getDef(0));
            JS_ASSERT(!vr->hasRegister());
            killStack(vr);
        }
    }
    return true;
}