void
StupidAllocator::syncForBlockEnd(LBlock *block, LInstruction *ins)
{
    // Sync any dirty registers, and update the synced state for phi nodes at
    // each successor of a block. We cannot conflate the storage for phis with
    // that of their inputs, as we cannot prove the live ranges of the phi and
    // its input do not overlap. The values for the two may additionally be
    // different, as the phi could be for the value of the input in a previous
    // loop iteration.

    for (size_t i = 0; i < registerCount; i++)
        syncRegister(ins, i);

    LMoveGroup *group = nullptr;

    MBasicBlock *successor = block->mir()->successorWithPhis();
    if (successor) {
        uint32_t position = block->mir()->positionInPhiSuccessor();
        LBlock *lirsuccessor = graph.getBlock(successor->id());
        for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
            LPhi *phi = lirsuccessor->getPhi(i);

            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
            uint32_t destvreg = phi->getDef(0)->virtualRegister();

            if (sourcevreg == destvreg)
                continue;

            LAllocation *source = stackLocation(sourcevreg);
            LAllocation *dest = stackLocation(destvreg);

            if (!group) {
                // The moves we insert here need to happen simultaneously with
                // each other, yet after any existing moves before the
                // instruction.
                LMoveGroup *input = getInputMoveGroup(ins->id());
                if (input->numMoves() == 0) {
                    group = input;
                } else {
                    group = new LMoveGroup(alloc());
                    block->insertAfter(input, group);
                }
            }

            group->add(source, dest);
        }
    }
}
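// A minimal standalone sketch (not allocator code; the names below are
// illustrative assumptions) of why the moves added to a single group above
// must resolve simultaneously. With phis that exchange values across a loop
// back edge, applying the moves one at a time clobbers a source before it is
// read; a parallel move snapshots every source before writing any
// destination.
static void
sequentialSwap(int &slotA, int &slotB)
{
    slotA = slotB;  // slotA's old value is lost here...
    slotB = slotA;  // ...so this copies the wrong value back.
}

static void
parallelSwap(int &slotA, int &slotB)
{
    // Read all sources first, as a move group resolver must, then write.
    int fromA = slotA;
    int fromB = slotB;
    slotA = fromB;
    slotB = fromA;
}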
bool
GreedyAllocator::allocateRegisters()
{
    // Allocate registers bottom-up, such that we see all uses before their
    // definitions. Note the reverse iteration: decrementing the unsigned
    // index past zero wraps it to a huge value, so the `i < numBlocks()`
    // condition terminates the loop.
    for (size_t i = graph.numBlocks() - 1; i < graph.numBlocks(); i--) {
        LBlock *block = graph.getBlock(i);

        IonSpew(IonSpew_RegAlloc, "Allocating block %d", (uint32_t)i);

        // All registers should be free.
        JS_ASSERT(state.free == RegisterSet::All());

        // Allocate stack for any phis.
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            VirtualRegister *vreg = getVirtualRegister(phi->getDef(0));
            allocateStack(vreg);
        }

        // Allocate registers.
        if (!allocateRegistersInBlock(block))
            return false;

        LMoveGroup *entrySpills = block->getEntryMoveGroup();

        // We've reached the top of the block. Spill all registers by
        // inserting moves from their stack locations.
        for (AnyRegisterIterator iter(RegisterSet::All()); iter.more(); iter++) {
            VirtualRegister *vreg = state[*iter];
            if (!vreg) {
                JS_ASSERT(state.free.has(*iter));
                continue;
            }

            JS_ASSERT(vreg->reg() == *iter);
            JS_ASSERT(!state.free.has(vreg->reg()));
            allocateStack(vreg);

            LAllocation *from = LAllocation::New(vreg->backingStack());
            LAllocation *to = LAllocation::New(vreg->reg());

            if (!entrySpills->add(from, to))
                return false;

            killReg(vreg);
            vreg->unsetRegister();
        }

        // Before killing phis, ensure that each phi input has its own stack
        // allocation. This ensures we won't allocate the same slot for any
        // phi as its input, which technically may be legal (since the phi
        // becomes the last use of the slot), but which we avoid for sanity.
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            for (size_t k = 0; k < phi->numOperands(); k++) {
                VirtualRegister *in = getVirtualRegister(phi->getOperand(k)->toUse());
                allocateStack(in);
            }
        }

        // Kill phis.
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi *phi = block->getPhi(j);
            VirtualRegister *vr = getVirtualRegister(phi->getDef(0));
            JS_ASSERT(!vr->hasRegister());
            killStack(vr);
        }
    }
    return true;
}
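// A self-contained sketch (not allocator code; visitBottomUp is a
// hypothetical name) of the unsigned reverse-iteration idiom used by the
// outer loop above. When a size_t index at zero is decremented it wraps to
// SIZE_MAX, making `i < count` false and ending the loop. This also handles
// count == 0: the initial `count - 1` already wraps, so the body never runs.
#include <cstddef>
#include <cstdio>

static void
visitBottomUp(size_t count)
{
    for (size_t i = count - 1; i < count; i--)
        printf("visiting block %zu\n", i);
}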