// Assign allocations to every use operand of |ins|. Runs two passes so
// that constrained uses are satisfied before flexible ones: pass one
// handles FIXED and REGISTER policies, pass two lets the remainder take
// either a register or a memory location. Returns false on failure.
bool
GreedyAllocator::allocateInputs(LInstruction *ins)
{
    // Pass 1: fixed-register policies and policies that require a register.
    for (size_t opIndex = 0; opIndex < ins->numOperands(); opIndex++) {
        LAllocation *operand = ins->getOperand(opIndex);
        if (!operand->isUse())
            continue;
        LUse *use = operand->toUse();
        VirtualRegister *vreg = getVirtualRegister(use);

        bool ok = true;
        switch (use->policy()) {
          case LUse::FIXED:
            ok = allocateFixedOperand(operand, vreg);
            break;
          case LUse::REGISTER:
            ok = allocateRegisterOperand(operand, vreg);
            break;
          default:
            // Flexible policies are handled in the second pass.
            break;
        }
        if (!ok)
            return false;
    }

    // Pass 2: uses that can live in either a register or memory.
    for (size_t opIndex = 0; opIndex < ins->numOperands(); opIndex++) {
        LAllocation *operand = ins->getOperand(opIndex);
        if (!operand->isUse())
            continue;
        if (!allocateAnyOperand(operand, getVirtualRegister(operand->toUse())))
            return false;
    }

    return true;
}
// Rewrite every use entry of |ins|'s snapshot to refer to the virtual
// register's backing stack slot, allocating that slot if needed.
void
GreedyAllocator::informSnapshot(LInstruction *ins)
{
    LSnapshot *snapshot = ins->snapshot();
    for (size_t entry = 0; entry < snapshot->numEntries(); entry++) {
        LAllocation *slot = snapshot->getEntry(entry);
        if (!slot->isUse())
            continue;

        // Every definition in a snapshot gets a stack slot. This
        // simplification means we can treat normal snapshots and LOsiPoint
        // snapshots (which follow calls) the same, without adding a special
        // exception to note that registers are spilled at the LOsiPoint.
        VirtualRegister *vreg = getVirtualRegister(slot->toUse());
        allocateStack(vreg);
        *slot = vreg->backingStack();
    }
}
// Scan the use operands of |ins| to pre-populate the register constraint
// sets: registers demanded by FIXED uses become disallowed, and registers
// currently held by other uses become discouraged. Always returns true.
bool
GreedyAllocator::prescanUses(LInstruction *ins)
{
    for (size_t opIndex = 0; opIndex < ins->numOperands(); opIndex++) {
        LAllocation *operand = ins->getOperand(opIndex);

        if (!operand->isUse()) {
            // Anything that is not a use must be a constant here.
            JS_ASSERT(operand->isConstant());
            continue;
        }

        LUse *use = operand->toUse();
        VirtualRegister *vreg = getVirtualRegister(use);

        if (use->policy() != LUse::FIXED) {
            if (vreg->hasRegister())
                discouraged.addUnchecked(vreg->reg());
            continue;
        }

        // A def or temp may use the same register, so we have to use the
        // unchecked version.
        disallowed.addUnchecked(GetFixedRegister(vreg->def, use));
    }
    return true;
}
bool AllocationIntegrityState::check(bool populateSafepoints) { MOZ_ASSERT(!instructions.empty()); #ifdef DEBUG if (JitSpewEnabled(JitSpew_RegAlloc)) dump(); for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) { LBlock* block = graph.getBlock(blockIndex); // Check that all instruction inputs and outputs have been assigned an allocation. for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { LInstruction* ins = *iter; for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) MOZ_ASSERT(!alloc->isUse()); for (size_t i = 0; i < ins->numDefs(); i++) { LDefinition* def = ins->getDef(i); MOZ_ASSERT(!def->output()->isUse()); LDefinition oldDef = instructions[ins->id()].outputs[i]; MOZ_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT, *def->output() == *ins->getOperand(oldDef.getReusedInput())); } for (size_t i = 0; i < ins->numTemps(); i++) { LDefinition* temp = ins->getTemp(i); MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister()); LDefinition oldTemp = instructions[ins->id()].temps[i]; MOZ_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT, *temp->output() == *ins->getOperand(oldTemp.getReusedInput())); } } } #endif // Check that the register assignment and move groups preserve the original // semantics of the virtual registers. Each virtual register has a single // write (owing to the SSA representation), but the allocation may move the // written value around between registers and memory locations along // different paths through the script. // // For each use of an allocation, follow the physical value which is read // backward through the script, along all paths to the value's virtual // register's definition. 
for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) { LBlock* block = graph.getBlock(blockIndex); for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) { LInstruction* ins = *iter; const InstructionInfo& info = instructions[ins->id()]; LSafepoint* safepoint = ins->safepoint(); if (safepoint) { for (size_t i = 0; i < ins->numTemps(); i++) { if (ins->getTemp(i)->isBogusTemp()) continue; uint32_t vreg = info.temps[i].virtualRegister(); LAllocation* alloc = ins->getTemp(i)->output(); if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints)) return false; } MOZ_ASSERT_IF(ins->isCall() && !populateSafepoints, safepoint->liveRegs().emptyFloat() && safepoint->liveRegs().emptyGeneral()); } size_t inputIndex = 0; for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) { LAllocation oldInput = info.inputs[inputIndex++]; if (!oldInput.isUse()) continue; uint32_t vreg = oldInput.toUse()->virtualRegister(); if (safepoint && !oldInput.toUse()->usedAtStart()) { if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints)) return false; } // Start checking at the previous instruction, in case this // instruction reuses its input register for an output. LInstructionReverseIterator riter = block->rbegin(ins); riter++; checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints); while (!worklist.empty()) { IntegrityItem item = worklist.popCopy(); checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc, populateSafepoints); } } } } return true; }