// This analysis converts patterns of the form:
//   truncate(x + (y << {0,1,2,3}))
//   truncate(x + (y << {0,1,2,3}) + imm32)
// into a single lea instruction, and patterns of the form:
//   asmload(x + imm32)
//   asmload(x << {0,1,2,3})
//   asmload((x << {0,1,2,3}) + imm32)
//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
// into a single asmload instruction (and for asmstore too).
//
// Additionally, we should consider the general forms:
//   truncate(x + y + imm32)
//   truncate((y << {0,1,2,3}) + imm32)
bool
EffectiveAddressAnalysis::analyze()
{
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            // Note that we don't check for MAsmJSCompareExchangeHeap
            // or MAsmJSAtomicBinopHeap, because the backend and the OOB
            // mechanism don't support non-zero offsets for them yet.
            if (i->isLsh())
                AnalyzeLsh(graph_.alloc(), i->toLsh());
            else if (i->isAsmJSLoadHeap())
                analyzeAsmHeapAccess(i->toAsmJSLoadHeap());
            else if (i->isAsmJSStoreHeap())
                analyzeAsmHeapAccess(i->toAsmJSStoreHeap());
        }
    }
    return true;
}
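
// Illustrative sketch of the first transformation described above (not part
// of the pass; the MIR is spelled informally and the x64 register names are
// arbitrary):
//
//   before:   t0 = lsh y, 2
//             t1 = add x, t0
//             t2 = add t1, 16
//             truncate(t2)
//
//   after:    a single effective-address computation of x + y*4 + 16, which
//             the x86/x64 backend can emit as one instruction, e.g.
//             lea eax, [rdi + rsi*4 + 16]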
bool
AlignmentMaskAnalysis::analyze()
{
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph_.alloc().ensureBallast())
                return false;

            // Note that we don't check for MAsmJSCompareExchangeHeap
            // or MAsmJSAtomicBinopHeap, because the backend and the OOB
            // mechanism don't support non-zero offsets for them yet.
            if (i->isAsmJSLoadHeap())
                AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
            else if (i->isAsmJSStoreHeap())
                AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
        }
    }
    return true;
}
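
// Hedged sketch of the kind of redundancy AlignmentMaskAnalysis is after
// (an assumption based on the pass name; AnalyzeAsmHeapAddress is defined
// elsewhere and the constants here are illustrative): consecutive heap
// accesses whose addresses are each masked for alignment, e.g.
//
//   asmload((a + 4) & mask)
//   asmload((a + 8) & mask)
//
// can instead share a single masked base, roughly (a & mask) + 4 and
// (a & mask) + 8, so that later passes such as GVN can recognize the common
// subexpression and the backend can fold the small offsets into the accesses.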