// Transform: // // [AddI] // addl $9, %esi // [LoadUnboxedScalar] // movsd 0x0(%rbx,%rsi,8), %xmm4 // // into: // // [LoadUnboxedScalar] // movsd 0x48(%rbx,%rsi,8), %xmm4 // // This is possible when the AddI is only used by the LoadUnboxedScalar opcode. static void AnalyzeLoadUnboxedScalar(TempAllocator& alloc, MLoadUnboxedScalar* load) { if (load->isRecoveredOnBailout()) return; if (!load->getOperand(1)->isAdd()) return; JitSpew(JitSpew_EAA, "analyze: %s%u", load->opName(), load->id()); MAdd* add = load->getOperand(1)->toAdd(); if (add->specialization() != MIRType::Int32 || !add->hasUses() || add->truncateKind() != MDefinition::TruncateKind::Truncate) { return; } MDefinition* lhs = add->lhs(); MDefinition* rhs = add->rhs(); MDefinition* constant = nullptr; MDefinition* node = nullptr; if (lhs->isConstant()) { constant = lhs; node = rhs; } else if (rhs->isConstant()) { constant = rhs; node = lhs; } else return; MOZ_ASSERT(constant->type() == MIRType::Int32); size_t storageSize = Scalar::byteSize(load->storageType()); int32_t c1 = load->offsetAdjustment(); int32_t c2 = 0; if (!SafeMul(constant->maybeConstantValue()->toInt32(), storageSize, &c2)) return; int32_t offset = 0; if (!SafeAdd(c1, c2, &offset)) return; JitSpew(JitSpew_EAA, "set offset: %d + %d = %d on: %s%u", c1, c2, offset, load->opName(), load->id()); load->setOffsetAdjustment(offset); load->replaceOperand(1, node); if (!add->hasLiveDefUses() && DeadIfUnused(add) && add->canRecoverOnBailout()) { JitSpew(JitSpew_EAA, "mark as recovered on bailout: %s%u", add->opName(), add->id()); add->setRecoveredOnBailoutUnchecked(); } }
// Try to collapse a left-shift of an int32 index, plus the chain of
// truncated int32 adds consuming it, into a single MEffectiveAddress node
// (base + index*scale + displacement) so the backend can use a scaled-index
// addressing mode.  If no base pointer is found, instead try to remove a
// bitwise-and whose mask only clears bits that are already known zero.
static void AnalyzeLsh(TempAllocator& alloc, MLsh* lsh) {
  if (lsh->specialization() != MIRType::Int32) return;

  if (lsh->isRecoveredOnBailout()) return;

  MDefinition* index = lsh->lhs();
  MOZ_ASSERT(index->type() == MIRType::Int32);

  // The shift amount must be a constant that maps onto a hardware scale
  // (see IsShiftInScaleRange / ShiftToScale).
  MConstant* shiftValue = lsh->rhs()->maybeConstantValue();
  if (!shiftValue) return;

  if (shiftValue->type() != MIRType::Int32 ||
      !IsShiftInScaleRange(shiftValue->toInt32()))
    return;

  Scale scale = ShiftToScale(shiftValue->toInt32());

  int32_t displacement = 0;
  MInstruction* last = lsh;
  MDefinition* base = nullptr;

  // Walk up the chain of single-use, truncated int32 adds consuming the
  // shift: constant operands accumulate into `displacement`; at most one
  // non-constant operand is remembered as the base pointer.
  while (true) {
    if (!last->hasOneUse()) break;

    MUseIterator use = last->usesBegin();
    if (!use->consumer()->isDefinition() ||
        !use->consumer()->toDefinition()->isAdd())
      break;

    MAdd* add = use->consumer()->toDefinition()->toAdd();
    if (add->specialization() != MIRType::Int32 || !add->isTruncated()) break;

    // The operand of the add that is not `last`.
    MDefinition* other = add->getOperand(1 - add->indexOf(*use));

    if (MConstant* otherConst = other->maybeConstantValue()) {
      displacement += otherConst->toInt32();
    } else {
      // A second non-constant operand would not fit base+index*scale.
      if (base) break;
      base = other;
    }

    last = add;
    if (last->isRecoveredOnBailout()) return;
  }

  if (!base) {
    // No base pointer found.  We may still simplify a following bitwise-and:
    // the value `last` computes is index*elemSize + displacement; since
    // displacement is a multiple of elemSize, its low log2(elemSize) bits
    // are zero.  If the and-mask only clears those already-zero bits, the
    // and is a no-op and its uses can be redirected to `last`.
    uint32_t elemSize = 1 << ScaleToShift(scale);
    if (displacement % elemSize != 0) return;

    if (!last->hasOneUse()) return;

    MUseIterator use = last->usesBegin();
    if (!use->consumer()->isDefinition() ||
        !use->consumer()->toDefinition()->isBitAnd())
      return;

    MBitAnd* bitAnd = use->consumer()->toDefinition()->toBitAnd();
    if (bitAnd->isRecoveredOnBailout()) return;

    MDefinition* other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
    MConstant* otherConst = other->maybeConstantValue();
    if (!otherConst || otherConst->type() != MIRType::Int32) return;

    // Every bit the mask clears must be among the bits the shift already
    // guarantees are zero.
    uint32_t bitsClearedByShift = elemSize - 1;
    uint32_t bitsClearedByMask = ~uint32_t(otherConst->toInt32());
    if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask) return;

    bitAnd->replaceAllUsesWith(last);
    return;
  }

  if (base->isRecoveredOnBailout()) return;

  // Replace the whole shift/add chain with one effective-address node,
  // inserted right after the last instruction of the chain.
  MEffectiveAddress* eaddr =
      MEffectiveAddress::New(alloc, base, index, scale, displacement);
  last->replaceAllUsesWith(eaddr);
  last->block()->insertAfter(last, eaddr);
}
// Legacy variant of AnalyzeLsh written against the older MIR API
// (MIRType_Int32, JS_ASSERT, Value-based constants, allocator-less
// MEffectiveAddress::New).  Same transform as the TempAllocator overload:
// collapse a shift plus a chain of truncated int32 adds into a single
// MEffectiveAddress, or drop a redundant bitwise-and when no base is found.
// NOTE(review): this duplicates the overload above — presumably only one of
// the two is compiled in a given tree; confirm which one is live.
static void AnalyzeLsh(MBasicBlock *block, MLsh *lsh)
{
    if (lsh->specialization() != MIRType_Int32)
        return;

    MDefinition *index = lsh->lhs();
    JS_ASSERT(index->type() == MIRType_Int32);

    // The shift amount must be a constant int32 in hardware-scale range.
    MDefinition *shift = lsh->rhs();
    if (!shift->isConstant())
        return;

    Value shiftValue = shift->toConstant()->value();
    if (!shiftValue.isInt32() || !IsShiftInScaleRange(shiftValue.toInt32()))
        return;

    Scale scale = ShiftToScale(shiftValue.toInt32());

    int32_t displacement = 0;
    MInstruction *last = lsh;
    MDefinition *base = nullptr;

    // Walk up the chain of single-use, truncated int32 adds: constants
    // accumulate into the displacement, and at most one non-constant
    // operand becomes the base pointer.
    while (true) {
        if (!last->hasOneUse())
            break;

        MUseIterator use = last->usesBegin();
        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isAdd())
            break;

        MAdd *add = use->consumer()->toDefinition()->toAdd();
        if (add->specialization() != MIRType_Int32 || !add->isTruncated())
            break;

        // The add operand that is not `last`.
        // NOTE(review): here `use->index()` is assumed to be the operand
        // index within the consumer (the newer overload uses
        // add->indexOf(*use) for this) — confirm against this tree's MUse.
        MDefinition *other = add->getOperand(1 - use->index());

        if (other->isConstant()) {
            displacement += other->toConstant()->value().toInt32();
        } else {
            // Two non-constant operands cannot form base+index*scale.
            if (base)
                break;
            base = other;
        }

        last = add;
    }

    if (!base) {
        // No base pointer found.  The chain computes index*elemSize plus a
        // displacement that is a multiple of elemSize, so its low
        // log2(elemSize) bits are zero; a following bitwise-and whose mask
        // only clears those bits is a no-op and can be bypassed.
        uint32_t elemSize = 1 << ScaleToShift(scale);
        if (displacement % elemSize != 0)
            return;

        if (!last->hasOneUse())
            return;

        MUseIterator use = last->usesBegin();
        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isBitAnd())
            return;

        MBitAnd *bitAnd = use->consumer()->toDefinition()->toBitAnd();
        MDefinition *other = bitAnd->getOperand(1 - use->index());
        if (!other->isConstant() || !other->toConstant()->value().isInt32())
            return;

        // Every bit the mask clears must already be zero in the chain's
        // result.
        uint32_t bitsClearedByShift = elemSize - 1;
        uint32_t bitsClearedByMask = ~uint32_t(other->toConstant()->value().toInt32());
        if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask)
            return;

        bitAnd->replaceAllUsesWith(last);
        return;
    }

    // Replace the whole shift/add chain with one effective-address node.
    MEffectiveAddress *eaddr = MEffectiveAddress::New(base, index, scale, displacement);
    last->replaceAllUsesWith(eaddr);
    block->insertAfter(last, eaddr);
}