void
FrameState::assertValidRegisterState() const
{
    Registers checkedFreeRegs;

    FrameEntry *tos = tosFe();
    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        JS_ASSERT(i == fe->trackerIndex());
        JS_ASSERT_IF(fe->isCopy(),
                     fe->trackerIndex() > fe->copyOf()->trackerIndex());
        JS_ASSERT_IF(fe->isCopy(), !fe->type.inRegister() && !fe->data.inRegister());
        JS_ASSERT_IF(fe->isCopy(), fe->copyOf() < tos);
        JS_ASSERT_IF(fe->isCopy(), fe->copyOf()->isCopied());

        if (fe->isCopy())
            continue;
        if (fe->type.inRegister()) {
            checkedFreeRegs.takeReg(fe->type.reg());
            JS_ASSERT(regstate[fe->type.reg()].fe == fe);
        }
        if (fe->data.inRegister()) {
            checkedFreeRegs.takeReg(fe->data.reg());
            JS_ASSERT(regstate[fe->data.reg()].fe == fe);
        }
    }

    JS_ASSERT(checkedFreeRegs == freeRegs);
}
void
FrameState::syncAndKill(Registers kill, Uses uses, Uses ignore)
{
    /* Backwards, so we can allocate registers to backing slots better. */
    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    tos -= ignore.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);
        FrameEntry *backing = fe;
        if (fe->isCopy()) {
            if (!inTryBlock && fe < bottom)
                continue;
            backing = fe->copyOf();
        }

        JS_ASSERT_IF(i == 0, !fe->isCopy());

        bool killData = fe->data.inRegister() && kill.hasReg(fe->data.reg());
        if (!fe->data.synced() && (killData || fe >= bottom)) {
            if (backing != fe && backing->data.inMemory())
                tempRegForData(backing);
            syncData(backing, address, masm);
            fe->data.sync();
            if (fe->isConstant() && !fe->type.synced())
                fe->type.sync();
        }
        if (killData) {
            JS_ASSERT(backing == fe);
            JS_ASSERT(fe->data.synced());
            if (regstate[fe->data.reg()].fe)
                forgetReg(fe->data.reg());
            fe->data.setMemory();
        }

        bool killType = fe->type.inRegister() && kill.hasReg(fe->type.reg());
        if (!fe->type.synced() && (killType || fe >= bottom)) {
            if (backing != fe && backing->type.inMemory())
                tempRegForType(backing);
            syncType(backing, address, masm);
            fe->type.sync();
        }
        if (killType) {
            JS_ASSERT(backing == fe);
            JS_ASSERT(fe->type.synced());
            if (regstate[fe->type.reg()].fe)
                forgetReg(fe->type.reg());
            fe->type.setMemory();
        }
    }
}
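/*
 * A sketch of the common syncAndKill() case, using only the behavior visible
 * above: suppose an entry's payload sits in an unsynced register that is in
 * |kill|. The loop first stores the payload to addressOf(fe) and marks it
 * synced, then, because the register is being killed, calls forgetReg() and
 * flips the entry's data back to memory. An unsynced entry whose registers
 * are not in |kill| is only written back when it lies at or above |bottom|.
 */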
void
FrameState::pushCopyOf(uint32 index)
{
    FrameEntry *backing = entryFor(index);
    FrameEntry *fe = rawPush();
    fe->resetUnsynced();
    if (backing->isConstant()) {
        fe->setConstant(Jsvalify(backing->getValue()));
    } else {
        if (backing->isTypeKnown())
            fe->setType(backing->getKnownType());
        else
            fe->type.invalidate();
        fe->isNumber = backing->isNumber;
        fe->data.invalidate();
        if (backing->isCopy()) {
            backing = backing->copyOf();
            fe->setCopyOf(backing);
        } else {
            fe->setCopyOf(backing);
            backing->setCopied();
        }

        /* Maintain tracker ordering guarantees for copies. */
        JS_ASSERT(backing->isCopied());
        if (fe->trackerIndex() < backing->trackerIndex())
            swapInTracker(fe, backing);
    }
}
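/*
 * Worked example of the flattening above: if entry A is copied to B and the
 * bytecode then copies B again, the new entry is made a copy of A directly
 * (backing = backing->copyOf()), never of B. Copy chains therefore have
 * depth one, and the tracker-order assertion only needs to compare the new
 * copy against its single backing entry.
 */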
void
FrameState::merge(Assembler &masm, Changes changes) const
{
    FrameEntry *tos = tosFe();
    Registers temp(Registers::TempRegs);

    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        /* Copies do not have registers. */
        if (fe->isCopy()) {
            JS_ASSERT(!fe->data.inRegister());
            JS_ASSERT(!fe->type.inRegister());
            continue;
        }

        if (fe->data.inRegister() && fe->type.inRegister())
            masm.loadValueAsComponents(addressOf(fe), fe->type.reg(), fe->data.reg());
        else if (fe->data.inRegister())
            masm.loadPayload(addressOf(fe), fe->data.reg());
        else if (fe->type.inRegister())
            masm.loadTypeTag(addressOf(fe), fe->type.reg());
    }
}
void
FrameState::storeLocal(uint32 n, bool popGuaranteed, bool typeChange)
{
    FrameEntry *localFe = getLocal(n);
    bool cacheable = !eval && !escaping[n];

    if (!popGuaranteed && !cacheable) {
        JS_ASSERT_IF(base[localIndex(n)] && (!eval || n < script->nfixed),
                     entries[localIndex(n)].type.inMemory() &&
                     entries[localIndex(n)].data.inMemory());
        Address local(JSFrameReg, sizeof(JSStackFrame) + n * sizeof(Value));
        storeTo(peek(-1), local, false);
        forgetAllRegs(getLocal(n));
        localFe->resetSynced();
        return;
    }

    bool wasSynced = localFe->type.synced();

    /* Detect something like (x = x) which is a no-op. */
    FrameEntry *top = peek(-1);
    if (top->isCopy() && top->copyOf() == localFe) {
        JS_ASSERT(localFe->isCopied());
        return;
    }

    /* Completely invalidate the local variable. */
    if (localFe->isCopied()) {
        uncopy(localFe);
        if (!localFe->isCopied())
            forgetAllRegs(localFe);
    } else {
        forgetAllRegs(localFe);
    }

    localFe->resetUnsynced();

    /* Constants are easy to propagate. */
    if (top->isConstant()) {
        localFe->setCopyOf(NULL);
        localFe->setNotCopied();
        localFe->setConstant(Jsvalify(top->getValue()));
        return;
    }

    /*
     * When dealing with copies, there are two important invariants:
     *
     * 1) The backing store precedes all copies in the tracker.
     * 2) The backing store of a local is never a stack slot, UNLESS the local
     *    variable itself is a stack slot (blocks) that precedes the stack
     *    slot.
     *
     * If the top is a copy, and the second condition holds true, the local
     * can be rewritten as a copy of the original backing slot. If the first
     * condition does not hold, force it to hold by swapping in-place.
     */
    FrameEntry *backing = top;
    if (top->isCopy()) {
        backing = top->copyOf();
        JS_ASSERT(backing->trackerIndex() < top->trackerIndex());

        uint32 backingIndex = indexOfFe(backing);
        uint32 tol = uint32(spBase - base);
        if (backingIndex < tol || backingIndex < localIndex(n)) {
            /* local.idx < backing.idx means local cannot be a copy yet */
            if (localFe->trackerIndex() < backing->trackerIndex())
                swapInTracker(backing, localFe);
            localFe->setNotCopied();
            localFe->setCopyOf(backing);
            if (backing->isTypeKnown())
                localFe->setType(backing->getKnownType());
            else
                localFe->type.invalidate();
            localFe->data.invalidate();
            localFe->isNumber = backing->isNumber;
            return;
        }

        /*
         * If control flow lands here, then there was a bytecode sequence like
         *
         *  ENTERBLOCK 2
         *  GETLOCAL 1
         *  SETLOCAL 0
         *
         * The problem is slot N can't be backed by M if M could be popped
         * before N. We want a guarantee that when we pop M, even if it was
         * copied, it has no outstanding copies.
         *
         * Because of |let| expressions, it's kind of hard to really know
         * whether a region on the stack will be popped all at once. Bleh!
         *
         * This should be rare except in browser code (and maybe even then),
         * but even so there's a quick workaround. We take all copies of the
         * backing fe, and redirect them to be copies of the destination.
         */
        FrameEntry *tos = tosFe();
        for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
            FrameEntry *fe = tracker[i];
            if (fe >= tos)
                continue;
            if (fe->isCopy() && fe->copyOf() == backing)
                fe->setCopyOf(localFe);
        }
    }
    backing->setNotCopied();

    /*
     * This is valid from the top->isCopy() path because we're guaranteed a
     * consistent ordering - all copies of |backing| are tracked after
     * |backing|. Transitively, only one swap is needed.
     */
    if (backing->trackerIndex() < localFe->trackerIndex())
        swapInTracker(backing, localFe);

    /*
     * Move the backing store down - we spill registers here, but we could be
     * smarter and re-use the type reg.
     */
    RegisterID reg = tempRegForData(backing);
    localFe->data.setRegister(reg);
    moveOwnership(reg, localFe);

    if (typeChange) {
        if (backing->isTypeKnown()) {
            localFe->setType(backing->getKnownType());
        } else {
            RegisterID reg = tempRegForType(backing);
            localFe->type.setRegister(reg);
            moveOwnership(reg, localFe);
        }
    } else {
        if (!wasSynced)
            masm.storeTypeTag(ImmType(backing->getKnownType()), addressOf(localFe));
        localFe->type.setMemory();
    }

    if (!backing->isTypeKnown())
        backing->type.invalidate();
    backing->data.invalidate();
    backing->setCopyOf(localFe);
    backing->isNumber = localFe->isNumber;
    localFe->setCopied();

    if (!cacheable) {
        /* TODO: x64 optimization */
        if (!localFe->type.synced())
            syncType(localFe, addressOf(localFe), masm);
        if (!localFe->data.synced())
            syncData(localFe, addressOf(localFe), masm);
        forgetAllRegs(localFe);
        localFe->type.setMemory();
        localFe->data.setMemory();
    }

    JS_ASSERT(top->copyOf() == localFe);
}
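/*
 * Sketch of the fall-through path of storeLocal() above, assuming the value
 * being stored is a plain stack temporary (not a copy, not a constant): the
 * local entry takes over the temporary's data (and possibly type) register
 * via moveOwnership(), the temporary is rewritten as a copy of the local,
 * and the local is marked copied. A later pop of the temporary is then safe,
 * because the canonical value now lives in the local's entry, matching
 * invariant (1) above.
 */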
FrameEntry *
FrameState::uncopy(FrameEntry *original)
{
    JS_ASSERT(original->isCopied());

    /*
     * Copies have two critical invariants:
     *  1) The backing store precedes all copies in the tracker.
     *  2) The backing store of a copy cannot be popped from the stack
     *     while the copy is still live.
     *
     * Maintaining this invariant iteratively is kind of hard, so we choose
     * the "lowest" copy in the frame up-front.
     *
     * For example, if the stack is:
     *    [A, B, C, D]
     * And the tracker has:
     *    [A, D, C, B]
     *
     * If B, C, and D are copies of A - we will walk the tracker to the end
     * and select D, not B (see bug 583684).
     */
    uint32 firstCopy = InvalidIndex;
    FrameEntry *tos = tosFe();
    FrameEntry *bestFe = NULL;
    uint32 ncopies = 0;
    for (uint32 i = 0; i < tracker.nentries; i++) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;
        if (fe->isCopy() && fe->copyOf() == original) {
            if (firstCopy == InvalidIndex) {
                firstCopy = i;
                bestFe = fe;
            } else if (fe < bestFe) {
                bestFe = fe;
            }
            ncopies++;
        }
    }

    if (!ncopies) {
        JS_ASSERT(firstCopy == InvalidIndex);
        JS_ASSERT(!bestFe);
        original->copied = false;
        return NULL;
    }

    JS_ASSERT(firstCopy != InvalidIndex);
    JS_ASSERT(bestFe);

    /* Mark all extra copies as copies of the new backing index. */
    bestFe->setCopyOf(NULL);
    if (ncopies > 1) {
        bestFe->setCopied();
        for (uint32 i = firstCopy; i < tracker.nentries; i++) {
            FrameEntry *other = tracker[i];
            if (other >= tos || other == bestFe)
                continue;

            /* The original must be tracked before copies. */
            JS_ASSERT(other != original);

            if (!other->isCopy() || other->copyOf() != original)
                continue;

            other->setCopyOf(bestFe);

            /*
             * This is safe even though we're mutating during iteration. There
             * are two cases. The first is that both indexes are <= i, and the
             * swap will never be observed. The other case is we're placing
             * the other FE such that it will be observed later. Luckily,
             * copyOf() will return != original, so nothing will happen.
             */
            if (other->trackerIndex() < bestFe->trackerIndex())
                swapInTracker(bestFe, other);
        }
    } else {
        bestFe->setNotCopied();
    }

    FrameEntry *fe = bestFe;

    /*
     * Switch the new backing store to the old backing store. During
     * this process we also necessarily make sure the copy can be
     * synced.
     */
    if (!original->isTypeKnown()) {
        /*
         * If the copy is unsynced, and the original is in memory,
         * give the original a register. We do this below too; it's
         * okay if it's spilled.
         */
        if (original->type.inMemory() && !fe->type.synced())
            tempRegForType(original);
        fe->type.inherit(original->type);
        if (fe->type.inRegister())
            moveOwnership(fe->type.reg(), fe);
    } else {
        JS_ASSERT(fe->isTypeKnown());
        JS_ASSERT(fe->getKnownType() == original->getKnownType());
    }
    if (original->data.inMemory() && !fe->data.synced())
        tempRegForData(original);
    fe->data.inherit(original->data);
    if (fe->data.inRegister())
        moveOwnership(fe->data.reg(), fe);

    return fe;
}
void
FrameState::sync(Assembler &masm, Uses uses) const
{
    /*
     * Keep track of free registers using a bitmask. If we have to drop into
     * syncFancy(), then this mask will help avoid eviction.
     */
    Registers avail(freeRegs);
    Registers temp(Registers::TempRegs);

    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);

        if (!fe->isCopy()) {
            /* Keep track of registers that can be clobbered. */
            if (fe->data.inRegister())
                avail.putReg(fe->data.reg());
            if (fe->type.inRegister())
                avail.putReg(fe->type.reg());

            /* Sync. */
            if (!fe->data.synced() && (fe->data.inRegister() || fe >= bottom)) {
                syncData(fe, address, masm);
                if (fe->isConstant())
                    continue;
            }
            if (!fe->type.synced() && (fe->type.inRegister() || fe >= bottom))
                syncType(fe, addressOf(fe), masm);
        } else if (fe >= bottom) {
            FrameEntry *backing = fe->copyOf();
            JS_ASSERT(backing != fe);
            JS_ASSERT(!backing->isConstant() && !fe->isConstant());

            /*
             * If the copy is backed by something not in a register, fall back
             * to a slower sync algorithm.
             */
            if ((!fe->type.synced() && !backing->type.inRegister()) ||
                (!fe->data.synced() && !backing->data.inRegister())) {
                syncFancy(masm, avail, i, bottom);
                return;
            }

            if (!fe->type.synced()) {
                /* :TODO: we can do better, the type is learned for all copies. */
                if (fe->isTypeKnown()) {
                    //JS_ASSERT(fe->getTypeTag() == backing->getTypeTag());
                    masm.storeTypeTag(ImmType(fe->getKnownType()), address);
                } else {
                    masm.storeTypeTag(backing->type.reg(), address);
                }
            }

            if (!fe->data.synced())
                masm.storePayload(backing->data.reg(), address);
        }
    }
}
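/*
 * When the slow path above triggers: an entry that is a copy can normally be
 * synced by storing straight from its backing entry's registers. If either
 * component of the backing entry lives only in memory, there is no register
 * to store from, so sync() hands the remaining work to syncFancy() along
 * with the register-availability mask accumulated so far.
 */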
void
FrameState::allocForBinary(FrameEntry *lhs, FrameEntry *rhs, JSOp op, BinaryAlloc &alloc,
                           bool needsResult)
{
    FrameEntry *backingLeft = lhs;
    FrameEntry *backingRight = rhs;

    if (backingLeft->isCopy())
        backingLeft = backingLeft->copyOf();
    if (backingRight->isCopy())
        backingRight = backingRight->copyOf();

    /*
     * For each remat piece of both FEs, if a register is assigned, get it now
     * and pin it. This is safe - constants and known types will be avoided.
     */
    if (AllocHelper(backingLeft->type, alloc.lhsType))
        pinReg(alloc.lhsType.reg());
    if (AllocHelper(backingLeft->data, alloc.lhsData))
        pinReg(alloc.lhsData.reg());
    if (AllocHelper(backingRight->type, alloc.rhsType))
        pinReg(alloc.rhsType.reg());
    if (AllocHelper(backingRight->data, alloc.rhsData))
        pinReg(alloc.rhsData.reg());

    /* For each type without a register, give it a register if needed. */
    if (!alloc.lhsType.isSet() && backingLeft->type.inMemory()) {
        alloc.lhsType = tempRegForType(lhs);
        pinReg(alloc.lhsType.reg());
    }
    if (!alloc.rhsType.isSet() && backingRight->type.inMemory()) {
        alloc.rhsType = tempRegForType(rhs);
        pinReg(alloc.rhsType.reg());
    }

    bool commu;
    switch (op) {
      case JSOP_EQ:
      case JSOP_GT:
      case JSOP_GE:
      case JSOP_LT:
      case JSOP_LE:
        /* fall through */
      case JSOP_ADD:
      case JSOP_MUL:
      case JSOP_SUB:
        commu = true;
        break;

      case JSOP_DIV:
        commu = false;
        break;

      default:
        JS_NOT_REACHED("unknown op");
        return;
    }

    /*
     * Data is a little more complicated. If the op is MUL, not all CPUs
     * have multiplication on immediates, so a register is needed. Also,
     * if the op is not commutative, the LHS _must_ be in a register.
     */
    JS_ASSERT_IF(lhs->isConstant(), !rhs->isConstant());
    JS_ASSERT_IF(rhs->isConstant(), !lhs->isConstant());

    if (!alloc.lhsData.isSet()) {
        if (backingLeft->data.inMemory()) {
            alloc.lhsData = tempRegForData(lhs);
            pinReg(alloc.lhsData.reg());
        } else if (op == JSOP_MUL || !commu) {
            JS_ASSERT(lhs->isConstant());
            alloc.lhsData = allocReg();
            alloc.extraFree = alloc.lhsData;
            masm.move(Imm32(lhs->getValue().toInt32()), alloc.lhsData.reg());
        }
    }
    if (!alloc.rhsData.isSet()) {
        if (backingRight->data.inMemory()) {
            alloc.rhsData = tempRegForData(rhs);
            pinReg(alloc.rhsData.reg());
        } else if (op == JSOP_MUL) {
            JS_ASSERT(rhs->isConstant());
            alloc.rhsData = allocReg();
            alloc.extraFree = alloc.rhsData;
            masm.move(Imm32(rhs->getValue().toInt32()), alloc.rhsData.reg());
        }
    }

    alloc.lhsNeedsRemat = false;
    alloc.rhsNeedsRemat = false;

    if (!needsResult)
        goto skip;

    /*
     * Now a result register is needed. It must contain a mutable copy of the
     * LHS. For commutative operations, we can opt to use the RHS instead. If
     * for some reason either operand must be in a register, that has already
     * been guaranteed at this point.
     */
    if (!freeRegs.empty()) {
        /* Free reg - just grab it. */
        alloc.result = allocReg();
        if (!alloc.lhsData.isSet()) {
            JS_ASSERT(alloc.rhsData.isSet());
            JS_ASSERT(commu);
            masm.move(alloc.rhsData.reg(), alloc.result);
            alloc.resultHasRhs = true;
        } else {
            masm.move(alloc.lhsData.reg(), alloc.result);
            alloc.resultHasRhs = false;
        }
    } else {
        /*
         * No free regs. Find a good candidate to re-use. Best candidates don't
         * require syncs on the inline path.
         */
        bool leftInReg = backingLeft->data.inRegister();
        bool rightInReg = backingRight->data.inRegister();
        bool leftSynced = backingLeft->data.synced();
        bool rightSynced = backingRight->data.synced();
        if (!commu || (leftInReg && (leftSynced || (!rightInReg || !rightSynced)))) {
            JS_ASSERT(backingLeft->data.inRegister() || !commu);
            JS_ASSERT_IF(backingLeft->data.inRegister(),
                         backingLeft->data.reg() == alloc.lhsData.reg());
            if (backingLeft->data.inRegister()) {
                alloc.result = backingLeft->data.reg();
                unpinReg(alloc.result);
                takeReg(alloc.result);
                alloc.lhsNeedsRemat = true;
            } else {
                /* For now, just spill... */
                alloc.result = allocReg();
                masm.move(alloc.lhsData.reg(), alloc.result);
            }
            alloc.resultHasRhs = false;
        } else {
            JS_ASSERT(commu);
            JS_ASSERT(!leftInReg || (rightInReg && rightSynced));
            alloc.result = backingRight->data.reg();
            unpinReg(alloc.result);
            takeReg(alloc.result);
            alloc.resultHasRhs = true;
            alloc.rhsNeedsRemat = true;
        }
    }

  skip:
    /* Unpin everything that was pinned. */
    if (backingLeft->type.inRegister())
        unpinReg(backingLeft->type.reg());
    if (backingRight->type.inRegister())
        unpinReg(backingRight->type.reg());
    if (backingLeft->data.inRegister())
        unpinReg(backingLeft->data.reg());
    if (backingRight->data.inRegister())
        unpinReg(backingRight->data.reg());
}
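/*
 * A minimal sketch of how a caller might consume the BinaryAlloc filled in
 * above (the emission steps are hypothetical, for illustration only):
 *
 *   BinaryAlloc regs;
 *   frame.allocForBinary(lhs, rhs, JSOP_ADD, regs, true);
 *   // Emit the fast path into regs.result; regs.resultHasRhs says whether
 *   // the RHS payload (rather than the LHS) was moved into the result
 *   // register. If regs.lhsNeedsRemat or regs.rhsNeedsRemat is set, that
 *   // operand's register was taken over for the result and must be
 *   // rematerialized before it is used again.
 */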