/*
 * Push a new entry onto the simulated stack that is a copy of the entry at
 * |index|.
 *
 * Constants are duplicated outright — no copy link is needed since the value
 * is immutable. Otherwise the pushed entry becomes a lazy copy: it mirrors
 * the backing entry's known type and number-ness but owns no registers of
 * its own (both type and data remats are invalidated). Copy-of-copy chains
 * are flattened so every copy points directly at the true backing store.
 */
void FrameState::pushCopyOf(uint32 index) {
    FrameEntry *backing = entryFor(index);
    FrameEntry *fe = rawPush();
    fe->resetUnsynced();
    if (backing->isConstant()) {
        /* Constants propagate by value; no copied/copyOf bookkeeping. */
        fe->setConstant(Jsvalify(backing->getValue()));
    } else {
        /* Mirror whatever type information the backing entry has. */
        if (backing->isTypeKnown())
            fe->setType(backing->getKnownType());
        else
            fe->type.invalidate();
        fe->isNumber = backing->isNumber;
        /* The copy holds no data register; reads go through the backing fe. */
        fe->data.invalidate();
        if (backing->isCopy()) {
            /* Flatten: copy the original backing store, not the copy. */
            backing = backing->copyOf();
            fe->setCopyOf(backing);
        } else {
            fe->setCopyOf(backing);
            backing->setCopied();
        }

        /*
         * Maintain tracker ordering guarantees for copies: the backing
         * store must precede all of its copies in the tracker.
         */
        JS_ASSERT(backing->isCopied());
        if (fe->trackerIndex() < backing->trackerIndex())
            swapInTracker(fe, backing);
    }
}
/*
 * Allocate a scratch register for sync code, preferring (in order):
 *   1) a free register,
 *   2) a register neither the frame nor any sync entry owns,
 *   3) a frame-owned register, whose sync entry is marked clobbered
 *      (preferring one whose fe is not copied, so a later reload is
 *      unnecessary),
 *   4) last resort: steal a register held by a sync entry, dropping that
 *      entry's remat info.
 *
 * Returns the chosen register; ownership bookkeeping for the evicted
 * owner (if any) is updated before returning.
 */
JSC::MacroAssembler::RegisterID ImmutableSync::allocReg() {
    if (!avail.empty())
        return avail.takeAnyReg();

    uint32 lastResort = FrameState::InvalidIndex;
    uint32 evictFromFrame = FrameState::InvalidIndex;

    /* Find something to evict. */
    for (uint32 i = 0; i < JSC::MacroAssembler::TotalRegisters; i++) {
        RegisterID reg = RegisterID(i);
        if (!(Registers::maskReg(reg) & Registers::AvailRegs))
            continue;

        /*
         * Remember the last *available* register as a fallback. (Previously
         * this was |lastResort = 0|, which hard-coded register 0 — that only
         * satisfies the JS_ASSERT(regs[lastResort]) below if register 0 is
         * both in AvailRegs and sync-owned, an architecture-dependent
         * accident. Using |i| makes the fallback valid by construction: if
         * the fallback path is reached, evictFromFrame was never set, so
         * every available register has a non-NULL sync entry.)
         */
        lastResort = i;

        if (!regs[i]) {
            /* If the frame does not own this register, take it! */
            FrameEntry *fe = frame.regstate[i].usedBy();
            if (!fe)
                return reg;

            evictFromFrame = i;

            /*
             * If not copied, we can sync and not have to load again later.
             * That's about as good as it gets, so just break out now.
             */
            if (!fe->isCopied())
                break;
        }
    }

    if (evictFromFrame != FrameState::InvalidIndex) {
        /* Mark the owning sync entry's half (type or data) as clobbered. */
        FrameEntry *fe = frame.regstate[evictFromFrame].usedBy();
        SyncEntry &e = entryFor(fe);
        if (frame.regstate[evictFromFrame].type() == RematInfo::TYPE) {
            JS_ASSERT(!e.typeClobbered);
            e.typeClobbered = true;
        } else {
            JS_ASSERT(!e.dataClobbered);
            e.dataClobbered = true;
        }
        return RegisterID(evictFromFrame);
    }

    /* Last resort: steal a register from a sync entry. */
    JS_ASSERT(lastResort != FrameState::InvalidIndex);
    JS_ASSERT(regs[lastResort]);

    SyncEntry *e = regs[lastResort];
    RegisterID reg = RegisterID(lastResort);
    if (e->hasDataReg && e->dataReg == reg) {
        e->hasDataReg = false;
    } else if (e->hasTypeReg && e->typeReg == reg) {
        e->hasTypeReg = false;
    } else {
        JS_NOT_REACHED("no way");
    }

    return reg;
}
/*
 * Store the value at the top of the simulated stack into local slot |n|.
 *
 * @param n              local slot index.
 * @param popGuaranteed  true if the caller guarantees the top entry will be
 *                       popped immediately after this store.
 * @param typeChange     true if the local's type tag may change and must be
 *                       carried in a register; false lets the type live in
 *                       memory (syncing the tag eagerly if it wasn't already).
 *
 * Uncacheable locals (eval frames or escaping slots) are written straight to
 * memory. Otherwise the top entry's value is propagated into the local via
 * the copy machinery: constants by value, copies by re-pointing, and register
 * values by making the local the new backing store.
 */
void FrameState::storeLocal(uint32 n, bool popGuaranteed, bool typeChange) {
    FrameEntry *localFe = getLocal(n);

    /*
     * NOTE(review): locals are treated as uncacheable in eval frames or when
     * the slot escapes — presumably because the slot can be observed outside
     * this frame's compiled code; confirm against callers.
     */
    bool cacheable = !eval && !escaping[n];

    if (!popGuaranteed && !cacheable) {
        /* Uncacheable and still live: store directly to the frame slot. */
        JS_ASSERT_IF(base[localIndex(n)] && (!eval || n < script->nfixed),
                     entries[localIndex(n)].type.inMemory() &&
                     entries[localIndex(n)].data.inMemory());
        Address local(JSFrameReg, sizeof(JSStackFrame) + n * sizeof(Value));
        storeTo(peek(-1), local, false);
        forgetAllRegs(getLocal(n));
        localFe->resetSynced();
        return;
    }

    /* Captured before the local is invalidated below. */
    bool wasSynced = localFe->type.synced();

    /* Detect something like (x = x) which is a no-op. */
    FrameEntry *top = peek(-1);
    if (top->isCopy() && top->copyOf() == localFe) {
        JS_ASSERT(localFe->isCopied());
        return;
    }

    /* Completely invalidate the local variable. */
    if (localFe->isCopied()) {
        uncopy(localFe);
        if (!localFe->isCopied())
            forgetAllRegs(localFe);
    } else {
        forgetAllRegs(localFe);
    }
    localFe->resetUnsynced();

    /* Constants are easy to propagate. */
    if (top->isConstant()) {
        localFe->setCopyOf(NULL);
        localFe->setNotCopied();
        localFe->setConstant(Jsvalify(top->getValue()));
        return;
    }

    /*
     * When dealing with copies, there are two important invariants:
     *
     * 1) The backing store precedes all copies in the tracker.
     * 2) The backing store of a local is never a stack slot, UNLESS the local
     *    variable itself is a stack slot (blocks) that precedes the stack
     *    slot.
     *
     * If the top is a copy, and the second condition holds true, the local
     * can be rewritten as a copy of the original backing slot. If the first
     * condition does not hold, force it to hold by swapping in-place.
     */
    FrameEntry *backing = top;
    if (top->isCopy()) {
        backing = top->copyOf();
        JS_ASSERT(backing->trackerIndex() < top->trackerIndex());

        uint32 backingIndex = indexOfFe(backing);
        uint32 tol = uint32(spBase - base);
        if (backingIndex < tol || backingIndex < localIndex(n)) {
            /* local.idx < backing.idx means local cannot be a copy yet */
            if (localFe->trackerIndex() < backing->trackerIndex())
                swapInTracker(backing, localFe);
            localFe->setNotCopied();
            localFe->setCopyOf(backing);
            /* Mirror the backing entry's type info; the copy owns no regs. */
            if (backing->isTypeKnown())
                localFe->setType(backing->getKnownType());
            else
                localFe->type.invalidate();
            localFe->data.invalidate();
            localFe->isNumber = backing->isNumber;
            return;
        }

        /*
         * If control flow lands here, then there was a bytecode sequence like
         *
         *  ENTERBLOCK 2
         *  GETLOCAL 1
         *  SETLOCAL 0
         *
         * The problem is slot N can't be backed by M if M could be popped
         * before N. We want a guarantee that when we pop M, even if it was
         * copied, it has no outstanding copies.
         *
         * Because of |let| expressions, it's kind of hard to really know
         * whether a region on the stack will be popped all at once. Bleh!
         *
         * This should be rare except in browser code (and maybe even then),
         * but even so there's a quick workaround. We take all copies of the
         * backing fe, and redirect them to be copies of the destination.
         */
        FrameEntry *tos = tosFe();
        for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
            FrameEntry *fe = tracker[i];
            if (fe >= tos)
                continue;
            if (fe->isCopy() && fe->copyOf() == backing)
                fe->setCopyOf(localFe);
        }
    }
    backing->setNotCopied();

    /*
     * This is valid from the top->isCopy() path because we're guaranteed a
     * consistent ordering - all copies of |backing| are tracked after
     * |backing|. Transitively, only one swap is needed.
     */
    if (backing->trackerIndex() < localFe->trackerIndex())
        swapInTracker(backing, localFe);

    /*
     * Move the backing store down - we spill registers here, but we could be
     * smarter and re-use the type reg.
     */
    RegisterID reg = tempRegForData(backing);
    localFe->data.setRegister(reg);
    moveOwnership(reg, localFe);

    if (typeChange) {
        /* The type may differ: carry it in a register (or as a known type). */
        if (backing->isTypeKnown()) {
            localFe->setType(backing->getKnownType());
        } else {
            RegisterID reg = tempRegForType(backing);
            localFe->type.setRegister(reg);
            moveOwnership(reg, localFe);
        }
    } else {
        /* Type is unchanged; make sure the tag is synced, then live in memory. */
        if (!wasSynced)
            masm.storeTypeTag(ImmType(backing->getKnownType()), addressOf(localFe));
        localFe->type.setMemory();
    }

    /* The local is now the backing store; the old backing becomes a copy. */
    if (!backing->isTypeKnown())
        backing->type.invalidate();
    backing->data.invalidate();
    backing->setCopyOf(localFe);
    backing->isNumber = localFe->isNumber;
    localFe->setCopied();

    if (!cacheable) {
        /* TODO: x64 optimization */
        if (!localFe->type.synced())
            syncType(localFe, addressOf(localFe), masm);
        if (!localFe->data.synced())
            syncData(localFe, addressOf(localFe), masm);
        forgetAllRegs(localFe);
        localFe->type.setMemory();
        localFe->data.setMemory();
    }

    JS_ASSERT(top->copyOf() == localFe);
}