// Tracks the high-water mark of stack usage by FTL-compiled code.
// When a new maximum is observed, the VM's effective stack limit is
// recomputed so the extra headroom is accounted for.
void VM::updateFTLLargestStackSize(size_t stackSize)
{
    // Only a strictly larger footprint changes anything; bail out early.
    if (stackSize <= m_largestFTLStackSize)
        return;

    m_largestFTLStackSize = stackSize;
    updateStackLimit();
}
// Installs a new reserved-zone size, recomputes the stack limit to
// reflect it, and hands back the previous size so the caller can
// restore it later if needed.
size_t VM::updateReservedZoneSize(size_t reservedZoneSize)
{
    const size_t previousReservedZoneSize = m_reservedZoneSize;

    m_reservedZoneSize = reservedZoneSize;
    updateStackLimit();

    return previousReservedZoneSize;
}
// Grows the JS stack so that it reaches down to newEnd. The stack grows
// toward lower addresses: newEnd is the lowest address the caller needs.
// Returns true on success; returns false (without committing anything)
// if satisfying the request would exceed the usable budget.
bool JSStack::growSlowCase(Register* newEnd)
{
    // If we have already committed enough memory to satisfy this request,
    // just update the end pointer and return.
    if (newEnd >= m_commitEnd) {
        updateStackLimit(newEnd);
        return true;
    }

    // Compute the chunk size of additional memory to commit (the shortfall
    // below m_commitEnd, rounded up to a multiple of commitSize), and see
    // if it is still within our budget. If not, we'll fail to grow and
    // return false.
    long delta = roundUpAllocationSize(reinterpret_cast<char*>(m_commitEnd) - reinterpret_cast<char*>(newEnd), commitSize);
    // NOTE(review): m_useableEnd appears to be the lowest address we are
    // allowed to commit down to — confirm against its maintainers/callers.
    if (reinterpret_cast<char*>(m_commitEnd) - delta <= reinterpret_cast<char*>(m_useableEnd))
        return false;

    // Otherwise, the growth is still within our budget. Go ahead and commit
    // it and return true. Commit the new low chunk, account for it, then
    // lower m_commitEnd and the stack limit.
    m_reservation.commit(reinterpret_cast<char*>(m_commitEnd) - delta, delta);
    addToCommittedByteCount(delta);
    m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) - delta);
    updateStackLimit(newEnd);
    return true;
}
// Constructs a JS stack backed by a page reservation of `capacity`
// Registers (rounded up to a whole number of commit chunks). Address
// space is only reserved here; pages are committed lazily as the stack
// grows (see growSlowCase).
JSStack::JSStack(VM& vm, size_t capacity)
    : m_vm(vm)
    , m_end(0)
    , m_topCallFrame(vm.topCallFrame)
{
    ASSERT(capacity && isPageAligned(capacity));

    // Reserve (but do not commit) enough address space for the requested
    // number of Registers, rounded up to the commit chunk size.
    m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages);
    // The stack grows downward, so it starts empty at the reservation's
    // high address: the limit and the commit boundary both begin there.
    updateStackLimit(highAddress());
    m_commitEnd = highAddress();
    m_lastStackTop = getBaseOfStack();
    // Start without the error-handling stack reserve enabled.
    // NOTE(review): presumably this initializes m_useableEnd to the normal
    // (non-error) budget — confirm against disableErrorStackReserve().
    disableErrorStackReserve();

    m_topCallFrame = 0;
}
// Remembers the machine stack pointer captured at VM entry and refreshes
// the stack limit, which is derived from it.
void VM::setStackPointerAtVMEntry(void* sp)
{
    m_stackPointerAtVMEntry = sp;
    updateStackLimit();
}