JSStack::JSStack(VM& vm, size_t capacity)
    : m_vm(vm)
    , m_end(0)
    , m_topCallFrame(vm.topCallFrame)
{
    ASSERT(capacity && isPageAligned(capacity));

    m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages);
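    // Only address space has been reserved so far; physical pages are
    // committed lazily as the stack grows. The commit boundary starts at the
    // high end of the reservation and moves toward lower addresses.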
    updateStackLimit(highAddress());
    m_commitEnd = highAddress();
    
    m_lastStackTop = getBaseOfStack();

    disableErrorStackReserve();

    m_topCallFrame = 0;
}
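Every snippet on this page rounds its byte counts with roundUpAllocationSize before reserving or committing memory. The helper itself is not shown here; assuming it rounds a requested size up to the next multiple of a power-of-two granule such as commitSize, a minimal stand-in would look like this (the name roundUpAllocationSizeSketch and the asserts are mine, not from the source):

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for the rounding helper used above: round `request`
// up to the next multiple of `granularity`, which must be a power of two.
static inline size_t roundUpAllocationSizeSketch(size_t request, size_t granularity)
{
    assert(granularity && !(granularity & (granularity - 1))); // power of two
    size_t size = (request + granularity - 1) & ~(granularity - 1);
    assert(size >= request); // no overflow
    return size;
}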
Example #2
bool JSStack::growSlowCase(Register* newEnd)
{
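    // Fast path: the requested end already lies within committed memory, so
    // only the usable end pointer needs to move.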
    if (newEnd <= m_commitEnd) {
        m_end = newEnd;
        return true;
    }

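    // Slow path: commit another commitSize-aligned chunk of the reservation,
    // failing if the request would run past the end of the reserved region.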
    long delta = roundUpAllocationSize(reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_commitEnd), commitSize);
    if (reinterpret_cast<char*>(m_commitEnd) + delta > static_cast<char*>(m_reservation.base()) + m_reservation.size())
        return false;

    m_reservation.commit(m_commitEnd, delta);
    addToCommittedByteCount(delta);
    m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) + delta);
    m_end = newEnd;
    return true;
}
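Both grow paths call addToCommittedByteCount with the number of newly committed bytes. Its definition is not part of this page; under the assumption that it only maintains a process-wide tally used for memory accounting, a minimal sketch could look like this (the atomic counter and its use here are mine, not the source's):

#include <atomic>

// Hypothetical sketch only: a process-wide tally of committed stack bytes.
// The real helper may guard a plain counter with a lock and feed it into the
// engine's memory statistics instead of using an atomic.
static std::atomic<long> committedBytesCount { 0 };

static void addToCommittedByteCount(long byteCount)
{
    committedBytesCount.fetch_add(byteCount, std::memory_order_relaxed);
}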
Example #3
void RegisterFile::releaseExcessCapacity()
{
#if OS(QNX)
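    // QNX: keep the globals committed and remap everything above them as
    // inaccessible, letting the kernel reclaim the backing pages.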
    size_t sizeForGlobals = roundUpAllocationSize(m_maxGlobals * sizeof(Register), commitSize);
    Register* endOfGlobals = reinterpret_cast<Register*>(reinterpret_cast<char*>(m_buffer) + sizeForGlobals);
    size_t decommitSize = (m_max - endOfGlobals) * sizeof(Register);
    if (decommitSize > 0) {
        if (mmap(endOfGlobals, decommitSize, PROT_NONE, MAP_FIXED|MAP_LAZY|MAP_PRIVATE|MAP_ANON, -1, 0) == MAP_FAILED)
            fprintf(stderr, "Could not decommit register file memory: %d\n", errno);
    }
    m_commitEnd = endOfGlobals;

#elif HAVE(MMAP) && HAVE(MADV_FREE) && !HAVE(VIRTUALALLOC)
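    // MADV_FREE lets the kernel reclaim these pages under memory pressure;
    // retry while the call reports it is temporarily unable to take the hint.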
    while (madvise(m_start, (m_max - m_start) * sizeof(Register), MADV_FREE) == -1 && errno == EAGAIN) { }
#elif HAVE(VIRTUALALLOC)
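    // Windows: decommit the physical pages while keeping the address range reserved.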
    VirtualFree(m_start, (m_max - m_start) * sizeof(Register), MEM_DECOMMIT);
    m_commitEnd = m_start;
#endif
    m_maxUsed = m_start;
}
Example #4
bool JSStack::growSlowCase(Register* newEnd)
{
    // If we have already committed enough memory to satisfy this request,
    // just update the end pointer and return.
    if (newEnd >= m_commitEnd) {
        updateStackLimit(newEnd);
        return true;
    }

    // Compute the chunk size of additional memory to commit, and check that it
    // is still within our budget. If not, we'll fail to grow and return false.
    long delta = roundUpAllocationSize(reinterpret_cast<char*>(m_commitEnd) - reinterpret_cast<char*>(newEnd), commitSize);
    if (reinterpret_cast<char*>(m_commitEnd) - delta <= reinterpret_cast<char*>(m_useableEnd))
        return false;

    // Otherwise, the growth is still within our budget. Go ahead and commit
    // it and return true.
    m_reservation.commit(reinterpret_cast<char*>(m_commitEnd) - delta, delta);
    addToCommittedByteCount(delta);
    m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) - delta);
    updateStackLimit(newEnd);
    return true;
}
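For context, the slow path above is normally reached through a cheap inline check. The wrapper below is a sketch, not code copied from the source: it assumes JSStack keeps the current usable end in m_end and that the stack grows toward lower addresses, as the commit logic above suggests.

// Assumed fast-path wrapper (sketch): for a downward-growing stack, any
// newEnd at or above the current usable end is already available, so only
// requests below m_end need to commit more memory via growSlowCase().
inline bool JSStack::grow(Register* newEnd)
{
    if (newEnd >= m_end)
        return true;
    return growSlowCase(newEnd);
}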