void MemoryManager::sweep() {
  assert(!sweeping());
  if (debug) checkHeap();
  if (debug) traceHeap();
  m_sweeping = true;
  SCOPE_EXIT { m_sweeping = false; };
  DEBUG_ONLY size_t num_sweepables = 0, num_natives = 0;
  // iterate until both sweep lists are empty. Entries can be added to or
  // removed from either list during sweeping.
  do {
    while (!m_sweepables.empty()) {
      num_sweepables++;
      auto obj = m_sweepables.next();
      obj->unregister();
      obj->sweep();
    }
    while (!m_natives.empty()) {
      num_natives++;
      assert(m_natives.back()->sweep_index == m_natives.size() - 1);
      auto node = m_natives.back();
      m_natives.pop_back();
      auto obj = Native::obj(node);
      auto ndi = obj->getVMClass()->getNativeDataInfo();
      ndi->sweep(obj);
      // trash the native data but leave the header and object parsable
      assert(memset(node + 1, kSmartFreeFill, node->obj_offset - sizeof(*node)));
    }
  } while (!m_sweepables.empty());
  TRACE(1, "sweep: sweepable %lu native %lu\n", num_sweepables, num_natives);
  if (debug) checkHeap();
}
void MemoryManager::sweep() {
  assert(!sweeping());
  if (debug) checkHeap();
  collect("MM::sweep");
  m_sweeping = true;
  SCOPE_EXIT { m_sweeping = false; };
  DEBUG_ONLY size_t num_sweepables = 0, num_natives = 0;
  // iterate until both sweep lists are empty. Entries can be added to or
  // removed from either list during sweeping.
  do {
    while (!m_sweepables.empty()) {
      num_sweepables++;
      auto obj = m_sweepables.next();
      obj->unregister();
      obj->sweep();
    }
    while (!m_natives.empty()) {
      num_natives++;
      assert(m_natives.back()->sweep_index == m_natives.size() - 1);
      auto node = m_natives.back();
      m_natives.pop_back();
      auto obj = Native::obj(node);
      auto ndi = obj->getVMClass()->getNativeDataInfo();
      ndi->sweep(obj);
      // trash the native data but leave the header and object parsable
      assert(memset(node + 1, kSmallFreeFill, node->obj_offset - sizeof(*node)));
    }
  } while (!m_sweepables.empty());
  DEBUG_ONLY auto napcs = m_apc_arrays.size();
  FTRACE(1, "sweep: sweepable {} native {} apc array {}\n",
         num_sweepables, num_natives, napcs);
  if (debug) checkHeap();
  // decref apc arrays referenced by this request. This must happen here
  // (instead of in resetAllocator), because the sweep routine may use
  // g_context.
  while (!m_apc_arrays.empty()) {
    auto a = m_apc_arrays.back();
    m_apc_arrays.pop_back();
    a->sweep();
  }
}
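// Context for the decref loop above: m_apc_arrays is a plain vector, so a
// registration path must record each APCLocalArray as it is created. A
// minimal sketch of that counterpart, under the assumption that APCLocalArray
// carries an m_sweep_index field (the field name and the swap-with-back
// removal are assumptions, mirroring the sweep_index scheme the native-data
// loop asserts on):
void MemoryManager::addApcArray(APCLocalArray* a) {
  a->m_sweep_index = m_apc_arrays.size();
  m_apc_arrays.push_back(a);
}

void MemoryManager::removeApcArray(APCLocalArray* a) {
  assert(a->m_sweep_index < m_apc_arrays.size());
  assert(m_apc_arrays[a->m_sweep_index] == a);
  // O(1) removal: move the last entry into the vacated slot.
  auto index = a->m_sweep_index;
  auto last = m_apc_arrays.back();
  m_apc_arrays[index] = last;
  last->m_sweep_index = index;
  m_apc_arrays.pop_back();
}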
void MemoryManager::sweep() {
  assert(!sweeping());
  if (debug) checkHeap();
  m_sweeping = true;
  SCOPE_EXIT { m_sweeping = false; };
  UNUSED auto sweepable = Sweepable::SweepAll();
  UNUSED auto native = m_natives.size();
  Native::sweepNativeData(m_natives);
  TRACE(1, "sweep: sweepable %u native %lu\n", sweepable, native);
}
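// All three sweep() revisions drain the same per-request lists; the entries
// on m_sweepables are Sweepable objects whose sweep() hook runs once at
// request end. A minimal sketch of a participant, assuming Sweepable
// self-registers in its constructor and exposes a virtual sweep() as the
// loops above imply (SweptFile and m_file are hypothetical names):
struct SweptFile : Sweepable {
  // The Sweepable base constructor is assumed to enlist this object on the
  // sweep list; unregister() is invoked by the sweep loop itself.
  explicit SweptFile(FILE* f) : m_file(f) {}
  // Called once by MemoryManager::sweep(): release non-request-local
  // resources here; request-local memory is reclaimed wholesale afterwards.
  void sweep() override {
    if (m_file) { fclose(m_file); m_file = nullptr; }
  }
 private:
  FILE* m_file;
};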
void MemoryManager::resetRuntimeOptions() {
  if (debug) {
    deleteRootMaps();
    checkHeap();
    // check that every allocation in the heap has been freed before reset
    iterate([&](Header* h) {
      assert(h->kind() == HeaderKind::Free);
    });
  }
  MemoryManager::TlsWrapper::destroy();   // ~MemoryManager()
  MemoryManager::TlsWrapper::getCheck();  // new MemoryManager()
}
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list. Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(uint32_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  storeTail(m_front, (char*)m_limit - (char*)m_front);
  if (debug && RuntimeOption::EvalCheckHeapOnAlloc) checkHeap();
  auto slab = m_heap.allocSlab(kSlabSize);
  assert((uintptr_t(slab.ptr) & kSmallSizeAlignMask) == 0);
  m_stats.borrow(slab.size);
  m_stats.alloc += slab.size;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  m_front = (void*)(uintptr_t(slab.ptr) + nbytes);
  m_limit = (void*)(uintptr_t(slab.ptr) + slab.size);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab.ptr, m_limit);
  return slab.ptr;
}
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list. Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(size_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  initHole(); // enable parsing the leftover space in the old slab
  if (debug) checkHeap();
  auto slab = m_heap.allocSlab(kSlabSize);
  assert((uintptr_t(slab.ptr) & kSmartSizeAlignMask) == 0);
  m_stats.borrow(slab.size);
  m_stats.alloc += slab.size;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  m_front = (void*)(uintptr_t(slab.ptr) + nbytes);
  m_limit = (void*)(uintptr_t(slab.ptr) + slab.size);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab.ptr, m_limit);
  return slab.ptr;
}
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list. Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(size_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  if (debug) checkHeap();
  void* slab = safe_malloc(kSlabSize);
  assert((uintptr_t(slab) & kSmartSizeAlignMask) == 0);
  JEMALLOC_STATS_ADJUST(&m_stats, kSlabSize);
  m_stats.alloc += kSlabSize;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  initHole(); // enable parsing the leftover space in the old slab
  m_slabs.push_back(slab);
  m_front = (void*)(uintptr_t(slab) + nbytes);
  m_limit = (void*)(uintptr_t(slab) + kSlabSize);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab, m_limit);
  return slab;
}
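// Every newSlab() variant above is the slow path of a bump allocator:
// m_front and m_limit bound the unallocated tail of the current slab, and
// newSlab() is reached only when a request overflows it. A simplified sketch
// of the fast path, assuming this reduced signature (the real allocator also
// rounds nbytes to a size class and applies debug fill):
void* MemoryManager::slabAlloc(size_t nbytes) {
  void* ptr = m_front;
  void* next = (void*)(uintptr_t(ptr) + nbytes);
  if (LIKELY(uintptr_t(next) <= uintptr_t(m_limit))) {
    m_front = next;         // bump within the current slab
    return ptr;
  }
  return newSlab(nbytes);   // slab exhausted: install a fresh one
}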
void MemoryManager::resetRuntimeOptions() {
  if (debug) {
    checkHeap();
    // check that every allocation in the heap has been freed before reset
    for (auto h = begin(), lim = end(); h != lim; ++h) {
      if (h->kind_ == HeaderKind::Debug) {
        auto h2 = h;
        ++h2;
        if (h2 != lim) {
          assert(h2->kind_ == HeaderKind::Free);
        }
      } else {
        assert(h->kind_ == HeaderKind::Free || h->kind_ == HeaderKind::Hole);
      }
    }
  }
  MemoryManager::TlsWrapper::destroy();   // ~MemoryManager()
  MemoryManager::TlsWrapper::getCheck();  // new MemoryManager()
}
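// The iterate() form in the first resetRuntimeOptions() revision generalizes:
// any per-block invariant can be phrased as a walk over parsable headers. A
// hypothetical debugging helper in the same style (countByKind and the
// NumHeaderKinds bound are assumptions; kind() and iterate() are used as in
// the code above):
void MemoryManager::countByKind() {
  size_t counts[NumHeaderKinds] = {};
  iterate([&](Header* h) { counts[(int)h->kind()]++; });
  for (unsigned i = 0; i < NumHeaderKinds; ++i) {
    FTRACE(1, "heap: kind {} count {}\n", i, counts[i]);
  }
}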
bool Heap::checkHeap() {
  return checkHeap(root);
}