// Reallocate a smart-allocated block at ptr to hold nbytes, preserving its
// contents.  Returns the (possibly moved) payload pointer.
inline void* MemoryManager::smartRealloc(void* ptr, size_t nbytes) {
  FTRACE(3, "smartRealloc: {} to {}\n", ptr, nbytes);
  assert(nbytes > 0);
  // The allocation header sits immediately before the payload.
  auto const n = static_cast<MallocNode*>(ptr) - 1;
  if (LIKELY(n->small.padbytes <= kMaxSmartSize)) {
    // Small allocation: the old size is recorded in padbytes, so allocate a
    // fresh block, copy min(old payload size, nbytes), and free the old one.
    void* newmem = smart_malloc(nbytes);
    auto const copySize = std::min(
      n->small.padbytes - sizeof(SmallNode),
      nbytes
    );
    newmem = memcpy(newmem, ptr, copySize);
    smart_free(ptr);
    return newmem;
  }
  // Ok, it's a big allocation.  Since we don't know how big it is
  // (i.e. how much data we should memcpy), we have no choice but to
  // ask malloc to realloc for us.
  auto const oldNext = n->big.next;
  auto const oldPrev = n->big.prev;
  auto const newNode = static_cast<BigNode*>(
    safe_realloc(n, nbytes + sizeof(BigNode))
  );
  refreshStats();
  // If realloc moved the node, splice the new address into the intrusive
  // doubly-linked big-allocation list in place of the old one.  (NOTE:
  // &n->big only computes an address from the stale pointer; the old
  // memory is not dereferenced after the realloc.)
  if (newNode != &n->big) {
    oldNext->prev = oldPrev->next = newNode;
  }
  // Callers get the payload, which starts just past the header node.
  return newNode + 1;
}
// Register the calling thread as starting a request: record its start
// generation and pthread id in the shared in-flight table, and seed the
// oldest-request marker if no request was previously in flight.
void startRequest() {
  // Lazily assign this thread a stable slot index on first use.
  if (UNLIKELY(s_thisThreadIdx == -1)) {
    s_thisThreadIdx = s_nextThreadIdx.fetch_add(1);
  }
  auto const threadIdx = s_thisThreadIdx;
  GenCount startTime = getTime();
  {
    GenCountGuard g;  // serializes access to the shared tables below
    refreshStats();
    checkOldest();
    if (threadIdx >= s_inflightRequests.size()) {
      // First time this slot is used: grow the table, idle-initialized.
      s_inflightRequests.resize(threadIdx + 1, {kIdleGenCount, 0});
    } else {
      // Slot must be idle — a thread can't start a request twice.
      assert(s_inflightRequests[threadIdx].startTime == kIdleGenCount);
    }
    s_inflightRequests[threadIdx].startTime = correctTime(startTime);
    s_inflightRequests[threadIdx].pthreadId = Process::GetThreadId();
    FTRACE(1, "threadIdx {} pthreadId {} start @gen {}\n", threadIdx,
           s_inflightRequests[threadIdx].pthreadId,
           s_inflightRequests[threadIdx].startTime);
    // 0 appears to be the "nothing in flight" sentinel; this request then
    // becomes the oldest one.  (Plain store is done under the guard.)
    if (s_oldestRequestInFlight.load(std::memory_order_relaxed) == 0) {
      s_oldestRequestInFlight = s_inflightRequests[threadIdx].startTime;
    }
  }
}
inline void MemoryManager::updateBigStats() { // If we are using jemalloc, it is keeping track of allocations outside of // the slabs and the usage so we should force this after an allocation that // was too large for one of the existing slabs. When we're not using jemalloc // this check won't do anything so avoid the extra overhead. if (use_jemalloc || UNLIKELY(m_stats.usage > m_stats.maxBytes)) { refreshStats(); } }
void MenuCharacter::logic() { if (!visible) return; if (closeButton->checkClick()) { visible = false; } // TODO: this doesn't need to be done every frame. Only call this when something has updated refreshStats(); }
// Dump this thread's current/peak usage and allocation counters, plus total
// slab footprint, to stdout.  Debug/diagnostic aid.
void MemoryManager::checkMemory() {
  printf("----- MemoryManager for Thread %ld -----\n", (long)pthread_self());
  refreshStats();  // bring counters up to date before reporting
  printf("Current Usage: %" PRId64 " bytes\t", m_stats.usage);
  printf("Current Alloc: %" PRId64 " bytes\n", m_stats.alloc);
  printf("Peak Usage: %" PRId64 " bytes\t", m_stats.peakUsage);
  printf("Peak Alloc: %" PRId64 " bytes\n", m_stats.peakAlloc);
  // Total slab memory, reported in KiB.
  printf("Slabs: %lu KiB\n", m_slabs.size() * SLAB_SIZE / 1024);
}
// Dump this thread's current/peak usage and allocation counters to stdout,
// then let every per-type smart allocator report on its own memory.
void MemoryManager::checkMemory(bool detailed) {
  printf("----- MemoryManager for Thread %ld -----\n", (long)pthread_self());
  refreshStats();  // bring counters up to date before reporting
  printf("Current Usage: %lld bytes\t", m_stats.usage);
  printf("Current Alloc: %lld bytes\n", m_stats.alloc);
  printf("Peak Usage: %lld bytes\t", m_stats.peakUsage);
  printf("Peak Alloc: %lld bytes\n", m_stats.peakAlloc);
  for (auto& allocator : m_smartAllocators) {
    allocator->checkMemory(detailed);
  }
}
/*
 * GFileMonitor "changed" callback: the monitored tmux file for a host was
 * touched, so refresh that host's stats.  user_data carries the HostNode;
 * the other GFileMonitor parameters are unused here.
 */
void tmux_changed(GFileMonitor *monitor, GFile *file, GFile *other_file,
                  GFileMonitorEvent event_type, gpointer user_data) {
  HostNode *n = user_data;
  g_assert(n);
  /* refreshStats() takes a GList of hosts, so wrap the single node in a
   * temporary one-element list. */
  GList *nl = g_list_append(NULL, n);
  refreshStats(nl);
  /* Removing the only element frees its link and yields the empty (NULL)
   * list, so no g_list_free() is needed. */
  nl = g_list_remove(nl, n);
}
// Track a freshly-made big allocation: resync stats if needed, then link
// the node at the head of the intrusive doubly-linked list rooted at
// m_bigs.  Returns the payload pointer (just past the header).
inline void* MemoryManager::smartEnlist(BigNode* n) {
  // If we are using jemalloc, it is keeping track of allocations outside of
  // the slabs and the usage so we should force this after an allocation that
  // was too large for one of the existing slabs. When we're not using jemalloc
  // this check won't do anything so avoid the extra overhead.
  if (use_jemalloc || UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  // link after m_bigs: standard head insertion into a circular/doubly
  // linked list (n goes between m_bigs and its old successor).
  auto next = m_bigs.next;
  n->next = next;
  n->prev = &m_bigs;
  next->prev = m_bigs.next = n;
  // Big nodes must report a padded size above the small-allocation cutoff.
  assert(((MallocNode*)n)->small.padbytes > kMaxSmartSize);
  return n + 1;
}
/*
 * Fetch a fresh slab from the heap, carve the first nbytes out of it, and
 * make the remainder the active bump-allocation region.  Returns the
 * newly allocated nbytes-sized block at the start of the slab.
 */
NEVER_INLINE void* MemoryManager::newSlab(uint32_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) refreshStats();

  // Park the unused tail of the current slab before abandoning it.
  storeTail(m_front, (char*)m_limit - (char*)m_front);

  auto const slab = m_heap.allocSlab(kSlabSize);
  assert((uintptr_t(slab.ptr) & kSmallSizeAlignMask) == 0);

  // Account for the new slab and bump the peak watermark if we passed it.
  m_stats.borrow(slab.size);
  m_stats.alloc += slab.size;
  if (m_stats.peakAlloc < m_stats.alloc) m_stats.peakAlloc = m_stats.alloc;

  // First nbytes are handed back; [m_front, m_limit) is what remains.
  auto const base = uintptr_t(slab.ptr);
  m_front = (void*)(base + nbytes);
  m_limit = (void*)(base + slab.size);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab.ptr, m_limit);
  return slab.ptr;
}
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list. Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(size_t nbytes) {
  // Opportunistically resync stats once usage crosses the limit.
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  void* slab = safe_malloc(kSlabSize);
  // Slabs must be aligned so small-size pointer math works.
  assert((uintptr_t(slab) & kSmartSizeAlignMask) == 0);
  // Account for the new slab; track the peak allocation watermark.
  JEMALLOC_STATS_ADJUST(&m_stats, kSlabSize);
  m_stats.alloc += kSlabSize;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  // Remember the slab so it can be freed at request end.
  m_slabs.push_back(slab);
  // The first nbytes are the block we hand back; the remainder becomes the
  // active bump-allocation region [m_front, m_limit).
  m_front = (void*)(uintptr_t(slab) + nbytes);
  m_limit = (void*)(uintptr_t(slab) + kSlabSize);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab, m_limit);
  return slab;
}
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list. Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(size_t nbytes) {
  // Opportunistically resync stats once usage crosses the limit.
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  initHole(); // enable parsing the leftover space in the old slab
  // In debug builds, verify heap integrity before switching slabs.
  if (debug) checkHeap();
  auto slab = m_heap.allocSlab(kSlabSize);
  assert((uintptr_t(slab.ptr) & kSmartSizeAlignMask) == 0);
  // Account for the new slab; track the peak allocation watermark.
  m_stats.borrow(slab.size);
  m_stats.alloc += slab.size;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  // The first nbytes are the block we hand back; the remainder becomes the
  // active bump-allocation region [m_front, m_limit).
  m_front = (void*)(uintptr_t(slab.ptr) + nbytes);
  m_limit = (void*)(uintptr_t(slab.ptr) + slab.size);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab.ptr, m_limit);
  return slab.ptr;
}
// Reallocate a smart-allocated block at ptr to hold nbytes, preserving its
// contents.  Returns the (possibly moved) payload pointer.
inline void* MemoryManager::smartRealloc(void* ptr, size_t nbytes) {
  FTRACE(3, "smartRealloc: {} to {}\n", ptr, nbytes);
  assert(nbytes > 0);
  // The allocation header sits immediately before the payload.
  auto const n = static_cast<MallocNode*>(ptr) - 1;
  if (LIKELY(n->small.padbytes <= kMaxSmartSize)) {
    // Small allocation: the old size is recorded in padbytes, so allocate a
    // fresh block, copy min(old payload size, nbytes), and free the old one.
    void* newmem = smart_malloc(nbytes);
    auto const copySize = std::min(
      n->small.padbytes - sizeof(SmallNode),
      nbytes
    );
    newmem = memcpy(newmem, ptr, copySize);
    smart_free(ptr);
    return newmem;
  }
  // Ok, it's a big allocation: delegate the resize (and any data copy) to
  // the heap, then resync stats.
  auto block = m_heap.resizeBig(ptr, nbytes);
  refreshStats();
  return block.ptr;
}
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list. Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(uint32_t nbytes) {
  // Opportunistically resync stats once usage crosses the limit.
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  // Record the unused tail of the current slab before switching away.
  storeTail(m_front, (char*)m_limit - (char*)m_front);
  if (debug && RuntimeOption::EvalCheckHeapOnAlloc && !g_context.isNull()) {
    setSurpriseFlag(PendingGCFlag); // defer heap check until safepoint
  }
  auto slab = m_heap.allocSlab(kSlabSize);
  assert((uintptr_t(slab.ptr) & kSmallSizeAlignMask) == 0);
  // Account for the new slab; track the peak allocation watermark.
  m_stats.borrow(slab.size);
  m_stats.alloc += slab.size;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  // The first nbytes are the block we hand back; the remainder becomes the
  // active bump-allocation region [m_front, m_limit).
  m_front = (void*)(uintptr_t(slab.ptr) + nbytes);
  m_limit = (void*)(uintptr_t(slab.ptr) + slab.size);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab.ptr, m_limit);
  return slab.ptr;
}
// Out-of-line wrapper around refreshStats().  Marked NEVER_INLINE —
// presumably so inlined fast-path callers don't pull in the refresh code;
// confirm against the call sites.
NEVER_INLINE void MemoryManager::refreshStatsHelper() {
  refreshStats();
}