Example 1
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list.  Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(size_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStatsHelper();
  }
  void* slab = safe_malloc(kSlabSize);
  assert((uintptr_t(slab) & kSmartSizeAlignMask) == 0);
  JEMALLOC_STATS_ADJUST(&m_stats, kSlabSize);
  m_stats.alloc += kSlabSize;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  m_slabs.push_back(slab);
  m_front = (void*)(uintptr_t(slab) + nbytes);
  m_limit = (void*)(uintptr_t(slab) + kSlabSize);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab, m_limit);
  return slab;
}
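For context, newSlab is the slow path of a bump-pointer allocator: callers first try to carve nbytes out of the current slab between m_front and m_limit, and only fall back to newSlab when the slab is exhausted. Below is a minimal sketch of such a fast path; bumpAlloc is a hypothetical name used for illustration, not a function from the snippets above.

// Hypothetical fast path: bump m_front and fall back to newSlab on overflow.
void* MemoryManager::bumpAlloc(size_t nbytes) {
  void* p = m_front;
  void* next = (void*)(uintptr_t(p) + nbytes);
  if (UNLIKELY(next > m_limit)) {
    return newSlab(nbytes);  // slow path: install a fresh slab
  }
  m_front = next;  // nbytes consumed from the current slab
  return p;
}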
Example 2
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list.  Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(size_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  if (debug) checkHeap();
  void* slab = safe_malloc(kSlabSize);
  assert((uintptr_t(slab) & kSmartSizeAlignMask) == 0);
  JEMALLOC_STATS_ADJUST(&m_stats, kSlabSize);
  m_stats.alloc += kSlabSize;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  initHole(); // enable parsing the leftover space in the old slab
  m_slabs.push_back(slab);
  m_front = (void*)(uintptr_t(slab) + nbytes);
  m_limit = (void*)(uintptr_t(slab) + kSlabSize);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab, m_limit);
  return slab;
}
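Example 2 adds two debugging hooks: checkHeap() validates the heap before a new slab is allocated, and initHole() marks the unused tail of the old slab so heap walkers can parse past it. What initHole() writes is not shown here; below is a hedged sketch of one plausible shape, where HoleHeader and kHoleKind are assumed names, not identifiers from the source.

// Hypothetical sketch of initHole(): stamp a header over the leftover space
// between m_front and m_limit so a linear heap parser can skip the dead tail.
struct HoleHeader { uint32_t kind; uint32_t size; };

void MemoryManager::initHole() {
  if (uintptr_t(m_front) < uintptr_t(m_limit)) {
    auto hole = static_cast<HoleHeader*>(m_front);
    hole->kind = kHoleKind;  // assumed tag marking a dead region
    hole->size = uint32_t(uintptr_t(m_limit) - uintptr_t(m_front));
  }
}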
Example 3
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list.  Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE char* MemoryManager::newSlab(size_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStatsHelper();
  }
  char* slab = (char*) Util::safe_malloc(SLAB_SIZE);
  assert(uintptr_t(slab) % 16 == 0);
  JEMALLOC_STATS_ADJUST(&m_stats, SLAB_SIZE);
  m_stats.alloc += SLAB_SIZE;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  m_slabs.push_back(slab);
  m_front = slab + nbytes;
  m_limit = slab + SLAB_SIZE;
  FTRACE(1, "newSlab: adding slab at {} to limit {}\n",
         static_cast<void*>(slab),
         static_cast<void*>(m_limit));
  return slab;
}
Example 4
template<bool callerSavesActualSize> NEVER_INLINE
void* MemoryManager::smartMallocSizeBigHelper(void*& ptr,
                                              size_t& szOut,
                                              size_t bytes) {
  ptr = mallocx(debugAddExtra(bytes + sizeof(BigNode)), 0);
  szOut = debugRemoveExtra(sallocx(ptr, 0) - sizeof(BigNode));

  // NB: We don't report the BigNode header size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust jemalloc stats, otherwise we'd double-count the direct allocation.
  JEMALLOC_STATS_ADJUST(&m_stats, delta);

  return debugPostAllocate(
    smartEnlist(static_cast<BigNode*>(ptr)),
    bytes,
    szOut
  );
}
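Example 4 factors the jemalloc slow path into an out-of-line helper that fills ptr and szOut through reference parameters and returns the usable block. A hedged sketch of the kind of inline wrapper that would call it follows; the wrapper body is reconstructed from the helper's signature and Example 5's MemBlock return, not taken from the source.

// Hypothetical inline wrapper: keeps the hot call site small and lets the
// NEVER_INLINE helper fill both out-parameters on the big-allocation path.
template<bool callerSavesActualSize>
inline MemBlock MemoryManager::smartMallocSizeBig(size_t bytes) {
  void* ptr;     // receives the raw mallocx block, BigNode header included
  size_t szOut;  // receives the usable payload size
  auto const block =
    smartMallocSizeBigHelper<callerSavesActualSize>(ptr, szOut, bytes);
  return {block, szOut};
}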
Example 5
template<bool callerSavesActualSize> NEVER_INLINE
MemBlock MemoryManager::smartMallocSizeBig(size_t bytes) {
#ifdef USE_JEMALLOC
  auto const n = static_cast<BigNode*>(
    mallocx(debugAddExtra(bytes + sizeof(BigNode)), 0)
  );
  auto szOut = debugRemoveExtra(sallocx(n, 0) - sizeof(BigNode));
  // NB: We don't report the BigNode header size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust jemalloc stats, otherwise we'd double-count the direct allocation.
  JEMALLOC_STATS_ADJUST(&m_stats, delta);
#else
  m_stats.usage += bytes;
  auto const n = static_cast<BigNode*>(
    safe_malloc(debugAddExtra(bytes + sizeof(BigNode)))
  );
  auto szOut = bytes;
#endif
  auto ptrOut = debugPostAllocate(smartEnlist(n), bytes, szOut);
  FTRACE(3, "smartMallocSizeBig: {} ({} requested, {} usable)\n",
         ptrOut, bytes, szOut);
  return {ptrOut, szOut};
}
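A hedged usage sketch, assuming MemBlock is a {ptr, size} pair as the return statement implies, and that a MemoryManager instance mm is in scope:

// Hypothetical call site for a large allocation. Under USE_JEMALLOC, b.size
// is the usable size and may exceed the request; otherwise it equals it.
MemBlock b = mm.smartMallocSizeBig<false>(100000);
memset(b.ptr, 0, b.size);  // the whole usable block is valid to touch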