Example No. 1
inline void* MemoryManager::smartRealloc(void* inputPtr, size_t nbytes) {
  FTRACE(1, "smartRealloc: {} to {}\n", inputPtr, nbytes);
  assert(nbytes > 0);

  void* ptr = debug ? static_cast<DebugHeader*>(inputPtr) - 1 : inputPtr;

  auto const n = static_cast<SweepNode*>(ptr) - 1;
  if (LIKELY(n->padbytes <= kMaxSmartSize)) {
    void* newmem = smart_malloc(nbytes);
    auto const copySize = std::min(
      n->padbytes - sizeof(SmallNode) - (debug ? sizeof(DebugHeader) : 0),
      nbytes
    );
    memcpy(newmem, inputPtr, copySize);
    smart_free(inputPtr);
    return newmem;
  }

  // Ok, it's a big allocation.  Since we don't know how big it is
  // (i.e. how much data we should memcpy), we have no choice but to
  // ask malloc to realloc for us.
  auto const oldNext = n->next;
  auto const oldPrev = n->prev;

  // Note: a null return from realloc() (OOM) would not be caught here.
  auto const newNode = static_cast<SweepNode*>(
    realloc(n, debugAddExtra(nbytes + sizeof(SweepNode)))
  );

  refreshStatsHelper();
  if (newNode != n) {
    oldNext->prev = oldPrev->next = newNode;
  }
  return debugPostAllocate(newNode + 1, 0, 0);
}
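
The small path above copies std::min(old payload, new request) bytes, so the copy is safe whether the allocation grows or shrinks. A minimal standalone sketch of that computation; SmallNode and the `debug` flag come from the example, while kDebugHeaderSize is a made-up placeholder for sizeof(DebugHeader):

#include <algorithm>
#include <cstddef>
#include <cstdio>

struct SmallNode { size_t padbytes; }; // stand-in for HHVM's small header
constexpr bool debug = false;
constexpr size_t kDebugHeaderSize = 16; // placeholder for sizeof(DebugHeader)

// Bytes that are safe to copy on realloc: never more than the old payload,
// never more than the new request.
size_t copySize(size_t padbytes, size_t newBytes) {
  size_t oldPayload = padbytes - sizeof(SmallNode)
                    - (debug ? kDebugHeaderSize : 0);
  return std::min(oldPayload, newBytes);
}

int main() {
  std::printf("%zu\n", copySize(64, 128)); // growing: old payload (56 on LP64)
  std::printf("%zu\n", copySize(64, 16));  // shrinking: new request, 16
}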
Example No. 2
NEVER_INLINE
void* MemoryManager::smartMallocSizeBigHelper(void*& ptr,
                                              size_t& szOut,
                                              size_t bytes) {
  m_stats.usage += bytes;
  allocm(&ptr, &szOut, debugAddExtra(bytes + sizeof(SweepNode)), 0);
  szOut = debugRemoveExtra(szOut - sizeof(SweepNode));
  return debugPostAllocate(
    smartEnlist(static_cast<SweepNode*>(ptr)),
    bytes,
    szOut
  );
}
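
The helper pads the raw size with debugAddExtra() before allocm() and strips the pad again with debugRemoveExtra() when reporting the usable size; the two must be exact inverses or sizes would drift between allocation and free. A self-contained sketch of that pair, assuming a hypothetical 16-byte debug header:

#include <cassert>
#include <cstddef>

constexpr bool debug = true;
constexpr size_t kDebugExtraSize = 16; // hypothetical debug-header size

size_t debugAddExtra(size_t sz)    { return debug ? sz + kDebugExtraSize : sz; }
size_t debugRemoveExtra(size_t sz) { return debug ? sz - kDebugExtraSize : sz; }

int main() {
  for (size_t sz : {16, 64, 4096}) {
    // The pair must round-trip to the identity, mirroring how the helper
    // pads the request before allocm() and strips the pad from the usable
    // size it reports back through szOut.
    assert(debugRemoveExtra(debugAddExtra(sz)) == sz);
  }
}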
Example No. 3
/*
 * Allocate `bytes' from the current slab, aligned to kSmartSizeAlign.
 */
void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  FTRACE(3, "slabAlloc({}, {})\n", bytes, index);
  size_t nbytes = debugAddExtra(smartSizeClass(bytes));

  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmartSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmartSizeAlignMask) == 0);

  if (UNLIKELY(m_bypassSlabAlloc)) {
    // Stats correction; smartMallocSizeBig() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    // smartMallocSizeBig already wraps its allocation in a debug header, but
    // the caller will try to do it again, so we need to adjust this pointer
    // before returning it.
    return ((char*)smartMallocSizeBig<false>(nbytes).ptr) - kDebugExtraSize;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }
  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit) {
    nPrealloc = kSmartPreallocCountLimit;
  } else {
    nPrealloc = kSmartPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
      nPrealloc = ((uintptr_t)m_limit - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    auto usable = debugRemoveExtra(nbytes);
    // Use a distinct name: the loop condition tests the outer `ptr`.
    auto block = debugPostAllocate(p, usable, usable);
    debugPreFree(block, usable, usable);
    m_freelists[index].push(block, usable);
  }
  return ptr;
}
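
The preallocation count above is bounded by both a count limit and a byte limit, then clamped to the space left in the slab. A standalone sketch of the arithmetic, with illustrative constants rather than HHVM's real values:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr unsigned kSmartPreallocCountLimit = 8;    // illustrative
constexpr size_t   kSmartPreallocBytesLimit = 4096; // illustrative

unsigned preallocCount(size_t nbytes, uintptr_t front, uintptr_t limit) {
  unsigned n = (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit)
      ? kSmartPreallocCountLimit                      // count limit binds
      : unsigned(kSmartPreallocBytesLimit / nbytes);  // byte limit binds
  if (front + n * nbytes > limit) {                   // don't overrun the slab
    n = unsigned((limit - front) / nbytes);
  }
  return n;
}

int main() {
  std::printf("%u\n", preallocCount(512, 0, 1 << 20));  // 8: count limit
  std::printf("%u\n", preallocCount(1024, 0, 1 << 20)); // 4: 4096 / 1024
  std::printf("%u\n", preallocCount(512, 0, 1024));     // 2: slab nearly full
}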
Example No. 4
template<bool callerSavesActualSize> NEVER_INLINE
MemBlock MemoryManager::smartMallocSizeBig(size_t bytes) {
#ifdef USE_JEMALLOC
  auto const n = static_cast<BigNode*>(
    mallocx(debugAddExtra(bytes + sizeof(BigNode)), 0)
  );
  auto szOut = debugRemoveExtra(sallocx(n, 0) - sizeof(BigNode));
  // NB: We don't report the BigNode size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust the jemalloc stats so this direct allocation isn't counted twice.
  JEMALLOC_STATS_ADJUST(&m_stats, delta);
#else
  m_stats.usage += bytes;
  auto const n = static_cast<BigNode*>(
    safe_malloc(debugAddExtra(bytes + sizeof(BigNode)))
  );
  auto szOut = bytes;
#endif
  auto ptrOut = debugPostAllocate(smartEnlist(n), bytes, szOut);
  FTRACE(3, "smartMallocSizeBig: {} ({} requested, {} usable)\n",
         ptrOut, bytes, szOut);
  return {ptrOut, szOut};
}
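
The callerSavesActualSize flag decides which size to charge to m_stats.usage: the actual usable size when the caller will remember it and free with it, or the requested size so the eventual free balances exactly. A toy model of that decision; the Stats struct and chargeBig name are illustrative, not HHVM's API:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Stats { int64_t usage = 0; };

// Charge the usable size if the caller tracks it, else the requested size.
template<bool callerSavesActualSize>
void chargeBig(Stats& s, size_t requested, size_t usable) {
  s.usage += int64_t(callerSavesActualSize ? usable : requested);
}

int main() {
  Stats s;
  chargeBig<false>(s, 100, 128); // caller only knows it asked for 100
  chargeBig<true>(s, 100, 128);  // caller tracks the real 128 usable bytes
  std::printf("%lld\n", (long long)s.usage); // 228
}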
Example No. 5
template<bool callerSavesActualSize> NEVER_INLINE
void* MemoryManager::smartMallocSizeBigHelper(void*& ptr,
                                              size_t& szOut,
                                              size_t bytes) {
#ifdef USE_JEMALLOC_MALLOCX
  ptr = mallocx(debugAddExtra(bytes + sizeof(SweepNode)), 0);
  szOut = debugRemoveExtra(sallocx(ptr, 0) - sizeof(SweepNode));
#else
  allocm(&ptr, &szOut, debugAddExtra(bytes + sizeof(SweepNode)), 0);
  szOut = debugRemoveExtra(szOut - sizeof(SweepNode));
#endif

  // NB: We don't report the SweepNode size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust the jemalloc stats so this direct allocation isn't counted twice.
  JEMALLOC_STATS_ADJUST(&m_stats, delta);

  return debugPostAllocate(
    smartEnlist(static_cast<SweepNode*>(ptr)),
    bytes,
    szOut
  );
}
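
jemalloc later removed the experimental allocm() in favor of the *allocx API used in the first branch. A sketch of the correspondence, assuming a jemalloc build that ships mallocx(), sallocx(), and dallocx(); the 0/1 return codes are simplified stand-ins for ALLOCM_SUCCESS and ALLOCM_ERR_OOM:

#include <jemalloc/jemalloc.h>
#include <cstddef>

// Emulates the experimental allocm() from the #else branch on top of the
// non-experimental *allocx API.
int allocm_compat(void** ptr, size_t* rsize, size_t size, int flags) {
  void* p = mallocx(size, flags); // allocate at least `size` bytes
  if (p == nullptr) return 1;
  *ptr = p;
  if (rsize != nullptr) {
    *rsize = sallocx(p, flags);   // actual usable size, as allocm reported
  }
  return 0;
}

int main() {
  void* p = nullptr;
  size_t usable = 0;
  if (allocm_compat(&p, &usable, 100, 0) == 0) {
    // `usable` may exceed 100; that is the slack debugRemoveExtra() trims.
    dallocx(p, 0);
  }
}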
Example No. 6
/*
 * Allocate `bytes' from the current slab, aligned to kSmartSizeAlign.
 */
void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  size_t nbytes = debugAddExtra(smartSizeClass(bytes));

  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmartSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmartSizeAlignMask) == 0);

  if (UNLIKELY(m_profctx.flag)) {
    // Stats correction; smartMallocSizeBig() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    return smartMallocSizeBig<false>(nbytes).first;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }

  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit) {
    nPrealloc = kSmartPreallocCountLimit;
  } else {
    nPrealloc = kSmartPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
      nPrealloc = ((uintptr_t)m_limit - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  auto const usable = debugRemoveExtra(nbytes);
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    m_freelists[index].push(
        debugPreFree(debugPostAllocate(p, usable, usable), usable, usable));
  }
  return ptr;
}
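
The final loop stocks the freelist by walking backwards from the advanced m_front, pushing every preallocated block except the first, which the caller receives. A toy model with made-up addresses:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t slab  = 0x1000; // pretend slab base
  const size_t nbytes   = 0x100;  // one size-class block
  const uintptr_t ptr   = slab;   // block handed to the caller
  const uintptr_t front = slab + 4 * nbytes; // m_front after 3 extra blocks

  // Walk backwards from front, as the loop in slabAlloc() does; everything
  // except `ptr` itself would be pushed onto m_freelists[index].
  for (uintptr_t p = front - nbytes; p != ptr; p -= nbytes) {
    std::printf("push %#zx\n", (size_t)p); // 0x1300, 0x1200, 0x1100
  }
  std::printf("return %#zx\n", (size_t)ptr); // 0x1000 goes to the caller
}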
Example No. 7
template<bool callerSavesActualSize> NEVER_INLINE
MemBlock MemoryManager::smartMallocSizeBig(size_t bytes) {
  auto block = m_heap.allocBig(debugAddExtra(bytes), HeaderKind::BigObj);
  auto szOut = debugRemoveExtra(block.size);
#ifdef USE_JEMALLOC
  // NB: We don't report the big-object header size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust the jemalloc stats so this direct allocation isn't counted twice.
  m_stats.borrow(delta);
#else
  m_stats.usage += bytes;
#endif
  updateBigStats();
  auto ptrOut = debugPostAllocate(block.ptr, bytes, szOut);
  FTRACE(3, "smartMallocSizeBig: {} ({} requested, {} usable)\n",
         ptrOut, bytes, szOut);
  return {ptrOut, szOut};
}
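
A hypothetical call site for the function above; MemoryManager is stubbed minimally so the shape of the contract compiles, and only the MemBlock { ptr, size } pair and the meaning of the template flag come from the example:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct MemBlock { void* ptr; size_t size; };

// Minimal stub: a real build would route through m_heap.allocBig() and the
// jemalloc stats adjustments shown above.
struct FakeManager {
  template<bool callerSavesActualSize>
  MemBlock smartMallocSizeBig(size_t bytes) {
    return {std::malloc(bytes), bytes};
  }
};

int main() {
  FakeManager mm;
  // <true>: the caller promises to remember block.size and free with it,
  // which is why the stats above charge the usable size, not the request.
  MemBlock block = mm.smartMallocSizeBig<true>(100 * 1024);
  std::printf("got %zu usable bytes at %p\n", block.size, block.ptr);
  std::free(block.ptr);
}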