Example #1
// allocate nbytes from the current slab, aligned to 16 bytes
void* MemoryManager::slabAlloc(size_t nbytes) {
  const size_t kAlignMask = 15;
  assert((nbytes & 7) == 0); // sizes arrive as 8-byte multiples
  // round m_front up to the next 16-byte boundary
  char* ptr = (char*)(uintptr_t(m_front + kAlignMask) & ~kAlignMask);
  if (ptr + nbytes <= m_limit) {
    m_front = ptr + nbytes; // bump the cursor past this allocation
    return ptr;
  }
  return newSlab(nbytes); // current slab exhausted; start a fresh one
}
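The `& ~kAlignMask` line above is the standard round-up-to-a-power-of-two idiom: add `align - 1`, then clear the low bits. A minimal standalone sketch of the same arithmetic (the `alignUp` name is mine, not HHVM's):

#include <cassert>
#include <cstdint>

// Round addr up to the next multiple of align (align must be a power of two).
inline uintptr_t alignUp(uintptr_t addr, uintptr_t align) {
  const uintptr_t mask = align - 1;  // 15 for 16-byte alignment, as in kAlignMask
  return (addr + mask) & ~mask;      // adding mask then clearing low bits rounds up
}

int main() {
  assert(alignUp(0, 16) == 0);   // already-aligned values are unchanged
  assert(alignUp(1, 16) == 16);  // anything past a boundary rounds to the next one
  assert(alignUp(17, 16) == 32);
}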
Example #2
/*
 * Allocate `bytes' from the current slab, aligned to kSmartSizeAlign.
 */
void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  FTRACE(3, "slabAlloc({}, {})\n", bytes, index);
  size_t nbytes = debugAddExtra(smartSizeClass(bytes));

  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmartSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmartSizeAlignMask) == 0);

  if (UNLIKELY(m_bypassSlabAlloc)) {
    // Stats correction; smartMallocSizeBig() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    // smartMallocSizeBig already wraps its allocation in a debug header, but
    // the caller will try to do it again, so we need to adjust this pointer
    // before returning it.
    return ((char*)smartMallocSizeBig<false>(nbytes).ptr) - kDebugExtraSize;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }
  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit) {
    nPrealloc = kSmartPreallocCountLimit;
  } else {
    nPrealloc = kSmartPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
nPrealloc = (uintptr_t(m_limit) - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    auto usable = debugRemoveExtra(nbytes);
    auto frag = debugPostAllocate(p, usable, usable);
    debugPreFree(frag, usable, usable);
    m_freelists[index].push(frag, usable);
  }
  return ptr;
}
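All six variants share the same fast path: treat `m_front` as a bump pointer, advance it by `nbytes`, and fall back to `newSlab` when the advance would cross `m_limit`. A toy model of just that core, under assumed names (`BumpSlab`, `front_`, and `limit_` are illustrative, and plain `malloc` stands in for HHVM's real slab source):

#include <cstddef>
#include <cstdlib>

// Hypothetical sketch of the bump-pointer fast path; not HHVM's actual class.
struct BumpSlab {
  static constexpr size_t kSlabSize = 2 << 20;
  char* front_ = nullptr;
  char* limit_ = nullptr;

  void* alloc(size_t nbytes) {
    if (front_ && front_ + nbytes <= limit_) { // fast path: room in current slab
      void* ptr = front_;
      front_ += nbytes;                        // bump the cursor
      return ptr;
    }
    return newSlab(nbytes);                    // slow path: open a fresh slab
  }

  void* newSlab(size_t nbytes) {
    char* slab = static_cast<char*>(std::malloc(kSlabSize));
    front_ = slab + nbytes; // the first nbytes become the returned allocation
    limit_ = slab + kSlabSize;
    return slab;
  }
};

Everything after that fast path in examples #2 through #6 exists to refill the per-size-class freelists in bulk, so later allocations of the same size class never enter this method at all.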
Example #3
/*
 * Allocate `bytes' from the current slab, aligned to kSmallSizeAlign.
 */
inline void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  FTRACE(3, "slabAlloc({}, {}): m_front={}, m_limit={}\n", bytes, index,
            m_front, m_limit);
  uint32_t nbytes = smallIndex2Size(index);

  assert(bytes <= nbytes);
  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmallSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmallSizeAlignMask) == 0);

  if (UNLIKELY(m_bypassSlabAlloc)) {
    // Stats correction; mallocBigSize() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    return mallocBigSize<false>(nbytes).ptr;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }
  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmallPreallocCountLimit <= kSmallPreallocBytesLimit) {
    nPrealloc = kSmallPreallocCountLimit;
  } else {
    nPrealloc = kSmallPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
nPrealloc = (uintptr_t(m_limit) - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    m_freelists[index].push(p, nbytes);
  }
  FTRACE(4, "slabAlloc({}, {}) --> ptr={}, m_front={}, m_limit={}\n", bytes,
            index, ptr, m_front, m_limit);
  return ptr;
}
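Examples #2, #3, #4, and #6 all size their preallocation run the same way: take a fixed object-count cap unless that many objects would exceed a byte budget, then clamp to what actually fits before `m_limit`. The same decision as a standalone function (the constant values here are illustrative, not HHVM's):

#include <cstddef>

constexpr size_t kPreallocCountLimit = 8;    // illustrative cap on object count
constexpr size_t kPreallocBytesLimit = 4096; // illustrative cap on total bytes

// How many extra nbytes-sized objects to carve off, given `avail` bytes
// remaining in the slab. Mirrors the nPrealloc computation above.
size_t preallocCount(size_t nbytes, size_t avail) {
  size_t n = nbytes * kPreallocCountLimit <= kPreallocBytesLimit
                 ? kPreallocCountLimit           // count cap fits the byte budget
                 : kPreallocBytesLimit / nbytes; // otherwise spend the byte budget
  if (n * nbytes > avail) n = avail / nbytes;    // never reach past m_limit
  return n;
}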
Example #4
/*
 * Allocate `bytes' from the current slab, aligned to kSmartSizeAlign.
 */
void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  size_t nbytes = debugAddExtra(smartSizeClass(bytes));

  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmartSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmartSizeAlignMask) == 0);

  if (UNLIKELY(m_profctx.flag)) {
    // Stats correction; smartMallocSizeBig() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    return smartMallocSizeBig<false>(nbytes).first;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }

  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit) {
    nPrealloc = kSmartPreallocCountLimit;
  } else {
    nPrealloc = kSmartPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
nPrealloc = (uintptr_t(m_limit) - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    auto usable = debugRemoveExtra(nbytes);
    m_freelists[index].push(
        debugPreFree(debugPostAllocate(p, usable, usable), usable, usable));
  }
  return ptr;
}
Example #5
/*
 * Allocate `bytes' from the current slab, aligned to kSmallSizeAlign.
 */
inline void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  FTRACE(3, "slabAlloc({}, {}): m_front={}, m_limit={}\n", bytes, index,
            m_front, m_limit);
  uint32_t nbytes = smallIndex2Size(index);

  assert(bytes <= nbytes);
  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmallSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmallSizeAlignMask) == 0);

  if (UNLIKELY(m_bypassSlabAlloc)) {
    // Stats correction; mallocBigSize() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    return mallocBigSize<false>(nbytes).ptr;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }
  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nSplit = kNContigTab[index] - 1;
  uintptr_t avail = uintptr_t(m_limit) - uintptr_t(m_front);
  if (UNLIKELY(nSplit * nbytes > avail)) {
    nSplit = avail / nbytes; // Expensive division.
  }
  if (nSplit > 0) {
    void* tail = m_front;
    uint32_t tailBytes = nSplit * nbytes;
    m_front = (void*)(uintptr_t(m_front) + tailBytes);
    splitTail(tail, tailBytes, nSplit, nbytes, index);
  }
  FTRACE(4, "slabAlloc({}, {}) --> ptr={}, m_front={}, m_limit={}\n", bytes,
            index, ptr, m_front, m_limit);
  return ptr;
}
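Example #5 is the later refinement: instead of computing a preallocation count arithmetically on every call, it looks the count up in a per-size-class table (`kNContigTab`) and divides only on the rare path where that count would overrun the slab, then hands the whole tail to `splitTail` in one call rather than pushing objects one by one. A sketch of that count computation (the table contents here are illustrative):

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for kNContigTab: contiguous objects per size class.
constexpr unsigned kNContigTab[] = {128, 64, 32, 16, 8, 4, 2, 1};

// How many extra objects to split off after the returned one. The division
// only runs when the table-driven count does not fit before `limit`.
unsigned splitCount(unsigned index, size_t nbytes,
                    uintptr_t front, uintptr_t limit) {
  unsigned nSplit = kNContigTab[index] - 1; // common case: pure table lookup
  uintptr_t avail = limit - front;
  if (nSplit * nbytes > avail) {
    nSplit = avail / nbytes;                // rare case: expensive division
  }
  return nSplit;
}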
Example #6
// allocate nbytes from the current slab, aligned to kSmartSizeAlign
void* MemoryManager::slabAlloc(size_t nbytes, unsigned index) {
  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmartSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmartSizeAlignMask) == 0);
  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }

  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit) {
    nPrealloc = kSmartPreallocCountLimit;
  } else {
    nPrealloc = kSmartPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
nPrealloc = (uintptr_t(m_limit) - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    auto usable = debugRemoveExtra(nbytes);
    m_freelists[index].push(
        debugPreFree(debugPostAllocate(p, usable, usable), usable, usable));
  }
  return ptr;
}