// Allocate `size` bytes from the pool allocator.
//
// Small requests (anything that fits in a default chunk after per-chunk
// overhead) are served from the per-bucket free list when one is available,
// otherwise by bumping the chunk's brk pointer. Large requests get a
// dedicated chunk sized to hold them.
//
// NOTE(review): MARK_DEFINED / MARK_UNDEFINED / REPORT_MALLOC look like
// valgrind-style client-request wrappers — TODO confirm against their
// definitions. Their exact placement around the free-list read below is
// deliberate; do not reorder these statements.
static void * alloc_do_malloc (struct Alloc *alloc, uint32_t size) {
  if (size < (alloc->default_mmap_size - chunk_overhead ())) {
    // Small allocation: round up to a size-class bucket.
    uint8_t bucket = size_to_bucket (size);
    if (alloc->buckets[bucket] != 0) {
      // fast path.
      // Pop the head of this bucket's free list. The block's bytes are
      // "free" as far as the checker is concerned, so the embedded `next`
      // pointer is briefly marked defined just long enough to read it,
      // then marked undefined again before the block is handed out.
      struct AllocAvailable *avail = alloc->buckets[bucket];
      MARK_DEFINED (avail, sizeof (void *));
      struct AllocAvailable *next = avail->next;
      MARK_UNDEFINED (avail, sizeof (void *));
      alloc->buckets[bucket] = next;
      // Report only the user-requested `size`, not the bucket size.
      REPORT_MALLOC (avail, size);
      return (uint8_t *) avail;
    }
    // slow path
    // Free list is empty: carve a fresh block of the full bucket size out
    // of the current chunk (alloc_brk presumably grows/advances the chunk
    // as needed — TODO confirm against its definition).
    struct AllocAvailable *avail =
      (struct AllocAvailable *) alloc_brk (alloc, bucket_to_size (bucket));
    REPORT_MALLOC (avail, size);
    avail->next = 0;
    return (uint8_t *) avail;
  } else {
    // Large allocation: reserve a dedicated chunk big enough for the
    // payload plus per-chunk overhead, then take `size` bytes from it.
    alloc_chunk (alloc, size + chunk_overhead ());
    uint8_t *buffer = alloc_brk (alloc, size);
    REPORT_MALLOC (buffer, size);
    return buffer;
  }
}
// Construct a Chunk that serves fixed-size allocations for size-class
// `bucket` on behalf of `heap`. All slots start out free: the free bitmap
// is filled with 1-bits and no pages are marked dirty.
//
// NOTE(review): members are assumed to be declared in the same order as
// this initializer list — `max_allocations_` reads `allocation_size_`, so
// `allocation_size_` must be declared (and therefore initialized) first.
// Keep the list in sync with the class definition if members are reordered.
Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),  // intrusive list node pointing back at this chunk
      heap_(heap),
      bucket_(bucket),
      // Byte size of each allocation slot in this chunk.
      allocation_size_( bucket_to_size(bucket)),
      // Number of slots that fit in the chunk's usable area.
      max_allocations_( kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      // Every slot is free initially.
      free_count_( max_allocations_),
      frees_since_purge_(0) {
  // No pages have been written to yet.
  memset(dirty_pages_, 0, sizeof(dirty_pages_));
  // All-ones bitmap: a set bit presumably means "slot free" — consistent
  // with free_count_ == max_allocations_ above.
  memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}