void *malloc(int size)
{
	t_header	*tmp;
	int			total_size;

	if (size <= 0)
		return (NULL);
	lock_thread();
	/* Round the request up to the next multiple of sizeof(int). */
	if ((size % sizeof(int)) != 0)
		size += (sizeof(int) - (size % sizeof(int)));
	if ((tmp = (t_header *)findFreeBlock(size)) == NULL)
		tmp = (t_header *)getMoreMem(size + sizeof(*tmp));
	else
	{
		/* Split the block if the leftover is large enough to be useful. */
		if (tmp->size - size >= THRESHOLD)
		{
			total_size = tmp->size + sizeof(*tmp);
			tmp->size = size;
			split_mid(tmp, total_size);
		}
		deleteFromFreeList(tmp);
	}
	unlock_thread();
	if (tmp == NULL)
		return (NULL);
	/* Return the payload, which starts right after the header. Casting
	** through char * keeps the arithmetic valid on 64-bit systems, where
	** the original (int)tmp cast would truncate the pointer. */
	return ((void *)((char *)tmp + sizeof(*tmp)));
}
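/*
** A minimal sketch of the declarations the routine above relies on. The
** real t_header layout, THRESHOLD value, and helper signatures are defined
** elsewhere in this allocator; everything below is an illustrative
** assumption, not the actual implementation.
*/
typedef struct s_header
{
	int				size;	/* payload size in bytes (assumed field)     */
	struct s_header	*next;	/* next block in the free list (assumed)     */
}				t_header;

#define THRESHOLD 16		/* assumed minimum leftover worth splitting  */

void	lock_thread(void);							/* serialize heap access */
void	unlock_thread(void);
void	*findFreeBlock(int size);					/* search the free list  */
void	*getMoreMem(int size);						/* grow the heap (sbrk?) */
void	split_mid(t_header *block, int total_size);	/* carve off the leftover */
void	deleteFromFreeList(t_header *block);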
///
/// Given a size, traverse the arena from the start while looking for
/// a free chunk of memory that can satisfy the allocation
/// request. The arena free list is updated as the traversal is
/// performed.
///
void *PArena::allocFromUpdatedFreeList(
    size_t sz, bool does_need_cache_line_alignment, bool does_need_logging)
{
#ifdef _FORCE_FAIL
    fail_program();
#endif
    // TODO: add support for cache line alignment
    if (does_need_cache_line_alignment) return nullptr;

    size_t actual_sz = PMallocUtil::get_actual_alloc_size(sz);
    char *mem = static_cast<char*>(StartAddr_);
    while (mem < (char*)CurrAllocAddr_) {
        size_t mem_sz = PMallocUtil::get_requested_alloc_size_from_mem(mem);
        size_t actual_mem_sz = PMallocUtil::get_actual_alloc_size(mem_sz);
        if (!PMallocUtil::is_mem_allocated(mem)) {
            if (actual_sz > actual_mem_sz) {
                // This address may be in the free list already. But the
                // implementation ensures no duplicates are added.
                insertToFreeList(
                    PMallocUtil::get_bin_number(
                        *(reinterpret_cast<size_t*>(mem))),
                    mem);
                mem += actual_mem_sz;
                continue;
            }

            void *carved_mem = nullptr;
            if (actual_sz + PMallocUtil::get_smallest_actual_alloc_size()
                <= actual_mem_sz)
                carved_mem = carveExtraMem(mem, actual_sz, actual_mem_sz);
            else assert(actual_sz == actual_mem_sz);

            // If we fail here, the above carving does not take effect
            *(reinterpret_cast<size_t*>(mem)) = sz;
            // If we fail here or anywhere above, no memory is leaked

#ifndef _DISABLE_ALLOC_LOGGING
            if (does_need_logging)
                nvm_log_alloc(mem + sizeof(size_t));
#endif

            *(reinterpret_cast<size_t*>(mem + sizeof(size_t))) = true;

            // The above metadata updates are to the same cache line
            NVM_FLUSH(mem);

            // If we fail here, the above allocated memory may be leaked
            deleteFromFreeList(PMallocUtil::get_bin_number(mem_sz), mem);

            if (carved_mem)
                insertToFreeList(
                    PMallocUtil::get_bin_number(
                        *(reinterpret_cast<size_t*>(carved_mem))),
                    carved_mem);

            incrementActualAllocedStats(actual_sz);
            return static_cast<void*>(mem + PMallocUtil::get_metadata_size());
        }
        mem += actual_mem_sz;
    }
    return nullptr;
}
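//
// A minimal usage sketch, assuming a PArena instance and the surrounding
// allocator plumbing. pmalloc_sketch is a hypothetical caller; only
// allocFromUpdatedFreeList comes from the code above. The metadata layout
// implied there is a size_t requested-size word followed by a size_t
// allocation flag, with the payload starting at
// mem + PMallocUtil::get_metadata_size().
//
void *pmalloc_sketch(PArena *arena, size_t sz)
{
    // Try the free-list path first: no cache-line alignment (unsupported
    // above) and with persistent allocation logging enabled.
    void *mem = arena->allocFromUpdatedFreeList(
        sz,
        /* does_need_cache_line_alignment = */ false,
        /* does_need_logging = */ true);

    // On a nullptr return, the caller would presumably fall back to bump
    // allocation at CurrAllocAddr_ or to a fresh arena (not shown here).
    return mem;
}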