//this function takes 2 pointers as arguments //it will merge them into one and add the merged to the freelist //NOTE: the order matters. left pointer has to be pointer of header of left mem block ObjectHeader * mergeMems(ObjectHeader * leftHeader, ObjectFooter * rightFooter) { size_t mergedSize = leftHeader->_objectSize + rightFooter->_objectSize; //determine which side is the free one if (leftHeader->_allocated == 0) { //remove the curren free block from list removeFromFreeList(leftHeader); } if (rightFooter->_allocated == 0) { //remove the curren free block from list ObjectHeader * rightHeader = (ObjectHeader *)( (char *)rightFooter + sizeof(ObjectFooter) - rightFooter->_objectSize); removeFromFreeList(rightHeader); } //then change the size. //IMPORTANT: if change the size first, more will be freed then what's supposed to be leftHeader->_objectSize = mergedSize; rightFooter->_objectSize = mergedSize; return leftHeader; }
/**********************************************************
 * coalesce (Immediate Coalescing)
 * Covers the 4 cases discussed in the text:
 *   - both neighbours are allocated
 *   - the next block is available for coalescing
 *   - the previous block is available for coalescing
 *   - both neighbours are available for coalescing
 * Based on these cases, the coalesced neighbours are removed
 * from the free lists and the merged block is re-inserted.
 * Returns the payload pointer of the (possibly moved) free block.
 **********************************************************/
void *coalesce(void *bp)
{
    // Allocation bits of the physical neighbours: read the previous block's
    // footer and the next block's header (boundary-tag scheme).
    size_t prev_alloc = GET_ALLOC(FTRP(PREV_BLKP(bp)));
    size_t next_alloc = GET_ALLOC(HDRP(NEXT_BLKP(bp)));
    size_t size = GET_SIZE(HDRP(bp));

    if (prev_alloc && next_alloc) { /* Case 1: no neighbour is free */
        //add to free list
        insertToFreeList(bp);
        return bp;
    }

    else if (prev_alloc && !next_alloc) { /* Case 2: merge with next block */
        // Next block disappears into this one — unlink it first.
        removeFromFreeList(NEXT_BLKP(bp));
        size += GET_SIZE(HDRP(NEXT_BLKP(bp)));
        // NOTE: header must be written before footer — FTRP(bp) locates the
        // footer via the size stored in bp's header.
        PUT(HDRP(bp), PACK(size, 0));
        PUT(FTRP(bp), PACK(size, 0));
        //add the new block to free list
        insertToFreeList(bp);
        return (bp);
    }

    else if (!prev_alloc && next_alloc) { /* Case 3: merge with previous block */
        removeFromFreeList(PREV_BLKP(bp));
        size += GET_SIZE(HDRP(PREV_BLKP(bp)));
        // bp's own header is still intact here, so FTRP(bp) is still valid;
        // the merged block starts at the previous block's header.
        PUT(FTRP(bp), PACK(size, 0));
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));
        //add the new block to free list
        insertToFreeList(PREV_BLKP(bp));
        return (PREV_BLKP(bp));
    }

    else { /* Case 4: both neighbours are free */
        removeFromFreeList(NEXT_BLKP(bp));
        removeFromFreeList(PREV_BLKP(bp));
        size += GET_SIZE(HDRP(PREV_BLKP(bp))) + GET_SIZE(FTRP(NEXT_BLKP(bp)));
        // New header at the previous block, new footer at the next block's end.
        PUT(HDRP(PREV_BLKP(bp)), PACK(size, 0));
        PUT(FTRP(NEXT_BLKP(bp)), PACK(size, 0));
        //add the new block to free list
        insertToFreeList(PREV_BLKP(bp));
        return (PREV_BLKP(bp));
    }
}
void* splitUntil(void* freeBuffer, int bufferSize, int desiredBufferSize) { int newBufferSize = bufferSize/2; // split and get two pointers void* smallerBufferOne = freeBuffer; void* smallerBufferTwo = (void*)((BYTE*) freeBuffer + newBufferSize); // remove from free list removeFromFreeList(freeBuffer, bufferSize); // add later pointer to smaller free list insertIntoFreeList(smallerBufferTwo, newBufferSize); // either return or recursively call function if (newBufferSize == desiredBufferSize) { setBitmap(smallerBufferOne, desiredBufferSize); bufferData_t* bufferData = (bufferData_t*)smallerBufferOne; bufferData->nextFreeBuffer = NULL; bufferData->bufferSize = desiredBufferSize; return smallerBufferOne; } else { return splitUntil(smallerBufferOne, newBufferSize, desiredBufferSize); } }
// call after unset bitmap for this buffer void coalesceFreeMemory(void** pointer, int* bufferSize) { // calculate buddy location void* startOfPage = BASEADDR(*pointer); // check if buddy is free void* buddyPtr = getBuddyPointer(startOfPage, *pointer, *bufferSize); if (!checkIfBitmapSet(buddyPtr,*bufferSize)) { // can coalesce! // need to remove it from freeList // handle inserting back in in kma_free removeFromFreeList(buddyPtr,*bufferSize); *bufferSize = *bufferSize*2; if (buddyPtr < *pointer) { *pointer = buddyPtr; } if (*bufferSize != PAGESIZE) { coalesceFreeMemory(pointer, bufferSize); } } // base case is either buddy isn't free of full page free }
// Merge two continous blocks, *first* and *second* // Both *first* and *second* must be valid // Afterwards, only *first* will remain valid // but will have size equal to both plus sizeof( block ) void blockMerge( heapAllocator* heap, block* first, block* second ) { //printf( "Allocator: Merging Blocks 0x" xPTRf " and 0x " xPTRf "\n", (uintptr_t)first, (uintptr_t)second ); vAssert( first && second ); vAssert( first->free && second->free ); // Both must be empty vAssert( ((char*)second - ((char*)first->data + first->size)) < kMaxAlignmentSpace ); // Contiguous vAssert( first->next == second && second->prev == first ); vAssert( !first->next || first->next == (void*)((uint8_t*)first->data + first->size )); vAssert( !second->next || second->next == (void*)((uint8_t*)second->data + second->size )); vAssert( second > first ); vAssert( second->next > second || second->next == NULL ); heap->total_free += sizeof( block ); heap->total_allocated -= sizeof( block ); removeFromFreeList( heap, second ); // We can't just add sizes, as there may be alignment padding. size_t true_size = second->size + ( (size_t)second->data - (size_t)second ); first->size += true_size; first->next = second->next; if ( second->next ) second->next->prev = first; memset( second, 0xED, sizeof( block )); vAssert( !first->next || first->next == (void*)((uint8_t*)first->data + first->size )); vAssert( !first->next || first->next->prev == first ); vAssert( !first->prev || first->prev->next == first ); }
// Immediately release the memory behind a handle, unless it was already
// freed. Detaches the handle from the freeable list and updates the
// allocation tally before freeing the data.
void MemMan::freeNow(MemHandle *bsMem) {
	if (bsMem->cond == MEM_FREED)
		return; // nothing to do — already released
	_alloced -= bsMem->size;
	removeFromFreeList(bsMem);
	free(bsMem->data);
	bsMem->cond = MEM_FREED;
}
void MemMan::checkMemoryUsage(void) { while ((_alloced > MAX_ALLOC) && _memListFree) { free(_memListFreeEnd->data); _memListFreeEnd->data = NULL; _memListFreeEnd->cond = MEM_FREED; _alloced -= _memListFreeEnd->size; removeFromFreeList(_memListFreeEnd); } }
void MemMan::flush(void) { while (_memListFree) { free(_memListFreeEnd->data); _memListFreeEnd->data = NULL; _memListFreeEnd->cond = MEM_FREED; _alloced -= _memListFreeEnd->size; removeFromFreeList(_memListFreeEnd); } if (_alloced) warning("MemMan::flush: Something's wrong: still %d bytes alloced", _alloced); }
// Change a handle's memory condition and keep the freeable list consistent
// with it. Only MEM_CAN_FREE and MEM_DONT_FREE are legal target states.
void MemMan::setCondition(MemHandle *bsMem, uint16 pCond) {
	if ((pCond == MEM_FREED) || (pCond > MEM_DONT_FREE))
		error("MemMan::setCondition: program tried to set illegal memory condition");
	if (bsMem->cond == pCond)
		return; // no state change
	bsMem->cond = pCond;
	// Membership in the freeable list mirrors the condition.
	if (pCond == MEM_DONT_FREE)
		removeFromFreeList(bsMem);
	else if (pCond == MEM_CAN_FREE)
		addToFreeList(bsMem);
}
// Allocate pSize bytes for the given handle and record the requested
// condition. A failed malloc is fatal. Finishes by trimming overall memory
// usage back under the cap.
void MemMan::alloc(MemHandle *bsMem, uint32 pSize, uint16 pCond) {
	_alloced += pSize;
	bsMem->data = (void*)malloc(pSize);
	if (!bsMem->data)
		error("MemMan::alloc(): Can't alloc %d bytes of memory.", pSize);
	bsMem->size = pSize;
	bsMem->cond = pCond;
	if (pCond == MEM_CAN_FREE) {
		// Odd request: why allocate memory that may be reclaimed at once?
		warning("%d Bytes alloced as FREEABLE.", pSize);
		addToFreeList(bsMem);
	} else if (bsMem->next || bsMem->prev) {
		// Handle is still linked into the freeable list from a previous
		// allocation — detach it now that it must not be reclaimed.
		removeFromFreeList(bsMem);
	}
	checkMemoryUsage();
}
/********************************************************** * mm_malloc * Allocate a block of size bytes. * The type of search is determined by find_fit * The decision of splitting is determined by spiltBlock * If no block satisfies the request, the heap is extended **********************************************************/ void *mm_malloc(size_t size) { size_t asize; /* adjusted block size */ size_t extendsize; /* amount to extend heap if no fit */ char * bp; /* Ignore spurious requests */ if (size == 0) return NULL; /* Adjust block size to include overhead and alignment reqs. */ if (size <= DSIZE) asize = 2 * DSIZE; else asize = DSIZE * ((size + (DSIZE) + (DSIZE - 1)) / DSIZE); /* Search the free list for a fit */ if ((bp = find_fit(asize)) != NULL) { //remove from the free list removeFromFreeList(bp); //break the block into smaller one if possible bp = splitBlock(bp, asize); size_t bsize = GET_SIZE(HDRP(bp)); //place the block setBlockHeaderFooter(bp, bsize, 1); return bp; } /* No fit found. Get more memory and place the block */ //Increasing chunksize by 16B gives huge improvment in binary test case. //we incease 16 for the reason that it matches size of header and footer extendsize = MAX(asize, CHUNKSIZE + 16); if ((bp = extend_heap(extendsize / WSIZE)) == NULL) return NULL; splitBlock(bp, asize); place(bp, asize); return bp; }
/**********************************************************
 * mm_realloc
 * Deals with a few cases:
 * 1. if the realloc size is smaller than the current size,
 *    split the current block and return the extra part to
 *    the free list
 * 2. if the next block is free, check whether merging the
 *    two blocks yields a block big enough for the request
 * 3. if the current block is at the end of the heap (next
 *    block is the zero-size epilogue), extend the heap by
 *    just the missing amount and absorb it into this block
 * 4. if the adjusted new size equals the old size, do nothing
 * 5. if the new size is 0, behave like free
 * 6. otherwise malloc a new block and copy the old data over
 *********************************************************/
void *mm_realloc(void *ptr, size_t size)
{
    /* Case 5: size == 0 is just free; return NULL. */
    if (size == 0) {
        mm_free(ptr);
        return NULL;
    }
    /* A NULL pointer is just malloc. */
    if (ptr == NULL)
        return (mm_malloc(size));

    void *oldptr = ptr;
    void *newptr;
    size_t copySize;
    size_t oldSize = GET_SIZE(HDRP(oldptr)); /* full block size incl. overhead */
    size_t asize;

    /* Adjust block size to include overhead and alignment reqs. */
    if (size <= DSIZE)
        asize = 2 * DSIZE;
    else
        asize = DSIZE * ((size + (DSIZE) + (DSIZE - 1)) / DSIZE);

    //case 4: sizes match after rounding — nothing to do
    if (oldSize == asize) {
        return ptr;
    }
    //case 1: shrinking — split off the tail and keep the front in place
    else if (oldSize > asize) {
        void* newptr = splitBlock(ptr, asize);
        place(newptr, asize);
        return newptr;
    }
    //case 2: growing and the next block is not the epilogue
    else if (GET_SIZE(HDRP(NEXT_BLKP(ptr))) != 0) {
        if (GET_ALLOC(HDRP(NEXT_BLKP(ptr))) == 0) {
            //size of this block merged with the free next block
            size_t msize = oldSize + GET_SIZE(HDRP(NEXT_BLKP(ptr)));
            if (msize >= asize) {
                //coalesce next block with current block in place
                removeFromFreeList(NEXT_BLKP(ptr));
                PUT(HDRP(ptr), PACK(msize, 0));
                PUT(FTRP(ptr), PACK(msize, 0));
                //split block if there is extra space
                void* newptr = splitBlock(ptr, asize);
                place(newptr, asize);
                return newptr;
            }
        }
        // NOTE(review): if the next block is allocated or too small we fall
        // through to case 6 below — intentional.
    }
    //case 3: growing and the next block IS the epilogue (end of heap)
    // NOTE(review): this "== 0" test is redundant — it is the exact negation
    // of case 2's condition, so this branch behaves like a plain else.
    else if (GET_SIZE(HDRP(NEXT_BLKP(ptr))) == 0) {
        //new size larger than old size and next block is epilogue:
        //extend the heap by only the missing amount and absorb it
        size_t esize = asize - oldSize; //calculate sufficient space to extend
        void* ebp = extend_heap(esize / WSIZE);
        if (ebp != NULL) {
            //coalesce the extended space into the current block
            PUT(HDRP(ptr), PACK(asize, 1));
            PUT(FTRP(ptr), PACK(asize, 1));
            return ptr;
        }
        // extend_heap failed: fall through to case 6 and try a fresh malloc
    }

    //case 6: allocate elsewhere and move the data
    newptr = mm_malloc(size);
    if (newptr == NULL)
        return NULL;
    /* Copy the old data. */
    // NOTE(review): copySize is the full old block size including
    // header/footer overhead; this relies on asize > oldSize on every path
    // that reaches here so the destination can hold it — verify.
    copySize = GET_SIZE(HDRP(oldptr));
    memcpy(newptr, oldptr, copySize);
    mm_free(oldptr);
    return newptr;
}
//when find a chunk of memory that's big enough //to tell if needed to split and make the remainder a node in freelist //the remainder has to be >= sizeof(header + footer) + 16 = 64 bytes void * allocateObject( size_t size ) { //step1: check if mem is initialized if ( !_initialized ) { _initialized = 1; initialize(); } // step 2: get the actual size needed // Add the ObjectHeader/Footer to the size and round the total size up to a multiple of // 8 bytes for alignment. size_t roundedSize = (size + sizeof(struct ObjectHeader) + sizeof(struct ObjectFooter) + 7) & ~7; // step3: traverse the freelist to find the first node that's large engough ObjectHeader * needle = _freeList->_next;//needle points to header of the first node in the list int minSize = sizeof(ObjectHeader) + sizeof(ObjectFooter) + 8; void * ptr_2b_returned = NULL; //all nodes in list are free, don't need to check while( needle->_allocated != 2) {//when it's 2, means reach the sentinel node size_t size_dif = (needle->_objectSize) - roundedSize; //step4: if the first fit found. 
if ( size_dif >= 0 ) { //step5: determine if the remainder is big enough //if so, split; if not use entire chunk as 1 node if (size_dif <= minSize) //one node { //the node is for use, thus has to return mem ptr ptr_2b_returned = (void *)createFormattedMemChunk((char *)needle, needle->_objectSize, 1); //take out "Header of this node" out of the freelist removeFromFreeList(needle); } else //split, one for use, which taken out from list; one to be added into list { char * ptr_remainder = (char *)needle + roundedSize;//gives the starting point of remainder node //createFormattedMemChunk(char * ptr, size_t size) -> for node that'll be used ptr_2b_returned = createFormattedMemChunk((char *)needle, roundedSize, 1); //createFormattedMemChunk(char * ptr, size_t size) -> for remainder node createFormattedMemChunk(ptr_remainder, size_dif, 0); //remove used and add remainder to the freelist //removeFromFreeList_And_addRemainderToFreeList((ObjectHeader *)needle, (ObjectHeader *)ptr_remainder); removeFromFreeList((ObjectHeader *)needle); addToFreeList((ObjectHeader *)ptr_remainder); } } needle = needle->_next; } if (ptr_2b_returned == NULL) //meaning no available mem for the request { //get another 2MB from OS and format it //needle points to the header(real one not fencepost) of the new chunk ObjectHeader * firstHeaderInNewChunk = getNewMemChunkFromOS_and_Initialize(); char * ptr_remainder = (char *)firstHeaderInNewChunk + roundedSize;//header of the free chunk //split it and add the remainder to freeelist. 
//size needed is roundedSize ptr_2b_returned = createFormattedMemChunk((char *)firstHeaderInNewChunk, roundedSize, 1);//for use size_t size_dif = firstHeaderInNewChunk->_objectSize - roundedSize; ptr_remainder = createFormattedMemChunk(ptr_remainder, size_dif, 0);//remainder to be added to list //freelist handling //removeFromFreeList_And_addRemainderToFreeList((ObjectHeader *)needle, (ObjectHeader *)ptr_remainder); removeFromFreeList(firstHeaderInNewChunk); addToFreeList((ObjectHeader *)ptr_remainder); } //unlock pthread_mutex_unlock(&mutex); // Return a pointer to usable memory return ptr_2b_returned; }
// Allocates *toAllocate* bytes from the given heapAllocator *heap*, with the
// returned data pointer aligned to *alignment* bytes.
// Will crash (vAssert) if out of memory.
// NEEDS TO BE THREADSAFE — the whole body runs under allocator_mutex.
void* heap_allocate_aligned( heapAllocator* heap, size_t toAllocate, size_t alignment, const char* source ) {
	(void)source;
	vmutex_lock( &allocator_mutex );
#ifdef MEM_DEBUG_VERBOSE
	printf( "HeapAllocator request for " dPTRf " bytes, " dPTRf " byte aligned.\n", toAllocate, alignment );
#endif
	// Fast path: small requests may be served from a fixed-size bitpool.
	bitpool* bit_pool = heap_findBitpool( heap, toAllocate );
	if ( bit_pool ) {
		void* data = bitpool_allocate( bit_pool, toAllocate );
		if ( data ) {
			vmutex_unlock( &allocator_mutex );
			return data;
		}
	}
	size_t size_original = toAllocate;
	toAllocate += alignment; // Make sure we have enough space to align
	block* b = heap_findEmptyBlock( heap, toAllocate );
	if ( !b ) {
		// Out of memory is fatal: dump the block list for diagnosis and assert.
		heap_dumpBlocks( heap );
		printError( "HeapAllocator out of memory on request for " dPTRf " bytes. Total size: " dPTRf " bytes, Used size: " dPTRf " bytes\n", toAllocate, heap->total_size, heap->total_allocated );
		vAssert( 0 );
	}
	vAssert( b->free );
	assertBlockInvariants( b );
	removeFromFreeList( heap, b );
	b->free = false;
	assertBlockInvariants( b );
	// Split the block when the surplus could hold a block header plus a
	// minimal payload; the remainder becomes a new block after this one.
	if ( b->size > ( toAllocate + sizeof( block ) + sizeof( block* ) * 2) ) {
		void* new_ptr = ((uint8_t*)b->data) + toAllocate;
		block* remaining = block_create( heap, new_ptr, b->size - toAllocate );
		block_insertAfter( b, remaining );
		b->size = toAllocate;
		// The new header consumes bookkeeping space.
		heap->total_allocated += sizeof( block );
		heap->total_free -= sizeof( block );
		validateBlockNext(b);
		validateBlockNext(remaining);
	}
	assertBlockInvariants( b );
	// Move the data pointer on enough to force alignment
	// (offset is in [0, alignment-1]; zero when already aligned).
	uintptr_t offset = alignment - (((uintptr_t)b->data - 1) % alignment + 1);
	b->data = ((uint8_t*)b->data) + offset;
	b->size -= offset;
	// Now move the block header on by the same offset and copy it, so that
	// the header stays contiguous with its (now shifted) data.
	block* new_block_position = (block*)(((uint8_t*)b) + offset);
	// Copy via an aligned temporary so we can memcpy (on Android, even memcpy
	// requires alignment when dealing with structs).
	block block_temp;
	memcpy( &block_temp, b, sizeof( block ));
	//////////////////////////////////////////////////////
	// The block was removed from the free list above, so it must not still
	// carry free-list links.
	vAssert( b->prevFree == NULL );
	vAssert( b->nextFree == NULL );
	b = new_block_position;
	memcpy( b, &block_temp, sizeof( block ));
	// Fix up neighbour pointers to this block, for the new location.
	if ( b->prev ) {
		b->prev->next = b;
		b->prev->size += offset; // Increment previous block size by what we've moved the block
	}
	else
		heap->first = b;
	if ( b->next )
		b->next->prev = b;
	assertBlockInvariants( b );
	//////////////////////////////////////////////////////
	validateBlockNext(b);
	heap->total_allocated += toAllocate;
	heap->total_free -= toAllocate;
	++heap->allocations;
	// Ensure we have met our requirements.
	uintptr_t align_offset = ((uintptr_t)b->data) % alignment;
	vAssert( align_offset == 0 ); // Correctly Aligned
	vAssert( b->size >= size_original ); // Large enough
#ifdef MEM_DEBUG_VERBOSE
	printf("Allocator returned address: " xPTRf ".\n", (uintptr_t)b->data );
#endif
#ifdef MEM_STACK_TRACE
	block_recordAlloc( b, mem_stack_string );
#endif // MEM_STACK_TRACE
	assertBlockInvariants( b );
#ifdef TRACK_ALLOCATIONS
	if (source)
		strncpy( b->source, source, MemSourceLength);
	else
		b->source[0] = '\0';
#endif
	//validateFreeList( heap );
	vmutex_unlock( &allocator_mutex );
	return b->data;
}