void FreeList::getFirstNChunksFromList(size_t n, FreeList* fl) {
  assert_proper_lock_protection();
  assert(fl->count() == 0, "Precondition");
  if (count() > 0) {
    int k = 1;
    fl->set_head(head()); n--;
    FreeChunk* tl = head();
    while (tl->next() != NULL && n > 0) {
      tl = tl->next(); n--; k++;
    }
    assert(tl != NULL, "Loop Inv.");

    // First, fix up the list we took from.
    FreeChunk* new_head = tl->next();
    set_head(new_head);
    set_count(count() - k);
    if (new_head == NULL) {
      set_tail(NULL);
    } else {
      new_head->linkPrev(NULL);
    }
    // Now we can fix up the tail.
    tl->linkNext(NULL);
    // And return the result.
    fl->set_tail(tl);
    fl->set_count(k);
  }
}
void FreeChunk::verifyList() const {
  FreeChunk* nextFC = next();
  if (nextFC != NULL) {
    assert(this == nextFC->prev(), "broken chain");
    assert(size() == nextFC->size(), "wrong size");
    nextFC->verifyList();
  }
}
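// Note: verifyList() above recurses once per chunk, so a very long free list
// could exhaust the stack. A minimal sketch of an equivalent iterative walk,
// assuming the same next()/prev()/size() accessors; this helper is not part
// of the original source.
void FreeChunk::verifyListIteratively() const {
  const FreeChunk* cur = this;
  FreeChunk* nextFC = next();
  while (nextFC != NULL) {
    assert(cur == nextFC->prev(), "broken chain");
    assert(cur->size() == nextFC->size(), "wrong size");
    cur = nextFC;
    nextFC = nextFC->next();
  }
}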
status_t rtm_realloc(void** _buffer, size_t newSize) {
  if (_buffer == NULL)
    return B_BAD_VALUE;

  TRACE("rtm_realloc(%p, %lu)\n", *_buffer, newSize);

  void* oldBuffer = *_buffer;

  // find pool
  rtm_pool* pool = pool_for(oldBuffer);
  if (pool == NULL) {
    void* buffer = realloc(oldBuffer, newSize);
    if (buffer != NULL) {
      *_buffer = buffer;
      return B_OK;
    }
    return B_NO_MEMORY;
  }

  MutexLocker _(&pool->lock);

  if (newSize == 0) {
    TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize);
    pool->Free(oldBuffer);
    *_buffer = NULL;
    return B_OK;
  }

  size_t copySize = newSize;
  if (oldBuffer != NULL) {
    FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);

    // Check if the old buffer still fits, and if it makes sense to keep it
    if (oldChunk->Size() >= newSize && newSize > oldChunk->Size() / 3) {
      TRACE("realloc(%p, %lu) old buffer is large enough\n", oldBuffer, newSize);
      return B_OK;
    }

    if (copySize > oldChunk->Size())
      copySize = oldChunk->Size();
  }

  void* newBuffer = rtm_alloc(pool, newSize);
  if (newBuffer == NULL)
    return B_NO_MEMORY;

  if (oldBuffer != NULL) {
    memcpy(newBuffer, oldBuffer, copySize);
    pool->Free(oldBuffer);
  }

  TRACE("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer);
  *_buffer = newBuffer;
  return B_OK;
}
size_t rtm_phys_size_for(void* buffer) {
  if (buffer == NULL)
    return 0;

  FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
  return chunk->CompleteSize();
}
size_t rtm_size_for(void* buffer) {
  if (buffer == NULL)
    return 0;

  FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
  // TODO: we currently always return the actual chunk size, not the allocated
  // one
  return chunk->Size();
}
// Insert this chunk into the pool's free list, keeping the list sorted by
// ascending chunk size.
void FreeChunk::Enqueue(rtm_pool* pool) {
  FreeChunk* chunk = pool->free_anchor.fNext;
  FreeChunk* last = &pool->free_anchor;
  while (chunk && chunk->Size() < fSize) {
    last = chunk;
    chunk = chunk->fNext;
  }

  fNext = chunk;
  last->fNext = this;
}
status_t rtm_create_pool(rtm_pool** _pool, size_t totalSize, const char* name) {
  rtm_pool* pool = (rtm_pool*)malloc(sizeof(rtm_pool));
  if (pool == NULL)
    return B_NO_MEMORY;

  if (name == NULL)
    name = "realtime pool";

  status_t status = mutex_init(&pool->lock, name);
  if (status != B_OK) {
    free(pool);
    return status;
  }

  // Allocate enough space for at least one allocation over \a totalSize
  pool->max_size = (totalSize + sizeof(FreeChunk) - 1 + B_PAGE_SIZE)
    & ~(B_PAGE_SIZE - 1);

  area_id area = create_area(name, &pool->heap_base, B_ANY_ADDRESS,
    pool->max_size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
  if (area < 0) {
    mutex_destroy(&pool->lock);
    free(pool);
    return area;
  }

  pool->area = area;
  pool->available = pool->max_size - FreeChunk::NextOffset();

  // declare the whole heap as one chunk, and add it
  // to the free list
  FreeChunk* chunk = (FreeChunk*)pool->heap_base;
  chunk->SetTo(pool->max_size, NULL);

  pool->free_anchor.SetTo(0, chunk);

  *_pool = pool;

  static pthread_once_t sOnce = PTHREAD_ONCE_INIT;
  pthread_once(&sOnce, &pool_init);

  MutexLocker _(&sPoolsLock);
  sPools.Add(pool);
  return B_OK;
}
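// For context, a minimal usage sketch of the pool API shown above. It assumes
// rtm_free() and rtm_delete_pool() exist as counterparts to rtm_alloc() and
// rtm_create_pool(); they are not part of this excerpt, so adjust the names
// if the actual API differs.
static status_t example_pool_usage() {
  rtm_pool* pool;
  status_t status = rtm_create_pool(&pool, 64 * 1024, "example pool");
  if (status != B_OK)
    return status;

  void* buffer = rtm_alloc(pool, 256);
  if (buffer == NULL) {
    rtm_delete_pool(pool);   // assumed pool teardown
    return B_NO_MEMORY;
  }

  memset(buffer, 0, 256);
  rtm_free(buffer);          // assumed counterpart to rtm_alloc()
  rtm_delete_pool(pool);
  return B_OK;
}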
FreeChunk* FreeList::getChunkAtHead() {
  assert_proper_lock_protection();
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  FreeChunk* fc = head();
  if (fc != NULL) {
    FreeChunk* nextFC = fc->next();
    if (nextFC != NULL) {
      // The chunk fc being removed has a "next".  Set the "next" to the
      // "prev" of fc.
      nextFC->linkPrev(NULL);
    } else { // removed tail of list
      link_tail(NULL);
    }
    link_head(nextFC);
    decrement_count();
  }
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");
  return fc;
}
// Remove this chunk from the list
void FreeList::removeChunk(FreeChunk* fc) {
  assert_proper_lock_protection();
  assert(head() != NULL, "Remove from empty list");
  assert(fc != NULL, "Remove a NULL chunk");
  assert(size() == fc->size(), "Wrong list");
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");

  FreeChunk* prevFC = fc->prev();
  FreeChunk* nextFC = fc->next();
  if (nextFC != NULL) {
    // The chunk fc being removed has a "next".  Set the "next" to the
    // "prev" of fc.
    nextFC->linkPrev(prevFC);
  } else { // removed tail of list
    link_tail(prevFC);
  }
  if (prevFC == NULL) { // removed head of list
    link_head(nextFC);
    assert(nextFC == NULL || nextFC->prev() == NULL,
      "Prev of head should be NULL");
  } else {
    prevFC->linkNext(nextFC);
    assert(tail() != prevFC || prevFC->next() == NULL,
      "Next of tail should be NULL");
  }
  decrement_count();
  assert(((head() == NULL) + (tail() == NULL) + (count() == 0)) % 3 == 0,
    "H/T/C Inconsistency");
  // clear next and prev fields of fc, debug only
  NOT_PRODUCT(
    fc->linkPrev(NULL);
    fc->linkNext(NULL);
  )
}
TreeList* TreeList::removeChunkReplaceIfNeeded(TreeChunk* tc) {
  TreeList* retTL = this;
  FreeChunk* list = head();
  assert(!list || list != list->next(), "Chunk on list twice");
  assert(tc != NULL, "Chunk being removed is NULL");
  assert(parent() == NULL || this == parent()->left() ||
    this == parent()->right(), "list is inconsistent");
  assert(tc->isFree(), "Header is not marked correctly");
  assert(head() == NULL || head()->prev() == NULL, "list invariant");
  assert(tail() == NULL || tail()->next() == NULL, "list invariant");

  FreeChunk* prevFC = tc->prev();
  TreeChunk* nextTC = TreeChunk::as_TreeChunk(tc->next());
  assert(list != NULL, "should have at least the target chunk");

  // Is this the first item on the list?
  if (tc == list) {
    // The "getChunk..." functions for a TreeList will not return the
    // first chunk in the list unless it is the last chunk in the list
    // because the first chunk is also acting as the tree node.
    // When coalescing happens, however, the first chunk in a tree
    // list can be the start of a free range.  Free ranges are removed
    // from the free lists so that they are not available to be
    // allocated when the sweeper yields (giving up the free list lock)
    // to allow mutator activity.  If this chunk is the first in the
    // list and is not the last in the list, do the work to copy the
    // TreeList from the first chunk to the next chunk and update all
    // the TreeList pointers in the chunks in the list.
    if (nextTC == NULL) {
      assert(prevFC == NULL, "Not last chunk in the list");
      set_tail(NULL);
      set_head(NULL);
    } else {
      debug_only(
        if (PrintGC && Verbose) {
          gclog_or_tty->print_cr("Removing first but not only chunk in TreeList");
          gclog_or_tty->print_cr("Node: " INTPTR_FORMAT
            " parent: " INTPTR_FORMAT
            " right: " INTPTR_FORMAT
            " left: " INTPTR_FORMAT,
            tc, tc->list()->parent(), tc->list()->right(), tc->list()->left());
          gclog_or_tty->print_cr("Next before: " INTPTR_FORMAT
            " parent: " INTPTR_FORMAT
            " right: " INTPTR_FORMAT
            " left: " INTPTR_FORMAT,
            nextTC, nextTC->list()->parent(), nextTC->list()->right(),
            nextTC->list()->left());
          gclog_or_tty->print_cr("  head: " INTPTR_FORMAT
            " tail: " INTPTR_FORMAT,
            nextTC->list()->head(), nextTC->list()->tail());
        }
      )
      // copy embedded list.
      nextTC->set_embedded_list(tc->embedded_list());
      retTL = nextTC->embedded_list();
      // Fix the pointer to the list in each chunk in the list.
      // This can be slow for a long list.  Consider having
      // an option that does not allow the first chunk on the
      // list to be coalesced.
      for (TreeChunk* curTC = nextTC; curTC != NULL;
           curTC = TreeChunk::as_TreeChunk(curTC->next())) {
        curTC->set_list(retTL);
      }
      // Fix the parent to point to the new TreeList.
      if (retTL->parent() != NULL) {
        if (this == retTL->parent()->left()) {
          retTL->parent()->setLeft(retTL);
        } else {
          assert(this == retTL->parent()->right(), "Parent is incorrect");
          retTL->parent()->setRight(retTL);
        }
      }
      // Fix the children's parent pointers to point to the
      // new list.
      assert(right() == retTL->right(), "Should have been copied");
      if (retTL->right() != NULL) {
        retTL->right()->setParent(retTL);
      }
      assert(left() == retTL->left(), "Should have been copied");
      if (retTL->left() != NULL) {
        retTL->left()->setParent(retTL);
      }
      retTL->link_head(nextTC);
      debug_only(
        if (PrintGC && Verbose) {
          gclog_or_tty->print_cr("Next after: " INTPTR_FORMAT
            " parent: " INTPTR_FORMAT
            " right: " INTPTR_FORMAT
            " left: " INTPTR_FORMAT,
            nextTC, nextTC->list()->parent(), nextTC->list()->right(),
            nextTC->list()->left());
          gclog_or_tty->print_cr("  head: " INTPTR_FORMAT
            " tail: " INTPTR_FORMAT,
            nextTC->list()->head(), nextTC->list()->tail());
        }
      )
      assert(nextTC->isFree(), "Should be a free chunk");
    }
void* rtm_alloc(rtm_pool* pool, size_t size) {
  if (pool == NULL)
    return malloc(size);

  if (pool->heap_base == NULL || size == 0)
    return NULL;

  MutexLocker _(&pool->lock);

  // align the size requirement to a kAlignment bytes boundary
  size = (size - 1 + kAlignment) & ~(size_t)(kAlignment - 1);

  if (size > pool->available) {
    TRACE("malloc(): Out of memory!\n");
    return NULL;
  }

  FreeChunk* chunk = pool->free_anchor.Next();
  FreeChunk* last = &pool->free_anchor;
  while (chunk && chunk->Size() < size) {
    last = chunk;
    chunk = chunk->Next();
  }

  if (chunk == NULL) {
    // could not find a free chunk as large as needed
    TRACE("malloc(): Out of memory!\n");
    return NULL;
  }

  if (chunk->Size() > size + sizeof(FreeChunk) + kAlignment) {
    // if this chunk is bigger than the requested size,
    // we split it to form two chunks (with a minimal
    // size of kAlignment allocatable bytes).
    FreeChunk* freeChunk = chunk->Split(size);
    last->SetNext(freeChunk);

    // re-enqueue the free chunk at the correct position
    freeChunk->Remove(pool, last);
    freeChunk->Enqueue(pool);
  } else {
    // remove the chunk from the free list
    last->SetNext(chunk->Next());
  }

  pool->available -= size + sizeof(uint32);

  TRACE("malloc(%lu) -> %p\n", size, chunk->AllocatedAddress());
  return chunk->AllocatedAddress();
}
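// The round-up expression in rtm_alloc() aligns the request to the next
// multiple of kAlignment. A small standalone sketch of the same idiom,
// assuming kAlignment is a power of two (8 is used here only for
// illustration); none of these names come from the original source.
#include <assert.h>
#include <stddef.h>

static const size_t kExampleAlignment = 8;  // illustrative value only

static size_t align_up(size_t size) {
  // works for any power-of-two alignment
  return (size - 1 + kExampleAlignment) & ~(size_t)(kExampleAlignment - 1);
}

int main() {
  assert(align_up(1) == 8);    // rounds up to the next multiple of 8
  assert(align_up(8) == 8);    // already aligned sizes are unchanged
  assert(align_up(13) == 16);
  return 0;
}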
void rtm_pool::Free(void* allocated) {
  FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);
  available += freedChunk->CompleteSize();

  // try to join the new free chunk with an existing one
  // it may be joined with up to two chunks
  FreeChunk* chunk = free_anchor.Next();
  FreeChunk* last = &free_anchor;
  int32 joinCount = 0;

  while (chunk) {
    if (chunk->IsTouching(freedChunk)) {
      // almost "insert" it into the list before joining
      // because the next pointer is inherited by the chunk
      freedChunk->SetNext(chunk->Next());
      freedChunk = chunk->Join(freedChunk);

      // remove the joined chunk from the list
      last->SetNext(freedChunk->Next());
      chunk = last;

      if (++joinCount == 2)
        break;
    }

    last = chunk;
    chunk = chunk->Next();
  }

  // enqueue the link at the right position; the
  // free link queue is ordered by size
  freedChunk->Enqueue(this);
}
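// IsTouching() is not shown in this excerpt. A hypothetical sketch of what
// such an adjacency check presumably looks like, assuming CompleteSize()
// returns the chunk's full size including its header; the real
// implementation may differ. Two chunks "touch" when one ends exactly where
// the other begins in memory, which is what allows Free() to coalesce them.
bool FreeChunk::IsTouching(FreeChunk* chunk) {
  return chunk != NULL
    && ((uint8*)this + CompleteSize() == (uint8*)chunk
      || (uint8*)chunk + chunk->CompleteSize() == (uint8*)this);
}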