CodeBlockHeader *expandCodeMemory(int size) {
    CodeBlockHeader *block;
    int inc = size < code_increment ? code_increment
                                    : ROUND(size, sys_page_size);

    if(code_size + inc > max_code_size) {
        inc = max_code_size - code_size;
        if(inc < size)
            return NULL;
    }

    block = mmap(0, inc, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANON, -1, 0);

    if(block == MAP_FAILED)
        return NULL;

    block->len = size;
    if(inc != size) {
        /* Put the unused tail of the mapping straight onto the free list */
        CodeBlockHeader *rem = (CodeBlockHeader*)((char*)block + size);

        rem->len = inc - size;
        addToFreeList(&rem, 1);
    }

    code_size += inc;
    return block;
}
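/* addToFreeList() is not shown in this excerpt.  A minimal sketch of what it
   might look like, given that the call sites here pass an array of block
   pointers and a count: each block is inserted into a singly-linked,
   address-ordered free list (address order makes coalescing of adjacent
   blocks straightforward).  The list head code_free_list and the u.next link
   are assumptions for illustration, not taken from the original source. */
static CodeBlockHeader *code_free_list; /* hypothetical list head */

static void addToFreeList(CodeBlockHeader **blocks, int num) {
    int i;

    for(i = 0; i < num; i++) {
        CodeBlockHeader *block = blocks[i];
        CodeBlockHeader **pntr = &code_free_list;

        /* Find the insertion point that keeps the list address-ordered */
        while(*pntr != NULL && *pntr < block)
            pntr = &(*pntr)->u.next;

        block->u.next = *pntr;
        *pntr = block;
    }
}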
void free(ExecutablePool::Allocation allocation)
{
    void* pointer = allocation.base();
    size_t size = allocation.size();

    ASSERT(!!m_allocation);

    // Call release to report to the operating system that this
    // memory is no longer in use, and need not be paged out.
    ASSERT(isWithinVMPool(pointer, size));
    release(pointer, size);

    // Common-sized allocations are stored in the m_commonSizedAllocations
    // vector; all other freed chunks are added to m_freeList.
    if (size == m_commonSize)
        m_commonSizedAllocations.append(pointer);
    else
        addToFreeList(new FreeListEntry(pointer, size));

    // Do some housekeeping.  Every time we reach a point that
    // 16MB of allocations have been freed, sweep m_freeList
    // coalescing any neighboring fragments.
    m_countFreedSinceLastCoalesce += size;
    if (m_countFreedSinceLastCoalesce >= COALESCE_LIMIT) {
        m_countFreedSinceLastCoalesce = 0;
        coalesceFreeSpace();
    }
}
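// release() is defined elsewhere in the allocator.  A plausible standalone
// sketch, assuming it simply forwards the range to madvise(2) so the kernel
// can reclaim the physical pages while the virtual reservation stays intact
// (MADV_FREE where available, MADV_DONTNEED otherwise); the real
// implementation may differ, and reuse() would be its converse:
#include <sys/mman.h>

static void release(void* position, size_t size)
{
#if defined(MADV_FREE)
    madvise(position, size, MADV_FREE);
#else
    madvise(position, size, MADV_DONTNEED);
#endif
}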
void freeObject( void * ptr )
{
    //ptr given is pointing to the usable mem;
    //check the attributes of the current block, get its header and footer
    ObjectHeader * header = (ObjectHeader *)( (char *)ptr - sizeof(ObjectHeader) );
    int currentSize = header->_objectSize; //real size, including header and footer
    ObjectFooter * footer = (ObjectFooter *)( (char *)header + currentSize - sizeof(ObjectFooter) );

    //check the left neighbour, get its header and footer
    ObjectFooter * footer_of_leftBlock = (ObjectFooter *)( (char *)header - sizeof(ObjectFooter) );
    size_t leftBlockSize = footer_of_leftBlock->_objectSize;
    ObjectHeader * header_of_leftBlock = (ObjectHeader *)( (char *)footer_of_leftBlock + sizeof(ObjectFooter) - leftBlockSize );
    int leftAllocated = footer_of_leftBlock->_allocated;

    //check the right neighbour, get its header and footer
    ObjectHeader * header_of_rightBlock = (ObjectHeader *)( (char *)header + currentSize );
    size_t rightBlockSize = header_of_rightBlock->_objectSize;
    ObjectFooter * footer_of_rightBlock = (ObjectFooter *)( (char *)header_of_rightBlock + rightBlockSize - sizeof(ObjectFooter) );
    int rightAllocated = header_of_rightBlock->_allocated;

    //coalesce with whichever neighbours are free
    ObjectHeader * ptr_2_add;
    if(leftAllocated == 0 && rightAllocated == 0) {
        //both neighbours are free: merge all three blocks into the left one
        ObjectHeader * temp = mergeMems(header_of_leftBlock, footer); //temp is now the left header
        ptr_2_add = mergeMems(temp, footer_of_rightBlock);
    } else if(leftAllocated == 0 && rightAllocated == 1) {
        //only the left neighbour is free: merge the current block into it
        ptr_2_add = mergeMems(header_of_leftBlock, footer);
    } else if(leftAllocated == 1 && rightAllocated == 0) {
        //only the right neighbour is free: merge it into the current block
        ptr_2_add = mergeMems(header, footer_of_rightBlock);
    } else {
        //both neighbours are allocated: free the current block on its own
        ptr_2_add = header;
    }

    addToFreeList(ptr_2_add);
}
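/* mergeMems() is not shown here.  From the call sites above, it appears to
   take the header of the leftmost block and the footer of the rightmost
   block, fuse everything in between into one free block, and return the new
   header.  A sketch under that assumption: */
ObjectHeader * mergeMems( ObjectHeader * header, ObjectFooter * footer )
{
    size_t mergedSize = (char *)footer + sizeof(ObjectFooter) - (char *)header;

    header->_objectSize = mergedSize;
    header->_allocated = 0;
    footer->_objectSize = mergedSize;
    footer->_allocated = 0;
    return header;
}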
void freeMethodInlinedInfo(MethodBlock *mb) {
    Instruction *instruction = mb->code;
    CodeBlockHeader **blocks = (CodeBlockHeader**)mb->code;
    QuickPrepareInfo *info;
    int i;

    if(!enabled)
        return;

    /* Scan handlers within the method */
    for(i = mb->code_size; i--; instruction++) {
        char *handler = (char*)instruction->handler;
        CodeBlockHeader *block;

        if(handler >= min_entry_point && handler <= max_entry_point) {
            /* Handler is within the program text and so does not
               need freeing.  However, sequences which have not been
               rewritten yet will have associated preparation info. */
            if(handler == handler_entry_points[0][OPC_INLINE_REWRITER])
                gcPendingFree(instruction->operand.pntr);
            continue;
        }

        /* The handler is an inlined block */
        block = ((CodeBlockHeader*)handler) - 1;

        if(block->u.ref_count <= 0) {
            /* Either a duplicate block, or a hashed block and this
               is the only reference to it.  Duplicates must be freed
               as this would be a leak.  Hashed blocks potentially
               will be re-used and so we could keep them around.
               However, we free them because it's better to free
               room for a potentially more useful sequence. */

            /* Add onto the list to be freed */
            *blocks++ = block;

            if(block->u.ref_count == 0)
                deleteHashEntry(code_hash_table, block, FALSE);
        } else
            block->u.ref_count--;
    }

    if(blocks > (CodeBlockHeader**)mb->code)
        addToFreeList((CodeBlockHeader**)mb->code, blocks - (CodeBlockHeader**)mb->code);

    for(info = mb->quick_prepare_info; info != NULL;) {
        QuickPrepareInfo *temp = info;
        info = info->next;
        gcPendingFree(temp);
    }
}
WeakBlock::WeakBlock(PageAllocation& allocation)
    : m_allocation(allocation)
{
    for (size_t i = 0; i < weakImplCount(); ++i) {
        WeakImpl* weakImpl = &weakImpls()[i];
        new (NotNull, weakImpl) WeakImpl;
        addToFreeList(&m_sweepResult.freeList, weakImpl);
    }

    ASSERT(!m_sweepResult.isNull() && m_sweepResult.blockIsFree);
}
void MemMan::setCondition(MemHandle *bsMem, uint16 pCond) {
    if ((pCond == MEM_FREED) || (pCond > MEM_DONT_FREE))
        error("MemMan::setCondition: program tried to set illegal memory condition");
    if (bsMem->cond != pCond) {
        bsMem->cond = pCond;
        if (pCond == MEM_DONT_FREE)
            removeFromFreeList(bsMem);
        else if (pCond == MEM_CAN_FREE)
            addToFreeList(bsMem);
    }
}
WeakBlock::WeakBlock(CellContainer container)
    : DoublyLinkedListNode<WeakBlock>()
    , m_container(container)
{
    for (size_t i = 0; i < weakImplCount(); ++i) {
        WeakImpl* weakImpl = &weakImpls()[i];
        new (NotNull, weakImpl) WeakImpl;
        addToFreeList(&m_sweepResult.freeList, weakImpl);
    }

    ASSERT(isEmpty());
}
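// addToFreeList() itself is tiny.  A sketch consistent with how both
// constructors and sweep() use it, assuming a FreeCell link is overlaid on
// the deallocated WeakImpl's storage (the FreeCell layout is an assumption
// here, not shown in this excerpt):
void WeakBlock::addToFreeList(FreeCell** freeList, WeakImpl* weakImpl)
{
    ASSERT(weakImpl->state() == WeakImpl::Deallocated);
    FreeCell* freeCell = reinterpret_cast<FreeCell*>(weakImpl);
    freeCell->next = *freeList;
    *freeList = freeCell;
}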
// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int
fork(void)
{
  int i, pid;
  struct proc *np;

  // Allocate process.
  if((np = allocproc()) == 0)
    return -1;

  // Copy process state from p.
  if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
    kfree(np->kstack);
    np->kstack = 0;
    np->state = UNUSED;
#ifdef USE_CS333_SCHEDULER
    acquire(&ptable.lock);
    addToFreeList(np);
    release(&ptable.lock);
#endif
    return -1;
  }
  np->sz = proc->sz;
  np->parent = proc;
  *np->tf = *proc->tf;

  // Clear %eax so that fork returns 0 in the child.
  np->tf->eax = 0;

  for(i = 0; i < NOFILE; i++)
    if(proc->ofile[i])
      np->ofile[i] = filedup(proc->ofile[i]);
  np->cwd = idup(proc->cwd);

  safestrcpy(np->name, proc->name, sizeof(proc->name));

  pid = np->pid;
  np->uid = np->parent->uid;
  np->gid = np->parent->gid;

  // lock to force the compiler to emit the np->state write last.
  acquire(&ptable.lock);
  np->state = RUNNABLE;
#ifdef USE_CS333_SCHEDULER
  if (!setPri(np, DEF_PRI))
    cprintf("ERROR: DEF_PRI invalid. Must be between 0 and %d. Current value: %d.\n",
            N_PRI, DEF_PRI);
  addToPriQ(np, np->pri);
#endif
  release(&ptable.lock);

  return pid;
}
void MemMan::alloc(MemHandle *bsMem, uint32 pSize, uint16 pCond) {
    _alloced += pSize;
    bsMem->data = (void*)malloc(pSize);
    if (!bsMem->data)
        error("MemMan::alloc(): Can't alloc %d bytes of memory.", pSize);
    bsMem->cond = pCond;
    bsMem->size = pSize;
    if (pCond == MEM_CAN_FREE) {
        warning("%d Bytes alloced as FREEABLE.", pSize); // why should one want to alloc mem if it can be freed?
        addToFreeList(bsMem);
    } else if (bsMem->next || bsMem->prev) // it's in our _freeAble list, remove it from there
        removeFromFreeList(bsMem);
    checkMemoryUsage();
}
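// Neither addToFreeList() nor removeFromFreeList() appears in this excerpt.
// The next/prev test above suggests the freeable handles form a doubly
// linked list threaded through the MemHandle itself; a sketch under that
// assumption (_memListHead is a hypothetical member, not from the original):
void MemMan::addToFreeList(MemHandle *bsMem) {
    bsMem->prev = NULL;
    bsMem->next = _memListHead;
    if (_memListHead)
        _memListHead->prev = bsMem;
    _memListHead = bsMem;
}

void MemMan::removeFromFreeList(MemHandle *bsMem) {
    if (bsMem->prev)
        bsMem->prev->next = bsMem->next;
    else
        _memListHead = bsMem->next;
    if (bsMem->next)
        bsMem->next->prev = bsMem->prev;
    bsMem->next = bsMem->prev = NULL;
}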
// Create and initialise a block in a given piece of memory of *size* bytes
block* block_create( heapAllocator* heap, void* data, size_t size ) {
	block* b = (block*)data;
	memset( b, 0, sizeof( block ));
	b->size = size - sizeof( block );
	b->data = ((u8*)data) + sizeof( block );
	b->free = true;
	b->prev = b->next = NULL;
	vAssert( size > sizeof( void* ) * 2 );
	vAssert( b->size > sizeof( block* ) * 2 );
	addToFreeList( heap, b );
#ifdef MEM_GUARD_BLOCK
	b->guard = kGuardValue;
#endif
	return b;
}
//PAGEBREAK: 32
// Set up first user process.
void
userinit(void)
{
  struct proc *p;
  extern char _binary_initcode_start[], _binary_initcode_size[];

#ifdef USE_CS333_SCHEDULER
  // Initialize free list
  acquire(&ptable.lock);
  int i;
  for (i = 0; i < NPROC; i++)
    addToFreeList(&ptable.proc[i]);
  release(&ptable.lock);
#endif

  p = allocproc();
  initproc = p;
  if((p->pgdir = setupkvm()) == 0)
    panic("userinit: out of memory?");
  inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
  p->sz = PGSIZE;
  memset(p->tf, 0, sizeof(*p->tf));
  p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
  p->tf->ds = (SEG_UDATA << 3) | DPL_USER;
  p->tf->es = p->tf->ds;
  p->tf->ss = p->tf->ds;
  p->tf->eflags = FL_IF;
  p->tf->esp = PGSIZE;
  p->tf->eip = 0;  // beginning of initcode.S

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->uid = DEF_UID;
  p->gid = DEF_GID;

#ifdef USE_CS333_SCHEDULER
  // Initialize ready list with init process
  acquire(&ptable.lock);
  p->state = RUNNABLE;
  if (!setPri(p, DEF_PRI))
    cprintf("ERROR: DEF_PRI invalid. Must be between 0 and %d. Current value: %d.\n",
            N_PRI, DEF_PRI);
  addToPriQ(p, p->pri);
  release(&ptable.lock);
#else
  p->state = RUNNABLE;
#endif
}
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(void)
{
  struct proc *p;
  int havekids, pid;

  acquire(&ptable.lock);
  for(;;){
    // Scan through table looking for zombie children.
    havekids = 0;
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
      if(p->parent != proc)
        continue;
      havekids = 1;
      if(p->state == ZOMBIE){
        // Found one.
        pid = p->pid;
        kfree(p->kstack);
        p->kstack = 0;
        freevm(p->pgdir);
        p->state = UNUSED;
#ifdef USE_CS333_SCHEDULER
        addToFreeList(p);
#endif
        p->pid = 0;
        p->parent = 0;
        p->name[0] = 0;
        p->killed = 0;
        release(&ptable.lock);
        return pid;
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || proc->killed){
      release(&ptable.lock);
      return -1;
    }

    // Wait for children to exit.  (See wakeup1 call in proc_exit.)
    sleep(proc, &ptable.lock);  //DOC: wait-sleep
  }
}
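// addToFreeList() is part of the CS333 scheduler changes, not stock xv6.
// A minimal sketch, assuming ptable gains a pFreeList head and struct proc
// a next pointer (both hypothetical names); every call site above already
// holds ptable.lock and has set the proc to UNUSED, so no locking is done
// here:
#ifdef USE_CS333_SCHEDULER
static void
addToFreeList(struct proc *p)
{
  p->next = ptable.pFreeList;
  ptable.pFreeList = p;
}
#endif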
void WeakBlock::sweep()
{
    if (!m_sweepResult.isNull())
        return;

    SweepResult sweepResult;
    for (size_t i = 0; i < weakImplCount(); ++i) {
        WeakImpl* weakImpl = &weakImpls()[i];
        if (weakImpl->state() == WeakImpl::Dead)
            finalize(weakImpl);
        if (weakImpl->state() == WeakImpl::Deallocated)
            addToFreeList(&sweepResult.freeList, weakImpl);
        else
            sweepResult.blockIsFree = false;
    }

    m_sweepResult = sweepResult;
    ASSERT(!m_sweepResult.isNull());
}
DataStackEntry *dse_alloc(void) {
    DataStackEntry *p;

    pthread_mutex_lock(&mutex);
    if (!(p = freeList)) {
        /* Free list exhausted - grab a batch of entries and chain them up */
        p = (DataStackEntry *)malloc(INCREMENT * sizeof(DataStackEntry));
        if (p) {
            int i;
            for (i = 0; i < INCREMENT; i++, p++)
                addToFreeList(p);
            p = freeList;
        }
    }
    if (p)
        freeList = p->value.next;
    pthread_mutex_unlock(&mutex);
    if (p)
        (void)memset(p, 0, sizeof(DataStackEntry));
    return p;
}
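/* addToFreeList() pushes an entry onto the head of the list.  dse_alloc()
   above pops via p->value.next, so the push side presumably mirrors it;
   the caller must already hold the mutex: */
static void addToFreeList(DataStackEntry *p) {
    p->value.next = freeList;
    freeList = p;
}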
// Release a block from the heapAllocator
void heap_deallocate( heapAllocator* heap, void* data ) {
	if ( data == NULL )
		return;
	vmutex_lock( &allocator_mutex );

	// Check if it's in a bitpool
	bitpool* bit_pool = heap_findBitpoolForData( heap, data );
	if ( bit_pool ) {
		bitpool_free( bit_pool, data );
		vmutex_unlock( &allocator_mutex );
		return;
	}

	block* b = (block*)((uint8_t*)data - sizeof( block ));
	vAssert( !b->free );
	assertBlockInvariants( b );
#ifdef MEM_DEBUG_VERBOSE
	printf("Allocator freed address: " xPTRf ".\n", (uintptr_t)b->data );
#endif
	b->free = true;
	addToFreeList( heap, b );
	heap->total_free += b->size;
	heap->total_allocated -= b->size;
	checkFree( heap, b );

	// Try to merge blocks
	if ( b->next && b->next->free ) {
		checkFree( heap, b->next );
		blockMerge( heap, b, b->next );
	}
	if ( b->prev && b->prev->free ) {
		checkFree( heap, b->prev );
		blockMerge( heap, b->prev, b );
	}

	--heap->allocations;
	vmutex_unlock( &allocator_mutex );
}
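// blockMerge() isn't shown in this excerpt.  A sketch of what it plausibly
// does, given the adjacency checks above: fold the second (physically
// following) free block into the first, reclaiming the second block's
// header as usable space and fixing up the neighbour links.
// removeFromFreeList() is assumed to be the inverse of addToFreeList():
void blockMerge( heapAllocator* heap, block* first, block* second ) {
	vAssert( first->free && second->free );
	vAssert( second == first->next ); // blocks must be physically adjacent
	removeFromFreeList( heap, second );
	first->size += second->size + sizeof( block ); // absorb payload plus dead header
	heap->total_free += sizeof( block );
	first->next = second->next;
	if ( second->next )
		second->next->prev = first;
}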
void WeakBlock::sweep()
{
    // If a block is completely empty, a sweep won't have any effect.
    if (isEmpty())
        return;

    SweepResult sweepResult;
    for (size_t i = 0; i < weakImplCount(); ++i) {
        WeakImpl* weakImpl = &weakImpls()[i];
        if (weakImpl->state() == WeakImpl::Dead)
            finalize(weakImpl);
        if (weakImpl->state() == WeakImpl::Deallocated)
            addToFreeList(&sweepResult.freeList, weakImpl);
        else {
            sweepResult.blockIsFree = false;
            if (weakImpl->state() == WeakImpl::Live)
                sweepResult.blockIsLogicallyEmpty = false;
        }
    }

    m_sweepResult = sweepResult;
    ASSERT(!m_sweepResult.isNull());
}
void* alloc(size_t size)
{
    void* result;

    // Freed allocations of the common size are not stored back into the main
    // m_freeList, but are instead stored in a separate vector.  If the request
    // is for a common sized allocation, check this list.
    if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
        result = m_commonSizedAllocations.last();
        m_commonSizedAllocations.removeLast();
    } else {
        // Search m_freeList for a suitably sized chunk to allocate memory from.
        FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);

        // This would be bad news.
        if (!entry) {
            // Errk!  Let's take a last-ditch desperation attempt at defragmentation...
            coalesceFreeSpace();
            // Did that free up a large enough chunk?
            entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
            // No?...  *BOOM!*
            if (!entry)
                CRASH();
        }
        ASSERT(entry->size != m_commonSize);

        // Remove the entry from m_freeList.  But! -
        // Each entry in the tree may represent a chain of multiple chunks of the
        // same size, and we only want to remove one of them.  So, if this entry
        // does have a chain, just remove the first-but-one item from the chain.
        if (FreeListEntry* next = entry->nextEntry) {
            // We're going to leave 'entry' in the tree; remove 'next' from its chain.
            entry->nextEntry = next->nextEntry;
            next->nextEntry = 0;
            entry = next;
        } else
            m_freeList.remove(entry->size);

        // Whoo!  We have a result!
        ASSERT(entry->size >= size);
        result = entry->pointer;

        // If the allocation exactly fits the chunk we found in
        // m_freeList then the FreeListEntry node is no longer needed.
        if (entry->size == size)
            delete entry;
        else {
            // There is memory left over, and it is not of the common size.
            // We can reuse the existing FreeListEntry node to add this back
            // into m_freeList.
            entry->pointer = (void*)((intptr_t)entry->pointer + size);
            entry->size -= size;
            addToFreeList(entry);
        }
    }

    // Call reuse to report to the operating system that this memory is in use.
    ASSERT(isWithinVMPool(result, size));
    reuse(result, size);
    return result;
}
// We do not attempt to coalesce on addition, which may lead to fragmentation;
// instead we periodically perform a sweep to try to coalesce neighboring
// entries in m_freeList.  Presently this is triggered at the point 16MB
// of memory has been released.
void coalesceFreeSpace()
{
    Vector<FreeListEntry*> freeListEntries;
    SizeSortedFreeTree::Iterator iter;
    iter.start_iter_least(m_freeList);

    // Empty m_freeList into a Vector.
    for (FreeListEntry* entry; (entry = *iter); ++iter) {
        // Each entry in m_freeList might correspond to multiple
        // free chunks of memory (of the same size).  Walk the chain
        // (this will of course likely be only one entry long!) adding
        // each entry to the Vector (resetting the next-in-chain
        // pointer to separate each node out).
        FreeListEntry* next;
        do {
            next = entry->nextEntry;
            entry->nextEntry = 0;
            freeListEntries.append(entry);
        } while ((entry = next));
    }
    // All entries are now in the Vector; purge the tree.
    m_freeList.purge();

    // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
    // We reverse-sort so that we can logically work forwards through memory,
    // whilst popping items off the end of the Vectors using last() and removeLast().
    qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
    qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);

    // The entries from m_commonSizedAllocations that cannot be
    // coalesced into larger chunks will be temporarily stored here.
    Vector<void*> newCommonSizedAllocations;

    // Keep processing so long as entries remain in either of the vectors.
    while (freeListEntries.size() || m_commonSizedAllocations.size()) {
        // We're going to try to find a FreeListEntry node that we can coalesce onto.
        FreeListEntry* coalescionEntry = 0;

        // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
        if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
            // Pop an item from the m_commonSizedAllocations vector - this is the lowest
            // addressed free chunk.  Find out the begin and end addresses of the memory chunk.
            void* begin = m_commonSizedAllocations.last();
            void* end = (void*)((intptr_t)begin + m_commonSize);
            m_commonSizedAllocations.removeLast();

            // Try to find another free chunk abutting onto the end of the one we have already found.
            if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                // There is an existing FreeListEntry for the next chunk of memory!
                // We can reuse this.  Pop it off the end of m_freeList.
                coalescionEntry = freeListEntries.last();
                freeListEntries.removeLast();
                // Update the existing node to include the common-sized chunk that we also found.
                coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
                coalescionEntry->size += m_commonSize;
            } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                // There is a second common-sized chunk that can be coalesced.
                // Allocate a new node.
                m_commonSizedAllocations.removeLast();
                coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
            } else {
                // Nope - this poor little guy is all on his own. :-(
                // Add him into the newCommonSizedAllocations vector for now; we're
                // going to end up adding him back into the m_commonSizedAllocations
                // list when we're done.
                newCommonSizedAllocations.append(begin);
                continue;
            }
        } else {
            ASSERT(freeListEntries.size());
            ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
            // The lowest addressed item is from m_freeList; pop it from the Vector.
            coalescionEntry = freeListEntries.last();
            freeListEntries.removeLast();
        }

        // Right, we have a FreeListEntry; we just need to check if there is anything else
        // to coalesce onto the end.
        ASSERT(coalescionEntry);
        while (true) {
            // Calculate the end address of the chunk we have found so far.
            void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);

            // Is there another chunk adjacent to the one we already have?
            if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                // Yes - another FreeListEntry - pop it from the list.
                FreeListEntry* coalescee = freeListEntries.last();
                freeListEntries.removeLast();
                // Add its size onto our existing node.
                coalescionEntry->size += coalescee->size;
                delete coalescee;
            } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                // We can coalesce the next common-sized chunk.
                m_commonSizedAllocations.removeLast();
                coalescionEntry->size += m_commonSize;
            } else
                break; // Nope, nothing to be added - stop here.
        }

        // We've coalesced everything we can onto the current chunk.
        // Add it back into m_freeList.
        addToFreeList(coalescionEntry);
    }

    // All chunks of free memory larger than m_commonSize should be
    // back in m_freeList by now.  All that remains to be done is to
    // copy the contents of the newCommonSizedAllocations back into
    // the m_commonSizedAllocations Vector.
    ASSERT(m_commonSizedAllocations.size() == 0);
    m_commonSizedAllocations.append(newCommonSizedAllocations);
}
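// The qsort comparators referenced above aren't shown; they just invert the
// usual pointer ordering so the lowest addresses end up at the back of the
// Vectors.  A sketch of the FreeListEntry variant (the common-sized one is
// analogous, comparing the void*s directly):
static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
{
    FreeListEntry* left = *(FreeListEntry**)leftPtr;
    FreeListEntry* right = *(FreeListEntry**)rightPtr;

    // Sort descending by address, so the lowest address sorts last.
    if (left->pointer > right->pointer)
        return -1;
    if (left->pointer < right->pointer)
        return 1;
    return 0;
}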
//when we find a chunk of memory that's big enough, we decide whether to
//split it and make the remainder a node in the freelist; to be worth
//splitting, the remainder must be larger than a header/footer pair plus a
//minimum payload (see minSize below)
void * allocateObject( size_t size )
{
    //step 1: check if mem is initialized
    if ( !_initialized ) {
        _initialized = 1;
        initialize();
    }

    pthread_mutex_lock(&mutex); //lock - matches the unlock before returning

    //step 2: get the actual size needed.
    //Add the ObjectHeader/Footer to the size and round the total size up to a
    //multiple of 8 bytes for alignment.
    size_t roundedSize = (size + sizeof(struct ObjectHeader) + sizeof(struct ObjectFooter) + 7) & ~7;

    //step 3: traverse the freelist to find the first node that's large enough
    ObjectHeader * needle = _freeList->_next; //needle points to the header of the first node in the list
    size_t minSize = sizeof(ObjectHeader) + sizeof(ObjectFooter) + 8;
    void * ptr_2b_returned = NULL;

    //all nodes in the list are free, so the flag only matters for the sentinel
    while( needle->_allocated != 2 ) { //when it's 2, we have reached the sentinel node
        //step 4: check whether this node is the first fit
        //(note: the sizes are unsigned, so compare before subtracting)
        if ( needle->_objectSize >= roundedSize ) {
            size_t size_dif = needle->_objectSize - roundedSize;

            //step 5: determine if the remainder is big enough;
            //if so, split; if not, use the entire chunk as one node
            if (size_dif <= minSize) { //one node
                //the node is for use, thus we have to return its usable mem ptr
                ptr_2b_returned = (void *)createFormattedMemChunk((char *)needle, needle->_objectSize, 1);
                //take the header of this node out of the freelist
                removeFromFreeList(needle);
            } else { //split: one part for use, taken out of the list; one part added back
                char * ptr_remainder = (char *)needle + roundedSize; //starting point of the remainder node
                ptr_2b_returned = createFormattedMemChunk((char *)needle, roundedSize, 1); //node that will be used
                createFormattedMemChunk(ptr_remainder, size_dif, 0); //remainder node
                //remove the used node and add the remainder to the freelist
                removeFromFreeList(needle);
                addToFreeList((ObjectHeader *)ptr_remainder);
            }
            break; //first fit found - stop searching
        }
        needle = needle->_next;
    }

    if (ptr_2b_returned == NULL) { //no available mem for the request
        //get another 2MB from the OS and format it;
        //firstHeaderInNewChunk points to the real header (not the fencepost) of the new chunk
        ObjectHeader * firstHeaderInNewChunk = getNewMemChunkFromOS_and_Initialize();
        char * ptr_remainder = (char *)firstHeaderInNewChunk + roundedSize; //header of the free remainder

        //split the new chunk and add the remainder to the freelist
        //(compute the remainder size before the header is overwritten below)
        size_t size_dif = firstHeaderInNewChunk->_objectSize - roundedSize;
        ptr_2b_returned = createFormattedMemChunk((char *)firstHeaderInNewChunk, roundedSize, 1); //for use
        createFormattedMemChunk(ptr_remainder, size_dif, 0); //remainder to be added to the list

        //freelist handling
        removeFromFreeList(firstHeaderInNewChunk);
        addToFreeList((ObjectHeader *)ptr_remainder);
    }

    //unlock
    pthread_mutex_unlock(&mutex);

    //Return a pointer to usable memory
    return ptr_2b_returned;
}
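/* createFormattedMemChunk() is used above to stamp a header/footer pair onto
   a raw byte range.  A sketch consistent with the call sites (ptr is the
   header address, size covers header + payload + footer, and the return
   value is the usable memory just past the header): */
void * createFormattedMemChunk( char * ptr, size_t size, int allocated )
{
    ObjectHeader * header = (ObjectHeader *)ptr;
    ObjectFooter * footer = (ObjectFooter *)( ptr + size - sizeof(ObjectFooter) );

    header->_objectSize = size;
    header->_allocated = allocated;
    footer->_objectSize = size;
    footer->_allocated = allocated;
    return (void *)( ptr + sizeof(ObjectHeader) );
}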
void dse_free(DataStackEntry *p) {
    pthread_mutex_lock(&mutex);
    addToFreeList(p);
    pthread_mutex_unlock(&mutex);
}