// Remove block b from this allocator's bookkeeping: the doubly linked
// block list, the free list (when b is linked there), and the
// allocation counters. Must not be called on a block awaiting sweep.
void GCAlloc::UnlinkChunk(GCBlock *b)
{
    GCAssert(!b->needsSweeping);

    // Account for the block's capacity going away.
    m_maxAlloc -= m_itemsPerBlock;
    m_numBlocks--;

    // Splice b out of the doubly linked block list, updating the
    // head/tail pointers when b sits at either end. Next(b) is read
    // once up front; b's own links are untouched until the debug
    // poisoning below.
    GCBlock *succ = Next(b);
    if (b != m_firstBlock)
        b->prev->next = succ;
    else
        m_firstBlock = succ;

    if (b != m_lastBlock)
        succ->prev = b->prev;
    else
        m_lastBlock = b->prev;

    // Drop b from the free list if it is linked there (either via its
    // free-list neighbors or because it is the free-list head).
    bool onFreeList = (b->nextFree != NULL) || (b->prevFree != NULL) || (b == m_firstFree);
    if (onFreeList)
        RemoveFromFreeList(b);

#ifdef _DEBUG
    // Poison the links so stale uses are caught in debug builds.
    b->next = b->prev = NULL;
    b->nextFree = b->prevFree = NULL;
#endif
}
// Allocate a new cache object of cb bytes, optionally copying cb bytes
// from pv into it. Returns a handle to the new entry, or NULL when the
// size limit cannot be met, no entry can be reclaimed, or the underlying
// allocation fails.
CacheHandle CacheMgr::NewObject(void *pv, word cb, word wfHints)
{
    // Apply limits if asked
    if (m_cbLimit != 0) {
        while (m_cbTotalSize + cb > m_cbLimit) {
            if (!MakeSpace(m_cbTotalSize + cb - m_cbLimit))
                return NULL;
        }
    }

    // Free up an entry if we need to
    if (m_pceFree == NULL) {
        // No free slots available. Discard the oldest unlocked entry for
        // reuse, walking backwards from the head (oldest first). Stop if
        // we wrap all the way around to m_pceFirst: in release builds the
        // old Assert was a no-op and the scan could loop forever.
        for (CacheEntry *pceT = (m_pceFirst != NULL) ? m_pceFirst->pcePrev : NULL;
             pceT != NULL && pceT != m_pceFirst;
             pceT = pceT->pcePrev) {
            if ((pceT->wUniqueLock & kwLockMask) == 0) {
                Discard(pceT);
                break;
            }
        }

        // BUG FIX: if every entry was locked, Discard() was never called
        // and m_pceFree is still NULL; the old code dereferenced it
        // unconditionally below. Fail gracefully instead of crashing.
        Assert(m_pceFree != NULL);
        if (m_pceFree == NULL)
            return NULL;
    }

    CacheEntry *pce = m_pceFree->pcePrev;
    Assert(pce != NULL);
    if (pce == NULL)
        return NULL;

    // Alloc the object
    pce->hmem = gmmgr.AllocHandle(cb, wfHints);
    Assert(pce->hmem != NULL);
    if (pce->hmem == NULL)
        return NULL;
    pce->cbSize = cb;

    // Write in data
    if (pv != NULL)
        gmmgr.WriteHandle(pce->hmem, 0, pv, cb);

    // Take off free list, put at start of the alloced list
    RemoveFromFreeList(pce);
    Add(pce);
    m_cbTotalSize += cb;

    return MakeHandle(pce);
}
/*
 * Assign a new opaque ID for inData, growing the global entry table if the
 * pool of free entries is running low. On success *outID receives the new
 * id and 0 is returned; otherwise *outID is kInvalidOpaqueID and an errno
 * value is returned.
 *
 * BUG FIX: the original code released gOpaqueEntryMutex and then read and
 * wrote the shared state (gOpaqueEntryArray[entryToUse], the
 * gOpaqueEntriesAllocated bound check, and ++gOpaqueEntriesUsed) without
 * the lock. A concurrent caller could realloc() gOpaqueEntryArray out from
 * under this thread or race on the counters. The table update now happens
 * while the mutex is still held.
 */
int AssignOpaqueID(void *inData, opaque_id *outID)
{
    int error;
    u_int32_t entryToUse;

    require_action(outID != NULL, bad_parameter, error = EINVAL);

    *outID = kInvalidOpaqueID;

    error = pthread_mutex_lock(&gOpaqueEntryMutex);
    require_noerr(error, pthread_mutex_lock);

    /*
     * If there aren't any items in the table, or if the number of free items is
     * lower than we want, then grow the table.
     */
    if ( (gIndexOfFreeOpaqueEntryHead == 0) ||
         ((gOpaqueEntriesAllocated - gOpaqueEntriesUsed) < kOpaqueIDMinimumFree) )
    {
        u_int32_t newCount;

        /* grow in chunks of 2048, capped at the maximum table size */
        newCount = MIN(gOpaqueEntriesAllocated + 2048, kOpaqueIDMaximumCount);
        if ( gOpaqueEntriesAllocated < newCount )
        {
            OpaqueEntryArrayPtr nuids;

            nuids = (OpaqueEntryArrayPtr)realloc(gOpaqueEntryArray, sizeof(struct OpaqueEntry) * newCount);
            if ( nuids != NULL )
            {
                u_int32_t i;

                gOpaqueEntryArray = nuids;

                /* Add all the 'new' OpaqueEntry to the free list. */
                for ( i = 0; i < newCount - gOpaqueEntriesAllocated; ++i )
                {
                    /* set both count and index to 0 */
                    gOpaqueEntryArray[gOpaqueEntriesAllocated + i].id = 0;
                    AddToFreeList(gOpaqueEntriesAllocated + i);
                }
                gOpaqueEntriesAllocated = newCount;
            }
            /* realloc failure is not fatal here: we may still have a free entry */
        }
    }

    /* get index of an OpaqueEntry to use */
    entryToUse = RemoveFromFreeList();

    /* did we get a valid OpaqueEntry? (index 0 is the "none" sentinel) */
    if ( (entryToUse != 0) && (entryToUse < gOpaqueEntriesAllocated) )
    {
        /* the new id is created with the previous counter + 1, and the index */
        gOpaqueEntryArray[entryToUse].id = CreateOpaqueID(GetOpaqueIDCounterPart(gOpaqueEntryArray[entryToUse].id) + 1, entryToUse);
        gOpaqueEntryArray[entryToUse].data = inData;
        *outID = gOpaqueEntryArray[entryToUse].id;
        ++gOpaqueEntriesUsed;
    }
    else
    {
        error = EINVAL;
    }

    /* release the lock */
    pthread_mutex_unlock(&gOpaqueEntryMutex);

pthread_mutex_lock:
bad_parameter:

    return ( error );
}
// Finalization pass for this allocator: visits every item of every block,
// runs destructors / finalize hooks for items that are in use but were not
// marked reachable, then triages each block (fully dead -> returned to the
// heap, untouched -> marks cleared, partially freed -> queued for sweep).
void GCAlloc::Finalize()
{
    m_finalized = true;
    // Go through every item of every block. Look for items
    // that are in use but not marked as reachable, and delete
    // them.
    GCBlock *next = NULL;
    for (GCBlock* b = m_firstBlock; b != NULL; b = next)
    {
        // we can unlink block below, so capture its successor up front
        next = Next(b);

        GCAssert(!b->needsSweeping);

        // remove from freelist to avoid mutator destructor allocations
        // from using this block
        bool putOnFreeList = false;
        if(m_firstFree == b || b->prevFree != NULL || b->nextFree != NULL) {
            putOnFreeList = true;
            RemoveFromFreeList(b);
        }

        // The nibble decoding below hard-codes these bit values.
        GCAssert(kMark == 0x1 && kFinalize == 0x4 && kHasWeakRef == 0x8);

        int numMarkedItems = 0;

        // TODO: MMX version for IA32
        uint32_t *bits = (uint32_t*) b->GetBits();
        // count: number of item slots to scan; presumably slots at or past
        // nextItem were never handed out -- TODO confirm against GCBlock.
        uint32_t count = b->nextItem ? GetIndex(b, b->nextItem) : m_itemsPerBlock;
        // round up to eight: each uint32_t packs eight 4-bit mark nibbles
        // (see the marks>>=4 stride in the inner loop)
        uint32_t numInts = ((count+7)&~7) >> 3;
        for(uint32_t i=0; i < numInts; i++)
        {
            uint32_t marks = bits[i];
            // hmm, is it better to screw around with exact counts or just examine
            // 8 items on each pass, with the later we open the door to unrolling
            uint32_t subCount = i==(numInts-1) ? ((count-1)&7)+1 : 8;
            for(uint32_t j=0; j<subCount;j++,marks>>=4)
            {
                int mq = marks & kFreelist;
                if(mq == kFreelist)
                    continue;       // already free, nothing to finalize

                if(mq == kMark) {
                    // item survives this collection
                    numMarkedItems++;
                    continue;
                }

                GCAssertMsg(mq != kQueued, "No queued objects should exist when finalizing");

                // in use but unmarked: this item is garbage
                void* item = (char*)b->items + m_itemSize*((i*8)+j);

#ifdef MMGC_HOOKS
                if(m_gc->heap->HooksEnabled())
                {
#ifdef MMGC_MEMORY_PROFILER
                    if(m_gc->heap->GetProfiler())
                        m_totalAskSize -= m_gc->heap->GetProfiler()->GetAskSize(GetUserPointer(item));
#endif
                    m_gc->heap->FinalizeHook(GetUserPointer(item), m_itemSize - DebugSize());
                }
#endif

                if(!(marks & (kFinalize|kHasWeakRef)))
                    continue;

                if (marks & kFinalize)
                {
                    GCFinalizedObject *obj = (GCFinalizedObject*)GetUserPointer(item);
                    GCAssert(*(intptr_t*)obj != 0);
                    // Clear bits first so we won't get second finalization if finalizer longjmps out
                    bits[i] &= ~(kFinalize<<(j*4));
                    obj->~GCFinalizedObject();

#if defined(_DEBUG)
                    if(b->alloc->ContainsRCObjects()) {
                        m_gc->RCObjectZeroCheck((RCObject*)obj);
                    }
#endif
                }

                if (marks & kHasWeakRef) {
                    b->gc->ClearWeakRef(GetUserPointer(item));
                }
            }
        }

        // 3 outcomes:
        // 1) empty, put on list of empty pages
        // 2) no freed items, partially empty or full, return to free if partially empty
        // 3) some freed item add to the to be swept list
        if(numMarkedItems == 0) {
            // add to list of block to be returned to the Heap after finalization
            // we don't do this during finalization b/c we want finalizers to be able
            // to reference the memory of other objects being finalized
            UnlinkChunk(b);
            b->gc->AddToSmallEmptyBlockList(b);
            putOnFreeList = false;
        } else if(numMarkedItems == b->numItems) {
            // nothing changed on this page, clear marks
            // note there will be at least one free item on the page (otherwise it
            // would not have been scanned) so the page just stays on the freelist
            ClearMarks(b);
        } else if(!b->needsSweeping) {
            // free'ing some items but not all
            if(b->nextFree || b->prevFree || b == m_firstFree) {
                RemoveFromFreeList(b);
                b->nextFree = b->prevFree = NULL;
            }
            AddToSweepList(b);
            putOnFreeList = false;
        }

        // Record that this block has been through the current finalize pass.
        b->finalizeState = m_gc->finalizedValue;
        if(putOnFreeList)
            AddToFreeList(b);
    }
}