Example #1
int DeleteOpaqueID(opaque_id inID)
{
	int error;
	uint32_t index;

	error = pthread_mutex_lock(&gOpaqueEntryMutex);
	require_noerr(error, pthread_mutex_lock);
	
	index = GetOpaqueIDIndexPart(inID);
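	/*
	 * The ID is valid only if its index part is in range and the stored
	 * ID (index plus generation counter) matches exactly.
	 */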
	if ( (index != 0) && (index < gOpaqueEntriesAllocated) && (gOpaqueEntryArray[index].id == inID) )
	{
		/*
		 * Keep the old counter so that next time we can increment the
		 * generation count and return a 'new' opaque ID which maps to this
		 * same index. The index is set to zero to indicate this entry is not
		 * in use.
		 */
		gOpaqueEntryArray[index].id = CreateOpaqueID(GetOpaqueIDCounterPart(inID), 0);

		AddToFreeList(index);
		--gOpaqueEntriesUsed;
	}
	else
	{
		error = EINVAL;
	}

	pthread_mutex_unlock(&gOpaqueEntryMutex);

pthread_mutex_lock:

	return ( error );
}
Example #2
// Pos not required; used for sanity check
static void DeleteFirstInstanceFromIndex(int Kmer, int Pos)
	{
	if (-1 == Kmer)
		return;

	assert(Kmer >= 0 && Kmer < KmerIndexCount);

	// Heads[Kmer] points at the first (oldest) instance of this k-mer
	INDEX_ENTRY *E = Heads[Kmer];

	if (E == 0)
		Quit("DFI Kmer=%d %s Pos=%d", Kmer, CodeToString(Kmer, k), Pos);
//	assert(E != 0);
	assert(0 == E->Prev);
	assert(Pos == E->Pos);

// Delete from index
	INDEX_ENTRY *NewHead = E->Next;
	if (NewHead == 0)
		{
		// Heads[Kmer] gets NewHead (0) in the common assignment below
		Tails[Kmer] = 0;
		}
	else
		{
		assert(NewHead->Prev == E);
		NewHead->Prev = 0;
		}
	Heads[Kmer] = NewHead;

	AddToFreeList(E);
	}
Example #3
//******************************************************************************
// Private Interface
//******************************************************************************
MEMORY_POOL::SEGMENT* MEMORY_POOL::GetNewSegment()
{
    SEGMENT* NewSegment = (SEGMENT*)malloc( AllocationSize );
    if( NewSegment == null ) {
        return null;    // propagate allocation failure instead of dereferencing null
    }
    NewSegment->Memory = (void*)alignup( (u8*)(NewSegment) + sizeof(SEGMENT), BlockSize );
    NewSegment->FreeBlock = (BLOCK*)NewSegment->Memory;
    NewSegment->NextFreeSegment = null;
    NewSegment->Left = null;
    NewSegment->Right = null;
    NewSegment->Key = (u64)(NewSegment->Memory);
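    // Index the segment by its base address so a freed pointer can be
    // mapped back to its owning segment (see FindSegment in FreeBlock).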
    SegmentTree.Insert(NewSegment);

    BLOCK* Block = null;
    BLOCK* NextBlock = null;
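    // Chain every block into an intrusive singly linked free list
    // (assumes AllocationSize leaves room for at least one block);
    // the last block terminates the chain.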
    int NumBlocks = ( AllocationSize - alignup( sizeof(SEGMENT), BlockSize ) ) / BlockSize;
    for( int i = 0; i < NumBlocks - 1; i++ ) {
        Block     = (BLOCK*)( (u8*)(NewSegment->Memory) + BlockSize*( i ) );
        NextBlock = (BLOCK*)( (u8*)(NewSegment->Memory) + BlockSize*( i + 1 ) );
        Block->Next = NextBlock;
    }
    Block = (BLOCK*)( (u8*)(NewSegment->Memory) + BlockSize*( NumBlocks - 1 ) );
    Block->Next = null;
    
    AddToFreeList(NewSegment);

    NumSegments++;

    return NewSegment;
}
Example #4
void CacheMgr::Discard(CacheEntry *pce)
{
	Assert((pce->wUniqueLock & kwLockMask) == 0);
	Assert(pce->hmem != NULL);
	Remove(pce);
	AddToFreeList(pce);
	gmmgr.FreeHandle(pce->hmem);
	pce->hmem = NULL;
	pce->wUniqueLock += kwIncUnique;
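	// If the unique counter wrapped around to zero, bump it once more so
	// it is never zero.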
	if ((pce->wUniqueLock & kwUniqueMask) == 0)
		pce->wUniqueLock += kwIncUnique;
	m_cbTotalSize -= pce->cbSize;
}
Example #5
//******************************************************************************
void MEMORY_POOL::FreeBlock(void* ptr)
{
    SEGMENT* Segment = FindSegment( ptr );
    // Guard against an immediate double free, which would make the head
    // block point at itself. TODO: check for cyclic lists more completely?
    if(ptr == Segment->FreeBlock) {
        return;
    }
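    // Push the freed block onto the front of the segment's intrusive free list.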
    BLOCK* PrevBlock = Segment->FreeBlock;
    Segment->FreeBlock = (BLOCK*)ptr;
    Segment->FreeBlock->Next = PrevBlock;

    AddToFreeList(Segment);

    NumBlocks--;
}
Example #6
static void AllocateIndex(int Diameter, int k)
	{
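	// pow4(k) = 4^k distinct k-mers over the 4-letter alphabet; a window
	// of Diameter letters contains Diameter - k + 1 k-mer positions.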
	KmerIndexCount = pow4(k);
	KmerWindowCount = Diameter - k + 1;

	Entries = all(INDEX_ENTRY, KmerWindowCount);

	zero(Entries, INDEX_ENTRY, KmerWindowCount);

	Heads = all(INDEX_ENTRY *, KmerIndexCount);
	Tails = all(INDEX_ENTRY *, KmerIndexCount);

	zero(Heads, INDEX_ENTRY *, KmerIndexCount);
	zero(Tails, INDEX_ENTRY *, KmerIndexCount);

	for (int i = 0; i < KmerWindowCount; ++i)
		AddToFreeList(&(Entries[i]));
	}
Example #7
	bool GCAlloc::Sweep(GCBlock *b)
	{	
		GCAssert(b->needsSweeping);
		RemoveFromSweepList(b);

		SweepGuts(b);

		if(b->numItems == 0)
		{
			UnlinkChunk(b);
			FreeChunk(b);
			return true;
		} 

		// Block still holds live items; return it to the free list so its
		// remaining space can satisfy new allocations.
		AddToFreeList(b);

		return false;
	}
Example #8
bool CacheMgr::Init()
{
	// Alloc cache entries

	m_pceList = new CacheEntry[kcCacheEntries];
	Assert(m_pceList != NULL, "out of memory!");
	if (m_pceList == NULL)
		return false;
	memset(m_pceList, 0, sizeof(CacheEntry) * kcCacheEntries);

	// Add all entries to free list

	for (CacheEntry *pce = m_pceList; pce < &m_pceList[kcCacheEntries]; pce++) {
		pce->wUniqueLock = kwIncUnique;
		AddToFreeList(pce);
	}

	return true;
}
Example #9
int AssignOpaqueID(void *inData, opaque_id *outID)
{
	int error;
	u_int32_t entryToUse;
	
	require_action(outID != NULL, bad_parameter, error = EINVAL);
	
	*outID = kInvalidOpaqueID;
	
	error = pthread_mutex_lock(&gOpaqueEntryMutex);
	require_noerr(error, pthread_mutex_lock);
	
	/*
	 * If there aren't any items in the table, or if the number of free items is
	 * lower than we want, then grow the table.
	 */
	if ( (gIndexOfFreeOpaqueEntryHead == 0) || ((gOpaqueEntriesAllocated - gOpaqueEntriesUsed) < kOpaqueIDMinimumFree) )
	{
		u_int32_t newCount;
		
		newCount = MIN(gOpaqueEntriesAllocated + 2048, kOpaqueIDMaximumCount);

		if ( gOpaqueEntriesAllocated < newCount )
		{
			OpaqueEntryArrayPtr nuids;
			
			nuids = (OpaqueEntryArrayPtr)realloc(gOpaqueEntryArray, sizeof(struct OpaqueEntry) * newCount);

			if ( nuids != NULL )
			{
				u_int32_t i;

				gOpaqueEntryArray = nuids;

				/* Add all of the 'new' OpaqueEntry records to the free list. */
				for ( i = 0; i < newCount - gOpaqueEntriesAllocated; ++i )
				{
					/* set both count and index to 0 */
					gOpaqueEntryArray[gOpaqueEntriesAllocated + i].id = 0;

					AddToFreeList(gOpaqueEntriesAllocated + i);
				}

				gOpaqueEntriesAllocated = newCount;
			}
		}
	}

	/* get index of an OpaqueEntry to use */
	entryToUse = RemoveFromFreeList();

	/* release the lock */
	pthread_mutex_unlock(&gOpaqueEntryMutex);

	/* did we get an OpaqueEntry? */
	require_action((entryToUse != 0) && (entryToUse < gOpaqueEntriesAllocated), no_opaqueID, error = EINVAL);
		
	/* the new id is created with the previous counter + 1, and the index */
	gOpaqueEntryArray[entryToUse].id = CreateOpaqueID(GetOpaqueIDCounterPart(gOpaqueEntryArray[entryToUse].id) + 1, entryToUse);
	gOpaqueEntryArray[entryToUse].data = inData;
	
	*outID = gOpaqueEntryArray[entryToUse].id;

	++gOpaqueEntriesUsed;

no_opaqueID:
pthread_mutex_lock:
bad_parameter:

	return ( error );
}
Example #10
	void GCAlloc::Finalize()
	{
		m_finalized = true;
		// Go through every item of every block.  Look for items
		// that are in use but not marked as reachable, and delete
		// them.
		
		GCBlock *next = NULL;
		for (GCBlock* b = m_firstBlock; b != NULL; b = next)
		{
			// we can unlink block below
			next = Next(b);

			GCAssert(!b->needsSweeping);

			// remove from the free list so that allocations made by mutator
			// destructors can't use this block
			bool putOnFreeList = false;
			if(m_firstFree == b || b->prevFree != NULL || b->nextFree != NULL) {
				putOnFreeList = true;
				RemoveFromFreeList(b);
			}

			GCAssert(kMark == 0x1 && kFinalize == 0x4 && kHasWeakRef == 0x8);

			int numMarkedItems = 0;

			// TODO: MMX version for IA32
			uint32_t *bits = (uint32_t*) b->GetBits();
			uint32_t count = b->nextItem ? GetIndex(b, b->nextItem) : m_itemsPerBlock;
			// round up to eight
			uint32_t numInts = ((count+7)&~7) >> 3;
			for(uint32_t i=0; i < numInts; i++) 
			{
				uint32_t marks = bits[i];					
				// hmm, is it better to screw around with exact counts or just examine
				// 8 items on each pass; with the latter we open the door to unrolling
				uint32_t subCount = i==(numInts-1) ? ((count-1)&7)+1 : 8;
				for(uint32_t j=0; j<subCount;j++,marks>>=4)
				{
					int mq = marks & kFreelist;
					if(mq == kFreelist)
						continue;

					if(mq == kMark) {
						numMarkedItems++;
						continue;
					}

					GCAssertMsg(mq != kQueued, "No queued objects should exist when finalizing");

					void* item = (char*)b->items + m_itemSize*((i*8)+j);

#ifdef MMGC_HOOKS
					if(m_gc->heap->HooksEnabled())
					{
					#ifdef MMGC_MEMORY_PROFILER
						if(m_gc->heap->GetProfiler())
							m_totalAskSize -= m_gc->heap->GetProfiler()->GetAskSize(GetUserPointer(item));
					#endif

 						m_gc->heap->FinalizeHook(GetUserPointer(item), m_itemSize - DebugSize());
					}
#endif

					if(!(marks & (kFinalize|kHasWeakRef)))
						continue;
        
					if (marks & kFinalize)
					{     
						GCFinalizedObject *obj = (GCFinalizedObject*)GetUserPointer(item);
						GCAssert(*(intptr_t*)obj != 0);
						bits[i] &= ~(kFinalize<<(j*4));		// Clear bits first so we won't get second finalization if finalizer longjmps out
						obj->~GCFinalizedObject();

#if defined(_DEBUG)
						if(b->alloc->ContainsRCObjects()) {
							m_gc->RCObjectZeroCheck((RCObject*)obj);
						}
#endif
					}

					if (marks & kHasWeakRef) {							
						b->gc->ClearWeakRef(GetUserPointer(item));
					}
				}
			}

			// 3 outcomes:
			// 1) empty: put on the list of empty pages
			// 2) no freed items, partially empty or full: return to the free list if partially empty
			// 3) some freed items: add to the to-be-swept list
			if(numMarkedItems == 0) {
				// add to the list of blocks to be returned to the Heap after finalization;
				// we don't return it now b/c we want finalizers to be able
				// to reference the memory of other objects being finalized
				UnlinkChunk(b);
				b->gc->AddToSmallEmptyBlockList(b);
				putOnFreeList = false;
			} else if(numMarkedItems == b->numItems) {
				// nothing changed on this page, clear marks
				// note there will be at least one free item on the page (otherwise it
				// would not have been scanned) so the page just stays on the freelist
				ClearMarks(b);
			} else if(!b->needsSweeping) {
				// freeing some items but not all
				if(b->nextFree || b->prevFree || b == m_firstFree) {
					RemoveFromFreeList(b);
					b->nextFree = b->prevFree = NULL;
				}
				AddToSweepList(b);
				putOnFreeList = false;
			}
			b->finalizeState = m_gc->finalizedValue;
			if(putOnFreeList)
				AddToFreeList(b);
		}
	}