Example #1
// static 
bool RedhawkGCInterface::InitializeSubsystems(GCType gcType)
{
    g_pConfig->Construct();

#ifdef FEATURE_ETW
    MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context.IsEnabled = FALSE;
    MICROSOFT_WINDOWS_REDHAWK_GC_PUBLIC_PROVIDER_Context.IsEnabled = FALSE;

    // Register the Redhawk event provider with the system.
    RH_ETW_REGISTER_Microsoft_Windows_Redhawk_GC_Private();
    RH_ETW_REGISTER_Microsoft_Windows_Redhawk_GC_Public();

    MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_Redhawk_GC_PrivateHandle;
    MICROSOFT_WINDOWS_REDHAWK_GC_PUBLIC_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_Redhawk_GC_PublicHandle;
#endif // FEATURE_ETW

    if (!InitializeSystemInfo())
    {
        return false;
    }

    // Initialize the special EEType used to mark free list entries in the GC heap.
    g_FreeObjectEEType.InitializeAsGcFreeType();

    // Place the pointer to this type in a global cell (typed as the structurally equivalent MethodTable
    // that the GC understands).
    g_pFreeObjectMethodTable = (MethodTable *)&g_FreeObjectEEType;
    g_pFreeObjectEEType = &g_FreeObjectEEType;

    if (!g_SuspendEELock.InitNoThrow(CrstSuspendEE))
        return false;

    // Set the GC heap type.
    bool fUseServerGC = (gcType == GCType_Server);
    GCHeap::InitializeHeapType(fUseServerGC);

    // Create the GC heap itself.
    GCHeap *pGCHeap = GCHeap::CreateGCHeap();
    if (!pGCHeap)
        return false;

    // Initialize the GC subsystem.
    HRESULT hr = pGCHeap->Initialize();
    if (FAILED(hr))
        return false;

    if (!FinalizerThread::Initialize())
        return false;

    // Initialize HandleTable.
    if (!Ref_Initialize())
        return false;

    return true;
}
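
A minimal call-site sketch for the routine above; g_useServerGC and GCType_Workstation are assumptions (only GCType and GCType_Server appear in the source):

bool StartGCSubsystems()
{
    // Pick the GC flavor from a hypothetical configuration flag.
    GCType gcType = g_useServerGC ? GCType_Server : GCType_Workstation;
    return RedhawkGCInterface::InitializeSubsystems(gcType);
}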
Example #2
    void SystemDelete(void *p)
    {
#ifdef MMGC_MEMORY_PROFILER
        if (p) {
            // heap can be NULL during OOM shutdown
            GCHeap* heap = GCHeap::GetGCHeap();
            if (heap)
                heap->TrackSystemFree(p);
        }
#endif
        VMPI_free(p);
    }
Example #3
// Block the current thread until at least one object needs to be finalized (returns true) or memory is low
// (returns false and the finalizer thread should initiate a garbage collection).
EXTERN_C REDHAWK_API UInt32_BOOL __cdecl RhpWaitForFinalizerRequest()
{
    // We can wait on two events: the finalization queue has been populated, and the low memory resource
    // notification. But if the latter is signalled we shouldn't wait on it again immediately -- if the
    // garbage collection the finalizer thread initiates as a result is not sufficient to remove the low
    // memory condition, the event will still be signalled and we'll end up looping doing CPU-intensive
    // collections, which won't help the situation at all and could make it worse. So we remember whether
    // the last event we reported was low memory, and if so we'll wait at least two seconds (the CLR value)
    // on just a finalization request.
    static bool fLastEventWasLowMemory = false;

    GCHeap * pHeap = GCHeap::GetGCHeap();

    // Wait in a loop because we may have to retry if we decide to only wait for finalization events but the
    // two second timeout expires.
    do
    {
        HANDLE  lowMemEvent = NULL;
#if 0 // TODO: hook up low memory notification
        lowMemEvent = pHeap->GetLowMemoryNotificationEvent();
#endif // 0
        HANDLE  rgWaitHandles[] = { FinalizerThread::GetFinalizerEvent(), lowMemEvent };
        UInt32  cWaitHandles = (fLastEventWasLowMemory || (lowMemEvent == NULL)) ? 1 : 2;
        UInt32  uTimeout = fLastEventWasLowMemory ? 2000 : INFINITE;

        UInt32 uResult = PalWaitForMultipleObjectsEx(cWaitHandles, rgWaitHandles, FALSE, uTimeout, FALSE);
        switch (uResult)
        {
        case WAIT_OBJECT_0:
            // At least one object is ready for finalization.
            return TRUE;

        case WAIT_OBJECT_0 + 1:
            // Memory is low, tell the finalizer thread to garbage collect.
            ASSERT(!fLastEventWasLowMemory);
            fLastEventWasLowMemory = true;
            return FALSE;

        case WAIT_TIMEOUT:
            // We were waiting only for finalization events but didn't get one within the timeout period. Go
            // back to waiting for any event.
            ASSERT(fLastEventWasLowMemory);
            fLastEventWasLowMemory = false;
            break;

        default:
            ASSERT(!"Unexpected PalWaitForMultipleObjectsEx() result");
            return FALSE;
        }
    } while (true);
}
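
A minimal sketch of the finalizer-thread loop that the TRUE/FALSE contract above implies; DrainFinalizerQueue is a hypothetical helper, while GarbageCollect mirrors the call used in Example #11:

static void FinalizerThreadLoop(GCHeap * pHeap)
{
    for (;;)
    {
        if (RhpWaitForFinalizerRequest())
        {
            // TRUE: at least one object is ready; run its finalizer.
            DrainFinalizerQueue();      // hypothetical helper
        }
        else
        {
            // FALSE: low-memory notification; collect instead of finalizing.
            pHeap->GarbageCollect();
        }
    }
}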
Example #4
 void AttachSampler(avmplus::Sampler* sampler)
 {
     GCHeap* heap = GCHeap::GetGCHeap();     // May be NULL during OOM shutdown
     if (heap)
     {
         EnterFrame* ef = heap->GetEnterFrame();
         if (ef)
         {
             GC* gc = ef->GetActiveGC();
             if (gc)
                 gc->SetAttachedSampler(sampler);
         }
     }
 }
Example #5
 avmplus::Sampler* GetSampler()
 {
     GCHeap* heap = GCHeap::GetGCHeap();     // May be NULL during OOM shutdown
     if (heap)
     {
         EnterFrame* ef = heap->GetEnterFrame();
         if (ef)
         {
             GC* gc = ef->GetActiveGC();
             if (gc)
                 return (avmplus::Sampler*)gc->GetAttachedSampler();
         }
     }
     return NULL;
 }
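
An illustrative round trip through the two helpers above (the sampler instance is assumed to come from elsewhere):

void RoundTripSampler(avmplus::Sampler* sampler)
{
    AttachSampler(sampler);                    // silently a no-op during OOM shutdown
    avmplus::Sampler* current = GetSampler();  // NULL in that same shutdown case
    (void)current;                             // otherwise current == sampler
}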
Example #6
// Need to know the maximum segment size for both the normal GC heap and the
// large object heap, as well as the top user-accessible address within the
// address space (i.e., theoretically 2^31 - 1 on a 32-bit machine, but a tad
// lower in practice). This will help out with 32-bit machines running in
// 3 GB mode.
FCIMPL2(void, COMMemoryFailPoint::GetMemorySettings, UINT64* pMaxGCSegmentSize, UINT64* pTopOfMemory)
{
    FCALL_CONTRACT;

    GCHeap * pGC = GCHeap::GetGCHeap();
    size_t segment_size = pGC->GetValidSegmentSize(FALSE);
    size_t large_segment_size = pGC->GetValidSegmentSize(TRUE);
    _ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX);
    if (segment_size > large_segment_size)
        *pMaxGCSegmentSize = (UINT64) segment_size;
    else
        *pMaxGCSegmentSize = (UINT64) large_segment_size;

    // GetTopMemoryAddress returns a void*, which can't be cast
    // directly to a UINT64 without causing an error from GCC.
    void * topOfMem = GetTopMemoryAddress();
    *pTopOfMemory = (UINT64) (size_t) topOfMem;
}
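
A sketch of the kind of address-space probe a MemoryFailPoint-style caller might build from these two values; GetMemorySettingsForProbe is a hypothetical plain-function stand-in for the FCall above, and the check itself is illustrative rather than the actual managed-side algorithm:

bool ProbeAddressSpace(UINT64 sizeInBytes)
{
    UINT64 maxSegmentSize = 0, topOfMemory = 0;
    GetMemorySettingsForProbe(&maxSegmentSize, &topOfMemory);  // hypothetical wrapper
    // Require room for the request plus one worst-case GC segment,
    // all of it below the top user-accessible address.
    return sizeInBytes + maxSegmentSize <= topOfMemory;
}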
Example #7
    void GCLargeAlloc::Free(const void *item)
    {
        LargeBlock *b = GetLargeBlock(item);

#ifdef GCDEBUG
        // RCObjects have a contract that they must clean themselves: since they
        // have to scan themselves to decrement other RCObjects anyway, they might
        // as well clean themselves too, which is better than suffering a memset later.
        if(b->rcobject)
            m_gc->RCObjectZeroCheck((RCObject*)GetUserPointer(item));
#endif


        // We can't allow freeing something during sweeping; otherwise alloc counters
        // get decremented twice and destructors will be called twice.
        GCAssert(m_gc->collecting == false || m_gc->marking == true);
        if (m_gc->marking && (m_gc->collecting || IsProtectedAgainstFree(b))) {
            m_gc->AbortFree(GetUserPointer(item));
            return;
        }

        m_gc->policy.signalFreeWork(b->size);

#ifdef MMGC_HOOKS
        GCHeap* heap = GCHeap::GetGCHeap();
        if(heap->HooksEnabled())
        {
            const void* p = GetUserPointer(item);
            size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
            if(heap->GetProfiler())
                m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
            heap->FinalizeHook(p, userSize);
            heap->FreeHook(p, userSize, uint8_t(GCHeap::GCFreedPoison));
        }
#endif

        if(b->flags[0] & kHasWeakRef)
            m_gc->ClearWeakRef(GetUserPointer(item));

        LargeBlock **prev = &m_blocks;
        while(*prev)
        {
            if(b == *prev)
            {
                *prev = Next(b);
                size_t numBlocks = b->GetNumBlocks();
                m_totalAllocatedBytes -= b->size;
                VALGRIND_MEMPOOL_FREE(b, b);
                VALGRIND_MEMPOOL_FREE(b, item);
                VALGRIND_DESTROY_MEMPOOL(b);
                m_gc->FreeBlock(b, (uint32_t)numBlocks, m_partitionIndex);
                return;
            }
            prev = (LargeBlock**)(&(*prev)->next);
        }
        GCAssertMsg(false, "Bad free!");
    }
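
The list-removal loop above uses the pointer-to-pointer unlink idiom; here it is in isolation with a hypothetical node type, showing why no head special case is needed:

struct Node { Node* next; };

bool Unlink(Node** head, Node* target)
{
    for (Node** prev = head; *prev != NULL; prev = &(*prev)->next) {
        if (*prev == target) {
            *prev = target->next;   // splices out head and interior nodes alike
            return true;
        }
    }
    return false;                   // the equivalent of "Bad free!" above
}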
Example #8
    void *SystemNew(size_t size, FixedMallocOpts opts)
    {
        void *space = VMPI_alloc(size);
        if (space == NULL)
        {
            if (opts & MMgc::kCanFail)
                return NULL;

            int attempt = 0;
            do {
                GCHeap::GetGCHeap()->SystemOOMEvent(size, attempt++);
                space = VMPI_alloc(size);
            } while (space == NULL);
        }
#ifdef MMGC_MEMORY_PROFILER
        GCHeap* heap = GCHeap::GetGCHeap();
        if (heap)
            heap->TrackSystemAlloc(space, size);
#endif
        if (opts & MMgc::kZero)
            VMPI_memset(space, 0, size);
        return space;
    }
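
An illustrative pairing of SystemNew and SystemDelete under the flags the function checks above; the cast and the scenario are assumptions:

void SystemNewDeleteExample()
{
    // kCanFail: the caller handles NULL instead of looping on SystemOOMEvent;
    // kZero: the returned block is memset to zero before it is returned.
    void *p = SystemNew(256, FixedMallocOpts(MMgc::kCanFail | MMgc::kZero));
    if (p != NULL)
        SystemDelete(p);    // unwinds the profiler tracking, then VMPI_free
}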
Example #9
	/* static */
	void GCAlloc::Free(const void *item)
	{
		GCBlock *b = GetBlock(item);
		GCAlloc *a = b->alloc;
	
#ifdef MMGC_HOOKS
		GCHeap* heap = GCHeap::GetGCHeap();
		if(heap->HooksEnabled())
		{
			const void* p = GetUserPointer(item);
			size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
			if(heap->GetProfiler())
				a->m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
			heap->FinalizeHook(p, userSize);
			heap->FreeHook(p, userSize, 0xca);
		}
#endif

#ifdef _DEBUG		
		// check that it hasn't already been freed
		void *free = b->firstFree;
		while(free) {
			GCAssert(free != item);
			free = *((void**) free);
		}
#endif

		int index = GetIndex(b, item);
		if(GetBit(b, index, kHasWeakRef)) {
			b->gc->ClearWeakRef(GetUserPointer(item));
		}

		bool wasFull = b->IsFull();

		if(b->needsSweeping) {
#ifdef _DEBUG
			bool gone =
#endif
				a->Sweep(b);
			GCAssertMsg(!gone, "How can a page I'm about to free an item on be empty?");
			wasFull = false;
		}

		if(wasFull) {
			a->AddToFreeList(b);
		}

		b->FreeItem(item, index);

		if(b->numItems == 0) {
			a->UnlinkChunk(b);
			a->FreeChunk(b);
		}
	}
Example #10
	/*static*/
	void FixedAlloc::Free(void *item)
	{
		FixedBlock *b = (FixedBlock*) ((uintptr_t)item & ~0xFFF);

		GCAssertMsg(b->alloc->m_heap->IsAddressInHeap(item), "Bogus pointer passed to free");

#ifdef MMGC_HOOKS
		GCHeap *heap = b->alloc->m_heap;
		if(heap->HooksEnabled()) {
		#ifdef MMGC_MEMORY_PROFILER
			if(heap->GetProfiler())
				b->alloc->m_totalAskSize -= heap->GetProfiler()->GetAskSize(item);
		#endif

			heap->FinalizeHook(item, b->size - DebugSize());
			heap->FreeHook(item, b->size - DebugSize(), 0xed);
		}
#endif
		item = GetRealPointer(item);

		// Add this item to the free list
		*((void**)item) = b->firstFree;
		b->firstFree = item;

		// We were full but now we have a free spot, add us to the free block list.
		if (b->numAlloc == b->alloc->m_itemsPerBlock)
		{
			GCAssert(!b->nextFree && !b->prevFree);
			b->nextFree = b->alloc->m_firstFree;
			if (b->alloc->m_firstFree)
				b->alloc->m_firstFree->prevFree = b;
			b->alloc->m_firstFree = b;
		}
#ifdef _DEBUG
		else // we should already be on the free list
		{
			GCAssert ((b == b->alloc->m_firstFree) || b->prevFree);
		}
#endif

		b->numAlloc--;

		if(b->numAlloc == 0) {
			b->alloc->FreeChunk(b);
		}
	}
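
The free path above pushes the slot onto an intrusive free list: the first word of the freed slot stores the link to the next free slot. A minimal isolated sketch of that idiom, with hypothetical names:

struct FreeList { void* head; };

void Push(FreeList& fl, void* slot)
{
    *(void**)slot = fl.head;     // the slot's own storage holds the next link
    fl.head = slot;
}

void* Pop(FreeList& fl)
{
    void* slot = fl.head;
    if (slot)
        fl.head = *(void**)slot; // follow the link stored in the slot
    return slot;
}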
Example #11
int main(int argc, char* argv[])
{
    //
    // Initialize system info
    //
    InitializeSystemInfo();

    //
    // Initialize free object methodtable. The GC uses a special array-like methodtable as placeholder
    // for collected free space.
    //
    static MethodTable freeObjectMT;
    freeObjectMT.InitializeFreeObject();
    g_pFreeObjectMethodTable = &freeObjectMT;

    //
    // Initialize handle table
    //
    if (!Ref_Initialize())
        return -1;

    //
    // Initialize GC heap
    //
    GCHeap *pGCHeap = GCHeap::CreateGCHeap();
    if (!pGCHeap)
        return -1;

    if (FAILED(pGCHeap->Initialize()))
        return -1;

    //
    // Initialize current thread
    //
    ThreadStore::AttachCurrentThread(false);

    //
    // Create a Methodtable with GCDesc
    //

    class My : Object {
    public:
        Object * m_pOther1;
        int dummy_inbetween;
        Object * m_pOther2;
    };

    static struct My_MethodTable
    {
        // GCDesc
        CGCDescSeries m_series[2];
        size_t m_numSeries;

        // The actual methodtable
        MethodTable m_MT;
    }
    My_MethodTable;

    // 'My' contains the MethodTable*
    size_t baseSize = sizeof(My);
    // GC expects the size of ObjHeader (extra void*) to be included in the size.
    baseSize = baseSize + sizeof(ObjHeader);
    // Add padding as necessary. GC requires the object size to be at least MIN_OBJECT_SIZE.
    My_MethodTable.m_MT.m_baseSize = max(baseSize, MIN_OBJECT_SIZE);

    My_MethodTable.m_MT.m_componentSize = 0;    // Array component size
    My_MethodTable.m_MT.m_flags = MTFlag_ContainsPointers;

    My_MethodTable.m_numSeries = 2;

    // The GC walks the series backwards. It expects the offsets to be sorted in descending order.
    My_MethodTable.m_series[0].SetSeriesOffset(offsetof(My, m_pOther2));
    My_MethodTable.m_series[0].SetSeriesCount(1);
    My_MethodTable.m_series[0].seriessize -= My_MethodTable.m_MT.m_baseSize;

    My_MethodTable.m_series[1].SetSeriesOffset(offsetof(My, m_pOther1));
    My_MethodTable.m_series[1].SetSeriesCount(1);
    My_MethodTable.m_series[1].seriessize -= My_MethodTable.m_MT.m_baseSize;

    MethodTable * pMyMethodTable = &My_MethodTable.m_MT;

    // Allocate instance of MyObject
    Object * pObj = AllocateObject(pMyMethodTable);
    if (pObj == NULL)
        return -1;

    // Create strong handle and store the object into it
    OBJECTHANDLE oh = CreateGlobalHandle(pObj);
    if (oh == NULL)
        return -1;

    for (int i = 0; i < 1000000; i++)
    {
        Object * pBefore = ((My *)ObjectFromHandle(oh))->m_pOther1;

        // Allocate more instances of the same object
        Object * p = AllocateObject(pMyMethodTable);
        if (p == NULL)
            return -1;

        Object * pAfter = ((My *)ObjectFromHandle(oh))->m_pOther1;

        // Uncomment this assert to see how GC triggered inside AllocateObject moved objects around
        // assert(pBefore == pAfter);

        // Store the newly allocated object into a field using WriteBarrier
        WriteBarrier(&(((My *)ObjectFromHandle(oh))->m_pOther1), p);
    }

    // Create weak handle that points to our object
    OBJECTHANDLE ohWeak = CreateGlobalWeakHandle(ObjectFromHandle(oh));
    if (ohWeak == NULL)
        return -1;

    // Destroy the strong handle so that nothing will be keeping our object alive
    DestroyGlobalHandle(oh);

    // Explicitly trigger full GC
    pGCHeap->GarbageCollect();

    // Verify that the weak handle got cleared by the GC
    assert(ObjectFromHandle(ohWeak) == NULL);

    printf("Done\n");

    return 0;
}
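
The loop above funnels every pointer store through WriteBarrier. The sample doesn't show its body; as a point of reference, a generic card-table barrier (not this sample's actual implementation; the globals and the 2 KB card size are assumptions) looks roughly like this:

#include <stdint.h>
#include <stddef.h>

extern uint8_t* g_card_table;   // hypothetical global card table
const size_t kCardShift = 11;   // assumed 2 KB cards

void CardTableWriteBarrier(Object** field, Object* value)
{
    *field = value;                                        // the store itself
    g_card_table[(uintptr_t)field >> kCardShift] = 0xFF;   // dirty the card so the next GC
                                                           // rescans this region for
                                                           // old-to-young pointers
}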
Example #12
    GCAlloc::GCBlock* GCAlloc::CreateChunk(int flags)
    {
        // Too many definitions of kBlockSize, make sure they're at least in sync.

        GCAssert(uint32_t(kBlockSize) == GCHeap::kBlockSize);

        // Get bitmap space; this may trigger OOM handling.

        gcbits_t* bits = m_bitsInPage ? NULL : (gcbits_t*)m_gc->AllocBits(m_numBitmapBytes, m_sizeClassIndex);

        // Allocate a new block; this may trigger OOM handling (though that
        // won't affect the bitmap space, which is not GC'd individually).

        GCBlock* b = (GCBlock*) m_gc->AllocBlock(1, PageMap::kGCAllocPage, /*zero*/true,  (flags&GC::kCanFail) != 0);

        if (b)
        {
            VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 1/*zeroed*/);

            // treat block header as a separate allocation
            VALGRIND_MEMPOOL_ALLOC(b, b, sizeof(GCBlock));


            b->gc = m_gc;
            b->alloc = this;
            b->size = m_itemSize;
            b->slowFlags = 0;
            if(m_gc->collecting && m_finalized)
                b->finalizeState = m_gc->finalizedValue;
            else
                b->finalizeState = !m_gc->finalizedValue;

            b->bibopTag = m_bibopTag;

#ifdef MMGC_FASTBITS
            b->bitsShift = (uint8_t) m_bitsShift;
#endif
            b->containsPointers = ContainsPointers();
            b->rcobject = ContainsRCObjects();

            if (m_bitsInPage)
                b->bits = (gcbits_t*)b + sizeof(GCBlock);
            else
                b->bits = bits;

            // ditto for in page bits
            if (m_bitsInPage) {
                VALGRIND_MEMPOOL_ALLOC(b, b->bits, m_numBitmapBytes);
            }

            // Link the block at the end of the list
            b->prev = m_lastBlock;
            b->next = 0;

            if (m_lastBlock) {
                m_lastBlock->next = b;
            }
            if (!m_firstBlock) {
                m_firstBlock = b;
            }
            m_lastBlock = b;

            // Add our new ChunkBlock to the firstFree list (which should be empty)
            if (m_firstFree)
            {
                GCAssert(m_firstFree->prevFree == 0);
                m_firstFree->prevFree = b;
            }
            b->nextFree = m_firstFree;
            b->prevFree = 0;
            m_firstFree = b;

            // calculate back from end (better alignment, no dead space at end)
            b->items = (char*)b+GCHeap::kBlockSize - m_itemsPerBlock * m_itemSize;
            b->numFree = (short)m_itemsPerBlock;

            // explode the new block onto its free list
            //
            // We must make the object look free, which means poisoning it properly and setting
            // the mark bits correctly.

            b->firstFree = b->items;
            void** p = (void**)(void*)b->items;
            int limit = m_itemsPerBlock-1;
#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
#endif
            for ( int i=0 ; i < limit ; i++ ) {
#ifdef MMGC_HOOKS
#ifdef MMGC_MEMORY_INFO // DebugSize is 0 if MEMORY_INFO is off, so we get an "obviously true" warning from GCC.
                GCAssert(m_itemSize >= DebugSize());
#endif
                if(heap->HooksEnabled())
                    heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
                p = FLSeed(p, (char*)p + m_itemSize);
            }
#ifdef MMGC_HOOKS
            if(heap->HooksEnabled())
                heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
            p[0] = NULL;

            // Set all the mark bits to 'free'
            
            GCAssert(sizeof(gcbits_t) == 1);
            GCAssert(kFreelist == 3);
            GCAssert(m_numBitmapBytes % 4 == 0);
            
            uint32_t *pbits = (uint32_t*)(void *)b->bits;
            for(int i=0, n=m_numBitmapBytes>>2; i < n; i++)
                pbits[i] = 0x03030303;

#ifdef MMGC_MEMORY_INFO
            VerifyFreeBlockIntegrity(b->firstFree, m_itemSize);
#endif
        }
        else {
            // The listing is truncated here; the likely continuation (an
            // assumption, mirroring the AllocBits call above) releases the
            // bitmap space so it is not leaked when block allocation fails.
            if (bits)
                m_gc->FreeBits(bits, m_sizeClassIndex);
        }
        return b;
    }
Example #13
#ifdef MMGC_MEMORY_PROFILER
    // Reconstructed: the listing omitted this profiler overload, but the body
    // below references originalSize, which exists only in this variant.
    void* GCLargeAlloc::Alloc(size_t originalSize, size_t requestSize, int flags)
#else
    void* GCLargeAlloc::Alloc(size_t requestSize, int flags)
#endif
    {
#ifdef DEBUG
        m_gc->heap->CheckForOOMAbortAllocation();
#endif
        GCHeap::CheckForAllocSizeOverflow(requestSize, sizeof(LargeBlock)+GCHeap::kBlockSize);

        int blocks = (int)((requestSize+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize);
        uint32_t computedSize = blocks*GCHeap::kBlockSize - sizeof(LargeBlock);

        // Allocation must be signalled before we allocate because no GC work must be allowed to
        // come between an allocation and an initialization - if it does, we may crash, as
        // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
        // having it.  In principle we could signal allocation late but only set the object
        // flags after signaling, but we might still cause trouble for the profiler, which also
        // depends on non-interruptibility.

        m_gc->SignalAllocWork(computedSize);

        // Pointer-containing memory is always zeroed (see bug 594533).
        if((flags&GC::kContainsPointers) != 0)
            flags |= GC::kZero;

        LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, PageMap::kGCLargeAllocPageFirst,
                                                           (flags&GC::kZero) != 0, (flags&GC::kCanFail) != 0);
        void *item = NULL;

        if (block)
        {
            // Code below uses these optimizations
            GCAssert((unsigned long)GC::kFinalize == (unsigned long)kFinalizable);
            GCAssert((unsigned long)GC::kInternalExact == (unsigned long)kVirtualGCTrace);
            
            gcbits_t flagbits0 = 0;
            gcbits_t flagbits1 = 0;

#if defined VMCFG_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|GC::kInternalExact));
#elif defined VMCFG_SELECTABLE_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|m_gc->runtimeSelectableExactnessFlag));  // 0 or GC::kInternalExact
#else
            flagbits0 = (flags & GC::kFinalize);
#endif

            VALGRIND_CREATE_MEMPOOL(block, /*rdzone*/0, (flags&GC::kZero) != 0);
            VALGRIND_MEMPOOL_ALLOC(block, block, sizeof(LargeBlock));

            block->gc = this->m_gc;
            block->alloc= this;
            block->next = m_blocks;
            block->size = computedSize;
            block->bibopTag = 0;
#ifdef MMGC_FASTBITS
            block->bitsShift = 12;     // Always use bits[0]
#endif
            block->containsPointers = ((flags&GC::kContainsPointers) != 0) ? 1 : 0;
            block->rcobject = ((flags&GC::kRCObject) != 0) ? 1 : 0;
            block->bits = block->flags;
            m_blocks = block;

            item = block->GetObject();

            if(m_gc->collecting && !m_startedFinalize)
                flagbits0 |= kMark;

            block->flags[0] = flagbits0;
            block->flags[1] = flagbits1;
#ifdef _DEBUG
            (void)originalSize;
            if (flags & GC::kZero)
            {
                if (!RUNNING_ON_VALGRIND)
                {
                    // AllocBlock should take care of this
                    for(int i=0, n=(int)(requestSize/sizeof(int)); i<n; i++) {
                        if(((int*)item)[i] != 0)
                            GCAssert(false);
                    }
                }
            }
#endif

            // see comments in GCAlloc about using full size instead of ask size
            VALGRIND_MEMPOOL_ALLOC(block, item, computedSize);

#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
            if(heap->HooksEnabled()) {
                size_t userSize = block->size - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
                m_totalAskSize += originalSize;
                heap->AllocHook(GetUserPointer(item), originalSize, userSize, /*managed=*/true);
#else
                heap->AllocHook(GetUserPointer(item), 0, userSize, /*managed=*/true);
#endif
            }
#endif
        }
        return item;
    }
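
For concreteness, a worked instance of the block-count rounding above, assuming kBlockSize == 4096 and a hypothetical 16-byte LargeBlock header (the real sizeof(LargeBlock) differs):

#include <assert.h>
#include <stddef.h>

int main()
{
    const size_t kBlockSize = 4096, header = 16, requestSize = 5000;
    size_t blocks = (requestSize + header + kBlockSize - 1) / kBlockSize;  // ceiling division
    size_t computedSize = blocks * kBlockSize - header;                    // usable bytes
    assert(blocks == 2 && computedSize == 8176);   // two pages, minus the header
    return 0;
}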
Example #14
#ifdef MMGC_MEMORY_PROFILER
	// Reconstructed: the body below references size, which exists only in
	// this profiler variant of the signature.
	void* GCAlloc::Alloc(size_t size, int flags)
#else
	void* GCAlloc::Alloc(int flags)
#endif
	{
		GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

		// Allocation must be signalled before we allocate because no GC work must be allowed to
		// come between an allocation and an initialization - if it does, we may crash, as 
		// GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
		// having it.  In principle we could signal allocation late but only set the object
		// flags after signaling, but we might still cause trouble for the profiler, which also
		// depends on non-interruptibility.

		m_gc->SignalAllocWork(m_itemSize);
		
		GCBlock* b = m_firstFree;
	start:
		if (b == NULL) {
			if (m_needsSweeping && !m_gc->collecting) {
				Sweep(m_needsSweeping);
				b = m_firstFree;
				goto start;
			}
			
			bool canFail = (flags & GC::kCanFail) != 0;
			CreateChunk(canFail);
			b = m_firstFree;
			if (b == NULL) {
				GCAssert(canFail);
				return NULL;
			}
		}
		
		GCAssert(!b->needsSweeping);
		GCAssert(b == m_firstFree);
		GCAssert(b && !b->IsFull());
		
		void *item;
		if(b->firstFree) {
			item = b->firstFree;
			b->firstFree = *((void**)item);
			// clear the free-list pointer; the rest was zeroed in Free
			*(intptr_t*) item = 0;
#ifdef MMGC_MEMORY_INFO
			// check for writes on deleted memory
			VerifyFreeBlockIntegrity(item, b->size);
#endif
		} else {
			item = b->nextItem;
			if(((uintptr_t)((char*)item + b->size) & 0xfff) != 0) {
				b->nextItem = (char*)item + b->size;
			} else {
				b->nextItem = NULL;
			}
		}

		// Set up bits: items start out white, and whether they need finalization
		// is determined by the caller.

		// make sure we ended up in the right place
		GCAssert(((flags&GC::kContainsPointers) != 0) == ContainsPointers());

		// this assumes what we assert
		GCAssert((unsigned long)GC::kFinalize == (unsigned long)GCAlloc::kFinalize);
		
		int index = GetIndex(b, item);
		GCAssert(index >= 0);
		Clear4BitsAndSet(b, index, flags & kFinalize);

		b->numItems++;
#ifdef MMGC_MEMORY_INFO
		m_numAlloc++;
#endif

		// If we're out of free items, be sure to remove ourselves from the
		// list of blocks with free items.  TODO Minor optimization: when we
		// carve an item off the end of the block, we don't need to check here
		// unless we just set b->nextItem to NULL.

		if (b->IsFull()) {
			m_firstFree = b->nextFree;
			b->nextFree = NULL;
			GCAssert(b->prevFree == NULL);

			if (m_firstFree)
				m_firstFree->prevFree = 0;
		}

		// Prevent mid-collection (i.e. destructor) allocations on un-swept pages from
		// getting swept. If the page is finalized and doesn't need sweeping we don't want
		// to set the mark bit; otherwise the object will be marked when we start the next
		// marking phase and write barriers won't fire (since it's black).
		if(m_gc->collecting)
		{ 
			if((b->finalizeState != m_gc->finalizedValue) || b->needsSweeping)
				SetBit(b, index, kMark);
		}

		GCAssert((uintptr_t(item) & ~0xfff) == (uintptr_t) b);
		GCAssert((uintptr_t(item) & 7) == 0);

#ifdef MMGC_HOOKS
		GCHeap* heap = GCHeap::GetGCHeap();
		if(heap->HooksEnabled())
		{
			size_t userSize = m_itemSize - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
			m_totalAskSize += size;
			heap->AllocHook(GetUserPointer(item), size, userSize);
#else
			heap->AllocHook(GetUserPointer(item), 0, userSize);
#endif
		}
#endif

		return item;
	}
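
The fast path above has two branches: pop from the block's free list, otherwise bump nextItem until the next carve would start exactly on the 4 KB block boundary. In isolation, with hypothetical names:

void* AllocFromBlockSketch(void*& firstFree, char*& nextItem, size_t itemSize)
{
    if (firstFree) {
        void* item = firstFree;
        firstFree = *(void**)item;          // unlink from the free list
        return item;
    }
    if (nextItem) {
        void* item = nextItem;
        // Advance the bump pointer, or mark the block exhausted when the
        // following item would begin exactly on the next 4 KB boundary.
        char* next = nextItem + itemSize;
        nextItem = (((uintptr_t)next & 0xfff) != 0) ? next : NULL;
        return item;
    }
    return NULL;                            // block exhausted
}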