Example #1
	void FixedAlloc::CreateChunk(bool canFail)
	{
		// Allocate a new block
		m_maxAlloc += m_itemsPerBlock;

		vmpi_spin_lock_t *lock = NULL;
		if(m_isFixedAllocSafe) {
			lock = &((FixedAllocSafe*)this)->m_spinlock;
			VMPI_lockRelease(lock);
		}

		FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, GCHeap::kExpand | (canFail ? GCHeap::kCanFail : 0));

		if(lock != NULL)
			VMPI_lockAcquire(lock);
		
		GCAssert(m_itemSize <= 0xffff);

		if(!b)
			return;
		
		b->numAlloc = 0;
		b->size = (uint16_t)m_itemSize;
		b->firstFree = 0;
		b->nextItem = b->items;
		b->alloc = this;

#ifdef _DEBUG
		// Deleted and unused memory is 0xed'd; this is important for leak diagnostics.
		VMPI_memset(b->items, 0xed, m_itemSize * m_itemsPerBlock);
#endif

		// Link the block at the end of the list
		b->prev = m_lastBlock;
		b->next = 0;
		if (m_lastBlock) {
			m_lastBlock->next = b;
		}
		if (!m_firstBlock) {
			m_firstBlock = b;
		}
		m_lastBlock = b;

		// Add our new ChunkBlock to the firstFree list (which should
		// be empty but might not be, because we let go of the lock above).
		if (m_firstFree)
		{
			GCAssert(m_firstFree->prevFree == 0);
			m_firstFree->prevFree = b;
		}
		b->nextFree = m_firstFree;
		b->prevFree = 0;
		m_firstFree = b;

		return;
	}
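
Note the lock discipline above: the spinlock is released around the potentially slow heap call and reacquired afterwards, which is why the comment in the example warns that the firstFree list may no longer be empty. A minimal sketch of the same pattern, using std::mutex and std::malloc as hypothetical stand-ins for vmpi_spin_lock_t and m_heap->Alloc:

    #include <cstdlib>
    #include <mutex>

    std::mutex g_lock;  // stand-in for the FixedAllocSafe spinlock

    // Precondition: g_lock is held by the caller; returns with it held again.
    void* allocate_with_lock_dropped(std::size_t n)
    {
        g_lock.unlock();            // VMPI_lockRelease(lock)
        void* p = std::malloc(n);   // the slow call: m_heap->Alloc(...)
        g_lock.lock();              // VMPI_lockAcquire(lock)
        // Shared state read before the unlock must be revalidated here,
        // just as CreateChunk re-checks m_firstFree after reacquiring.
        return p;
    }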
Example #2
    GCMarkStack::GCMarkStack()
        : m_base(NULL)
        , m_top(NULL)
        , m_limit(NULL)
        , m_topSegment(NULL)
        , m_hiddenCount(0)
        , m_extraSegment(NULL)
#ifdef MMGC_MARKSTACK_DEPTH
        , m_maxDepth(0)
#endif
    {
        GCAssert(sizeof(GCMarkStack::GCStackSegment) <= GCHeap::kBlockSize);
        PushSegment(true);
        GCAssert(Invariants());
    }
Example #3
	void GCAlloc::UnlinkChunk(GCBlock *b)
	{
		GCAssert(!b->needsSweeping);
		m_maxAlloc -= m_itemsPerBlock;
		m_numBlocks--;

		// Unlink the block from the list
		if (b == m_firstBlock) {
			m_firstBlock = Next(b);
		} else {
			b->prev->next = Next(b);
		}
		
		if (b == m_lastBlock) {
			m_lastBlock = b->prev;
		} else {
			Next(b)->prev = b->prev;
		}

		if(b->nextFree || b->prevFree || b == m_firstFree) {
			RemoveFromFreeList(b);
		}
#ifdef _DEBUG
		b->next = b->prev = NULL;
		b->nextFree = b->prevFree = NULL;
#endif
	}
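
UnlinkChunk covers all four doubly-linked-list cases (only block, head, tail, middle) with just two branches, because each branch independently fixes one side of the node. The same shape in miniature, with hypothetical names:

    struct DNode { DNode *prev, *next; };

    // O(1) unlink that updates head/tail exactly as UnlinkChunk does.
    void unlink(DNode* n, DNode*& first, DNode*& last)
    {
        if (n == first) first = n->next;
        else            n->prev->next = n->next;

        if (n == last)  last = n->prev;
        else            n->next->prev = n->prev;
    }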
Example #4
void VMPI_spyCallback()
{
	if(mmgc_spy_signal) 
	{
		VMPI_lockAcquire(&lock);
		if(mmgc_spy_signal) 
		{
			mmgc_spy_signal = 0;
			
			void *pipe = OpenAndConnectToNamedPipe("MMgc_Spy");
			
			spyStream = HandleToStream(pipe);
			GCAssert(spyStream != NULL);
			RedirectLogOutput(SpyLog);
			
			MMgc::GCHeap::GetGCHeap()->DumpMemoryInfo();
			
			fflush(spyStream);
			
			CloseNamedPipe(pipe);
			RedirectLogOutput(NULL);
			spyStream = NULL;	
		}
		VMPI_lockRelease(&lock);
	}
}
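
The callback is a textbook double-checked test: mmgc_spy_signal is read once without the lock so the common no-signal path stays cheap, and read again under the lock before being consumed so only one thread does the dump. A minimal sketch of that shape, assuming an atomic flag and a standard mutex:

    #include <atomic>
    #include <mutex>

    std::atomic<int> g_signal{0};  // plays the role of mmgc_spy_signal
    std::mutex g_signal_lock;      // plays the role of `lock`

    void poll_signal()
    {
        if (g_signal.load(std::memory_order_acquire)) {      // cheap unlocked check
            std::lock_guard<std::mutex> guard(g_signal_lock);
            if (g_signal.load(std::memory_order_relaxed)) {  // re-check under lock
                g_signal.store(0, std::memory_order_relaxed);
                // ... one-shot work: open pipe, dump, close ...
            }
        }
    }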
Example #5
    void GCLargeAlloc::Free(const void *item)
    {
        LargeBlock *b = GetLargeBlock(item);

#ifdef GCDEBUG
        // RCObjects have a contract that they must clean themselves; since they
        // have to scan themselves to decrement other RCObjects anyway, they might
        // as well clean themselves too, better than suffering a memset later.
        if(b->rcobject)
            m_gc->RCObjectZeroCheck((RCObject*)GetUserPointer(item));
#endif


        // We can't allow freeing something during sweeping; otherwise alloc counters
        // get decremented twice and destructors will be called twice.
        GCAssert(m_gc->collecting == false || m_gc->marking == true);
        if (m_gc->marking && (m_gc->collecting || IsProtectedAgainstFree(b))) {
            m_gc->AbortFree(GetUserPointer(item));
            return;
        }

        m_gc->policy.signalFreeWork(b->size);

#ifdef MMGC_HOOKS
        GCHeap* heap = GCHeap::GetGCHeap();
        if(heap->HooksEnabled())
        {
            const void* p = GetUserPointer(item);
            size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
            if(heap->GetProfiler())
                m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
            heap->FinalizeHook(p, userSize);
            heap->FreeHook(p, userSize, uint8_t(GCHeap::GCFreedPoison));
        }
#endif

        if(b->flags[0] & kHasWeakRef)
            m_gc->ClearWeakRef(GetUserPointer(item));

        LargeBlock **prev = &m_blocks;
        while(*prev)
        {
            if(b == *prev)
            {
                *prev = Next(b);
                size_t numBlocks = b->GetNumBlocks();
                m_totalAllocatedBytes -= b->size;
                VALGRIND_MEMPOOL_FREE(b, b);
                VALGRIND_MEMPOOL_FREE(b, item);
                VALGRIND_DESTROY_MEMPOOL(b);
                m_gc->FreeBlock(b, (uint32_t)numBlocks, m_partitionIndex);
                return;
            }
            prev = (LargeBlock**)(&(*prev)->next);
        }
        GCAssertMsg(false, "Bad free!");
    }
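
The removal loop at the end is the pointer-to-pointer idiom: prev always addresses the link that must change (first &m_blocks, then &(*prev)->next), so the head of the list needs no special case. The same idiom reduced to its essentials:

    struct Node { int value; Node* next; };

    // Remove the first node whose value matches; returns true on success.
    // `head` is the address of the head pointer, so head removal is uniform.
    bool remove_value(Node** head, int value)
    {
        for (Node** link = head; *link != nullptr; link = &(*link)->next) {
            if ((*link)->value == value) {
                *link = (*link)->next;  // splice the node out
                return true;
            }
        }
        return false;
    }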
Example #6
	/*static*/
	void FixedAlloc::Free(void *item)
	{
		FixedBlock *b = (FixedBlock*) ((uintptr_t)item & ~0xFFF);

		GCAssertMsg(b->alloc->m_heap->IsAddressInHeap(item), "Bogus pointer passed to free");

#ifdef MMGC_HOOKS
		GCHeap *heap = b->alloc->m_heap;
		if(heap->HooksEnabled()) {
		#ifdef MMGC_MEMORY_PROFILER
			if(heap->GetProfiler())
				b->alloc->m_totalAskSize -= heap->GetProfiler()->GetAskSize(item);
		#endif

			heap->FinalizeHook(item, b->size - DebugSize());
			heap->FreeHook(item, b->size - DebugSize(), 0xed);
		}
#endif
		item = GetRealPointer(item);

		// Add this item to the free list
		*((void**)item) = b->firstFree;
		b->firstFree = item;

		// We were full but now we have a free spot, add us to the free block list.
		if (b->numAlloc == b->alloc->m_itemsPerBlock)
		{
			GCAssert(!b->nextFree && !b->prevFree);
			b->nextFree = b->alloc->m_firstFree;
			if (b->alloc->m_firstFree)
				b->alloc->m_firstFree->prevFree = b;
			b->alloc->m_firstFree = b;
		}
#ifdef _DEBUG
		else // we should already be on the free list
		{
			GCAssert ((b == b->alloc->m_firstFree) || b->prevFree);
		}
#endif

		b->numAlloc--;

		if(b->numAlloc == 0) {
			b->alloc->FreeChunk(b);
		}
	}
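
The free list here is intrusive: the first word of a dead item is overwritten with a link to the previous head, so the list costs no memory beyond the items themselves. A standalone sketch (items must be at least pointer-sized):

    // Minimal intrusive freelist, the same trick as FixedBlock::firstFree.
    struct FreeList {
        void* head = nullptr;

        void push(void* item) {
            *static_cast<void**>(item) = head;  // write the link into the dead item
            head = item;
        }

        void* pop() {
            void* item = head;
            if (item != nullptr)
                head = *static_cast<void**>(item);  // read the link back out
            return item;
        }
    };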
Example #7
    void FixedAlloc::CreateChunk(bool canFail)
    {
        // Allocate a new block
        m_numBlocks++;

        vmpi_spin_lock_t *lock = NULL;
        if(m_isFixedAllocSafe) {
            lock = &((FixedAllocSafe*)this)->m_spinlock;
            VMPI_lockRelease(lock);
        }

        FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, GCHeap::kExpand | (canFail ? GCHeap::kCanFail : 0));

        if(lock != NULL)
            VMPI_lockAcquire(lock);

        if(!b)
            return;

        // Register the block with Valgrind only once we know the allocation
        // succeeded; the macros evaluate their arguments, so b must not be NULL.
        VALGRIND_CREATE_MEMPOOL(b,  0/*redZoneSize*/, 0/*zeroed*/);

        // Treat the block header as an allocation so reads and writes of it are okay.
        VALGRIND_MEMPOOL_ALLOC(b, b, (char*)b->items - (char*)b);

        b->numAlloc = 0;
        b->size = (uint16_t)m_itemSize;
        b->firstFree = 0;
        b->nextItem = b->items;
        b->alloc = this;

#ifdef GCDEBUG
        // Deleted and unused memory is poisoned; this is important for leak diagnostics.
        if (!RUNNING_ON_VALGRIND)
            VMPI_memset(b->items, uint8_t(GCHeap::FXFreedPoison), m_itemSize * m_itemsPerBlock);
#endif

        // Link the block at the end of the list.
        b->prev = m_lastBlock;
        b->next = 0;
        if (m_lastBlock)
            m_lastBlock->next = b;
        if (!m_firstBlock)
            m_firstBlock = b;
        m_lastBlock = b;

        // Add our new ChunkBlock to the firstFree list (which should
        // be empty but might not be, because we let go of the lock above).
        if (m_firstFree)
        {
            GCAssert(m_firstFree->prevFree == 0);
            m_firstFree->prevFree = b;
        }
        b->nextFree = m_firstFree;
        b->prevFree = 0;
        m_firstFree = b;

        return;
    }
Example #8
	void weakRefSweepLarge()
	{
		GCWeakRef *ref = createWeakRef(5000);
		collect();
		gc->CleanStack(true);
		collect();
		(void)ref;
		GCAssert(ref->get() == NULL);
	}
Example #9
	REALLY_INLINE void GCAlloc::GCBlock::FreeItem(const void *item, int index)
	{
#ifdef MMGC_MEMORY_INFO
		GCAssert(alloc->m_numAlloc != 0);
#endif

#ifdef _DEBUG		
		// Check that it has not already been freed.
		void *free = firstFree;
		while(free) {
			GCAssert(free != item);
			free = *((void**) free);
		}
#endif

		void *oldFree = firstFree;
		firstFree = (void*)item;
#ifdef MMGC_MEMORY_INFO
		alloc->m_numAlloc--;
#endif
		numItems--;

		GCAssert(!GetBit(this, index, kQueued));
		SetBit(this, index, kFreelist);

#ifndef _DEBUG
		// memset the rest of the item, not including the free list pointer; in _DEBUG
		// we poison the memory instead (and clear it in Alloc).
		// FIXME: can we do something faster with MMX here?
		//
		// BTW, experiments show that clearing on alloc instead of on free
		// benefits microbenchmarks that do massive amounts of double-boxing,
		// but nothing else enough to worry about it.  (The trick is that
		// no clearing on alloc is needed when carving objects off the end
		// of a block, whereas every object is cleared on free even if the
		// page is subsequently emptied out and returned to the block manager.
		// Massively boxing programs have alloc/free patterns that are biased
		// toward non-RC objects carved off the ends of blocks.)
		if(!alloc->ContainsRCObjects())
			VMPI_memset((char*)item, 0, size);
#endif
		// Add this item to the free list
		*((void**)item) = oldFree;	
	}
Example #10
	/* static */
	void GCAlloc::Free(const void *item)
	{
		GCBlock *b = GetBlock(item);
		GCAlloc *a = b->alloc;
	
#ifdef MMGC_HOOKS
		GCHeap* heap = GCHeap::GetGCHeap();
		if(heap->HooksEnabled())
		{
			const void* p = GetUserPointer(item);
			size_t userSize = GC::Size(p);
#ifdef MMGC_MEMORY_PROFILER
			if(heap->GetProfiler())
				a->m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
#endif
			heap->FinalizeHook(p, userSize);
			heap->FreeHook(p, userSize, 0xca);
		}
#endif

#ifdef _DEBUG		
		// Check that it has not already been freed.
		void *free = b->firstFree;
		while(free) {
			GCAssert(free != item);
			free = *((void**) free);
		}
#endif

		int index = GetIndex(b, item);
		if(GetBit(b, index, kHasWeakRef)) {
			b->gc->ClearWeakRef(GetUserPointer(item));
		}

		bool wasFull = b->IsFull();

		if(b->needsSweeping) {
#ifdef _DEBUG
			bool gone =
#endif
				a->Sweep(b);
			GCAssertMsg(!gone, "How can a page I'm about to free an item on be empty?");
			wasFull = false;
		}

		if(wasFull) {
			a->AddToFreeList(b);
		}

		b->FreeItem(item, index);

		if(b->numItems == 0) {
			a->UnlinkChunk(b);
			a->FreeChunk(b);
		}
	}
Example #11
	void GCAlloc::SweepNeedsSweeping()
	{
		GCBlock* next;
		for (GCBlock* b = m_needsSweeping; b != NULL; b = next)
		{
			next = b->nextFree;	
			Sweep(b);
		}
		GCAssert(m_needsSweeping == NULL);
	}
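
Sweep(b) may unlink b from the needs-sweeping list and free the block outright, so the loop caches b->nextFree before making the call. The idiom applies to any traversal whose body can destroy the current node:

    struct FreeNode { FreeNode* nextFree; };

    // Free every node; the link is saved before `delete` invalidates it,
    // just as SweepNeedsSweeping caches b->nextFree before Sweep(b).
    void destroy_all(FreeNode* list)
    {
        FreeNode* next;
        for (FreeNode* n = list; n != nullptr; n = next) {
            next = n->nextFree;
            delete n;
        }
    }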
Example #12
FixedAlloc::~FixedAlloc()
{
    // Free all of the blocks
    while (m_firstBlock) {
#ifdef MEMORY_INFO
        if(m_firstBlock->numAlloc > 0) {
            // Go through every memory location; if the fourth 4 bytes, cast as
            // an integer, aren't 0xedededed, then it's allocated space, and the
            // integer is an index into the stack trace table. The first 4 bytes
            // hold the freelist pointer for freed items (which is why the trace
            // index is stored in the second 4).
            // first 4 bytes - free list pointer
            // 2nd 4 bytes - alloc stack trace
            // 3rd 4 bytes - free stack trace
            // 4th 4 bytes - 0xedededed if freed correctly
            unsigned int *mem = (unsigned int*) m_firstBlock->items;
            unsigned int itemNum = 0;
            while(itemNum++ < m_itemsPerBlock) {
                unsigned int fourthInt = *(mem+3);
                if(fourthInt != 0xedededed) {
                    GCDebugMsg(false, "Leaked %d byte item.  Addr: 0x%x\n", GetItemSize(), mem+2);
                    PrintStackTraceByIndex(*(mem+1));
                }
                mem += (m_itemSize / sizeof(int));
            }
            GCAssert(false);
        }

        // go through every item on the free list and make sure it wasn't written to
        // after being poisoned.
        void *item = m_firstBlock->firstFree;
        while(item) {
#ifdef MMGC_64BIT
            for(int i=3, n=(m_firstBlock->size>>2)-3; i<n; i++)
#else
            for(int i=3, n=(m_firstBlock->size>>2)-1; i<n; i++)
#endif
            {
                unsigned int data = ((int*)item)[i];
                if(data != 0xedededed)
                {
                    GCDebugMsg(false, "Object 0x%x was written to after it was deleted, allocation trace:");
                    PrintStackTrace((int*)item+2);
                    GCDebugMsg(false, "Deletion trace:");
                    PrintStackTrace((int*)item+3);
                    GCDebugMsg(true, "Deleted item write violation!");
                }
            }
            // next free item
            item = *((void**)item);
        }
#endif
        FreeChunk(m_firstBlock);
    }
}
Example #13
FixedAlloc::FixedBlock* FixedAlloc::CreateChunk()
{
    // Allocate a new block
    m_maxAlloc += m_itemsPerBlock;

    FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, true, false);

    GCAssert(m_itemSize <= 0xffff);
    b->numAlloc = 0;
    b->size = (uint16)m_itemSize;
    b->firstFree = 0;
    b->nextItem = b->items;
    b->alloc = this;

#ifdef _DEBUG
    // Deleted and unused memory is 0xed'd; this is important for leak diagnostics.
    memset(b->items, 0xed, m_itemSize * m_itemsPerBlock);
#endif

    // Link the block at the end of the list
    b->prev = m_lastBlock;
    b->next = 0;
    if (m_lastBlock) {
        m_lastBlock->next = b;
    }
    if (!m_firstBlock) {
        m_firstBlock = b;
    }
    m_lastBlock = b;

    // Add our new ChunkBlock to the firstFree list (which should be empty)
    if (m_firstFree)
    {
        GCAssert(m_firstFree->prevFree == 0);
        m_firstFree->prevFree = b;
    }
    b->nextFree = m_firstFree;
    b->prevFree = 0;
    m_firstFree = b;

    return b;
}
Example #14
    bool GCMarkStack::TransferOneFullSegmentFrom(GCMarkStack& other)
    {
        GCAssert(other.EntirelyFullSegments() > 0);
        GCStackSegment* seg;

        if (other.m_topSegment->m_prev == NULL) {
            // Picking off the only segment
            GCAssert(other.m_top == other.m_limit);
            seg = other.m_topSegment;
            other.m_topSegment = NULL;
            other.m_base = NULL;
            other.m_top = NULL;
            other.m_limit = NULL;
            if (!other.PushSegment()) {
                // Oops: couldn't push it, so undo.  We're out of memory but we
                // don't want to signal OOM here, we want to recover, signal failure,
                // and let the caller handle it.
                other.m_topSegment = seg;
                other.m_base = seg->m_items;
                other.m_top = other.m_limit = other.m_base + kMarkStackItems;
                return false;
            }
        }
        else {
            // Always pick off the segment below the top.
            seg = other.m_topSegment->m_prev;
            other.m_topSegment->m_prev = seg->m_prev;
            other.m_hiddenCount -= kMarkStackItems;
        }

        // Insert it below our top segment
        seg->m_prev = m_topSegment->m_prev;
        m_topSegment->m_prev = seg;
        m_hiddenCount += kMarkStackItems;

        // Special case that occurs if a segment was inserted into an empty stack.
        if (m_top == m_base)
            PopSegment();
        GCAssert(Invariants());
        GCAssert(other.Invariants());
        return true;
    }
Example #15
    GCAlloc::GCAlloc(GC* _gc, int _itemSize, bool _containsPointers, bool _isRC, bool _isFinalized, int _sizeClassIndex, uint8_t _bibopTag) :
        m_firstBlock(NULL),
        m_lastBlock(NULL),
        m_firstFree(NULL),
        m_needsSweeping(NULL),
        m_qList(NULL),
        m_qBudget(0),
        m_qBudgetObtained(0),
        m_itemSize((_itemSize+7)&~7), // Round itemSize up to a multiple of 8
        m_itemsPerBlock((kBlockSize - sizeof(GCBlock)) / m_itemSize),
        m_totalAllocatedBytes(0),
    #ifdef MMGC_FASTBITS
        m_bitsShift(log2(m_itemSize)),
        m_numBitmapBytes(kBlockSize / (1 << m_bitsShift)),
    #else
        m_numBitmapBytes(((m_itemsPerBlock * sizeof(gcbits_t))+3)&~3), // round up to 4 bytes so we can go through the bits several items at a time
    #endif
        m_sizeClassIndex(_sizeClassIndex),
    #ifdef MMGC_MEMORY_PROFILER
        m_totalAskSize(0),
    #endif
        m_bitsInPage(_containsPointers && kBlockSize - int(m_itemsPerBlock * m_itemSize + sizeof(GCBlock)) >= m_numBitmapBytes),
        m_bibopTag(_bibopTag),
        multiple(ComputeMultiply((uint16_t)m_itemSize)),
        shift(ComputeShift((uint16_t)m_itemSize)),
        containsPointers(_containsPointers),
        containsRCObjects(_isRC),
        containsFinalizedObjects(_isFinalized),
        m_finalized(false),
        m_gc(_gc)
    {
#ifdef DEBUG
        int usedSpace = m_itemsPerBlock * m_itemSize + sizeof(GCBlock);
#endif
        GCAssert((unsigned)kBlockSize == GCHeap::kBlockSize);
        GCAssert(usedSpace <= kBlockSize);
        GCAssert(kBlockSize - usedSpace < (int)m_itemSize);
        GCAssert(m_itemSize < GCHeap::kBlockSize);
        GCAssert(!_isRC || _isFinalized);
        m_gc->ObtainQuickListBudget(m_itemSize*m_itemsPerBlock);
        m_qBudget = m_qBudgetObtained = m_itemsPerBlock;
    }
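
The initializer list packs the size-class arithmetic into two expressions: (_itemSize+7)&~7 rounds the item size up to a multiple of 8, and (kBlockSize - sizeof(GCBlock)) / m_itemSize yields the per-block capacity. A worked example with assumed numbers (4096-byte blocks, a hypothetical 48-byte GCBlock header):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const std::size_t kBlockSize = 4096; // assumed block size
        const std::size_t headerSize = 48;   // hypothetical sizeof(GCBlock)

        std::size_t itemSize = (37 + 7) & ~std::size_t(7); // 37 rounds up to 40
        assert(itemSize == 40);

        std::size_t itemsPerBlock = (kBlockSize - headerSize) / itemSize;
        assert(itemsPerBlock == 101); // 4048 / 40 = 101 items, 8 bytes left over
        return 0;
    }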
Example #16
 bool GCMarkStack::PushSegment(bool mustSucceed)
 {
     GCAssert(sizeof(GCStackSegment) <= 4096);
     GCAssert(m_top == m_limit);
     if (m_extraSegment == NULL) {
         void *memory = AllocStackSegment(mustSucceed);
         if (memory == NULL)
             return false;
         m_extraSegment = new (memory) GCStackSegment();
     }
     if (m_topSegment != NULL)
         m_hiddenCount += kMarkStackItems;
     GCStackSegment* seg = m_extraSegment;
     m_extraSegment = NULL;
     seg->m_prev = m_topSegment;
     m_topSegment = seg;
     m_base = m_topSegment->m_items;
     m_limit = m_base + kMarkStackItems;
     m_top = m_base;
     return true;
 }
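
PushSegment separates raw-memory acquisition from construction: AllocStackSegment returns uninitialized storage, and placement new runs the GCStackSegment constructor in place, which is what lets m_extraSegment be cached and reused. The pattern in isolation, with hypothetical names:

    #include <cstdlib>
    #include <new>  // placement new

    struct Segment {
        Segment* prev = nullptr;
        // ... item storage would live here ...
    };

    // Returns NULL on OOM instead of aborting, like PushSegment(false).
    Segment* make_segment()
    {
        void* memory = std::malloc(sizeof(Segment)); // AllocStackSegment stand-in
        if (memory == nullptr)
            return nullptr;
        return new (memory) Segment();               // construct in place
    }

    // Teardown must mirror this: seg->~Segment(); std::free(seg);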
Example #17
	void GCAlloc::FreeChunk(GCBlock* b)
	{
		GCAssert(b->numItems == 0);
		if(!m_bitsInPage) {
			VMPI_memset(b->GetBits(), 0, m_numBitmapBytes);
			m_gc->FreeBits(b->GetBits(), m_sizeClassIndex);
			b->bits = NULL;
		}

		// Free the memory
		m_gc->FreeBlock(b, 1);
	}
Example #18
	void* GCLargeAlloc::Alloc(size_t size, int flags)
	{
#ifdef MMGC_THREADSAFE
		GCAssert(m_gc->m_lock.IsHeld());
#endif
		int blocks = (int)((size+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize);
		
		LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, GC::kGCLargeAllocPageFirst, (flags&GC::kZero) != 0);
		void *item = NULL;

		if (block)
		{
			block->flags = ((flags&GC::kFinalize) != 0) ? kFinalizeFlag : 0;
			block->flags |= ((flags&GC::kContainsPointers) != 0) ? kContainsPointers : 0;
			block->flags |= ((flags&GC::kRCObject) != 0) ? kRCObject : 0;
			block->gc = this->m_gc;
			block->next = m_blocks;
			block->usableSize = blocks*GCHeap::kBlockSize - sizeof(LargeBlock);
			m_blocks = block;
			
			item = (void*)(block+1);

			if(m_gc->collecting && !m_startedFinalize)
				block->flags |= kMarkFlag;

#ifdef _DEBUG
			if (flags & GC::kZero)
			{
				// AllocBlock should take care of this
				for(int i=0, n=(int)(size/sizeof(int)); i<n; i++) {
					if(((int*)item)[i] != 0)
						GCAssert(false);
				}
			}
#endif
		}
		return item;
	}
Example #19
    void GCMarkStack::Clear()
    {
        // Clear out the elements
        while (m_topSegment->m_prev != NULL)
            PopSegment();
        m_top = m_base;

        // Discard the cached segment
        if (m_extraSegment != NULL) {
            FreeStackSegment(m_extraSegment);
            m_extraSegment = NULL;
        }
        GCAssert(Invariants());
    }
Example #20
    void DictionaryObject::init(bool weakKeys)
    {
        GCAssert(vtable->traits->isDictionary());
        MMgc::GC* gc = this->gc();

        HeapHashtable* ht = weakKeys ? WeakKeyHashtable::create(gc) : HeapHashtable::create(gc);

        // Store the pointer to the newly created hashtable, through a write barrier,
        // at the hashtable offset given by the corresponding traits.
        union {
            uint8_t* p;
            HeapHashtable** hht;
        };
        p = (uint8_t*)this + vtable->traits->getHashtableOffset();
        WB(gc, this, hht, ht);
    }
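
The anonymous union is a type pun: the hashtable slot lives at a byte offset inside the object, so the code forms the address as a uint8_t* and then stores through it as a HeapHashtable** inside the write barrier. The strictly portable spelling of the same trick uses memcpy; the names below are hypothetical:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Table {};  // stand-in for HeapHashtable

    // Store `value` into a pointer-sized slot at byte offset `off` inside `obj`.
    void store_at_offset(void* obj, std::size_t off, Table* value)
    {
        std::uint8_t* slot = static_cast<std::uint8_t*>(obj) + off;
        std::memcpy(slot, &value, sizeof value);  // the WB() call would wrap this
    }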
Example #21
	void GCAlloc::CheckFreelist()
	{	
		GCBlock *b = m_firstFree;
		while(b)
		{
			void *freelist = b->firstFree;
			while(freelist)
			{			
				// b->firstFree should be either 0 end of free list or a pointer into b, otherwise, someone
				// wrote to freed memory and hosed our freelist
				GCAssert(freelist == 0 || ((uintptr_t) freelist >= (uintptr_t) b->items && (uintptr_t) freelist < (uintptr_t) b + GCHeap::kBlockSize));
				freelist = *((void**)freelist);
			}
			b = b->nextFree;
		}
	}
Example #22
	bool GCAlloc::Sweep(GCBlock *b)
	{	
		GCAssert(b->needsSweeping);
		RemoveFromSweepList(b);

		SweepGuts(b);

		if(b->numItems == 0)
		{
			UnlinkChunk(b);
			FreeChunk(b);
			return true;
		} 

		AddToFreeList(b);

		return false;
	}
Example #23
	void GCAlloc::ClearMarks(GCAlloc::GCBlock* block)
	{
		// Clear all the mark bits
		uint32_t *pbits = block->GetBits();
		const static uint32_t mq32 = 0x33333333;
		GCAssert((kMark|kQueued) == 0x3);
		// TODO: MMX version for IA32
		for(int i=0, n=m_numBitmapBytes>>2; i < n; i++) {
			pbits[i] &= ~mq32;
		}
		
		const void *item = block->firstFree;
		while(item != NULL) {
			// set freelist bit pattern
			SetBit(block, GetIndex(block, item), kFreelist);
			item = *(const void**)item;
		}
	}
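
The constant 0x33333333 is kMark|kQueued (binary 11) replicated into every nibble of a 32-bit word, so a single AND with its complement clears the mark and queued bits of eight 4-bit entries at once; this matches the GCAssert that (kMark|kQueued) == 0x3. The arithmetic, spelled out under the assumption of 4 flag bits per item:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const std::uint32_t kMark = 0x1, kQueued = 0x2; // per the assert above

        // Replicate the 2-bit pattern into all eight nibbles of a word.
        std::uint32_t mq32 = 0;
        for (int i = 0; i < 8; i++)
            mq32 = (mq32 << 4) | (kMark | kQueued);
        assert(mq32 == 0x33333333);

        // One AND clears mark+queued for eight entries, leaving other bits alone.
        std::uint32_t bits = 0xFFFFFFFF;
        bits &= ~mq32;
        assert(bits == 0xCCCCCCCC);
        return 0;
    }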
Example #24
 bool GCMarkStack::Invariants()
 {
     GCAssert(m_base+kMarkStackItems == m_limit);
     GCAssert(m_top >= m_base);
     GCAssert(m_top <= m_limit);
     GCAssert(m_topSegment->m_prev == NULL || m_top > m_base);
     uint32_t hc = 0;
     uint32_t ns = 0;
     for ( GCStackSegment* seg=m_topSegment->m_prev ; seg != NULL ; seg = seg->m_prev ) {
         hc += kMarkStackItems;
         ns++;
     }
     GCAssert(ns == EntirelyFullSegments() || (m_top == m_limit && ns+1 == EntirelyFullSegments()));
     GCAssert(hc == m_hiddenCount);
     GCAssert(Count() == hc + (m_top - m_base));
     return true;
 }
Example #25
 DictionaryObject::DictionaryObject(VTable *vtable, ScriptObject *delegate)
     : ScriptObject(vtable, delegate)
 {
     GCAssert(vtable->traits->isDictionary());
 }
Example #26
	void weakRefFreeLarge()
	{
		GCWeakRef *ref = createWeakRef(5000);
		delete ref->get();
		GCAssert(ref->get() == NULL);
	}
Example #27
	void weakRefFreeSmall()
	{
		GCWeakRef *ref = createWeakRef();
		delete ref->get();
		GCAssert(ref->get() == NULL);
	}
Example #28
    GCAlloc::GCBlock* GCAlloc::CreateChunk(int flags)
    {
        // Too many definitions of kBlockSize, make sure they're at least in sync.

        GCAssert(uint32_t(kBlockSize) == GCHeap::kBlockSize);

        // Get bitmap space; this may trigger OOM handling.

        gcbits_t* bits = m_bitsInPage ? NULL : (gcbits_t*)m_gc->AllocBits(m_numBitmapBytes, m_sizeClassIndex);

        // Allocate a new block; this may trigger OOM handling (though that
        // won't affect the bitmap space, which is not GC'd individually).

        GCBlock* b = (GCBlock*) m_gc->AllocBlock(1, PageMap::kGCAllocPage, /*zero*/true,  (flags&GC::kCanFail) != 0);

        if (b)
        {
            VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 1/*zeroed*/);

            // treat block header as a separate allocation
            VALGRIND_MEMPOOL_ALLOC(b, b, sizeof(GCBlock));


            b->gc = m_gc;
            b->alloc = this;
            b->size = m_itemSize;
            b->slowFlags = 0;
            if(m_gc->collecting && m_finalized)
                b->finalizeState = m_gc->finalizedValue;
            else
                b->finalizeState = !m_gc->finalizedValue;

            b->bibopTag = m_bibopTag;

#ifdef MMGC_FASTBITS
            b->bitsShift = (uint8_t) m_bitsShift;
#endif
            b->containsPointers = ContainsPointers();
            b->rcobject = ContainsRCObjects();

            if (m_bitsInPage)
                b->bits = (gcbits_t*)b + sizeof(GCBlock);
            else
                b->bits = bits;

            // ditto for in page bits
            if (m_bitsInPage) {
                VALGRIND_MEMPOOL_ALLOC(b, b->bits, m_numBitmapBytes);
            }

            // Link the block at the end of the list
            b->prev = m_lastBlock;
            b->next = 0;

            if (m_lastBlock) {
                m_lastBlock->next = b;
            }
            if (!m_firstBlock) {
                m_firstBlock = b;
            }
            m_lastBlock = b;

            // Add our new ChunkBlock to the firstFree list (which should be empty)
            if (m_firstFree)
            {
                GCAssert(m_firstFree->prevFree == 0);
                m_firstFree->prevFree = b;
            }
            b->nextFree = m_firstFree;
            b->prevFree = 0;
            m_firstFree = b;

            // calculate back from end (better alignment, no dead space at end)
            b->items = (char*)b+GCHeap::kBlockSize - m_itemsPerBlock * m_itemSize;
            b->numFree = (short)m_itemsPerBlock;

            // explode the new block onto its free list
            //
            // We must make the object look free, which means poisoning it properly and setting
            // the mark bits correctly.

            b->firstFree = b->items;
            void** p = (void**)(void*)b->items;
            int limit = m_itemsPerBlock-1;
#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
#endif
            for ( int i=0 ; i < limit ; i++ ) {
#ifdef MMGC_HOOKS
#ifdef MMGC_MEMORY_INFO // DebugSize is 0 if MEMORY_INFO is off, so we get an "obviously true" warning from GCC.
                GCAssert(m_itemSize >= DebugSize());
#endif
                if(heap->HooksEnabled())
                    heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
                p = FLSeed(p, (char*)p + m_itemSize);
            }
#ifdef MMGC_HOOKS
            if(heap->HooksEnabled())
                heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
#endif
            p[0] = NULL;

            // Set all the mark bits to 'free'
            
            GCAssert(sizeof(gcbits_t) == 1);
            GCAssert(kFreelist == 3);
            GCAssert(m_numBitmapBytes % 4 == 0);
            
            uint32_t *pbits = (uint32_t*)(void *)b->bits;
            for(int i=0, n=m_numBitmapBytes>>2; i < n; i++)
                pbits[i] = 0x03030303;

#ifdef MMGC_MEMORY_INFO
            VerifyFreeBlockIntegrity(b->firstFree, m_itemSize);
#endif
        }
        else {
            // The block allocation failed; release the separately allocated
            // bitmap space so it is not leaked.
            if (bits)
                m_gc->FreeBits(bits, m_sizeClassIndex);
        }

        return b;
    }
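
The FLSeed loop threads the initial free list through the fresh block: each item's first word is pointed at the next item, and the final item gets NULL (the p[0] = NULL above). A sketch of that seeding with hypothetical names, assuming count >= 1 and pointer-sized items:

    #include <cstddef>

    // Thread an intrusive free list through `count` items of `itemSize` bytes
    // starting at `base`; returns the list head.
    void* seed_freelist(char* base, std::size_t itemSize, std::size_t count)
    {
        for (std::size_t i = 0; i + 1 < count; i++)
            *reinterpret_cast<void**>(base + i * itemSize) = base + (i + 1) * itemSize;
        *reinterpret_cast<void**>(base + (count - 1) * itemSize) = nullptr;
        return base;
    }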
Example #29
 GCLargeAlloc::~GCLargeAlloc()
 {
     GCAssert(!m_blocks);
 }
Example #30
#ifdef MMGC_MEMORY_PROFILER
    void* GCLargeAlloc::Alloc(size_t originalSize, size_t requestSize, int flags)
#else
    void* GCLargeAlloc::Alloc(size_t requestSize, int flags)
#endif
    {
#ifdef DEBUG
        m_gc->heap->CheckForOOMAbortAllocation();
#endif
        GCHeap::CheckForAllocSizeOverflow(requestSize, sizeof(LargeBlock)+GCHeap::kBlockSize);

        int blocks = (int)((requestSize+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize);
        uint32_t computedSize = blocks*GCHeap::kBlockSize - sizeof(LargeBlock);

        // Allocation must be signalled before we allocate because no GC work must be allowed to
        // come between an allocation and an initialization - if it does, we may crash, as
        // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
        // having it.  In principle we could signal allocation late but only set the object
        // flags after signaling, but we might still cause trouble for the profiler, which also
        // depends on non-interruptibility.

        m_gc->SignalAllocWork(computedSize);

        // Pointer-containing memory is always zeroed (see bug 594533).
        if((flags&GC::kContainsPointers) != 0)
            flags |= GC::kZero;

        LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, PageMap::kGCLargeAllocPageFirst,
                                                           (flags&GC::kZero) != 0, (flags&GC::kCanFail) != 0);
        void *item = NULL;

        if (block)
        {
            // Code below uses these optimizations
            GCAssert((unsigned long)GC::kFinalize == (unsigned long)kFinalizable);
            GCAssert((unsigned long)GC::kInternalExact == (unsigned long)kVirtualGCTrace);
            
            gcbits_t flagbits0 = 0;
            gcbits_t flagbits1 = 0;

#if defined VMCFG_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|GC::kInternalExact));
#elif defined VMCFG_SELECTABLE_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|m_gc->runtimeSelectableExactnessFlag));  // 0 or GC::kInternalExact
#else
            flagbits0 = (flags & GC::kFinalize);
#endif

            VALGRIND_CREATE_MEMPOOL(block, /*rdzone*/0, (flags&GC::kZero) != 0);
            VALGRIND_MEMPOOL_ALLOC(block, block, sizeof(LargeBlock));

            block->gc = this->m_gc;
            block->alloc= this;
            block->next = m_blocks;
            block->size = computedSize;
            block->bibopTag = 0;
#ifdef MMGC_FASTBITS
            block->bitsShift = 12;     // Always use bits[0]
#endif
            block->containsPointers = ((flags&GC::kContainsPointers) != 0) ? 1 : 0;
            block->rcobject = ((flags&GC::kRCObject) != 0) ? 1 : 0;
            block->bits = block->flags;
            m_blocks = block;

            item = block->GetObject();

            if(m_gc->collecting && !m_startedFinalize)
                flagbits0 |= kMark;

            block->flags[0] = flagbits0;
            block->flags[1] = flagbits1;
#ifdef _DEBUG
            (void)originalSize;
            if (flags & GC::kZero)
            {
                if (!RUNNING_ON_VALGRIND)
                {
                    // AllocBlock should take care of this
                    for(int i=0, n=(int)(requestSize/sizeof(int)); i<n; i++) {
                        if(((int*)item)[i] != 0)
                            GCAssert(false);
                    }
                }
            }
#endif

            // see comments in GCAlloc about using full size instead of ask size
            VALGRIND_MEMPOOL_ALLOC(block, item, computedSize);

#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
            if(heap->HooksEnabled()) {
                size_t userSize = block->size - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
                m_totalAskSize += originalSize;
                heap->AllocHook(GetUserPointer(item), originalSize, userSize, /*managed=*/true);
#else
                heap->AllocHook(GetUserPointer(item), 0, userSize, /*managed=*/true);
#endif
            }
#endif
        }
        return item;
    }
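
The block count uses the standard ceiling-division trick: adding kBlockSize-1 before the integer division rounds any partial block up to a whole one. Worked through with assumed numbers (4096-byte blocks, a hypothetical 32-byte LargeBlock header):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const std::size_t kBlockSize = 4096; // assumed GCHeap::kBlockSize
        const std::size_t headerSize = 32;   // hypothetical sizeof(LargeBlock)

        std::size_t requestSize = 5000;      // the size the weak-ref tests use
        std::size_t blocks =
            (requestSize + headerSize + kBlockSize - 1) / kBlockSize;
        assert(blocks == 2);                 // 5032 bytes need two blocks

        std::size_t computedSize = blocks * kBlockSize - headerSize;
        assert(computedSize == 8160);        // usable bytes handed to the object
        return 0;
    }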