Esempio n. 1
0
// Spy hook polled by the VM: when the spy signal has been raised,
// connect to the "MMgc_Spy" named pipe, dump GC heap statistics
// through it, then tear the connection back down.
void VMPI_spyCallback()
{
	// Double-checked flag: cheap unsynchronized read first, then
	// re-test under the lock so only one thread services the request.
	if(!mmgc_spy_signal)
		return;

	VMPI_lockAcquire(&lock);
	if(mmgc_spy_signal)
	{
		// Consume the request before doing the (slow) dump.
		mmgc_spy_signal = 0;

		void *pipe = OpenAndConnectToNamedPipe("MMgc_Spy");

		// Route log output into the pipe for the duration of the dump.
		spyStream = HandleToStream(pipe);
		GCAssert(spyStream != NULL);
		RedirectLogOutput(SpyLog);

		MMgc::GCHeap::GetGCHeap()->DumpMemoryInfo();

		fflush(spyStream);

		// Restore normal logging and drop the pipe.
		CloseNamedPipe(pipe);
		RedirectLogOutput(NULL);
		spyStream = NULL;
	}
	VMPI_lockRelease(&lock);
}
Esempio n. 2
0
    // Grab one block from the GCHeap and link it into this allocator's
    // block list and free list.  On failure (only possible when canFail
    // is true) the optimistic block-count bump is rolled back so the
    // allocator is left unchanged.
    void FixedAlloc::CreateChunk(bool canFail)
    {
        // Allocate a new block
        m_numBlocks++;

        // The heap allocation below may take the heap lock; any spinlock
        // protecting this allocator must not be held across it, so drop
        // it here and reacquire it afterwards.
        vmpi_spin_lock_t *lock = NULL;
        if(m_isFixedAllocSafe) {
            lock = &((FixedAllocSafe*)this)->m_spinlock;
            VMPI_lockRelease(lock);
        }

        FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, GCHeap::kExpand | (canFail ? GCHeap::kCanFail : 0));

        if(lock != NULL)
            VMPI_lockAcquire(lock);

        if(!b) {
            // Allocation failed: undo the counter update done up front so
            // m_numBlocks keeps matching the number of live blocks.
            m_numBlocks--;
            return;
        }

        // Only register the pool with Valgrind once we know the block is
        // real; doing it before the NULL check would hand Valgrind (and
        // the pointer arithmetic below) a NULL block.
        VALGRIND_CREATE_MEMPOOL(b,  0/*redZoneSize*/, 0/*zeroed*/);

        // treat block header as allocation so reads write are okay
        VALGRIND_MEMPOOL_ALLOC(b, b, (char*)b->items - (char*)b);

        GCAssert(m_itemSize <= 0xffff);     // b->size below is a uint16_t

        b->numAlloc = 0;
        b->size = (uint16_t)m_itemSize;
        b->firstFree = 0;
        b->nextItem = b->items;
        b->alloc = this;

#ifdef GCDEBUG
        // Deleted and unused memory is poisoned, this is important for leak diagnostics.
        if (!RUNNING_ON_VALGRIND)
            VMPI_memset(b->items, uint8_t(GCHeap::FXFreedPoison), m_itemSize * m_itemsPerBlock);
#endif

        // Link the block at the end of the list.
        b->prev = m_lastBlock;
        b->next = 0;
        if (m_lastBlock)
            m_lastBlock->next = b;
        if (!m_firstBlock)
            m_firstBlock = b;
        m_lastBlock = b;

        // Add our new ChunkBlock to the firstFree list (which should
        // be empty but might not because we let go of the lock above)
        if (m_firstFree)
        {
            GCAssert(m_firstFree->prevFree == 0);
            m_firstFree->prevFree = b;
        }
        b->nextFree = m_firstFree;
        b->prevFree = 0;
        m_firstFree = b;

        return;
    }
Esempio n. 3
0
	// Grab one block from the GCHeap and link it into this allocator's
	// block list and free list.  If the heap allocation fails (only
	// possible when canFail is true) the capacity accounting done up
	// front is rolled back so the allocator is left unchanged.
	void FixedAlloc::CreateChunk(bool canFail)
	{
		// Allocate a new block
		m_maxAlloc += m_itemsPerBlock;

		// The heap allocation below may take the heap lock; any spinlock
		// protecting this allocator must not be held across it, so drop
		// it here and reacquire it afterwards.
		vmpi_spin_lock_t *lock = NULL;
		if(m_isFixedAllocSafe) {
			lock = &((FixedAllocSafe*)this)->m_spinlock;
			VMPI_lockRelease(lock);
		}

		FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, GCHeap::kExpand | (canFail ? GCHeap::kCanFail : 0));

		if(lock != NULL)
			VMPI_lockAcquire(lock);
		
		GCAssert(m_itemSize <= 0xffff);		// b->size below is a uint16_t

		if(!b) {
			// Allocation failed: undo the optimistic capacity bump so
			// m_maxAlloc keeps matching the real number of slots.
			m_maxAlloc -= m_itemsPerBlock;
			return;
		}
		
		b->numAlloc = 0;
		b->size = (uint16_t)m_itemSize;
		b->firstFree = 0;
		b->nextItem = b->items;
		b->alloc = this;

#ifdef _DEBUG
		// deleted and unused memory is 0xed'd, this is important for leak diagnostics
		VMPI_memset(b->items, 0xed, m_itemSize * m_itemsPerBlock);
#endif

		// Link the block at the end of the list
		b->prev = m_lastBlock;
		b->next = 0;
		if (m_lastBlock) {
			m_lastBlock->next = b;
		}
		if (!m_firstBlock) {
			m_firstBlock = b;
		}
		m_lastBlock = b;

		// Add our new ChunkBlock to the firstFree list (which should
		// be empty but might not because we let go of the lock above)
		if (m_firstFree)
		{
			GCAssert(m_firstFree->prevFree == 0);
			m_firstFree->prevFree = b;
		}
		b->nextFree = m_firstFree;
		b->prevFree = 0;
		m_firstFree = b;

		return;
	}
Esempio n. 4
0
    // Unlink a block from this allocator's block list and free list,
    // then return its memory to the GCHeap.
    //
    // NOTE(review): any guarding spinlock is released around the call to
    // FreeNoProfile and reacquired afterwards, so allocator state is not
    // stable across this function for other threads.
    void FixedAlloc::FreeChunk(FixedBlock* b)
    {
        // Free-list sanity check: abort hard if b's free-list neighbors
        // do not point back at b (indicates corrupted allocator state).
        if ( ((b->prevFree && (b->prevFree->nextFree!=b))) ||
            ((b->nextFree && (b->nextFree->prevFree!=b))) )
            VMPI_abort();

        m_numBlocks--;

        // Unlink the block from the list
        if (b == m_firstBlock)
            m_firstBlock = b->next;
        else
            b->prev->next = b->next;

        if (b == m_lastBlock)
            m_lastBlock = b->prev;
        else
            b->next->prev = b->prev;

        // If this is the first free block, pick a new one...
        if ( m_firstFree == b )
            m_firstFree = b->nextFree;
        else if (b->prevFree)
            b->prevFree->nextFree = b->nextFree;

        if (b->nextFree)
            b->nextFree->prevFree = b->prevFree;

        // Any lock can't be held across the call to FreeNoProfile, so if there
        // is a lock obtain it, release it, and then reacquire it.  This works
        // because Destroy caches no state across the call to FreeChunk.

        vmpi_spin_lock_t *lock = NULL;

        if(m_isFixedAllocSafe) {
            lock = &((FixedAllocSafe*)this)->m_spinlock;
            VMPI_lockRelease(lock);
        }

        // Free the memory
        m_heap->FreeNoProfile(b);

        if(lock != NULL)
            VMPI_lockAcquire(lock);

        // Retire the Valgrind pool bookkeeping for this block.  b has
        // already been returned to the heap at this point; presumably the
        // client-request macros only record the address without touching
        // the memory — NOTE(review): confirm this ordering is intentional.
        VALGRIND_MEMPOOL_FREE(b, b);
        VALGRIND_DESTROY_MEMPOOL(b);
    }