Example #1
#if defined DEBUG || defined MMGC_MEMORY_PROFILER
    void* GCLargeAlloc::Alloc(size_t originalSize, size_t requestSize, int flags)
#else
    void* GCLargeAlloc::Alloc(size_t requestSize, int flags)
#endif
    {
#ifdef DEBUG
        m_gc->heap->CheckForOOMAbortAllocation();
#endif
        GCHeap::CheckForAllocSizeOverflow(requestSize, sizeof(LargeBlock)+GCHeap::kBlockSize);

        int blocks = (int)((requestSize+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize);
        uint32_t computedSize = blocks*GCHeap::kBlockSize - sizeof(LargeBlock);

        // Allocation must be signalled before we allocate because no GC work must be allowed to
        // come between an allocation and an initialization - if it does, we may crash, as
        // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
        // having it.  In principle we could signal allocation late but only set the object
        // flags after signaling, but we might still cause trouble for the profiler, which also
        // depends on non-interruptibility.

        m_gc->SignalAllocWork(computedSize);

        // Pointer-containing memory is always zeroed (see bug 594533).
        if((flags&GC::kContainsPointers) != 0)
            flags |= GC::kZero;

        LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, PageMap::kGCLargeAllocPageFirst,
                                                           (flags&GC::kZero) != 0, (flags&GC::kCanFail) != 0);
        void *item = NULL;

        if (block)
        {
            // Code below relies on these flag values being numerically equal
            GCAssert((unsigned long)GC::kFinalize == (unsigned long)kFinalizable);
            GCAssert((unsigned long)GC::kInternalExact == (unsigned long)kVirtualGCTrace);
            
            gcbits_t flagbits0 = 0;
            gcbits_t flagbits1 = 0;

#if defined VMCFG_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|GC::kInternalExact));
#elif defined VMCFG_SELECTABLE_EXACT_TRACING
            flagbits0 = (flags & (GC::kFinalize|m_gc->runtimeSelectableExactnessFlag));  // 0 or GC::kInternalExact
#else
            flagbits0 = (flags & GC::kFinalize);
#endif

            VALGRIND_CREATE_MEMPOOL(block, /*rdzone*/0, (flags&GC::kZero) != 0);
            VALGRIND_MEMPOOL_ALLOC(block, block, sizeof(LargeBlock));

            block->gc = this->m_gc;
            block->alloc = this;
            block->next = m_blocks;
            block->size = computedSize;
            block->bibopTag = 0;
#ifdef MMGC_FASTBITS
            block->bitsShift = 12;     // Always use bits[0]
#endif
            block->containsPointers = ((flags&GC::kContainsPointers) != 0) ? 1 : 0;
            block->rcobject = ((flags&GC::kRCObject) != 0) ? 1 : 0;
            block->bits = block->flags;
            m_blocks = block;

            item = block->GetObject();

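            // Objects allocated mid-collection, before finalization has begun,
            // are born marked so the current cycle won't reclaim them.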
            if(m_gc->collecting && !m_startedFinalize)
                flagbits0 |= kMark;

            block->flags[0] = flagbits0;
            block->flags[1] = flagbits1;
#ifdef _DEBUG
            (void)originalSize;
            if (flags & GC::kZero)
            {
                if (!RUNNING_ON_VALGRIND)
                {
                    // AllocBlock should take care of this
                    for(int i=0, n=(int)(requestSize/sizeof(int)); i<n; i++) {
                        if(((int*)item)[i] != 0)
                            GCAssert(false);
                    }
                }
            }
#endif

            // see comments in GCAlloc about using full size instead of ask size
            VALGRIND_MEMPOOL_ALLOC(block, item, computedSize);

#ifdef MMGC_HOOKS
            GCHeap* heap = GCHeap::GetGCHeap();
            if(heap->HooksEnabled()) {
                size_t userSize = block->size - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
                m_totalAskSize += originalSize;
                heap->AllocHook(GetUserPointer(item), originalSize, userSize, /*managed=*/true);
#else
                heap->AllocHook(GetUserPointer(item), 0, userSize, /*managed=*/true);
#endif
            }
#endif
        }
        return item;
    }
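The block-count arithmetic at the top of Alloc rounds the request plus the LargeBlock header up to whole heap blocks. Here is a minimal standalone sketch of that computation, assuming the 4 KB GCHeap::kBlockSize implied by the 0xfff masks and a hypothetical 32-byte header standing in for sizeof(LargeBlock):

#include <cstddef>
#include <cstdio>

static const size_t kBlockSize  = 4096;  // assumption: GCHeap::kBlockSize
static const size_t kHeaderSize = 32;    // assumption: sizeof(LargeBlock)

// Round (request + header) up to whole blocks.
static size_t BlocksFor(size_t requestSize)
{
    return (requestSize + kHeaderSize + kBlockSize - 1) / kBlockSize;
}

int main()
{
    size_t request = 5000;
    size_t blocks  = BlocksFor(request);                 // 2 blocks
    size_t usable  = blocks * kBlockSize - kHeaderSize;  // "computedSize": 8160
    printf("%zu blocks, %zu usable bytes\n", blocks, usable);
    return 0;
}

The GCHeap::CheckForAllocSizeOverflow call exists precisely because the sum requestSize + sizeof(LargeBlock) + kBlockSize - 1 can wrap around for very large requests.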
Example #2
#if defined DEBUG || defined MMGC_MEMORY_PROFILER
	void* GCAlloc::Alloc(size_t size, int flags)
#else
	void* GCAlloc::Alloc(int flags)
#endif
	{
		GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");

		// Allocation must be signalled before we allocate because no GC work must be allowed to
		// come between an allocation and an initialization - if it does, we may crash, as 
		// GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
		// having it.  In principle we could signal allocation late but only set the object
		// flags after signaling, but we might still cause trouble for the profiler, which also
		// depends on non-interruptibility.

		m_gc->SignalAllocWork(m_itemSize);
		
		GCBlock* b = m_firstFree;
	start:
		if (b == NULL) {
			if (m_needsSweeping && !m_gc->collecting) {
				Sweep(m_needsSweeping);
				b = m_firstFree;
				goto start;
			}
			
			bool canFail = (flags & GC::kCanFail) != 0;
			CreateChunk(canFail);
			b = m_firstFree;
			if (b == NULL) {
				GCAssert(canFail);
				return NULL;
			}
		}
		
		GCAssert(!b->needsSweeping);
		GCAssert(b == m_firstFree);
		GCAssert(b && !b->IsFull());
		
		void *item;
		if(b->firstFree) {
			item = b->firstFree;
			b->firstFree = *((void**)item);
			// clear the free-list pointer; the rest was zeroed in Free
			*(intptr_t*) item = 0;
#ifdef MMGC_MEMORY_INFO
			// check for writes on deleted memory
			VerifyFreeBlockIntegrity(item, b->size);
#endif
		} else {
			item = b->nextItem;
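			// Blocks are 4 KB: if the slot after this item starts exactly on
			// a block boundary, this item is the last one in the block.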
			if(((uintptr_t)((char*)item + b->size) & 0xfff) != 0) {
				b->nextItem = (char*)item + b->size;
			} else {
				b->nextItem = NULL;
			}
		}

		// Set up bits: items start out white; whether they need finalization
		// is determined by the caller.

		// make sure we ended up in the right place
		GCAssert(((flags&GC::kContainsPointers) != 0) == ContainsPointers());

		// this assumes what we assert
		GCAssert((unsigned long)GC::kFinalize == (unsigned long)GCAlloc::kFinalize);
		
		int index = GetIndex(b, item);
		GCAssert(index >= 0);
		Clear4BitsAndSet(b, index, flags & kFinalize);

		b->numItems++;
#ifdef MMGC_MEMORY_INFO
		m_numAlloc++;
#endif

		// If we're out of free items, be sure to remove ourselves from the
		// list of blocks with free items.  TODO Minor optimization: when we
		// carve an item off the end of the block, we don't need to check here
		// unless we just set b->nextItem to NULL.

		if (b->IsFull()) {
			m_firstFree = b->nextFree;
			b->nextFree = NULL;
			GCAssert(b->prevFree == NULL);

			if (m_firstFree)
				m_firstFree->prevFree = NULL;
		}

		// Prevent mid-collection (i.e., destructor) allocations on un-swept pages from
		// getting swept.  If the page is finalized and doesn't need sweeping we don't
		// want to set the mark bit; otherwise the object would already be marked when
		// the next marking phase starts and write barriers wouldn't fire (since it's black).
		if(m_gc->collecting)
		{ 
			if((b->finalizeState != m_gc->finalizedValue) || b->needsSweeping)
				SetBit(b, index, kMark);
		}

		GCAssert((uintptr_t(item) & ~0xfff) == (uintptr_t) b);
		GCAssert((uintptr_t(item) & 7) == 0);

#ifdef MMGC_HOOKS
		GCHeap* heap = GCHeap::GetGCHeap();
		if(heap->HooksEnabled())
		{
			size_t userSize = m_itemSize - DebugSize();
#ifdef MMGC_MEMORY_PROFILER
			m_totalAskSize += size;
			heap->AllocHook(GetUserPointer(item), size, userSize);
#else
			heap->AllocHook(GetUserPointer(item), 0, userSize);
#endif
		}
#endif

		return item;
	}
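The fast path above is an intrusive free list with a bump-pointer fallback: a free item's first word stores the next free item, and when the list is empty new items are carved off the end of the 4 KB block until the bump pointer hits a block boundary. A self-contained sketch of that pattern (not MMgc code; MiniBlock and its fields are hypothetical stand-ins for GCBlock):

#include <cstddef>
#include <cstdint>

struct MiniBlock
{
    void*  firstFree;  // head of the intrusive free list
    char*  nextItem;   // bump pointer; NULL once the block is exhausted
    size_t itemSize;
};

static void* MiniAlloc(MiniBlock* b)
{
    void* item;
    if (b->firstFree) {
        // Pop: the first word of a free item links to the next free item.
        item = b->firstFree;
        b->firstFree = *(void**)item;
        *(void**)item = NULL;          // clear the link, as Alloc does above
    } else if (b->nextItem) {
        item = b->nextItem;
        char* next = b->nextItem + b->itemSize;
        // If the next slot would start exactly on a 4 KB boundary,
        // this was the last item in the block.
        b->nextItem = ((uintptr_t)next & 0xfff) ? next : NULL;
    } else {
        return NULL;                   // block is full
    }
    return item;
}

static void MiniFree(MiniBlock* b, void* item)
{
    // Push the item back onto the intrusive free list.
    *(void**)item = b->firstFree;
    b->firstFree = item;
}

Storing the link inside the free item itself means the free list costs no extra memory, which is why Alloc must clear that word before handing the item out.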