Example No. 1
/**
 * Attempt to allocate an object in this TLH.
 */
void *
MM_TLHAllocationSupport::allocateFromTLH(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription, bool shouldCollectOnFailure)
{
	void *memPtr = NULL;

	Assert_MM_true(!env->getExtensions()->isSegregatedHeap());
	uintptr_t sizeInBytesRequired = allocDescription->getContiguousBytes();
	/* If there's insufficient space, refresh the current TLH */
	if (sizeInBytesRequired > getSize()) {
		refresh(env, allocDescription, shouldCollectOnFailure);
	}

	/* Try to fit the allocate into the current TLH */
	if(sizeInBytesRequired <= getSize()) {
		memPtr = (void *)getAlloc();
		setAlloc((void *)((uintptr_t)getAlloc() + sizeInBytesRequired));
#if defined(OMR_GC_TLH_PREFETCH_FTA)
		if (*_pointerToTlhPrefetchFTA < (intptr_t)sizeInBytesRequired) {
			*_pointerToTlhPrefetchFTA = 0;
		} else {
			*_pointerToTlhPrefetchFTA -= (intptr_t)sizeInBytesRequired;
		}
#endif /* OMR_GC_TLH_PREFETCH_FTA */
		allocDescription->setObjectFlags(getObjectFlags());
		allocDescription->setMemorySubSpace((MM_MemorySubSpace *)_tlh->memorySubSpace);
		allocDescription->completedFromTlh();
	}

	return memPtr;
}
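The function above is a bump-pointer allocator: when the requested size fits in the current TLH it hands out the alloc pointer and advances it, otherwise it refreshes the TLH first. The sketch below is a minimal, self-contained illustration of that pattern; the Tlh type and refresh helper are invented for the example and are not the OMR API.

#include <cstdint>
#include <cstdlib>

/* Illustrative thread-local heap: [alloc, top) is the unused remainder. */
struct Tlh {
	uintptr_t alloc;
	uintptr_t top;

	uintptr_t size() const { return top - alloc; }
};

/* Hypothetical refresh: grab a fresh buffer large enough for the request. */
static bool refresh(Tlh *tlh, uintptr_t sizeInBytes)
{
	uintptr_t bufferSize = (sizeInBytes < 4096) ? 4096 : sizeInBytes;
	void *buffer = malloc(bufferSize);
	if (NULL == buffer) {
		return false;
	}
	tlh->alloc = (uintptr_t)buffer;
	tlh->top = tlh->alloc + bufferSize;
	return true;
}

/* Bump-pointer allocation: refresh when the current TLH cannot satisfy the request. */
static void *allocateFromTlh(Tlh *tlh, uintptr_t sizeInBytes)
{
	void *memPtr = NULL;
	if (sizeInBytes > tlh->size()) {
		refresh(tlh, sizeInBytes);
	}
	if (sizeInBytes <= tlh->size()) {
		memPtr = (void *)tlh->alloc;
		tlh->alloc += sizeInBytes; /* advance the bump pointer */
	}
	return memPtr;
}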
Example No. 2
	/*
	 * This method should be used with care.  In particular, it is wrong to detach from a freelist
	 * while iterating over it unless the detach stops further iteration.
	 */
	void
	detachInternal(MM_HeapRegionDescriptorSegregated *cur)
	{
		_length--;
		MM_HeapRegionDescriptorSegregated *prev = cur->getPrev();
		MM_HeapRegionDescriptorSegregated *next = cur->getNext();
		if (prev != NULL) {
			Assert_MM_true(prev->getNext() == cur);
			prev->setNext(next);
		} else {
			Assert_MM_true(cur == _head);
		}
		if (next != NULL) {
			Assert_MM_true(next->getPrev() == cur);
			next->setPrev(prev);
		} else {
			Assert_MM_true(cur == _tail);
		}
		cur->setPrev(NULL);
		cur->setNext(NULL);
		if (_head == cur) {
			_head = next;
		}
		if (_tail == cur) {
			_tail = prev;
		}
	}
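The detach above is a standard doubly-linked-list unlink with explicit head and tail maintenance. Below is a self-contained sketch of the same steps on generic node types; the names are illustrative, not the OMR classes.

#include <cstddef>

struct Node {
	Node *prev;
	Node *next;
};

struct FreeList {
	Node *head;
	Node *tail;
	unsigned length;

	/* Unlink cur from the list; cur must currently be a member. */
	void detach(Node *cur)
	{
		length--;
		Node *prev = cur->prev;
		Node *next = cur->next;
		if (NULL != prev) {
			prev->next = next;   /* bypass cur going forward */
		} else {
			head = next;         /* cur was the head */
		}
		if (NULL != next) {
			next->prev = prev;   /* bypass cur going backward */
		} else {
			tail = prev;         /* cur was the tail */
		}
		cur->prev = NULL;
		cur->next = NULL;
	}
};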
Example No. 3
bool
MM_ConcurrentGCIncrementalUpdate::createCardTable(MM_EnvironmentBase *env)
{
	bool result = false;

	Assert_MM_true(NULL == _cardTable);
	Assert_MM_true(NULL == _extensions->cardTable);

#if defined(AIXPPC) || defined(LINUXPPC)
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());

	if ((uintptr_t)omrsysinfo_get_number_CPUs_by_type(OMRPORT_CPU_ONLINE) > 1 ) {
		_cardTable = MM_ConcurrentCardTableForWC::newInstance(env, _extensions->getHeap(), _markingScheme, this);
	} else
#endif /* AIXPPC || LINUXPPC */
	{
		_cardTable = MM_ConcurrentCardTable::newInstance(env, _extensions->getHeap(), _markingScheme, this);
	}

	if(NULL != _cardTable) {
		result = true;
		/* Set card table address in GC Extensions */
		_extensions->cardTable = _cardTable;
	}

	return result;
}
Example No. 4
void
MM_MemoryManager::destroyVirtualMemory(MM_EnvironmentBase* env, MM_MemoryHandle* handle)
{
	Assert_MM_true(NULL != handle);
	MM_VirtualMemory* memory = handle->getVirtualMemory();
	if (NULL != memory) {
		Assert_MM_true(memory->getConsumerCount() > 0);
		memory->decrementConsumerCount();
		if (0 == memory->getConsumerCount()) {
			/* this is last consumer attached to this Virtual Memory instance - delete it */
			memory->kill(env);

			/*
			 * If this instance has been used as preallocated (but not taken) memory, it should be cleared as well
			 */
			if (memory == _preAllocated.getVirtualMemory()) {
				_preAllocated.setVirtualMemory(NULL);
			}
		}
	}

	handle->setVirtualMemory(NULL);
	handle->setMemoryBase(NULL);
	handle->setMemoryTop(NULL);

#if defined(OMR_VALGRIND_MEMCHECK)
	valgrindDestroyMempool(env->getExtensions());
#endif /* defined(OMR_VALGRIND_MEMCHECK) */

}
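destroyVirtualMemory detaches the handle and destroys the MM_VirtualMemory instance only when the last consumer detaches. A minimal sketch of that reference-counted teardown, using invented types for illustration:

#include <cstddef>

/* Illustrative shared resource with a consumer count. */
struct SharedMemory {
	unsigned consumerCount;
	void kill() { delete this; }   /* destroy the backing resource */
};

struct Handle {
	SharedMemory *memory;
};

/* Detach the handle; destroy the resource when the last consumer detaches. */
static void destroyHandle(Handle *handle)
{
	SharedMemory *memory = handle->memory;
	if (NULL != memory) {
		memory->consumerCount--;
		if (0 == memory->consumerCount) {
			memory->kill();
		}
	}
	handle->memory = NULL;   /* the handle no longer references the resource */
}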
Example No. 5
bool
MM_HeapRegionManagerTarok::setContiguousHeapRange(MM_EnvironmentBase *env, void *lowHeapEdge, void *highHeapEdge)
{
	writeLock();
	/* ensure that this manager was configured with a valid region size */
	Assert_MM_true(0 != _regionSize);
	/* we don't yet support multiple enabling calls (split heaps) */
	/* This assertion would be triggered by multiple attempts to initialize the heap in gcInitializeDefaults() */
	/* Assert_MM_true(NULL == _regionTable); */
	/* the regions must be aligned (in present implementation) */
	Assert_MM_true(0 == ((uintptr_t)lowHeapEdge % _regionSize));
	Assert_MM_true(0 == ((uintptr_t)highHeapEdge % _regionSize));
	/* make sure that the range is in the right order and of non-zero size*/
	Assert_MM_true(highHeapEdge > lowHeapEdge);
	/* allocate the table */
	uintptr_t size = (uintptr_t)highHeapEdge - (uintptr_t)lowHeapEdge;
	_tableRegionCount = size / _regionSize;
	_regionTable = internalAllocateAndInitializeRegionTable(env, lowHeapEdge, highHeapEdge);
	bool success = false;
	if (NULL != _regionTable) {
		_lowTableEdge = lowHeapEdge;
		_highTableEdge = highHeapEdge;
		success = true;
	}
	writeUnlock();
	return success;
}
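The region table size falls directly out of the heap range: both edges must be region-aligned, and the region count is the byte span divided by the region size. A small worked example with assumed numbers (512 KB regions, 64 MB heap):

#include <cassert>
#include <cstdint>
#include <cstdio>

int main()
{
	/* Illustrative values: 512 KB region size, 64 MB contiguous heap. */
	const uintptr_t regionSize = 512 * 1024;
	const uintptr_t lowHeapEdge = 0x10000000;
	const uintptr_t highHeapEdge = lowHeapEdge + 64 * 1024 * 1024;

	/* Both edges must be region-aligned and in the right order. */
	assert(0 == (lowHeapEdge % regionSize));
	assert(0 == (highHeapEdge % regionSize));
	assert(highHeapEdge > lowHeapEdge);

	/* Region count is the byte span divided by the region size: 128 here. */
	uintptr_t tableRegionCount = (highHeapEdge - lowHeapEdge) / regionSize;
	printf("regions required: %lu\n", (unsigned long)tableRegionCount);
	return 0;
}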
Example No. 6
bool
MM_MemoryPoolSplitAddressOrderedListBase::recycleHeapChunk(MM_EnvironmentBase* env, void* addrBase, void* addrTop,
													   MM_HeapLinkedFreeHeader* previousFreeEntry, MM_HeapLinkedFreeHeader* nextFreeEntry, uintptr_t curFreeList)
{
	Assert_MM_true(addrBase <= addrTop);
	Assert_MM_true((NULL == nextFreeEntry) || (addrTop <= nextFreeEntry));
	if (internalRecycleHeapChunk(addrBase, addrTop, nextFreeEntry)) {
		if (previousFreeEntry) {
			Assert_MM_true(previousFreeEntry < addrBase);
			previousFreeEntry->setNext((MM_HeapLinkedFreeHeader*)addrBase);
		} else {
			_heapFreeLists[curFreeList]._freeList = (MM_HeapLinkedFreeHeader*)addrBase;
		}

		return true;
	}

	if (previousFreeEntry) {
		Assert_MM_true((NULL == nextFreeEntry) || (previousFreeEntry < nextFreeEntry));
		previousFreeEntry->setNext(nextFreeEntry);
	} else {
		_heapFreeLists[curFreeList]._freeList = nextFreeEntry;
	}

	return false;
}
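On success, recycleHeapChunk links the recycled chunk [addrBase, addrTop) into the address-ordered free list between previousFreeEntry and nextFreeEntry (or makes it the list head); on failure it simply repairs the previous-to-next link. The sketch below isolates the splicing step on a simplified singly-linked free list; the types are illustrative.

#include <cstddef>

/* Illustrative singly-linked free-list entry laid out at the start of free space. */
struct FreeEntry {
	FreeEntry *next;
};

/* Link a recycled chunk starting at addrBase between previous and next,
 * keeping the list in ascending address order. */
static void spliceIntoFreeList(FreeEntry **freeListHead, void *addrBase,
	FreeEntry *previousFreeEntry, FreeEntry *nextFreeEntry)
{
	FreeEntry *entry = (FreeEntry *)addrBase;
	entry->next = nextFreeEntry;
	if (NULL != previousFreeEntry) {
		previousFreeEntry->next = entry;   /* insert after the previous entry */
	} else {
		*freeListHead = entry;             /* new chunk becomes the list head */
	}
}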
Example No. 7
bool
MM_MemoryManager::commitMemory(MM_MemoryHandle* handle, void* address, uintptr_t size)
{
	Assert_MM_true(NULL != handle);
	MM_VirtualMemory* memory = handle->getVirtualMemory();
	Assert_MM_true(NULL != memory);
	return memory->commitMemory(address, size);
}
Example No. 8
bool
MM_MemoryManager::decommitMemory(MM_MemoryHandle* handle, void* address, uintptr_t size, void* lowValidAddress, void* highValidAddress)
{
	Assert_MM_true(NULL != handle);
	MM_VirtualMemory* memory = handle->getVirtualMemory();
	Assert_MM_true(NULL != memory);
	return memory->decommitMemory(address, size, lowValidAddress, highValidAddress);
}
Example No. 9
bool
MM_MemoryManager::setNumaAffinity(const MM_MemoryHandle* handle, uintptr_t numaNode, void* address, uintptr_t byteAmount)
{
	Assert_MM_true(NULL != handle);
	MM_VirtualMemory* memory = handle->getVirtualMemory();
	Assert_MM_true(NULL != memory);
	return memory->setNumaAffinity(numaNode, address, byteAmount);
}
Example No. 10
void
MM_MasterGCThread::masterThreadEntryPoint()
{
	OMR_VMThread *omrVMThread = NULL;
	Assert_MM_true(NULL != _collectorControlMutex);
	Assert_MM_true(NULL == _masterGCThread);

	/* Attach the thread as a system daemon thread */	
	/* You need a VM thread so that the stack walker can work */
	omrVMThread = MM_EnvironmentBase::attachVMThread(_extensions->getOmrVM(), "Dedicated GC Master", MM_EnvironmentBase::ATTACH_GC_MASTER_THREAD);
	if (NULL == omrVMThread) {
		/* we failed to attach so notify the creating thread that we should fail to start up */
		omrthread_monitor_enter(_collectorControlMutex);
		_masterThreadState = STATE_ERROR;
		omrthread_monitor_notify(_collectorControlMutex);
		omrthread_exit(_collectorControlMutex);
	} else {
		/* thread attached successfully */
		MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);

		/* attachVMThread could allocate and execute a barrier (up to that point, this thread acted as a mutator thread).
		 * Flush GC caches (like barrier buffers) before turning into the master thread */
		env->flushGCCaches();

		env->setThreadType(GC_MASTER_THREAD);

		/* Begin running the thread */
		omrthread_monitor_enter(_collectorControlMutex);
		
		_collector->preMasterGCThreadInitialize(env);
		
		_masterThreadState = STATE_WAITING;
		_masterGCThread = omrthread_self();
		omrthread_monitor_notify(_collectorControlMutex);
		do {
			if (STATE_GC_REQUESTED == _masterThreadState) {
				if (_runAsImplicit) {
					handleConcurrent(env);
				} else {
					handleSTW(env);
				}
			}

			if (STATE_WAITING == _masterThreadState) {
				if (_runAsImplicit || !handleConcurrent(env)) {
					omrthread_monitor_wait(_collectorControlMutex);
				}
			}
		} while (STATE_TERMINATION_REQUESTED != _masterThreadState);
		/* notify the other side that we are active so that they can continue running */
		_masterThreadState = STATE_TERMINATED;
		_masterGCThread = NULL;
		omrthread_monitor_notify(_collectorControlMutex);
		MM_EnvironmentBase::detachVMThread(_extensions->getOmrVM(), omrVMThread, MM_EnvironmentBase::ATTACH_GC_MASTER_THREAD);
		omrthread_exit(_collectorControlMutex);
	}
}
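The entry point above is a startup handshake: the new thread publishes either an error state or a ready state under the control monitor and notifies the creating thread, which blocks until one of the two is seen. Below is a condensed sketch of the same handshake using std::mutex and std::condition_variable instead of omrthread monitors; all names are invented for the example.

#include <condition_variable>
#include <mutex>
#include <thread>

/* Illustrative startup handshake: the worker publishes its state under a
 * mutex and notifies the creator, mirroring the monitor protocol above. */
enum State { STATE_STARTING, STATE_WAITING, STATE_ERROR, STATE_TERMINATED };

static std::mutex controlMutex;
static std::condition_variable controlCondition;
static State threadState = STATE_STARTING;

static void workerEntryPoint(bool attachSucceeds)
{
	std::unique_lock<std::mutex> lock(controlMutex);
	/* Publish the startup result and wake the creator. */
	threadState = attachSucceeds ? STATE_WAITING : STATE_ERROR;
	controlCondition.notify_one();
	if (STATE_ERROR == threadState) {
		return;   /* the creator sees the error and fails the startup */
	}
	/* ... a real worker would wait here for requests before terminating ... */
	threadState = STATE_TERMINATED;
	controlCondition.notify_one();
}

static bool startWorker()
{
	std::thread worker(workerEntryPoint, true);
	{
		std::unique_lock<std::mutex> lock(controlMutex);
		/* Block until the worker reports either readiness or failure. */
		controlCondition.wait(lock, [] { return STATE_STARTING != threadState; });
	}
	worker.join();
	return STATE_ERROR != threadState;
}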
Example No. 11
bool
MM_EnvironmentBase::tryAcquireExclusiveVMAccessForGC(MM_Collector *collector)
{
	MM_GCExtensionsBase *extensions = getExtensions();
	uintptr_t collectorAccessCount = collector->getExclusiveAccessCount();

	_exclusiveAccessBeatenByOtherThread = false;

	while(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
		if(NULL == extensions->gcExclusiveAccessThreadId) {
			/* there is a chance the thread can win the race to acquiring exclusive for GC */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			if(NULL == extensions->gcExclusiveAccessThreadId) {
				/* thread is the winner and will request the GC */
				extensions->gcExclusiveAccessThreadId = _omrVMThread;
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);
		}

		if(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
			/* thread was not the winner for requesting a GC - allow the GC to proceed and wait for it to complete */
			Assert_MM_true(NULL != extensions->gcExclusiveAccessThreadId);

			uintptr_t accessMask;
			_envLanguageInterface->releaseCriticalHeapAccess(&accessMask);

			/* there is a chance the GC will already have executed at this point or other threads will re-win and re-execute.  loop until the
			 * thread sees that no more GCs are being requested.
			 */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			while(NULL != extensions->gcExclusiveAccessThreadId) {
				omrthread_monitor_wait(extensions->gcExclusiveAccessMutex);
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);

			_envLanguageInterface->reacquireCriticalHeapAccess(accessMask);

			/* We may have been beaten to a GC, but perhaps not the one we wanted.  Check, and if the collection we
			 * intended has in fact completed, do not acquire exclusive access.
			 */
			if(collector->getExclusiveAccessCount() != collectorAccessCount) {
				return false;
			}
		}
	}

	/* thread is the winner for requesting a GC (possibly through recursive calls).  proceed with acquiring exclusive access. */
	Assert_MM_true(_omrVMThread == extensions->gcExclusiveAccessThreadId);

	this->acquireExclusiveVMAccess();

	collector->incrementExclusiveAccessCount();

	GC_OMRVMInterface::flushCachesForGC(this);

	return true;
}
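The loop above is a win-or-wait protocol: the first thread to publish itself in gcExclusiveAccessThreadId wins the right to request the GC, while losers wait until the field is cleared and then check whether the collection they wanted already ran. A condensed sketch of that protocol with standard C++ primitives (illustrative, not the omrthread API):

#include <condition_variable>
#include <cstddef>
#include <mutex>

/* Illustrative shared state guarded by exclusiveMutex. */
static std::mutex exclusiveMutex;
static std::condition_variable exclusiveCondition;
static const void *exclusiveWinner = NULL;   /* id of the thread requesting the GC */
static unsigned collectionCount = 0;         /* bumped after every collection */

/* Returns true if this thread won the right to run the GC it wanted;
 * false if another thread's GC satisfied the request in the meantime. */
static bool tryWinExclusiveForGC(const void *selfId, unsigned observedCount)
{
	std::unique_lock<std::mutex> lock(exclusiveMutex);
	while (selfId != exclusiveWinner) {
		if (NULL == exclusiveWinner) {
			exclusiveWinner = selfId;   /* this thread wins the race */
			break;
		}
		/* Another thread won: wait for its GC to finish. */
		while (NULL != exclusiveWinner) {
			exclusiveCondition.wait(lock);
		}
		/* If the collection we wanted already ran, give up without acquiring. */
		if (collectionCount != observedCount) {
			return false;
		}
	}
	return true;
}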
Example No. 12
void
MM_ParallelMarkTask::setup(MM_EnvironmentBase *env)
{
	if(env->isMasterThread()) {
		Assert_MM_true(_cycleState == env->_cycleState);
	} else {
		Assert_MM_true(NULL == env->_cycleState);
		env->_cycleState = _cycleState;
	}
}
Example No. 13
void
MM_ConcurrentFinalCleanCardsTask::setup(MM_EnvironmentBase *env)
{
	if (env->isMasterThread()) {
		Assert_MM_true(_cycleState == env->_cycleState);
	} else {
		Assert_MM_true(NULL == env->_cycleState);
		env->_cycleState = _cycleState;
	}
}
Example No. 14
/**
 * Reset the work stack.
 *
 * @param workPackets - Reference to the work packets object
 */
void
WorkStack::reset(EnvironmentMetaData *env, MM_WorkPackets *workPackets)
{
	_stats.clear();
	_workPackets = workPackets;
	/* if any of these are non-NULL, we would be leaking memory */
	Assert_MM_true(NULL == _inputPacket);
	Assert_MM_true(NULL == _outputPacket);
	Assert_MM_true(NULL == _deferredPacket);
}
Example No. 15
bool
MM_EnvironmentBase::tryAcquireExclusiveForConcurrentKickoff(MM_ConcurrentGCStats *stats)
{
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(_omrVM);
	uintptr_t gcCount = extensions->globalGCStats.gcCount;

	while (_omrVMThread != extensions->gcExclusiveAccessThreadId) {
		if (NULL == extensions->gcExclusiveAccessThreadId) {
			/* there is a chance the thread can win the race to acquiring exclusive for GC */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			if (NULL == extensions->gcExclusiveAccessThreadId) {
				/* thread is the winner and will request the GC */
				extensions->gcExclusiveAccessThreadId = _omrVMThread;
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);
		}

		if (_omrVMThread != extensions->gcExclusiveAccessThreadId) {
			/* thread was not the winner for requesting a GC - allow the GC to proceed and wait for it to complete */
			Assert_MM_true(NULL != extensions->gcExclusiveAccessThreadId);

			uintptr_t accessMask = 0;

			_envLanguageInterface->releaseCriticalHeapAccess(&accessMask);

			/* there is a chance the GC will already have executed at this point or other threads will re-win and re-execute.  loop until the
			 * thread sees that no more GCs are being requested.
			 */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			while (NULL != extensions->gcExclusiveAccessThreadId) {
				omrthread_monitor_wait(extensions->gcExclusiveAccessMutex);
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);

			_envLanguageInterface->reacquireCriticalHeapAccess(accessMask);

			/* We may have been beaten to a GC, but perhaps not the one we wanted.  Check, and if the collection we
			 * intended has in fact completed, do not acquire exclusive access.
			 */
			if ((gcCount != extensions->globalGCStats.gcCount) || (CONCURRENT_INIT_COMPLETE != stats->getExecutionMode())) {
				return false;
			}
		}
	}

	Assert_MM_true(_omrVMThread == extensions->gcExclusiveAccessThreadId);
	Assert_MM_true(CONCURRENT_INIT_COMPLETE == stats->getExecutionMode());

	/* thread is the winner for requesting a GC (possibly through recursive calls).  proceed with acquiring exclusive access. */
	this->acquireExclusiveVMAccess();

	return true;
}
Example No. 16
void
WorkStack::prepareForWork(EnvironmentMetaData *env, MM_WorkPackets *workPackets)
{
	if (NULL == _workPackets) {
		_workPackets = workPackets;
		/* this is our first time using this work stack instance so the packets should be NULL */
		Assert_MM_true(NULL == _inputPacket);
		Assert_MM_true(NULL == _outputPacket);
		Assert_MM_true(NULL == _deferredPacket);
	} else {
		Assert_MM_true(_workPackets == workPackets);
	}
}
Example No. 17
uintptr_t
MM_ParallelSweepScheme::performSamplingCalculations(MM_ParallelSweepChunk *sweepChunk, uintptr_t* markMapCurrent, uintptr_t* heapSlotFreeCurrent)
{
	const uintptr_t minimumFreeEntrySize = sweepChunk->memoryPool->getMinimumFreeEntrySize();
	uintptr_t darkMatter = 0;

	/* this word has objects in it. Sample them for dark matter */
	MM_HeapMapWordIterator markedObjectIterator(*markMapCurrent, heapSlotFreeCurrent);

	/* A hole at the beginning of the sample is not considered, since we do not know
	 * whether it is part of a preceding object or part of a hole.
	 */
	omrobjectptr_t prevObject = markedObjectIterator.nextObject();
	Assert_MM_true(NULL != prevObject);
	uintptr_t prevObjectSize = _extensions->objectModel.getConsumedSizeInBytesWithHeader(prevObject);

	omrobjectptr_t object = NULL;
	while (NULL != (object = markedObjectIterator.nextObject())) {
		uintptr_t holeSize = (uintptr_t)object - ((uintptr_t)prevObject + prevObjectSize);
		Assert_MM_true(holeSize < minimumFreeEntrySize);
		darkMatter += holeSize;
		prevObject = object;
		prevObjectSize = _extensions->objectModel.getConsumedSizeInBytesWithHeader(prevObject);
	}

	/* find the trailing dark matter */
	uintptr_t * endOfPrevObject = (uintptr_t*)((uintptr_t)prevObject + prevObjectSize);
	uintptr_t * startSearchAt = (uintptr_t*)MM_Math::roundToFloor(J9MODRON_HEAP_SLOTS_PER_MARK_SLOT * sizeof(uintptr_t), (uintptr_t)endOfPrevObject);
	uintptr_t * endSearchAt = (uintptr_t*)MM_Math::roundToCeiling(J9MODRON_HEAP_SLOTS_PER_MARK_SLOT * sizeof(uintptr_t), (uintptr_t)endOfPrevObject + minimumFreeEntrySize);
	startSearchAt = OMR_MAX(startSearchAt, heapSlotFreeCurrent + J9MODRON_HEAP_SLOTS_PER_MARK_SLOT);
	endSearchAt = OMR_MIN(endSearchAt, (uintptr_t*)sweepChunk->chunkTop);
	if (startSearchAt < endSearchAt) {
		while ( startSearchAt < endSearchAt ) {
			MM_HeapMapWordIterator nextMarkedObjectIterator(_currentMarkMap, startSearchAt);
			omrobjectptr_t nextObject = nextMarkedObjectIterator.nextObject();
			if (NULL != nextObject) {
				uintptr_t holeSize = (uintptr_t)nextObject - (uintptr_t)endOfPrevObject;
				if (holeSize < minimumFreeEntrySize) {
					darkMatter += holeSize;
				}
				break;
			}
			startSearchAt += J9MODRON_HEAP_SLOTS_PER_MARK_SLOT;
		}
	} else if (endSearchAt > endOfPrevObject) {
		darkMatter += (uintptr_t)endSearchAt - (uintptr_t)endOfPrevObject;
	}

	return darkMatter;
}
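"Dark matter" here is the space lost to gaps between consecutive live objects that are smaller than the minimum free entry size; the arithmetic is simply "start of next object minus end of previous object". A standalone worked example with assumed addresses and sizes:

#include <cstdint>
#include <cstdio>

int main()
{
	/* Illustrative layout: object start addresses and consumed sizes in bytes. */
	const uintptr_t objectStart[] = { 0x1000, 0x1048, 0x10A0 };
	const uintptr_t objectSize[]  = {   0x40,   0x50,   0x30 };
	const uintptr_t minimumFreeEntrySize = 0x20;   /* assumed threshold */

	uintptr_t darkMatter = 0;
	for (int i = 1; i < 3; i++) {
		/* Gap between the end of the previous object and the start of the next one. */
		uintptr_t holeSize = objectStart[i] - (objectStart[i - 1] + objectSize[i - 1]);
		if (holeSize < minimumFreeEntrySize) {
			darkMatter += holeSize;   /* too small to become a free-list entry */
		}
	}
	/* Holes: 0x1048 - 0x1040 = 8 and 0x10A0 - 0x1098 = 8, so darkMatter == 16. */
	printf("dark matter: %lu bytes\n", (unsigned long)darkMatter);
	return 0;
}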
Example No. 18
MM_HeapRegionDescriptor *
MM_HeapRegionManagerTarok::acquireSingleTableRegion(MM_EnvironmentBase *env, MM_MemorySubSpace *subSpace, uintptr_t numaNode)
{
	MM_HeapRegionDescriptor *toReturn = NULL;
	writeLock();
	Trc_MM_HeapRegionManager_acquireSingleTableRegions_Entry(env->getLanguageVMThread(), subSpace, numaNode);
	Assert_MM_true(numaNode < _freeRegionTableSize);
	if (NULL != _freeRegionTable[numaNode]) {
		toReturn = internalAcquireSingleTableRegion(env, subSpace, numaNode);
		Assert_MM_true(NULL != toReturn);
	}
	Trc_MM_HeapRegionManager_acquireSingleTableRegions_Exit(env->getLanguageVMThread(), toReturn, numaNode);
	writeUnlock();
	return toReturn;
}
Example No. 19
	MMINLINE MM_HeapLinkedFreeHeader* getReservedFreeEntry()
	{
		MM_HeapLinkedFreeHeader* freeEntry = NULL;
		if (_reservedFreeEntryAvaliable) {
			Assert_MM_true(_heapFreeListCount > _reservedFreeListIndex);
			Assert_MM_true((void *)UDATA_MAX != _previousReservedFreeEntry);
			if (NULL == _previousReservedFreeEntry) {
				freeEntry = _heapFreeLists[_reservedFreeListIndex]._freeList;
			} else {
				freeEntry = _previousReservedFreeEntry->getNext();
			}
			Assert_MM_true(_reservedFreeEntrySize == freeEntry->getSize());
		}
		return freeEntry;
	}
Example No. 20
/**
 * Empty a packet on overflow
 *
 * Empty a packet to resolve overflow by dirtying the appropriate
 * cards for each object within a given packet
 *
 * @param packet - Reference to the packet to be emptied
 * @param type - ignored for the concurrent collector
 */
void
MM_ConcurrentOverflow::emptyToOverflow(MM_EnvironmentBase *env, MM_Packet *packet, MM_OverflowType type)
{
	MM_ConcurrentGC *collector = (MM_ConcurrentGC *)_extensions->getGlobalCollector();
	void *objectPtr;

	_overflow = true;

	/* Broadcast the overflow to the concurrent collector
	 * so it can take any remedial action */
	collector->concurrentWorkStackOverflow();

	_extensions->globalGCStats.workPacketStats.setSTWWorkStackOverflowOccured(true);
	_extensions->globalGCStats.workPacketStats.incrementSTWWorkStackOverflowCount();
	_extensions->globalGCStats.workPacketStats.setSTWWorkpacketCountAtOverflow(_workPackets->getActivePacketCount());

#if defined(OMR_GC_MODRON_SCAVENGER)
	clearCardsForNewSpace(MM_EnvironmentStandard::getEnvironment(env), collector);
#endif /*  OMR_GC_MODRON_SCAVENGER */
	
	/* Empty the current packet by dirtying its cards now */
	while(NULL != (objectPtr = packet->pop(env))) {
		overflowItemInternal(env, objectPtr, collector->getCardTable());
	}
	
	Assert_MM_true(packet->isEmpty());
}
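Overflowed entries are not lost: each object popped from the packet is recorded by dirtying the card that covers it, and the concurrent collector rescans dirty cards later. A minimal sketch of the card-dirtying step, assuming a 512-byte card granularity (an assumption made for the example, not taken from this code):

#include <cstdint>

static const uintptr_t CARD_SIZE_BYTES = 512;   /* assumed card granularity */
static const uint8_t CARD_DIRTY = 1;

/* Mark the card covering objectAddr so the collector rescans that range later. */
static void dirtyCardForObject(uint8_t *cardTable, uintptr_t heapBase, uintptr_t objectAddr)
{
	uintptr_t cardIndex = (objectAddr - heapBase) / CARD_SIZE_BYTES;
	cardTable[cardIndex] = CARD_DIRTY;
}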
Example No. 21
omrobjectptr_t
OMR_GC_AllocateObject(OMR_VMThread * omrVMThread, MM_AllocateInitialization *allocator)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);
	Assert_MM_true(NULL != env->getExtensions()->getGlobalCollector());
	return allocator->allocateAndInitializeObject(omrVMThread);
}
Example No. 22
/**
 * Find the address of the next entry on free list entry.
 * @param currentFreeListIndex will return the index of the free list containing the next free starting address but
 * ONLY if the free list index has changed from the current free address to the next free address.
 *
 * @return The address of next free entry or NULL
 */
void*
MM_MemoryPoolSplitAddressOrderedListBase::getNextFreeStartingAddr(MM_EnvironmentBase* env, void* currentFree, uintptr_t* currentFreeListIndex)
{
	Assert_MM_true(currentFree != NULL);
	if (NULL != ((MM_HeapLinkedFreeHeader*)currentFree)->getNext()) {
		return ((MM_HeapLinkedFreeHeader*)currentFree)->getNext();
	}
	uintptr_t startFreeListIndex = 0;
	if (currentFreeListIndex != NULL) {
		startFreeListIndex = *currentFreeListIndex;
		if (startFreeListIndex >= _heapFreeListCount) {
			startFreeListIndex = 0;
		} else if (_heapFreeLists[startFreeListIndex]._freeList > currentFree) {
			startFreeListIndex = 0;
		}
	}
	for (uintptr_t i = startFreeListIndex; i < _heapFreeListCount; ++i) {
		if ((uintptr_t)_heapFreeLists[i]._freeList > (uintptr_t)currentFree) {
			if (NULL != currentFreeListIndex) {
				*currentFreeListIndex = i;
			}
			return _heapFreeLists[i]._freeList;
		}
	}

	if (NULL != currentFreeListIndex) {
		*currentFreeListIndex = _heapFreeListCount;
	}

	return NULL;
}
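When the current free entry has no successor within its own list, the method scans the remaining free lists in index order for the first head whose address lies above currentFree. The sketch below isolates that scan over a plain array of list heads; the signature is illustrative, not the OMR method.

#include <cstddef>
#include <cstdint>

/* Find the first free-list head above currentFree, starting the scan at
 * startIndex; writes the winning index through currentFreeListIndex. */
static void *nextFreeStartingAddr(void **freeListHeads, uintptr_t freeListCount,
	void *currentFree, uintptr_t startIndex, uintptr_t *currentFreeListIndex)
{
	for (uintptr_t i = startIndex; i < freeListCount; ++i) {
		if ((uintptr_t)freeListHeads[i] > (uintptr_t)currentFree) {
			if (NULL != currentFreeListIndex) {
				*currentFreeListIndex = i;
			}
			return freeListHeads[i];
		}
	}
	if (NULL != currentFreeListIndex) {
		*currentFreeListIndex = freeListCount;   /* nothing above currentFree */
	}
	return NULL;
}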
Example No. 23
void
MM_ParallelMarkTask::cleanup(MM_EnvironmentBase *env)
{
	_markingScheme->workerCleanupAfterGC(env);

	if (env->isMasterThread()) {
		Assert_MM_true(_cycleState == env->_cycleState);
	} else {
		env->_cycleState = NULL;
	}
	
	/* record the thread-specific parallelism stats in the trace buffer. This partially duplicates info in -Xtgc:parallel */
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
	Trc_MM_ParallelMarkTask_parallelStats(
		env->getLanguageVMThread(),
		(uint32_t)env->getSlaveID(),
		(uint32_t)omrtime_hires_delta(0, env->_workPacketStats._workStallTime, OMRPORT_TIME_DELTA_IN_MILLISECONDS),
		(uint32_t)omrtime_hires_delta(0, env->_workPacketStats._completeStallTime, OMRPORT_TIME_DELTA_IN_MILLISECONDS),
		(uint32_t)omrtime_hires_delta(0, env->_markStats._syncStallTime, OMRPORT_TIME_DELTA_IN_MILLISECONDS),
		(uint32_t)env->_workPacketStats._workStallCount,
		(uint32_t)env->_workPacketStats._completeStallCount,
		(uint32_t)env->_markStats._syncStallCount,
		env->_workPacketStats.workPacketsAcquired,
		env->_workPacketStats.workPacketsReleased,
		env->_workPacketStats.workPacketsExchanged,
		0 /* TODO CRG: figure out how to get the array split size */);
}
Example No. 24
/**
 * One time initialization of the receivers state.
 * @return true on successful initialization, false otherwise.
 */
bool
MM_SegregatedAllocationInterface::initialize(MM_EnvironmentBase *env)
{
	MM_GCExtensionsBase* extensions = env->getExtensions();
	bool result = true;

	Assert_MM_true(NULL == _frequentObjectsStats);

	if (extensions->doFrequentObjectAllocationSampling){
		_frequentObjectsStats = MM_FrequentObjectsStats::newInstance(env);
		result = (NULL != _frequentObjectsStats);
	}
	
	if (result) {
		_allocationCache = _languageAllocationCache.getLanguageSegregatedAllocationCacheStruct(env);
		_sizeClasses = extensions->defaultSizeClasses;
		_cachedAllocationsEnabled = true;

		memset(_allocationCache, 0, sizeof(LanguageSegregatedAllocationCache));
		memset(&_allocationCacheStats, 0, sizeof(_allocationCacheStats));
		for (uintptr_t sizeClass = OMR_SIZECLASSES_MIN_SMALL; sizeClass <= OMR_SIZECLASSES_MAX_SMALL; sizeClass++) {
			_replenishSizes[sizeClass] = extensions->allocationCacheInitialSize;
		}
	}
	
	return result;
}
Example No. 25
/**
 * Commit the address range into physical memory.
 * @return true if successful, false otherwise.
 */
bool
MM_VirtualMemory::commitMemory(void* address, uintptr_t size)
{
	OMRPORT_ACCESS_FROM_OMRVM(_extensions->getOmrVM());
	Assert_MM_true(0 != _pageSize);

	bool success = true;

	/* port library takes page aligned addresses and sizes only */
	void* commitBase = (void*)MM_Math::roundToFloor(_pageSize, (uintptr_t)address);
	void* commitTop = (void*)MM_Math::roundToCeiling(_pageSize, (uintptr_t)address + size + _tailPadding);
	uintptr_t commitSize;

	if (commitBase <= commitTop) {
		commitSize = (uintptr_t)commitTop - (uintptr_t)commitBase;
	} else {
		/* wrapped around - this is the end of memory */
		commitSize = UDATA_MAX - (uintptr_t)commitBase + 1;
	}

	if (0 < commitSize) {
		success = omrvmem_commit_memory(commitBase, commitSize, &_identifier) != 0;
	}

	if (success) {
		Trc_MM_VirtualMemory_commitMemory_success(address, size);
	} else {
		Trc_MM_VirtualMemory_commitMemory_failure(address, size);
	}

	return success;
}
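Commit requests are widened to page boundaries: the base is rounded down to the page containing the address, and the top is rounded up past address + size. A worked example of the rounding for a power-of-two page size; the helper names are illustrative stand-ins for MM_Math.

#include <cassert>
#include <cstdint>
#include <cstdio>

/* Round down/up to a power-of-two alignment. */
static uintptr_t roundToFloor(uintptr_t alignment, uintptr_t value)
{
	return value & ~(alignment - 1);
}

static uintptr_t roundToCeiling(uintptr_t alignment, uintptr_t value)
{
	return (value + alignment - 1) & ~(alignment - 1);
}

int main()
{
	const uintptr_t pageSize = 4096;              /* assumed page size */
	const uintptr_t address = 0x10001234;
	const uintptr_t size = 10000;

	uintptr_t commitBase = roundToFloor(pageSize, address);           /* 0x10001000 */
	uintptr_t commitTop = roundToCeiling(pageSize, address + size);   /* 0x10004000 */
	uintptr_t commitSize = commitTop - commitBase;                    /* 3 pages = 12288 bytes */

	assert(0x10001000 == commitBase);
	assert(0x10004000 == commitTop);
	printf("commit %lu bytes\n", (unsigned long)commitSize);
	return 0;
}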
Example No. 26
/**
 * Walk all segments and calculate the maximum number of chunks needed to represent the current heap.
 * The chunk calculation is done on a per segment basis (no segment can represent memory from more than 1 chunk),
 * and partial sized chunks (ie: less than the chunk size) are reserved for any remaining space at the end of a
 * segment.
 * @return number of chunks required to represent the current heap memory.
 */
uintptr_t
MM_SweepHeapSectioningSegmented::calculateActualChunkNumbers() const
{
	uintptr_t totalChunkCount = 0;

	MM_HeapRegionDescriptor *region;
	MM_Heap *heap = _extensions->heap;
	MM_HeapRegionManager *regionManager = heap->getHeapRegionManager();
	GC_HeapRegionIterator regionIterator(regionManager);

	while((region = regionIterator.nextRegion()) != NULL) {
		if ((region)->isCommitted()) {
			/* TODO:  this must be rethought for Tarok since it treats all regions identically but some might require different sweep logic */
			MM_MemorySubSpace *subspace = region->getSubSpace();
			/* if this is a committed region, it requires a non-NULL subspace */
			Assert_MM_true(NULL != subspace);
			uintptr_t poolCount = subspace->getMemoryPoolCount();

			totalChunkCount += MM_Math::roundToCeiling(_extensions->parSweepChunkSize, region->getSize()) / _extensions->parSweepChunkSize;

			/* Add extra chunks if more than one memory pool */
			totalChunkCount += (poolCount - 1);
		}
	}

	return totalChunkCount;
}
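Each committed region contributes ceiling(regionSize / parSweepChunkSize) chunks, plus one extra chunk per additional memory pool. A worked example with assumed sizes (2 MB sweep chunks, a 17 MB region with two pools):

#include <cstdint>
#include <cstdio>

/* Round up to a multiple of alignment (alignment need not be a power of two). */
static uintptr_t roundToCeiling(uintptr_t alignment, uintptr_t value)
{
	return ((value + alignment - 1) / alignment) * alignment;
}

int main()
{
	/* Illustrative values: 2 MB sweep chunks, one 17 MB region with 2 pools. */
	const uintptr_t parSweepChunkSize = 2 * 1024 * 1024;
	const uintptr_t regionSize = 17 * 1024 * 1024;
	const uintptr_t poolCount = 2;

	uintptr_t totalChunkCount = roundToCeiling(parSweepChunkSize, regionSize) / parSweepChunkSize;
	totalChunkCount += (poolCount - 1);   /* extra chunk per additional pool */

	/* ceiling(17 MB / 2 MB) = 9 chunks, plus 1 for the extra pool = 10. */
	printf("chunks required: %lu\n", (unsigned long)totalChunkCount);
	return 0;
}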
Example No. 27
void
MM_HeapRegionManagerTarok::internalReleaseTableRegions(MM_EnvironmentBase *env, MM_HeapRegionDescriptor *rootRegion)
{
	/* must be an allocated table region */
	Assert_MM_true(rootRegion >= _regionTable);
	Assert_MM_true(rootRegion < (MM_HeapRegionDescriptor *)((uintptr_t)_regionTable + (_tableRegionCount * _tableDescriptorSize)));
	Assert_MM_true(NULL == rootRegion->_nextInSet);
	Assert_MM_true(rootRegion->_isAllocated);

	rootRegion->_isAllocated = false;
	rootRegion->setRegionType(MM_HeapRegionDescriptor::RESERVED);
	rootRegion->disassociateWithSubSpace();

	uintptr_t freeListIndex = rootRegion->getNumaNode();
	rootRegion->_nextInSet = _freeRegionTable[freeListIndex];
	_freeRegionTable[freeListIndex] = rootRegion;
}
Example No. 28
/**
 * Flush all allocation contexts such that the cells allocated to them becomes safe for traversal.
 */
void
MM_GlobalAllocationManager::flushAllocationContexts(MM_EnvironmentBase *env)
{
	Assert_MM_true(_managedAllocationContextCount > 0);
	for (uintptr_t i = 0; i < _managedAllocationContextCount; i++) {
		_managedAllocationContexts[i]->flush(env);
	}
}
Example No. 29
bool
MM_VirtualMemory::setNumaAffinity(uintptr_t numaNode, void* address, uintptr_t byteAmount)
{
	Assert_MM_true(0 != _pageSize);

	/* start address must be above heap start address */
	Assert_MM_true(address >= _heapBase);
	/* start address must be below heap top address */
	Assert_MM_true(address <= _heapTop);

	/* start address must be aligned to physical page size */
	Assert_MM_true(0 == ((uintptr_t)address % _pageSize));

	void* topAddress = (void*)((uintptr_t)address + byteAmount);

	/* top address must be above heap start address */
	Assert_MM_true(topAddress >= _heapBase);
	/* top address must be below heap top address */
	Assert_MM_true(topAddress <= _heapTop);

	bool didSetAffinity = true;
	if (_extensions->_numaManager.isPhysicalNUMASupported()) {
		OMRPORT_ACCESS_FROM_OMRVM(_extensions->getOmrVM());

		uintptr_t byteAmountPageAligned = MM_Math::roundToCeiling(_pageSize, byteAmount);
		/* the aligned high address might be higher than heapTop but
		 * must be in the heap reserved memory range
		 */
		Assert_MM_true(((uintptr_t)address + byteAmountPageAligned) <= ((uintptr_t)_heapBase + _reserveSize));

		didSetAffinity = (0 == omrvmem_numa_set_affinity(numaNode, address, byteAmountPageAligned, &_identifier));
	}
	return didSetAffinity;
}
Example No. 30
void
MM_EnvironmentBase::restoreObjects(omrobjectptr_t *objectPtrIndirect)
{
	void *heapBase = getExtensions()->heap->getHeapBase();
	void *heapTop = getExtensions()->heap->getHeapTop();

	if (NULL != _omrVMThread->_savedObject2) {
		Assert_MM_true((heapBase <= _omrVMThread->_savedObject2) && (heapTop > _omrVMThread->_savedObject2));
		*objectPtrIndirect = (omrobjectptr_t)_omrVMThread->_savedObject2;
		_omrVMThread->_savedObject2 = NULL;
	} else if (NULL != _omrVMThread->_savedObject1) {
		Assert_MM_true((heapBase <= _omrVMThread->_savedObject1) && (heapTop > _omrVMThread->_savedObject1));
		*objectPtrIndirect = (omrobjectptr_t)_omrVMThread->_savedObject1;
		_omrVMThread->_savedObject1 = NULL;
	} else {
		Assert_MM_unreachable();
	}
}