Example 1
bool
MM_MasterGCThread::garbageCollect(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription)
{
	Assert_MM_mustHaveExclusiveVMAccess(env->getOmrVMThread());
	bool didAttemptCollect = false;
	
	if (NULL != _collector) {
		/* the collector has started up so try to run */
		/* once the master thread has stored itself in the _masterGCThread, it should never need to collect - this would hang */
		Assert_MM_true(omrthread_self() != _masterGCThread);
		if (_runAsImplicit || (NULL == _masterGCThread)) {
			/* We might not have _masterGCThread in the startup phase or late in the shutdown phase.
			 * For example, there may be a native out-of-memory during startup or RAS may 
			 * trigger a GC after we've shut down the master thread.
			 */
			Assert_MM_true(0 == env->getSlaveID());
			_collector->preMasterGCThreadInitialize(env);
			_collector->masterThreadGarbageCollect(env, allocDescription);

			if (_runAsImplicit && _collector->isConcurrentWorkAvailable(env)) {
				omrthread_monitor_enter(_collectorControlMutex);

				if (STATE_WAITING == _masterThreadState) {
					_masterThreadState = STATE_GC_REQUESTED;
					omrthread_monitor_notify(_collectorControlMutex);
				}

				omrthread_monitor_exit(_collectorControlMutex);
			}
		} else {
			/* this is the general case, when the master thread is running internally */
			omrthread_monitor_enter(_collectorControlMutex);
			/* The variable assignments below are safe because we hold Xaccess.  Otherwise, it is possible (based on the wait/notify mechanism here)
			 * that another thread could come in under this mutex and stomp on the "parameters" while another thread is waiting.
			 */
			_allocDesc = allocDescription;
			_incomingCycleState = env->_cycleState;
			MasterGCThreadState previousState = _masterThreadState;
			_masterThreadState = STATE_GC_REQUESTED;
			if (STATE_WAITING == previousState) {
				omrthread_monitor_notify(_collectorControlMutex);
			} else if (STATE_RUNNING_CONCURRENT == previousState) {
				_collector->forceConcurrentFinish();
			} else {
				Assert_MM_unreachable();
			}
			
			/* The master thread will claim exclusive VM access. Artificially give it up in this thread so that tools like -Xcheck:vm continue to work. */
			uintptr_t savedExclusiveCount = env->relinquishExclusiveVMAccess();
			while (STATE_GC_REQUESTED == _masterThreadState) {
				omrthread_monitor_wait(_collectorControlMutex);
			}
			env->assumeExclusiveVMAccess(savedExclusiveCount);

			Assert_MM_true(NULL == _incomingCycleState);
			omrthread_monitor_exit(_collectorControlMutex);
		}
		
		didAttemptCollect = true;
	}
	return didAttemptCollect;
}
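
The explicit-master-thread branch above is a standard monitor handshake: the requesting thread publishes its "parameters" (_allocDesc, _incomingCycleState) under _collectorControlMutex, moves _masterThreadState to STATE_GC_REQUESTED, notifies the master GC thread, and then waits until the state leaves STATE_GC_REQUESTED. The sketch below reproduces the same request/completion handshake with the C++ standard library instead of omrthread monitors; every name in it is hypothetical and it only illustrates the pattern, not the OMR implementation.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

/* Hypothetical stand-ins for _masterThreadState and _collectorControlMutex. */
enum class GcState { Waiting, Requested, Finished };

static std::mutex controlMutex;
static std::condition_variable controlCondition;
static GcState gcState = GcState::Waiting;

/* Plays the role of the master GC thread: sleep until a request arrives, "collect", then notify. */
static void masterGcThread()
{
	std::unique_lock<std::mutex> lock(controlMutex);
	controlCondition.wait(lock, [] { return GcState::Requested == gcState; });
	std::puts("master thread: running the collection");
	gcState = GcState::Finished;
	controlCondition.notify_all();
}

/* Plays the role of garbageCollect()'s explicit path: post the request, wake the master, wait for completion. */
static void requestGc()
{
	std::unique_lock<std::mutex> lock(controlMutex);
	gcState = GcState::Requested; /* publish the request under the mutex, like _allocDesc / _incomingCycleState */
	controlCondition.notify_all();
	controlCondition.wait(lock, [] { return GcState::Finished == gcState; });
	std::puts("requester: collection finished");
}

int main()
{
	std::thread master(masterGcThread);
	requestGc();
	master.join();
	return 0;
}
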
Example 2
void *
MM_MemorySubSpaceGenerational::allocationRequestFailed(MM_EnvironmentBase *env, MM_AllocateDescription *allocateDescription, AllocationType allocationType, MM_ObjectAllocationInterface *objectAllocationInterface, MM_MemorySubSpace *baseSubSpace, MM_MemorySubSpace *previousSubSpace)
{
	/* TODO: This code is nearly the same as Flat and Concurrent - all three should be merged into a common superclass */
	void *addr = NULL;

	if (previousSubSpace == _memorySubSpaceNew) {
		/* Handle a failure coming from new space - attempt the old area before doing any collection work */
		addr = _memorySubSpaceOld->allocationRequestFailed(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace, this);
		if (NULL != addr) {
			return addr;
		}
	}

	allocateDescription->saveObjects(env);
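	/* acquire exclusive access and, after we get it, see if we need to perform a collect or if someone else beat us to it */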
	if (!env->acquireExclusiveVMAccessForGC(_collector, true, true)) {
		allocateDescription->restoreObjects(env);
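		/* Beaten to exclusive access for our collector by another thread - a GC must have occurred.  This thread
		 * does NOT have exclusive access at this point.  Try and satisfy the allocate based on a GC having occurred.
		 */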
		addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace);
		if (NULL != addr) {
			return addr;
		}

		allocateDescription->saveObjects(env);
		if (!env->acquireExclusiveVMAccessForGC(_collector)) {
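			/* we have exclusive access but another thread beat us to the GC so see if they collected enough to satisfy our request */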
			allocateDescription->restoreObjects(env);
			addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace);
			if (NULL != addr) {
				/* Satisfied the allocate after having grabbed exclusive access to perform a GC (without actually performing the GC).  Raise
				 * an event for tracing / verbose to report the occurrence.
				 */
				reportAcquiredExclusiveToSatisfyAllocate(env, allocateDescription);
				return addr;
			}

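			/* we still failed the allocate so try a resize to get more space */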
			reportAllocationFailureStart(env, allocateDescription);
			performResize(env, allocateDescription);
			addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace);

			if (NULL != addr) {
				/* Satisfied the allocate after having grabbed exclusive access to perform a GC (without actually performing the GC).  Raise
				 * an event for tracing / verbose to report the occurrence.
				 */
				reportAcquiredExclusiveToSatisfyAllocate(env, allocateDescription);
				reportAllocationFailureEnd(env);
				return addr;
			}
			allocateDescription->saveObjects(env);
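			/* we still failed so we will need to run a GC */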
		} else {
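			/* we have exclusive and no other thread beat us to it so we can now collect */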
			reportAllocationFailureStart(env, allocateDescription);
		}
	} else {
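		/* we have exclusive and no other thread beat us to it so we can now collect */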
		reportAllocationFailureStart(env, allocateDescription);
	}

	Assert_MM_mustHaveExclusiveVMAccess(env->getOmrVMThread());

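	/* run the collector in the default mode (ie:  not explicitly aggressive) */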
	allocateDescription->setAllocationType(allocationType);
	addr = _collector->garbageCollect(env, this, allocateDescription, J9MMCONSTANT_IMPLICIT_GC_DEFAULT, objectAllocationInterface, baseSubSpace, NULL);
	allocateDescription->restoreObjects(env);

	if (NULL != addr) {
		reportAllocationFailureEnd(env);
		return addr;
	}

	/* A more aggressive collect here on failure */
	allocateDescription->saveObjects(env);
	addr = _collector->garbageCollect(env, this, allocateDescription, J9MMCONSTANT_IMPLICIT_GC_AGGRESSIVE, objectAllocationInterface, baseSubSpace, NULL);
	allocateDescription->restoreObjects(env);
	
	reportAllocationFailureEnd(env);
	return addr;
}
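
Stripped of the reporting and resize calls, both allocationRequestFailed() implementations follow the same escalation ladder: if another thread already performed the GC, retry the allocation first; otherwise run a default collection, retry, and only then fall back to an aggressive collection before giving up. The sketch below distills that ladder; every helper in it is a hypothetical stand-in rather than the OMR API, and the repeated "beaten to the GC" checks and the resize step are omitted.

#include <cstdio>

/* Hypothetical stand-ins for the real subsystem calls. */
static bool tryAllocate(const char *stage)
{
	std::printf("allocate attempt: %s\n", stage);
	return false; /* pretend the heap is still too full */
}

static bool acquireExclusiveForGC()
{
	/* In the real code, a false return means another thread already ran a GC on our behalf. */
	return true;
}

static void collect(bool aggressive)
{
	std::printf("collect (%s)\n", aggressive ? "aggressive" : "default");
}

static bool handleAllocationFailure()
{
	if (!acquireExclusiveForGC()) {
		/* another thread already collected - retry the allocate before collecting again */
		if (tryAllocate("after another thread's GC")) {
			return true;
		}
		/* still failing: re-acquire exclusive access and fall through to a GC of our own */
		acquireExclusiveForGC();
	}
	/* we hold exclusive access: collect in the default mode, then retry */
	collect(false);
	if (tryAllocate("after default GC")) {
		return true;
	}
	/* last resort before reporting failure: an aggressive collection */
	collect(true);
	return tryAllocate("after aggressive GC");
}

int main()
{
	std::printf("allocation %s\n", handleAllocationFailure() ? "satisfied" : "failed");
	return 0;
}
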
Example 3
void*
MM_MemorySubSpaceFlat::allocationRequestFailed(MM_EnvironmentBase* env, MM_AllocateDescription* allocateDescription, AllocationType allocationType, MM_ObjectAllocationInterface* objectAllocationInterface, MM_MemorySubSpace* baseSubSpace, MM_MemorySubSpace* previousSubSpace)
{
	void* addr = NULL;

	/* If the request came from the parent, forward the failure handling to the child first */
	if (previousSubSpace == _parent) {
		addr = _memorySubSpace->allocationRequestFailed(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace, this);
		if (NULL != addr) {
			return addr;
		}
	}

	/* If there is a collector present, execute and retry the failure on the child */
	if (_collector) {
		allocateDescription->saveObjects(env);
		/* acquire exclusive access and, after we get it, see if we need to perform a collect or if someone else beat us to it */
		if (!env->acquireExclusiveVMAccessForGC(_collector, true, true)) {
			allocateDescription->restoreObjects(env);
			/* Beaten to exclusive access for our collector by another thread - a GC must have occurred.  This thread
			 * does NOT have exclusive access at this point.  Try and satisfy the allocate based on a GC having occurred.
			 */
			addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, _memorySubSpace);
			if (NULL != addr) {
				return addr;
			}

			/* Failed to satisfy allocate - now really go for a GC */
			allocateDescription->saveObjects(env);
			/* acquire exclusive access and, after we get it, see if we need to perform a collect or if someone else beat us to it */
			if (!env->acquireExclusiveVMAccessForGC(_collector)) {
				/* we have exclusive access but another thread beat us to the GC so see if they collected enough to satisfy our request */
				allocateDescription->restoreObjects(env);
				addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, _memorySubSpace);
				if (NULL != addr) {
					/* Satisfied the allocate after having grabbed exclusive access to perform a GC (without actually performing the GC).  Raise
					 * an event for tracing / verbose to report the occurrence.
					 */
					reportAcquiredExclusiveToSatisfyAllocate(env, allocateDescription);
					return addr;
				}

				/* we still failed the allocate so try a resize to get more space */
				reportAllocationFailureStart(env, allocateDescription);
				performResize(env, allocateDescription);
				addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace);

				if (addr) {
					/* Satisfied the allocate after having grabbed exclusive access to perform a GC (without actually performing the GC).  Raise
					 * an event for tracing / verbose to report the occurrence.
					 */
					reportAcquiredExclusiveToSatisfyAllocate(env, allocateDescription);
					reportAllocationFailureEnd(env);
					return addr;
				}
				allocateDescription->saveObjects(env);
				/* we still failed so we will need to run a GC */
			} else {
				/* we have exclusive and no other thread beat us to it so we can now collect */
				reportAllocationFailureStart(env, allocateDescription);
			}
		} else {
			/* we have exclusive and no other thread beat us to it so we can now collect */
			reportAllocationFailureStart(env, allocateDescription);
		}

		Assert_MM_mustHaveExclusiveVMAccess(env->getOmrVMThread());

		/* run the collector in the default mode (ie:  not explicitly aggressive) */
		allocateDescription->setAllocationType(allocationType);
		addr = _collector->garbageCollect(env, this, allocateDescription, J9MMCONSTANT_IMPLICIT_GC_DEFAULT, objectAllocationInterface, baseSubSpace, NULL);
		allocateDescription->restoreObjects(env);

		if (addr) {
			reportAllocationFailureEnd(env);
			return addr;
		}

		allocateDescription->saveObjects(env);
		/* The collect wasn't good enough to satisfy the allocate so attempt an aggressive collection */
		addr = _collector->garbageCollect(env, this, allocateDescription, J9MMCONSTANT_IMPLICIT_GC_AGGRESSIVE, objectAllocationInterface, baseSubSpace, NULL);
		allocateDescription->restoreObjects(env);

		reportAllocationFailureEnd(env);

		if (addr) {
			return addr;
		}
		/* there was nothing we could do to satisfy the allocate at this level (we will either OOM or attempt a collect at the higher level in our parent) */
	}

	/* If the caller was the child, forward the failure notification to the parent for handling */
	if ((NULL != _parent) && (previousSubSpace != _parent)) {
		/* see if the parent can find us some space */
		return _parent->allocationRequestFailed(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace, this);
	}

	/* Nothing else to try - fail */
	return NULL;
}
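
The final branch shows how the flat subspace escalates: a failure that arrived from below is forwarded to _parent, while a failure that came down from the parent is never bounced back, which keeps the two levels from ping-ponging the request. Below is a minimal sketch of that delegation, with a hypothetical SubSpace type standing in for MM_MemorySubSpace and the local collection elided.

#include <cstdio>

/* Hypothetical type; only the parent/child delegation logic is kept. */
struct SubSpace {
	SubSpace *parent;
	const char *name;

	void *handleAllocationFailure(SubSpace *previousSubSpace)
	{
		std::printf("%s: local collection failed to free enough space\n", name);
		if ((NULL != parent) && (previousSubSpace != parent)) {
			/* the failure originated below us, so escalate it to the parent */
			return parent->handleAllocationFailure(this);
		}
		return NULL; /* nothing left to try at this level */
	}
};

int main()
{
	SubSpace tenure = { NULL, "tenure (parent)" };
	SubSpace nursery = { &tenure, "nursery (child)" };
	void *addr = nursery.handleAllocationFailure(NULL);
	std::printf("result: %p\n", addr);
	return 0;
}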