Example #1
bool
MM_EnvironmentBase::tryAcquireExclusiveVMAccessForGC(MM_Collector *collector)
{
	MM_GCExtensionsBase *extensions = getExtensions();
	uintptr_t collectorAccessCount = collector->getExclusiveAccessCount();

	_exclusiveAccessBeatenByOtherThread = false;

	while(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
		if(NULL == extensions->gcExclusiveAccessThreadId) {
			/* there is a chance the thread can win the race to acquire exclusive access for GC */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			if(NULL == extensions->gcExclusiveAccessThreadId) {
				/* thread is the winner and will request the GC */
				extensions->gcExclusiveAccessThreadId = _omrVMThread;
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);
		}

		if(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
			/* thread was not the winner for requesting a GC - allow the GC to proceed and wait for it to complete */
			Assert_MM_true(NULL != extensions->gcExclusiveAccessThreadId);

			uintptr_t accessMask;
			_envLanguageInterface->releaseCriticalHeapAccess(&accessMask);

			/* there is a chance the GC will already have executed at this point or other threads will re-win and re-execute.  loop until the
			 * thread sees that no more GCs are being requested.
			 */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			while(NULL != extensions->gcExclusiveAccessThreadId) {
				omrthread_monitor_wait(extensions->gcExclusiveAccessMutex);
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);

			_envLanguageInterface->reacquireCriticalHeapAccess(accessMask);

			/* We may have been beaten to a GC, but perhaps not the one we wanted.  If the collection
			 * we intended has in fact already been completed, we will not acquire exclusive access.
			 */
			if(collector->getExclusiveAccessCount() != collectorAccessCount) {
				return false;
			}
		}
	}

	/* thread is the winner for requesting a GC (possibly through recursive calls).  proceed with acquiring exclusive access. */
	Assert_MM_true(_omrVMThread == extensions->gcExclusiveAccessThreadId);

	this->acquireExclusiveVMAccess();

	collector->incrementExclusiveAccessCount();

	GC_OMRVMInterface::flushCachesForGC(this);

	return true;
}
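A caller-side note on Example #1: if the call returns false, another thread already completed the collection this thread wanted, so the caller simply moves on; if it returns true, the caller owns exclusive access and must pair the acquire with a release. A minimal hedged sketch of such a call site (not from the OMR sources; doCollection() is a hypothetical placeholder and releaseExclusiveVMAccessForGC() is assumed to be the matching release):

/* Hedged caller-side sketch; doCollection() is a hypothetical placeholder and
 * releaseExclusiveVMAccessForGC() is assumed to be the matching release.
 */
extern void doCollection(MM_EnvironmentBase *env, MM_Collector *collector); /* placeholder */

static bool
collectIfStillNeeded(MM_EnvironmentBase *env, MM_Collector *collector)
{
	if (!env->tryAcquireExclusiveVMAccessForGC(collector)) {
		/* beaten: the collection this thread wanted has already completed */
		return false;
	}
	doCollection(env, collector);
	env->releaseExclusiveVMAccessForGC();
	return true;
}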
Example #2
void
MM_MasterGCThread::masterThreadEntryPoint()
{
	OMR_VMThread *omrVMThread = NULL;
	Assert_MM_true(NULL != _collectorControlMutex);
	Assert_MM_true(NULL == _masterGCThread);

	/* Attach the thread as a system daemon thread */	
	/* You need a VM thread so that the stack walker can work */
	omrVMThread = MM_EnvironmentBase::attachVMThread(_extensions->getOmrVM(), "Dedicated GC Master", MM_EnvironmentBase::ATTACH_GC_MASTER_THREAD);
	if (NULL == omrVMThread) {
		/* we failed to attach, so notify the creating thread that start-up has failed */
		omrthread_monitor_enter(_collectorControlMutex);
		_masterThreadState = STATE_ERROR;
		omrthread_monitor_notify(_collectorControlMutex);
		omrthread_exit(_collectorControlMutex);
	} else {
		/* thread attached successfully */
		MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);

		/* attachVMThread could allocate and execute a barrier (since, up to that point, this thread
		 * acted as a mutator thread). Flush GC caches (like barrier buffers) before turning into
		 * the master thread */
		env->flushGCCaches();

		env->setThreadType(GC_MASTER_THREAD);

		/* Begin running the thread */
		omrthread_monitor_enter(_collectorControlMutex);
		
		_collector->preMasterGCThreadInitialize(env);
		
		_masterThreadState = STATE_WAITING;
		_masterGCThread = omrthread_self();
		omrthread_monitor_notify(_collectorControlMutex);
		do {
			if (STATE_GC_REQUESTED == _masterThreadState) {
				if (_runAsImplicit) {
					handleConcurrent(env);
				} else {
					handleSTW(env);
				}
			}

			if (STATE_WAITING == _masterThreadState) {
				if (_runAsImplicit || !handleConcurrent(env)) {
					omrthread_monitor_wait(_collectorControlMutex);
				}
			}
		} while (STATE_TERMINATION_REQUESTED != _masterThreadState);
		/* notify the shutdown thread that we have terminated so that it can continue */
		_masterThreadState = STATE_TERMINATED;
		_masterGCThread = NULL;
		omrthread_monitor_notify(_collectorControlMutex);
		MM_EnvironmentBase::detachVMThread(_extensions->getOmrVM(), omrVMThread, MM_EnvironmentBase::ATTACH_GC_MASTER_THREAD);
		omrthread_exit(_collectorControlMutex);
	}
}
Example #3
bool
MM_EnvironmentBase::tryAcquireExclusiveForConcurrentKickoff(MM_ConcurrentGCStats *stats)
{
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(_omrVM);
	uintptr_t gcCount = extensions->globalGCStats.gcCount;

	while (_omrVMThread != extensions->gcExclusiveAccessThreadId) {
		if (NULL == extensions->gcExclusiveAccessThreadId) {
			/* there is a chance the thread can win the race to acquire exclusive access for GC */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			if (NULL == extensions->gcExclusiveAccessThreadId) {
				/* thread is the winner and will request the GC */
				extensions->gcExclusiveAccessThreadId = _omrVMThread;
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);
		}

		if (_omrVMThread != extensions->gcExclusiveAccessThreadId) {
			/* thread was not the winner for requesting a GC - allow the GC to proceed and wait for it to complete */
			Assert_MM_true(NULL != extensions->gcExclusiveAccessThreadId);

			uintptr_t accessMask = 0;

			_envLanguageInterface->releaseCriticalHeapAccess(&accessMask);

			/* there is a chance the GC will already have executed at this point or other threads will re-win and re-execute.  loop until the
			 * thread sees that no more GCs are being requested.
			 */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			while (NULL != extensions->gcExclusiveAccessThreadId) {
				omrthread_monitor_wait(extensions->gcExclusiveAccessMutex);
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);

			_envLanguageInterface->reacquireCriticalHeapAccess(accessMask);

			/* We may have been beaten to a GC, but perhaps not the one we wanted.  If the collection
			 * we intended has in fact already been completed, we will not acquire exclusive access.
			 */
			if ((gcCount != extensions->globalGCStats.gcCount) || (CONCURRENT_INIT_COMPLETE != stats->getExecutionMode())) {
				return false;
			}
		}
	}

	Assert_MM_true(_omrVMThread == extensions->gcExclusiveAccessThreadId);
	Assert_MM_true(CONCURRENT_INIT_COMPLETE == stats->getExecutionMode());

	/* thread is the winner for requesting a GC (possibly through recursive calls).  proceed with acquiring exclusive access. */
	this->acquireExclusiveVMAccess();

	return true;
}
Example #4
TEST_F(ThreadCreateTest, NumaSetAffinity)
{
	uintptr_t status = 0;
	omrthread_t thread;
	omrthread_monitor_t monitor = NULL; /* NULL so the cleanup path can tell whether init succeeded */
	numadata_t data;
	uintptr_t nodeCount = 0;
	intptr_t affinityResultCode = 0;

	if (0 != J9THREAD_VERBOSE(omrthread_monitor_init(&monitor, 0))) {
		omrTestEnv->log(LEVEL_ERROR, "Failed to initialize monitor\n");
		status |= NULL_ATTR;
		goto endtest;
	}

	data.monitor = monitor;
	data.status = 0;
	data.expectedAffinity = 0;

	omrthread_monitor_enter(monitor);

	nodeCount = 1;
	affinityResultCode = omrthread_numa_get_node_affinity(omrthread_self(), &data.expectedAffinity, &nodeCount);
	if (J9THREAD_NUMA_ERR_AFFINITY_NOT_SUPPORTED == affinityResultCode) {
		/* this platform can't meaningfully run this test so just end */
		omrTestEnv->log(LEVEL_ERROR, "NUMA-level thread affinity not supported on this platform\n");
		goto endtest;
	}
	if (J9THREAD_NUMA_OK != affinityResultCode) {
		omrTestEnv->log(LEVEL_ERROR, "Failed to get parent thread's affinity\n");
		status |= CREATE_FAILED;
		goto endtest;
	}

	if (J9THREAD_SUCCESS != J9THREAD_VERBOSE(omrthread_create_ex(&thread, J9THREAD_ATTR_DEFAULT, 0, numaSetAffinityThreadMain, &data))) {
		omrTestEnv->log(LEVEL_ERROR, "Failed to create the thread\n");
		status |= CREATE_FAILED;
		goto endtest;
	}

	if (0 != omrthread_monitor_wait(monitor)) {
		omrTestEnv->log(LEVEL_ERROR, "Failed to wait on monitor\n");
		status |= NULL_ATTR;
		goto endtest;
	}

	status |= data.status;
endtest:
	if (NULL != monitor) {
		omrthread_monitor_exit(monitor);
		omrthread_monitor_destroy(monitor);
	}
	ASSERT_EQ((uintptr_t)0, status) << "Failed with Code: " << std::hex << status;
}
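Example #4 references a child entry point, numaSetAffinityThreadMain, that is not shown. A hedged sketch of what it plausibly does, using only the numadata_t fields the parent touches (monitor, status, expectedAffinity); the failure-flag choice (EXPECTED_VALID, seen in Example #10) and the exact checks are assumptions:

/* Hedged sketch of the child side of Example #4; the real numaSetAffinityThreadMain
 * may differ. The child verifies it inherited the parent's affinity, records any
 * failure in data->status, and notifies the waiting parent.
 */
static int J9THREAD_PROC
numaSetAffinityThreadMainSketch(void *arg)
{
	numadata_t *data = (numadata_t *)arg;
	uintptr_t affinity = 0;
	uintptr_t nodeCount = 1;

	omrthread_monitor_enter(data->monitor);
	if (0 != omrthread_numa_get_node_affinity(omrthread_self(), &affinity, &nodeCount)) {
		data->status |= EXPECTED_VALID; /* failure-flag choice is an assumption */
	} else if (affinity != data->expectedAffinity) {
		data->status |= EXPECTED_VALID;
	}
	omrthread_monitor_notify(data->monitor); /* wake the parent's omrthread_monitor_wait() */
	omrthread_monitor_exit(data->monitor);
	return 0;
}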
Example #5
intptr_t
sem_wait_zos(j9sem_t s)
{
	zos_sem_t *zs = (zos_sem_t *) s;

	omrthread_monitor_enter(zs->monitor);
	while (zs->count == 0) {
		omrthread_monitor_wait(zs->monitor);
	}
	zs->count--;
	omrthread_monitor_exit(zs->monitor);

	return 0;
}
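Example #5 shows only the wait half of the z/OS semaphore emulation; it relies on a post operation that increments the count and notifies under the same monitor. A hedged counterpart sketch, assuming the same zos_sem_t layout (the real z/OS implementation may differ):

/* Hedged counterpart to sem_wait_zos(); the real implementation may differ.
 * Wakes one thread blocked in the count == 0 loop above.
 */
intptr_t
sem_post_zos_sketch(j9sem_t s)
{
	zos_sem_t *zs = (zos_sem_t *) s;

	omrthread_monitor_enter(zs->monitor);
	zs->count++;
	omrthread_monitor_notify(zs->monitor); /* the woken waiter re-tests the count */
	omrthread_monitor_exit(zs->monitor);

	return 0;
}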
Example #6
void
MM_MasterGCThread::shutdown()
{
	Assert_MM_true(NULL != _collectorControlMutex);
	if ((STATE_ERROR != _masterThreadState) && (STATE_DISABLED != _masterThreadState)) {
		/* tell the background thread to shut down and then wait for it to exit */
		omrthread_monitor_enter(_collectorControlMutex);
		while(STATE_TERMINATED != _masterThreadState) {
			_masterThreadState = STATE_TERMINATION_REQUESTED;
			omrthread_monitor_notify(_collectorControlMutex);
			omrthread_monitor_wait(_collectorControlMutex);
		}
		omrthread_monitor_exit(_collectorControlMutex);
		
		/* don't NULL _collector as RAS could still trigger a collection after we've started shutting down */
	}
}
Example #7
static omr_error_t
waitForTestChildThread(OMRTestVM *testVM, omrthread_t childThread, TestChildThreadData *childData)
{
	omr_error_t childRc = OMR_ERROR_NONE;
	OMRPORT_ACCESS_FROM_OMRPORT(testVM->portLibrary);

	omrthread_monitor_enter(childData->shutdownCond);
	while (!childData->isDead) {
		omrthread_monitor_wait(childData->shutdownCond);
	}
	omrthread_monitor_exit(childData->shutdownCond);

	childRc = childData->childRc;

	omrthread_monitor_destroy(childData->shutdownCond);
	omrmem_free_memory(childData);
	return childRc;
}
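For the wait loop in Example #7 to terminate, the child thread must set isDead and notify under the same monitor before it exits. A hedged sketch of that child-side step; the field names (shutdownCond, childRc, isDead) come from the parent code above, and the helper itself is hypothetical:

/* Illustrative child-side half of the shutdown handshake; the real test child
 * does its work before reaching this point.
 */
static void
signalChildDeath(TestChildThreadData *childData, omr_error_t rc)
{
	omrthread_monitor_enter(childData->shutdownCond);
	childData->childRc = rc;
	childData->isDead = TRUE; /* assumes a BOOLEAN-style flag */
	omrthread_monitor_notify(childData->shutdownCond); /* wake the waiting parent */
	omrthread_monitor_exit(childData->shutdownCond);
}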
Example #8
bool
MM_MasterGCThread::startup()
{
	/* set the success flag to false; we will set it to true if everything succeeds */
	bool success = false;

	if (_extensions->fvtest_disableExplictMasterThread) {
		/* GC should be able to act even if master thread is not created (or late) */
		_masterThreadState = STATE_DISABLED;
		success = true;
	} else {
		/* hold the monitor over start-up of this thread so that we eliminate any timing hole where it might notify us of its start-up state before we wait */
		omrthread_monitor_enter(_collectorControlMutex);
		_masterThreadState = STATE_STARTING;
		intptr_t forkResult = createThreadWithCategory(
			NULL,
			OMR_OS_STACK_SIZE,
			J9THREAD_PRIORITY_NORMAL,
			0,
			master_thread_proc,
			this,
			J9THREAD_CATEGORY_SYSTEM_GC_THREAD);
		if (forkResult == 0) {
			/* thread creation success */
			/* wait to find out whether the thread started up successfully */
			while (STATE_STARTING == _masterThreadState) {
				omrthread_monitor_wait(_collectorControlMutex);
			}
			if (STATE_ERROR != _masterThreadState) {
				/* the master thread managed to start up and is in the waiting state, ready for GC requests */
				success = true;
			}
		} else {
			_masterThreadState = STATE_ERROR;
		}
		omrthread_monitor_exit(_collectorControlMutex);
	}

	return success;
}
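Example #8 passes master_thread_proc to createThreadWithCategory() without showing it. A plausible minimal shape, assuming the proc simply forwards to masterThreadEntryPoint() from Example #2 (the real OMR glue may add signal protection and other bookkeeping around this):

/* Hedged sketch of the thread-start glue referenced above. */
static int J9THREAD_PROC
master_thread_proc_sketch(void *info)
{
	MM_MasterGCThread *masterGCThread = (MM_MasterGCThread *)info;
	masterGCThread->masterThreadEntryPoint(); /* Example #2: runs until termination is requested */
	return 0;
}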
Example #9
bool
MM_EnvironmentBase::acquireExclusiveVMAccessForGC(MM_Collector *collector)
{
	MM_GCExtensionsBase *extensions = getExtensions();
	uintptr_t collectorAccessCount = collector->getExclusiveAccessCount();

	_exclusiveAccessBeatenByOtherThread = false;

	while(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
		if(NULL == extensions->gcExclusiveAccessThreadId) {
			/* there is a chance the thread can win the race to acquire
			 * exclusive access for GC */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			if(NULL == extensions->gcExclusiveAccessThreadId) {
				/* thread is the winner and will request the GC */
				extensions->gcExclusiveAccessThreadId = _omrVMThread;
			}
			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);
		}

		if(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
			/* thread was not the winner for requesting a GC - allow the GC to
			 * proceed and wait for it to complete */
			Assert_MM_true(NULL != extensions->gcExclusiveAccessThreadId);

			_envLanguageInterface->exclusiveAccessForGCBeatenByOtherThread();

			_envLanguageInterface->releaseVMAccess();

			/* there is a chance the GC will already have executed at this
			 * point or other threads will re-win and re-execute.  loop until
			 * the thread sees that no more GCs are being requested.
			 */
			omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
			while(NULL != extensions->gcExclusiveAccessThreadId) {
				omrthread_monitor_wait(extensions->gcExclusiveAccessMutex);
			}
			/* thread can now win and will request a GC */
			extensions->gcExclusiveAccessThreadId = _omrVMThread;

			omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);

			this->acquireVMAccess();
			_envLanguageInterface->exclusiveAccessForGCObtainedAfterBeatenByOtherThread();
		}
	}

	/* thread is the winner for requesting a GC (possibly through recursive
	 * calls).  proceed with acquiring exclusive access. */
	Assert_MM_true(_omrVMThread == extensions->gcExclusiveAccessThreadId);

	this->acquireExclusiveVMAccess();

	_exclusiveAccessBeatenByOtherThread = (collector->getExclusiveAccessCount() != collectorAccessCount);

	collector->incrementExclusiveAccessCount();

	GC_OMRVMInterface::flushCachesForGC(this);

	return !_exclusiveAccessBeatenByOtherThread;
}
Example #10
TEST_F(ThreadCreateTest, NumaSetAffinitySuspended)
{
	omrthread_t thread;
	omrthread_monitor_t monitor;
	numadata_t data;

	intptr_t result = 0;
	uintptr_t status = 0;
	uintptr_t numaMaxNode = omrthread_numa_get_max_node();
	uintptr_t numaNode = 0;
	uintptr_t expectedAffinityBeforeStart = 0;
	omrthread_monitor_init(&monitor, 0);

	if (numaMaxNode > 0) {
		uintptr_t nodeCount = 1;
		/* first, see if we can even run this test */
		intptr_t affinityResultCode = omrthread_numa_get_node_affinity(omrthread_self(), &data.expectedAffinity, &nodeCount);
		data.monitor = monitor;
		data.status = 0;

		if (J9THREAD_NUMA_ERR_AFFINITY_NOT_SUPPORTED == affinityResultCode) {
			/* this platform can't meaningfully run this test so just end */
			omrTestEnv->log(LEVEL_ERROR, "NUMA-level thread affinity not supported on this platform\n");
			goto endtest;
		}
		/* Create the thread suspended */
		if (J9THREAD_SUCCESS != J9THREAD_VERBOSE(omrthread_create_ex(&thread, J9THREAD_ATTR_DEFAULT, 1, numaSetAffinitySuspendedThreadMain, &data))) {
			status |= CREATE_FAILED;
			goto endtest;
		}

		/* Set the affinity to the highest node which has CPUs associated to it */
		numaNode = numaMaxNode;
		while (numaNode > 0) {
			omrTestEnv->log(LEVEL_ERROR, "Setting thread numa affinity to %zu\n", numaNode);
			result = omrthread_numa_set_node_affinity(thread, &numaNode, 1, 0);
			if (result == J9THREAD_NUMA_ERR_NO_CPUS_FOR_NODE) {
				omrTestEnv->log(LEVEL_ERROR, "Tried to set thread numa affinity to node %zu, but no CPUs associated with node\n", numaNode);
				numaNode--;
				continue;
			} else if (result != 0) {
				omrTestEnv->log(LEVEL_ERROR, "Failed to set affinity to %zu\n", numaNode);
				status |= EXPECTED_VALID;
				goto endtest;
			} else {
				data.expectedAffinity = numaNode;
				break;
			}
		}

		/* Check that the affinity on the suspended thread is indeed what we set it to */
		if (0 != J9THREAD_VERBOSE(omrthread_numa_get_node_affinity(thread, &expectedAffinityBeforeStart, &nodeCount))) {
			omrTestEnv->log(LEVEL_ERROR, "Failed to get affinity on the thread while it's still suspended\n");
			status |= EXPECTED_VALID;
			goto endtest;
		}

		if (expectedAffinityBeforeStart != data.expectedAffinity) {
			omrTestEnv->log(LEVEL_ERROR, "Suspended thread's deferred affinity is not what it should be. Expected:%zu Actual:%zu\n", data.expectedAffinity, expectedAffinityBeforeStart);
			status |= EXPECTED_VALID;
			goto endtest;
		}

		J9THREAD_VERBOSE(omrthread_monitor_enter(monitor));
		if (1 != omrthread_resume(thread)) {
			omrTestEnv->log(LEVEL_ERROR, "Failed to resume the thread\n");
			goto endtest;
		}

		if (0 != J9THREAD_VERBOSE(omrthread_monitor_wait(monitor))) {
			status |= NULL_ATTR;
			goto endtest;
		}

		status |= data.status;
	} else {
		omrTestEnv->log("Doesn't look like NUMA is available on this system\n");
	}

endtest:
	omrthread_monitor_exit(monitor);
	omrthread_monitor_destroy(monitor);
	ASSERT_EQ((uintptr_t)0, status) << "Failed with Code: " << std::hex << status;
}
Example #11
intptr_t
CMonitor::Wait(void)
{
	return omrthread_monitor_wait(m_monitor);
}
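Example #11's wrapper returns the raw result of omrthread_monitor_wait() and does nothing about spurious or stale wakeups, so callers must supply the predicate loop themselves, as every other wait in this section does. A hedged usage sketch, assuming CMonitor also exposes Enter() and Exit() wrappers and that the predicate is only touched while the monitor is held:

/* Hedged usage sketch; Enter()/Exit() and the workReady predicate are assumptions. */
static void
waitForWork(CMonitor &monitor, bool &workReady)
{
	monitor.Enter();
	while (!workReady) { /* re-check: a wakeup may be spurious or already stale */
		monitor.Wait();
	}
	monitor.Exit();
}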
Example #12
bool
MM_EnvironmentBase::acquireExclusiveVMAccessForGC(MM_Collector *collector, bool failIfNotFirst, bool flushCaches)
{
	MM_GCExtensionsBase *extensions = getExtensions();
	uintptr_t collectorAccessCount = collector->getExclusiveAccessCount();

	/* Does the current thread have exclusive vm access? */
	if (_omrVMThread->exclusiveCount > 0) {
		/* Did the current thread get exclusive vm access via the GC exclusive vm access APIs
		 * (or has it already come through this code)?
		 */
		if (_omrVMThread != extensions->gcExclusiveAccessThreadId) {
			/* The current thread did not get exclusive vm access via the GC exclusive vm access API */
			/* If another thread has started to get exclusive vm access for a GC, cache that value so it
			 * can be restored later. It is possible for a thread to win setting itself as the thread
			 * requesting exclusive vm access for a GC but not actually be the next thread to perform
			 * a GC.  This can happen if a system GC is requested while holding exclusive access.
			 */
			_cachedGCExclusiveAccessThreadId = (OMR_VMThread *)extensions->gcExclusiveAccessThreadId;
			/* Current thread has exclusive VM access so there is no reason to grab the mutex! */
			extensions->gcExclusiveAccessThreadId = _omrVMThread;
		}
	} else {
		while(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
			if(NULL == extensions->gcExclusiveAccessThreadId) {
				/* there is a chance the thread can win the race to acquire
				 * exclusive access for GC */
				omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
				if(NULL == extensions->gcExclusiveAccessThreadId) {
					/* thread is the winner and will request the GC */
					extensions->gcExclusiveAccessThreadId = _omrVMThread;
				}
				omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);
			}

			if(_omrVMThread != extensions->gcExclusiveAccessThreadId) {
				/* thread was not the winner for requesting a GC - allow the GC to
				 * proceed and wait for it to complete */
				Assert_MM_true(NULL != extensions->gcExclusiveAccessThreadId);

				uintptr_t accessMask;
				_delegate.releaseCriticalHeapAccess(&accessMask);

				/* there is a chance the GC will already have executed at this
				 * point or other threads will re-win and re-execute.  loop until
				 * the thread sees that no more GCs are being requested.
				 */
				omrthread_monitor_enter(extensions->gcExclusiveAccessMutex);
				while(NULL != extensions->gcExclusiveAccessThreadId) {
					omrthread_monitor_wait(extensions->gcExclusiveAccessMutex);
				}

				if (failIfNotFirst) {
					if(collector->getExclusiveAccessCount() != collectorAccessCount) {
						_exclusiveAccessBeatenByOtherThread = true;
						omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);
						_delegate.reacquireCriticalHeapAccess(accessMask);
						return false;
					}
				}

				/* thread can now win and will request a GC */
				extensions->gcExclusiveAccessThreadId = _omrVMThread;

				omrthread_monitor_exit(extensions->gcExclusiveAccessMutex);

				_delegate.reacquireCriticalHeapAccess(accessMask);
			}
		}
	}

	_exclusiveAccessBeatenByOtherThread = (collector->getExclusiveAccessCount() != collectorAccessCount);

	/* thread is the winner for requesting a GC (possibly through recursive
	 * calls).  proceed with acquiring exclusive access. */
	Assert_MM_true(_omrVMThread == extensions->gcExclusiveAccessThreadId);

	acquireExclusiveVMAccess();

	collector->incrementExclusiveAccessCount();

	if (flushCaches) {
		GC_OMRVMInterface::flushCachesForGC(this);
	}

	return !_exclusiveAccessBeatenByOtherThread;
}
Example #13
bool
MM_MasterGCThread::garbageCollect(MM_EnvironmentBase *env, MM_AllocateDescription *allocDescription)
{
	Assert_MM_mustHaveExclusiveVMAccess(env->getOmrVMThread());
	bool didAttemptCollect = false;
	
	if (NULL != _collector) {
		/* the collector has started up so try to run */
		/* once the master thread has stored itself in the _masterGCThread, it should never need to collect - this would hang */
		Assert_MM_true(omrthread_self() != _masterGCThread);
		if (_runAsImplicit || (NULL == _masterGCThread)) {
			/* We might not have _masterGCThread in the startup phase or late in the shutdown phase.
			 * For example, there may be a native out-of-memory during startup or RAS may 
			 * trigger a GC after we've shut down the master thread.
			 */
			Assert_MM_true(0 == env->getSlaveID());
			_collector->preMasterGCThreadInitialize(env);
			_collector->masterThreadGarbageCollect(env, allocDescription);

			if (_runAsImplicit && _collector->isConcurrentWorkAvailable(env)) {
				omrthread_monitor_enter(_collectorControlMutex);

				if (STATE_WAITING == _masterThreadState) {
					_masterThreadState = STATE_GC_REQUESTED;
					omrthread_monitor_notify(_collectorControlMutex);
				}

				omrthread_monitor_exit(_collectorControlMutex);
			}
		} else {
			/* this is the general case, when the master thread is running internally */
			omrthread_monitor_enter(_collectorControlMutex);
			/* The variable assignments below are safe because we hold Xaccess.  Otherwise, it is possible (based on the wait/notify mechanism here)
			 * that another thread could come in under this mutex and stomp on the "parameters" while another thread is waiting.
			 */
			_allocDesc = allocDescription;
			_incomingCycleState = env->_cycleState;
			MasterGCThreadState previousState = _masterThreadState;
			_masterThreadState = STATE_GC_REQUESTED;
			if (STATE_WAITING == previousState) {
				omrthread_monitor_notify(_collectorControlMutex);
			} else if (STATE_RUNNING_CONCURRENT == previousState) {
				_collector->forceConcurrentFinish();
			} else {
				Assert_MM_unreachable();
			}
			
			/* The master thread will claim exclusive VM access. Artificially give it up in this thread so that tools like -Xcheck:vm continue to work. */
			uintptr_t savedExclusiveCount = env->relinquishExclusiveVMAccess();
			while (STATE_GC_REQUESTED == _masterThreadState) {
				omrthread_monitor_wait(_collectorControlMutex);
			}
			env->assumeExclusiveVMAccess(savedExclusiveCount);

			Assert_MM_true(NULL == _incomingCycleState);
			omrthread_monitor_exit(_collectorControlMutex);
		}
		
		didAttemptCollect = true;
	}
	return didAttemptCollect;
}
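In Example #13 the requester blocks while the state remains STATE_GC_REQUESTED and asserts that _incomingCycleState was consumed once it wakes. A hedged sketch of the master-side completion step that releases it (a hypothetical method; the real handleSTW() also claims and releases exclusive VM access and does the full cycle-state bookkeeping):

/* Hedged sketch of the handshake half of the master-side STW handling; only
 * the state transitions visible in Examples #2 and #13 are reproduced.
 * _collectorControlMutex is held on entry, as in masterThreadEntryPoint().
 */
void
MM_MasterGCThread::handleSTWSketch(MM_EnvironmentBase *env)
{
	env->_cycleState = _incomingCycleState; /* adopt the requester's cycle state (assumption) */
	_collector->masterThreadGarbageCollect(env, _allocDesc);
	env->_cycleState = NULL;
	_incomingCycleState = NULL; /* garbageCollect() asserts this once it wakes */
	_masterThreadState = STATE_WAITING; /* leave STATE_GC_REQUESTED... */
	omrthread_monitor_notify_all(_collectorControlMutex); /* ...and wake the waiting requester */
}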