/**
 * Allocate and initialize an object on behalf of the given VM thread.
 *
 * @param omrVMThread the requesting VM thread
 * @param allocator the allocation/initialization descriptor to use
 * @return the newly allocated object, or NULL on failure
 */
omrobjectptr_t
OMR_GC_AllocateObject(OMR_VMThread * omrVMThread, MM_AllocateInitialization *allocator)
{
	MM_EnvironmentBase *environment = MM_EnvironmentBase::getEnvironment(omrVMThread);
	/* Allocation requires a fully initialized GC: the global collector must already exist. */
	Assert_MM_true(NULL != environment->getExtensions()->getGlobalCollector());
	return allocator->allocateAndInitializeObject(omrVMThread);
}
/**
 * Verbose GC handler for the concurrent kickoff event.
 *
 * Emits a <concurrent-kickoff> stanza describing why concurrent collection was
 * kicked off and how much free memory remained at that point. Output is
 * serialized with other verbose handlers via the atomic reporting block.
 *
 * @param hook the hook interface that dispatched this event (unused here)
 * @param eventNum the hook event number (unused here)
 * @param eventData an MM_ConcurrentKickoffEvent describing the kickoff
 */
void MM_VerboseHandlerOutputStandard::handleConcurrentKickoff(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ConcurrentKickoffEvent* event = (MM_ConcurrentKickoffEvent*)eventData;
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);
	MM_GCExtensionsBase* extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());
	OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

	char tagTemplate[200];
	/* Block other verbose handlers so the stanza is written atomically. */
	enterAtomicReportingBlock();
	getTagTemplate(tagTemplate, sizeof(tagTemplate), manager->getIdAndIncrement(), omrtime_current_time_millis());
	writer->formatAndOutput(env, 0, "<concurrent-kickoff %s>", tagTemplate);
	const char* reasonString = getConcurrentKickoffReason(eventData);

	if (extensions->scavengerEnabled) {
		/* Generational configuration: the nursery free-space figure is meaningful, so include it. */
		writer->formatAndOutput(
			env, 1,
			"<kickoff reason=\"%s\" targetBytes=\"%zu\" thresholdFreeBytes=\"%zu\" remainingFree=\"%zu\" tenureFreeBytes=\"%zu\" nurseryFreeBytes=\"%zu\" />",
			reasonString, event->traceTarget, event->kickOffThreshold, event->remainingFree,
			event->commonData->tenureFreeBytes, event->commonData->nurseryFreeBytes);
	} else {
		/* Flat-heap configuration: report tenure free space only. */
		writer->formatAndOutput(
			env, 1,
			"<kickoff reason=\"%s\" targetBytes=\"%zu\" thresholdFreeBytes=\"%zu\" remainingFree=\"%zu\" tenureFreeBytes=\"%zu\" />",
			reasonString, event->traceTarget, event->kickOffThreshold, event->remainingFree,
			event->commonData->tenureFreeBytes);
	}
	writer->formatAndOutput(env, 0, "</concurrent-kickoff>");
	writer->flush(env);
	/* Hook for subclass-/language-specific additional kickoff output. */
	handleConcurrentKickoffInternal(env, eventData);
	exitAtomicReportingBlock();
}
void MM_VerboseHandlerOutputStandard::handleCompactEnd(J9HookInterface** hook, uintptr_t eventNum, void* eventData) { MM_CompactEndEvent* event = (MM_CompactEndEvent*)eventData; MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->omrVMThread); MM_VerboseManager* manager = getManager(); MM_VerboseWriterChain* writer = manager->getWriterChain(); MM_CompactStats *compactStats = &MM_GCExtensionsBase::getExtensions(env->getOmrVM())->globalGCStats.compactStats; uint64_t duration = 0; bool deltaTimeSuccess = getTimeDeltaInMicroSeconds(&duration, compactStats->_startTime, compactStats->_endTime); enterAtomicReportingBlock(); handleGCOPOuterStanzaStart(env, "compact", env->_cycleState->_verboseContextID, duration, deltaTimeSuccess); if(COMPACT_PREVENTED_NONE == compactStats->_compactPreventedReason) { writer->formatAndOutput(env, 1, "<compact-info movecount=\"%zu\" movebytes=\"%zu\" reason=\"%s\" />", compactStats->_movedObjects, compactStats->_movedBytes, getCompactionReasonAsString(compactStats->_compactReason)); } else { writer->formatAndOutput(env, 1, "<compact-info reason=\"%s\" />", getCompactionReasonAsString(compactStats->_compactReason)); writer->formatAndOutput(env, 1, "<warning details=\"compaction prevented due to %s\" />", getCompactionPreventedReasonAsString(compactStats->_compactPreventedReason)); } handleCompactEndInternal(env, eventData); handleGCOPOuterStanzaEnd(env); writer->flush(env); exitAtomicReportingBlock(); }
/**
 * Verbose GC handler for the scavenge (nursery collection) end event.
 *
 * Writes a "scavenge" stanza containing tenure-age/tilt info, copy statistics
 * for the nursery and tenure spaces, copy failures, tenure-expansion resize
 * info, and any overflow/backout warnings recorded during the scavenge.
 *
 * @param hook the hook interface that dispatched this event (unused here)
 * @param eventNum the hook event number (unused here)
 * @param eventData an MM_ScavengeEndEvent for the completed scavenge
 */
void MM_VerboseHandlerOutputStandard::handleScavengeEnd(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_ScavengeEndEvent* event = (MM_ScavengeEndEvent*)eventData;
	MM_EnvironmentBase* env = MM_EnvironmentBase::getEnvironment(event->currentThread);
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(env->getOmrVM());
	MM_VerboseManager* manager = getManager();
	MM_VerboseWriterChain* writer = manager->getWriterChain();
	MM_ScavengerStats *scavengerStats = &extensions->scavengerStats;
	OMRPORT_ACCESS_FROM_OMRPORT(env->getPortLibrary());
	uint64_t duration = 0;
	bool deltaTimeSuccess = getTimeDeltaInMicroSeconds(&duration, scavengerStats->_startTime, scavengerStats->_endTime);

	/* Serialize with other verbose handlers for the duration of the stanza. */
	enterAtomicReportingBlock();
	handleGCOPOuterStanzaStart(env, "scavenge", env->_cycleState->_verboseContextID, duration, deltaTimeSuccess);

	writer->formatAndOutput(env, 1, "<scavenger-info tenureage=\"%zu\" tenuremask=\"%4zx\" tiltratio=\"%zu\" />",
		scavengerStats->_tenureAge, scavengerStats->getFlipHistory(0)->_tenureMask, scavengerStats->_tiltRatio);

	/* Objects copied (flipped) within the nursery, if any. */
	if (0 != scavengerStats->_flipCount) {
		writer->formatAndOutput(env, 1, "<memory-copied type=\"nursery\" objects=\"%zu\" bytes=\"%zu\" bytesdiscarded=\"%zu\" />",
			scavengerStats->_flipCount, scavengerStats->_flipBytes, scavengerStats->_flipDiscardBytes);
	}
	/* Objects promoted (tenured) out of the nursery, if any. */
	if (0 != scavengerStats->_tenureAggregateCount) {
		writer->formatAndOutput(env, 1, "<memory-copied type=\"tenure\" objects=\"%zu\" bytes=\"%zu\" bytesdiscarded=\"%zu\" />",
			scavengerStats->_tenureAggregateCount, scavengerStats->_tenureAggregateBytes, scavengerStats->_tenureDiscardBytes);
	}
	/* Copy failures into each destination space, if any occurred. */
	if (0 != scavengerStats->_failedFlipCount) {
		writer->formatAndOutput(env, 1, "<copy-failed type=\"nursery\" objects=\"%zu\" bytes=\"%zu\" />",
			scavengerStats->_failedFlipCount, scavengerStats->_failedFlipBytes);
	}
	if (0 != scavengerStats->_failedTenureCount) {
		writer->formatAndOutput(env, 1, "<copy-failed type=\"tenure\" objects=\"%zu\" bytes=\"%zu\" />",
			scavengerStats->_failedTenureCount, scavengerStats->_failedTenureBytes);
	}

	handleScavengeEndInternal(env, eventData);

	/* Report any tenure-space expansion performed to satisfy the collector. */
	if(0 != scavengerStats->_tenureExpandedCount) {
		uint64_t expansionMicros = omrtime_hires_delta(0, scavengerStats->_tenureExpandedTime, OMRPORT_TIME_DELTA_IN_MICROSECONDS);
		outputCollectorHeapResizeInfo(env, 1, HEAP_EXPAND, scavengerStats->_tenureExpandedBytes,
			scavengerStats->_tenureExpandedCount, MEMORY_TYPE_OLD, SATISFY_COLLECTOR, expansionMicros);
	}

	if(scavengerStats->_rememberedSetOverflow) {
		writer->formatAndOutput(env, 1, "<warning details=\"remembered set overflow detected\" />");
		/* Distinguish the scavenge that caused the overflow from one merely running under it. */
		if(scavengerStats->_causedRememberedSetOverflow) {
			writer->formatAndOutput(env, 1, "<warning details=\"remembered set overflow triggered\" />");
		}
	}
	if(scavengerStats->_scanCacheOverflow) {
		writer->formatAndOutput(env, 1, "<warning details=\"scan cache overflow (storage acquired from heap)\" />");
	}
	if(scavengerStats->_backout) {
		writer->formatAndOutput(env, 1, "<warning details=\"aborted collection due to insufficient free space\" />");
	}

	handleGCOPOuterStanzaEnd(env);
	writer->flush(env);
	exitAtomicReportingBlock();
}
/**
 * Entry point for the dedicated master GC thread.
 *
 * Attaches a VM thread for the master GC thread, signals the creating thread
 * (via _collectorControlMutex) that startup succeeded or failed, then services
 * GC requests in a monitor-driven state-machine loop until termination is
 * requested. The thread finally detaches and exits via omrthread_exit() on the
 * control monitor, so the shutting-down thread can observe STATE_TERMINATED.
 */
void
MM_MasterGCThread::masterThreadEntryPoint()
{
	OMR_VMThread *omrVMThread = NULL;
	Assert_MM_true(NULL != _collectorControlMutex);
	Assert_MM_true(NULL == _masterGCThread);

	/* Attach the thread as a system daemon thread */
	/* You need a VM thread so that the stack walker can work */
	omrVMThread = MM_EnvironmentBase::attachVMThread(_extensions->getOmrVM(), "Dedicated GC Master", MM_EnvironmentBase::ATTACH_GC_MASTER_THREAD);
	if (NULL == omrVMThread) {
		/* we failed to attach so notify the creating thread that we should fail to start up */
		omrthread_monitor_enter(_collectorControlMutex);
		_masterThreadState = STATE_ERROR;
		omrthread_monitor_notify(_collectorControlMutex);
		/* terminate this thread; omrthread_exit releases the monitor passed to it */
		omrthread_exit(_collectorControlMutex);
	} else {
		/* thread attached successfully */
		MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);

		/* attachVMThread could allocate and execute a barrier (since at that point this
		 * thread acted as a mutator thread). Flush GC caches (like barrier buffers)
		 * before turning into the master thread. */
		env->flushGCCaches();

		env->setThreadType(GC_MASTER_THREAD);

		/* Begin running the thread */
		omrthread_monitor_enter(_collectorControlMutex);
		_collector->preMasterGCThreadInitialize(env);
		_masterThreadState = STATE_WAITING;
		_masterGCThread = omrthread_self();
		/* wake the creating thread: startup has completed */
		omrthread_monitor_notify(_collectorControlMutex);
		do {
			if (STATE_GC_REQUESTED == _masterThreadState) {
				if (_runAsImplicit) {
					handleConcurrent(env);
				} else {
					handleSTW(env);
				}
			}

			if (STATE_WAITING == _masterThreadState) {
				/* In explicit mode there may be concurrent work to resume before
				 * sleeping; only wait when handleConcurrent reports nothing to do. */
				if (_runAsImplicit || !handleConcurrent(env)) {
					omrthread_monitor_wait(_collectorControlMutex);
				}
			}
		} while (STATE_TERMINATION_REQUESTED != _masterThreadState);
		/* notify the other side that we are terminated so that they can continue running */
		_masterThreadState = STATE_TERMINATED;
		_masterGCThread = NULL;
		omrthread_monitor_notify(_collectorControlMutex);

		MM_EnvironmentBase::detachVMThread(_extensions->getOmrVM(), omrVMThread, MM_EnvironmentBase::ATTACH_GC_MASTER_THREAD);
		omrthread_exit(_collectorControlMutex);
	}
}
/**
 * Perform a system (explicit) garbage collection.
 *
 * Lazily initializes the global collector on first use, then drives a
 * system collect with the supplied GC code.
 *
 * @param omrVMThread the requesting VM thread
 * @param gcCode code identifying the kind of system collection requested
 * @return OMR_ERROR_NONE on success, or the collector-initialization error
 */
omr_error_t
OMR_GC_SystemCollect(OMR_VMThread* omrVMThread, uint32_t gcCode)
{
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);
	MM_GCExtensionsBase *extensions = env->getExtensions();
	omr_error_t rc = OMR_ERROR_NONE;

	/* Bring up the collector on demand if it has not been created yet. */
	if (NULL == extensions->getGlobalCollector()) {
		rc = OMR_GC_InitializeCollector(omrVMThread);
	}
	if (OMR_ERROR_NONE == rc) {
		extensions->heap->systemGarbageCollect(env, gcCode);
	}
	return rc;
}
/**
 * Verbose GC handler for the sweep end event: writes a timed "sweep" stanza
 * for the just-completed sweep phase.
 */
void MM_VerboseHandlerOutputStandard::handleSweepEnd(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_SweepEndEvent* event = (MM_SweepEndEvent*)eventData;
	MM_EnvironmentBase* environment = MM_EnvironmentBase::getEnvironment(event->currentThread);
	MM_SweepStats *stats = &MM_GCExtensionsBase::getExtensions(environment->getOmrVM())->globalGCStats.sweepStats;

	uint64_t elapsedMicros = 0;
	bool timeValid = getTimeDeltaInMicroSeconds(&elapsedMicros, stats->_startTime, stats->_endTime);

	enterAtomicReportingBlock();
	handleGCOPStanza(environment, "sweep", environment->_cycleState->_verboseContextID, elapsedMicros, timeValid);
	handleSweepEndInternal(environment, eventData);
	exitAtomicReportingBlock();
}
/**
 * Factory method: allocate an environment from the extensions' environment
 * pool, placement-construct it, and initialize it.
 *
 * @param extensions the GC extensions owning the environment pool
 * @param omrVMThread the VM thread this environment is bound to
 * @return the initialized environment, or NULL on allocation/init failure
 */
MM_EnvironmentBase *
MM_EnvironmentBase::newInstance(MM_GCExtensionsBase *extensions, OMR_VMThread *omrVMThread)
{
	MM_EnvironmentBase *env = NULL;
	void *memory = pool_newElement(extensions->environments);
	if (NULL != memory) {
		env = new(memory) MM_EnvironmentBase(omrVMThread);
		if (!env->initialize(extensions)) {
			/* initialization failed: tear the instance down and report failure */
			env->kill();
			env = NULL;
		}
	}
	return env;
}
/**
 * Allocate a new area for the fragment from the parent sublist.
 *
 * Flushes the raw fragment first, then asks the parent sublist pool for a new
 * allocation. On failure (scavenger builds) the remembered-set overflow state
 * is raised so the failure is not silently lost.
 *
 * @param vmThreadRawPtr the OMR_VMThread, passed as an opaque pointer
 * @param fragmentPrimitive the raw fragment to (re)populate
 * @return 0 on success, non-zero on failure
 */
uintptr_t
allocateMemoryForSublistFragment(void *vmThreadRawPtr, J9VMGC_SublistFragment *fragmentPrimitive)
{
	OMR_VMThread *omrVMThread = (OMR_VMThread*)vmThreadRawPtr;
	MM_SublistFragment fragment(fragmentPrimitive);
	MM_SublistFragment::flush(fragmentPrimitive);
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);
	MM_SublistPool *parentPool = (MM_SublistPool *)fragmentPrimitive->parentList;
	if (parentPool->allocate(env, &fragment)) {
		return 0;
	}
#if defined(OMR_GC_MODRON_SCAVENGER)
	env->getExtensions()->setRememberedSetOverflowState();
#endif /* OMR_GC_MODRON_SCAVENGER */
	return 1;
}
/**
 * Verbose GC handler for the scavenge percolate event: emits a single
 * <percolate-collect> element recording that a nursery collection was
 * escalated to a global collection, and why.
 */
void MM_VerboseHandlerOutputStandard::handleScavengePercolate(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_PercolateCollectEvent *event = (MM_PercolateCollectEvent *)eventData;
	MM_EnvironmentBase* environment = MM_EnvironmentBase::getEnvironment(event->currentThread);
	MM_VerboseManager* verboseManager = getManager();
	MM_VerboseWriterChain* chain = verboseManager->getWriterChain();
	OMRPORT_ACCESS_FROM_OMRPORT(environment->getPortLibrary());

	char tagTemplate[200];
	getTagTemplate(tagTemplate, sizeof(tagTemplate), omrtime_current_time_millis());

	enterAtomicReportingBlock();
	chain->formatAndOutput(environment, 0,
		"<percolate-collect id=\"%zu\" from=\"%s\" to=\"%s\" reason=\"%s\" %s/>",
		verboseManager->getIdAndIncrement(), "nursery", "global",
		getPercolateReasonAsString((PercolateReason)event->reason), tagTemplate);
	chain->flush(environment);
	handleScavengePercolateInternal(environment, eventData);
	exitAtomicReportingBlock();
}
/**
 * Common object-allocation path used by the OMR GC allocation API.
 *
 * Adjusts the requested size via the language object model, performs the
 * allocation through the environment's allocation interface, optionally
 * zeroes the returned memory, and (when collectOnFailure is set) releases
 * any exclusive VM access acquired by an allocation-triggered collection.
 *
 * @param omrVMThread the calling VM thread
 * @param sizeInBytes the requested size, before object-model adjustment
 * @param flags OMR_GC_ALLOCATE_ZERO_MEMORY and/or OMR_GC_THREAD_AT_SAFEPOINT
 * @param collectOnFailure if true, a failed allocation may trigger a GC
 * @return pointer to the allocated object storage, or NULL on failure
 */
omrobjectptr_t static inline
allocHelper(OMR_VMThread * omrVMThread, size_t sizeInBytes, uintptr_t flags, bool collectOnFailure)
{
	MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(omrVMThread->_vm);
	MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread);
	/* Record that this thread is allocating; restored via popVMstate() on exit. */
	uintptr_t vmState = env->pushVMstate(J9VMSTATE_GC_ALLOCATE_OBJECT);
#if defined(OMR_GC_THREAD_LOCAL_HEAP)
	if (!env->_envLanguageInterface->isInlineTLHAllocateEnabled()) {
		/* For duration of call restore TLH allocate fields;
		 * we will hide real heapAlloc again on exit to fool JIT/Interpreter
		 * into thinking TLH is full if needed */
		env->_envLanguageInterface->enableInlineTLHAllocate();
	}
#endif /* OMR_GC_THREAD_LOCAL_HEAP */
	uintptr_t allocatedFlags = 0;
	/* Let the language object model pad/align the requested size. */
	uintptr_t sizeAdjusted = extensions->objectModel.adjustSizeInBytes(sizeInBytes);
	bool threadAtSafePoint = J9_ARE_ALL_BITS_SET(flags, OMR_GC_THREAD_AT_SAFEPOINT);
	MM_AllocateDescription allocdescription(sizeAdjusted, allocatedFlags, collectOnFailure, threadAtSafePoint);
	/* OMRTODO: Under what conditions could this assert fail? */
	assert(omrVMThread->memorySpace == env->getMemorySpace());
	omrobjectptr_t heapBytes = (omrobjectptr_t)env->_objectAllocationInterface->allocateObject(env, &allocdescription, env->getMemorySpace(), collectOnFailure);
	/* OMRTODO: Should we use zero TLH instead of memset? */
	if (NULL != heapBytes) {
		/* Zero the storage only when the caller explicitly asked for it. */
		if (J9_ARE_ALL_BITS_SET(flags, OMR_GC_ALLOCATE_ZERO_MEMORY)) {
			uintptr_t size = allocdescription.getBytesRequested();
			memset(heapBytes, 0, size);
		}
	}
	allocdescription.setAllocationSucceeded(NULL != heapBytes);
	/* Issue Allocation Failure Report if required */
	env->allocationFailureEndReportIfRequired(&allocdescription);
	if (collectOnFailure) {
		/* Done allocation - successful or not */
		/* OMRTODO: We are releasing exclusive before writing any of the object header, meaning
		 * it may not be walkable in the event of another GC or heap traversal. Is this
		 * the right place to release or should the caller be responsible? */
		env->unwindExclusiveVMAccessForGC();
	}
	env->popVMstate(vmState);
#if defined(OMR_GC_THREAD_LOCAL_HEAP)
	/* Re-hide the TLH allocate fields on exit when inline allocation must stay
	 * disabled (test hooks, instrumentation, or an allocation-threshold cache limit). */
	if (extensions->fvtest_disableInlineAllocation || extensions->instrumentableAllocateHookEnabled || extensions->disableInlineCacheForAllocationThreshold) {
		env->_envLanguageInterface->disableInlineTLHAllocate();
	}
#endif /* OMR_GC_THREAD_LOCAL_HEAP */
	return heapBytes;
}
/**
 * Verbose GC handler for the mark end event: writes a "mark" stanza with
 * object/scan trace counts for the just-completed mark phase.
 */
void MM_VerboseHandlerOutputStandard::handleMarkEnd(J9HookInterface** hook, uintptr_t eventNum, void* eventData)
{
	MM_MarkEndEvent* event = (MM_MarkEndEvent*)eventData;
	MM_EnvironmentBase* environment = MM_EnvironmentBase::getEnvironment(event->currentThread);
	MM_MarkStats *stats = &MM_GCExtensionsBase::getExtensions(environment->getOmrVM())->globalGCStats.markStats;
	MM_VerboseWriterChain* chain = getManager()->getWriterChain();

	uint64_t elapsedMicros = 0;
	bool timeValid = getTimeDeltaInMicroSeconds(&elapsedMicros, stats->_startTime, stats->_endTime);

	enterAtomicReportingBlock();
	handleGCOPOuterStanzaStart(environment, "mark", environment->_cycleState->_verboseContextID, elapsedMicros, timeValid);
	chain->formatAndOutput(environment, 1,
		"<trace-info objectcount=\"%zu\" scancount=\"%zu\" scanbytes=\"%zu\" />",
		stats->_objectsMarked, stats->_objectsScanned, stats->_bytesScanned);
	handleMarkEndInternal(environment, eventData);
	handleGCOPOuterStanzaEnd(environment);
	chain->flush(environment);
	exitAtomicReportingBlock();
}
int testMain(int argc, char ** argv, char **envp) { /* Start up */ OMR_VM_Example exampleVM; OMR_VMThread *omrVMThread = NULL; omrthread_t self = NULL; exampleVM._omrVM = NULL; exampleVM.rootTable = NULL; /* Initialize the VM */ omr_error_t rc = OMR_Initialize(&exampleVM, &exampleVM._omrVM); Assert_MM_true(OMR_ERROR_NONE == rc); /* Recursive omrthread_attach() (i.e. re-attaching a thread that is already attached) is cheaper and less fragile * than non-recursive. If performing a sequence of function calls that are likely to attach & detach internally, * it is more efficient to call omrthread_attach() before the entire block. */ int j9rc = (int) omrthread_attach_ex(&self, J9THREAD_ATTR_DEFAULT); Assert_MM_true(0 == j9rc); /* Initialize root table */ exampleVM.rootTable = hashTableNew( exampleVM._omrVM->_runtime->_portLibrary, OMR_GET_CALLSITE(), 0, sizeof(RootEntry), 0, 0, OMRMEM_CATEGORY_MM, rootTableHashFn, rootTableHashEqualFn, NULL, NULL); /* Initialize heap and collector */ { /* This has to be done in local scope because MM_StartupManager has a destructor that references the OMR VM */ MM_StartupManagerImpl startupManager(exampleVM._omrVM); rc = OMR_GC_IntializeHeapAndCollector(exampleVM._omrVM, &startupManager); } Assert_MM_true(OMR_ERROR_NONE == rc); /* Attach current thread to the VM */ rc = OMR_Thread_Init(exampleVM._omrVM, NULL, &omrVMThread, "GCTestMailThread"); Assert_MM_true(OMR_ERROR_NONE == rc); /* Kick off the dispatcher therads */ rc = OMR_GC_InitializeDispatcherThreads(omrVMThread); Assert_MM_true(OMR_ERROR_NONE == rc); OMRPORT_ACCESS_FROM_OMRVM(exampleVM._omrVM); omrtty_printf("VM/GC INITIALIZED\n"); /* Do stuff */ MM_EnvironmentBase *env = MM_EnvironmentBase::getEnvironment(omrVMThread); MM_ObjectAllocationInterface *allocationInterface = env->_objectAllocationInterface; MM_GCExtensionsBase *extensions = env->getExtensions(); omrtty_printf("configuration is %s\n", extensions->configuration->getBaseVirtualTypeId()); omrtty_printf("collector 
interface is %s\n", env->getExtensions()->collectorLanguageInterface->getBaseVirtualTypeId()); omrtty_printf("garbage collector is %s\n", env->getExtensions()->getGlobalCollector()->getBaseVirtualTypeId()); omrtty_printf("allocation interface is %s\n", allocationInterface->getBaseVirtualTypeId()); /* Allocate objects without collection until heap exhausted */ uintptr_t allocatedFlags = 0; uintptr_t size = extensions->objectModel.adjustSizeInBytes(24); MM_AllocateDescription mm_allocdescription(size, allocatedFlags, true, true); uintptr_t allocatedCount = 0; while (true) { omrobjectptr_t obj = (omrobjectptr_t)allocationInterface->allocateObject(env, &mm_allocdescription, env->getMemorySpace(), false); if (NULL != obj) { extensions->objectModel.setObjectSize(obj, mm_allocdescription.getBytesRequested()); RootEntry rEntry = {"root1", obj}; RootEntry *entryInTable = (RootEntry *)hashTableAdd(exampleVM.rootTable, &rEntry); if (NULL == entryInTable) { omrtty_printf("failed to add new root to root table!\n"); } /* update entry if it already exists in table */ entryInTable->rootPtr = obj; allocatedCount++; } else { break; } } /* Print/verify thread allocation stats before GC */ MM_AllocationStats *allocationStats = allocationInterface->getAllocationStats(); omrtty_printf("thread allocated %d tlh bytes, %d non-tlh bytes, from %d allocations before NULL\n", allocationStats->tlhBytesAllocated(), allocationStats->nontlhBytesAllocated(), allocatedCount); uintptr_t allocationTotalBytes = allocationStats->tlhBytesAllocated() + allocationStats->nontlhBytesAllocated(); uintptr_t allocatedTotalBytes = size * allocatedCount; Assert_MM_true(allocatedTotalBytes == allocationTotalBytes); /* Force GC to print verbose system allocation stats -- should match thread allocation stats from before GC */ omrobjectptr_t obj = (omrobjectptr_t)allocationInterface->allocateObject(env, &mm_allocdescription, env->getMemorySpace(), true); env->unwindExclusiveVMAccessForGC(); Assert_MM_false(NULL == 
obj); extensions->objectModel.setObjectSize(obj, mm_allocdescription.getBytesRequested()); omrtty_printf("ALL TESTS PASSED\n"); /* Shut down */ /* Shut down the dispatcher therads */ rc = OMR_GC_ShutdownDispatcherThreads(omrVMThread); Assert_MM_true(OMR_ERROR_NONE == rc); /* Shut down collector */ rc = OMR_GC_ShutdownCollector(omrVMThread); Assert_MM_true(OMR_ERROR_NONE == rc); /* Detach from VM */ rc = OMR_Thread_Free(omrVMThread); Assert_MM_true(OMR_ERROR_NONE == rc); /* Shut down heap */ rc = OMR_GC_ShutdownHeap(exampleVM._omrVM); Assert_MM_true(OMR_ERROR_NONE == rc); /* Free root hash table */ hashTableFree(exampleVM.rootTable); /* Balance the omrthread_attach_ex() issued above */ omrthread_detach(self); /* Shut down VM * This destroys the port library and the omrthread library. * Don't use any port library or omrthread functions after this. * * (This also shuts down trace functionality, so the trace assertion * macros might not work after this.) */ rc = OMR_Shutdown(exampleVM._omrVM); Assert_MM_true(OMR_ERROR_NONE == rc); return rc; }
/**
 * Acquire exclusive VM access.
 *
 * Records an exclusive-access count of 1 on the current thread, then takes the
 * VM thread list mutex.
 *
 * NOTE(review): exclusiveCount is written before the mutex is acquired —
 * presumably acceptable only because this implementation serves a
 * single-mutator example; confirm against callers' expectations.
 */
virtual void acquireExclusiveVMAccess() { _omrThread->exclusiveCount = 1; omrthread_monitor_enter(_env->getOmrVM()->_vmThreadListMutex); }
/**
 * Releases exclusive VM access.
 *
 * Clears the exclusive-access count (while still holding the VM thread list
 * mutex) and then releases the mutex acquired by acquireExclusiveVMAccess().
 */
virtual void releaseExclusiveVMAccess() { _omrThread->exclusiveCount = 0; omrthread_monitor_exit(_env->getOmrVM()->_vmThreadListMutex); }