/* This method is moved here so that AC can see the large full page and cache it */
uintptr_t *
MM_AllocationContextSegregated::allocateLarge(MM_EnvironmentBase *env, uintptr_t sizeInBytesRequired)
{
	uintptr_t regionsNeeded = _regionPool->divideUpRegion(sizeInBytesRequired);
	uintptr_t excessAllowed = 0;
	MM_HeapRegionDescriptorSegregated *allocatedRegion = NULL;

	/* Retry the pool allocation with a geometrically growing excess tolerance
	 * (0, 1, 3, 7, ...) until a region is obtained or the tolerance saturates
	 * at MAX_UINT. The first attempt always runs (excess starts at 0).
	 */
	do {
		allocatedRegion = _regionPool->allocateFromRegionPool(env, regionsNeeded, OMR_SIZECLASSES_LARGE, excessAllowed);
		excessAllowed = (2 * excessAllowed) + 1;
	} while ((NULL == allocatedRegion) && (excessAllowed < MAX_UINT));

	if (NULL == allocatedRegion) {
		return NULL;
	}

	/* Flush the large page right away: cache the large full region in this AC
	 * and reset the ACL counts for the region's memory pool.
	 */
	_perContextLargeFullRegions->enqueue(allocatedRegion);
	allocatedRegion->getMemoryPoolACL()->resetCounts();

	return (uintptr_t *)allocatedRegion->getLowAddress();
}
/* Attempt to obtain one fresh region of the given size class from the region
 * pool, format it, and publish it as this context's current small region.
 * Returns true on success, false if the pool could not supply a region.
 */
bool
MM_AllocationContextSegregated::tryAllocateFromRegionPool(MM_EnvironmentBase *env, uintptr_t sizeClass)
{
	MM_HeapRegionDescriptorSegregated *freshRegion = _regionPool->allocateFromRegionPool(env, 1, sizeClass, MAX_UINT);
	if (NULL == freshRegion) {
		return false;
	}

	/* cache the small full region in AC */
	_perContextSmallFullRegions[sizeClass]->enqueue(freshRegion);

	freshRegion->formatFresh(env, sizeClass, freshRegion->getLowAddress());

	/* A store barrier is required here since the initialization of the new region needs to write-back before
	 * we make it reachable via _smallRegions (_smallRegions is accessed from outside the lock which covers this write)
	 */
	MM_AtomicOperations::storeSync();
	_smallRegions[sizeClass] = freshRegion;

	return true;
}