bool Target::AllocateAndFillRandomDataWriteBuffer() { assert(_pRandomDataWriteBuffer == nullptr); bool fOk = true; size_t cb = static_cast<size_t>(GetRandomDataWriteBufferSize()); assert(cb > 0); // TODO: make sure the size if <= max value for size_t if (GetUseLargePages()) { size_t cbMinLargePage = GetLargePageMinimum(); size_t cbRoundedSize = (cb + cbMinLargePage - 1) & ~(cbMinLargePage - 1); _pRandomDataWriteBuffer = (BYTE *)VirtualAlloc(nullptr, cbRoundedSize, MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_EXECUTE_READWRITE); } else { _pRandomDataWriteBuffer = (BYTE *)VirtualAlloc(nullptr, cb, MEM_COMMIT, PAGE_READWRITE); } fOk = (_pRandomDataWriteBuffer != nullptr); if (fOk) { fOk = _FillRandomDataWriteBuffer(); } return fOk; }
/* Native-side initialization entry point. Should only be reached from the CLR. */
DLL_EXPORT VOID __stdcall __INIT_TRINITY_C__()
{
#ifdef TRINITY_PLATFORM_WINDOWS
    // Cache the minimum large-page size once at startup (Windows only).
    Memory::LargePageMinimum = GetLargePageMinimum();
#endif
    // Query the working-set size; the return value is discarded here, so this
    // is presumably called for its side effects — verify against Memory impl.
    Memory::GetWorkingSetSize();
#ifndef TRINITY_DISABLE_PREEMPTIVE
    Runtime::ProbeCLRMethodAddresses();
#endif
#ifdef TRINITY_OPTIONAL_PREEMPTIVE
    Runtime::__transition_enabled = true;
#endif
    // Start the background task scheduler last, after memory/runtime setup.
    BackgroundThread::TaskScheduler::Start();
}
bool ThreadParameters::AllocateAndFillBufferForTarget(const Target& target) { bool fOk = true; BYTE *pDataBuffer = nullptr; size_t cbDataBuffer = target.GetBlockSizeInBytes() * target.GetRequestCount(); if (target.GetUseLargePages()) { size_t cbMinLargePage = GetLargePageMinimum(); size_t cbRoundedSize = (cbDataBuffer + cbMinLargePage - 1) & ~(cbMinLargePage - 1); pDataBuffer = (BYTE *)VirtualAlloc(nullptr, cbRoundedSize, MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_EXECUTE_READWRITE); } else { pDataBuffer = (BYTE *)VirtualAlloc(nullptr, cbDataBuffer, MEM_COMMIT, PAGE_READWRITE); } fOk = (pDataBuffer != nullptr); //fill buffer (useful only for write tests) if (fOk && target.GetWriteRatio() > 0) { if (target.GetZeroWriteBuffers()) { memset(pDataBuffer, 0, cbDataBuffer); } else { for (size_t i = 0; i < cbDataBuffer; i++) { pDataBuffer[i] = (BYTE)(i % 256); } } } if (fOk) { vpDataBuffers.push_back(pDataBuffer); } return fOk; }
/*
 * Round `size` up to the next multiple of the system's huge-page size.
 * Returns the rounded size (0 stays 0).
 */
size_t pixie_align_huge(size_t size)
{
    size_t align;

#if defined(WIN32)
    /*
     * GetLargePageMinimum() returns 0 when the processor/OS does not support
     * large pages; without a guard, `align - 1` would underflow and the mask
     * below would zero out `size`. Fall back to a 2 MiB alignment.
     */
    align = GetLargePageMinimum();
    if (align == 0)
        align = 2 * 1024 * 1024;
#else
    /* Assume x86/ARM sizes if there is no easy API to retrieve that
     * information for us */
    if (sizeof(void*) == 8)
        align = 2 * 1024 * 1024;
    else
        align = 4 * 1024 * 1024;
#endif

    /* align is a power of two, so round up with mask arithmetic. */
    size = (size + (align - 1)) & (~(align - 1));
    return size;
}
/*
    ANT_MEMORY::ANT_MEMORY()
    ------------------------
    Construct the allocator: detect the small and large page sizes for the
    current platform, then record the allocation block size and the memory
    ceiling (0 means "no ceiling", stored as LLONG_MAX).
*/
ANT_memory::ANT_memory(long long block_size_for_allocation, long long memory_ceiling)
{
#ifdef _MSC_VER
    OSVERSIONINFO os_info;
    SYSTEM_INFO hardware_info = {0};

    os_info.dwOSVersionInfoSize = sizeof(os_info);
    // NOTE(review): GetVersionEx is deprecated and, without an app manifest,
    // may report 6.2 on Windows 8.1+ — confirm this version gate still
    // behaves as intended on modern Windows.
    GetVersionEx(&os_info);
    has_large_pages = os_info.dwMajorVersion >= 6;      // version 6 is Vista & Server2008 - prior to this there was no large page support
    large_page_size = has_large_pages ? GetLargePageMinimum() : 0;
    GetSystemInfo(&hardware_info);
    short_page_size = hardware_info.dwPageSize;
#else
    // POSIX: no distinct large-page query here; use the normal page size for
    // both, falling back to 4K if sysconf fails.
    if ((short_page_size = large_page_size = sysconf(_SC_PAGESIZE)) <= 0)
        short_page_size = large_page_size = 4096;       // use 4K blocks by default (as this is the Pentium (and ARM) small page size)
#endif
    //printf("Large Page Size: %lld Small Page Size:%lld\n", (long long)large_page_size, (long long)short_page_size);
    this->block_size = block_size_for_allocation;
    this->memory_ceiling = memory_ceiling == 0 ? LLONG_MAX : memory_ceiling;
    chunk = NULL;
    rewind();
}
/*
 * PGSharedMemoryCreate
 *
 * Create a shared memory segment of the given size and initialize its
 * standard header.
 *
 * makePrivate means to always create a new segment, rather than attach to
 * or recycle any existing segment. On win32, we always create a new segment,
 * since there is no need for recycling (segments go away automatically
 * when the last backend exits)
 */
PGShmemHeader *
PGSharedMemoryCreate(Size size, bool makePrivate, int port, PGShmemHeader **shim)
{
    void       *memAddress;
    PGShmemHeader *hdr;
    HANDLE      hmap, hmap2;
    char       *szShareMem;
    int         i;
    DWORD       size_high;
    DWORD       size_low;
    SIZE_T      largePageSize = 0;
    Size        orig_size = size;       /* kept for huge-page fallback below */
    DWORD       flProtect = PAGE_READWRITE;

    /* Room for a header? */
    Assert(size > MAXALIGN(sizeof(PGShmemHeader)));

    szShareMem = GetSharedMemName();
    UsedShmemSegAddr = NULL;

    if (huge_pages == HUGE_PAGES_ON || huge_pages == HUGE_PAGES_TRY)
    {
        /* Does the processor support large pages? */
        largePageSize = GetLargePageMinimum();
        if (largePageSize == 0)
        {
            /* With HUGE_PAGES_ON this is fatal; with TRY we just log and
             * fall through with plain pages. */
            ereport(huge_pages == HUGE_PAGES_ON ? FATAL : DEBUG1,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("the processor does not support large pages")));
            ereport(DEBUG1, (errmsg("disabling huge pages")));
        }
        else if (!EnableLockPagesPrivilege(huge_pages == HUGE_PAGES_ON ? FATAL : DEBUG1))
        {
            ereport(DEBUG1, (errmsg("disabling huge pages")));
        }
        else
        {
            /* Huge pages available and privilege enabled, so turn on */
            flProtect = PAGE_READWRITE | SEC_COMMIT | SEC_LARGE_PAGES;

            /* Round size up as appropriate. */
            if (size % largePageSize != 0)
                size += largePageSize - (size % largePageSize);
        }
    }

retry:
    /* Split the (possibly >4GB on _WIN64) size into the two DWORDs that
     * CreateFileMapping expects. */
#ifdef _WIN64
    size_high = size >> 32;
#else
    size_high = 0;
#endif
    size_low = (DWORD) size;

    /*
     * When recycling a shared memory segment, it may take a short while
     * before it gets dropped from the global namespace. So re-try after
     * sleeping for a second, and continue retrying 10 times. (both the 1
     * second time and the 10 retries are completely arbitrary)
     */
    for (i = 0; i < 10; i++)
    {
        /*
         * In case CreateFileMapping() doesn't set the error code to 0 on
         * success
         */
        SetLastError(0);

        hmap = CreateFileMapping(INVALID_HANDLE_VALUE,  /* Use the pagefile */
                                 NULL,  /* Default security attrs */
                                 flProtect,
                                 size_high,     /* Size Upper 32 Bits */
                                 size_low,      /* Size Lower 32 bits */
                                 szShareMem);

        if (!hmap)
        {
            if (GetLastError() == ERROR_NO_SYSTEM_RESOURCES &&
                huge_pages == HUGE_PAGES_TRY &&
                (flProtect & SEC_LARGE_PAGES) != 0)
            {
                elog(DEBUG1, "CreateFileMapping(%zu) with SEC_LARGE_PAGES failed, "
                     "huge pages disabled",
                     size);

                /*
                 * Use the original size, not the rounded-up value, when falling back
                 * to non-huge pages.
                 */
                size = orig_size;
                flProtect = PAGE_READWRITE;
                goto retry;
            }
            else
                ereport(FATAL,
                        (errmsg("could not create shared memory segment: error code %lu",
                                GetLastError()),
                         errdetail("Failed system call was CreateFileMapping(size=%zu, name=%s).",
                                   size, szShareMem)));
        }

        /*
         * If the segment already existed, CreateFileMapping() will return a
         * handle to the existing one and set ERROR_ALREADY_EXISTS.
         */
        if (GetLastError() == ERROR_ALREADY_EXISTS)
        {
            CloseHandle(hmap);  /* Close the handle, since we got a valid one
                                 * to the previous segment. */
            hmap = NULL;
            Sleep(1000);
            continue;
        }
        break;
    }

    /*
     * If the last call in the loop still returned ERROR_ALREADY_EXISTS, this
     * shared memory segment exists and we assume it belongs to somebody else.
     */
    if (!hmap)
        ereport(FATAL,
                (errmsg("pre-existing shared memory block is still in use"),
                 errhint("Check if there are any old server processes still running, and terminate them.")));

    free(szShareMem);

    /*
     * Make the handle inheritable
     */
    if (!DuplicateHandle(GetCurrentProcess(), hmap, GetCurrentProcess(),
                         &hmap2, 0, TRUE, DUPLICATE_SAME_ACCESS))
        ereport(FATAL,
                (errmsg("could not create shared memory segment: error code %lu",
                        GetLastError()),
                 errdetail("Failed system call was DuplicateHandle.")));

    /*
     * Close the old, non-inheritable handle. If this fails we don't really
     * care.
     */
    if (!CloseHandle(hmap))
        elog(LOG, "could not close handle to shared memory: error code %lu", GetLastError());

    /*
     * Get a pointer to the new shared memory segment. Map the whole segment
     * at once, and let the system decide on the initial address.
     */
    memAddress = MapViewOfFileEx(hmap2, FILE_MAP_WRITE | FILE_MAP_READ, 0, 0, 0, NULL);
    if (!memAddress)
        ereport(FATAL,
                (errmsg("could not create shared memory segment: error code %lu",
                        GetLastError()),
                 errdetail("Failed system call was MapViewOfFileEx.")));

    /*
     * OK, we created a new segment. Mark it as created by this process. The
     * order of assignments here is critical so that another Postgres process
     * can't see the header as valid but belonging to an invalid PID!
     */
    hdr = (PGShmemHeader *) memAddress;
    hdr->creatorPID = getpid();
    hdr->magic = PGShmemMagic;

    /*
     * Initialize space allocation status for segment.
     */
    hdr->totalsize = size;
    hdr->freeoffset = MAXALIGN(sizeof(PGShmemHeader));
    hdr->dsm_control = 0;

    /* Save info for possible future use */
    UsedShmemSegAddr = memAddress;
    UsedShmemSegSize = size;
    UsedShmemSegID = hmap2;

    /* Register on-exit routine to delete the new segment */
    on_shmem_exit(pgwin32_SharedMemoryDelete, PointerGetDatum(hmap2));

    *shim = hdr;
    return hdr;
}
void *BigAllocInternal( size_t sizeToAllocate, size_t *sizeAllocated, bool reserveOnly, size_t *pageSize) /*++ Routine Description: Allocate memory, using large pages if both appropriate and possible, and always using VirtualAlloc (meaning that this will always use at least one VM page, so you shouldn't use it for small stuff, only gigantic data structures for which you want to reduce TLB misses and cache misses on the page table). Use malloc or new for ordinary allocations. Arguments: sizeToAllocate - The amount of memory that is needed sizeAllocated - Optional parameter that if provided returns the amount of memory actually allocated, which will always be >= sizeToAllocate (unless the allocation fails). reserveOnly - If TRUE, will only reserve address space, must call BigCommit to commit memory pageSize - Optional parameter that if provided returns the page size (not large page size) Return Value: pointer to the memory allocated, or NULL if the allocation failed. --*/ { if (sizeToAllocate == 0) { sizeToAllocate = 1; } static bool warningPrinted = false; void *allocatedMemory; SYSTEM_INFO systemInfo[1]; GetSystemInfo(systemInfo); size_t virtualAllocSize = ((sizeToAllocate + systemInfo->dwPageSize - 1) / systemInfo->dwPageSize) * systemInfo->dwPageSize; if (pageSize != NULL) { *pageSize = systemInfo->dwPageSize; } // // Try to do the VirtualAlloc using large pages if the size we're getting is at last one large page. // Callers should have asserted the SeLockMemoryPrivilege if they want large pages. // size_t largePageSize = GetLargePageMinimum(); DWORD commitFlag = reserveOnly ? 0 : MEM_COMMIT; if (0 != largePageSize && virtualAllocSize >= largePageSize) { // // Start by asserting the SeLockMemoryPrivilege, which is necessary for large page allocations. It's overkill to // do this every time, it only has to happen once/thread. However, a BigAllocation is a big deal and shouldn't be // happening very much, so we just don't worry about the extra cost. 
// BOOL assertPrivilegeWorked = AssertPrivilege("SeLockMemoryPrivilege"); DWORD assertPrivilegeError = GetLastError(); size_t largePageSizeToAllocate = ((virtualAllocSize + largePageSize - 1) / largePageSize) * largePageSize; #if _DEBUG largePageSizeToAllocate += largePageSize; // For the guard page. #endif // DEBUG allocatedMemory = (BYTE *)VirtualAlloc(0,largePageSizeToAllocate,commitFlag|MEM_RESERVE|((BigAllocUseHugePages && !reserveOnly) ? MEM_LARGE_PAGES : 0),PAGE_READWRITE); if (NULL != allocatedMemory) { #if _DEBUG DWORD oldProtect; if (!VirtualProtect((char *)allocatedMemory + virtualAllocSize, systemInfo->dwPageSize, PAGE_NOACCESS, &oldProtect)) { static bool printedVirtualProtectedWarning = false; if (! printedVirtualProtectedWarning) { fprintf(stderr,"VirtualProtect for guard page failed, %d\n", GetLastError()); printedVirtualProtectedWarning = true; } } largePageSizeToAllocate -= largePageSize; // Back out the guard page #endif // DEBUG if (NULL != sizeAllocated) { *sizeAllocated = largePageSizeToAllocate; } return allocatedMemory; } else if (!warningPrinted) { // // The first time we fail, print out a warning and then fall back to VirtualAlloc. We want be able to use // the fallback because the caller might not be able to assert the appropriate privilege and we'd still like // to run. The check for printing only once isn't thread safe, so you might get more than one printed // if multiple threads fail at the same time. // warningPrinted = true; fprintf(stderr,"BigAlloc: WARNING: Unable to allocate large page memory, %d. Falling back to VirtualAlloc. Performance may be adversely affected. Size = %lld\n", GetLastError(), largePageSizeToAllocate); if (!assertPrivilegeWorked || GetLastError() == 1314) { // TODO: Look up the error code name for 1314. 
fprintf(stderr,"BigAlloc: Unable to assert the SeLockMemoryPrivilege (%d), which is probably why it failed.\n",assertPrivilegeError); fprintf(stderr,"Try secpol.msc, then SecuritySettings, Local Policies, User Rights Assignment.\n"); fprintf(stderr,"Then double click 'Lock Pages in Memory,' add the current user directly or by being\n"); fprintf(stderr,"In a group and then reboot (you MUST reboot) for it to work.\n"); } } } allocatedMemory = (BYTE *)VirtualAlloc(0,virtualAllocSize,commitFlag|MEM_RESERVE,PAGE_READWRITE); if (NULL != allocatedMemory && NULL != sizeAllocated) { *sizeAllocated = virtualAllocSize; } if (NULL == allocatedMemory) { fprintf(stderr,"BigAlloc of size %lld failed.\n", sizeToAllocate); soft_exit(1); } return allocatedMemory; }