// Map 2*Chunk-sized allocations until the allocator's next address would land
// past `stagingArea`, so that no free chunk remains on the near side of it.
//
// Parameters:
//   tempChunks       - out-param: number of chunks recorded in chunkPool that
//                      the caller must later unmap on success.
//   stagingArea      - base of the already-mapped staging region.
//   chunkPool        - caller-provided array of at least MaxTempChunks slots.
//   addressesGrowDown- true if the OS hands out successively lower addresses.
//
// Returns true when the space before the staging area is exhausted (running
// out of memory counts as success); false if the observed address ordering is
// inconsistent, in which case everything mapped here AND the staging area are
// unmapped before returning.
//
// Note: `(a < b) ^ addressesGrowDown` flips the comparison's sense; `<` binds
// tighter than `^`, and `^`/`&&` tighter than `||`, so no extra parens needed.
bool fillSpaceBeforeStagingArea(int &tempChunks, void *stagingArea, void **chunkPool, bool addressesGrowDown) {
  // Make sure there are no available chunks before the staging area.
  tempChunks = 0;
  chunkPool[tempChunks++] = mapMemory(2 * Chunk);
  // Keep mapping while the most recent chunk is still on the near side of the
  // staging area (direction-adjusted) and we have pool slots left.
  while (tempChunks < MaxTempChunks && chunkPool[tempChunks - 1] && (chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown) {
    chunkPool[tempChunks++] = mapMemory(2 * Chunk);
    if (!chunkPool[tempChunks - 1])
      break; // We already have our staging area, so OOM here is okay.
    if ((chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^ addressesGrowDown)
      break; // The address growth direction is inconsistent!
  }
  // OOM also means success in this case.
  if (!chunkPool[tempChunks - 1]) {
    // Drop the null entry so the caller only unmaps real mappings.
    --tempChunks;
    return true;
  }
  // Bail if we can't guarantee the right address space layout: the last chunk
  // must have reached past the staging area, and (when we mapped more than
  // one) the final pair must still agree with the claimed growth direction.
  if ((chunkPool[tempChunks - 1] < stagingArea) ^ addressesGrowDown || (tempChunks > 1 && (chunkPool[tempChunks - 1] < chunkPool[tempChunks - 2]) ^ addressesGrowDown)) {
    while (--tempChunks >= 0)
      unmapPages(chunkPool[tempChunks], 2 * Chunk);
    unmapPages(stagingArea, StagingSize);
    return false;
  }
  return true;
}
// Quick one-shot probe of the OS allocation direction: map two chunks back to
// back and report whether the second landed at a higher address. (A more
// robust multi-sample overload taking a bool* out-param also exists.)
bool addressesGrowUp() {
  void *first = mapMemory(2 * Chunk);
  void *second = mapMemory(2 * Chunk);
  // Release both probes before answering; only their addresses matter.
  unmapPages(first, 2 * Chunk);
  unmapPages(second, 2 * Chunk);
  return first < second;
}
// Exercise the chunk allocator on a platform whose addresses grow DOWN.
// Carves an aligned StagingSize region out of a larger reservation, walls off
// the space above it with temp chunks, then drives positionIsCorrect() through
// a series of layout scenarios (the pattern strings encode which slots are
// mapped 'x', free '-', or expected-result 'oo').
// Returns true if every scenario passes; CHECK() returns false on failure.
bool testGCAllocatorDown(const size_t PageSize) {
  const size_t UnalignedSize = StagingSize + Alignment - PageSize;
  void *chunkPool[MaxTempChunks];
  // Allocate a contiguous chunk that we can partition for testing.
  void *stagingArea = mapMemory(UnalignedSize);
  if (!stagingArea)
    return false;
  // Ensure that the staging area is aligned.
  unmapPages(stagingArea, UnalignedSize);
  if (offsetFromAligned(stagingArea)) {
    void *stagingEnd = (void *)(uintptr_t(stagingArea) + UnalignedSize);
    const size_t Offset = offsetFromAligned(stagingEnd);
    // Place the area at the highest aligned address.
    stagingArea = (void *)(uintptr_t(stagingEnd) - Offset - StagingSize);
  }
  // Re-map exactly the aligned staging region. NOTE(review): the return value
  // is not checked here — presumably the range was just freed so re-mapping is
  // expected to succeed; verify against mapMemoryAt's contract.
  mapMemoryAt(stagingArea, StagingSize);
  // Make sure there are no available chunks above the staging area.
  int tempChunks;
  if (!fillSpaceBeforeStagingArea(tempChunks, stagingArea, chunkPool, true))
    return false;
  // Unmap the staging area so we can set it up for testing.
  unmapPages(stagingArea, StagingSize);
  // Check that the first chunk is used if it is aligned.
  CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool, tempChunks));
  // Check that the first chunk is used if it can be aligned.
  CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool, tempChunks));
  // Check that an aligned chunk after a single unalignable chunk is used.
  CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool, tempChunks));
  // Check that we fall back to the slow path after two unalignable chunks.
  CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool, tempChunks));
  // Check that we also fall back after an unalignable and an alignable chunk.
  CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool, tempChunks));
  // Check that the last ditch allocator works as expected.
  CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool, tempChunks, UseLastDitchAllocator));
  // Clean up.
  while (--tempChunks >= 0)
    unmapPages(chunkPool[tempChunks], 2 * Chunk);
  return true;
}
/*
 * Robustly detect the OS address-allocation direction: map a run of chunks,
 * tally how many consecutive pairs went up vs. down, and only report a result
 * when a strong majority agrees. Writes the direction (true = grows up) to
 * *resultOut and returns true; CHECK() returns false early on failure.
 */
bool addressesGrowUp(bool* resultOut) {
  static const unsigned ChunksToTest = 20;
  static const int ThresholdCount = 15;
  void* chunks[ChunksToTest];
  for (unsigned i = 0; i < ChunksToTest; i++) {
    chunks[i] = mapMemory(2 * Chunk);
    // NOTE(review): a CHECK failure here returns without unmapping earlier
    // chunks — acceptable for a test, presumably, but worth confirming.
    CHECK(chunks[i]);
  }
  int growsUp = 0;
  int growsDown = 0;
  for (unsigned i = 0; i + 1 < ChunksToTest; i++) {
    if (chunks[i] < chunks[i + 1]) {
      growsUp++;
    } else {
      growsDown++;
    }
  }
  for (unsigned i = 0; i < ChunksToTest; i++) {
    unmapPages(chunks[i], 2 * Chunk);
  }
  /* Check results were mostly consistent. */
  CHECK(abs(growsUp - growsDown) >= ThresholdCount);
  *resultOut = growsUp > growsDown;
  return true;
}