// Set the initial size based on any parameters specified on the command line void HeapSizeParameters::SetHeapParameters(unsigned minsize, unsigned maxsize, unsigned percent) { minHeapSize = K_to_words(minsize); // If these overflow assume the result will be zero maxHeapSize = K_to_words(maxsize); POLYUNSIGNED memsize = 0; if (minHeapSize == 0 || maxHeapSize == 0) memsize = GetPhysicalMemorySize() / sizeof(PolyWord); // If no maximum is given default it to 80% of the physical memory. // This allows some space for the OS and other things. if (maxHeapSize == 0 || maxHeapSize > MAXIMUMADDRESS) { if (memsize == 0) maxHeapSize = MAXIMUMADDRESS; else maxHeapSize = memsize - memsize / 5; } // Set the initial size to the minimum if that has been provided. POLYUNSIGNED initialSize = minHeapSize; if (initialSize == 0) { // If no -H option was given set the default initial size to a quarter of the memory. if (memsize == 0) // Unable to determine memory size so default to 64M. initialSize = 64 * 1024 * 1024; else initialSize = memsize / 4; } // Initially we divide the space equally between the major and // minor heaps. That means that there will definitely be space // for the first minor GC to copy its data. This division can be // changed later on. gMem.SetSpaceForHeap(initialSize); gMem.SetSpaceBeforeMinorGC(initialSize/2); lastFreeSpace = initialSize; highWaterMark = initialSize; if (percent == 0) userGCRatio = 1.0 / 9.0; // Default to 10% GC to 90% application else userGCRatio = (float)percent / (float)(100 - percent); predictedRatio = lastMajorGCRatio = userGCRatio; if (debugOptions & DEBUG_HEAPSIZE) { Log("Heap: Initial settings: Initial heap "); LogSize(initialSize); Log(" minimum "); LogSize(minHeapSize); Log(" maximum "); LogSize(maxHeapSize); Log(" target ratio %f\n", userGCRatio); } }
// Reserve and pre-fault one large anonymous read/write mapping sized as
// `fraction_of_physical_memory` of physical RAM.  On return *mapping points
// at the mapping, *mapping_size holds its size in bytes, and the global
// end_addr is set to the first address past the mapping.
void SetupMapping(size_t* mapping_size, void** mapping) {
  *mapping_size = static_cast<uint64_t>(
      static_cast<double>(GetPhysicalMemorySize()) *
      fraction_of_physical_memory);
  *mapping = mmap(NULL, *mapping_size, PROT_READ | PROT_WRITE,
                  MAP_POPULATE | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  // BUG FIX (idiom): compare against the documented MAP_FAILED constant
  // rather than a hand-written (void*)-1.
  // NOTE(review): this check disappears in NDEBUG builds because it is an
  // assert; if release builds are expected, replace with an explicit
  // check-and-abort — confirm build configuration.
  assert(*mapping != MAP_FAILED);
  end_addr = (uintptr_t)((uintptr_t)*mapping + *mapping_size);

  // Write one word into every 4 KiB page so the pages are non-empty
  // (dirtied); MAP_POPULATE already prefaults them on Linux, but the write
  // guarantees they are backed and initialized.
  printf("[!] Initializing large memory mapping ...");
  for (uint64_t index = 0; index < *mapping_size; index += 0x1000) {
    uint64_t* temporary = reinterpret_cast<uint64_t*>(
        static_cast<uint8_t*>(*mapping) + index);
    temporary[0] = index;
  }
  printf("done\n");
}
// ============================================================================ void DumpMemory( const string& prefix) // ============================================================================ { Uint8 totalMemory = GetPhysicalMemorySize(); size_t usedMemory; size_t residentMemory; size_t sharedMemory; if (!GetMemoryUsage(&usedMemory, &residentMemory, &sharedMemory)) { cerr << "Unable to get memory counts!" << endl; } else { cerr << prefix << "Total:" << totalMemory << " Used:" << usedMemory << "(" << (100*usedMemory)/totalMemory <<"%)" << " Resident:" << residentMemory << "(" << int((100.0*residentMemory)/totalMemory) <<"%)" << endl; } }
// Merge one or more terrain packet indexes (given as trailing arguments)
// into a single combined terrain packet file under --output.  Fatal errors
// exit via notify(NFY_FATAL, ...); on success prints output file sizes and
// returns 0.
int main(int argc, char **argv) {
  // On successful completion, print out the output file sizes.
  std::vector<std::string> output_files;
  try {
    std::string progname = argv[0];

    // Process commandline options
    int argn;
    bool help = false;
    std::string outdir;
    int index_version = 0;
    int sortbuf = kDefaultSortBufferMegabytes;
    uint32 numcpus = kDefaultNumCPUs;
    uint32 read_cache_max_blocks = kDefaultReadCacheBlocks;
    uint32 read_cache_block_size = kDefaultReadCacheBlockKilobyteSize;
    khGetopt options;
    options.flagOpt("help", help);
    options.flagOpt("?", help);
    options.opt("output", outdir);
    options.opt("indexversion", index_version);
    options.opt("sortbuf", sortbuf);
    // Numeric options are range-checked at parse time by RangeValidator.
    options.opt("numcpus", numcpus,
                &khGetopt::RangeValidator<uint32, 1, kMaxNumJobsLimit_2>);
    options.opt("read_cache_max_blocks", read_cache_max_blocks,
                &khGetopt::RangeValidator<uint32, 0, 1024>);
    options.opt("read_cache_block_size", read_cache_block_size,
                &khGetopt::RangeValidator<uint32, 1, 1024>);

    if (!options.processAll(argc, argv, argn)) {
      usage(progname);
    }
    if (help) {
      usage(progname);
    }
    if (argn == argc) {
      usage(progname, "No input indexes specified");
    }
    // Cap the requested CPU count at what this host actually provides.
    numcpus = std::min(numcpus, CommandlineNumCPUsDefault());

    // Validate commandline options
    if (!outdir.size()) {
      usage(progname, "No output specified");
    }
    if (index_version <= 0) {
      usage(progname, "Index version not specified or <= 0");
    }
    // NOTE(review): numcpus is already constrained to >= 1 by the
    // RangeValidator above, so this branch looks unreachable; kept as a
    // defensive backstop.
    if (numcpus < 1) {
      usage(progname, "Number of CPUs should not be less than 1");
    }
    if (sortbuf <= 0) {
      notify(NFY_FATAL, "--sortbuf must be > 0, is %d", sortbuf);
    }

    // Create a merge of the terrain indices
    JOBSTATS_BEGIN(job_stats, MERGER_CREATED);    // validate

    // We'll need to limit the number of filebundles opened by the filepool
    // at a single time, to keep from overflowing memory.
    // Allow 50 files for other operations outside the filepool.
    int max_open_fds = GetMaxFds(-50);

    // Read Cache is enabled only if read_cache_max_blocks is >= 2.
    if (read_cache_max_blocks < 2) {
      notify(NFY_WARN, "Read caching is disabled. This will cause %s"
             "to be much slower. To enable, set the "
             "read_cache_blocks setting\n"
             "to a number 2 or greater.\n", argv[0]);
    } else {
      // Get the physical memory size to help choose the read_cache_max_blocks.
      uint64 physical_memory_size = GetPhysicalMemorySize();
      if (physical_memory_size == 0) {
        // Detection failed: fall back to the minimum recommended system size.
        physical_memory_size = kDefaultMinMemoryAssumed;
        notify(NFY_WARN, "Physical Memory available not found. "
               "Assuming min recommended system size: %llu bytes",
               static_cast<long long unsigned int>(physical_memory_size));
      } else {
        notify(NFY_NOTICE, "Physical Memory available: %llu bytes",
               static_cast<long long unsigned int>(physical_memory_size));
      }

      // Convert this read cache block size from kilobytes to bytes.
      read_cache_block_size *= 1024U;

      // Figure out the worst case size of the read cache
      // (if all of max_open_fds are open simultaneously)
      uint64 estimated_read_cache_bytes = max_open_fds *
        static_cast<uint64>(read_cache_max_blocks * read_cache_block_size);
      notify(NFY_NOTICE,
             "Read Cache Settings: %u count %u byte blocks per resource "
             "(max files open set to %u)\n"
             "This will use approximately %llu bytes in memory.",
             read_cache_max_blocks, read_cache_block_size, max_open_fds,
             static_cast<long long unsigned int>(estimated_read_cache_bytes));
      if (estimated_read_cache_bytes > physical_memory_size) {
        // If our worst case read cache blows out our memory, then
        // lower the max_open_fds to bring it to within 90% of the memory.
        // Be careful with overflow here.
        max_open_fds = (physical_memory_size * 90ULL)/
          (100ULL * read_cache_max_blocks * read_cache_block_size);
        notify(NFY_WARN,
               "The estimated read cache size (%llu bytes) exceeds\n"
               "the Physical Memory available: %llu bytes.\n"
               "We are reducing the max files open to %d to eliminate"
               "memory overruns.\n",
               static_cast<long long unsigned int>(estimated_read_cache_bytes),
               static_cast<long long unsigned int>(physical_memory_size),
               max_open_fds);
      }
    }

    // File pool shared by all readers; bounded by the fd budget chosen above.
    geFilePool file_pool(max_open_fds);
    geterrain::CountedPacketFileReaderPool packet_reader_pool(
        "TerrainReaderPool", file_pool);

    // Note: read cache's will not work without at least 2 blocks.
    if (read_cache_max_blocks >= 2) {
      packet_reader_pool.EnableReadCache(read_cache_max_blocks,
                                         read_cache_block_size);
    }

    khDeleteGuard<TerrainMergeType> merger(
        TransferOwnership(new TerrainMergeType("Terrain Merger")));

    // Print the input file sizes for diagnostic log file info.
    std::vector<std::string> input_files;

    fprintf(stderr, "index version: %d\n", index_version);
    // Each trailing argument is a terrain index added as a merge source.
    for (int i = argn; i < argc; ++i) {
      notify(NFY_INFO, "Opening terrain index: %s", argv[i]);
      merger->AddSource(
          TransferOwnership(
              new TranslatingTerrainTraverser(&packet_reader_pool,
                                              argv[i])));
      input_files.push_back(argv[i]);
    }
    khPrintFileSizes("Input File Sizes", input_files);

    merger->Start();
    JOBSTATS_END(job_stats, MERGER_CREATED);

    // Feed this merge into a QuadsetGather operation
    JOBSTATS_BEGIN(job_stats, GATHERER_CREATED);    // validate
    // The gather takes ownership of the merger from here on.
    qtpacket::QuadsetGather<geterrain::TerrainPacketItem>
      gather("TerrainQuadsetGather", TransferOwnership(merger));

    // Create the output packetfile
    geterrain::TerrainCombiner combiner(packet_reader_pool, outdir, numcpus);
    combiner.StartThreads();

    notify(NFY_DEBUG, "started combineterrain");

    // We need to wrap the combiner with a try/catch because otherwise, the
    // exception causes a deconstructor failure which masks the real error
    // which could be a CRC error in one of the terrain packets.
    // NOTE(review): error_message is never assigned or read — candidate for
    // removal.
    std::string error_message;
    try {
      do {
        combiner.CombineTerrainPackets(gather.Current());
      } while (gather.Advance());
    } catch (const khAbortedException &e) {
      notify(NFY_FATAL, "Unable to proceed: See previous warnings: %s",
             e.what());
    } catch (const std::exception &e) {
      notify(NFY_FATAL, "%s", e.what());
    } catch (...) {
      notify(NFY_FATAL, "Unknown error");
    }

    notify(NFY_DEBUG, "waiting for compress and write threads to finish");
    combiner.WaitForThreadsToFinish();
    notify(NFY_DEBUG, "closing the gatherer");
    gather.Close();
    JOBSTATS_END(job_stats, GATHERER_CREATED);

    // Finish the packet file
    JOBSTATS_BEGIN(job_stats, COMBINE);    // validate
    notify(NFY_DEBUG, "writing the packet index");
    // sortbuf is in megabytes; Close() expects bytes.
    combiner.Close(static_cast<size_t>(sortbuf) * 1024 * 1024);
    JOBSTATS_END(job_stats, COMBINE);
    // On successful completion, print the output file sizes.
    output_files.push_back(outdir);
  } catch (const khAbortedException &e) {
    notify(NFY_FATAL, "Unable to proceed: See previous warnings");
  } catch (const std::exception &e) {
    notify(NFY_FATAL, "%s", e.what());
  } catch (...) {
    notify(NFY_FATAL, "Unknown error");
  }

  // at the end, call dump all
  JOBSTATS_DUMPALL();

  // On successful completion, print the output file sizes.
  // The print occurs here to allow progress to go out of scope.
  khPrintFileSizes("Output File Sizes", output_files);
  return 0;
}
// Set the initial size based on any parameters specified on the command line. // Any of these can be zero indicating they should default. void HeapSizeParameters::SetHeapParameters(POLYUNSIGNED minsize, POLYUNSIGNED maxsize, POLYUNSIGNED initialsize, unsigned percent) { minHeapSize = K_to_words(minsize); // If these overflow assume the result will be zero maxHeapSize = K_to_words(maxsize); POLYUNSIGNED initialSize = K_to_words(initialsize); POLYUNSIGNED memsize = GetPhysicalMemorySize() / sizeof(PolyWord); // If no maximum is given default it to 80% of the physical memory. // This allows some space for the OS and other things. if (maxHeapSize == 0 || maxHeapSize > MAXIMUMADDRESS) { if (memsize != 0) maxHeapSize = memsize - memsize / 5; else maxHeapSize = MAXIMUMADDRESS; // But if this must not be smaller than the minimum size. if (maxHeapSize < minHeapSize) maxHeapSize = minHeapSize; if (maxHeapSize < initialSize) maxHeapSize = initialSize; } // The default minimum is zero; in practice the live data size. // The default initial size is the minimum if that has been provided, // otherwise 8M words. There are applications that only require a small // heap and if we set the heap large to begin with we'll never do a // full GC and reduce it. if (initialSize == 0) { if (minHeapSize != 0) initialSize = minHeapSize; else initialSize = 8 * gMem.DefaultSpaceSize(); // But not more than the maximum if (initialSize > maxHeapSize) initialSize = maxHeapSize; } // Together with the constraints on user settings that ensures this holds. ASSERT(initialSize >= minHeapSize && initialSize <= maxHeapSize); // Initially we divide the space equally between the major and // minor heaps. That means that there will definitely be space // for the first minor GC to copy its data. This division can be // changed later on. 
gMem.SetSpaceForHeap(initialSize); gMem.SetSpaceBeforeMinorGC(initialSize/2); lastFreeSpace = initialSize; highWaterMark = initialSize; if (percent == 0) userGCRatio = 1.0 / 9.0; // Default to 10% GC to 90% application else userGCRatio = (float)percent / (float)(100 - percent); predictedRatio = lastMajorGCRatio = userGCRatio; if (debugOptions & DEBUG_HEAPSIZE) { Log("Heap: Initial settings: Initial heap "); LogSize(initialSize); Log(" minimum "); LogSize(minHeapSize); Log(" maximum "); LogSize(maxHeapSize); Log(" target ratio %f\n", userGCRatio); } }