// Initialize our arenas (this is required to be able to set the chunk hooks.)
static void initialize_arenas(void) {
  size_t num_arenas_sz;
  unsigned num_arenas;
  unsigned i;

  // "thread.arena" takes an unsigned, but the arena count is a size_t, so
  // make sure the narrowing conversion is safe before doing it.
  num_arenas_sz = get_num_arenas();
  if (num_arenas_sz > (size_t) UINT_MAX) {
    chpl_internal_error("narenas too large to fit into unsigned");
  }
  num_arenas = (unsigned) num_arenas_sz;

  // Bind the current thread to each non-zero arena in turn; this forces each
  // one to be initialized. Arena 0 is initialized automatically.
  //
  // jemalloc 4.0.4 man: "If the specified arena was not initialized
  // beforehand, it will be automatically initialized as a side effect of
  // calling this interface."
  for (i = 1; i < num_arenas; i++) {
    if (je_mallctl("thread.arena", NULL, NULL, &i, sizeof(i)) != 0) {
      chpl_internal_error("could not change current thread's arena");
    }
  }

  // Finally, rebind the current thread to arena 0.
  i = 0;
  if (je_mallctl("thread.arena", NULL, NULL, &i, sizeof(i)) != 0) {
    chpl_internal_error("could not change current thread's arena back to 0");
  }
}
// Read (and optionally write) a size_t-valued mallctl entry.
// If newval is non-NULL it is installed as the new value; the previous
// value is returned. Errors from je_mallctl are ignored, in which case
// the returned value stays 0.
size_t mallctl_int64(const char* name, size_t* newval) {
    size_t oldval = 0;
    size_t oldlen = sizeof(oldval);
    void *newp = newval ? (void *)newval : NULL;
    size_t newlen = newval ? sizeof(size_t) : 0;

    je_mallctl(name, &oldval, &oldlen, newp, newlen);
    return oldval;
}
void set_jemalloc() { //je_malloc_conf = "narenas:4"; int narenas = 0; size_t sz = sizeof(narenas); je_mallctl("opt.narenas", &narenas, &sz, nullptr, 0); ::wprintf(L"number of arenas: %d\n", narenas); //je_malloc_stats_print(nullptr, nullptr, nullptr); sz = sizeof(jemalloc_pre_allocated); je_mallctl("stats.active", &jemalloc_pre_allocated, &sz, nullptr, 0); }
// replace the chunk hooks for each arena with the hooks we provided above static void replaceChunkHooks(void) { size_t narenas; size_t arena; // set the pointers for the new_hooks to our above functions chunk_hooks_t new_hooks = { chunk_alloc, null_dalloc, null_commit, null_decommit, null_purge, null_split, null_merge }; // for each arena, change the chunk hooks narenas = get_num_arenas(); for (arena=0; arena<narenas; arena++) { char path[128]; snprintf(path, sizeof(path), "arena.%zu.chunk_hooks", arena); if (je_mallctl(path, NULL, NULL, &new_hooks, sizeof(chunk_hooks_t)) != 0) { chpl_internal_error("could not update the chunk hooks"); } } }
// Read an int-valued mallctl entry, optionally writing a new value.
// Returns the previous value; write success/failure is reported on stdout.
// Errors on the read-only path are silently ignored (0 is returned).
int mallctl_opt(const char* name, int* newval) {
    int oldval = 0;
    size_t oldlen = sizeof(oldval);

    // Read-only query: no value to install, nothing to report.
    if (newval == NULL) {
        je_mallctl(name, &oldval, &oldlen, NULL, 0);
        return oldval;
    }

    int rc = je_mallctl(name, &oldval, &oldlen, newval, sizeof(int));
    if (rc != 0) {
        printf("set new value(%d) for (%s) failed: error -> %d\n", *newval, name, rc);
    } else {
        printf("set new value(%d) for (%s) succeed\n", *newval, name);
    }
    return oldval;
}
// Re-enable jemalloc after a matching je_malloc_disable() call: release the
// allocator's internal state as the post-fork parent, restore the thread
// cache if it was enabled before the disable, and drop the guard mutex.
// NOTE(review): presumably must run on the same thread that called
// je_malloc_disable() (pthread mutex ownership) — confirm with callers.
void je_malloc_enable() {
    // Undo jemalloc_prefork() (called by je_malloc_disable) by running the
    // parent-side post-fork handler.
    jemalloc_postfork_parent();
    // malloc_disabled_tcache holds the tcache-enabled state saved by
    // je_malloc_disable(); only re-enable the tcache if it was on before.
    if (malloc_disabled_tcache) {
        je_mallctl("thread.tcache.enabled", NULL, NULL, &malloc_disabled_tcache, sizeof(malloc_disabled_tcache));
    }
    // Release the guard lock taken by je_malloc_disable().
    pthread_mutex_unlock(&malloc_disabled_lock);
}
// Ask jemalloc how many arenas are configured (the "opt.narenas" control).
// Aborts via chpl_internal_error if the query fails.
static size_t get_num_arenas(void) {
  size_t num_arenas = 0;
  size_t len = sizeof(num_arenas);

  if (je_mallctl("opt.narenas", &num_arenas, &len, NULL, 0) != 0) {
    chpl_internal_error("could not get number of arenas from jemalloc");
  }
  return num_arenas;
}
/* Fill in jemalloc's view of this process's memory usage. All three
 * out-parameters are zeroed first and then populated from the "stats.*"
 * mallctl namespace. Always returns 1. */
int zmalloc_get_allocator_info(size_t *allocated, size_t *active, size_t *resident) {
    uint64_t epoch = 1;
    size_t sz;
    *allocated = *resident = *active = 0;
    /* Update the statistics cached by mallctl: jemalloc only refreshes its
     * stats when the "epoch" control is written. */
    sz = sizeof(epoch);
    je_mallctl("epoch", &epoch, &sz, &epoch, sz);
    sz = sizeof(size_t);
    /* Unlike RSS, this does not include RSS from shared libraries and other non
     * heap mappings. */
    je_mallctl("stats.resident", resident, &sz, NULL, 0);
    /* Unlike resident, this doesn't include the pages jemalloc reserves
     * for re-use (purge will clean that). */
    je_mallctl("stats.active", active, &sz, NULL, 0);
    /* Unlike zmalloc_used_memory, this matches the stats.resident by taking
     * into account all allocations done by this process (not only zmalloc). */
    je_mallctl("stats.allocated", allocated, &sz, NULL, 0);
    return 1;
}
// Quiesce jemalloc (e.g. so memory can be safely inspected or forked):
// one-time init of the guard mutex, take the guard lock, disable this
// thread's tcache (saving its previous enabled state in
// malloc_disabled_tcache so je_malloc_enable() can restore it), then
// acquire jemalloc's internal locks via jemalloc_prefork().
// NOTE(review): statement order looks deliberate — the tcache write happens
// before prefork grabs the allocator locks; do not reorder.
void je_malloc_disable() {
    // Initialize the guard mutex exactly once across all threads.
    static pthread_once_t once_control = PTHREAD_ONCE_INIT;
    pthread_once(&once_control, je_malloc_disable_init);
    pthread_mutex_lock(&malloc_disabled_lock);
    // Write `false` to disable the tcache; jemalloc returns the previous
    // state through the oldp pointer (malloc_disabled_tcache).
    bool new_tcache = false;
    size_t old_len = sizeof(malloc_disabled_tcache);
    je_mallctl("thread.tcache.enabled", &malloc_disabled_tcache, &old_len, &new_tcache, sizeof(new_tcache));
    jemalloc_prefork();
}
// Join address space `id`: if the space has custom chunk hooks registered,
// create a dedicated jemalloc arena (wired to those hooks) plus a thread
// cache, and record the MALLOCX flags for the space in as_flags[id].
// Spaces without custom hooks keep the default local allocator (flags 0).
void as_join(int id) {
  // Already joined? Nothing to do.
  if (as_flags[id] != 0) {
    log_gas("address space %d already joined\n", id);
    return;
  }

  const chunk_hooks_t *hooks = _hooks[id];

  // No custom hooks registered for this space: the basic local allocator is
  // fine, so no special allocation flags are needed.
  if (!hooks) {
    log_gas("no custom allocator for %d, using local\n", id);
    return;
  }

  // Create a fresh arena and point it at the custom hooks.
  unsigned arena;
  size_t sz = sizeof(arena);
  dbg_check( je_mallctl("arenas.extend", &arena, &sz, NULL, 0) );

  char path[128];
  snprintf(path, sizeof(path), "arena.%u.chunk_hooks", arena);
  dbg_check( je_mallctl(path, NULL, NULL, (void*)hooks, sizeof(*hooks)) );

  // // Disable dirty page purging for this arena
  // snprintf(path, 124, "arena.%u.lg_dirty_mult", arena);
  // ssize_t i = -1;
  // dbg_check( je_mallctl(path, NULL, NULL, (void*)&i, sizeof(i)) );

  // Give the space its own thread cache.
  unsigned cache;
  sz = sizeof(cache);
  dbg_check( je_mallctl("tcache.create", &cache, &sz, NULL, 0) );

  // Record the flags callers will pass to mallocx() for this space.
  as_flags[id] = MALLOCX_ARENA(arena) | MALLOCX_TCACHE(cache);
}
/*
 * Sanity-check that jemalloc was configured with exactly one arena.
 * Returns 0 on success, 1 if the mallctl query fails, 2 on a wrong count.
 */
int main(void) {
	/* Check how many arenas we have. */
	unsigned narenas;
	size_t len = sizeof(narenas);
	if (je_mallctl("arenas.narenas", &narenas, &len, NULL, 0) != 0) {
		/* Bug fix: message previously lacked a trailing newline. */
		fprintf(stderr, "Failed to get narenas\n");
		return 1;
	}
	if (narenas != 1) {
		/* Bug fix: narenas is unsigned, so %u is required (printing an
		 * unsigned with %d is undefined behavior per C11 7.21.6.1). */
		fprintf(stderr, "Incorrect number of arenas: %u != 1\n", narenas);
		return 2;
	}
	return 0;
}
/*
 * Print jemalloc statistics through write_cb/cbopaque (via malloc_cprintf).
 * `opts` is an optional string of flag characters, each of which suppresses
 * one section of the report:
 *   'g' general info, 'm' merged arena stats, 'a' per-arena stats,
 *   'b' bin stats, 'l' large-object stats.
 */
void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts)
{
	int err;
	uint64_t epoch;
	size_t u64sz;
	bool general = true;
	bool merged = true;
	bool unmerged = true;
	bool bins = true;
	bool large = true;

	/*
	 * Refresh stats, in case mallctl() was called by the application.
	 *
	 * Check for OOM here, since refreshing the ctl cache can trigger
	 * allocation. In practice, none of the subsequent mallctl()-related
	 * calls in this function will cause OOM if this one succeeds.
	 */
	epoch = 1;
	u64sz = sizeof(uint64_t);
	err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
	if (err != 0) {
		if (err == EAGAIN) {
			malloc_write("<jemalloc>: Memory allocation failure in "
			    "mallctl(\"epoch\", ...)\n");
			return;
		}
		malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
		    "...)\n");
		abort();
	}

	/* Parse the option flags; each recognized character disables a section. */
	if (opts != NULL) {
		unsigned i;

		for (i = 0; opts[i] != '\0'; i++) {
			switch (opts[i]) {
			case 'g':
				general = false;
				break;
			case 'm':
				merged = false;
				break;
			case 'a':
				unmerged = false;
				break;
			case 'b':
				bins = false;
				break;
			case 'l':
				large = false;
				break;
			default:;
			}
		}
	}

	malloc_cprintf(write_cb, cbopaque, "___ Begin jemalloc statistics ___\n");

	/* General configuration/option section. */
	if (general) {
		int err;
		const char *cpv;
		bool bv;
		unsigned uv;
		ssize_t ssv;
		size_t sv, bsz, ssz, sssz, cpsz;

		bsz = sizeof(bool);
		ssz = sizeof(size_t);
		sssz = sizeof(ssize_t);
		cpsz = sizeof(const char *);

		CTL_GET("version", &cpv, const char *);
		malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
		CTL_GET("config.debug", &bv, bool);
		malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
		    bv ? "enabled" : "disabled");

/*
 * Helper macros: each prints one "opt.<n>" setting, but only if the control
 * exists in this build (je_mallctl returns 0).
 */
#define OPT_WRITE_BOOL(n) \
	if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
	    == 0) { \
		malloc_cprintf(write_cb, cbopaque, \
		    " opt."#n": %s\n", bv ? "true" : "false"); \
	}
#define OPT_WRITE_SIZE_T(n) \
	if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
	    == 0) { \
		malloc_cprintf(write_cb, cbopaque, \
		    " opt."#n": %zu\n", sv); \
	}
#define OPT_WRITE_SSIZE_T(n) \
	if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
	    == 0) { \
		malloc_cprintf(write_cb, cbopaque, \
		    " opt."#n": %zd\n", ssv); \
	}
#define OPT_WRITE_CHAR_P(n) \
	if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
	    == 0) { \
		malloc_cprintf(write_cb, cbopaque, \
		    " opt."#n": \"%s\"\n", cpv); \
	}

		malloc_cprintf(write_cb, cbopaque, "Run-time option settings:\n");
		OPT_WRITE_BOOL(abort)
		OPT_WRITE_SIZE_T(lg_chunk)
		OPT_WRITE_CHAR_P(dss)
		OPT_WRITE_SIZE_T(narenas)
		OPT_WRITE_SSIZE_T(lg_dirty_mult)
		OPT_WRITE_BOOL(stats_print)
		OPT_WRITE_BOOL(junk)
		OPT_WRITE_SIZE_T(quarantine)
		OPT_WRITE_BOOL(redzone)
		OPT_WRITE_BOOL(zero)
		OPT_WRITE_BOOL(utrace)
		OPT_WRITE_BOOL(valgrind)
		OPT_WRITE_BOOL(xmalloc)
		OPT_WRITE_BOOL(tcache)
		OPT_WRITE_SSIZE_T(lg_tcache_max)
		OPT_WRITE_BOOL(prof)
		OPT_WRITE_CHAR_P(prof_prefix)
		OPT_WRITE_BOOL(prof_active)
		OPT_WRITE_SSIZE_T(lg_prof_sample)
		OPT_WRITE_BOOL(prof_accum)
		OPT_WRITE_SSIZE_T(lg_prof_interval)
		OPT_WRITE_BOOL(prof_gdump)
		OPT_WRITE_BOOL(prof_final)
		OPT_WRITE_BOOL(prof_leak)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P

		malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);

		CTL_GET("arenas.narenas", &uv, unsigned);
		malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);

		malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
		    sizeof(void *));

		CTL_GET("arenas.quantum", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);

		CTL_GET("arenas.page", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);

		/* lg_dirty_mult < 0 means dirty-page purging is disabled. */
		CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
		if (ssv >= 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: %u:1\n",
			    (1U << ssv));
		} else {
			malloc_cprintf(write_cb, cbopaque,
			    "Min active:dirty page ratio per arena: N/A\n");
		}
		if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0)) == 0) {
			malloc_cprintf(write_cb, cbopaque,
			    "Maximum thread-cached size class: %zu\n", sv);
		}
		/* Profiling settings are only printed when opt.prof is on. */
		if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && bv) {
			CTL_GET("opt.lg_prof_sample", &sv, size_t);
			malloc_cprintf(write_cb, cbopaque,
			    "Average profile sample interval: %"PRIu64 " (2^%zu)\n",
			    (((uint64_t)1U) << sv), sv);

			CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
			if (ssv >= 0) {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: %"PRIu64 " (2^%zd)\n",
				    (((uint64_t)1U) << ssv), ssv);
			} else {
				malloc_cprintf(write_cb, cbopaque,
				    "Average profile dump interval: N/A\n");
			}
		}
		CTL_GET("opt.lg_chunk", &sv, size_t);
		malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
		    (ZU(1) << sv), sv);
	}

	/* Statistics section (only when jemalloc was built with stats). */
	if (config_stats) {
		size_t *cactive;
		size_t allocated, active, mapped;
		size_t chunks_current, chunks_high;
		uint64_t chunks_total;
		size_t huge_allocated;
		uint64_t huge_nmalloc, huge_ndalloc;

		CTL_GET("stats.cactive", &cactive, size_t *);
		CTL_GET("stats.allocated", &allocated, size_t);
		CTL_GET("stats.active", &active, size_t);
		CTL_GET("stats.mapped", &mapped, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "Allocated: %zu, active: %zu, mapped: %zu\n",
		    allocated, active, mapped);
		/* cactive is a pointer into jemalloc state; read it atomically. */
		malloc_cprintf(write_cb, cbopaque,
		    "Current active ceiling: %zu\n", atomic_read_z(cactive));

		/* Print chunk stats. */
		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
		CTL_GET("stats.chunks.high", &chunks_high, size_t);
		CTL_GET("stats.chunks.current", &chunks_current, size_t);
		malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
		    "highchunks curchunks\n");
		malloc_cprintf(write_cb, cbopaque,
		    " %13"PRIu64" %12zu %12zu\n",
		    chunks_total, chunks_high, chunks_current);

		/* Print huge stats. */
		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
		CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
		CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
		malloc_cprintf(write_cb, cbopaque,
		    "huge: nmalloc ndalloc allocated\n");
		malloc_cprintf(write_cb, cbopaque,
		    " %12"PRIu64" %12"PRIu64" %12zu\n",
		    huge_nmalloc, huge_ndalloc, huge_allocated);

		if (merged) {
			unsigned narenas;

			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i, ninitialized;

				/* Count initialized arenas. */
				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized, &isz, NULL, 0);
				for (i = ninitialized = 0; i < narenas; i++) {
					if (initialized[i])
						ninitialized++;
				}

				/* Merged stats are redundant when a single
				 * arena's stats will be printed unmerged. */
				if (ninitialized > 1 || unmerged == false) {
					/* Print merged arena stats. */
					malloc_cprintf(write_cb, cbopaque,
					    "\nMerged arenas stats:\n");
					stats_arena_print(write_cb, cbopaque,
					    narenas, bins, large);
				}
			}
		}

		if (unmerged) {
			unsigned narenas;

			/* Print stats for each arena. */
			CTL_GET("arenas.narenas", &narenas, unsigned);
			{
				VARIABLE_ARRAY(bool, initialized, narenas);
				size_t isz;
				unsigned i;

				isz = sizeof(bool) * narenas;
				xmallctl("arenas.initialized", initialized, &isz, NULL, 0);

				for (i = 0; i < narenas; i++) {
					if (initialized[i]) {
						malloc_cprintf(write_cb, cbopaque,
						    "\narenas[%u]:\n", i);
						stats_arena_print(write_cb, cbopaque,
						    i, bins, large);
					}
				}
			}
		}
	}
	malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
// Benchmark driver: runs memory_alloc_test_thread_func on thread_count
// threads against whichever allocator was selected at compile time, and
// prints run time plus before/after memory info. For JEMALLOC builds it
// also compares "stats.active" before and after as a crude leak check.
int main() {
#if defined(CRT_LFH)
	::wprintf(L"crt lfh\n");
	set_lfh();
#elif defined(TBBMALLOC)
	::wprintf(L"tbbmalloc\n");
#elif defined(TCMALLOC)
	::wprintf(L"tcmalloc\n");
#elif defined(JEMALLOC)
	::wprintf(L"jemalloc\n");
	set_jemalloc();
#elif defined(MS_CONCURRENCY)
	::wprintf(L"ms concurrency\n");
#elif defined(MY_ALLOCATOR)
	::wprintf(L"my allocator\n");
	if (allocator.create(true, 0) == false) {
		::wprintf(L"allocator.create() failed.\n");
		return 0;
	}
#endif

	// for crt tbb tcmalloc
	char* dummy = new char[10];
	SecureZeroMemory(dummy, 10);

	::wprintf(L"start memory info\n");
	print_memory_info();

	HANDLE thread_handle[thread_count] = { nullptr, };
	ULONGLONG start = ::GetTickCount64();
	for (std::size_t i = 0; i < thread_count; ++i) {
		unsigned int id = 0;
		// NOTE(review): %llu with std::size_t assumes a 64-bit size_t —
		// confirm for 32-bit builds.
		thread_handle[i] = reinterpret_cast< HANDLE >(::_beginthreadex(nullptr, 0, memory_alloc_test_thread_func, nullptr, 0, &id));
		if (thread_handle[i] == nullptr) {
			::wprintf(L"%llu: _beginthreadex failed.\n", i);
		}
	}

	// Wait for all workers, then measure elapsed wall time in milliseconds.
	::WaitForMultipleObjects(thread_count, thread_handle, TRUE, INFINITE);
	ULONGLONG run_time = ::GetTickCount64() - start;
	::wprintf(L"run time: %llu\n", run_time);

	for (std::size_t i = 0; i < thread_count; ++i) {
		if (thread_handle[i] != nullptr) {
			::CloseHandle(thread_handle[i]);
			thread_handle[i] = nullptr;
		}
	}

	delete[] dummy;
	dummy = nullptr;

	::wprintf(L"end memory info\n");
	print_memory_info();

#if defined(MY_ALLOCATOR)
	print_allocator_stat(allocator.get_statistics());
	allocator.destroy();
#elif defined(JEMALLOC)
	{
		//je_malloc_stats_print(nullptr, nullptr, nullptr);
		// Snapshot "stats.active" again and compare with the baseline
		// captured in set_jemalloc().
		// NOTE(review): if post < pre this size_t subtraction wraps to a
		// huge value and the test reports FAILED — confirm intended.
		size_t sz = sizeof(jemalloc_post_allocated);
		je_mallctl("stats.active", &jemalloc_post_allocated, &sz, nullptr, 0);
		size_t leaked = jemalloc_post_allocated - jemalloc_pre_allocated;
		::wprintf(L"\nDone. Leaked: %zd bytes\n", leaked);
		bool failed = leaked > 65536; // in case C++ runtime allocated something (e.g. iostream locale or facet)
		::wprintf(L"\nTest %s!\n", (failed ? L"FAILED" : L"successful"));
	}
#endif
	return 0;
}
/// Launch the Trinity server extern int main(int argc, char** argv) { ///- Command line parsing to get the configuration file name char const* cfg_file = _TRINITY_CORE_CONFIG; int c = 1; while (c < argc) { if (!strcmp(argv[c], "-c")) { if (++c >= argc) { printf("Runtime-Error: -c option requires an input argument"); usage(argv[0]); return 1; } else cfg_file = argv[c]; } #ifdef _WIN32 if (strcmp(argv[c], "-s") == 0) // Services { if (++c >= argc) { printf("Runtime-Error: -s option requires an input argument"); usage(argv[0]); return 1; } if (strcmp(argv[c], "install") == 0) { if (WinServiceInstall()) printf("Installing service\n"); return 1; } else if (strcmp(argv[c], "uninstall") == 0) { if (WinServiceUninstall()) printf("Uninstalling service\n"); return 1; } else { printf("Runtime-Error: unsupported option %s", argv[c]); usage(argv[0]); return 1; } } if (strcmp(argv[c], "--service") == 0) WinServiceRun(); #endif ++c; } if (!sConfigMgr->LoadInitial(cfg_file)) { printf("Invalid or missing configuration file : %s\n", cfg_file); printf("Verify that the file exists and has \'[worldserver]' written in the top of the file!\n"); return 1; } TC_LOG_INFO("server.worldserver", "Using configuration file %s.", cfg_file); TC_LOG_INFO("server.worldserver", "Using SSL version: %s (library: %s)", OPENSSL_VERSION_TEXT, SSLeay_version(SSLEAY_VERSION)); TC_LOG_INFO("server.worldserver", "Using ACE version: %s", ACE_VERSION); #ifdef HAVE_JEMALLOC { char const *jmVersion; size_t jmVersionSize = sizeof(jmVersion); je_mallctl("version", &jmVersion, &jmVersionSize, NULL, 0); TC_LOG_INFO("server.worldserver", "Using Jemalloc: %s", jmVersion); } #endif ///- and run the 'Master' /// @todo Why do we need this 'Master'? Can't all of this be in the Main as for Realmd? int ret = sMaster->Run(); // at sMaster return function exist with codes // 0 - normal shutdown // 1 - shutdown at error // 2 - restart command used, this code can be used by restarter for restart Trinityd return ret; }
// Report jemalloc's chunk size in bytes, derived from the log2 value that
// the "opt.lg_chunk" control returns. If the mallctl call fails its error
// is ignored and the lg value stays 0 (yielding 1 byte).
size_t as_bytes_per_chunk(void) {
  size_t lg_chunk = 0;
  size_t len = sizeof(lg_chunk);

  je_mallctl("opt.lg_chunk", &lg_chunk, &len, NULL, 0);
  return (1lu << lg_chunk);
}