/// Join an address space, installing its custom chunk allocator if present.
///
/// If the space already has flags recorded we were joined before and do
/// nothing.  If the space has no custom chunk hooks, the default local
/// allocator suffices and as_flags[id] stays 0.  Otherwise we create a
/// dedicated jemalloc arena wired to the space's hooks plus a thread
/// cache, and record the mallocx() flags needed to allocate from them.
///
/// @param id  The address-space identifier (index into as_flags/_hooks).
void as_join(int id) {
  if (as_flags[id] != 0) {
    log_gas("address space %d already joined\n", id);
    return;
  }

  const chunk_hooks_t *hooks = _hooks[id];

  // If there aren't any custom hooks set for this space, then the basic local
  // allocator is fine, which means that we don't need any special flags for
  // this address space.
  if (!hooks) {
    log_gas("no custom allocator for %d, using local\n", id);
    return;
  }

  // Create an arena that uses the right hooks.
  unsigned arena;
  size_t sz = sizeof(arena);
  dbg_check( je_mallctl("arenas.extend", &arena, &sz, NULL, 0) );

  // Install the custom chunk hooks on the new arena.  Use sizeof(path) as the
  // snprintf bound (not a magic length) so the buffer size and the bound
  // cannot drift apart.
  char path[128];
  snprintf(path, sizeof(path), "arena.%u.chunk_hooks", arena);
  dbg_check( je_mallctl(path, NULL, NULL, (void*)hooks, sizeof(*hooks)) );

  // NOTE: dirty-page purging could be disabled for this arena by writing
  // ssize_t -1 to "arena.<i>.lg_dirty_mult"; we deliberately leave purging
  // enabled for now.

  // Create a cache.
  unsigned cache;
  sz = sizeof(cache);
  dbg_check( je_mallctl("tcache.create", &cache, &sz, NULL, 0) );

  // And set the flags.
  as_flags[id] = MALLOCX_ARENA(arena) | MALLOCX_TCACHE(cache);
}
TEST_END

/* Exercise explicit tcache lifecycle management through the mallctl
 * interface: creation, ID recycling, flushing (empty and non-empty),
 * per-tcache caching behavior of mallocx/rallocx/dallocx, and destruction. */
TEST_BEGIN(test_tcache) {
#define NTCACHES 10
	unsigned tcache_ids[NTCACHES];
	void *small_ptrs[NTCACHES];
	void *large_ptrs[NTCACHES];
	unsigned j;
	size_t len, small_sz, large_sz;

	/* Pick two sizes guaranteed to land in different size classes. */
	small_sz = 42;
	large_sz = nallocx(small_sz, 0) + 1;

	/* Create tcaches. */
	for (j = 0; j < NTCACHES; j++) {
		len = sizeof(unsigned);
		assert_d_eq(mallctl("tcache.create", (void *)&tcache_ids[j],
		    &len, NULL, 0), 0,
		    "Unexpected mallctl() failure, i=%u", j);
	}

	/* Exercise tcache ID recycling. */
	for (j = 0; j < NTCACHES; j++) {
		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
		    (void *)&tcache_ids[j], sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure, i=%u", j);
	}
	for (j = 0; j < NTCACHES; j++) {
		len = sizeof(unsigned);
		assert_d_eq(mallctl("tcache.create", (void *)&tcache_ids[j],
		    &len, NULL, 0), 0,
		    "Unexpected mallctl() failure, i=%u", j);
	}

	/* Flush empty tcaches. */
	for (j = 0; j < NTCACHES; j++) {
		assert_d_eq(mallctl("tcache.flush", NULL, NULL,
		    (void *)&tcache_ids[j], sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure, i=%u", j);
	}

	/* Cache some allocations. */
	for (j = 0; j < NTCACHES; j++) {
		small_ptrs[j] = mallocx(small_sz, MALLOCX_TCACHE(tcache_ids[j]));
		assert_ptr_not_null(small_ptrs[j],
		    "Unexpected mallocx() failure, i=%u", j);
		dallocx(small_ptrs[j], MALLOCX_TCACHE(tcache_ids[j]));

		large_ptrs[j] = mallocx(large_sz, MALLOCX_TCACHE(tcache_ids[j]));
		assert_ptr_not_null(large_ptrs[j],
		    "Unexpected mallocx() failure, i=%u", j);
		dallocx(large_ptrs[j], MALLOCX_TCACHE(tcache_ids[j]));
	}

	/* Verify that tcaches allocate cached regions. */
	for (j = 0; j < NTCACHES; j++) {
		void *p0 = small_ptrs[j];
		small_ptrs[j] = mallocx(small_sz, MALLOCX_TCACHE(tcache_ids[j]));
		assert_ptr_not_null(small_ptrs[j],
		    "Unexpected mallocx() failure, i=%u", j);
		assert_ptr_eq(small_ptrs[j], p0,
		    "Expected mallocx() to allocate cached region, i=%u", j);
	}

	/* Verify that reallocation uses cached regions. */
	for (j = 0; j < NTCACHES; j++) {
		void *q0 = large_ptrs[j];
		large_ptrs[j] = rallocx(small_ptrs[j], large_sz,
		    MALLOCX_TCACHE(tcache_ids[j]));
		assert_ptr_not_null(large_ptrs[j],
		    "Unexpected rallocx() failure, i=%u", j);
		assert_ptr_eq(large_ptrs[j], q0,
		    "Expected rallocx() to allocate cached region, i=%u", j);
		/* Avoid undefined behavior in case of test failure. */
		if (large_ptrs[j] == NULL) {
			large_ptrs[j] = small_ptrs[j];
		}
	}
	for (j = 0; j < NTCACHES; j++) {
		dallocx(large_ptrs[j], MALLOCX_TCACHE(tcache_ids[j]));
	}

	/* Flush some non-empty tcaches. */
	for (j = 0; j < NTCACHES/2; j++) {
		assert_d_eq(mallctl("tcache.flush", NULL, NULL,
		    (void *)&tcache_ids[j], sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure, i=%u", j);
	}

	/* Destroy tcaches. */
	for (j = 0; j < NTCACHES; j++) {
		assert_d_eq(mallctl("tcache.destroy", NULL, NULL,
		    (void *)&tcache_ids[j], sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure, i=%u", j);
	}
}