/*
 * verify_pages_linear() - verify pages are interleaved across allowed nodes
 * @pages: array of pages to be verified
 * @status: the NUMA node of each page
 * @num: the number of pages
 */
void verify_pages_linear(void **pages, int *status, unsigned int num)
{
#if HAVE_NUMA_H
	unsigned int i;
	unsigned int n = 0;
	int nodes[num];
	int num_allowed_nodes;
	int *allowed_nodes;
	int ret;

	ret = get_allowed_nodes_arr(NH_MEMS, &num_allowed_nodes,
				    &allowed_nodes);
	if (ret < 0)
		tst_brkm(TBROK | TERRNO, NULL, "get_allowed_nodes(): %d", ret);

	for (i = 0; i < num; i++) {
		nodes[i] = allowed_nodes[n];
		n++;
		if (n >= num_allowed_nodes)
			n = 0;
	}
	free(allowed_nodes);

	verify_pages_on_nodes(pages, status, num, nodes);
#endif
}
/*
 * get_allowed_nodes - convenience function to get a fixed number of nodes
 * @flag: node selection criteria (NH_MEMS and/or NH_CPUS)
 * @count: how many nodes to get
 * @...: int pointers, where node ids will be stored
 * RETURNS:
 *     0 on success
 *    -1 on allocation failure
 *    -2 on get_mempolicy failure
 *    -3 on not enough allowed nodes
 */
int get_allowed_nodes(int flag, int count, ...)
{
	int ret;
	int i, *nodep;
	va_list ap;
	int num_nodes = 0;
	int *nodes = NULL;

	ret = get_allowed_nodes_arr(flag, &num_nodes, &nodes);
	if (ret < 0)
		return ret;

	va_start(ap, count);
	for (i = 0; i < count; i++) {
		nodep = va_arg(ap, int *);
		if (i < num_nodes) {
			*nodep = nodes[i];
		} else {
			ret = -3;
			errno = EINVAL;
			break;
		}
	}
	free(nodes);
	va_end(ap);
	return ret;
}
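/*
 * Usage sketch (illustrative, not part of the library): how a caller might
 * request two allowed nodes with memory via the varargs interface above.
 * The function name, variable names and the TCONF handling are assumptions
 * for the example, not code from an existing test.
 */
static void example_get_two_nodes(void)
{
	int node1, node2;

	/* Ask for two node ids whose nodes have memory (NH_MEMS). */
	if (get_allowed_nodes(NH_MEMS, 2, &node1, &node2) < 0)
		tst_brkm(TCONF, NULL,
			 "requires at least 2 NUMA nodes with memory");

	tst_resm(TINFO, "using nodes %d and %d", node1, node2);
}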
/*
 * is_numa - determine whether the system is a NUMA system
 * NOTE: the function tries to find more than one available node,
 *       each of which must contain memory.
 * WARNING: don't use this function in a child process, as it calls tst_brkm()
 * RETURNS:
 *     0 - it's not a NUMA system
 *     1 - it's a NUMA system
 */
int is_numa(void (*cleanup_fn)(void))
{
	int ret;
	int numa_nodes = 0;

	ret = get_allowed_nodes_arr(NH_MEMS, &numa_nodes, NULL);
	if (ret < 0)
		tst_brkm(TBROK | TERRNO, cleanup_fn, "get_allowed_nodes_arr");

	if (numa_nodes > 1)
		return 1;
	else
		return 0;
}
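/*
 * Usage sketch (illustrative, not from the source): skipping a test on a
 * non-NUMA machine by combining is_numa() with a TCONF break. The cleanup
 * callback name is an assumption.
 */
static void example_require_numa(void)
{
	if (!is_numa(cleanup))
		tst_brkm(TCONF, cleanup, "test requires a NUMA system");
}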
static void print_node_info(int flag)
{
	int *allowed_nodes = NULL;
	int i, ret, num_nodes;

	ret = get_allowed_nodes_arr(flag, &num_nodes, &allowed_nodes);
	printf("nodes (flag=%d): ", flag);
	if (ret == 0) {
		for (i = 0; i < num_nodes; i++)
			printf("%d ", allowed_nodes[i]);
		printf("\n");
	} else {
		printf("error(%d)\n", ret);
	}
	free(allowed_nodes);
}
int main(int argc, char *argv[])
{
	tst_parse_opts(argc, argv, NULL, NULL);

	ncpus = count_cpu();
	if (get_allowed_nodes_arr(NH_MEMS | NH_CPUS, &nnodes, &nodes) < 0)
		tst_brkm(TBROK | TERRNO, NULL, "get_allowed_nodes_arr");
	if (nnodes <= 1)
		tst_brkm(TCONF, NULL, "requires a NUMA system.");

	setup();
	testcpuset();
	cleanup();
	tst_exit();
}
static void set_global_mempolicy(int mempolicy)
{
#if HAVE_NUMA_H && HAVE_LINUX_MEMPOLICY_H && HAVE_NUMAIF_H \
	&& HAVE_MPOL_CONSTANTS
	unsigned long nmask[MAXNODES / BITS_PER_LONG] = { 0 };
	int num_nodes, *nodes;
	int ret;

	if (mempolicy) {
		ret = get_allowed_nodes_arr(NH_MEMS | NH_CPUS, &num_nodes,
					    &nodes);
		if (ret != 0)
			tst_brkm(TBROK | TERRNO, cleanup,
				 "get_allowed_nodes_arr");
		if (num_nodes < 2) {
			tst_resm(TINFO, "mempolicy needs NUMA system support");
			free(nodes);
			return;
		}
		switch (mempolicy) {
		case MPOL_BIND:
			/* bind to the second node */
			set_node(nmask, nodes[1]);
			break;
		case MPOL_INTERLEAVE:
		case MPOL_PREFERRED:
			if (num_nodes == 2) {
				tst_resm(TINFO, "mempolicy needs more "
					 "than 2 NUMA nodes");
				free(nodes);
				return;
			} else {
				/* use the 2nd and 3rd nodes */
				set_node(nmask, nodes[1]);
				set_node(nmask, nodes[2]);
			}
			break;
		default:
			tst_brkm(TBROK | TERRNO, cleanup, "Bad mempolicy mode");
		}
		if (set_mempolicy(mempolicy, nmask, MAXNODES) == -1)
			tst_brkm(TBROK | TERRNO, cleanup, "set_mempolicy");
	}
#endif
}
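/*
 * set_node()/clean_node() above manipulate the nodemask handed to
 * set_mempolicy()/mbind(). A minimal sketch of how such helpers can be
 * implemented, assuming the same nmask[MAXNODES / BITS_PER_LONG] layout;
 * the names are prefixed "example_" because this is an illustration, not
 * necessarily the project's implementation.
 */
static void example_set_node(unsigned long *nmask, unsigned int node)
{
	/* Set the bit for `node` in the flat bitmask. */
	nmask[node / BITS_PER_LONG] |= 1UL << (node % BITS_PER_LONG);
}

static void example_clean_node(unsigned long *nmask)
{
	/* Clear every bit so the mask can be reused for the next policy. */
	memset(nmask, 0, (MAXNODES / BITS_PER_LONG) * sizeof(unsigned long));
}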
/*
 * check_config() - check for required configuration
 * @min_nodes: the minimum number of allowed NUMA nodes required
 *
 * Checks that NUMA support is available, the kernel is >= 2.6.18, and the
 * architecture is one of the supported architectures.
 */
void check_config(unsigned int min_nodes)
{
#if HAVE_NUMA_H && HAVE_NUMAIF_H
	int num_allowed_nodes;
	int ret;

	ret = get_allowed_nodes_arr(NH_MEMS, &num_allowed_nodes, NULL);
	if (ret < 0)
		tst_brkm(TBROK | TERRNO, NULL, "get_allowed_nodes(): %d", ret);

	if (numa_available() < 0) {
		tst_brkm(TCONF, NULL, "NUMA support is not available");
	} else if (num_allowed_nodes < min_nodes) {
		tst_brkm(TCONF, NULL, "at least %d allowed NUMA nodes"
			 " are required", min_nodes);
	} else if (tst_kvercmp(2, 6, 18) < 0) {
		tst_brkm(TCONF, NULL, "2.6.18 or greater kernel required");
	}
#else
	tst_brkm(TCONF, NULL, "NUMA support not provided");
#endif
}
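/*
 * Usage sketch (illustrative): a test's setup() would typically call
 * check_config() with the minimum node count it needs before allocating
 * any per-node resources. The function name and the value 2 are only an
 * example, not taken from an existing test.
 */
static void example_setup(void)
{
	/* Break with TCONF unless at least 2 allowed NUMA nodes exist. */
	check_config(2);
}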
static void test_invalid_nodes(void)
{
	int *nodes;
	int num_nodes, ret, i;
	int invalid_node = 0;
	unsigned long *old_nodes, *new_nodes;

	tst_resm(TINFO, "test_invalid_nodes");
	ret = get_allowed_nodes_arr(NH_MEMS, &num_nodes, &nodes);
	if (ret < 0)
		tst_brkm(TBROK | TERRNO, cleanup, "get_allowed_nodes_arr: %d",
			 ret);

	/* get first node which is not in nodes */
	for (i = 0; i < num_nodes; i++, invalid_node++)
		if (invalid_node != nodes[i])
			break;

	if (invalid_node < sane_max_node) {
		old_nodes = SAFE_MALLOC(NULL, sane_nodemask_size);
		new_nodes = SAFE_MALLOC(NULL, sane_nodemask_size);
		memcpy(old_nodes, sane_old_nodes, sane_nodemask_size);
		memset(new_nodes, 0, sane_nodemask_size);

		set_bit(new_nodes, invalid_node, 1);

		TEST(ltp_syscall(__NR_migrate_pages, 0, sane_max_node,
				 old_nodes, new_nodes));
		check_ret(-1);
		check_errno(EINVAL);
		free(old_nodes);
		free(new_nodes);
	} else {
		tst_resm(TCONF, "All possible nodes are present");
	}
	free(nodes);
}
static void setup(void)
{
	int ret, i, j;
	int pagesize = getpagesize();
	void *p;

	tst_require_root(NULL);

	TEST(ltp_syscall(__NR_migrate_pages, 0, 0, NULL, NULL));

	if (numa_available() == -1)
		tst_brkm(TCONF, NULL, "NUMA not available");

	ret = get_allowed_nodes_arr(NH_MEMS, &num_nodes, &nodes);
	if (ret < 0)
		tst_brkm(TBROK | TERRNO, NULL, "get_allowed_nodes(): %d", ret);

	if (num_nodes < 2)
		tst_brkm(TCONF, NULL, "at least 2 allowed NUMA nodes"
			 " are required");
	else if (tst_kvercmp(2, 6, 18) < 0)
		tst_brkm(TCONF, NULL, "2.6.18 or greater kernel required");

	/*
	 * find 2 nodes, which can hold NODE_MIN_FREEMEM bytes
	 * The reason is that:
	 * 1. migrate_pages() is expected to succeed
	 * 2. this test avoids hitting:
	 *    Bug 870326 - migrate_pages() reports success, but pages are
	 *    not moved to desired node
	 *    https://bugzilla.redhat.com/show_bug.cgi?id=870326
	 */
	nodeA = nodeB = -1;
	for (i = 0; i < num_nodes; i++) {
		p = numa_alloc_onnode(NODE_MIN_FREEMEM, nodes[i]);
		if (p == NULL)
			break;
		memset(p, 0xff, NODE_MIN_FREEMEM);

		j = 0;
		while (j < NODE_MIN_FREEMEM) {
			if (addr_on_node(p + j) != nodes[i])
				break;
			j += pagesize;
		}
		numa_free(p, NODE_MIN_FREEMEM);

		if (j >= NODE_MIN_FREEMEM) {
			if (nodeA == -1)
				nodeA = nodes[i];
			else if (nodeB == -1)
				nodeB = nodes[i];
			else
				break;
		}
	}

	if (nodeA == -1 || nodeB == -1)
		tst_brkm(TCONF, NULL, "at least 2 NUMA nodes with "
			 "free mem > %d are needed", NODE_MIN_FREEMEM);
	tst_resm(TINFO, "Using nodes: %d %d", nodeA, nodeB);

	ltpuser = getpwnam(nobody_uid);
	if (ltpuser == NULL)
		tst_brkm(TBROK | TERRNO, NULL, "getpwnam failed");

	TEST_PAUSE;
}
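/*
 * addr_on_node() above is used to confirm which node a page actually
 * lives on. A minimal sketch of one way to do that with get_mempolicy()
 * and MPOL_F_NODE | MPOL_F_ADDR (requires <numaif.h>); the helper name
 * and error handling here are assumptions for illustration, not
 * necessarily the project's implementation.
 */
static int example_addr_on_node(void *addr)
{
	int node;

	/* With MPOL_F_NODE | MPOL_F_ADDR the node of `addr` is stored in `node`. */
	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == -1)
		return -1;

	return node;
}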
void test_ksm_merge_across_nodes(unsigned long nr_pages)
{
	char **memory;
	int i, ret;
	int num_nodes, *nodes;
	unsigned long length;
	unsigned long pagesize;

#if HAVE_NUMA_H && HAVE_LINUX_MEMPOLICY_H && HAVE_NUMAIF_H \
	&& HAVE_MPOL_CONSTANTS
	unsigned long nmask[MAXNODES / BITS_PER_LONG] = { 0 };
#endif

	ret = get_allowed_nodes_arr(NH_MEMS | NH_CPUS, &num_nodes, &nodes);
	if (ret != 0)
		tst_brkm(TBROK | TERRNO, cleanup, "get_allowed_nodes_arr");
	if (num_nodes < 2) {
		tst_resm(TINFO, "need NUMA system support");
		free(nodes);
		return;
	}

	pagesize = sysconf(_SC_PAGE_SIZE);
	length = nr_pages * pagesize;

	memory = malloc(num_nodes * sizeof(char *));
	for (i = 0; i < num_nodes; i++) {
		memory[i] = mmap(NULL, length, PROT_READ | PROT_WRITE,
				 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (memory[i] == MAP_FAILED)
			tst_brkm(TBROK | TERRNO, tst_exit, "mmap");
#ifdef HAVE_MADV_MERGEABLE
		if (madvise(memory[i], length, MADV_MERGEABLE) == -1)
			tst_brkm(TBROK | TERRNO, tst_exit, "madvise");
#endif
#if HAVE_NUMA_H && HAVE_LINUX_MEMPOLICY_H && HAVE_NUMAIF_H \
	&& HAVE_MPOL_CONSTANTS
		clean_node(nmask);
		set_node(nmask, nodes[i]);
		/*
		 * Use mbind() to make sure each node contains a
		 * length-sized region of memory.
		 */
		ret = mbind(memory[i], length, MPOL_BIND, nmask, MAXNODES, 0);
		if (ret == -1)
			tst_brkm(TBROK | TERRNO, tst_exit, "mbind");
#endif
		memset(memory[i], 10, length);
	}

	SAFE_FILE_PRINTF(cleanup, PATH_KSM "sleep_millisecs", "0");
	SAFE_FILE_PRINTF(cleanup, PATH_KSM "pages_to_scan", "%ld",
			 nr_pages * num_nodes);

	/*
	 * The merge_across_nodes setting can be changed only when there
	 * are no KSM shared pages in the system, so set run to 2 to
	 * unmerge pages first, then to 1 after changing
	 * merge_across_nodes, to remerge according to the new setting.
	 */
	SAFE_FILE_PRINTF(cleanup, PATH_KSM "run", "2");
	wait_ksmd_done();

	tst_resm(TINFO, "Start to test KSM with merge_across_nodes=1");
	SAFE_FILE_PRINTF(cleanup, PATH_KSM "merge_across_nodes", "1");
	SAFE_FILE_PRINTF(cleanup, PATH_KSM "run", "1");
	group_check(1, 1, nr_pages * num_nodes - 1, 0, 0, 0,
		    nr_pages * num_nodes);

	SAFE_FILE_PRINTF(cleanup, PATH_KSM "run", "2");
	wait_ksmd_done();

	tst_resm(TINFO, "Start to test KSM with merge_across_nodes=0");
	SAFE_FILE_PRINTF(cleanup, PATH_KSM "merge_across_nodes", "0");
	SAFE_FILE_PRINTF(cleanup, PATH_KSM "run", "1");
	group_check(1, num_nodes, nr_pages * num_nodes - num_nodes,
		    0, 0, 0, nr_pages * num_nodes);

	SAFE_FILE_PRINTF(cleanup, PATH_KSM "run", "2");
	wait_ksmd_done();
}