static int migrate_to_node(pid_t pid, int node) { unsigned long nodemask_size, max_node; unsigned long *old_nodes, *new_nodes; int i; tst_resm(TINFO, "pid(%d) migrate pid %d to node -> %d", getpid(), pid, node); max_node = LTP_ALIGN(get_max_node(), sizeof(unsigned long)*8); nodemask_size = max_node / 8; old_nodes = SAFE_MALLOC(NULL, nodemask_size); new_nodes = SAFE_MALLOC(NULL, nodemask_size); memset(old_nodes, 0, nodemask_size); memset(new_nodes, 0, nodemask_size); for (i = 0; i < num_nodes; i++) set_bit(old_nodes, nodes[i], 1); set_bit(new_nodes, node, 1); TEST(ltp_syscall(__NR_migrate_pages, pid, max_node, old_nodes, new_nodes)); if (TEST_RETURN != 0) { if (TEST_RETURN < 0) tst_resm(TFAIL | TERRNO, "migrate_pages failed " "ret: %ld, ", TEST_RETURN); else tst_resm(TWARN, "migrate_pages could not migrate all " "pages, not migrated: %ld", TEST_RETURN); print_mem_stats(pid, node); } free(old_nodes); free(new_nodes); return TEST_RETURN; }
static void setup(void) { int node, ret; tst_require_root(); TEST(ltp_syscall(__NR_migrate_pages, 0, 0, NULL, NULL)); if (numa_available() == -1) tst_brkm(TCONF, NULL, "NUMA not available"); ret = get_allowed_nodes(NH_MEMS, 1, &node); if (ret < 0) tst_brkm(TBROK | TERRNO, NULL, "get_allowed_nodes_arr: %d", ret); sane_max_node = LTP_ALIGN(get_max_node(), sizeof(unsigned long)*8); sane_nodemask_size = sane_max_node / 8; sane_old_nodes = SAFE_MALLOC(NULL, sane_nodemask_size); sane_new_nodes = SAFE_MALLOC(NULL, sane_nodemask_size); memset(sane_old_nodes, 0, sane_nodemask_size); memset(sane_new_nodes, 0, sane_nodemask_size); set_bit(sane_old_nodes, node, 1); set_bit(sane_new_nodes, node, 1); TEST_PAUSE; }
/*
 * Sort a singly linked student list by selection sort: repeatedly detach
 * the maximum node (per get_max_node()) from the input list and hand it
 * to add_node_new() to build up the result list.
 *
 * Consumes the input list; returns the head of the newly built list.
 */
stu_node *stu_sort_link(stu_node *head)
{
	stu_node *sorted = NULL;
	stu_node *top;

	while (head) {
		top = get_max_node(head);
		head = del_max_node(head, top);
		sorted = add_node_new(sorted, top);
	}

	return sorted;
}
/*
 * get_allowed_nodes_arr - get number and array of available nodes
 * @num_nodes: pointer where number of available nodes will be stored
 * @nodes: array of available node ids, this is MPOL_F_MEMS_ALLOWED
 *	   node bitmask compacted (without holes), so that each field
 *	   contains node number. If NULL only num_nodes is
 *	   returned, otherwise it contains a newly allocated array,
 *	   which caller is responsible to free.
 * RETURNS:
 *	   0 on success
 *	  -1 on allocation failure
 *	  -2 on get_mempolicy failure
 *
 * On failure *nodes is freed and reset to NULL, so the caller only ever
 * owns the array when 0 is returned.
 */
int get_allowed_nodes_arr(int flag, int *num_nodes, int **nodes)
{
	int ret = 0;
#if HAVE_NUMA_H
	unsigned long i;
	nodemask_t *nodemask = NULL;
#endif
	*num_nodes = 0;
	if (nodes)
		*nodes = NULL;

#if HAVE_NUMA_H
	/* round node count up to a whole number of unsigned longs */
	unsigned long max_node = LTP_ALIGN(get_max_node(),
					   sizeof(unsigned long) * 8);
	unsigned long nodemask_size = max_node / 8;

	nodemask = malloc(nodemask_size);
	if (nodes)
		*nodes = malloc(sizeof(int) * max_node);

	do {
		if (nodemask == NULL || (nodes && (*nodes == NULL))) {
			ret = -1;
			break;
		}

		/* allow all nodes at start, then filter based on flags */
		get_nodemask_allnodes(nodemask, max_node);

		if ((flag & NH_MEMS) == NH_MEMS) {
			ret = filter_nodemask_mem(nodemask, max_node);
			if (ret < 0)
				break;
		}

		if ((flag & NH_CPUS) == NH_CPUS)
			filter_nodemask_cpu(nodemask, max_node);

		/* compact the surviving bits into the id array */
		for (i = 0; i < max_node; i++) {
			if (nodemask_isset(nodemask, i)) {
				if (nodes)
					(*nodes)[*num_nodes] = (int)i;
				(*num_nodes)++;
			}
		}
	} while (0);

	/*
	 * Fix: previously the *nodes allocation leaked on every error path
	 * (nodemask alloc failure, filter_nodemask_mem failure).  Release
	 * it here so the documented contract holds: *nodes is only valid,
	 * and only owned by the caller, when the function returns 0.
	 */
	if (ret != 0 && nodes && *nodes != NULL) {
		free(*nodes);
		*nodes = NULL;
	}

	free(nodemask);
#endif
	return ret;
}