/*
 * Queries the NUMA node backing each page of the allocation [ptr, ptr+size)
 * and records one test property per populated node ("Node<id>" -> number of
 * pages resident there).  Any move_pages() failure or per-page error status
 * is reported on stderr and aborts the recording (no properties emitted).
 */
void record_page_association(const void *ptr, const size_t size, const size_t page_size)
{
    const size_t pages_number = get_num_of_pages(size, page_size);
    std::vector<void *> address = get_address_of_pages(ptr, pages_number, page_size);
    const int max_node_id = numa_max_node();
    std::vector<int> nodes(pages_number);
    std::vector<int> pages_on_node(max_node_id + 1);

    // move_pages() with a NULL destination-node list only queries the
    // current node of every page; nothing is migrated.
    if (move_pages(0, pages_number, address.data(), NULL, nodes.data(), MPOL_MF_MOVE)) {
        fprintf(stderr, "Error: move_pages() returned %s\n", strerror(errno));
        return;
    }
    for (size_t i = 0; i < pages_number; i++) {
        // A negative status is a per-page -errno (e.g. -EFAULT for an
        // unmapped page).
        if (nodes[i] < 0) {
            fprintf(stderr, "Error: status of page %p is %d\n", address[i], nodes[i]);
            return;
        } else {
            pages_on_node[nodes[i]]++;
        }
    }
    for (size_t i = 0; i < (size_t)max_node_id + 1; i++) {
        if (pages_on_node[i] > 0) {
            char buffer[1024];
            // %zu, not %zd: 'i' is an unsigned size_t; passing it to the
            // signed %zd conversion is undefined behavior.
            snprintf(buffer, sizeof(buffer), "Node%zu", i);
            GTestAdapter::RecordProperty(buffer, pages_on_node[i]);
        }
    }
}
void force_move_pages(const void* data_, const size_t n, const size_t selem, const enum numa_distrib_type distrib, const size_t distrib_parameter) { const char* data = (const char*)data_; const size_t elem_per_page = ASSUMED_PAGE_SIZE/selem; const size_t np = n / elem_per_page; int status[np]; int nodes[np]; const char* pages[np]; size_t i; long res; #ifndef __MIC__ const int nmn = numa_num_configured_nodes(); // fprintf(stderr, "%s:%d elem_per_page = %zd, nmn = %d ; np = %zd\n", __PRETTY_FUNCTION__, __LINE__, elem_per_page, nmn, np); for (i = 0 ; i < np ; i++) { pages[i] = data + i * ASSUMED_PAGE_SIZE; switch (distrib) { case HYDRO_NUMA_NONE: nodes[i] = -1; break; case HYDRO_NUMA_INTERLEAVED: nodes[i] = i % nmn; break; case HYDRO_NUMA_ONE_BLOCK_PER_NODE: { const size_t ppernode = np / nmn; size_t nnode = i / ppernode; if (nnode > (nmn-1)) nnode = nmn - 1; nodes[i] = nnode; } break; case HYDRO_NUMA_SIZED_BLOCK_RR: { const size_t numb = i / (distrib_parameter/elem_per_page); size_t nnode = numb % nmn; nodes[i] = nnode; } break; } } if (HYDRO_NUMA_NONE != distrib) { res = move_pages(0, np, (void**)pages, nodes, status, MPOL_MF_MOVE); } else { res = move_pages(0, np, (void**)pages, NULL , status, MPOL_MF_MOVE); } if (res != 0) { fprintf(stderr, "%s:%d: move_pages -> errno = %d\n", __PRETTY_FUNCTION__, __LINE__, errno); } else { int last_node = status[0]; const char* last; const char* cur = data; // fprintf(stderr, "%s:%d: move_pages for %p of %zd elements (%zd bytes)\n", __PRETTY_FUNCTION__, __LINE__, data, n, n * selem); // fprintf(stderr, "\t%d: %p ... ", last_node, cur ); last = cur; for (i = 1 ; i < np ; i++) { if (status[i] != last_node) { cur += ASSUMED_PAGE_SIZE; // fprintf(stderr, "%p (%llu)\n", cur, (unsigned long long)cur - (unsigned long long)last); last_node = status[i]; // fprintf(stderr, "\t%d: %p ... 
", last_node, cur); last = cur; } else { cur += ASSUMED_PAGE_SIZE; } } // fprintf(stderr, "%p (%llu)\n", cur, (unsigned long long)cur - (unsigned long long)last); } #endif }
/*
 * Verifies that every page backing [addr, addr+size) is physically placed
 * on a high-bandwidth NUMA node.  With HBW_TOUCH_PAGES set, each page is
 * touched first so move_pages() can report a node for not-yet-faulted pages.
 *
 * Returns:
 *   0      - all pages are backed by HBW memory
 *   -1     - at least one page is not HBW, or its location is unknown
 *   EINVAL - addr is NULL, size is 0, or flags has unsupported bits set
 *   EFAULT - move_pages() failed for a batch of pages
 */
MEMKIND_EXPORT int hbw_verify_memory_region(void* addr, size_t size, int flags)
{
    /*
     * if size is invalid, flags have unsupported bit set or if addr is NULL.
     */
    if (addr == NULL || size == 0 || flags & ~HBW_TOUCH_PAGES) {
        return EINVAL;
    }
    /*
     * 4KB is the smallest pagesize. When pagesize is bigger, pages are
     * verified more than once.
     */
    const size_t page_size = sysconf(_SC_PAGESIZE);
    const size_t page_mask = ~(page_size - 1);
    /*
     * block size should be power of two to enable compiler optimizations
     */
    const unsigned block_size = 64;
    /* cast to char*: arithmetic on void* is a GNU extension, not standard */
    char *end = (char*)addr + size;
    char *aligned_beg = (char*)((uintptr_t)addr & page_mask);
    nodemask_t nodemask;
    struct bitmask expected_nodemask = {NUMA_NUM_NODES, nodemask.n};
    /* zero the mask first so stale stack bits can never count as "HBW" */
    memset(&nodemask, 0, sizeof(nodemask));
    memkind_hbw_all_get_mbind_nodemask(NULL, expected_nodemask.maskp,
                                       expected_nodemask.size);

    while (aligned_beg < end) {
        int nodes[block_size];
        void* pages[block_size];
        int i = 0, page_count = 0;
        char *iter_end = aligned_beg + block_size * page_size;
        if (iter_end > end) {
            iter_end = end;
        }
        while (aligned_beg < iter_end) {
            if (flags & HBW_TOUCH_PAGES) {
                hbw_touch_page(aligned_beg);
            }
            pages[page_count++] = aligned_beg;
            aligned_beg += page_size;
        }
        /* NULL node list: query-only; nothing is migrated here */
        if (move_pages(0, page_count, pages, NULL, nodes, MPOL_MF_MOVE)) {
            return EFAULT;
        }
        for (i = 0; i < page_count; i++) {
            /*
             * negative value of nodes[i] indicates that move_pages could not
             * establish page location, e.g. addr is not pointing to valid
             * virtual mapping
             */
            if (nodes[i] < 0) {
                return -1;
            }
            /*
             * if nodes[i] is not present in expected_nodemask then
             * physical memory backing page is not hbw
             */
            if (!numa_bitmask_isbitset(&expected_nodemask, nodes[i])) {
                return -1;
            }
        }
    }
    return 0;
}