int main(void) { int max = numa_max_node(); int maxmask = numa_num_possible_nodes(); struct bitmask *nodes, *mask; int pagesize = getpagesize(); int i; int pol; int node; int err = 0; nodes = numa_bitmask_alloc(maxmask); mask = numa_bitmask_alloc(maxmask); for (i = max; i >= 0; --i) { char *mem = mmap(NULL, pagesize*(max+1), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); char *adr = mem; if (mem == (char *)-1) err("mmap"); printf("%d offset %lx\n", i, (long)(adr - mem)); numa_bitmask_clearall(nodes); numa_bitmask_clearall(mask); numa_bitmask_setbit(nodes, i); if (mbind(adr, pagesize, MPOL_PREFERRED, nodes->maskp, nodes->size, 0) < 0) err("mbind"); ++*adr; if (get_mempolicy(&pol, mask->maskp, mask->size, adr, MPOL_F_ADDR) < 0) err("get_mempolicy"); assert(pol == MPOL_PREFERRED); assert(numa_bitmask_isbitset(mask, i)); node = 0x123; if (get_mempolicy(&node, NULL, 0, adr, MPOL_F_ADDR|MPOL_F_NODE) < 0) err("get_mempolicy2"); printf("got node %d expected %d\n", node, i); if (node != i) err = 1; } return err; }
/*
 * Return the NUMA node on which the virtio_net structure for @vid
 * resides, or -1 when the device is unknown, the kernel query fails,
 * or vhost NUMA support is compiled out.
 */
int rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	int node;
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return -1;

	/* Ask the kernel which node backs the device structure itself. */
	if (get_mempolicy(&node, NULL, 0, dev,
			  MPOL_F_NODE | MPOL_F_ADDR) < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to query numa node: %s\n",
			vid, rte_strerror(errno));
		return -1;
	}

	return node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}
/*
 * Return the NUMA node id of the page containing @ptr, or -1 if the
 * lookup fails (on error the kernel leaves the output untouched, so the
 * -1 sentinel is reported unchanged).
 */
int get_numa_node_id(void* ptr)
{
	int node = -1;

	(void)get_mempolicy(&node, NULL, 0, ptr, MPOL_F_NODE | MPOL_F_ADDR);
	return node;
}
/*
 * Minimal mbind()/get_mempolicy() round-trip test: MPOL_BIND one
 * anonymous page to node 0, then verify the policy reads back via
 * get_mempolicy(MPOL_F_ADDR).  Prints "OK" on success.
 *
 * NOTE(review): MAXNODE is used both as the mask array length (in
 * unsigned longs) and as the maxnode argument (in bits); only the first
 * MAXNODE bits are consulted by the kernel — confirm that is intended.
 */
int main()
{
	int ret;
	int len;
	int policy = -1;
	unsigned char *p;
	unsigned long mask[MAXNODE] = { 0 };
	unsigned long retmask[MAXNODE] = { 0 };

	len = getpagesize();
	/* BUG FIX: fd must be -1 for portable MAP_ANONYMOUS usage. */
	p = mmap(NULL, len, PROT_READ|PROT_WRITE,
		 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		/* BUG FIX: this failure was mislabelled "mbind err"; it is
		 * the mmap that failed, and continuing with MAP_FAILED
		 * would make every later step meaningless. */
		printf("mmap err: %d\n", errno);
		return 1;
	}

	mask[0] = 1;	/* nodemask with only node 0 set */
	ret = mbind(p, len, MPOL_BIND, mask, MAXNODE, 0);
	if (ret < 0)
		printf("mbind err: %d %d\n", ret, errno);

	ret = get_mempolicy(&policy, retmask, MAXNODE, p, MPOL_F_ADDR);
	if (ret < 0)
		printf("get_mempolicy err: %d %d\n", ret, errno);

	if (policy == MPOL_BIND)
		printf("OK\n");
	else
		printf("ERROR: policy is %d\n", policy);
	return 0;
}
/*
 * Verify that every page of the allocation [ptr, ptr+size) carries the
 * expected memory policy, and that the node mask the kernel reports for
 * each page is consistent with expected_bitmask for that policy:
 *  - MPOL_INTERLEAVE: masks must be exactly equal;
 *  - MPOL_BIND / MPOL_PREFERRED: every reported node must be expected;
 *  - MPOL_DEFAULT: only the policy value itself is checked.
 */
void check_numa_nodes(unique_bitmask_ptr &expected_bitmask, int policy, void *ptr, size_t size)
{
    const size_t page_size = sysconf(_SC_PAGESIZE);
    const size_t page_count = get_num_of_pages(size, page_size);
    std::vector<void *> pages = get_address_of_pages(ptr, page_count, page_size);
    unique_bitmask_ptr actual_bitmask = make_nodemask_ptr();

    for (void *page : pages) {
        int actual_policy = -1;
        ASSERT_EQ(0, get_mempolicy(&actual_policy, actual_bitmask->maskp,
                                   actual_bitmask->size, page, MPOL_F_ADDR));
        ASSERT_EQ(policy, actual_policy);

        switch (policy) {
            case MPOL_INTERLEAVE:
                /* Interleaving must use exactly the expected node set. */
                EXPECT_TRUE(numa_bitmask_equal(expected_bitmask.get(),
                                               actual_bitmask.get()));
                break;
            case MPOL_DEFAULT:
                /* Nothing further to verify for the default policy. */
                break;
            case MPOL_BIND:
            case MPOL_PREFERRED:
                /* Every node actually used must be among the expected ones. */
                for (int node = 0; node < numa_num_possible_nodes(); node++) {
                    if (numa_bitmask_isbitset(actual_bitmask.get(), node)) {
                        EXPECT_TRUE(numa_bitmask_isbitset(expected_bitmask.get(), node));
                    }
                }
                break;
            default:
                assert(!"Unknown policy\n");
        }
    }
}
/*
 * get_node() -- fetch numa node id of page at vaddr
 * [from Ray Bryant's [SGI] memory migration tests]
 *
 * Returns the node id on success, -1 if the kernel query fails.
 */
static int get_node(void *vaddr)
{
	int node;

	if (get_mempolicy(&node, NULL, 0, vaddr, MPOL_F_NODE | MPOL_F_ADDR))
		return -1;
	return node;
}
void show(void) { unsigned long prefnode; struct bitmask *membind, *interleave, *cpubind; unsigned long cur; int policy; int numa_num_nodes = numa_num_possible_nodes(); if (numa_available() < 0) { show_physcpubind(); printf("No NUMA support available on this system.\n"); exit(1); } cpubind = numa_get_run_node_mask(); prefnode = numa_preferred(); interleave = numa_get_interleave_mask(); membind = numa_get_membind(); cur = numa_get_interleave_node(); policy = 0; if (get_mempolicy(&policy, NULL, 0, 0, 0) < 0) perror("get_mempolicy"); printf("policy: %s\n", policy_name(policy)); printf("preferred node: "); switch (policy) { case MPOL_PREFERRED: if (prefnode != -1) { printf("%ld\n", prefnode); break; } /*FALL THROUGH*/ case MPOL_DEFAULT: printf("current\n"); break; case MPOL_INTERLEAVE: printf("%ld (interleave next)\n",cur); break; case MPOL_BIND: printf("%d\n", find_first_bit(&membind, numa_num_nodes)); break; } if (policy == MPOL_INTERLEAVE) { printmask("interleavemask", interleave); printf("interleavenode: %ld\n", cur); } show_physcpubind(); printmask("cpubind", cpubind); // for compatibility printmask("nodebind", cpubind); printmask("membind", membind); }
/* This test is run with overridden MEMKIND_HBW_NODES environment variable * and tries to perform allocation from DRAM using hbw_malloc() using * default HBW_POLICY_PREFERRED policy. */ int main() { struct bitmask *expected_nodemask = NULL; struct bitmask *returned_nodemask = NULL; void *ptr = NULL; int ret = 0; int status = 0; ptr = hbw_malloc(KB); if (ptr == NULL) { printf("Error: allocation failed\n"); goto exit; } expected_nodemask = numa_allocate_nodemask(); status = memkind_hbw_all_get_mbind_nodemask(NULL, expected_nodemask->maskp, expected_nodemask->size); if (status != MEMKIND_ERROR_ENVIRON) { printf("Error: wrong return value from memkind_hbw_all_get_mbind_nodemask()\n"); printf("Expected: %d\n", MEMKIND_ERROR_ENVIRON); printf("Actual: %d\n", status); goto exit; } returned_nodemask = numa_allocate_nodemask(); status = get_mempolicy(NULL, returned_nodemask->maskp, returned_nodemask->size, ptr, MPOL_F_ADDR); if (status) { printf("Error: get_mempolicy() returned %d\n", status); goto exit; } ret = numa_bitmask_equal(returned_nodemask, expected_nodemask); if (!ret) { printf("Error: Memkind hbw and allocated pointer nodemasks are not equal\n"); } exit: if (expected_nodemask) { numa_free_nodemask(expected_nodemask); } if (returned_nodemask) { numa_free_nodemask(returned_nodemask); } if (ptr) { hbw_free(ptr); } return ret; }
void setpol(unsigned long offset, unsigned long length, int policy, unsigned long nodes) { long i, end; printf("off:%lx length:%lx policy:%d nodes:%lx\n", offset, length, policy, nodes); if (mbind(map + offset*pagesize, length*pagesize, policy, &nodes, 8, 0) < 0) { printf("mbind: %s offset %lx length %lx policy %d nodes %lx\n", strerror(errno), offset*pagesize, length*pagesize, policy, nodes); return; } for (i = offset; i < offset+length; i++) { pages[i].mask = nodes; pages[i].policy = policy; } i = offset - 20; if (i < 0) i = 0; end = offset+length+20; if (end > PAGES) end = PAGES; for (; i < end; i++) { int pol2; unsigned long nodes2; if (get_mempolicy(&pol2, &nodes2, sizeof(long)*8, map+i*pagesize, MPOL_F_ADDR) < 0) err("get_mempolicy"); if (pol2 != pages[i].policy) { printf("%lx: got policy %d expected %d, nodes got %lx expected %lx\n", i, pol2, pages[i].policy, nodes2, pages[i].mask); } if (policy != MPOL_DEFAULT && nodes2 != pages[i].mask) { printf("%lx: nodes %lx, expected %lx, policy %d\n", i, nodes2, pages[i].mask, policy); } } }
/*
 * Print the calling process's task-wide memory policy and nodemask (as
 * obtained from get_mempolicy() with addr == NULL) via CmiPrintf().
 * Returns 0 on success, -1 if the policy query fails.
 */
int print_mem_affinity() {
	mem_aff_mask mask;
	unsigned int len = 8*sizeof(mask);	/* mask width in bits for get_mempolicy() */
	char spol[16];				/* human-readable policy name */
	int policy;
	spol[0]='\0';
	/* Memory policy to the current process */
	if ((get_mempolicy(&policy,&mask,len,0,0)) < 0) {
		perror("mem_getaffinity");
		return -1;
	}
	/* NOTE(review): MPOL_DEFAULT (and any other unlisted policy) falls
	 * into the "PREFERRED" label below — confirm that is intended. */
	if (policy == MPOL_INTERLEAVE)
		strcpy(spol,"INTERLEAVE");
	else if (policy == MPOL_BIND)
		strcpy(spol,"BIND");
	else
		strcpy(spol,"PREFERRED");
	/* NOTE(review): printing `mask` with %08lx assumes mem_aff_mask is
	 * an unsigned-long-sized integer type — verify its definition. */
	CmiPrintf("%d: Mem affinity mask is: %08lx with policy %s\n",
	          CmiMyPe(),mask,spol);
	return 0;
}
/* * verify_pages_on_nodes() - verify pages are in specified nodes * @pages: array of pages to be verified * @status: the NUMA node of each page * @num: the no. of pages * @nodes: the expected NUMA nodes */ void verify_pages_on_nodes(void **pages, int *status, unsigned int num, int *nodes) { #if HAVE_NUMA_H unsigned int i; int which_node; int ret; for (i = 0; i < num; i++) { if (status[i] != nodes[i]) { tst_resm(TFAIL, "page %d on node %d, " "expected on node %d", i, status[i], nodes[i]); return; } /* Based on inputs from Andi Kleen. * * Retrieves numa node for the given page. This does * not seem to be documented in the man pages. */ ret = get_mempolicy(&which_node, NULL, 0, pages[i], MPOL_F_NODE | MPOL_F_ADDR); if (ret == -1) { tst_resm(TBROK | TERRNO, "error getting memory policy " "for page %p", pages[i]); return; } if (which_node != nodes[i]) { tst_resm(TFAIL, "page %p is not in node %d ", pages[i], nodes[i]); return; } } tst_resm(TPASS, "pages are present in expected nodes"); #else tst_resm(TCONF, "NUMA support not provided"); #endif }
/* * Get the memory policy of a process */ void linux_get_mempol(int *node, int *mem_pol) { unsigned long nset; unsigned long addr; int mempol=-1,nnodes,i, error; nnodes = local_topo->nnodes; if (nnodes != 0 ){ (*node) = 0; error = get_mempolicy (&mempol,NULL,0,0,0); switch(mempol) { case MPOL_DEFAULT: (*mem_pol) = OS; break; case MPOL_BIND: (*mem_pol) = LOCAL; (*node) = linux_get_nodeid(); break; case MPOL_INTERLEAVE: (*mem_pol) = INTERLEAVE; (*node) = -1; break; default: (*mem_pol) = -1; (*node) = -1; break; } } else (*mem_pol) = -1; if (error < 0) printf("\nWARNING: Can not retrieve memory binding.\n"); }
/*
 * Initialize the global numa_info table by probing the kernel's NUMA
 * support and reading per-node memory, processor and distance data.
 *
 * Returns 0 on success, -1 when the kernel lacks get_mempolicy()
 * (ENOSYS), and a negative errno code on other failures.
 */
int proc_numa_init(void)
{
	uint32_t i;

	/* BUG FIX: a local `int errno;` declaration shadowed the real
	 * errno (declaring an identifier named errno is undefined behavior
	 * per C11 7.5), so the ENOSYS probe examined an indeterminate
	 * value.  Use <errno.h>'s errno instead. */
	if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
		numa_info.numberOfNodes = 0;
		numa_info.nodes = NULL;
		return -1;
	}

	/* First determine maximum number of nodes */
	numa_info.numberOfNodes = setConfiguredNodes()+1;
	numa_info.nodes = (NumaNode*) malloc(numa_info.numberOfNodes * sizeof(NumaNode));
	if (!numa_info.nodes) {
		return -ENOMEM;
	}

	for (i = 0; i < numa_info.numberOfNodes; i++) {
		numa_info.nodes[i].id = i;
		nodeMeminfo(i, &numa_info.nodes[i].totalMemory,
			    &numa_info.nodes[i].freeMemory);
		numa_info.nodes[i].numberOfProcessors =
			nodeProcessorList(i, &numa_info.nodes[i].processors);
		if (numa_info.nodes[i].numberOfProcessors == 0) {
			/* NOTE(review): numa_info.nodes is not freed on the
			 * error paths below — confirm callers tolerate a
			 * partially initialized numa_info. */
			return -EFAULT;
		}
		numa_info.nodes[i].numberOfDistances =
			nodeDistanceList(i, numa_info.numberOfNodes,
					 &numa_info.nodes[i].distances);
		if (numa_info.nodes[i].numberOfDistances == 0) {
			return -EFAULT;
		}
	}

	return 0;
}
int main (int argc, char** argv) { int ret, c; int i, repeat = 5; int cpu = 2; static int errortype = 1; static int verbose = 1; static int disableHuge = 0; static int madvisePoison = 0; static int poll_exit=0; static long length; struct bitmask *nodes, *gnodes; int gpolicy; unsigned long error_opt; void *vaddrmin = (void *)-1UL, *vaddrmax = NULL; static size_t pdcount=0; unsigned long mattr, addrend, pages, count, nodeid, paddr = 0; unsigned long addr_start=0, nodeid_start=-1, mattr_start=-1; unsigned int pagesize = getpagesize(); char pte_str[20]; struct dlook_get_map_info req; static page_desc_t *pdbegin=NULL; page_desc_t *pd, *pdend; length = memsize("100k"); nodes = numa_allocate_nodemask(); gnodes = numa_allocate_nodemask(); progname = argv[0]; while (1) { static struct option long_options[] = { {"verbose", no_argument, &verbose, 1}, {"delay", no_argument, &delay, 1}, {"disableHuge", no_argument, &disableHuge, 1}, {"poll", no_argument, &poll_exit, 1}, {"madvisePoison", no_argument, &madvisePoison, 1}, {"manual", no_argument, &manual, 1}, {"cpu", required_argument, 0, 'c'}, {"errortype", required_argument, 0, 'e'}, {"help", no_argument, 0, 'h'}, {"length", required_argument, 0, 'l'} }; /* getopt_long stores the option index here. */ int option_index = 0; c = getopt_long (argc, argv, "hc:e:l:", long_options, &option_index); /* Detect the end of the options. */ if (c == -1) break; switch (c) { case 'c': cpu = atoi(optarg); break; case 'e': errortype = atoi(optarg); break; case 'h': help(); case 'l': /* Not exposed */ printf ("option -l with value `%s'\n", optarg); length = memsize("optarg"); break; case '?': /* getopt_long already printed an error message. 
*/ exit(-1); } } cpu_process_setaffinity(getpid(), cpu); error_opt = get_etype(errortype); buf = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); if (mbind((void *)buf, length, MPOL_DEFAULT, nodes->maskp, nodes->size, 0) < 0){ perror("mbind error\n"); } /* Disable Hugepages */ if (disableHuge) madvise((void *)buf, length, MADV_NOHUGEPAGE); if (madvisePoison) madvise((void *)buf, length,MADV_HWPOISON ); gpolicy = -1; if (get_mempolicy(&gpolicy, gnodes->maskp, gnodes->size, (void *)buf, MPOL_F_ADDR) < 0) perror("get_mempolicy"); if (!numa_bitmask_equal(gnodes, nodes)) { printf("nodes differ %lx, %lx!\n", gnodes->maskp[0], nodes->maskp[0]); } strcpy(pte_str, ""); addrend = ((unsigned long)buf)+length; pages = (addrend-((unsigned long)buf))/pagesize; if (pages > pdcount) { pdbegin = realloc(pdbegin, sizeof(page_desc_t)*pages); pdcount = pages; } req.pid = getpid(); req.start_vaddr = (unsigned long)buf; req.end_vaddr = addrend; req.pd = pdbegin; sigaction(SIGBUS, &recover_act, NULL); /*Fault in Pages */ if(!poll_exit) hog((void *)buf, length); /* Get mmap phys_addrs */ if ((fd = open(UVMCE_DEVICE, O_RDWR)) < 0) { printf("Failed to open: %s\n", UVMCE_DEVICE); exit (1); } if (ioctl(fd, UVMCE_DLOOK, &req ) < 0){ printf("Failed to INJECT_UCE\n"); exit(1); } process_map(pd,pdbegin, pdend, pages, buf, addrend, pagesize, mattr, nodeid, paddr, pte_str, nodeid_start, mattr_start, addr_start); printf("\n\tstart_vaddr\t 0x%016lx length\t 0x%x\n\tend_vaddr\t 0x%016lx pages\t %ld\n", buf , length, addrend, pages); uv_inject(pd,pdbegin, pdend, pages, (unsigned long)buf, addrend, pagesize, mattr, nodeid, paddr, pte_str, nodeid_start, mattr_start, addr_start, error_opt); if (delay){ printf("Enter char to consume bad memory.."); getchar(); } if (error_opt != UVMCE_PATROL_SCRUB_UCE){ consume_it((void *)buf, length); } out: close(fd); return 0; }
/*
 * numa_realloc() - move the virtio_net device structure and the
 * virtqueue at @index onto the NUMA node that backs old_vq->desc.
 *
 * The node of old_vq->desc is taken as the target; if the vq or dev
 * structure currently lives on a different node, a copy is allocated
 * there with rte_malloc_socket() and the old one freed.  On any query
 * or allocation failure the structures are left where they are and the
 * current dev pointer is returned.
 */
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net *old_dev;
	struct vhost_virtqueue *old_vq, *vq;
	int ret;

	old_dev = dev;
	vq = old_vq = dev->virtqueue[index];

	/* Target node: whatever currently backs the descriptor ring. */
	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			    MPOL_F_NODE | MPOL_F_ADDR);
	/* check if we need to reallocate vq */
	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
			     MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate vq from %d to %d node\n", oldnode, newnode);
		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
		if (!vq)
			return dev;

		memcpy(vq, old_vq, sizeof(*vq));
		rte_free(old_vq);
	}

	/* check if we need to reallocate dev */
	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get dev numa information.\n");
		/* vq may already have moved; still publish it below. */
		goto out;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate dev from %d to %d node\n",
			oldnode, newnode);
		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
		if (!dev) {
			dev = old_dev;
			goto out;
		}

		memcpy(dev, old_dev, sizeof(*dev));
		rte_free(old_dev);
	}

out:
	/* Re-link the (possibly new) vq and re-register the device. */
	dev->virtqueue[index] = vq;
	vhost_devices[dev->vid] = dev;

	/* A moved vq needs its IOTLB state re-initialized. */
	if (old_vq != vq)
		vhost_user_iotlb_init(dev, index);

	return dev;
}
/*
 * numa_realloc() - move the virtio_net device structure and the
 * virtqueue pair containing @index onto the NUMA node that backs
 * old_vq->desc.  Virtqueues are allocated in pairs (VIRTIO_QNUM), so
 * the whole pair is copied together and only the first index of a pair
 * triggers the check.  On any failure the current dev is returned
 * unchanged.
 */
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net *old_dev;
	struct vhost_virtqueue *old_vq, *vq;
	int ret;

	/*
	 * vq is allocated on pairs, we should try to do realloc
	 * on first queue of one queue pair only.
	 */
	if (index % VIRTIO_QNUM != 0)
		return dev;

	old_dev = dev;
	vq = old_vq = dev->virtqueue[index];

	/* Target node: whatever currently backs the descriptor ring. */
	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			    MPOL_F_NODE | MPOL_F_ADDR);

	/* check if we need to reallocate vq */
	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
			     MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate vq from %d to %d node\n", oldnode, newnode);
		/* Copy/allocate the full queue pair, not a single vq. */
		vq = rte_malloc_socket(NULL, sizeof(*vq) * VIRTIO_QNUM, 0,
				       newnode);
		if (!vq)
			return dev;

		memcpy(vq, old_vq, sizeof(*vq) * VIRTIO_QNUM);
		rte_free(old_vq);
	}

	/* check if we need to reallocate dev */
	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get dev numa information.\n");
		/* vq may already have moved; still publish it below. */
		goto out;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate dev from %d to %d node\n",
			oldnode, newnode);
		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
		if (!dev) {
			dev = old_dev;
			goto out;
		}

		memcpy(dev, old_dev, sizeof(*dev));
		rte_free(old_dev);
	}

out:
	/* Re-link both queues of the pair and re-register the device. */
	dev->virtqueue[index] = vq;
	dev->virtqueue[index + 1] = vq + 1;
	vhost_devices[dev->device_fh] = dev;

	return dev;
}
/*
 * numa_realloc() - reallocate the device's linked-list node and/or the
 * virtqueue at @index onto the NUMA node that backs old_vq->desc,
 * keeping the ll_root device list consistent.  Returns the (possibly
 * reallocated) device; on query failure the original dev is returned
 * untouched.
 */
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net_config_ll *old_ll_dev, *new_ll_dev = NULL;
	struct vhost_virtqueue *old_vq, *new_vq = NULL;
	int ret;
	int realloc_dev = 0, realloc_vq = 0;

	old_ll_dev = (struct virtio_net_config_ll *)dev;
	old_vq = dev->virtqueue[index];

	/* Target node: whatever currently backs the descriptor ring;
	 * compare against the node holding the device list entry. */
	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			    MPOL_F_NODE | MPOL_F_ADDR);
	ret = ret | get_mempolicy(&oldnode, NULL, 0, old_ll_dev,
				  MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vring desc or dev numa information.\n");
		return dev;
	}
	if (oldnode != newnode)
		realloc_dev = 1;

	/* Same comparison for the virtqueue structure itself. */
	ret = get_mempolicy(&oldnode, NULL, 0, old_vq,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode)
		realloc_vq = 1;

	if (realloc_dev == 0 && realloc_vq == 0)
		return dev;

	if (realloc_dev)
		new_ll_dev = rte_malloc_socket(NULL,
			sizeof(struct virtio_net_config_ll), 0, newnode);
	if (realloc_vq)
		new_vq = rte_malloc_socket(NULL,
			sizeof(struct vhost_virtqueue), 0, newnode);

	/* NOTE(review): this bails out only when BOTH pointers are NULL.
	 * If realloc_dev and realloc_vq are both set and exactly one
	 * rte_malloc_socket() call fails, the corresponding memcpy below
	 * dereferences a NULL new_vq or new_ll_dev — verify whether this
	 * path is reachable and needs guarding per-allocation. */
	if (!new_ll_dev && !new_vq)
		return dev;

	if (realloc_vq)
		memcpy(new_vq, old_vq, sizeof(*new_vq));
	if (realloc_dev)
		memcpy(new_ll_dev, old_ll_dev, sizeof(*new_ll_dev));

	/* Point whichever device copy survives at whichever vq survives. */
	(new_ll_dev ? new_ll_dev : old_ll_dev)->dev.virtqueue[index] =
		new_vq ? new_vq : old_vq;

	if (realloc_vq)
		rte_free(old_vq);
	if (realloc_dev) {
		/* Splice the new list node in place of the old one. */
		if (ll_root == old_ll_dev)
			ll_root = new_ll_dev;
		else {
			struct virtio_net_config_ll *prev = ll_root;
			while (prev->next != old_ll_dev)
				prev = prev->next;
			prev->next = new_ll_dev;
			new_ll_dev->next = old_ll_dev->next;
		}
		rte_free(old_ll_dev);
	}

	return realloc_dev ? &new_ll_dev->dev : dev;
}
/*
 * s_numa_alloc() - allocate sz bytes on the NUMA node that @cpu belongs
 * to (per the s_numa_nodes table), falling back to plain malloc(3) when
 * cpu < 0 or the NUMA topology tables are not initialized.
 *
 * The allocation is registered via s_add_addr() so the matching free
 * routine can recover its size; on registration failure the memory is
 * released and NULL returned.  Returns NULL when sz == 0 or on failure.
 */
static void *
s_numa_alloc(size_t sz, int cpu) {
  void *ret = NULL;

  if (likely(sz > 0)) {
    if (likely(cpu >= 0)) {

      if (likely(s_numa_nodes != NULL &&
                 s_n_cpus > 0)) {
        unsigned int node = s_numa_nodes[cpu];
        unsigned int allocd_node = UINT_MAX;
        struct bitmask *bmp;
        int r;

        /* Temporarily bind the task's memory policy to the target node. */
        bmp = numa_allocate_nodemask();
        numa_bitmask_setbit(bmp, node);

        errno = 0;
        /* numa(3) convention: maxnode for set_mempolicy() is the mask
         * size in bits plus one. */
        r = (int)set_mempolicy(MPOL_BIND, bmp->maskp, bmp->size + 1);
        if (likely(r == 0)) {
          errno = 0;
          ret = numa_alloc_onnode(sz, (int)node);
          if (likely(ret != NULL)) {
            lagopus_result_t rl;

            /*
             * We need this "first touch" even using the
             * numa_alloc_onnode().
             */
            (void)memset(ret, 0, sz);

            errno = 0;
            /* Ask the kernel which node actually backs the page at ret. */
            r = (int)get_mempolicy((int *)&allocd_node, NULL, 0, ret,
                                   MPOL_F_NODE|MPOL_F_ADDR);
            if (likely(r == 0)) {
              if (unlikely(node != allocd_node)) {
                /*
                 * The memory is not allocated on the node, but it is
                 * still usable. Just return it.
                 */
                lagopus_msg_warning("can't allocate " PFSZ(u) " bytes memory "
                                    "for CPU %d (NUMA node %d).\n",
                                    sz, cpu, node);
              }
            } else {
              lagopus_perror(LAGOPUS_RESULT_POSIX_API_ERROR);
              lagopus_msg_error("get_mempolicy() returned %d.\n", r);
            }

            /* Record the address/size so the matching free can find it. */
            rl = s_add_addr(ret, sz);
            if (unlikely(rl != LAGOPUS_RESULT_OK)) {
              lagopus_perror(rl);
              lagopus_msg_error("can't register the allocated address.\n");
              numa_free(ret, sz);
              ret = NULL;
            }
          }
        } else {    /* r == 0 */
          lagopus_perror(LAGOPUS_RESULT_POSIX_API_ERROR);
          lagopus_msg_error("set_mempolicy() returned %d.\n", r);
        }

        numa_free_nodemask(bmp);
        /* Restore the default policy for subsequent allocations. */
        set_mempolicy(MPOL_DEFAULT, NULL, 0);

      } else {    /* s_numa_nodes != NULL && s_n_cpus > 0 */
        /*
         * Not initialized or initialization failure.
         */
        lagopus_msg_warning("The NUMA related information is not initialized. "
                            "Use malloc(3) instead.\n");
        ret = malloc(sz);
      }

    } else {    /* cpu >= 0 */
      /*
       * Use pure malloc(3).
       */
      ret = malloc(sz);
    }
  }

  return ret;
}