/**
 * virNumaGetNodeMemory:
 * @node: identifier of the requested NUMA node
 * @memsize: returns the total size of memory in the NUMA node
 * @memfree: returns the total free memory in a NUMA node
 *
 * Returns the total memory of the NUMA node in bytes via the @memsize
 * argument and the free memory of the node via the @memfree argument.
 * The caller has to guarantee that @node is in range (see
 * virNumaGetMaxNode).
 *
 * Returns 0 on success, -1 on error. Does not report errors.
 */
int
virNumaGetNodeMemory(int node,
                     unsigned long long *memsize,
                     unsigned long long *memfree)
{
    long long node_size;
    long long node_free;

    if (memsize)
        *memsize = 0;
    if (memfree)
        *memfree = 0;

    if ((node_size = numa_node_size64(node, &node_free)) < 0)
        return -1;

    if (memsize)
        *memsize = node_size;
    if (memfree)
        *memfree = node_free;

    return 0;
}
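For illustration, here is a minimal caller sketch (hypothetical, not part of the original source) showing how the contract above is intended to be used: check the node index against virNumaGetMaxNode() first, and treat a -1 return as "no data for this node", since the helper itself reports no errors.

/* Hypothetical caller sketch built on the documented contract above;
 * printNodeMemory() is an assumed name, not a libvirt function. */
#include <stdio.h>

static void
printNodeMemory(void)
{
    int maxnode = virNumaGetMaxNode();
    int node;

    for (node = 0; node <= maxnode; node++) {
        unsigned long long memsize, memfree;

        /* -1 means "no data"; virNumaGetNodeMemory() reports no error */
        if (virNumaGetNodeMemory(node, &memsize, &memfree) < 0)
            continue;

        printf("node %d: %llu bytes total, %llu bytes free\n",
               node, memsize, memfree);
    }
}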
unsigned long long
nodeGetFreeMemory(virConnectPtr conn ATTRIBUTE_UNUSED)
{
    unsigned long long freeMem = 0;
    int n;

    if (numa_available() < 0) {
        nodeReportError(VIR_ERR_NO_SUPPORT,
                        "%s", _("NUMA not supported on this host"));
        goto cleanup;
    }

    for (n = 0; n <= numa_max_node(); n++) {
        long long mem;
        /* numa_node_size64() returns the node's total size and stores
         * the free memory in @mem; only the free amount is summed here */
        if (numa_node_size64(n, &mem) < 0) {
            nodeReportError(VIR_ERR_INTERNAL_ERROR,
                            "%s", _("Failed to query NUMA free memory"));
            goto cleanup;
        }
        freeMem += mem;
    }

cleanup:
    return freeMem;
}
int memkind_default_get_size(struct memkind *kind, size_t *total, size_t *free)
{
    nodemask_t nodemask;
    struct bitmask nodemask_bm = {NUMA_NUM_NODES, nodemask.n};
    long long f;
    int err = 0;
    int i;

    *total = 0;
    *free = 0;
    if (kind->ops->get_mbind_nodemask) {
        err = kind->ops->get_mbind_nodemask(kind, nodemask.n, NUMA_NUM_NODES);
    }
    else {
        copy_bitmask_to_bitmask(numa_all_nodes_ptr, &nodemask_bm);
    }
    if (!err) {
        for (i = 0; i < NUMA_NUM_NODES; ++i) {
            if (numa_bitmask_isbitset(&nodemask_bm, i)) {
                *total += numa_node_size64(i, &f);
                *free += f;
            }
        }
    }
    return err;
}
static int filter_nodemask_mem(nodemask_t *nodemask, unsigned long max_node)
{
#if MPOL_F_MEMS_ALLOWED
    unsigned long nodemask_size = max_node / 8 + 1;

    memset(nodemask, 0, nodemask_size);
    /*
     * avoid numa_get_mems_allowed(), because of bug in getpol()
     * utility function in older versions:
     * http://www.spinics.net/lists/linux-numa/msg00849.html
     */
    if (ltp_syscall(__NR_get_mempolicy, NULL, nodemask->n,
                    max_node, 0, MPOL_F_MEMS_ALLOWED) < 0)
        return -2;
#else
    int i;
    /*
     * old libnuma/kernel don't have MPOL_F_MEMS_ALLOWED, so let's assume
     * that we can use any node with memory > 0
     */
    for (i = 0; i < max_node; i++) {
        if (!nodemask_isset(nodemask, i))
            continue;

        if (numa_node_size64(i, NULL) <= 0)
            nodemask_clr(nodemask, i);
    }
#endif /* MPOL_F_MEMS_ALLOWED */
    return 0;
}
void hardware(void)
{
    int i;
    int numnodes = 0;
    int prevnode = -1;
    int skip = 0;
    int maxnode = numa_max_node();

    for (i = 0; i <= maxnode; i++)
        if (numa_bitmask_isbitset(numa_nodes_ptr, i))
            numnodes++;
    printf("available: %d nodes (", numnodes);

    for (i = 0; i <= maxnode; i++) {
        if (numa_bitmask_isbitset(numa_nodes_ptr, i)) {
            if (prevnode == -1) {
                printf("%d", i);
                prevnode = i;
                continue;
            }
            if (i > prevnode + 1) {
                if (skip) {
                    printf("%d", prevnode);
                    skip = 0;
                }
                printf(",%d", i);
                prevnode = i;
                continue;
            }
            if (i == prevnode + 1) {
                if (!skip) {
                    printf("-");
                    skip = 1;
                }
                prevnode = i;
            }
            if ((i == maxnode) && skip)
                printf("%d", prevnode);
        }
    }
    printf(")\n");

    for (i = 0; i <= maxnode; i++) {
        char buf[64];
        long long fr;
        unsigned long long sz = numa_node_size64(i, &fr);
        if (!numa_bitmask_isbitset(numa_nodes_ptr, i))
            continue;
        printf("node %d cpus:", i);
        print_node_cpus(i);
        printf("node %d size: %s\n", i, fmt_mem(sz, buf));
        printf("node %d free: %s\n", i, fmt_mem(fr, buf));
    }
    print_distances(maxnode);
}
// display information about total/free memory on each numa node
void DumpMemoryStats(void)
{
    int nodes = numa_max_node();
    int n;

    for (n = 0; n <= nodes; n++) {
        long long freememsize;
        long long memsize = numa_node_size64(n, &freememsize);
        printf("node %d has %lld bytes of memory (%lld bytes free)\n",
               n, memsize, freememsize);
    }
}
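As a usage note, libnuma functions such as numa_node_size64() are only defined after numa_available() has confirmed NUMA support, so a caller of the helper above would typically be guarded like this (the main() wrapper and error message are assumptions, not part of the original):

#include <numa.h>
#include <stdio.h>

int main(void)
{
    /* numa_available() must succeed before any other libnuma call is made */
    if (numa_available() < 0) {
        fprintf(stderr, "NUMA is not available on this system\n");
        return 1;
    }

    DumpMemoryStats();
    return 0;
}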
static void print_mem_stats(pid_t pid, int node)
{
    char s[64];
    long long node_size, freep;

    if (pid == 0)
        pid = getpid();

    tst_resm(TINFO, "mem_stats pid: %d, node: %d", pid, node);

    /* dump pid's VM info */
    sprintf(s, "cat /proc/%d/status", pid);
    system(s);
    sprintf(s, "cat /proc/%d/numa_maps", pid);
    system(s);

    /* dump node free mem */
    node_size = numa_node_size64(node, &freep);
    tst_resm(TINFO, "Node id: %d, size: %lld, free: %lld",
             node, node_size, freep);
}
int
nodeGetCellsFreeMemory(virConnectPtr conn ATTRIBUTE_UNUSED,
                       unsigned long long *freeMems,
                       int startCell,
                       int maxCells)
{
    int n, lastCell, numCells;
    int ret = -1;
    int maxCell;

    if (numa_available() < 0) {
        nodeReportError(VIR_ERR_NO_SUPPORT,
                        "%s", _("NUMA not supported on this host"));
        goto cleanup;
    }

    maxCell = numa_max_node();
    if (startCell > maxCell) {
        nodeReportError(VIR_ERR_INTERNAL_ERROR,
                        _("start cell %d out of range (0-%d)"),
                        startCell, maxCell);
        goto cleanup;
    }
    lastCell = startCell + maxCells - 1;
    if (lastCell > maxCell)
        lastCell = maxCell;

    for (numCells = 0, n = startCell; n <= lastCell; n++) {
        long long mem;
        if (numa_node_size64(n, &mem) < 0) {
            nodeReportError(VIR_ERR_INTERNAL_ERROR,
                            "%s", _("Failed to query NUMA free memory"));
            goto cleanup;
        }
        freeMems[numCells++] = mem;
    }
    ret = numCells;

cleanup:
    return ret;
}
void myhbwmalloc_init(void)
{
    /* set to NULL before trying to initialize.  if we return before
     * successful creation of the mspace, then it will still be NULL,
     * and we can use that in subsequent library calls to determine
     * that the library failed to initialize. */
    myhbwmalloc_mspace = NULL;

    /* verbose printout? */
    myhbwmalloc_verbose = 0;
    {
        char * env_char = getenv("HBWMALLOC_VERBOSE");
        if (env_char != NULL) {
            myhbwmalloc_verbose = 1;
            printf("hbwmalloc: HBWMALLOC_VERBOSE set\n");
        }
    }

    /* fail hard or soft? */
    myhbwmalloc_hardfail = 1;
    {
        char * env_char = getenv("HBWMALLOC_SOFTFAIL");
        if (env_char != NULL) {
            myhbwmalloc_hardfail = 0;
            printf("hbwmalloc: HBWMALLOC_SOFTFAIL set\n");
        }
    }

    /* set the atexit handler that will destroy the mspace and free the numa allocation */
    atexit(myhbwmalloc_final);

    /* detect and configure use of NUMA memory nodes */
    {
        int max_possible_node    = numa_max_possible_node();
        int num_possible_nodes   = numa_num_possible_nodes();
        int max_numa_nodes       = numa_max_node();
        int num_configured_nodes = numa_num_configured_nodes();
        int num_configured_cpus  = numa_num_configured_cpus();
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: numa_max_possible_node()    = %d\n", max_possible_node);
            printf("hbwmalloc: numa_num_possible_nodes()   = %d\n", num_possible_nodes);
            printf("hbwmalloc: numa_max_node()             = %d\n", max_numa_nodes);
            printf("hbwmalloc: numa_num_configured_nodes() = %d\n", num_configured_nodes);
            printf("hbwmalloc: numa_num_configured_cpus()  = %d\n", num_configured_cpus);
        }

        /* FIXME this is a hack.  assumes HBW is only numa node 1. */
        if (num_configured_nodes <= 2) {
            myhbwmalloc_numa_node = num_configured_nodes - 1;
        } else {
            fprintf(stderr, "hbwmalloc: we support only 2 numa nodes, not %d\n",
                    num_configured_nodes);
        }

        if (myhbwmalloc_verbose) {
            for (int i = 0; i < num_configured_nodes; i++) {
                unsigned max_numa_cpus = numa_num_configured_cpus();
                struct bitmask * mask = numa_bitmask_alloc(max_numa_cpus);
                int rc = numa_node_to_cpus(i, mask);
                if (rc != 0) {
                    fprintf(stderr, "hbwmalloc: numa_node_to_cpus failed\n");
                } else {
                    printf("hbwmalloc: numa node %d cpu mask:", i);
                    for (unsigned j = 0; j < max_numa_cpus; j++) {
                        int bit = numa_bitmask_isbitset(mask, j);
                        printf(" %d", bit);
                    }
                    printf("\n");
                }
                numa_bitmask_free(mask);
            }
            fflush(stdout);
        }
    }

#if 0 /* unused */
    /* see if the user specifies a slab size */
    size_t slab_size_requested = 0;
    {
        char * env_char = getenv("HBWMALLOC_BYTES");
        if (env_char != NULL) {
            long units = 1L;
            if      (NULL != strstr(env_char, "G")) units = 1000000000L;
            else if (NULL != strstr(env_char, "M")) units = 1000000L;
            else if (NULL != strstr(env_char, "K")) units = 1000L;
            else                                    units = 1L;

            int num_count = strspn(env_char, "0123456789");
            memset(&env_char[num_count], ' ', strlen(env_char) - num_count);
            slab_size_requested = units * atol(env_char);
        }
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: requested slab_size_requested = %zu\n", slab_size_requested);
        }
    }
#endif

    /* see what libnuma says is available */
    size_t myhbwmalloc_slab_size;
    {
        int node = myhbwmalloc_numa_node;
        long long freemem;
        long long maxmem = numa_node_size64(node, &freemem);
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: numa_node_size64 says maxmem=%lld freemem=%lld for numa node %d\n",
                   maxmem, freemem, node);
        }
        myhbwmalloc_slab_size = freemem;
    }

    /* assume threads, disable if MPI knows otherwise, then allow user to override. */
    int multithreaded = 1;
#ifdef HAVE_MPI
    int nprocs = 1;
    {
        int is_init, is_final;
        MPI_Initialized(&is_init);
        MPI_Finalized(&is_final);
        if (is_init && !is_final) {
            MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
        }

        /* give equal portion to every MPI process */
        myhbwmalloc_slab_size /= nprocs;

        /* if the user initializes MPI with MPI_Init or
         * MPI_Init_thread(MPI_THREAD_SINGLE), they assert there
         * are no threads at all, which means we can skip the
         * malloc mspace lock.
         *
         * if the user lies to MPI, they deserve any bad thing
         * that comes of it. */
        int provided;
        MPI_Query_thread(&provided);
        if (provided == MPI_THREAD_SINGLE) {
            multithreaded = 0;
        } else {
            multithreaded = 1;
        }

        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: MPI processes = %d (threaded = %d)\n", nprocs, multithreaded);
            printf("hbwmalloc: myhbwmalloc_slab_size = %zu\n", myhbwmalloc_slab_size);
        }
    }
#endif

    /* user can assert that hbwmalloc and friends need not be thread-safe */
    {
        char * env_char = getenv("HBWMALLOC_LOCKLESS");
        if (env_char != NULL) {
            multithreaded = 0;
            if (myhbwmalloc_verbose) {
                printf("hbwmalloc: user has disabled locking in mspaces by setting HBWMALLOC_LOCKLESS\n");
            }
        }
    }

    myhbwmalloc_slab = numa_alloc_onnode(myhbwmalloc_slab_size, myhbwmalloc_numa_node);
    if (myhbwmalloc_slab == NULL) {
        fprintf(stderr, "hbwmalloc: numa_alloc_onnode returned NULL for size = %zu\n",
                myhbwmalloc_slab_size);
        return;
    } else {
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: numa_alloc_onnode succeeded for size %zu\n",
                   myhbwmalloc_slab_size);
        }

        /* part (less than 128*sizeof(size_t) bytes) of this space is used for bookkeeping,
         * so the capacity must be at least this large */
        if (myhbwmalloc_slab_size < 128 * sizeof(size_t)) {
            fprintf(stderr, "hbwmalloc: not enough space for mspace bookkeeping\n");
            return;
        }

        /* see above regarding if the user lies to MPI. */
        int locked = multithreaded;
        myhbwmalloc_mspace = create_mspace_with_base(myhbwmalloc_slab,
                                                     myhbwmalloc_slab_size, locked);
        if (myhbwmalloc_mspace == NULL) {
            fprintf(stderr, "hbwmalloc: create_mspace_with_base returned NULL\n");
            return;
        } else if (myhbwmalloc_verbose) {
            printf("hbwmalloc: create_mspace_with_base succeeded for size %zu\n",
                   myhbwmalloc_slab_size);
        }
    }
}
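To show how the mspace created above is presumably consumed, here is a hedged sketch of a malloc-style wrapper. The wrapper name myhbw_malloc() and the exact fallback policy are assumptions; mspace_malloc() is the dlmalloc allocation call that pairs with create_mspace_with_base(), and myhbwmalloc_mspace / myhbwmalloc_hardfail are the globals set up by myhbwmalloc_init().

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical wrapper: allocate from the high-bandwidth mspace if
 * initialization succeeded, otherwise honor the HBWMALLOC_SOFTFAIL
 * policy parsed in myhbwmalloc_init(). */
void *myhbw_malloc(size_t size)
{
    if (myhbwmalloc_mspace != NULL)
        return mspace_malloc(myhbwmalloc_mspace, size);

    if (myhbwmalloc_hardfail) {
        fprintf(stderr, "hbwmalloc: mspace was not initialized\n");
        abort();
    }

    /* soft-fail: fall back to the standard allocator */
    return malloc(size);
}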