/* Allocate LEN bytes on the heap for topology memory-binding buffers.
 * Prefers a page-aligned allocation when both a pagesize macro and an
 * aligned allocator are available, falling back to plain malloc().
 * The topology argument is unused (kept for API symmetry).
 * Returns the new buffer, or NULL on failure. */
void *
hwloc_alloc_heap(hwloc_topology_t topology __hwloc_attribute_unused, size_t len)
{
  void *p = NULL;
#if defined(hwloc_getpagesize) && defined(HAVE_POSIX_MEMALIGN)
  /* posix_memalign() returns its error code instead of setting errno;
   * stash it in errno so callers get the usual failure convention. */
  errno = posix_memalign(&p, hwloc_getpagesize(), len);
  if (errno)
    p = NULL;
#elif defined(hwloc_getpagesize) && defined(HAVE_MEMALIGN)
  p = memalign(hwloc_getpagesize(), len);
#else
  /* no aligned allocator available, plain malloc */
  p = malloc(len);
#endif
  return p;
}
/* Discover one level (sdl) of the AIX RSET hierarchy and insert one hwloc
 * object of TYPE per resource set found at that level.
 *
 * sdl      - system detail level to query (rs_numrads/rs_getrad)
 * type     - hwloc object type to create for each rad at this level
 * topology - topology being built
 * level    - depth stored in group attributes when type is HWLOC_OBJ_GROUP
 *
 * BUGFIX: the original leaked the rset/rad handles when rs_numrads() failed;
 * they are now released before the early return. */
static void
look_rset(int sdl, hwloc_obj_type_t type, struct hwloc_topology *topology, int level)
{
  rsethandle_t rset, rad;
  int i,maxcpus,j;
  int nbnodes;
  struct hwloc_obj *obj;

  if ((topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM))
    rset = rs_alloc(RS_ALL);
  else
    rset = rs_alloc(RS_PARTITION);
  rad = rs_alloc(RS_EMPTY);
  nbnodes = rs_numrads(rset, sdl, 0);
  if (nbnodes == -1) {
    perror("rs_numrads");
    /* BUGFIX: release the handles instead of leaking them on error */
    rs_free(rset);
    rs_free(rad);
    return;
  }

  for (i = 0; i < nbnodes; i++) {
    if (rs_getrad(rset, rad, sdl, i, 0)) {
      fprintf(stderr,"rs_getrad(%d) failed: %s\n", i, strerror(errno));
      continue;
    }
    if (!rs_getinfo(rad, R_NUMPROCS, 0))
      continue;

    /* It seems logical processors are numbered from 1 here, while the
     * bindprocessor functions numbers them from 0... */
    obj = hwloc_alloc_setup_object(type, i - (type == HWLOC_OBJ_PU));
    obj->cpuset = hwloc_bitmap_alloc();
    obj->os_level = sdl;
    /* mark every logical processor belonging to this rad */
    maxcpus = rs_getinfo(rad, R_MAXPROCS, 0);
    for (j = 0; j < maxcpus; j++) {
      if (rs_op(RS_TESTRESOURCE, rad, NULL, R_PROCS, j))
        hwloc_bitmap_set(obj->cpuset, j);
    }
    switch(type) {
      case HWLOC_OBJ_NODE:
        obj->nodeset = hwloc_bitmap_alloc();
        hwloc_bitmap_set(obj->nodeset, i);
        obj->memory.local_memory = 0; /* TODO: odd, rs_getinfo(rad, R_MEMSIZE, 0) << 10 returns the total memory ... */
        obj->memory.page_types_len = 2;
        obj->memory.page_types = malloc(2*sizeof(*obj->memory.page_types));
        memset(obj->memory.page_types, 0, 2*sizeof(*obj->memory.page_types));
        obj->memory.page_types[0].size = hwloc_getpagesize();
#ifdef HAVE__SC_LARGE_PAGESIZE
        obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
        /* TODO: obj->memory.page_types[1].count = rs_getinfo(rset, R_LGPGFREE, 0) / hugepagesize */
        break;
      case HWLOC_OBJ_CACHE:
        /* L2 cache attributes come from the global _system_configuration */
        obj->attr->cache.size = _system_configuration.L2_cache_size;
        obj->attr->cache.associativity = _system_configuration.L2_cache_asc;
        obj->attr->cache.linesize = 0; /* TODO: ? */
        obj->attr->cache.depth = 2;
        obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED; /* FIXME? */
        break;
      case HWLOC_OBJ_GROUP:
        obj->attr->group.depth = level;
        break;
      case HWLOC_OBJ_CORE:
      {
        hwloc_obj_t obj2, obj3;
        /* also create the core's L1 cache(s) from _system_configuration */
        obj2 = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, i);
        obj2->cpuset = hwloc_bitmap_dup(obj->cpuset);
        obj2->attr->cache.size = _system_configuration.dcache_size;
        obj2->attr->cache.associativity = _system_configuration.dcache_asc;
        obj2->attr->cache.linesize = _system_configuration.dcache_line;
        obj2->attr->cache.depth = 1;
        if (_system_configuration.cache_attrib & (1<<30)) {
          /* Unified cache */
          obj2->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
          hwloc_debug("Adding an L1u cache for core %d\n", i);
          hwloc_insert_object_by_cpuset(topology, obj2);
        } else {
          /* Separate Instruction and Data caches */
          obj2->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
          hwloc_debug("Adding an L1d cache for core %d\n", i);
          hwloc_insert_object_by_cpuset(topology, obj2);

          obj3 = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, i);
          obj3->cpuset = hwloc_bitmap_dup(obj->cpuset);
          obj3->attr->cache.size = _system_configuration.icache_size;
          obj3->attr->cache.associativity = _system_configuration.icache_asc;
          obj3->attr->cache.linesize = _system_configuration.icache_line;
          obj3->attr->cache.depth = 1;
          obj3->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
          hwloc_debug("Adding an L1i cache for core %d\n", i);
          hwloc_insert_object_by_cpuset(topology, obj3);
        }
        break;
      }
      default:
        break;
    }
    hwloc_debug_2args_bitmap("%s %d has cpuset %s\n",
                             hwloc_obj_type_string(type),
                             i, obj->cpuset);
    hwloc_insert_object_by_cpuset(topology, obj);
  }

  rs_free(rset);
  rs_free(rad);
}
/* Export ORIG to a temp file as XML, append a shmem copy of the topology at
 * a page-aligned offset past the XML, then try to adopt that shmem mapping
 * both in-process and from a re-exec'd child (CALLNAME is this program's
 * argv[0]).  Returns EXIT_SUCCESS if at least one adoption worked,
 * EXIT_SKIP if both adoptions were skipped or setup failed early. */
static int
test(hwloc_topology_t orig, const char *callname)
{
  unsigned long forced_addr;
  unsigned long fileoffset;
  size_t shmem_length;
  /* synthetic topologies carry distances we want the adopter to re-check */
  int synthetic_with_distances = (hwloc_obj_get_info_by_name(hwloc_get_root_obj(orig), "SyntheticDescription") != NULL);
  char tmpname[] = "/tmp/hwloc_test_shmem.XXXXXX";
  char cmd[512];
  struct stat st;
  int fd, err;
  int ret = EXIT_SKIP;

  printf("opening temporary file\n");
  fd = mkstemp(tmpname);
  if (fd < 0) {
    perror("mkstemp");
    goto out;
  }
  printf("opened %s\n", tmpname);

  printf("exporting XML\n");
  err = hwloc_topology_export_xml(orig, tmpname, 0);
  assert(!err);
  err = stat(tmpname, &st);
  assert(!err);
  printf("exported %lu bytes\n", (unsigned long) st.st_size);
  fileoffset = st.st_size+1; /* skip a couple bytes to make sure the XML is done */
  /* round up to a page boundary, as required for mmap'ing at an offset */
  fileoffset = (fileoffset + hwloc_getpagesize() - 1) &~(hwloc_getpagesize() - 1);
  printf("will mmap at file offset %lu\n", fileoffset);

  err = hwloc_shmem_topology_get_length(orig, &shmem_length, 0);
  assert(!err);
  printf("need mmap length %lu\n", (unsigned long) shmem_length);

  /* pick a fixed virtual address that is usually free on this ABI;
   * if something already lives there, the write below reports EBUSY */
#if SIZEOF_VOID_P == 8
  forced_addr = 0x300000000000UL;
#else
  forced_addr = 0xb0000000UL;
#endif
  printf("write to shmem at address %lx in file %s offset %lu\n", forced_addr, tmpname, fileoffset);
  err = hwloc_shmem_topology_write(orig, fd, fileoffset, (void*)(uintptr_t)forced_addr, shmem_length, 0);
  if (err == -1 && errno == EBUSY) {
    fprintf(stderr, "Failed to shmem write, requested mapping is busy\n");
    goto out_with_fd;
  }
  assert(!err);
  printf("wrote length %lu\n", (unsigned long) shmem_length);

  printf("adopting locally\n");
  ret = adopt(fd, fileoffset, forced_addr, shmem_length, synthetic_with_distances);
  assert(ret == EXIT_SUCCESS || ret == EXIT_SKIP);

  /* re-exec ourselves so a fresh address space tries the same adoption */
  printf("adopting in other child process\n");
  snprintf(cmd, sizeof(cmd), "%s %s %lu %lu %lu %d", callname, tmpname, fileoffset, forced_addr, (unsigned long) shmem_length, synthetic_with_distances);
  printf("running command %s\n", cmd);
  err = system(cmd);
  assert(WIFEXITED(err));
  printf("child process returned %d\n", WEXITSTATUS(err));
  assert(WEXITSTATUS(err) == EXIT_SUCCESS || WEXITSTATUS(err) == EXIT_SKIP);
  /* we caught errors above.
   * return SKIP if both returned SKIP. otherwise SUCCESS */
  if (WEXITSTATUS(err) == EXIT_SKIP && ret == EXIT_SKIP)
    ret = EXIT_SKIP;
  else
    ret = EXIT_SUCCESS;

 out_with_fd:
  close(fd);
  unlink(tmpname);
 out:
  return ret;
}
/* OSF/Tru64 discovery: enumerate RADs (NUMA nodes) via librad, build one
 * NUMA object per rad with its cpuset and memory, and collect a full
 * node-to-node distance matrix by growing the nloc() query distance.
 * Returns 0 if somebody already discovered the topology, 1 on completion. */
static int
hwloc_look_osf(struct hwloc_backend *backend)
{
  struct hwloc_topology *topology = backend->topology;
  cpu_cursor_t cursor;
  unsigned nbnodes;
  radid_t radid, radid2;
  radset_t radset, radset2;
  cpuid_t cpuid;
  cpuset_t cpuset;
  struct hwloc_obj *obj;
  unsigned distance;

  if (topology->levels[0][0]->cpuset)
    /* somebody discovered things */
    return 0;

  hwloc_alloc_obj_cpusets(topology->levels[0][0]);

  nbnodes = rad_get_num();

  cpusetcreate(&cpuset);
  radsetcreate(&radset);
  radsetcreate(&radset2);
  {
    /* NOTE(review): calloc results are not NULL-checked here; on OOM the
     * loop below would crash.  Also assumes hwloc_distances_set() takes
     * ownership of indexes/nodes/distances — verify against its contract. */
    hwloc_obj_t *nodes = calloc(nbnodes, sizeof(hwloc_obj_t));
    unsigned *indexes = calloc(nbnodes, sizeof(unsigned));
    float *distances = calloc(nbnodes*nbnodes, sizeof(float));
    unsigned nfound;
    numa_attr_t attr;

    attr.nattr_type = R_RAD;
    attr.nattr_descr.rd_radset = radset;
    attr.nattr_flags = 0;

    for (radid = 0; radid < (radid_t) nbnodes; radid++) {
      rademptyset(radset);
      radaddset(radset, radid);
      cpuemptyset(cpuset);
      if (rad_get_cpus(radid, cpuset)==-1) {
	fprintf(stderr,"rad_get_cpus(%d) failed: %s\n",radid,strerror(errno));
	continue;
      }

      indexes[radid] = radid;
      nodes[radid] = obj = hwloc_alloc_setup_object(HWLOC_OBJ_NODE, radid);
      obj->cpuset = hwloc_bitmap_alloc();
      obj->memory.local_memory = rad_get_physmem(radid) * hwloc_getpagesize();
      obj->memory.page_types_len = 2;
      obj->memory.page_types = malloc(2*sizeof(*obj->memory.page_types));
      memset(obj->memory.page_types, 0, 2*sizeof(*obj->memory.page_types));
      obj->memory.page_types[0].size = hwloc_getpagesize();
#ifdef HAVE__SC_LARGE_PAGESIZE
      obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif

      /* translate the rad's cpuset into the hwloc bitmap */
      cursor = SET_CURSOR_INIT;
      while((cpuid = cpu_foreach(cpuset, 0, &cursor)) != CPU_NONE)
	hwloc_bitmap_set(obj->cpuset, cpuid);

      hwloc_debug_1arg_bitmap("node %d has cpuset %s\n",
		 radid, obj->cpuset);

      hwloc_insert_object_by_cpuset(topology, obj);

      /* start all distances at "remote", then refine by querying nloc()
       * with increasing distance until every node has been classified */
      nfound = 0;
      for (radid2 = 0; radid2 < (radid_t) nbnodes; radid2++)
	distances[radid*nbnodes+radid2] = RAD_DIST_REMOTE;
      for (distance = RAD_DIST_LOCAL; distance < RAD_DIST_REMOTE; distance++) {
	attr.nattr_distance = distance;
	/* get set of NUMA nodes at distance <= DISTANCE */
	if (nloc(&attr, radset2)) {
	  fprintf(stderr,"nloc failed: %s\n", strerror(errno));
	  continue;
	}
	cursor = SET_CURSOR_INIT;
	while ((radid2 = rad_foreach(radset2, 0, &cursor)) != RAD_NONE) {
	  if (distances[radid*nbnodes+radid2] == RAD_DIST_REMOTE) {
	    distances[radid*nbnodes+radid2] = (float) distance;
	    nfound++;
	  }
	}
	if (nfound == nbnodes)
	  /* Finished finding distances, no need to go up to RAD_DIST_REMOTE */
	  break;
      }
    }

    hwloc_distances_set(topology, HWLOC_OBJ_NODE, nbnodes, indexes, nodes, distances, 0 /* OS cannot force */);
  }

  radsetdestroy(&radset2);
  radsetdestroy(&radset);
  cpusetdestroy(&cpuset);

  /* add PU objects */
  hwloc_setup_pu_level(topology, hwloc_fallback_nbprocessors(topology));

  hwloc_obj_add_info(topology->levels[0][0], "Backend", "OSF");
  if (topology->is_thissystem)
    hwloc_add_uname_info(topology);
  return 1;
}
/* Darwin discovery: read processor/package/core counts and cache geometry
 * from sysctl, then build package, core, NUMA-node and cache objects.
 * The hw.cacheconfig array tells, per level (index 0 = memory), how many
 * cpus share each cache.  Returns 0 on success, -1 on failure or if the
 * topology was already discovered. */
static int
hwloc_look_darwin(struct hwloc_backend *backend)
{
  struct hwloc_topology *topology = backend->topology;
  int64_t _nprocs;
  unsigned nprocs;
  int64_t _npackages;
  unsigned i, j, cpu;
  struct hwloc_obj *obj;
  size_t size;
  int64_t l1dcachesize, l1icachesize;
  int64_t cacheways[2];
  int64_t l2cachesize;
  int64_t cachelinesize;
  int64_t memsize;
  char cpumodel[64];

  if (topology->levels[0][0]->cpuset)
    /* somebody discovered things */
    return -1;

  hwloc_alloc_obj_cpusets(topology->levels[0][0]);

  if (hwloc_get_sysctlbyname("hw.ncpu", &_nprocs) || _nprocs <= 0)
    return -1;
  nprocs = _nprocs;
  topology->support.discovery->pu = 1;

  hwloc_debug("%u procs\n", nprocs);

  size = sizeof(cpumodel);
  if (sysctlbyname("machdep.cpu.brand_string", cpumodel, &size, NULL, 0))
    cpumodel[0] = '\0';

  if (!hwloc_get_sysctlbyname("hw.packages", &_npackages) && _npackages > 0) {
    unsigned npackages = _npackages;
    int64_t _cores_per_package;
    int64_t _logical_per_package;
    unsigned logical_per_package;

    hwloc_debug("%u packages\n", npackages);

    if (!hwloc_get_sysctlbyname("machdep.cpu.logical_per_package", &_logical_per_package) && _logical_per_package > 0)
      logical_per_package = _logical_per_package;
    else
      /* Assume the trivia. */
      logical_per_package = nprocs / npackages;

    hwloc_debug("%u threads per package\n", logical_per_package);

    /* only build packages if the counts are consistent: cpus are assumed
     * to be numbered contiguously within each package */
    if (nprocs == npackages * logical_per_package
	&& hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_PACKAGE))
      for (i = 0; i < npackages; i++) {
	obj = hwloc_alloc_setup_object(HWLOC_OBJ_PACKAGE, i);
	obj->cpuset = hwloc_bitmap_alloc();
	for (cpu = i*logical_per_package; cpu < (i+1)*logical_per_package; cpu++)
	  hwloc_bitmap_set(obj->cpuset, cpu);

	hwloc_debug_1arg_bitmap("package %u has cpuset %s\n",
		   i, obj->cpuset);

	if (cpumodel[0] != '\0')
	  hwloc_obj_add_info(obj, "CPUModel", cpumodel);
	hwloc_insert_object_by_cpuset(topology, obj);
      }
    else
      if (cpumodel[0] != '\0')
	hwloc_obj_add_info(topology->levels[0][0], "CPUModel", cpumodel);

    if (!hwloc_get_sysctlbyname("machdep.cpu.cores_per_package", &_cores_per_package) && _cores_per_package > 0
	&& hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_CORE)) {
      unsigned cores_per_package = _cores_per_package;
      hwloc_debug("%u cores per package\n", cores_per_package);

      /* cores only make sense if threads divide evenly among them */
      if (!(logical_per_package % cores_per_package))
	for (i = 0; i < npackages * cores_per_package; i++) {
	  obj = hwloc_alloc_setup_object(HWLOC_OBJ_CORE, i);
	  obj->cpuset = hwloc_bitmap_alloc();
	  for (cpu = i*(logical_per_package/cores_per_package);
	       cpu < (i+1)*(logical_per_package/cores_per_package);
	       cpu++)
	    hwloc_bitmap_set(obj->cpuset, cpu);

	  hwloc_debug_1arg_bitmap("core %u has cpuset %s\n",
		     i, obj->cpuset);
	  hwloc_insert_object_by_cpuset(topology, obj);
	}
    }
  } else
    if (cpumodel[0] != '\0')
      hwloc_obj_add_info(topology->levels[0][0], "CPUModel", cpumodel);

  /* gather cache sizes/ways as fallbacks in case hw.cachesize is missing;
   * associativity 0xff means "fully associative" and is stored as -1 */
  if (hwloc_get_sysctlbyname("hw.l1dcachesize", &l1dcachesize))
    l1dcachesize = 0;

  if (hwloc_get_sysctlbyname("hw.l1icachesize", &l1icachesize))
    l1icachesize = 0;

  if (hwloc_get_sysctlbyname("hw.l2cachesize", &l2cachesize))
    l2cachesize = 0;

  if (hwloc_get_sysctlbyname("machdep.cpu.cache.L1_associativity", &cacheways[0]))
    cacheways[0] = 0;
  else if (cacheways[0] == 0xff)
    cacheways[0] = -1;

  if (hwloc_get_sysctlbyname("machdep.cpu.cache.L2_associativity", &cacheways[1]))
    cacheways[1] = 0;
  else if (cacheways[1] == 0xff)
    cacheways[1] = -1;

  if (hwloc_get_sysctlbyname("hw.cachelinesize", &cachelinesize))
    cachelinesize = 0;

  if (hwloc_get_sysctlbyname("hw.memsize", &memsize))
    memsize = 0;

  if (!sysctlbyname("hw.cacheconfig", NULL, &size, NULL, 0)) {
    unsigned n = size / sizeof(uint32_t);
    uint64_t *cacheconfig = NULL;
    uint64_t *cachesize = NULL;
    uint32_t *cacheconfig32 = NULL;

    /* all three pointers start NULL so the goto-out path may free them
     * unconditionally */
    cacheconfig = malloc(sizeof(uint64_t) * n);
    if (NULL == cacheconfig) {
      goto out;
    }
    cachesize = malloc(sizeof(uint64_t) * n);
    if (NULL == cachesize) {
      goto out;
    }
    cacheconfig32 = malloc(sizeof(uint32_t) * n);
    if (NULL == cacheconfig32) {
      goto out;
    }

    if ((!sysctlbyname("hw.cacheconfig", cacheconfig, &size, NULL, 0))) {
      /* Yeech. Darwin seemingly has changed from 32bit to 64bit integers for
       * cacheconfig, with apparently no way for detection. Assume the machine
       * won't have more than 4 billion cpus */
      if (cacheconfig[0] > 0xFFFFFFFFUL) {
	memcpy(cacheconfig32, cacheconfig, size);
	for (i = 0 ; i < size / sizeof(uint32_t); i++)
	  cacheconfig[i] = cacheconfig32[i];
      }

      memset(cachesize, 0, sizeof(uint64_t) * n);
      size = sizeof(uint64_t) * n;
      /* if hw.cachesize is unavailable, fall back to the scalar sysctls */
      if (sysctlbyname("hw.cachesize", cachesize, &size, NULL, 0)) {
	if (n > 0)
	  cachesize[0] = memsize;
	if (n > 1)
	  cachesize[1] = l1dcachesize;
	if (n > 2)
	  cachesize[2] = l2cachesize;
      }

      hwloc_debug("%s", "caches");
      for (i = 0; i < n && cacheconfig[i]; i++)
	hwloc_debug(" %"PRIu64"(%"PRIu64"kB)", cacheconfig[i], cachesize[i] / 1024);

      /* Now we know how many caches there are */
      n = i;
      hwloc_debug("\n%u cache levels\n", n - 1);

      /* For each cache level (0 is memory) */
      for (i = 0; i < n; i++) {
	/* cacheconfig tells us how many cpus share it, let's iterate on each cache */
	for (j = 0; j < (nprocs / cacheconfig[i]); j++) {
	  if (!i) {
	    obj = hwloc_alloc_setup_object(HWLOC_OBJ_NUMANODE, j);
	    obj->nodeset = hwloc_bitmap_alloc();
	    hwloc_bitmap_set(obj->nodeset, j);
	  } else {
	    obj = hwloc_alloc_setup_object(HWLOC_OBJ_L1CACHE+i-1, -1);
	  }
	  obj->cpuset = hwloc_bitmap_alloc();
	  for (cpu = j*cacheconfig[i];
	       cpu < ((j+1)*cacheconfig[i]);
	       cpu++)
	    hwloc_bitmap_set(obj->cpuset, cpu);

	  if (i == 1 && l1icachesize
	      && hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L1ICACHE)) {
	    /* FIXME assuming that L1i and L1d are shared the same way. Darwin
	     * does not yet provide a way to know. */
	    hwloc_obj_t l1i = hwloc_alloc_setup_object(HWLOC_OBJ_L1ICACHE, -1);
	    l1i->cpuset = hwloc_bitmap_dup(obj->cpuset);
	    hwloc_debug_1arg_bitmap("L1icache %u has cpuset %s\n",
		       j, l1i->cpuset);
	    l1i->attr->cache.depth = i;
	    l1i->attr->cache.size = l1icachesize;
	    l1i->attr->cache.linesize = cachelinesize;
	    l1i->attr->cache.associativity = 0;
	    l1i->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
	    hwloc_insert_object_by_cpuset(topology, l1i);
	  }
	  if (i) {
	    hwloc_debug_2args_bitmap("L%ucache %u has cpuset %s\n",
		       i, j, obj->cpuset);
	    obj->attr->cache.depth = i;
	    obj->attr->cache.size = cachesize[i];
	    obj->attr->cache.linesize = cachelinesize;
	    if (i <= sizeof(cacheways) / sizeof(cacheways[0]))
	      obj->attr->cache.associativity = cacheways[i-1];
	    else
	      obj->attr->cache.associativity = 0;
	    if (i == 1 && l1icachesize)
	      obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
	    else
	      obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
	  } else {
	    hwloc_debug_1arg_bitmap("node %u has cpuset %s\n",
		       j, obj->cpuset);
	    obj->memory.local_memory = cachesize[i];
	    obj->memory.page_types_len = 2;
	    obj->memory.page_types = malloc(2*sizeof(*obj->memory.page_types));
	    memset(obj->memory.page_types, 0, 2*sizeof(*obj->memory.page_types));
	    obj->memory.page_types[0].size = hwloc_getpagesize();
#ifdef HAVE__SC_LARGE_PAGESIZE
	    obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
	  }

	  if (hwloc_filter_check_keep_object_type(topology, obj->type))
	    hwloc_insert_object_by_cpuset(topology, obj);
	  else
	    hwloc_free_unlinked_object(obj); /* FIXME: don't built at all, just build the cpuset in case l1i needs it */
	}
      }
    }
  out:
    free(cacheconfig);
    free(cachesize);
    free(cacheconfig32);
  }

  /* add PU objects */
  hwloc_setup_pu_level(topology, nprocs);

  hwloc_obj_add_info(topology->levels[0][0], "Backend", "Darwin");
  hwloc_add_uname_info(topology, NULL);
  return 0;
}
/* NOTE(review): the function header for this body is outside this chunk;
 * judging by the tma.malloc assignment below, this is tma_get_length_malloc,
 * a counting allocator callback with parameters `tma` and `length`. */
{
  size_t *tma_length = tma->data;
  /* accumulate the aligned size of every requested chunk, then hand back a
   * real allocation so the duplication can still proceed */
  *tma_length += (length + HWLOC_SHMEM_MALLOC_ALIGN - 1) & ~(HWLOC_SHMEM_MALLOC_ALIGN - 1);
  return malloc(length);
}

/* Compute how many bytes of shared memory are needed to store a copy of
 * TOPOLOGY, by duplicating it with a size-counting allocator and throwing
 * the duplicate away.  FLAGS must be 0 (errno=EINVAL otherwise).
 * NOTE(review): this chunk is truncated — the tail that stores the result
 * into *lengthp and returns is not visible here. */
int
hwloc_shmem_topology_get_length(hwloc_topology_t topology,
				size_t *lengthp,
				unsigned long flags)
{
  hwloc_topology_t new;
  struct hwloc_tma tma;
  size_t length = 0;
  unsigned long pagesize = hwloc_getpagesize(); /* round-up to full page for mmap() */
  int err;

  if (flags) {
    errno = EINVAL;
    return -1;
  }

  /* duplicate with the counting allocator: only `length` matters */
  tma.malloc = tma_get_length_malloc;
  tma.dontfree = 0;
  tma.data = &length;
  err = hwloc__topology_dup(&new, topology, &tma);
  if (err < 0)
    return err;
  hwloc_topology_destroy(new);
/* Recursively walk the Solaris locality-group (lgrp) hierarchy below LGRP.
 * Any lgrp with installed memory becomes a NUMA-node object whose cpuset is
 * filled from its cpu list; the object is also recorded in glob_lgrps (with
 * curlgrp as the fill cursor) for later distance handling by the caller.
 *
 * BUGFIX: the child-count query result is now checked for -1 before being
 * used as a malloc size (the cpu-count query was already checked). */
static void
browse(struct hwloc_topology *topology, lgrp_cookie_t cookie, lgrp_id_t lgrp, hwloc_obj_t *glob_lgrps, unsigned *curlgrp)
{
  int n;
  hwloc_obj_t obj;
  lgrp_mem_size_t mem_size;

  n = lgrp_cpus(cookie, lgrp, NULL, 0, LGRP_CONTENT_HIERARCHY);
  if (n == -1)
    return;

  /* Is this lgrp a NUMA node? */
  if ((mem_size = lgrp_mem_size(cookie, lgrp, LGRP_MEM_SZ_INSTALLED, LGRP_CONTENT_DIRECT)) > 0) {
    int i;
    processorid_t *cpuids;
    cpuids = malloc(sizeof(processorid_t) * n);
    assert(cpuids != NULL);

    obj = hwloc_alloc_setup_object(HWLOC_OBJ_NUMANODE, lgrp);
    obj->nodeset = hwloc_bitmap_alloc();
    hwloc_bitmap_set(obj->nodeset, lgrp);
    obj->cpuset = hwloc_bitmap_alloc();
    glob_lgrps[(*curlgrp)++] = obj;

    lgrp_cpus(cookie, lgrp, cpuids, n, LGRP_CONTENT_HIERARCHY);
    for (i = 0; i < n ; i++) {
      hwloc_debug("node %ld's cpu %d is %d\n", lgrp, i, cpuids[i]);
      hwloc_bitmap_set(obj->cpuset, cpuids[i]);
    }
    hwloc_debug_1arg_bitmap("node %ld has cpuset %s\n",
			    lgrp, obj->cpuset);

    /* or LGRP_MEM_SZ_FREE */
    hwloc_debug("node %ld has %lldkB\n", lgrp, mem_size/1024);
    obj->memory.local_memory = mem_size;
    obj->memory.page_types_len = 2;
    obj->memory.page_types = malloc(2*sizeof(*obj->memory.page_types));
    memset(obj->memory.page_types, 0, 2*sizeof(*obj->memory.page_types));
    obj->memory.page_types[0].size = hwloc_getpagesize();
#if HAVE_DECL__SC_LARGE_PAGESIZE
    obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
    hwloc_insert_object_by_cpuset(topology, obj);
    free(cpuids);
  }

  n = lgrp_children(cookie, lgrp, NULL, 0);
  if (n == -1)
    /* BUGFIX: a failed query would otherwise be used as a malloc size */
    return;
  {
    lgrp_id_t *lgrps;
    int i;
    lgrps = malloc(sizeof(lgrp_id_t) * n);
    assert(lgrps != NULL);
    lgrp_children(cookie, lgrp, lgrps, n);
    hwloc_debug("lgrp %ld has %d children\n", lgrp, n);
    for (i = 0; i < n ; i++) {
      browse(topology, cookie, lgrps[i], glob_lgrps, curlgrp);
    }
    hwloc_debug("lgrp %ld's children done\n", lgrp);
    free(lgrps);
  }
}
/* Discover one level (sdl) of the AIX RSET hierarchy and insert one hwloc
 * object of TYPE per resource set found at that level.
 *
 * sdl      - system detail level to query (rs_numrads/rs_getrad)
 * type     - hwloc object type to create for each rad at this level
 * topology - topology being built
 * level    - depth stored in group attributes when type is HWLOC_OBJ_GROUP
 *
 * BUGFIX: the original leaked the rset/rad handles when rs_numrads() failed;
 * they are now released before the early return. */
static void
look_rset(int sdl, hwloc_obj_type_t type, struct hwloc_topology *topology, int level)
{
  rsethandle_t rset, rad;
  int i,maxcpus,j;
  int nbnodes;
  struct hwloc_obj *obj;

  if ((topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM))
    rset = rs_alloc(RS_ALL);
  else
    rset = rs_alloc(RS_PARTITION);
  rad = rs_alloc(RS_EMPTY);
  nbnodes = rs_numrads(rset, sdl, 0);
  if (nbnodes == -1) {
    perror("rs_numrads");
    /* BUGFIX: release the handles instead of leaking them on error */
    rs_free(rset);
    rs_free(rad);
    return;
  }

  for (i = 0; i < nbnodes; i++) {
    hwloc_bitmap_t cpuset;
    unsigned os_index = (unsigned) -1; /* no os_index except for PU and NUMANODE below */

    if (rs_getrad(rset, rad, sdl, i, 0)) {
      fprintf(stderr,"rs_getrad(%d) failed: %s\n", i, strerror(errno));
      continue;
    }
    if (!rs_getinfo(rad, R_NUMPROCS, 0))
      continue;

    /* mark every logical processor belonging to this rad */
    maxcpus = rs_getinfo(rad, R_MAXPROCS, 0);
    cpuset = hwloc_bitmap_alloc();
    for (j = 0; j < maxcpus; j++) {
      if (rs_op(RS_TESTRESOURCE, rad, NULL, R_PROCS, j))
	hwloc_bitmap_set(cpuset, j);
    }

    if (type == HWLOC_OBJ_PU) {
      os_index = hwloc_bitmap_first(cpuset);
      hwloc_debug("Found PU #%u inside node %d for sdl %d\n", os_index, i, sdl);
      assert(hwloc_bitmap_weight(cpuset) == 1);
    } else if (type == HWLOC_OBJ_NUMANODE) {
      /* NUMA node os_index isn't used for binding, just use the rad number to get unique values.
       * Note that we'll use that fact in hwloc_aix_prepare_membind(). */
      os_index = i;
      hwloc_debug("Using os_index #%u for NUMA node inside node %d for sdl %d\n", os_index, i, sdl);
    }

    obj = hwloc_alloc_setup_object(type, os_index);
    obj->cpuset = cpuset;
    obj->os_level = sdl;

    switch(type) {
      case HWLOC_OBJ_NUMANODE:
	obj->nodeset = hwloc_bitmap_alloc();
	hwloc_bitmap_set(obj->nodeset, i);
	obj->memory.local_memory = 0; /* TODO: odd, rs_getinfo(rad, R_MEMSIZE, 0) << 10 returns the total memory ... */
	obj->memory.page_types_len = 2;
	obj->memory.page_types = malloc(2*sizeof(*obj->memory.page_types));
	memset(obj->memory.page_types, 0, 2*sizeof(*obj->memory.page_types));
	obj->memory.page_types[0].size = hwloc_getpagesize();
#ifdef HAVE__SC_LARGE_PAGESIZE
	obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
	/* TODO: obj->memory.page_types[1].count = rs_getinfo(rset, R_LGPGFREE, 0) / hugepagesize */
	break;
      case HWLOC_OBJ_CACHE:
	/* L2 cache attributes come from the global _system_configuration */
	obj->attr->cache.size = _system_configuration.L2_cache_size;
	obj->attr->cache.associativity = _system_configuration.L2_cache_asc;
	obj->attr->cache.linesize = 0; /* unknown by default */
	if (__power_pc())
	  if (__power_4() || __power_5() || __power_6() || __power_7())
	    obj->attr->cache.linesize = 128;
	obj->attr->cache.depth = 2;
	obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED; /* OK for power[4567], unknown for others */
	break;
      case HWLOC_OBJ_GROUP:
	obj->attr->group.depth = level;
	break;
      case HWLOC_OBJ_CORE:
      {
	hwloc_obj_t obj2, obj3;
	/* also create the core's L1 cache(s) from _system_configuration */
	obj2 = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, i);
	obj2->cpuset = hwloc_bitmap_dup(obj->cpuset);
	obj2->attr->cache.size = _system_configuration.dcache_size;
	obj2->attr->cache.associativity = _system_configuration.dcache_asc;
	obj2->attr->cache.linesize = _system_configuration.dcache_line;
	obj2->attr->cache.depth = 1;
	if (_system_configuration.cache_attrib & (1<<30)) {
	  /* Unified cache */
	  obj2->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
	  hwloc_debug("Adding an L1u cache for core %d\n", i);
	  hwloc_insert_object_by_cpuset(topology, obj2);
	} else {
	  /* Separate Instruction and Data caches */
	  obj2->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
	  hwloc_debug("Adding an L1d cache for core %d\n", i);
	  hwloc_insert_object_by_cpuset(topology, obj2);

	  obj3 = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, i);
	  obj3->cpuset = hwloc_bitmap_dup(obj->cpuset);
	  obj3->attr->cache.size = _system_configuration.icache_size;
	  obj3->attr->cache.associativity = _system_configuration.icache_asc;
	  obj3->attr->cache.linesize = _system_configuration.icache_line;
	  obj3->attr->cache.depth = 1;
	  obj3->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
	  hwloc_debug("Adding an L1i cache for core %d\n", i);
	  hwloc_insert_object_by_cpuset(topology, obj3);
	}
	break;
      }
      default:
	break;
    }
    hwloc_debug_2args_bitmap("%s %d has cpuset %s\n",
			     hwloc_obj_type_string(type),
			     i, obj->cpuset);
    hwloc_insert_object_by_cpuset(topology, obj);
  }

  rs_free(rset);
  rs_free(rad);
}
static int hwloc_look_darwin(struct hwloc_backend *backend) { struct hwloc_topology *topology = backend->topology; int64_t _nprocs; unsigned nprocs; int64_t _npackages; unsigned i, j, cpu; struct hwloc_obj *obj; size_t size; int64_t l1dcachesize, l1icachesize; int64_t cacheways[2]; int64_t l2cachesize; int64_t l3cachesize; int64_t cachelinesize; int64_t memsize; int64_t _tmp; char cpumodel[64]; char cpuvendor[64]; char cpufamilynumber[20], cpumodelnumber[20], cpustepping[20]; int gotnuma = 0; int gotnumamemory = 0; if (topology->levels[0][0]->cpuset) /* somebody discovered things */ return -1; hwloc_alloc_root_sets(topology->levels[0][0]); /* Don't use hwloc_fallback_nbprocessors() because it would return online cpus only, * while we need all cpus when computing logical_per_package, etc below. * We don't know which CPUs are offline, but Darwin doesn't support binding anyway. * * TODO: try hw.logicalcpu_max */ if (hwloc_get_sysctlbyname("hw.logicalcpu", &_nprocs) || _nprocs <= 0) /* fallback to deprecated way */ if (hwloc_get_sysctlbyname("hw.ncpu", &_nprocs) || _nprocs <= 0) return -1; nprocs = _nprocs; topology->support.discovery->pu = 1; hwloc_debug("%u procs\n", nprocs); size = sizeof(cpuvendor); if (sysctlbyname("machdep.cpu.vendor", cpuvendor, &size, NULL, 0)) cpuvendor[0] = '\0'; size = sizeof(cpumodel); if (sysctlbyname("machdep.cpu.brand_string", cpumodel, &size, NULL, 0)) cpumodel[0] = '\0'; if (hwloc_get_sysctlbyname("machdep.cpu.family", &_tmp)) cpufamilynumber[0] = '\0'; else snprintf(cpufamilynumber, sizeof(cpufamilynumber), "%lld", (long long) _tmp); if (hwloc_get_sysctlbyname("machdep.cpu.model", &_tmp)) cpumodelnumber[0] = '\0'; else snprintf(cpumodelnumber, sizeof(cpumodelnumber), "%lld", (long long) _tmp); /* .extfamily and .extmodel are already added to .family and .model */ if (hwloc_get_sysctlbyname("machdep.cpu.stepping", &_tmp)) cpustepping[0] = '\0'; else snprintf(cpustepping, sizeof(cpustepping), "%lld", (long long) _tmp); if 
(!hwloc_get_sysctlbyname("hw.packages", &_npackages) && _npackages > 0) { unsigned npackages = _npackages; int64_t _cores_per_package; unsigned cores_per_package; int64_t _logical_per_package; unsigned logical_per_package; hwloc_debug("%u packages\n", npackages); if (!hwloc_get_sysctlbyname("machdep.cpu.thread_count", &_logical_per_package) && _logical_per_package > 0) /* official/modern way */ logical_per_package = _logical_per_package; else if (!hwloc_get_sysctlbyname("machdep.cpu.logical_per_package", &_logical_per_package) && _logical_per_package > 0) /* old way, gives the max supported by this "kind" of processor, * can be larger than the actual number for this model. */ logical_per_package = _logical_per_package; else /* Assume the trivia. */ logical_per_package = nprocs / npackages; hwloc_debug("%u threads per package\n", logical_per_package); if (nprocs == npackages * logical_per_package && hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_PACKAGE)) for (i = 0; i < npackages; i++) { obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PACKAGE, i); obj->cpuset = hwloc_bitmap_alloc(); for (cpu = i*logical_per_package; cpu < (i+1)*logical_per_package; cpu++) hwloc_bitmap_set(obj->cpuset, cpu); hwloc_debug_1arg_bitmap("package %u has cpuset %s\n", i, obj->cpuset); if (cpuvendor[0] != '\0') hwloc_obj_add_info(obj, "CPUVendor", cpuvendor); if (cpumodel[0] != '\0') hwloc_obj_add_info(obj, "CPUModel", cpumodel); if (cpufamilynumber[0] != '\0') hwloc_obj_add_info(obj, "CPUFamilyNumber", cpufamilynumber); if (cpumodelnumber[0] != '\0') hwloc_obj_add_info(obj, "CPUModelNumber", cpumodelnumber); if (cpustepping[0] != '\0') hwloc_obj_add_info(obj, "CPUStepping", cpustepping); hwloc_insert_object_by_cpuset(topology, obj); } else { if (cpuvendor[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUVendor", cpuvendor); if (cpumodel[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUModel", cpumodel); if (cpufamilynumber[0] != '\0') 
hwloc_obj_add_info(topology->levels[0][0], "CPUFamilyNumber", cpufamilynumber); if (cpumodelnumber[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUModelNumber", cpumodelnumber); if (cpustepping[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUStepping", cpustepping); } if (!hwloc_get_sysctlbyname("machdep.cpu.core_count", &_cores_per_package) && _cores_per_package > 0) /* official/modern way */ cores_per_package = _cores_per_package; else if (!hwloc_get_sysctlbyname("machdep.cpu.cores_per_package", &_cores_per_package) && _cores_per_package > 0) /* old way, gives the max supported by this "kind" of processor, * can be larger than the actual number for this model. */ cores_per_package = _cores_per_package; else /* no idea */ cores_per_package = 0; if (cores_per_package > 0 && hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_CORE)) { hwloc_debug("%u cores per package\n", cores_per_package); if (!(logical_per_package % cores_per_package)) for (i = 0; i < npackages * cores_per_package; i++) { obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_CORE, i); obj->cpuset = hwloc_bitmap_alloc(); for (cpu = i*(logical_per_package/cores_per_package); cpu < (i+1)*(logical_per_package/cores_per_package); cpu++) hwloc_bitmap_set(obj->cpuset, cpu); hwloc_debug_1arg_bitmap("core %u has cpuset %s\n", i, obj->cpuset); hwloc_insert_object_by_cpuset(topology, obj); } } } else { if (cpuvendor[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUVendor", cpuvendor); if (cpumodel[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUModel", cpumodel); if (cpufamilynumber[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUFamilyNumber", cpufamilynumber); if (cpumodelnumber[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUModelNumber", cpumodelnumber); if (cpustepping[0] != '\0') hwloc_obj_add_info(topology->levels[0][0], "CPUStepping", cpustepping); } if (hwloc_get_sysctlbyname("hw.l1dcachesize", &l1dcachesize)) l1dcachesize = 0; if 
(hwloc_get_sysctlbyname("hw.l1icachesize", &l1icachesize)) l1icachesize = 0; if (hwloc_get_sysctlbyname("hw.l2cachesize", &l2cachesize)) l2cachesize = 0; if (hwloc_get_sysctlbyname("hw.l3cachesize", &l3cachesize)) l3cachesize = 0; if (hwloc_get_sysctlbyname("machdep.cpu.cache.L1_associativity", &cacheways[0])) cacheways[0] = 0; else if (cacheways[0] == 0xff) cacheways[0] = -1; if (hwloc_get_sysctlbyname("machdep.cpu.cache.L2_associativity", &cacheways[1])) cacheways[1] = 0; else if (cacheways[1] == 0xff) cacheways[1] = -1; if (hwloc_get_sysctlbyname("hw.cachelinesize", &cachelinesize)) cachelinesize = 0; if (hwloc_get_sysctlbyname("hw.memsize", &memsize)) memsize = 0; if (!sysctlbyname("hw.cacheconfig", NULL, &size, NULL, 0)) { unsigned n = size / sizeof(uint32_t); uint64_t cacheconfig[n]; uint64_t cachesize[n]; uint32_t cacheconfig32[n]; if ((!sysctlbyname("hw.cacheconfig", cacheconfig, &size, NULL, 0))) { /* Yeech. Darwin seemingly has changed from 32bit to 64bit integers for * cacheconfig, with apparently no way for detection. 
Assume the machine * won't have more than 4 billion cpus */ if (cacheconfig[0] > 0xFFFFFFFFUL) { memcpy(cacheconfig32, cacheconfig, size); for (i = 0 ; i < size / sizeof(uint32_t); i++) cacheconfig[i] = cacheconfig32[i]; } memset(cachesize, 0, sizeof(uint64_t) * n); size = sizeof(uint64_t) * n; if (sysctlbyname("hw.cachesize", cachesize, &size, NULL, 0)) { if (n > 0) cachesize[0] = memsize; if (n > 1) cachesize[1] = l1dcachesize; if (n > 2) cachesize[2] = l2cachesize; if (n > 3) cachesize[3] = l3cachesize; } hwloc_debug("%s", "caches"); for (i = 0; i < n && cacheconfig[i]; i++) hwloc_debug(" %"PRIu64"(%"PRIu64"kB)", cacheconfig[i], cachesize[i] / 1024); /* Now we know how many caches there are */ n = i; hwloc_debug("\n%u cache levels\n", n - 1); /* For each cache level (0 is memory) */ for (i = 0; i < n; i++) { /* cacheconfig tells us how many cpus share it, let's iterate on each cache */ for (j = 0; j < (nprocs / cacheconfig[i]); j++) { if (!i) { obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_NUMANODE, j); obj->nodeset = hwloc_bitmap_alloc(); hwloc_bitmap_set(obj->nodeset, j); gotnuma++; } else { obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L1CACHE+i-1, HWLOC_UNKNOWN_INDEX); } obj->cpuset = hwloc_bitmap_alloc(); for (cpu = j*cacheconfig[i]; cpu < ((j+1)*cacheconfig[i]); cpu++) hwloc_bitmap_set(obj->cpuset, cpu); if (i == 1 && l1icachesize && hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L1ICACHE)) { /* FIXME assuming that L1i and L1d are shared the same way. Darwin * does not yet provide a way to know. 
*/ hwloc_obj_t l1i = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L1ICACHE, HWLOC_UNKNOWN_INDEX); l1i->cpuset = hwloc_bitmap_dup(obj->cpuset); hwloc_debug_1arg_bitmap("L1icache %u has cpuset %s\n", j, l1i->cpuset); l1i->attr->cache.depth = i; l1i->attr->cache.size = l1icachesize; l1i->attr->cache.linesize = cachelinesize; l1i->attr->cache.associativity = 0; l1i->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION; hwloc_insert_object_by_cpuset(topology, l1i); } if (i) { hwloc_debug_2args_bitmap("L%ucache %u has cpuset %s\n", i, j, obj->cpuset); obj->attr->cache.depth = i; obj->attr->cache.size = cachesize[i]; obj->attr->cache.linesize = cachelinesize; if (i <= sizeof(cacheways) / sizeof(cacheways[0])) obj->attr->cache.associativity = cacheways[i-1]; else obj->attr->cache.associativity = 0; if (i == 1 && l1icachesize) obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA; else obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED; } else { hwloc_debug_1arg_bitmap("node %u has cpuset %s\n", j, obj->cpuset); if (cachesize[i]) { obj->attr->numanode.local_memory = cachesize[i]; gotnumamemory++; } obj->attr->numanode.page_types_len = 2; obj->attr->numanode.page_types = malloc(2*sizeof(*obj->attr->numanode.page_types)); memset(obj->attr->numanode.page_types, 0, 2*sizeof(*obj->attr->numanode.page_types)); obj->attr->numanode.page_types[0].size = hwloc_getpagesize(); #if HAVE_DECL__SC_LARGE_PAGESIZE obj->attr->numanode.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE); #endif } if (hwloc_filter_check_keep_object_type(topology, obj->type)) hwloc_insert_object_by_cpuset(topology, obj); else hwloc_free_unlinked_object(obj); /* FIXME: don't built at all, just build the cpuset in case l1i needs it */ } } } } if (gotnuma) topology->support.discovery->numa = 1; if (gotnumamemory) topology->support.discovery->numa = 1; /* add PU objects */ hwloc_setup_pu_level(topology, nprocs); hwloc_obj_add_info(topology->levels[0][0], "Backend", "Darwin"); hwloc_add_uname_info(topology, NULL); return 0; }