/* Count the objects of a given type intersecting `cpuset`.
 * When the type maps to several depths (HWLOC_TYPE_DEPTH_MULTIPLE), the
 * ambiguity is resolved by climbing from the first PU inside the cpuset
 * toward the root until an ancestor of the requested type is found.
 * Returns the object count, or -1 when the type cannot be resolved. */
int get_max_objs_inside_cpuset_by_type(hwloc_topology_t topology, hwloc_cpuset_t cpuset, hwloc_obj_type_t type)
{
    int depth = hwloc_get_type_depth(topology, type);

    if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
        fprintf(stderr, "Cannot find depth %s\n", hwloc_type_name(type));
        return -1;
    }

    if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) {
        /* Walk up from a PU until we hit an object of the requested type,
         * then use that ancestor's depth. */
        hwloc_obj_t ancestor = hwloc_get_obj_inside_cpuset_by_type(topology, cpuset, HWLOC_OBJ_PU, 0);
        while (ancestor != NULL && ancestor->type != type)
            ancestor = ancestor->parent;
        if (ancestor == NULL)
            return -1;
        depth = ancestor->depth;
    }

    return hwloc_get_nbobjs_inside_cpuset_by_depth(topology, cpuset, depth);
}
int hmonitor_eventset_add_named_event(void * monitor_eventset, const char * event) { struct accumulate_eventset * set = (struct accumulate_eventset *) monitor_eventset; harray child_events = NULL; hwloc_obj_t obj; unsigned depth,index,i, n_events = 0; hmon m; if(event==NULL || monitor_eventset == NULL) return -1; /* Decend depth by depth to look if event exists */ int topo_depth = hwloc_topology_get_depth(hmon_topology); for(depth=set->location->depth; depth<topo_depth; depth++){ /* Traverse depth to find matching event */ for(index=0; index<hwloc_get_nbobjs_inside_cpuset_by_depth(hmon_topology, set->location->cpuset, depth); index++){ obj = hwloc_get_obj_inside_cpuset_by_depth(hmon_topology, set->location->cpuset, depth, index); child_events = obj->userdata; if(child_events != NULL){ /* Walk monitor list at this location to find matching event */ for(i=0; i<harray_length(child_events); i++){ m = harray_get(child_events,i); if(!strcmp(m->id, event)){ harray_push(set->child_events, m); n_events=m->n_samples; } } } } } if(n_events == 0){ /* Exit failure */ fprintf(stderr, "Unrecognized event name %s, expected defined monitor name, deeper than %s.\n", event, hwloc_type_name(set->location->type)); return -1; } return n_events; }
/* Windows discovery backend: enumerate NUMA nodes, packages, caches, cores
 * and processor groups via GetLogicalProcessorInformation(Ex) and insert the
 * corresponding objects into the hwloc topology.
 * Returns -1 when another backend already discovered the topology or when a
 * Windows API query fails with an unexpected error.
 * NOTE(review): this chunk ends mid-function at "} else {"; the remainder of
 * the function lies outside the visible source. */
static int hwloc_look_windows(struct hwloc_backend *backend)
{
  struct hwloc_topology *topology = backend->topology;
  hwloc_bitmap_t groups_pu_set = NULL;
  SYSTEM_INFO SystemInfo;
  DWORD length;

  if (topology->levels[0][0]->cpuset)
    /* somebody discovered things */
    return -1;

  hwloc_alloc_obj_cpusets(topology->levels[0][0]);

  GetSystemInfo(&SystemInfo);

  /* Legacy path: only the non-Ex API is available (no processor groups). */
  if (!GetLogicalProcessorInformationExProc && GetLogicalProcessorInformationProc) {
      PSYSTEM_LOGICAL_PROCESSOR_INFORMATION procInfo, tmpprocInfo;
      unsigned id;
      unsigned i;
      struct hwloc_obj *obj;
      hwloc_obj_type_t type;

      /* Grow the buffer until the API stops reporting it as too small. */
      length = 0;
      procInfo = NULL;
      while (1) {
	if (GetLogicalProcessorInformationProc(procInfo, &length))
	  break;
	if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
	  return -1;
	tmpprocInfo = realloc(procInfo, length);
	if (!tmpprocInfo) {
	  free(procInfo);
	  goto out;
	}
	procInfo = tmpprocInfo;
      }

      assert(!length || procInfo);

      for (i = 0; i < length / sizeof(*procInfo); i++) {

	/* Ignore unknown caches */
	/* NOTE(review): this filter reads procInfo-> (element 0) while the
	 * loop iterates with procInfo[i] everywhere else — looks like a
	 * missing [i]; verify against upstream hwloc. */
	if (procInfo->Relationship == RelationCache
		&& procInfo->Cache.Type != CacheUnified
		&& procInfo->Cache.Type != CacheData
		&& procInfo->Cache.Type != CacheInstruction)
	  continue;

	id = -1;
	switch (procInfo[i].Relationship) {
	  case RelationNumaNode:
	    type = HWLOC_OBJ_NUMANODE;
	    id = procInfo[i].NumaNode.NodeNumber;
	    break;
	  case RelationProcessorPackage:
	    type = HWLOC_OBJ_PACKAGE;
	    break;
	  case RelationCache:
	    /* Map cache level/type onto the hwloc cache object types. */
	    type = (procInfo[i].Cache.Type == CacheInstruction ? HWLOC_OBJ_L1ICACHE : HWLOC_OBJ_L1CACHE) + procInfo[i].Cache.Level - 1;
	    break;
	  case RelationProcessorCore:
	    type = HWLOC_OBJ_CORE;
	    break;
	  case RelationGroup:
	  default:
	    type = HWLOC_OBJ_GROUP;
	    break;
	}

	if (!hwloc_filter_check_keep_object_type(topology, type))
	  continue;

	obj = hwloc_alloc_setup_object(topology, type, id);
	obj->cpuset = hwloc_bitmap_alloc();
	hwloc_debug("%s#%u mask %lx\n", hwloc_type_name(type), id, procInfo[i].ProcessorMask);
	/* ProcessorMask is a ULONG_PTR */
	hwloc_bitmap_set_ith_ULONG_PTR(obj->cpuset, 0, procInfo[i].ProcessorMask);
	hwloc_debug_2args_bitmap("%s#%u bitmap %s\n", hwloc_type_name(type), id, obj->cpuset);

	switch (type) {
	  case HWLOC_OBJ_NUMANODE:
	    {
	      ULONGLONG avail;
	      obj->nodeset = hwloc_bitmap_alloc();
	      hwloc_bitmap_set(obj->nodeset, id);
	      /* Query available memory via whichever NUMA API exists. */
	      if ((GetNumaAvailableMemoryNodeExProc && GetNumaAvailableMemoryNodeExProc(id, &avail))
	       || (GetNumaAvailableMemoryNodeProc && GetNumaAvailableMemoryNodeProc(id, &avail)))
		obj->memory.local_memory = avail;
	      /* NOTE(review): page_types_len is set to 2 here and reset to 1
	       * a few lines below; the first store looks redundant. */
	      obj->memory.page_types_len = 2;
	      obj->memory.page_types = malloc(2 * sizeof(*obj->memory.page_types));
	      memset(obj->memory.page_types, 0, 2 * sizeof(*obj->memory.page_types));
	      obj->memory.page_types_len = 1;
	      obj->memory.page_types[0].size = SystemInfo.dwPageSize;
#if HAVE_DECL__SC_LARGE_PAGESIZE
	      obj->memory.page_types_len++;
	      obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
	      break;
	    }
	  case HWLOC_OBJ_L1CACHE:
	  case HWLOC_OBJ_L2CACHE:
	  case HWLOC_OBJ_L3CACHE:
	  case HWLOC_OBJ_L4CACHE:
	  case HWLOC_OBJ_L5CACHE:
	  case HWLOC_OBJ_L1ICACHE:
	  case HWLOC_OBJ_L2ICACHE:
	  case HWLOC_OBJ_L3ICACHE:
	    obj->attr->cache.size = procInfo[i].Cache.Size;
	    /* CACHE_FULLY_ASSOCIATIVE is encoded as -1 in hwloc. */
	    obj->attr->cache.associativity = procInfo[i].Cache.Associativity == CACHE_FULLY_ASSOCIATIVE ? -1 : procInfo[i].Cache.Associativity ;
	    obj->attr->cache.linesize = procInfo[i].Cache.LineSize;
	    obj->attr->cache.depth = procInfo[i].Cache.Level;
	    /* NOTE(review): this switch reads procInfo->Cache.Type (element 0)
	     * instead of procInfo[i].Cache.Type — likely a missing [i]. */
	    switch (procInfo->Cache.Type) {
	      case CacheUnified:
		obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
		break;
	      case CacheData:
		obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
		break;
	      case CacheInstruction:
		obj->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
		break;
	      default:
		hwloc_free_unlinked_object(obj);
		continue;
	    }
	    break;
	  case HWLOC_OBJ_GROUP:
	    obj->attr->group.kind = procInfo[i].Relationship == RelationGroup ? HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP : HWLOC_GROUP_KIND_WINDOWS_RELATIONSHIP_UNKNOWN;
	    break;
	  default:
	    break;
	}
	hwloc_insert_object_by_cpuset(topology, obj);
      }

      free(procInfo);
  }

  /* Preferred path: the Ex API reports processor groups. */
  if (GetLogicalProcessorInformationExProc) {
      PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX procInfoTotal, tmpprocInfoTotal, procInfo;
      unsigned id;
      struct hwloc_obj *obj;
      hwloc_obj_type_t type;

      length = 0;
      procInfoTotal = NULL;

      /* Grow the buffer until the API stops reporting it as too small. */
      while (1) {
	if (GetLogicalProcessorInformationExProc(RelationAll, procInfoTotal, &length))
	  break;
	if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
	  return -1;
	tmpprocInfoTotal = realloc(procInfoTotal, length);
	if (!tmpprocInfoTotal) {
	  free(procInfoTotal);
	  goto out;
	}
	procInfoTotal = tmpprocInfoTotal;
      }

      /* Entries are variable-sized: advance by each entry's Size field. */
      for (procInfo = procInfoTotal;
	   (void*) procInfo < (void*) ((uintptr_t) procInfoTotal + length);
	   procInfo = (void*) ((uintptr_t) procInfo + procInfo->Size)) {
	unsigned num, i;
	GROUP_AFFINITY *GroupMask;

	/* Ignore unknown caches */
	if (procInfo->Relationship == RelationCache
		&& procInfo->Cache.Type != CacheUnified
		&& procInfo->Cache.Type != CacheData
		&& procInfo->Cache.Type != CacheInstruction)
	  continue;

	id = -1;
	switch (procInfo->Relationship) {
	  case RelationNumaNode:
	    type = HWLOC_OBJ_NUMANODE;
	    num = 1;
	    GroupMask = &procInfo->NumaNode.GroupMask;
	    id = procInfo->NumaNode.NodeNumber;
	    break;
	  case RelationProcessorPackage:
	    type = HWLOC_OBJ_PACKAGE;
	    num = procInfo->Processor.GroupCount;
	    GroupMask = procInfo->Processor.GroupMask;
	    break;
	  case RelationCache:
	    type = (procInfo->Cache.Type == CacheInstruction ? HWLOC_OBJ_L1ICACHE : HWLOC_OBJ_L1CACHE) + procInfo->Cache.Level - 1;
	    num = 1;
	    GroupMask = &procInfo->Cache.GroupMask;
	    break;
	  case RelationProcessorCore:
	    type = HWLOC_OBJ_CORE;
	    num = procInfo->Processor.GroupCount;
	    GroupMask = procInfo->Processor.GroupMask;
	    break;
	  case RelationGroup:
	    /* So strange an interface... */
	    for (id = 0; id < procInfo->Group.ActiveGroupCount; id++) {
	      KAFFINITY mask;
	      hwloc_bitmap_t set;

	      set = hwloc_bitmap_alloc();
	      mask = procInfo->Group.GroupInfo[id].ActiveProcessorMask;
	      hwloc_debug("group %u %d cpus mask %lx\n", id, procInfo->Group.GroupInfo[id].ActiveProcessorCount, mask);
	      /* KAFFINITY is ULONG_PTR */
	      hwloc_bitmap_set_ith_ULONG_PTR(set, id, mask);
	      /* FIXME: what if running 32bits on a 64bits windows with 64-processor groups?
	       * ULONG_PTR is 32bits, so half the group is invisible?
	       * maybe scale id to id*8/sizeof(ULONG_PTR) so that groups are 64-PU aligned?
	       */
	      hwloc_debug_2args_bitmap("group %u %d bitmap %s\n", id, procInfo->Group.GroupInfo[id].ActiveProcessorCount, set);

	      /* save the set of PUs so that we can create them at the end */
	      if (!groups_pu_set)
		groups_pu_set = hwloc_bitmap_alloc();
	      hwloc_bitmap_or(groups_pu_set, groups_pu_set, set);

	      if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) {
		obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, id);
		obj->cpuset = set;
		obj->attr->group.kind = HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP;
		hwloc_insert_object_by_cpuset(topology, obj);
	      } else
		hwloc_bitmap_free(set);
	    }
	    continue;
	  default:
	    /* Don't know how to get the mask. */
	    hwloc_debug("unknown relation %d\n", procInfo->Relationship);
	    continue;
	}

	if (!hwloc_filter_check_keep_object_type(topology, type))
	  continue;

	obj = hwloc_alloc_setup_object(topology, type, id);
	obj->cpuset = hwloc_bitmap_alloc();
	/* Accumulate the per-group affinity masks into a single cpuset. */
	for (i = 0; i < num; i++) {
	  hwloc_debug("%s#%u %d: mask %d:%lx\n", hwloc_type_name(type), id, i, GroupMask[i].Group, GroupMask[i].Mask);
	  /* GROUP_AFFINITY.Mask is KAFFINITY, which is ULONG_PTR */
	  hwloc_bitmap_set_ith_ULONG_PTR(obj->cpuset, GroupMask[i].Group, GroupMask[i].Mask);
	  /* FIXME: scale id to id*8/sizeof(ULONG_PTR) as above? */
	}
	hwloc_debug_2args_bitmap("%s#%u bitmap %s\n", hwloc_type_name(type), id, obj->cpuset);

	switch (type) {
	  case HWLOC_OBJ_NUMANODE:
	    {
	      ULONGLONG avail;
	      obj->nodeset = hwloc_bitmap_alloc();
	      hwloc_bitmap_set(obj->nodeset, id);
	      /* Query available memory via whichever NUMA API exists. */
	      if ((GetNumaAvailableMemoryNodeExProc && GetNumaAvailableMemoryNodeExProc(id, &avail))
	       || (GetNumaAvailableMemoryNodeProc && GetNumaAvailableMemoryNodeProc(id, &avail)))
		obj->memory.local_memory = avail;
	      obj->memory.page_types = malloc(2 * sizeof(*obj->memory.page_types));
	      memset(obj->memory.page_types, 0, 2 * sizeof(*obj->memory.page_types));
	      obj->memory.page_types_len = 1;
	      obj->memory.page_types[0].size = SystemInfo.dwPageSize;
#if HAVE_DECL__SC_LARGE_PAGESIZE
	      obj->memory.page_types_len++;
	      obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
	      break;
	    }
	  case HWLOC_OBJ_L1CACHE:
	  case HWLOC_OBJ_L2CACHE:
	  case HWLOC_OBJ_L3CACHE:
	  case HWLOC_OBJ_L4CACHE:
	  case HWLOC_OBJ_L5CACHE:
	  case HWLOC_OBJ_L1ICACHE:
	  case HWLOC_OBJ_L2ICACHE:
	  case HWLOC_OBJ_L3ICACHE:
	    obj->attr->cache.size = procInfo->Cache.CacheSize;
	    /* CACHE_FULLY_ASSOCIATIVE is encoded as -1 in hwloc. */
	    obj->attr->cache.associativity = procInfo->Cache.Associativity == CACHE_FULLY_ASSOCIATIVE ? -1 : procInfo->Cache.Associativity ;
	    obj->attr->cache.linesize = procInfo->Cache.LineSize;
	    obj->attr->cache.depth = procInfo->Cache.Level;
	    switch (procInfo->Cache.Type) {
	      case CacheUnified:
		obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
		break;
	      case CacheData:
		obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
		break;
	      case CacheInstruction:
		obj->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
		break;
	      default:
		hwloc_free_unlinked_object(obj);
		continue;
	    }
	    break;
	  default:
	    break;
	}
	hwloc_insert_object_by_cpuset(topology, obj);
      }
      free(procInfoTotal);
  }

  if (groups_pu_set) {
    /* the system supports multiple Groups.
     * PU indexes may be discontiguous, especially if Groups contain less than 64 procs.
     */
    hwloc_obj_t obj;
    unsigned idx;
    hwloc_bitmap_foreach_begin(idx, groups_pu_set) {
      obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PU, idx);
      obj->cpuset = hwloc_bitmap_alloc();
      hwloc_bitmap_only(obj->cpuset, idx);
      hwloc_debug_1arg_bitmap("cpu %u has cpuset %s\n", idx, obj->cpuset);
      hwloc_insert_object_by_cpuset(topology, obj);
    } hwloc_bitmap_foreach_end();
    hwloc_bitmap_free(groups_pu_set);
  } else {
    /* (function continues beyond this source chunk) */
/* Build a human-readable "<type>:<logical_index>" label for a topology object.
 * Returns a heap-allocated string owned by the caller (free() it when done),
 * or NULL when allocation fails. */
char * location_name(hwloc_obj_t obj){
    char * ret = malloc(64);
    /* Original dereferenced an unchecked malloc; propagate OOM instead. */
    if (ret == NULL)
        return NULL;
    /* snprintf NUL-terminates within the given size, so the full 64-byte
     * buffer can be used and the previous memset was unnecessary. */
    snprintf(ret, 64, "%s:%u", hwloc_type_name(obj->type), obj->logical_index);
    return ret;
}