static int hwloc_look_synthetic(struct hwloc_backend *backend) { struct hwloc_topology *topology = backend->topology; struct hwloc_synthetic_backend_data_s *data = backend->private_data; hwloc_bitmap_t cpuset = hwloc_bitmap_alloc(); unsigned i; assert(!topology->levels[0][0]->cpuset); hwloc_alloc_obj_cpusets(topology->levels[0][0]); topology->support.discovery->pu = 1; /* start with os_index 0 for each level */ for (i = 0; data->level[i].arity > 0; i++) data->level[i].next_os_index = 0; /* ... including the last one */ data->level[i].next_os_index = 0; /* update first level type according to the synthetic type array */ topology->levels[0][0]->type = data->level[0].type; hwloc_synthetic__post_look_hooks(&data->level[0], topology->levels[0][0]); for (i = 0; i < data->level[0].arity; i++) hwloc__look_synthetic(topology, data, 1, cpuset); hwloc_bitmap_free(cpuset); hwloc_obj_add_info(topology->levels[0][0], "Backend", "Synthetic"); hwloc_obj_add_info(topology->levels[0][0], "SyntheticDescription", data->string); return 1; }
void hwloc_look_synthetic(struct hwloc_topology *topology) { hwloc_bitmap_t cpuset = hwloc_bitmap_alloc(); unsigned first_cpu = 0, i; topology->support.discovery->pu = 1; /* start with id=0 for each level */ for (i = 0; topology->backend_params.synthetic.arity[i] > 0; i++) topology->backend_params.synthetic.id[i] = 0; /* ... including the last one */ topology->backend_params.synthetic.id[i] = 0; /* update first level type according to the synthetic type array */ topology->levels[0][0]->type = topology->backend_params.synthetic.type[0]; for (i = 0; i < topology->backend_params.synthetic.arity[0]; i++) first_cpu = hwloc__look_synthetic(topology, 1, first_cpu, cpuset); hwloc_bitmap_free(cpuset); hwloc_obj_add_info(topology->levels[0][0], "Backend", "Synthetic"); hwloc_obj_add_info(topology->levels[0][0], "SyntheticDescription", topology->backend_params.synthetic.string); }
/*
 * Recursively build the object described by data->level[level] and its
 * whole subtree.
 * - curlevel->next_os_index hands out unique os_index values within the
 *   level, optionally remapped through curlevel->index_array.
 * - leaf levels (arity == 0) contribute a single bit (their os_index)
 *   to the cpuset.
 * - the cpuset of every generated object is OR'ed into parent_cpuset.
 * - NUMA nodes additionally get a singleton nodeset.
 */
static void
hwloc__look_synthetic(struct hwloc_topology *topology,
		      struct hwloc_synthetic_backend_data_s *data,
		      int level, hwloc_bitmap_t parent_cpuset)
{
  hwloc_obj_t obj;
  unsigned i;
  struct hwloc_synthetic_level_data_s *curlevel = &data->level[level];
  hwloc_obj_type_t type = curlevel->type;
  unsigned os_index;

  /* pre-hooks: only sanity-check the type here; all buildable types are
   * no-ops, anything else means the description parser let garbage through. */
  switch (type) {
    case HWLOC_OBJ_GROUP:
      break;
    case HWLOC_OBJ_MACHINE:
      break;
    case HWLOC_OBJ_NUMANODE:
      break;
    case HWLOC_OBJ_PACKAGE:
      break;
    case HWLOC_OBJ_CACHE:
      break;
    case HWLOC_OBJ_CORE:
      break;
    case HWLOC_OBJ_PU:
      break;
    case HWLOC_OBJ_SYSTEM:
    case HWLOC_OBJ_BRIDGE:
    case HWLOC_OBJ_PCI_DEVICE:
    case HWLOC_OBJ_OS_DEVICE:
    case HWLOC_OBJ_MISC:
    case HWLOC_OBJ_TYPE_MAX:
      /* Should never happen */
      assert(0);
      break;
  }

  /* Take the next os_index for this level, remapping it through the
   * user-provided index array when one was given in the description. */
  os_index = curlevel->next_os_index++;
  if (curlevel->index_array)
    os_index = curlevel->index_array[os_index];

  obj = hwloc_alloc_setup_object(type, os_index);
  obj->cpuset = hwloc_bitmap_alloc();

  if (!curlevel->arity) {
    /* Leaf: the object covers exactly its own os_index. */
    hwloc_bitmap_set(obj->cpuset, os_index);
  } else {
    /* Interior: recurse into the children, which accumulate their
     * cpusets directly into obj->cpuset. */
    for (i = 0; i < curlevel->arity; i++)
      hwloc__look_synthetic(topology, data, level + 1, obj->cpuset);
  }

  if (type == HWLOC_OBJ_NUMANODE) {
    /* NUMA nodes get a singleton nodeset matching their os_index. */
    obj->nodeset = hwloc_bitmap_alloc();
    hwloc_bitmap_set(obj->nodeset, os_index);
  }

  /* Propagate this subtree's cpus up to the parent. */
  hwloc_bitmap_or(parent_cpuset, parent_cpuset, obj->cpuset);

  hwloc_synthetic__post_look_hooks(curlevel, obj);

  hwloc_insert_object_by_cpuset(topology, obj);
}
/*
 * Recursively build objects whose cpus start at first_cpu (legacy interface).
 * - level gives where to look in the type, arity, id and depth arrays.
 * - the id array is used as a variable to get unique os_index values for a
 *   given level.
 * - NUMA nodes get a fixed 1GB of local memory (256k pages of 4kB).
 * - generated cpus are added to parent_cpuset.
 * - returns the next cpu number to be used.
 */
static unsigned
hwloc__look_synthetic(struct hwloc_topology *topology,
		      int level, unsigned first_cpu,
		      hwloc_bitmap_t parent_cpuset)
{
  hwloc_obj_t obj;
  unsigned i;
  hwloc_obj_type_t type = topology->backend_params.synthetic.type[level];

  /* pre-hooks: sanity-check the type; buildable types are no-ops. */
  switch (type) {
    case HWLOC_OBJ_MISC:
      break;
    case HWLOC_OBJ_GROUP:
      break;
    case HWLOC_OBJ_SYSTEM:
    case HWLOC_OBJ_BRIDGE:
    case HWLOC_OBJ_PCI_DEVICE:
    case HWLOC_OBJ_OS_DEVICE:
      /* Shouldn't happen. */
      abort();
      break;
    case HWLOC_OBJ_MACHINE:
      break;
    case HWLOC_OBJ_NODE:
      break;
    case HWLOC_OBJ_SOCKET:
      break;
    case HWLOC_OBJ_CACHE:
      break;
    case HWLOC_OBJ_CORE:
      break;
    case HWLOC_OBJ_PU:
      break;
    case HWLOC_OBJ_TYPE_MAX:
      /* Should never happen */
      assert(0);
      break;
  }

  /* os_index comes from the per-level id counter. */
  obj = hwloc_alloc_setup_object(type, topology->backend_params.synthetic.id[level]++);
  obj->cpuset = hwloc_bitmap_alloc();

  if (!topology->backend_params.synthetic.arity[level]) {
    /* Leaf: consume the next cpu number. */
    hwloc_bitmap_set(obj->cpuset, first_cpu++);
  } else {
    /* Interior: recurse into the children, threading first_cpu through. */
    for (i = 0; i < topology->backend_params.synthetic.arity[level]; i++)
      first_cpu = hwloc__look_synthetic(topology, level + 1, first_cpu, obj->cpuset);
  }

  if (type == HWLOC_OBJ_NODE) {
    /* NUMA nodes get a singleton nodeset matching their os_index. */
    obj->nodeset = hwloc_bitmap_alloc();
    hwloc_bitmap_set(obj->nodeset, obj->os_index);
  }

  /* Propagate this subtree's cpus up to the parent. */
  hwloc_bitmap_or(parent_cpuset, parent_cpuset, obj->cpuset);

  /* post-hooks: fill in type-specific attributes. */
  switch (type) {
    case HWLOC_OBJ_MISC:
      break;
    case HWLOC_OBJ_GROUP:
      obj->attr->group.depth = topology->backend_params.synthetic.depth[level];
      break;
    case HWLOC_OBJ_SYSTEM:
    case HWLOC_OBJ_BRIDGE:
    case HWLOC_OBJ_PCI_DEVICE:
    case HWLOC_OBJ_OS_DEVICE:
      abort();
      break;
    case HWLOC_OBJ_MACHINE:
      break;
    case HWLOC_OBJ_NODE:
      /* 1GB in memory nodes, 256k 4k-pages.
       * calloc zero-initializes the page_types entry; only advertise the
       * entry (page_types_len = 1) when the allocation actually succeeded,
       * instead of dereferencing a NULL pointer on OOM as before. */
      obj->memory.local_memory = 1024*1024*1024;
      obj->memory.page_types = calloc(1, sizeof(*obj->memory.page_types));
      if (obj->memory.page_types) {
        obj->memory.page_types_len = 1;
        obj->memory.page_types[0].size = 4096;
        obj->memory.page_types[0].count = 256*1024;
      } else {
        obj->memory.page_types_len = 0;
      }
      break;
    case HWLOC_OBJ_SOCKET:
      break;
    case HWLOC_OBJ_CACHE:
      obj->attr->cache.depth = topology->backend_params.synthetic.depth[level];
      obj->attr->cache.linesize = 64;
      if (obj->attr->cache.depth == 1)
        /* 32Kb in L1 */
        obj->attr->cache.size = 32*1024;
      else
        /* *4 at each level: 256KB << (2*depth), i.e. 4MB for L2, 16MB for L3.
         * NOTE(review): the original comment claimed "starting from 1MB for
         * L2", which the formula does not produce — confirm intent. */
        obj->attr->cache.size = 256*1024 << (2*obj->attr->cache.depth);
      break;
    case HWLOC_OBJ_CORE:
      break;
    case HWLOC_OBJ_PU:
      break;
    case HWLOC_OBJ_TYPE_MAX:
      /* Should never happen */
      assert(0);
      break;
  }

  hwloc_insert_object_by_cpuset(topology, obj);

  return first_cpu;
}