/*
  Exercise bitmap_get_first_set() / bitmap_get_first() on a map of the
  given size.

  Phase 1: flip every bit off (after set_all) and verify no set bit is
  found; flip every bit on (after clear_all) and verify no clear bit is
  found.  Phase 2: for up to 128 random positions, verify that a single
  set bit is reported as the first set bit, and that a single cleared
  bit is reported as the first clear bit.

  Returns FALSE on success, TRUE (after a diag message) on failure.
*/
my_bool test_get_first_bit(MY_BITMAP *map, uint bitsize)
{
  uint idx;
  uint probe= 0;
  uint iterations= (bitsize > 128) ? 128 : bitsize;

  /* All bits cleared one by one: there must be no first set bit. */
  bitmap_set_all(map);
  for (idx= 0; idx < bitsize; idx++)
    bitmap_clear_bit(map, idx);
  if (bitmap_get_first_set(map) != MY_BIT_NONE)
    goto fail_first_set;

  /* All bits set one by one: there must be no first clear bit. */
  bitmap_clear_all(map);
  for (idx= 0; idx < bitsize; idx++)
    bitmap_set_bit(map, idx);
  if (bitmap_get_first(map) != MY_BIT_NONE)
    goto fail_first_clear;

  bitmap_clear_all(map);
  for (idx= 0; idx < iterations; idx++)
  {
    probe= get_rand_bit(bitsize);

    /* Lone set bit must be reported as the first set bit. */
    bitmap_set_bit(map, probe);
    if (bitmap_get_first_set(map) != probe)
      goto fail_first_set;

    /* Lone cleared bit must be reported as the first clear bit. */
    bitmap_set_all(map);
    bitmap_clear_bit(map, probe);
    if (bitmap_get_first(map) != probe)
      goto fail_first_clear;

    bitmap_clear_all(map);
  }
  return FALSE;

fail_first_set:
  diag("get_first_set error bitsize=%u,prefix_size=%u",bitsize,probe);
  return TRUE;
fail_first_clear:
  diag("get_first error bitsize= %u, prefix_size= %u",bitsize,probe);
  return TRUE;
}
/** * \brief allocates size bytes of memory with the current NUMA policy. * * \param size size of the memory region in bytes * \param pagesize preferred page size to be used * \returns pointer to the mapped memory region * * The memory must be freed with numa_free(). On errors NULL is returned. */ void *numa_alloc(size_t size, size_t pagesize) { NUMA_DEBUG_ALLOC("allocate according to policy\n"); /* check if we use interleaved mode */ if (bitmap_get_weight(numa_alloc_interleave_mask)) { return numa_alloc_interleaved_subset(size, pagesize, numa_alloc_interleave_mask); } /* check membind */ if (bitmap_get_weight(numa_alloc_bind_mask) == 1) { nodeid_t node = (nodeid_t) bitmap_get_first(numa_alloc_bind_mask); return numa_alloc_onnode(size, node, pagesize); } /* TODO: * - handle the case where multiple nodes are set in membind */ /* just return some memory */ return malloc(size); }
/** * \brief allocates size bytes of memory page interleaved the nodes specified in * the nodemask. * * \param size size of the memory region in bytes * \param nodemask subset of nodes to consider for allocation * \param pagesize preferred page size to be used * * \returns pointer to the mapped memory region * * should only be used for large areas consisting of multiple pages. * The memory must be freed with numa_free(). On errors NULL is returned. */ void *numa_alloc_interleaved_subset(size_t size, size_t pagesize, struct bitmap *nodemask) { errval_t err; /* clear out invalid bits */ bitmap_clear_range(nodemask, numa_num_configured_nodes(), bitmap_get_nbits(nodemask)); /* get the number of nodes */ nodeid_t nodes = bitmap_get_weight(nodemask); if (nodes == 0) { return NULL; } NUMA_DEBUG_ALLOC("allocating interleaved using %" PRIuNODEID " nodes\n", nodes); assert(nodes <= numa_num_configured_nodes()); vregion_flags_t flags; validate_page_size(&pagesize, &flags); size_t stride = pagesize; size_t node_size = size / nodes; node_size = (node_size + pagesize - 1) & ~(pagesize - 1); /* update total size as this may change due to rounding of node sizes*/ size = nodes * node_size; /* * XXX: we may want to keep track of numa alloced frames */ struct memobj_numa *memobj = calloc(1, sizeof(struct memobj_numa)); err = memobj_create_numa(memobj, size, 0, numa_num_configured_nodes(), stride); if (err_is_fail(err)) { return NULL; } bitmap_bit_t node = bitmap_get_first(nodemask); nodeid_t node_idx=0; while(node != BITMAP_BIT_NONE) { struct capref frame; err = numa_frame_alloc_on_node(&frame, node_size, (nodeid_t)node, NULL); if (err_is_fail(err)) { DEBUG_ERR(err, "numa_frame_alloc_on_node"); goto out_err; } memobj->m.f.fill(&memobj->m, node_idx, frame, 0); ++node_idx; node = bitmap_get_next(nodemask, node); } struct vregion *vreg = calloc(1, sizeof(struct vregion)); if (vreg == NULL) { goto out_err; } err = vregion_map_aligned(vreg, get_current_vspace(), &memobj->m, 0, size, 
flags, pagesize); if (err_is_fail(err)) { DEBUG_ERR(err, "vregion_map_aligned"); goto out_err; } err = memobj->m.f.pagefault(&memobj->m, vreg, 0, 0); if (err_is_fail(err)) { vregion_destroy(vreg); free(vreg); DEBUG_ERR(err, "memobj.m.f.pagefault"); goto out_err; } // XXX - Is this right? return (void *)(uintptr_t)vregion_get_base_addr(vreg); out_err: for (int i = 0; i < node_idx; ++i) { struct capref frame; memobj->m.f.unfill(&memobj->m, node_idx, &frame, NULL); cap_delete(frame); } return NULL; }