/*
 * util_buf_pool_create_ex() - allocate and initialize a buffer pool.
 *
 * @buf_pool:    on success, receives the newly allocated pool; set to NULL
 *               on any failure so the caller never holds a dangling pointer.
 * @size:        usable data size of each buffer.
 * @alignment:   required alignment of each entry.
 * @max_cnt:     maximum number of buffers (0 = unlimited).
 * @chunk_cnt:   number of buffers added per growth step.
 * @alloc_hndlr/@free_hndlr: optional callbacks invoked per memory region.
 * @pool_ctx:    opaque context passed through to the handlers.
 *
 * Returns FI_SUCCESS, or -FI_ENOMEM on allocation failure.
 */
int util_buf_pool_create_ex(struct util_buf_pool **buf_pool, size_t size,
			    size_t alignment, size_t max_cnt, size_t chunk_cnt,
			    util_buf_region_alloc_hndlr alloc_hndlr,
			    util_buf_region_free_hndlr free_hndlr,
			    void *pool_ctx)
{
	size_t entry_sz;

	(*buf_pool) = calloc(1, sizeof(**buf_pool));
	if (!*buf_pool)
		return -FI_ENOMEM;

	(*buf_pool)->alloc_hndlr = alloc_hndlr;
	(*buf_pool)->free_hndlr = free_hndlr;
	(*buf_pool)->data_sz = size;
	(*buf_pool)->alignment = alignment;
	(*buf_pool)->max_cnt = max_cnt;
	(*buf_pool)->chunk_cnt = chunk_cnt;
	(*buf_pool)->ctx = pool_ctx;

	/* Append a footer to each entry only when the pool requires one. */
	entry_sz = util_buf_use_ftr(*buf_pool) ?
		   (size + sizeof(struct util_buf_footer)) : size;
	(*buf_pool)->entry_sz = fi_get_aligned_sz(entry_sz, alignment);

	slist_init(&(*buf_pool)->buf_list);
	slist_init(&(*buf_pool)->region_list);

	if (util_buf_grow(*buf_pool)) {
		free(*buf_pool);
		/* Fix: don't leave the caller's pointer dangling after free. */
		*buf_pool = NULL;
		return -FI_ENOMEM;
	}
	return FI_SUCCESS;
}
/*
 * util_buf_pool_create_ex() - allocate and initialize a buffer pool.
 *
 * Entry size is the data size, plus a footer when the pool uses one,
 * rounded up to the requested alignment.  The pool is pre-grown once;
 * if that initial growth fails the pool is released.
 *
 * Returns the new pool, or NULL on allocation failure.
 */
struct util_buf_pool *util_buf_pool_create_ex(size_t size, size_t alignment,
					      size_t max_cnt, size_t chunk_cnt,
					      util_buf_region_alloc_hndlr alloc_hndlr,
					      util_buf_region_free_hndlr free_hndlr)
{
	struct util_buf_pool *pool;
	size_t raw_sz;

	pool = calloc(1, sizeof(*pool));
	if (!pool)
		return NULL;

	pool->data_sz = size;
	pool->alignment = alignment;
	pool->max_cnt = max_cnt;
	pool->chunk_cnt = chunk_cnt;
	pool->alloc_hndlr = alloc_hndlr;
	pool->free_hndlr = free_hndlr;

	raw_sz = size;
	if (util_buf_use_ftr(pool))
		raw_sz += sizeof(struct util_buf_footer);
	pool->entry_sz = fi_get_aligned_sz(raw_sz, alignment);

	slist_init(&pool->buf_list);
	slist_init(&pool->region_list);

	if (util_buf_grow(pool))
		goto err;

	return pool;
err:
	free(pool);
	return NULL;
}
int util_buf_pool_create_attr(struct util_buf_attr *attr, struct util_buf_pool **buf_pool) { size_t entry_sz; ssize_t hp_size; (*buf_pool) = calloc(1, sizeof(**buf_pool)); if (!*buf_pool) return -FI_ENOMEM; (*buf_pool)->attr = *attr; entry_sz = (attr->size + sizeof(struct util_buf_footer)); (*buf_pool)->entry_sz = fi_get_aligned_sz(entry_sz, attr->alignment); hp_size = ofi_get_hugepage_size(); if ((*buf_pool)->attr.chunk_cnt * (*buf_pool)->entry_sz < hp_size) (*buf_pool)->attr.is_mmap_region = 0; else (*buf_pool)->attr.is_mmap_region = 1; if (!(*buf_pool)->attr.indexing.ordered) slist_init(&(*buf_pool)->list.buffers); else dlist_init(&(*buf_pool)->list.regions); return FI_SUCCESS; }
int util_buf_grow(struct util_buf_pool *pool) { void *buf; int ret; size_t i; struct util_buf_region *buf_region; ssize_t hp_size; struct util_buf_footer *buf_ftr; if (pool->attr.max_cnt && pool->num_allocated >= pool->attr.max_cnt) { return -1; } buf_region = calloc(1, sizeof(*buf_region)); if (!buf_region) return -1; buf_region->pool = pool; dlist_init(&buf_region->buf_list); if (pool->attr.is_mmap_region) { hp_size = ofi_get_hugepage_size(); if (hp_size < 0) goto err1; buf_region->size = fi_get_aligned_sz(pool->attr.chunk_cnt * pool->entry_sz, hp_size); ret = ofi_alloc_hugepage_buf((void **)&buf_region->mem_region, buf_region->size); if (ret) { FI_DBG(&core_prov, FI_LOG_CORE, "Huge page allocation failed: %s\n", fi_strerror(-ret)); if (pool->num_allocated > 0) goto err1; pool->attr.is_mmap_region = 0; } } if (!pool->attr.is_mmap_region) { buf_region->size = pool->attr.chunk_cnt * pool->entry_sz; ret = ofi_memalign((void **)&buf_region->mem_region, pool->attr.alignment, buf_region->size); if (ret) goto err1; } if (pool->attr.alloc_hndlr) { ret = pool->attr.alloc_hndlr(pool->attr.ctx, buf_region->mem_region, buf_region->size, &buf_region->context); if (ret) goto err2; } if (!(pool->regions_cnt % UTIL_BUF_POOL_REGION_CHUNK_CNT)) { struct util_buf_region **new_table = realloc(pool->regions_table, (pool->regions_cnt + UTIL_BUF_POOL_REGION_CHUNK_CNT) * sizeof(*pool->regions_table)); if (!new_table) goto err3; pool->regions_table = new_table; } pool->regions_table[pool->regions_cnt] = buf_region; pool->regions_cnt++; for (i = 0; i < pool->attr.chunk_cnt; i++) { buf = (buf_region->mem_region + i * pool->entry_sz); buf_ftr = util_buf_get_ftr(pool, buf); if (pool->attr.init) { #if ENABLE_DEBUG if (!pool->attr.indexing.ordered) { buf_ftr->entry.slist.next = (void *) OFI_MAGIC_64; pool->attr.init(pool->attr.ctx, buf); assert(buf_ftr->entry.slist.next == (void *) OFI_MAGIC_64); } else { buf_ftr->entry.dlist.next = (void *) OFI_MAGIC_64; buf_ftr->entry.dlist.prev = (void *) 
OFI_MAGIC_64; pool->attr.init(pool->attr.ctx, buf); assert((buf_ftr->entry.dlist.next == (void *) OFI_MAGIC_64) && (buf_ftr->entry.dlist.prev == (void *) OFI_MAGIC_64)); } #else pool->attr.init(pool->attr.ctx, buf); #endif } buf_ftr->region = buf_region; buf_ftr->index = pool->num_allocated + i; if (!pool->attr.indexing.ordered) { slist_insert_tail(&buf_ftr->entry.slist, &pool->list.buffers); } else { dlist_insert_tail(&buf_ftr->entry.dlist, &buf_region->buf_list); } } if (pool->attr.indexing.ordered) { dlist_insert_tail(&buf_region->entry, &pool->list.regions); } pool->num_allocated += pool->attr.chunk_cnt; return 0; err3: if (pool->attr.free_hndlr) pool->attr.free_hndlr(pool->attr.ctx, buf_region->context); err2: ofi_freealign(buf_region->mem_region); err1: free(buf_region); return -1; }