int util_buf_grow(struct util_buf_pool *pool)
{
	int ret;
	size_t i;
	union util_buf *util_buf;
	struct util_buf_region *buf_region;

	/* Respect the pool's optional upper bound on total buffers. */
	if (pool->max_cnt && pool->num_allocated >= pool->max_cnt)
		return -1;

	buf_region = calloc(1, sizeof(*buf_region));
	if (!buf_region)
		return -1;

	ret = ofi_memalign((void **)&buf_region->mem_region, pool->alignment,
			   pool->chunk_cnt * pool->entry_sz);
	if (ret)
		goto err_free_region;

	if (pool->alloc_hndlr) {
		ret = pool->alloc_hndlr(pool->ctx, buf_region->mem_region,
					pool->chunk_cnt * pool->entry_sz,
					&buf_region->context);
		if (ret)
			goto err_free_mem;
	}

	/* Carve the region into entry_sz slots and thread each slot onto
	 * the pool's free list. */
	for (i = 0; i < pool->chunk_cnt; i++) {
		util_buf = (union util_buf *)
			(buf_region->mem_region + i * pool->entry_sz);
		util_buf_set_region(util_buf, buf_region, pool);
		slist_insert_tail(&util_buf->entry, &pool->buf_list);
	}

	slist_insert_tail(&buf_region->entry, &pool->region_list);
	pool->num_allocated += pool->chunk_cnt;
	return 0;

err_free_mem:
	/* Pairs with ofi_memalign above. */
	ofi_freealign(buf_region->mem_region);
err_free_region:
	free(buf_region);
	return -1;
}
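/*
 * A minimal, self-contained sketch of the same growth pattern used by
 * util_buf_grow above: allocate one region, carve it into fixed-size
 * entries, and thread every entry onto a singly linked free list. All
 * names here (mini_pool, mini_pool_grow, ...) are hypothetical and are
 * not part of libfabric; this only illustrates the technique.
 */
#include <stdlib.h>

struct mini_buf {
	struct mini_buf *next;	/* free-list link; overlaps user data */
};

struct mini_pool {
	size_t entry_sz;	/* must be >= sizeof(struct mini_buf) */
	size_t chunk_cnt;	/* entries added per grow */
	struct mini_buf *free_list;
};

static int mini_pool_grow(struct mini_pool *pool)
{
	char *region;
	size_t i;

	/* One allocation backs chunk_cnt entries; for brevity the region
	 * itself is not tracked here (a real pool keeps a region list so
	 * it can be freed on pool destruction). */
	region = malloc(pool->chunk_cnt * pool->entry_sz);
	if (!region)
		return -1;

	for (i = 0; i < pool->chunk_cnt; i++) {
		struct mini_buf *buf =
			(struct mini_buf *) (region + i * pool->entry_sz);
		buf->next = pool->free_list;
		pool->free_list = buf;
	}
	return 0;
}

static void *mini_pool_alloc(struct mini_pool *pool)
{
	struct mini_buf *buf;

	/* Grow lazily, exactly as the pool above does on exhaustion. */
	if (!pool->free_list && mini_pool_grow(pool))
		return NULL;
	buf = pool->free_list;
	pool->free_list = buf->next;
	return buf;
}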
/* Inserts a single AH into the AV. */
static int efa_av_insert_ah(struct efa_av *av, struct efa_ep_addr *addr,
			    fi_addr_t *fi_addr)
{
	struct efa_pd *pd = container_of(av->domain->pd, struct efa_pd, ibv_pd);
	struct ibv_ah_attr ah_attr = { 0 };
	char str[INET6_ADDRSTRLEN] = { 0 };
	struct efa_reverse_av *reverse_av;
	struct efa_ah_qpn key;
	struct efa_conn *conn;
	int err;

	inet_ntop(AF_INET6, addr->raw, str, INET6_ADDRSTRLEN);
	EFA_INFO(FI_LOG_AV, "Insert address: GID[%s] QP[%u]\n", str, addr->qpn);

	if (!efa_av_is_valid_address(addr)) {
		EFA_INFO(FI_LOG_AV, "Failed to insert bad addr\n");
		err = -FI_EADDRNOTAVAIL;
		goto err_invalid;
	}

	err = ofi_memalign((void **)&conn, EFA_MEM_ALIGNMENT, sizeof(*conn));
	if (err) {
		err = -FI_ENOMEM;
		goto err_invalid;
	}

	ah_attr.port_num = 1;
	memcpy(ah_attr.grh.dgid.raw, addr->raw, sizeof(addr->raw));
	conn->ah = efa_cmd_create_ah(pd, &ah_attr);
	if (!conn->ah) {
		err = -FI_EINVAL;
		goto err_free_conn;
	}

	memcpy((void *)&conn->ep_addr, addr, sizeof(*addr));

	switch (av->type) {
	case FI_AV_MAP:
		*fi_addr = (uintptr_t)(void *)conn;
		break;
	case FI_AV_TABLE:
		av->next = efa_av_tbl_find_first_empty(av, av->next);
		assert(av->next != -1);
		*fi_addr = av->next;
		av->conn_table[av->next] = conn;
		av->next++;
		break;
	default:
		assert(0);
		break;
	}

	/* Zero the key first: the entire struct, padding included, is
	 * compared byte-for-byte by the hash lookup. */
	memset(&key, 0, sizeof(key));
	key.efa_ah = conn->ah->efa_address_handle;
	key.qpn = addr->qpn;
	/* Reusing an existing entry is correct since the same address
	 * should be mapped to the same ah. */
	HASH_FIND(hh, av->reverse_av, &key, sizeof(key), reverse_av);
	if (!reverse_av) {
		reverse_av = malloc(sizeof(*reverse_av));
		if (!reverse_av) {
			err = -FI_ENOMEM;
			goto err_destroy_ah;
		}

		memcpy(&reverse_av->key, &key, sizeof(key));
		reverse_av->fi_addr = *fi_addr;
		HASH_ADD(hh, av->reverse_av, key, sizeof(reverse_av->key),
			 reverse_av);
	}

	EFA_INFO(FI_LOG_AV, "av successfully inserted conn[%p] fi_addr[%" PRIu64 "]\n",
		 conn, *fi_addr);

	av->used++;
	return FI_SUCCESS;

err_destroy_ah:
	/* Roll back the table slot claimed above before destroying the AH. */
	if (av->type == FI_AV_TABLE)
		av->conn_table[--av->next] = NULL;
	efa_cmd_destroy_ah(conn->ah);
err_free_conn:
	/* Pairs with ofi_memalign above. */
	ofi_freealign(conn);
err_invalid:
	*fi_addr = FI_ADDR_NOTAVAIL;
	return err;
}
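/*
 * A standalone sketch of the reverse-lookup technique above: a uthash
 * table keyed by a composite struct ({ah, qpn} -> fi_addr). The type and
 * function names here are hypothetical stand-ins, not the EFA provider's
 * own. Because HASH_FIND/HASH_ADD hash and compare the key memory
 * byte-for-byte, a struct key must be zeroed before its fields are set
 * so that compiler-inserted padding bytes have a defined value.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "uthash.h"

struct ah_qpn_key {
	uint16_t ah;		/* padding may follow this field */
	uint32_t qpn;
};

struct reverse_entry {
	struct ah_qpn_key key;
	uint64_t fi_addr;
	UT_hash_handle hh;	/* makes the struct hashable by uthash */
};

static struct reverse_entry *reverse_map;

static int reverse_map_insert(uint16_t ah, uint32_t qpn, uint64_t fi_addr)
{
	struct ah_qpn_key key;
	struct reverse_entry *entry;

	memset(&key, 0, sizeof(key));	/* zero padding before hashing */
	key.ah = ah;
	key.qpn = qpn;

	HASH_FIND(hh, reverse_map, &key, sizeof(key), entry);
	if (entry)
		return 0;	/* same address maps to the same handle */

	entry = calloc(1, sizeof(*entry));
	if (!entry)
		return -1;
	memcpy(&entry->key, &key, sizeof(key));
	entry->fi_addr = fi_addr;
	HASH_ADD(hh, reverse_map, key, sizeof(entry->key), entry);
	return 0;
}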
int util_buf_grow(struct util_buf_pool *pool)
{
	void *buf;
	int ret;
	size_t i;
	struct util_buf_region *buf_region;
	ssize_t hp_size;
	struct util_buf_footer *buf_ftr;

	if (pool->attr.max_cnt && pool->num_allocated >= pool->attr.max_cnt)
		return -1;

	buf_region = calloc(1, sizeof(*buf_region));
	if (!buf_region)
		return -1;

	buf_region->pool = pool;
	dlist_init(&buf_region->buf_list);

	if (pool->attr.is_mmap_region) {
		hp_size = ofi_get_hugepage_size();
		if (hp_size < 0)
			goto err1;

		buf_region->size = fi_get_aligned_sz(pool->attr.chunk_cnt *
						     pool->entry_sz, hp_size);

		ret = ofi_alloc_hugepage_buf((void **)&buf_region->mem_region,
					     buf_region->size);
		if (ret) {
			FI_DBG(&core_prov, FI_LOG_CORE,
			       "Huge page allocation failed: %s\n",
			       fi_strerror(-ret));

			/* Fall back to a regular allocation only before the
			 * first region exists, so all regions in one pool
			 * use the same allocation method. */
			if (pool->num_allocated > 0)
				goto err1;
			pool->attr.is_mmap_region = 0;
		}
	}

	if (!pool->attr.is_mmap_region) {
		buf_region->size = pool->attr.chunk_cnt * pool->entry_sz;
		ret = ofi_memalign((void **)&buf_region->mem_region,
				   pool->attr.alignment, buf_region->size);
		if (ret)
			goto err1;
	}

	if (pool->attr.alloc_hndlr) {
		ret = pool->attr.alloc_hndlr(pool->attr.ctx,
					     buf_region->mem_region,
					     buf_region->size,
					     &buf_region->context);
		if (ret)
			goto err2;
	}

	/* Grow the regions table in chunks of
	 * UTIL_BUF_POOL_REGION_CHUNK_CNT entries. */
	if (!(pool->regions_cnt % UTIL_BUF_POOL_REGION_CHUNK_CNT)) {
		struct util_buf_region **new_table =
			realloc(pool->regions_table,
				(pool->regions_cnt +
				 UTIL_BUF_POOL_REGION_CHUNK_CNT) *
				sizeof(*pool->regions_table));
		if (!new_table)
			goto err3;
		pool->regions_table = new_table;
	}
	pool->regions_table[pool->regions_cnt] = buf_region;
	pool->regions_cnt++;

	for (i = 0; i < pool->attr.chunk_cnt; i++) {
		buf = (buf_region->mem_region + i * pool->entry_sz);
		buf_ftr = util_buf_get_ftr(pool, buf);

		if (pool->attr.init) {
#if ENABLE_DEBUG
			/* In debug builds, verify that the user's init
			 * callback does not touch the footer's list entry. */
			if (!pool->attr.indexing.ordered) {
				buf_ftr->entry.slist.next = (void *) OFI_MAGIC_64;
				pool->attr.init(pool->attr.ctx, buf);
				assert(buf_ftr->entry.slist.next ==
				       (void *) OFI_MAGIC_64);
			} else {
				buf_ftr->entry.dlist.next = (void *) OFI_MAGIC_64;
				buf_ftr->entry.dlist.prev = (void *) OFI_MAGIC_64;
				pool->attr.init(pool->attr.ctx, buf);
				assert((buf_ftr->entry.dlist.next ==
					(void *) OFI_MAGIC_64) &&
				       (buf_ftr->entry.dlist.prev ==
					(void *) OFI_MAGIC_64));
			}
#else
			pool->attr.init(pool->attr.ctx, buf);
#endif
		}

		buf_ftr->region = buf_region;
		buf_ftr->index = pool->num_allocated + i;

		if (!pool->attr.indexing.ordered)
			slist_insert_tail(&buf_ftr->entry.slist,
					  &pool->list.buffers);
		else
			dlist_insert_tail(&buf_ftr->entry.dlist,
					  &buf_region->buf_list);
	}

	if (pool->attr.indexing.ordered)
		dlist_insert_tail(&buf_region->entry, &pool->list.regions);

	pool->num_allocated += pool->attr.chunk_cnt;
	return 0;

err3:
	if (pool->attr.free_hndlr)
		pool->attr.free_hndlr(pool->attr.ctx, buf_region->context);
err2:
	/* Release the region with the method that allocated it. */
	if (pool->attr.is_mmap_region)
		ofi_free_hugepage_buf(buf_region->mem_region,
				      buf_region->size);
	else
		ofi_freealign(buf_region->mem_region);
err1:
	free(buf_region);
	return -1;
}
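/*
 * A self-contained sketch of the hugepage-first allocation with fallback
 * used above: try an anonymous MAP_HUGETLB mapping, and if the kernel
 * refuses (e.g. no hugepages configured), fall back to a regular aligned
 * allocation. The helper name and the fixed 2 MiB page size are
 * assumptions for illustration only; libfabric queries the actual
 * hugepage size at runtime (ofi_get_hugepage_size). Linux-specific.
 */
#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/mman.h>

#define SKETCH_HUGEPAGE_SZ	(2 * 1024 * 1024)	/* assume 2 MiB pages */

/* Returns the region; *used_hugepages reports which path succeeded, so
 * the caller can release it with munmap() or free() accordingly. */
static void *alloc_region_with_fallback(size_t size, size_t alignment,
					int *used_hugepages)
{
	void *mem;

	/* Round the request up to a whole number of hugepages. */
	size_t hp_size = (size + SKETCH_HUGEPAGE_SZ - 1) &
			 ~((size_t) SKETCH_HUGEPAGE_SZ - 1);

	mem = mmap(NULL, hp_size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (mem != MAP_FAILED) {
		*used_hugepages = 1;
		return mem;
	}

	/* Fallback: a regular aligned allocation of the original size.
	 * alignment must be a power of two multiple of sizeof(void *). */
	*used_hugepages = 0;
	if (posix_memalign(&mem, alignment, size))
		return NULL;
	return mem;
}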