/*
 * Free a queue buffer carved out of a shared hugetlb region.  When
 * do_fork is set, fork inheritance is re-enabled for the range before
 * the chunk is returned to the region's bitmap; once the bitmap is
 * empty, the whole hugetlb region is released.
 */
static void mlx4_free_buf_huge_ex(struct mlx4_context *mctx,
				  struct mlx4_buf *buf, int do_fork)
{
	struct mlx4_hugetlb_mem *hmem;

	if (do_fork)
		ibv_dofork_range(buf->buf, buf->length);

	if (buf->hmem == NULL) {
		if (mlx4_trace)
			perror("No hugetlb mem");
		return;
	}

	hmem = (struct mlx4_hugetlb_mem *)buf->hmem;

	mlx4_spin_lock(&mctx->hugetlb_lock);
	mlx4_bitmap_free_range(&hmem->bitmap, buf->base,
			       buf->length / MLX4_Q_CHUNK_SIZE);
	if (is_bitmap_empty(&hmem->bitmap)) {
		list_del(&hmem->list);
		mlx4_hugetlb_mem_free(hmem);
	}
	mlx4_spin_unlock(&mctx->hugetlb_lock);
}

void mlx4_free_buf(struct mlx4_buf *buf)
{
	if (buf->length) {
		ibv_dofork_range(buf->buf, buf->length);
		munmap(buf->buf, buf->length);
	}
}

void bnxt_re_free_aligned(struct bnxt_re_queue *que)
{
	if (que->bytes) {
		ibv_dofork_range(que->va, que->bytes);
		munmap(que->va, que->bytes);
		que->bytes = 0;
	}
}

int __ibv_dereg_mr(struct ibv_mr *mr)
{
	int ret;
	void *addr = mr->addr;
	size_t length = mr->length;

	ret = mr->context->ops.dereg_mr(mr);
	/* Only re-enable fork inheritance once the MR is actually gone. */
	if (!ret)
		ibv_dofork_range(addr, length);

	return ret;
}

/* Instrumented variant of __ibv_dereg_mr() with an entry trace. */
int __ibv_dereg_mr(struct ibv_mr *mr)
{
	int ret;
	void *addr = mr->addr;
	size_t length = mr->length;

	fprintf(stderr, "%s:%s:%d\n", __func__, __FILE__, __LINE__);
	ret = mr->context->ops.dereg_mr(mr);
	if (!ret)
		ibv_dofork_range(addr, length);

	return ret;
}

struct ibv_mr *__ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
			    int access)
{
	struct ibv_mr *mr;

	/* Protect the range from fork() for as long as it is registered. */
	if (ibv_dontfork_range(addr, length))
		return NULL;

	mr = pd->context->ops.reg_mr(pd, addr, length, access);
	if (mr) {
		mr->context = pd->context;
		mr->pd = pd;
		mr->addr = addr;
		mr->length = length;
	} else {
		/* Registration failed: undo the dontfork marking. */
		ibv_dofork_range(addr, length);
	}

	return mr;
}

/* Instrumented variant of __ibv_reg_mr() with an entry trace. */
struct ibv_mr *__ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
			    int access)
{
	struct ibv_mr *mr;

	fprintf(stderr, "%s:%s:%d\n", __func__, __FILE__, __LINE__);
	if (ibv_dontfork_range(addr, length))
		return NULL;

	mr = pd->context->ops.reg_mr(pd, addr, length, access);
	if (mr) {
		mr->context = pd->context;
		mr->pd = pd;
		mr->addr = addr;
		mr->length = length;
	} else {
		ibv_dofork_range(addr, length);
	}

	return mr;
}

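/*
 * Caller-side sketch (not part of the original sources): the
 * __ibv_reg_mr()/__ibv_dereg_mr() pair above means applications never
 * call ibv_dontfork_range()/ibv_dofork_range() themselves; fork
 * protection is applied and removed transparently around the
 * provider's reg_mr/dereg_mr hooks.  A minimal, hedged usage example
 * against the standard libibverbs API (assumes an RDMA device is
 * present; error handling trimmed for brevity):
 */
#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);

	if (!dev_list || !dev_list[0]) {
		fprintf(stderr, "no RDMA devices found\n");
		return 1;
	}

	struct ibv_context *ctx = ibv_open_device(dev_list[0]);
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	void *buf = malloc(4096);

	/*
	 * ibv_reg_mr() resolves to __ibv_reg_mr() above; with fork
	 * support enabled (ibv_fork_init()), the range is madvise()d
	 * MADV_DONTFORK before the provider pins it.
	 */
	struct ibv_mr *mr = ibv_reg_mr(pd, buf, 4096,
				       IBV_ACCESS_LOCAL_WRITE);
	if (!mr)
		perror("ibv_reg_mr");

	/* ibv_dereg_mr() undoes the marking via ibv_dofork_range(). */
	if (mr)
		ibv_dereg_mr(mr);

	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	free(buf);
	return 0;
}
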
void mthca_free_buf(struct mthca_buf *buf)
{
	ibv_dofork_range(buf->buf, buf->length);
	munmap(buf->buf, buf->length);
}

/*
 * Experimental rereg path: validates the request, applies fork
 * protection to the new range before the provider call, and only
 * releases the old range's dontfork marking after the provider
 * reports success.
 */
int __ibv_exp_rereg_mr(struct ibv_mr *mr, int flags, struct ibv_pd *pd,
		       void *addr, size_t length, uint64_t access,
		       struct ibv_exp_rereg_mr_attr *attr)
{
	int dofork_onfail = 0;
	int err;
	struct verbs_context_exp *vctx;
	void *old_addr;
	size_t old_len;
	struct ibv_exp_rereg_out out;

	if (attr->comp_mask & ~(IBV_EXP_REREG_MR_ATTR_RESERVED - 1))
		return errno = EINVAL;

	if (flags & ~IBV_EXP_REREG_MR_FLAGS_SUPPORTED)
		return errno = EINVAL;

	if ((flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) && !length)
		return errno = EINVAL;

	if (!(flags & IBV_EXP_REREG_MR_CHANGE_ACCESS))
		access = 0;

	/* An internally allocated MR requires a translation change and
	 * forbids a caller-supplied address; an external one requires it.
	 */
	if ((access & IBV_EXP_ACCESS_ALLOCATE_MR) &&
	    (!(flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) || addr != NULL))
		return errno = EINVAL;

	if (!(access & IBV_EXP_ACCESS_ALLOCATE_MR) &&
	    (flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) && addr == NULL)
		return errno = EINVAL;

	vctx = verbs_get_exp_ctx_op(mr->context, drv_exp_rereg_mr);
	if (!vctx)
		return errno = ENOSYS;

	/* If the address will be allocated internally, fork support is
	 * handled by the provider.
	 */
	if (!(access & IBV_EXP_ACCESS_ALLOCATE_MR) &&
	    (flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION)) {
		err = ibv_dontfork_range(addr, length);
		if (err)
			return err;
		dofork_onfail = 1;
	}

	old_addr = mr->addr;
	old_len = mr->length;
	memset(&out, 0, sizeof(out));
	if (flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION)
		out.need_dofork = 1;

	err = vctx->drv_exp_rereg_mr(mr, flags, pd, addr, length, access,
				     attr, &out);
	if (!err) {
		if (flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) {
			if (out.need_dofork)
				ibv_dofork_range(old_addr, old_len);
			if (!(access & IBV_EXP_ACCESS_ALLOCATE_MR)) {
				/* With the internal allocator, the provider
				 * already set mr->addr; otherwise record the
				 * new translation here.
				 */
				mr->addr = addr;
				mr->length = length;
			}
		}
		if (flags & IBV_EXP_REREG_MR_CHANGE_PD)
			mr->pd = pd;
	} else if (dofork_onfail) {
		ibv_dofork_range(addr, length);
	}

	return err;
}