Пример #1
0
/*
 * Allocate a page-aligned, anonymously-mapped buffer of at least @size
 * bytes and exclude it from fork() copy-on-write.
 *
 * Returns 0 on success; a positive errno value on failure.  On success
 * buf->buf/buf->length describe the mapping; on failure the mapping is
 * released (or was never created).
 */
int mthca_alloc_buf(struct mthca_buf *buf, size_t size, int page_size)
{
	void *addr;
	int ret;

	buf->length = align(size, page_size);
	addr = mmap(NULL, buf->length, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	buf->buf = addr;
	if (addr == MAP_FAILED)
		return errno;

	/* Keep the registered range out of forked children's COW. */
	ret = ibv_dontfork_range(buf->buf, size);
	if (ret)
		munmap(buf->buf, buf->length);

	return ret;
}
Пример #2
0
/*
 * Register a memory region with the provider.
 *
 * The range is first marked dontfork so it survives a fork() without
 * being copied; if the provider rejects the registration, that marking
 * is rolled back.  Returns the new MR on success, NULL on failure.
 */
struct ibv_mr *__ibv_reg_mr(struct ibv_pd *pd, void *addr,
			    size_t length, int access)
{
	struct ibv_mr *mr;

	if (ibv_dontfork_range(addr, length))
		return NULL;

	mr = pd->context->ops.reg_mr(pd, addr, length, access);
	if (!mr) {
		/* Registration failed: restore normal fork semantics. */
		ibv_dofork_range(addr, length);
		return NULL;
	}

	mr->context = pd->context;
	mr->pd      = pd;
	mr->addr    = addr;
	mr->length  = length;

	return mr;
}
Пример #3
0
/*
 * Register a memory region with the provider.
 *
 * The range is first marked dontfork so it survives a fork() without
 * being copied; if the provider rejects the registration, that marking
 * is rolled back.  Returns the new MR on success, NULL on failure.
 *
 * Fix: removed a leftover debug trace (fprintf to stderr on every call,
 * whose "%s:%s:%d" format also paired __func__/__FILE__ in the reverse
 * of the conventional file:func order).
 */
struct ibv_mr *__ibv_reg_mr(struct ibv_pd *pd, void *addr,
			    size_t length, int access)
{
	struct ibv_mr *mr;

	if (ibv_dontfork_range(addr, length))
		return NULL;

	mr = pd->context->ops.reg_mr(pd, addr, length, access);
	if (mr) {
		mr->context = pd->context;
		mr->pd      = pd;
		mr->addr    = addr;
		mr->length  = length;
	} else
		/* Registration failed: restore normal fork semantics. */
		ibv_dofork_range(addr, length);

	return mr;
}
Пример #4
0
/*
 * Back a bnxt_re queue with a page-aligned anonymous mapping sized for
 * depth * stride entries, pre-fault it, and mark it dontfork.
 *
 * Returns 0 on success; a positive errno/error value on failure, with
 * que->bytes reset to 0 and any mapping released.
 */
int bnxt_re_alloc_aligned(struct bnxt_re_queue *que, uint32_t pg_size)
{
	int nbytes, rc;

	/* NOTE(review): depth * stride is computed in int — assumes the
	 * product cannot overflow; confirm against the queue limits. */
	nbytes = que->depth * que->stride;
	que->bytes = get_aligned(nbytes, pg_size);
	que->va = mmap(NULL, que->bytes, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (que->va == MAP_FAILED) {
		que->bytes = 0;
		return errno;
	}

	/* Touch pages before proceeding. */
	memset(que->va, 0, que->bytes);

	rc = ibv_dontfork_range(que->va, que->bytes);
	if (rc) {
		munmap(que->va, que->bytes);
		que->bytes = 0;
	}

	return rc;
}
Пример #5
0
/*
 * Re-register an existing memory region, optionally changing its
 * translation (addr/length), access flags, and/or protection domain,
 * as selected by @flags.
 *
 * Returns 0 on success or a positive error value; the early validation
 * failures also set errno (EINVAL/ENOSYS) via "return errno = ...".
 */
int __ibv_exp_rereg_mr(struct ibv_mr *mr, int flags,
		       struct ibv_pd *pd, void *addr,
		       size_t length, uint64_t access,
		       struct ibv_exp_rereg_mr_attr *attr)
{
	/* Set when we marked the new range dontfork ourselves and must
	 * undo it if the provider call fails. */
	int dofork_onfail = 0;
	int err;
	struct verbs_context_exp *vctx;
	void *old_addr;
	size_t old_len;
	struct ibv_exp_rereg_out out;

	/* Reject comp_mask bits beyond the reserved boundary. */
	if (attr->comp_mask & ~(IBV_EXP_REREG_MR_ATTR_RESERVED - 1))
		return errno = EINVAL;

	/* Reject unknown flag bits. */
	if (flags & ~IBV_EXP_REREG_MR_FLAGS_SUPPORTED)
		return errno = EINVAL;

	/* A translation change needs a positive length. */
	if ((flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) &&
	    (0 >= length))
		return errno = EINVAL;

	/* Access bits are only meaningful with CHANGE_ACCESS. */
	if (!(flags & IBV_EXP_REREG_MR_CHANGE_ACCESS))
		access = 0;

	/* ALLOCATE_MR means the provider picks the address: it requires
	 * CHANGE_TRANSLATION and forbids a caller-supplied addr. */
	if ((access & IBV_EXP_ACCESS_ALLOCATE_MR) &&
	    (!(flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) ||
	    (addr != NULL)))
			return errno = EINVAL;

	/* Conversely, a caller-driven translation change needs an addr. */
	if ((!(access & IBV_EXP_ACCESS_ALLOCATE_MR)) &&
	    (flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) &&
	    (addr == NULL))
		return errno = EINVAL;

	vctx = verbs_get_exp_ctx_op(mr->context, drv_exp_rereg_mr);
	if (!vctx)
		return errno = ENOSYS;

	/* If address will be allocated internally fork support is handled by the provider */
	if (!(access & IBV_EXP_ACCESS_ALLOCATE_MR) &&
	    flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) {
		err = ibv_dontfork_range(addr, length);
		if (err)
			return err;
		dofork_onfail = 1;
	}

	/* Remember the old range so its dontfork marking can be undone
	 * once the provider has switched to the new translation. */
	old_addr = mr->addr;
	old_len = mr->length;
	memset(&out, 0, sizeof(out));
	if (flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION)
		out.need_dofork = 1;

	err = vctx->drv_exp_rereg_mr(mr, flags, pd, addr, length, access, attr, &out);
	if (!err) {
		if (flags & IBV_EXP_REREG_MR_CHANGE_TRANSLATION) {
			/* Provider may clear need_dofork if it already
			 * released the old range itself. */
			if (out.need_dofork)
				ibv_dofork_range(old_addr, old_len);
			if (access & IBV_EXP_ACCESS_ALLOCATE_MR) {
				;
			} else {
				/* In case that internal allocator was used
				     addr already set internally
				*/
				mr->addr    = addr;
				mr->length  = length;
			}
		}
		if (flags & IBV_EXP_REREG_MR_CHANGE_PD)
			mr->pd = pd;
	} else if (dofork_onfail) {
		/* Provider failed: roll back our dontfork marking. */
		ibv_dofork_range(addr, length);
	}

	return err;
}