int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf,
		      int nent, int entry_size)
{
	if (mlx4_alloc_buf(buf, align(nent * entry_size, dev->page_size),
			   dev->page_size))
		return -1;
	memset(buf->buf, 0, nent * entry_size);

	return 0;
}
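/*
 * Illustrative sketch, not part of the driver: a caller typically picks
 * the CQE stride first and then sizes the buffer from it.  The 32/64
 * byte strides and the example_create_cq_buf() helper are assumptions
 * for illustration only.
 */
static int example_create_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf,
				 int nent, int use_64b_cqe)
{
	int entry_size = use_64b_cqe ? 64 : 32;

	return mlx4_alloc_cq_buf(dev, buf, nent, entry_size);
}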
int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
		       struct mlx4_srq *srq)
{
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	int size;
	int buf_size;
	int i;

	srq->wrid = malloc(srq->max * sizeof (uint64_t));
	if (!srq->wrid)
		return -1;

	size = sizeof (struct mlx4_wqe_srq_next_seg) +
		srq->max_gs * sizeof (struct mlx4_wqe_data_seg);
	for (srq->wqe_shift = 5; 1 << srq->wqe_shift < size; ++srq->wqe_shift)
		; /* nothing */

	buf_size = srq->max << srq->wqe_shift;

	if (mlx4_alloc_buf(&srq->buf, buf_size,
			   to_mdev(pd->context->device)->page_size)) {
		free(srq->wrid);
		return -1;
	}

	memset(srq->buf.buf, 0, buf_size);

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.
	 */
	for (i = 0; i < srq->max; ++i) {
		next = get_wqe(srq, i);
		next->next_wqe_index = htobe16((i + 1) & (srq->max - 1));

		for (scatter = (void *) (next + 1);
		     (void *) scatter < (void *) next + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = htobe32(MLX4_INVALID_LKEY);
	}

	srq->head = 0;
	srq->tail = srq->max - 1;

	return 0;
}
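/*
 * Sketch of the stride computation above, pulled out as a hypothetical
 * helper for clarity (example_srq_wqe_shift() is not part of the
 * driver): the SRQ WQE stride is the smallest power of two, at least
 * 32 bytes, that fits the next-WQE header plus max_gs scatter entries.
 * The worked numbers assume the usual 16-byte header and 16-byte
 * scatter entry.
 */
static int example_srq_wqe_shift(int max_gs)
{
	int size = sizeof (struct mlx4_wqe_srq_next_seg) +
		max_gs * sizeof (struct mlx4_wqe_data_seg);
	int shift;

	for (shift = 5; 1 << shift < size; ++shift)
		; /* nothing */

	/* e.g. max_gs = 4: size = 16 + 4 * 16 = 80, so shift = 7 (128B WQEs) */
	return shift;
}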
int mlx4_alloc_prefered_buf(struct mlx4_context *mctx,
			    struct mlx4_buf *buf,
			    size_t size, int page_size,
			    enum mlx4_alloc_type alloc_type,
			    const char *component)
{
	int ret = 1;

	buf->hmem = NULL;
	/*
	 * A fallback mechanism is used below; the priority order is:
	 * huge pages, contig pages, default allocation.
	 */
	if (alloc_type == MLX4_ALLOC_TYPE_HUGE ||
	    alloc_type == MLX4_ALLOC_TYPE_PREFER_HUGE ||
	    alloc_type == MLX4_ALLOC_TYPE_ALL) {
		ret = mlx4_alloc_buf_huge(mctx, buf, size, page_size);
		if (!ret)
			return 0;

		/* Check whether HUGE is forced */
		if (alloc_type == MLX4_ALLOC_TYPE_HUGE)
			return -1;
		if (mlx4_trace)
			printf(PFX "Huge mode allocation failed, falling back to %s mode\n",
			       alloc_type == MLX4_ALLOC_TYPE_ALL ?
			       "contig" : "default");
	}

	if (alloc_type == MLX4_ALLOC_TYPE_CONTIG ||
	    alloc_type == MLX4_ALLOC_TYPE_PREFER_CONTIG ||
	    alloc_type == MLX4_ALLOC_TYPE_ALL) {
		ret = mlx4_alloc_buf_contig(mctx, buf, size, page_size, component);
		if (!ret)
			return 0;

		/* Check whether CONTIG is forced */
		if (alloc_type == MLX4_ALLOC_TYPE_CONTIG)
			return -1;
		if (mlx4_trace)
			printf(PFX "Contig mode allocation failed, falling back to default mode\n");
	}

	return mlx4_alloc_buf(buf, size, page_size);
}
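/*
 * Illustrative call, with hypothetical helper name and values: passing
 * MLX4_ALLOC_TYPE_PREFER_HUGE asks for huge pages but lets the function
 * fall through to the contig and default allocators, while passing
 * MLX4_ALLOC_TYPE_HUGE would make any huge-page failure fatal.  The
 * component string is simply passed through to the contig allocator.
 */
static int example_alloc_wq_buf(struct mlx4_context *mctx, struct mlx4_buf *buf,
				size_t size, int page_size)
{
	return mlx4_alloc_prefered_buf(mctx, buf, size, page_size,
				       MLX4_ALLOC_TYPE_PREFER_HUGE,
				       "example wq");
}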
int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
		      enum ibv_qp_type type, struct mlx4_qp *qp)
{
	qp->rq.max_gs = cap->max_recv_sge;

	qp->sq.wrid = malloc(qp->sq.wqe_cnt * sizeof (uint64_t));
	if (!qp->sq.wrid)
		return -1;

	if (qp->rq.wqe_cnt) {
		qp->rq.wrid = malloc(qp->rq.wqe_cnt * sizeof (uint64_t));
		if (!qp->rq.wrid) {
			free(qp->sq.wrid);
			return -1;
		}
	}

	/* Round the RQ stride up to the next power of two (at least 16B) */
	for (qp->rq.wqe_shift = 4;
	     1 << qp->rq.wqe_shift < qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg);
	     qp->rq.wqe_shift++)
		; /* nothing */

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		(qp->sq.wqe_cnt << qp->sq.wqe_shift);

	/* Place the ring with the larger stride first in the buffer */
	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
		qp->rq.offset = 0;
		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	} else {
		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
		qp->sq.offset = 0;
	}

	if (mlx4_alloc_buf(&qp->buf,
			   align(qp->buf_size, to_mdev(pd->context->device)->page_size),
			   to_mdev(pd->context->device)->page_size)) {
		free(qp->sq.wrid);
		/* rq.wrid was only allocated when the QP has a receive queue */
		if (qp->rq.wqe_cnt)
			free(qp->rq.wrid);
		return -1;
	}

	memset(qp->buf.buf, 0, qp->buf_size);

	return 0;
}
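/*
 * Worked example of the layout logic above (hypothetical numbers): with
 * 64 RQ WQEs at shift 5 (32B) and 128 SQ WQEs at shift 6 (64B), the SQ
 * has the larger stride and is placed first:
 *
 *	sq.offset = 0
 *	rq.offset = 128 << 6 = 8192
 *	buf_size  = (64 << 5) + (128 << 6) = 2048 + 8192 = 10240
 *
 * The whole buffer is then rounded up to a page-size multiple by
 * align() before allocation.
 */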