/*
 * Allocate one page's worth of address-handle memory for @pd, register
 * it as a memory region, and push it onto the front of the PD's page
 * list (pd->ah_list).
 *
 * @pd:        protection domain that owns the new page
 * @page_size: size in bytes of the buffer to allocate and register
 *             (also used as its alignment)
 * @per_page:  number of entries in the page's free bitmap; every word
 *             starts out all-ones (all slots free)
 *
 * Returns the new page on success, or NULL on allocation/registration
 * failure (in which case nothing is left allocated).
 */
static struct mthca_ah_page *__add_page(struct mthca_pd *pd, int page_size, int per_page)
{
	struct mthca_ah_page *new_page;
	int w;

	/* Trailing flexible free[] bitmap is allocated along with the struct. */
	new_page = malloc(sizeof *new_page + per_page * sizeof (int));
	if (!new_page)
		return NULL;

	if (mthca_alloc_buf(&new_page->buf, page_size, page_size))
		goto err_free_page;

	new_page->mr = mthca_reg_mr(&pd->ibv_pd, new_page->buf.buf, page_size, 0);
	if (!new_page->mr)
		goto err_free_buf;

	new_page->mr->context = pd->ibv_pd.context;

	/* Fresh page: nothing in use, every bitmap word fully free. */
	new_page->use_cnt = 0;
	for (w = 0; w < per_page; ++w)
		new_page->free[w] = ~0;

	/* Prepend to the PD's doubly linked list of AH pages. */
	new_page->prev = NULL;
	new_page->next = pd->ah_list;
	if (new_page->next)
		new_page->next->prev = new_page;
	pd->ah_list = new_page;

	return new_page;

err_free_buf:
	mthca_free_buf(&new_page->buf);
err_free_page:
	free(new_page);
	return NULL;
}
/*
 * Allocate and initialize the work-queue buffer for a shared receive
 * queue (SRQ), plus the per-WQE wrid array.
 *
 * @pd:   protection domain (used only to reach the device's page size)
 * @attr: SRQ attributes; currently unused here — sizing comes from the
 *        already-populated srq->max / srq->max_gs fields
 * @srq:  SRQ whose buf, wrid, wqe_shift, buf_size, first_free,
 *        last_free and last fields are filled in
 *
 * Returns 0 on success, -1 on allocation failure (wrid is freed if the
 * buffer allocation fails, so nothing leaks).
 */
int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int size;
	int i;

	/* One 64-bit work-request ID slot per WQE. */
	srq->wrid = malloc(srq->max * sizeof (uint64_t));
	if (!srq->wrid)
		return -1;

	/*
	 * Each WQE holds a next segment followed by max_gs scatter
	 * entries; round the stride up to the next power of two,
	 * starting at 64 bytes (shift 6).
	 */
	size = sizeof (struct mthca_next_seg) + srq->max_gs * sizeof (struct mthca_data_seg);

	for (srq->wqe_shift = 6; 1 << srq->wqe_shift < size; ++srq->wqe_shift)
		; /* nothing */

	srq->buf_size = srq->max << srq->wqe_shift;

	/* Buffer is page-aligned and padded out to a whole page. */
	if (mthca_alloc_buf(&srq->buf,
			    align(srq->buf_size, to_mdev(pd->context->device)->page_size),
			    to_mdev(pd->context->device)->page_size)) {
		free(srq->wrid);
		return -1;
	}

	memset(srq->buf.buf, 0, srq->buf_size);

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100
	 * (MTHCA_INVAL_LKEY).
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			/*
			 * Chain WQE i to WQE i+1, both in the software
			 * free list (wqe_to_link) and in the hardware
			 * next-descriptor field.  nda_op is big-endian;
			 * the low bit set here presumably flags a valid
			 * next descriptor — NOTE(review): confirm
			 * against the HCA programming docs.
			 */
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			/* Last WQE terminates the free list. */
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		/*
		 * Mark every scatter slot in the WQE (up to the rounded
		 * stride, not just max_gs) with the invalid L_Key.
		 */
		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = htonl(MTHCA_INVAL_LKEY);
	}

	/* Free list runs 0 .. max-1; remember the last WQE for chaining. */
	srq->first_free = 0;
	srq->last_free = srq->max - 1;
	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}