static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev,
				struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
			     PAGE_SIZE * 2, &buf->buf);
	if (err)
		goto out;

	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe), &buf->buf);

out:
	return err;
}
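/*
 * Teardown counterpart: everything mlx4_ib_alloc_cq_buf() sets up is undone
 * in reverse order. A minimal sketch modeled on the in-tree
 * mlx4_ib_free_cq_buf(); the (cqe + 1) sizing assumes the caller passes
 * ibcq.cqe, which is one less than the nent used at allocation time.
 */
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev,
				struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}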
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
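/*
 * The matching release path frees the MTT, buffer, and doorbell in reverse
 * order of allocation; a sketch modeled on the in-tree mlx4_free_hwq_res().
 */
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}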
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev,
			       struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;

	*umem = ib_umem_get(context, buf_addr, cqe * sizeof(struct mlx4_cqe),
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
			    ilog2((*umem)->page_size), &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
			       struct mlx4_ib_cq_buf *buf,
			       struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}
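/*
 * Both variants above delegate MTT population to mlx4_ib_umem_write_mtt().
 * A sketch modeled on the older in-tree helper that matches the first
 * (chunk-list) umem layout; later kernels walk an sg_table instead. It
 * batches DMA addresses into one scratch page so that each mlx4_write_mtt()
 * call receives a reasonably sized chunk.
 */
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	struct ib_umem_chunk *chunk;
	int i, j, k;
	int n;
	int len;
	int err = 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					umem->page_size * k;
				/* Flush a full scratch page of entries */
				if (i == PAGE_SIZE / sizeof (u64)) {
					err = mlx4_write_mtt(dev->dev, mtt, n,
							     i, pages);
					if (err)
						goto out;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);

	return err;
}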
struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

	if (pd->uobject) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    ilog2(srq->umem->page_size), &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
				   GFP_KERNEL)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages,
				    srq->buf.page_shift, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf,
					 GFP_KERNEL);
		if (err)
			goto err_mtt;

		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	cqn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;

	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_wrid:
	if (pd->uobject)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
				      &srq->db);
	else
		kfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!pd->uobject)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	int desc_size;
	int buf_size;
	int err;
	int i;

	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

	if (pd->uobject) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    ilog2(srq->umem->page_size), &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages,
				    srq->buf.page_shift, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_wrid:
	if (pd->uobject)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
				      &srq->db);
	else
		kfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!pd->uobject)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}
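/*
 * For reference, the matching teardown; a sketch modeled on the in-tree
 * mlx4_ib_destroy_srq() of the same era as the second variant above.
 * Ownership (user vs. kernel SRQ) decides which resources are released.
 */
int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (srq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context),
				      &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		kfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}

	kfree(msrq);

	return 0;
}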
/* CQ allocation and modification test */
int cq_test(struct mlx4_dev *dev, char *log)
{
	struct mlx4_cq *cq;
	struct mlx4_mtt *mtt;
	struct mlx4_uar *uar;
	struct mlx4_db *db;
	int err;
	int expected_rc = 0;
	int collapsed = 0;
	int timestamp_en = 0;
	int npages = 1;
	int page_shift = get_order(dev->caps.cqe_size) + PAGE_SHIFT;
	int ret_val = FAIL;
	int vector = 0;
	int nent = 2 * MLX4_NUM_TUNNEL_BUFS;
	u16 count = 88;
	u16 period = 0;
	u64 mtt_addr;

	uar = malloc(sizeof *uar, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(uar, goto without_free, log);

	mtt = malloc(sizeof *mtt, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(mtt, goto free_uar, log);

	cq = malloc(sizeof *cq, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(cq, goto free_mtt, log);

	db = malloc(sizeof *db, M_CQ_VAL, M_WAITOK);
	VL_CHECK_MALLOC(db, goto free_cq, log);

	err = mlx4_mtt_init(dev, npages, page_shift, mtt);
	VL_CHECK_RC(err, expected_rc, goto free_db, log,
		    "failed to initialize MTT");
	uprintf("MTT was initialized successfully\n");

	VL_CHECK_INT_VALUE(mtt->order, 0, goto cleanup_mtt, log,
			   "mtt->order is wrong");
	VL_CHECK_INT_VALUE(mtt->page_shift, 12, goto cleanup_mtt, log,
			   "mtt->page_shift is wrong");

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	uprintf("MTT address is: %lu\n", mtt_addr);

	err = mlx4_uar_alloc(dev, uar);
	VL_CHECK_RC(err, expected_rc, goto cleanup_mtt, log,
		    "failed to allocate UAR");
	uprintf("UAR was allocated successfully\n");

	err = mlx4_db_alloc(dev, db, 1);
	VL_CHECK_RC(err, expected_rc, goto dealloc_uar, log,
		    "failed to allocate DB");
	uprintf("DB was allocated successfully\n");

	err = mlx4_cq_alloc(dev, nent, mtt, uar, db->dma, cq, vector,
			    collapsed, timestamp_en);
	VL_CHECK_RC(err, expected_rc, goto dealloc_db, log,
		    "failed to allocate CQ");
	uprintf("CQ allocated successfully\n");

	VL_CHECK_INT_VALUE(cq->cons_index, 0, goto dealloc_cq, log,
			   "cq->cons_index is wrong");
	VL_CHECK_INT_VALUE(cq->arm_sn, 1, goto dealloc_cq, log,
			   "cq->arm_sn is wrong");
	uprintf("cq->cqn = %d, cq->uar->pfn = %lu, cq->eqn = %d, cq->irq = %u\n",
		cq->cqn, cq->uar->pfn, cq->eqn, cq->irq);

	VL_CHECK_UNSIGNED_INT_VALUE(cq->cons_index, (unsigned int)0,
				    goto dealloc_cq, log,
				    "cq->cons_index != 0");
	VL_CHECK_INT_VALUE(cq->arm_sn, 1, goto dealloc_cq, log,
			   "cq->arm_sn != 1");

	err = mlx4_cq_modify(dev, cq, count, period);
	VL_CHECK_RC(err, expected_rc, goto dealloc_cq, log,
		    "failed to modify CQ");
	uprintf("CQ was modified successfully\n");

	ret_val = SUCCESS;

dealloc_cq:
	mlx4_cq_free(dev, cq);
	uprintf("CQ was freed successfully\n");
dealloc_db:
	mlx4_db_free(dev, db);
	uprintf("DB free was successful\n");
dealloc_uar:
	mlx4_uar_free(dev, uar);
	uprintf("UAR free was successful\n");
cleanup_mtt:
	mlx4_mtt_cleanup(dev, mtt);
	uprintf("mtt clean-up was successful\n");
free_db:
	free(db, M_CQ_VAL);
free_cq:
	free(cq, M_CQ_VAL);
free_mtt:
	free(mtt, M_CQ_VAL);
free_uar:
	free(uar, M_CQ_VAL);
without_free:
	return ret_val;
}
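/*
 * The VL_CHECK_* macros above come from the verification-library test
 * harness and are not defined in this file. A purely hypothetical
 * illustration of the pattern they implement (compare, log the failure,
 * then execute the caller-supplied jump to the correct cleanup label);
 * the real definitions may differ:
 */
#define VL_CHECK_RC(rc, expected, on_fail, log, msg)			\
	do {								\
		if ((rc) != (expected)) {				\
			uprintf("%s: %s (rc=%d, expected=%d)\n",	\
				(log), (msg), (rc), (expected));	\
			on_fail;					\
		}							\
	} while (0)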
/*
static void mlx4_unmap_uar(struct mlx4_priv *priv)
{
	int i;

	for (i = 0; i < mlx4_num_eq_uar(&priv->dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}
*/
static int mlx4_create_eq(struct mlx4_priv *priv, int nent, u8 intr,
			  struct mlx4_eq *eq)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	genpaddr_t t = 0;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->priv = priv;
	eq->nent = roundup_pow_of_two(max(nent, 2));

	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
	npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE
					<< priv->dev.caps.eqe_factor))
		 / BASE_PAGE_SIZE;

	eq->page_list = malloc(npages * sizeof *eq->page_list);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = malloc(npages * sizeof *dma_list);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox();
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc(BASE_PAGE_SIZE, &t);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, BASE_PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(priv, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(&priv->dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(&priv->dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
					MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size = ilog2(eq->nent);
	eq_context->intr = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	/*printf("mtt_addr: %lx\n", mlx4_mtt_addr(&priv->dev, &eq->mtt));
	printf("off: %d\n", eq->mtt.offset);
	printf("size: %d\n", priv->dev.caps.mtt_entry_sz);*/

	mtt_addr = mlx4_mtt_addr(&priv->dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(priv, mailbox, eq->eqn);
	if (err) {
		MLX4_DEBUG("SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	free(dma_list);
	mlx4_free_cmd_mailbox(mailbox);

	eq->cons_index = 0;

	return err;

	/*TODO*/
err_out_free_mtt:
	/*mlx4_mtt_cleanup(&priv->dev, &eq->mtt);*/

err_out_free_eq:
	/*mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);*/

err_out_free_pages:
	/*for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free(&priv->dev.pdev->dev, PAGE_SIZE,
				 eq->page_list[i].buf, eq->page_list[i].map);*/
	mlx4_free_cmd_mailbox(mailbox);

err_out_free:
	free(eq->page_list);
	free(dma_list);

err_out:
	return err;
}
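/*
 * Companion sketch, not part of the port above: once the EQ is live, the
 * consumer index is advanced by writing it to the UAR doorbell obtained
 * from mlx4_get_eq_uar(). Modeled on the Linux driver's eq_set_ci();
 * assumes __raw_writel(), cpu_to_be32(), and mb() (or port equivalents)
 * are available.
 */
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We want ordering without byte swapping, hence the explicit barrier */
	mb();
}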