static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, struct ehca_wqe *wqe_p, struct ib_recv_wr *recv_wr) { u8 cnt_ds; if (unlikely((recv_wr->num_sge < 0) || (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) { ehca_gen_err("Invalid number of WQE SGE. " "num_sqe=%x max_nr_of_sg=%x", recv_wr->num_sge, ipz_rqueue->act_nr_of_sg); return -EINVAL; /* invalid SG list length */ } /* clear wqe header until sglist */ memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); wqe_p->work_request_id = recv_wr->wr_id; wqe_p->nr_of_data_seg = recv_wr->num_sge; for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) { wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr = recv_wr->sg_list[cnt_ds].addr; wqe_p->u.all_rcv.sg_list[cnt_ds].lkey = recv_wr->sg_list[cnt_ds].lkey; wqe_p->u.all_rcv.sg_list[cnt_ds].length = recv_wr->sg_list[cnt_ds].length; } if (ehca_debug_level) { ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue); ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); } return 0; }
/*
 * ehca_create_slab_caches() - create all SLAB caches used by the driver.
 *
 * Caches are created in a fixed order (PD, CQ, QP, AV, MR&MW, small-QP
 * and, on 64K-page kernels, the ctblk cache).  On any failure, every
 * cache created so far is destroyed again in reverse order.
 *
 * Returns 0 on success or a negative error code.
 */
static int ehca_create_slab_caches(void)
{
	int ret;

	ret = ehca_init_pd_cache();
	if (ret) {
		ehca_gen_err("Cannot create PD SLAB cache.");
		return ret;
	}

	ret = ehca_init_cq_cache();
	if (ret) {
		ehca_gen_err("Cannot create CQ SLAB cache.");
		goto cleanup_pd;
	}

	ret = ehca_init_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create QP SLAB cache.");
		goto cleanup_cq;
	}

	ret = ehca_init_av_cache();
	if (ret) {
		ehca_gen_err("Cannot create AV SLAB cache.");
		goto cleanup_qp;
	}

	ret = ehca_init_mrmw_cache();
	if (ret) {
		ehca_gen_err("Cannot create MR&MW SLAB cache.");
		goto cleanup_av;
	}

	ret = ehca_init_small_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create small queue SLAB cache.");
		goto cleanup_mrmw;
	}

#ifdef CONFIG_PPC_64K_PAGES
	ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
					EHCA_PAGESIZE, H_CB_ALIGNMENT,
					SLAB_HWCACHE_ALIGN,
					NULL);
	if (!ctblk_cache) {
		ehca_gen_err("Cannot create ctblk SLAB cache.");
		ehca_cleanup_small_qp_cache();
		/*
		 * Bug fix: ret is still 0 here (the previous init succeeded),
		 * so without this the caller would see success even though
		 * all caches have been torn down.
		 */
		ret = -ENOMEM;
		goto cleanup_mrmw;
	}
#endif
	return 0;

	/* error unwind: destroy caches in reverse order of creation */
cleanup_mrmw:
	ehca_cleanup_mrmw_cache();
cleanup_av:
	ehca_cleanup_av_cache();
cleanup_qp:
	ehca_cleanup_qp_cache();
cleanup_cq:
	ehca_cleanup_cq_cache();
cleanup_pd:
	ehca_cleanup_pd_cache();
	return ret;
}
/*
 * hipz_h_alloc_resource_qp() - H_ALLOC_RESOURCE hypervisor call for a QP.
 *
 * Packs the requested QP attributes from @parms into the hcall register
 * layout, issues the hcall and copies the actual (firmware-granted)
 * WQE/SGE counts and queue sizes back into @parms.  On success, maps
 * the hardware-access galpas (kernel or user mapping per @is_user).
 *
 * Returns the hcall status; H_NO_MEM if the galpa mapping failed after
 * a successful allocation (the QP resource is freed again first).
 */
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms, int is_user)
{
	int rc;
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	/* r5: QP type, service/signaling type and small-page options */
	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	/* r10: requested WR/SGE maxima (one extra WR slot per queue) */
	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);

	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);

	/* r12 carries the SRQ limit for an SRQ, else the associated QPN */
	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, adapter_handle.handle,
				allocate_controls, parms->send_cq_handle.handle,
				parms->recv_cq_handle.handle,
				parms->eq_handle.handle,
				((u64)parms->token << 32) | parms->pd.value,
				max_r10_reg, r11, r12);

	/* unpack firmware-granted values (meaningful only on H_SUCCESS) */
	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS) {
		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[6]);
			/* undo the allocation; report failure as H_NO_MEM */
			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,
						parms->qp_handle.handle,
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);

	return ret;
}
static inline int ehca_write_swqe(struct ehca_qp *qp, struct ehca_wqe *wqe_p, const struct ib_send_wr *send_wr) { u32 idx; u64 dma_length; struct ehca_av *my_av; u32 remote_qkey = send_wr->wr.ud.remote_qkey; if (unlikely((send_wr->num_sge < 0) || (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) { ehca_gen_err("Invalid number of WQE SGE. " "num_sqe=%x max_nr_of_sg=%x", send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg); return -EINVAL; /* invalid SG list length */ } /* clear wqe header until sglist */ memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); wqe_p->work_request_id = send_wr->wr_id; switch (send_wr->opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: wqe_p->optype = WQE_OPTYPE_SEND; break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: wqe_p->optype = WQE_OPTYPE_RDMAWRITE; break; case IB_WR_RDMA_READ: wqe_p->optype = WQE_OPTYPE_RDMAREAD; break; default: ehca_gen_err("Invalid opcode=%x", send_wr->opcode); return -EINVAL; /* invalid opcode */ } wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE; wqe_p->wr_flag = 0; if (send_wr->send_flags & IB_SEND_SIGNALED) wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM; if (send_wr->opcode == IB_WR_SEND_WITH_IMM || send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { /* this might not work as long as HW does not support it */ wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data); wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT; } wqe_p->nr_of_data_seg = send_wr->num_sge; switch (qp->qp_type) { case IB_QPT_SMI: case IB_QPT_GSI: /* no break is intential here */ case IB_QPT_UD: /* IB 1.2 spec C10-15 compliance */ if (send_wr->wr.ud.remote_qkey & 0x80000000) remote_qkey = qp->qkey; wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8; wqe_p->local_ee_context_qkey = remote_qkey; if (!send_wr->wr.ud.ah) { ehca_gen_err("wr.ud.ah is NULL. 
qp=%p", qp); return -EINVAL; } my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah); wqe_p->u.ud_av.ud_av = my_av->av; /* * omitted check of IB_SEND_INLINE * since HW does not support it */ for (idx = 0; idx < send_wr->num_sge; idx++) { wqe_p->u.ud_av.sg_list[idx].vaddr = send_wr->sg_list[idx].addr; wqe_p->u.ud_av.sg_list[idx].lkey = send_wr->sg_list[idx].lkey; wqe_p->u.ud_av.sg_list[idx].length = send_wr->sg_list[idx].length; } /* eof for idx */ if (qp->qp_type == IB_QPT_SMI || qp->qp_type == IB_QPT_GSI) wqe_p->u.ud_av.ud_av.pmtu = 1; if (qp->qp_type == IB_QPT_GSI) { wqe_p->pkeyi = send_wr->wr.ud.pkey_index; #ifdef DEBUG_GSI_SEND_WR trace_send_wr_ud(send_wr); #endif /* DEBUG_GSI_SEND_WR */ } break; case IB_QPT_UC: if (send_wr->send_flags & IB_SEND_FENCE) wqe_p->wr_flag |= WQE_WRFLAG_FENCE; /* no break is intentional here */ case IB_QPT_RC: /* TODO: atomic not implemented */ wqe_p->u.nud.remote_virtual_adress = send_wr->wr.rdma.remote_addr; wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey; /* * omitted checking of IB_SEND_INLINE * since HW does not support it */ dma_length = 0; for (idx = 0; idx < send_wr->num_sge; idx++) { wqe_p->u.nud.sg_list[idx].vaddr = send_wr->sg_list[idx].addr; wqe_p->u.nud.sg_list[idx].lkey = send_wr->sg_list[idx].lkey; wqe_p->u.nud.sg_list[idx].length = send_wr->sg_list[idx].length; dma_length += send_wr->sg_list[idx].length; } /* eof idx */ wqe_p->u.nud.atomic_1st_op_dma_len = dma_length; break; default: ehca_gen_err("Invalid qptype=%x", qp->qp_type); return -EINVAL; } if (ehca_debug_level) { ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); } return 0; }
/*
 * hipz_h_alloc_resource_qp() - H_ALLOC_RESOURCE hypervisor call for a QP.
 *
 * Packs the QP creation attributes from @qp and @parms into the hcall
 * register layout, issues the hcall and stores the returned QP handle
 * and actual (firmware-granted) WQE/SGE counts and queue page counts
 * back into @qp / @parms.
 *
 * Returns the raw hcall status.
 */
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_qp *qp,
			     struct ehca_alloc_qp_parms *parms)
{
	u64 ret;
	u64 allocate_controls;
	u64 max_r10_reg;
	u64 outs[PLPAR_HCALL9_BUFSIZE];
	/* one extra WQE slot is requested per queue beyond the verbs cap */
	u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
	u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
	int daqp_ctrl = parms->daqp_ctrl;

	/* r5: service/signaling type and low-latency (DAQP) options */
	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
			       (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);

	/* r10: requested WR/SGE maxima */
	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       max_nr_send_wqes)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 max_nr_receive_wqes)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->max_send_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->max_recv_sge);

	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,             /* r4  */
				allocate_controls,                 /* r5  */
				qp->send_cq->ipz_cq_handle.handle,
				qp->recv_cq->ipz_cq_handle.handle,
				parms->ipz_eq_handle.handle,
				((u64)qp->token << 32) | parms->pd.value,
				max_r10_reg,                       /* r10 */
				parms->ud_av_l_key_ctl,            /* r11 */
				0);

	/* unpack firmware-granted values (meaningful only on H_SUCCESS) */
	qp->ipz_qp_handle.handle = outs[0];
	qp->real_qp_num = (u32)outs[1];
	parms->act_nr_send_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->act_nr_recv_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->act_nr_send_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->act_nr_recv_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->nr_sq_pages =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->nr_rq_pages =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);

	if (ret == H_SUCCESS)
		/*
		 * NOTE(review): the return value of hcp_galpas_ctor() is
		 * ignored; a failed galpa mapping would leave the QP
		 * allocated but unusable.  Consider checking the result and
		 * freeing the resource on failure.
		 */
		hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);

	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lx", ret);

	return ret;
}
/*
 * ehca_plpar_hcall9() - issue a 9-output hypervisor call with retry.
 *
 * Wraps plpar_hcall9(): retries up to 5 times while the hypervisor
 * reports a "long busy" status, sleeping for the hypervisor-suggested
 * number of milliseconds between attempts.  Any other status (success
 * or error) is returned immediately; H_BUSY is returned if all retries
 * are exhausted.
 *
 * NOTE(review): the hcall_lock is taken only for H_ALLOC_RESOURCE with
 * arg2 == 5 — presumably to serialize allocation of one particular
 * resource type; confirm the value 5 against the hcall interface spec.
 */
static long ehca_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs, lock_is_set = 0;
	unsigned long flags;

	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
		     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
		     opcode, arg1, arg2, arg3, arg4, arg5,
		     arg6, arg7, arg8, arg9);

	for (i = 0; i < 5; i++) {
		/* lock condition is loop-invariant; lock_is_set stays 1
		 * once set, matching the per-iteration lock/unlock pair */
		if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
			spin_lock_irqsave(&hcall_lock, flags);
			lock_is_set = 1;
		}

		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);

		if (lock_is_set)
			spin_unlock_irqrestore(&hcall_lock, flags);

		/* hypervisor asks us to back off and try again */
		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}

		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%lx"
				     " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
				     " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
				     " arg9=%lx"
				     " out1=%lx out2=%lx out3=%lx out4=%lx"
				     " out5=%lx out6=%lx out7=%lx out8=%lx"
				     " out9=%lx",
				     opcode, ret,
				     arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9,
				     outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);

		ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
			     "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
			     "out9=%lx",
			     opcode, ret, outs[0], outs[1], outs[2], outs[3],
			     outs[4], outs[5], outs[6], outs[7], outs[8]);
		return ret;
	}

	/* still long-busy after all retries */
	return H_BUSY;
}