int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
	unsigned long flags;
	u64 h_ret;

	spin_lock_irqsave(&eq->spinlock, flags);
	ibmebus_free_irq(eq->ist, (void *)shca);

	h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't free EQ resources.");
		return -EINVAL;
	}
	ipz_queue_dtor(NULL, &eq->ipz_queue);

	return 0;
}
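/*
 * Usage sketch only, not part of the driver: how an adapter remove path
 * might tear down both event queues created by ehca_create_eq() below.
 * The helper name is hypothetical; the shca->eq/shca->neq members are
 * assumed to exist as in ehca_classes.h. A failed destroy is merely
 * logged because a remove path cannot bail out.
 */
static void ehca_eq_teardown_sketch(struct ehca_shca *shca)
{
	if (ehca_destroy_eq(shca, &shca->eq))
		ehca_err(&shca->ib_device, "Cannot destroy EQ.");

	if (ehca_destroy_eq(shca, &shca->neq))
		ehca_err(&shca->ib_device, "Cannot destroy NEQ.");
}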
/*
 * init_qp_queues initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queues(struct ehca_shca *shca,
				 struct ehca_qp *my_qp,
				 int nr_sq_pages,
				 int nr_rq_pages,
				 int swqe_size,
				 int rwqe_size,
				 int nr_send_sges, int nr_receive_sges)
{
	int ret, cnt, ipz_rc;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	/* pd is only needed for small queues; none are used here */
	ipz_rc = ipz_queue_ctor(NULL, &my_qp->ipz_squeue,
				nr_sq_pages,
				EHCA_PAGESIZE, swqe_size, nr_send_sges, 0);
	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for squeue. ipz_rc=%x",
			 ipz_rc);
		return -EBUSY;
	}

	ipz_rc = ipz_queue_ctor(NULL, &my_qp->ipz_rqueue,
				nr_rq_pages,
				EHCA_PAGESIZE, rwqe_size, nr_receive_sges, 0);
	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
			 ipz_rc);
		ret = -EBUSY;
		goto init_qp_queues0;
	}

	/* register SQ pages */
	for (cnt = 0; cnt < nr_sq_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
		if (!vpage) {
			ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queues1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 &my_qp->pf, 0, 0,
						 rpage, 1,
						 my_qp->galpas.kernel);
		/*
		 * RQ pages are registered after the SQ pages, so the
		 * hypervisor reports H_PAGE_REGISTERED for every SQ page,
		 * including the last one.
		 */
		if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
				 " failed rc=%lx", h_ret);
			ret = ehca2ib_return_code(h_ret);
			goto init_qp_queues1;
		}
	}

	ipz_qeit_reset(&my_qp->ipz_squeue);

	/* register RQ pages */
	for (cnt = 0; cnt < nr_rq_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
		if (!vpage) {
			ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
				 "failed p_vpage = %p", vpage);
			ret = -EINVAL;
			goto init_qp_queues1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 &my_qp->pf, 0, 1,
						 rpage, 1, my_qp->galpas.kernel);
		if (cnt == (nr_rq_pages - 1)) {
			/* last page: hypervisor reports overall success */
			if (h_ret != H_SUCCESS) {
				ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
					 "h_ret= %lx ", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queues1;
			}
			/* the page iterator must be exhausted now */
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queues1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
					 "h_ret= %lx ", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queues1;
			}
		}
	}

	ipz_qeit_reset(&my_qp->ipz_rqueue);

	return 0;

init_qp_queues1:
	ipz_queue_dtor(NULL, &my_qp->ipz_rqueue);
init_qp_queues0:
	ipz_queue_dtor(NULL, &my_qp->ipz_squeue);
	return ret;
}
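/*
 * Illustrative caller sketch (hypothetical helper, not driver code): the
 * QP create path computes the queue geometry from the hypervisor's
 * allocation results and then lets init_qp_queues() register all pages.
 * struct ehca_alloc_qp_parms and its act_nr_* fields are assumed from
 * ehca_classes.h; the page/WQE size math is omitted here.
 */
static int ehca_init_qp_queues_sketch(struct ehca_shca *shca,
				      struct ehca_qp *my_qp,
				      struct ehca_alloc_qp_parms *parms,
				      int nr_sq_pages, int nr_rq_pages,
				      int swqe_size, int rwqe_size)
{
	/* on failure, init_qp_queues() has already destructed whatever
	 * it constructed, so the caller only propagates the error */
	return init_qp_queues(shca, my_qp, nr_sq_pages, nr_rq_pages,
			      swqe_size, rwqe_size,
			      parms->act_nr_send_sges,
			      parms->act_nr_recv_sges);
}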
int ehca_create_eq(struct ehca_shca *shca,
		   struct ehca_eq *eq,
		   const enum ehca_eq_type type, const u32 length)
{
	int ret;
	u64 h_ret;
	u32 nr_pages;
	u32 i;
	void *vpage;
	struct ib_device *ib_dev = &shca->ib_device;

	spin_lock_init(&eq->spinlock);
	spin_lock_init(&eq->irq_spinlock);
	eq->is_initialized = 0;

	if (type != EHCA_EQ && type != EHCA_NEQ) {
		ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
		return -EINVAL;
	}
	if (!length) {
		ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
		return -EINVAL;
	}

	h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
					 &eq->pf,
					 type,
					 length,
					 &eq->ipz_eq_handle,
					 &eq->length,
					 &nr_pages, &eq->ist);
	if (h_ret != H_SUCCESS) {
		ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
		return -EINVAL;
	}

	ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
			     EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
	if (!ret) {
		ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
		goto create_eq_exit1;
	}

	for (i = 0; i < nr_pages; i++) {
		u64 rpage;

		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
		if (!vpage)
			goto create_eq_exit2;

		rpage = virt_to_abs(vpage);
		h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
						 eq->ipz_eq_handle,
						 &eq->pf,
						 0, 0, rpage, 1);

		if (i == (nr_pages - 1)) {
			/* last page */
			vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
			if (h_ret != H_SUCCESS || vpage)
				goto create_eq_exit2;
		} else {
			if (h_ret != H_PAGE_REGISTERED)
				goto create_eq_exit2;
		}
	}

	ipz_qeit_reset(&eq->ipz_queue);

	/* register interrupt handlers and initialize work queues */
	if (type == EHCA_EQ) {
		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
					  IRQF_DISABLED, "ehca_eq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
	} else if (type == EHCA_NEQ) {
		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
					  IRQF_DISABLED, "ehca_neq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
	}

	eq->is_initialized = 1;

	return 0;

create_eq_exit2:
	ipz_queue_dtor(NULL, &eq->ipz_queue);

create_eq_exit1:
	hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	return -EINVAL;
}
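/*
 * Usage sketch only (hypothetical helper, not driver code): an adapter
 * probe path creates one EQ for completion/async events and one NEQ for
 * notification events, unwinding the EQ if the NEQ fails. The queue
 * lengths 2048 and 513 are illustrative values, not mandated by this file.
 */
static int ehca_eq_setup_sketch(struct ehca_shca *shca)
{
	int ret;

	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create EQ.");
		return ret;
	}

	ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create NEQ.");
		ehca_destroy_eq(shca, &shca->eq);
		return ret;
	}

	return 0;
}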