/* * This needs to be very careful to not leave IS_ERR pointers around for * cleanup to trip over. */ static int rds_iw_setup_qp(struct rds_connection *conn) { struct rds_iw_connection *ic = conn->c_transport_data; struct ib_device *dev = ic->i_cm_id->device; struct ib_qp_init_attr attr; struct rds_iw_device *rds_iwdev; int ret; /* rds_iw_add_one creates a rds_iw_device object per IB device, * and allocates a protection domain, memory range and MR pool * for each. If that fails for any reason, it will not register * the rds_iwdev at all. */ rds_iwdev = ib_get_client_data(dev, &rds_iw_client); if (!rds_iwdev) { if (printk_ratelimit()) printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n", dev->name); return -EOPNOTSUPP; } /* Protection domain and memory range */ ic->i_pd = rds_iwdev->pd; ic->i_mr = rds_iwdev->mr; ret = rds_iw_init_qp_attrs(&attr, rds_iwdev, &ic->i_send_ring, rds_iw_send_cq_comp_handler, &ic->i_recv_ring, rds_iw_recv_cq_comp_handler, conn); if (ret < 0) goto out; ic->i_send_cq = attr.send_cq; ic->i_recv_cq = attr.recv_cq; /* * XXX this can fail if max_*_wr is too large? Are we supposed * to back off until we get a value that the hardware can support? 
*/ ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); if (ret) { rdsdebug("rdma_create_qp failed: %d\n", ret); goto out; } ic->i_send_hdrs = ib_dma_alloc_coherent(dev, ic->i_send_ring.w_nr * sizeof(struct rds_header), &ic->i_send_hdrs_dma, GFP_KERNEL); if (!ic->i_send_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent send failed\n"); goto out; } ic->i_recv_hdrs = ib_dma_alloc_coherent(dev, ic->i_recv_ring.w_nr * sizeof(struct rds_header), &ic->i_recv_hdrs_dma, GFP_KERNEL); if (!ic->i_recv_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent recv failed\n"); goto out; } ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), &ic->i_ack_dma, GFP_KERNEL); if (!ic->i_ack) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent ack failed\n"); goto out; } ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work)); if (!ic->i_sends) { ret = -ENOMEM; rdsdebug("send allocation failed\n"); goto out; } rds_iw_send_init_ring(ic); ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work)); if (!ic->i_recvs) { ret = -ENOMEM; rdsdebug("recv allocation failed\n"); goto out; } rds_iw_recv_init_ring(ic); rds_iw_recv_init_ack(ic); /* Post receive buffers - as a side effect, this will update * the posted credit count. */ rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1); rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr, ic->i_send_cq, ic->i_recv_cq); out: return ret; }
/*
 * NOTE(review): this is a second, near-identical definition of
 * rds_iw_setup_qp (the earlier copy differs only in its printk
 * rate-limiting idiom).  Two static definitions of the same name in one
 * translation unit will not compile — this looks like a merge/diff
 * artifact; one copy should be removed.
 *
 * Allocate all per-connection resources for an RDS/iWARP connection:
 * QP, completion queues, DMA-coherent header buffers, ACK header, and
 * the send/recv work-request rings.  Returns 0 or a negative errno.
 * Error paths goto out without unwinding earlier allocations —
 * presumably the connection teardown path frees them; confirm.
 */
static int rds_iw_setup_qp(struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct rds_iw_device *rds_iwdev;
	int ret;

	/* The rds_iw_device is registered per IB device by the client
	 * add callback; if it is absent this device is unusable for RDS.
	 */
	rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
	if (!rds_iwdev) {
		printk_ratelimited(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
		       dev->name);
		return -EOPNOTSUPP;
	}

	/* Share the device-wide protection domain and memory range */
	ic->i_pd = rds_iwdev->pd;
	ic->i_mr = rds_iwdev->mr;

	ret = rds_iw_init_qp_attrs(&attr, rds_iwdev,
			&ic->i_send_ring, rds_iw_send_cq_comp_handler,
			&ic->i_recv_ring, rds_iw_recv_cq_comp_handler,
			conn);
	if (ret < 0)
		goto out;

	ic->i_send_cq = attr.send_cq;
	ic->i_recv_cq = attr.recv_cq;

	/* Create the queue pair on the PD with the attrs built above */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto out;
	}

	/* DMA-coherent header buffer: one rds_header per send slot */
	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto out;
	}

	/* One rds_header per recv slot */
	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
					   ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto out;
	}

	/* Single header used for outgoing ACKs */
	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
				       &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto out;
	}

	/* Ring bookkeeping arrays; vmalloc since rings can be large */
	ic->i_sends = vmalloc(ic->i_send_ring.w_nr *
				sizeof(struct rds_iw_send_work));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto out;
	}
	rds_iw_send_init_ring(ic);

	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr *
				sizeof(struct rds_iw_recv_work));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto out;
	}

	rds_iw_recv_init_ring(ic);
	rds_iw_recv_init_ack(ic);

	/* Post receive buffers — as a side effect this updates the
	 * posted credit count.
	 */
	rds_iw_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);

	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn,
		 ic->i_pd, ic->i_mr, ic->i_send_cq, ic->i_recv_cq);

out:
	return ret;
}