/*
 * Return the maximum number of scatter/gather entries supported by
 * @dev, caching the answer in a function-local static after the first
 * successful query.  On query failure the cached value is left as-is
 * (initially -1) and returned, after logging a warning.
 *
 * NOTE(review): the cache is shared across all devices ever passed in —
 * presumably only one HCA is expected; confirm for multi-device setups.
 */
static int sdp_get_max_dev_sge(struct ib_device *dev)
{
	static int max_sges = -1;
	struct ib_device_attr attr;
	int rc;

	if (max_sges <= 0) {
		rc = ib_query_device(dev, &attr);
		if (rc)
			sdp_warn(NULL, "ib_query_device failed: %d\n", rc);
		else
			max_sges = attr.max_sge;
	}

	return max_sges;
}
static void add_device(struct ib_device* dev) { // first check this is the mellanox device int retval = ib_query_port(dev, 1, &rdma_ib_device.attr); CHECK_MSG_NO_RET(retval == 0, "Error querying port"); if(rdma_ib_device.attr.active_mtu == 5) { rdma_ib_device.dev = dev; } else { return; } // get device attributes ib_query_device(dev, &rdma_ib_device.mlnx_device_attr); // register handler INIT_IB_EVENT_HANDLER(&rdma_ib_device.ieh, dev, async_event_handler); ib_register_event_handler(&rdma_ib_device.ieh); // We got the right device // library can be used now rdma_ib_device.ready_to_use = true; }
/*
 * Create unconnected endpoint.
 *
 * Sizes the QP work-request/SGE limits from the device attributes and
 * @cdata (scaling the send queue for FRMR register/invalidate WRs),
 * creates separate send and receive completion queues, and fills in the
 * RDMA CM connection parameters.  Returns 0 or a negative errno.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr devattr;
	struct ib_cq *sendcq, *recvcq;
	int rc, err;

	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		return rc;
	}

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr.max_qp_wr)
		cdata->max_requests = devattr.max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	/* send_cq and recv_cq initialized below */
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR: {
		int depth = 7;

		/* Add room for frmr register and invalidate WRs.
		 * 1. FRMR reg WR for head
		 * 2. FRMR invalidate WR for head
		 * 3. N FRMR reg WRs for pagelist
		 * 4. N FRMR invalidate WRs for pagelist
		 * 5. FRMR reg WR for tail
		 * 6. FRMR invalidate WR for tail
		 * 7. The RDMA_SEND WR
		 */

		/* Calculate N if the device max FRMR depth is smaller than
		 * RPCRDMA_MAX_DATA_SEGS.
		 */
		if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
			int delta = RPCRDMA_MAX_DATA_SEGS -
				    ia->ri_max_frmr_depth;

			/* each extra chunk needs one reg + one invalidate WR */
			do {
				depth += 2;	/* FRMR reg + invalidate */
				delta -= ia->ri_max_frmr_depth;
			} while (delta > 0);
		}
		ep->rep_attr.cap.max_send_wr *= depth;
		/* If the scaled send queue exceeds the device limit,
		 * shrink the credit count to fit. */
		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
			cdata->max_requests = devattr.max_qp_wr / depth;
			if (!cdata->max_requests)
				return -EINVAL;
			ep->rep_attr.cap.max_send_wr = cdata->max_requests *
						       depth;
		}
		break;
	}
	default:
		break;
	}
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* tiny queue: signal every send */
	INIT_CQCOUNT(ep);
	ep->rep_ia = ia;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep,
			      ep->rep_attr.cap.max_send_wr + 1, 0);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
			      rpcrdma_cq_async_error_upcall, ep,
			      ep->rep_attr.cap.max_recv_wr + 1, 0);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	return rc;
}
/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 *
 * Falls back from the requested @memreg strategy (FRMR -> MTHCAFMR,
 * and MTHCAFMR -> ALLPHYSICAL when persistent registration is compiled
 * in) according to the device capabilities.  Returns 0 or a negative
 * errno; on failure the cm_id is destroyed.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct ib_device_attr devattr;
	struct rpcrdma_ia *ia = &xprt->rx_ia;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	/*
	 * Query the device to determine if the requested memory
	 * registration strategy is supported. If it isn't, set the
	 * strategy to a globally supported model.
	 */
	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out2;
	}

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if ((devattr.device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
			dprintk("RPC: %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		} else {
			/* Mind the ia limit on FRMR page list depth */
			ia->ri_max_frmr_depth = min_t(unsigned int,
				RPCRDMA_MAX_DATA_SEGS,
				devattr.max_fast_reg_page_list_len);
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_id->device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
#if RPCRDMA_PERSISTENT_REGISTRATION
			memreg = RPCRDMA_ALLPHYSICAL;
#else
			rc = -ENOMEM;
			goto out2;
#endif
		}
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		break;
#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
#endif
	case RPCRDMA_MTHCAFMR:
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
		/* fall into the shared DMA-MR setup below when the
		 * ALLPHYSICAL case is compiled in */
#if RPCRDMA_PERSISTENT_REGISTRATION
	register_setup:
#endif
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out2;
		}
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out2;
	}
	dprintk("RPC: %s: memory registration strategy is %d\n",
		__func__, memreg);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	return 0;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}
/*
 * Bind the transport's cm_id to a privileged (reserved) local port.
 * Walks the reserved-port range downward until a bind succeeds or
 * fails with something other than -EADDRINUSE.  Returns 0 on success
 * or the last rdma_bind_addr() error.
 */
static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}

/**
 * trans_create_rdma - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 *
 * Builds the full RDMA connection: cm_id, address/route resolution,
 * CQ, PD, DMA lkey, QP, then rdma_connect().  Any failure funnels to
 * the error label, which tears everything down and returns -ENOTCONN.
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	struct ib_cq_init_attr cq_attr = {};

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
				     IB_QPT_RC);
	/* NOTE(review): on failure cm_id holds an ERR_PTR when
	 * rdma_destroy_trans() runs below — confirm it tolerates that. */
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue */
	cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				&cq_attr);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
/**
 * trans_create_rdma - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 *
 * Older variant using the three-argument rdma_create_id() and the
 * cqe/comp_vector form of ib_create_cq().  Builds cm_id, resolves
 * address and route, creates CQ, PD, DMA lkey, QP, then connects.
 * All failures return -ENOTCONN after tearing down via
 * rdma_destroy_trans().
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				opts.sq_depth + opts.rq_depth + 1, 0);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
/* A vanilla 2.6.19 or older kernel without backported OFED kernel headers. */
/* NOTE(review): the matching #if for the #else below precedes this chunk. */
static void isert_cq_comp_work_cb(void *ctx)
{
	struct isert_cq *cq_desc = ctx;
#else
/*
 * Workqueue callback: drain the CQ, re-arm notification, then drain
 * once more to close the race with HCAs that ignore
 * IB_CQ_REPORT_MISSED_EVENTS.
 */
static void isert_cq_comp_work_cb(struct work_struct *work)
{
	struct isert_cq *cq_desc = container_of(work, struct isert_cq,
						cq_comp_work);
#endif
	int ret;

	TRACE_ENTRY();

	ret = isert_poll_cq(cq_desc);
	if (unlikely(ret < 0)) { /* poll error */
		pr_err("ib_poll_cq failed\n");
		goto out;
	}

	ib_req_notify_cq(cq_desc->cq,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	/*
	 * not all HCAs support IB_CQ_REPORT_MISSED_EVENTS,
	 * so we need to make sure we don't miss any events between
	 * last call to ib_poll_cq() and ib_req_notify_cq()
	 */
	isert_poll_cq(cq_desc);

out:
	TRACE_EXIT();
	return;
}

/*
 * CQ completion interrupt: defer the actual polling to the per-CQ
 * workqueue (pinned to the current CPU where queue_work_on exists).
 */
static void isert_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct isert_cq *cq_desc = context;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	queue_work(cq_desc->cq_workqueue, &cq_desc->cq_comp_work);
#else
	queue_work_on(smp_processor_id(), cq_desc->cq_workqueue,
		      &cq_desc->cq_comp_work);
#endif
}

/* Map an ib_event_type to a printable name for log messages. */
static const char *ib_event_type_str(enum ib_event_type ev_type)
{
	switch (ev_type) {
	case IB_EVENT_COMM_EST:
		return "COMM_EST";
	case IB_EVENT_QP_FATAL:
		return "QP_FATAL";
	case IB_EVENT_QP_REQ_ERR:
		return "QP_REQ_ERR";
	case IB_EVENT_QP_ACCESS_ERR:
		return "QP_ACCESS_ERR";
	case IB_EVENT_SQ_DRAINED:
		return "SQ_DRAINED";
	case IB_EVENT_PATH_MIG:
		return "PATH_MIG";
	case IB_EVENT_PATH_MIG_ERR:
		return "PATH_MIG_ERR";
	case IB_EVENT_QP_LAST_WQE_REACHED:
		return "QP_LAST_WQE_REACHED";
	case IB_EVENT_CQ_ERR:
		return "CQ_ERR";
	case IB_EVENT_SRQ_ERR:
		return "SRQ_ERR";
	case IB_EVENT_SRQ_LIMIT_REACHED:
		return "SRQ_LIMIT_REACHED";
	case IB_EVENT_PORT_ACTIVE:
		return "PORT_ACTIVE";
	case IB_EVENT_PORT_ERR:
		return "PORT_ERR";
	case IB_EVENT_LID_CHANGE:
		return "LID_CHANGE";
	case IB_EVENT_PKEY_CHANGE:
		return "PKEY_CHANGE";
	case IB_EVENT_SM_CHANGE:
		return "SM_CHANGE";
	case IB_EVENT_CLIENT_REREGISTER:
		return "CLIENT_REREGISTER";
	case IB_EVENT_DEVICE_FATAL:
		return "DEVICE_FATAL";
	default:
		return "UNKNOWN";
	}
}

/*
 * Asynchronous IB event handler: logs QP/CQ/SRQ/port/HCA events and
 * forces a "connection established" CM event on IB_EVENT_COMM_EST.
 */
static void isert_async_evt_handler(struct ib_event *async_ev, void *context)
{
	struct isert_cq *cq = context;
	struct isert_device *isert_dev = cq->dev;
	struct ib_device *ib_dev = isert_dev->ib_dev;
	char *dev_name = ib_dev->name;
	enum ib_event_type ev_type = async_ev->event;
	struct isert_connection *isert_conn;

	TRACE_ENTRY();

	switch (ev_type) {
	case IB_EVENT_COMM_EST:
		isert_conn = async_ev->element.qp->qp_context;
		pr_info("conn:0x%p cm_id:0x%p dev:%s, QP evt: %s\n",
			isert_conn, isert_conn->cm_id, dev_name,
			ib_event_type_str(IB_EVENT_COMM_EST));
		/* force "connection established" event */
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;

	/* rest of QP-related events */
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_conn = async_ev->element.qp->qp_context;
		pr_err("conn:0x%p cm_id:0x%p dev:%s, QP evt: %s\n",
		       isert_conn, isert_conn->cm_id, dev_name,
		       ib_event_type_str(ev_type));
		break;

	/* CQ-related events */
	case IB_EVENT_CQ_ERR:
		pr_err("dev:%s CQ evt: %s\n",
		       dev_name, ib_event_type_str(ev_type));
		break;

	/* SRQ events */
	case IB_EVENT_SRQ_ERR:
	case IB_EVENT_SRQ_LIMIT_REACHED:
		pr_err("dev:%s SRQ evt: %s\n",
		       dev_name, ib_event_type_str(ev_type));
		break;

	/* Port events */
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		pr_err("dev:%s port:%d evt: %s\n",
		       dev_name, async_ev->element.port_num,
		       ib_event_type_str(ev_type));
		break;

	/* HCA events */
	case IB_EVENT_DEVICE_FATAL:
		pr_err("dev:%s HCA evt: %s\n",
		       dev_name, ib_event_type_str(ev_type));
		break;

	default:
		pr_err("dev:%s evt: %s\n",
		       dev_name, ib_event_type_str(ev_type));
		break;
	}

	TRACE_EXIT();
}

/*
 * Allocate and initialize a per-HCA isert_device: query attributes,
 * create one CQ (plus workqueue) per online CPU / completion vector,
 * a PD and a DMA MR, then add the device to the global list.
 * Returns the device or an ERR_PTR.  Caller holds dev_list_mutex.
 */
static struct isert_device *isert_device_create(struct ib_device *ib_dev)
{
	struct isert_device *isert_dev;
	struct ib_device_attr *dev_attr;
	int cqe_num, err;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct ib_cq *cq;
	char wq_name[64];
	int i, j;

	TRACE_ENTRY();

	isert_dev = kzalloc(sizeof(*isert_dev), GFP_KERNEL);
	if (unlikely(isert_dev == NULL)) {
		pr_err("Failed to allocate iser dev\n");
		err = -ENOMEM;
		goto out;
	}

	dev_attr = &isert_dev->device_attr;
	err = ib_query_device(ib_dev, dev_attr);
	if (unlikely(err)) {
		pr_err("Failed to query device, err: %d\n", err);
		goto fail_query;
	}

	isert_dev->num_cqs = min_t(int, num_online_cpus(),
				   ib_dev->num_comp_vectors);

	isert_dev->cq_qps = kzalloc(sizeof(*isert_dev->cq_qps) * isert_dev->num_cqs,
				    GFP_KERNEL);
	if (unlikely(isert_dev->cq_qps == NULL)) {
		pr_err("Failed to allocate iser cq_qps\n");
		err = -ENOMEM;
		goto fail_cq_qps;
	}

	/* NOTE(review): cq_desc is vmalloc'd and not zeroed, yet the
	 * fail_cq unwind below inspects .cq/.cq_workqueue of the entry
	 * being initialized — could read uninitialized memory if the
	 * workqueue allocation fails; consider zeroing after alloc. */
	isert_dev->cq_desc = vmalloc(sizeof(*isert_dev->cq_desc) * isert_dev->num_cqs);
	if (unlikely(isert_dev->cq_desc == NULL)) {
		/* NOTE(review): %ld with a size_t argument; %zu would be correct */
		pr_err("Failed to allocate %ld bytes for iser cq_desc\n",
		       sizeof(*isert_dev->cq_desc) * isert_dev->num_cqs);
		err = -ENOMEM;
		goto fail_alloc_cq_desc;
	}

	pd = ib_alloc_pd(ib_dev);
	if (unlikely(IS_ERR(pd))) {
		err = PTR_ERR(pd);
		pr_err("Failed to alloc iser dev pd, err:%d\n", err);
		goto fail_pd;
	}

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
	if (unlikely(IS_ERR(mr))) {
		err = PTR_ERR(mr);
		pr_err("Failed to get dma mr, err: %d\n", err);
		goto fail_mr;
	}

	/* split the CQ entry budget evenly across the CQs */
	cqe_num = min(isert_dev->device_attr.max_cqe, ISER_CQ_ENTRIES);
	cqe_num = cqe_num / isert_dev->num_cqs;
#ifdef CONFIG_SCST_EXTRACHECKS
	if (isert_dev->device_attr.max_cqe == 0)
		pr_err("Zero max_cqe encountered: you may have a compilation problem\n");
#endif

	for (i = 0; i < isert_dev->num_cqs; ++i) {
		struct isert_cq *cq_desc = &isert_dev->cq_desc[i];

		cq_desc->dev = isert_dev;
		cq_desc->idx = i;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
		INIT_WORK(&cq_desc->cq_comp_work, isert_cq_comp_work_cb, NULL);
#else
		INIT_WORK(&cq_desc->cq_comp_work, isert_cq_comp_work_cb);
#endif

		snprintf(wq_name, sizeof(wq_name), "isert_cq_%p", cq_desc);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
		cq_desc->cq_workqueue = create_singlethread_workqueue(wq_name);
#else
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 36)
		cq_desc->cq_workqueue = alloc_workqueue(wq_name,
							WQ_CPU_INTENSIVE|
							WQ_RESCUER, 1);
#else
		cq_desc->cq_workqueue = alloc_workqueue(wq_name,
							WQ_CPU_INTENSIVE|
							WQ_MEM_RECLAIM, 1);
#endif
#endif
		if (unlikely(!cq_desc->cq_workqueue)) {
			pr_err("Failed to alloc iser cq work queue for dev:%s\n",
			       ib_dev->name);
			err = -ENOMEM;
			goto fail_cq;
		}

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
		cq = ib_create_cq(ib_dev,
				  isert_cq_comp_handler,
				  isert_async_evt_handler,
				  cq_desc, /* context */
				  cqe_num, i); /* completion vector */
#else
		{
		struct ib_cq_init_attr ia = {
			.cqe = cqe_num,
			.comp_vector = i,
		};

		cq = ib_create_cq(ib_dev,
				  isert_cq_comp_handler,
				  isert_async_evt_handler,
				  cq_desc, /* context */
				  &ia);
		}
#endif
		if (unlikely(IS_ERR(cq))) {
			cq_desc->cq = NULL;
			err = PTR_ERR(cq);
			pr_err("Failed to create iser dev cq, err:%d\n", err);
			goto fail_cq;
		}

		cq_desc->cq = cq;
		err = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				       IB_CQ_REPORT_MISSED_EVENTS);
		if (unlikely(err)) {
			pr_err("Failed to request notify cq, err: %d\n", err);
			goto fail_cq;
		}
	}

	isert_dev->ib_dev = ib_dev;
	isert_dev->pd = pd;
	isert_dev->mr = mr;

	INIT_LIST_HEAD(&isert_dev->conn_list);

	lockdep_assert_held(&dev_list_mutex);

	isert_dev_list_add(isert_dev);

	pr_info("iser created device:%p\n", isert_dev);
	return isert_dev;

fail_cq:
	for (j = 0; j <= i; ++j) {
		if (isert_dev->cq_desc[j].cq)
			ib_destroy_cq(isert_dev->cq_desc[j].cq);
		if (isert_dev->cq_desc[j].cq_workqueue)
			destroy_workqueue(isert_dev->cq_desc[j].cq_workqueue);
	}
	ib_dereg_mr(mr);
fail_mr:
	ib_dealloc_pd(pd);
fail_pd:
	vfree(isert_dev->cq_desc);
fail_alloc_cq_desc:
	kfree(isert_dev->cq_qps);
fail_cq_qps:
fail_query:
	kfree(isert_dev);
out:
	TRACE_EXIT_RES(err);
	return ERR_PTR(err);
}

/*
 * Tear down everything isert_device_create() built: remove the device
 * from the global list, drain and destroy each CQ and its workqueue,
 * then release the MR, PD and the device itself.  Caller holds
 * dev_list_mutex.
 */
static void isert_device_release(struct isert_device *isert_dev)
{
	int err, i;

	TRACE_ENTRY();

	lockdep_assert_held(&dev_list_mutex);

	isert_dev_list_remove(isert_dev); /* remove from global list */

	for (i = 0; i < isert_dev->num_cqs; ++i) {
		struct isert_cq *cq_desc = &isert_dev->cq_desc[i];

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
		/*
		 * cancel_work_sync() was introduced in 2.6.22. We can
		 * only wait until all scheduled work is done.
		 */
		flush_workqueue(cq_desc->cq_workqueue);
#else
		cancel_work_sync(&cq_desc->cq_comp_work);
#endif
		err = ib_destroy_cq(cq_desc->cq);
		if (unlikely(err))
			pr_err("Failed to destroy cq, err:%d\n", err);

		destroy_workqueue(cq_desc->cq_workqueue);
	}

	err = ib_dereg_mr(isert_dev->mr);
	if (unlikely(err))
		pr_err("Failed to destroy mr, err:%d\n", err);
	err = ib_dealloc_pd(isert_dev->pd);
	if (unlikely(err))
		pr_err("Failed to destroy pd, err:%d\n", err);

	vfree(isert_dev->cq_desc);
	isert_dev->cq_desc = NULL;

	kfree(isert_dev->cq_qps);
	isert_dev->cq_qps = NULL;

	kfree(isert_dev);

	TRACE_EXIT();
}
/*
 * Create unconnected endpoint.
 *
 * Older variant: a single shared CQ for sends and receives.  Scales
 * the send queue for FRMR (x3) or memory-window bind/unbind WRs,
 * validates against the device max_qp_wr, then creates the CQ and
 * fills in the CM connection parameters.  Returns 0 or negative errno.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr devattr;
	int rc, err;

	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		return rc;
	}

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr.max_qp_wr)
		cdata->max_requests = devattr.max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	/* send_cq and recv_cq initialized below */
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		/* Add room for frmr register and invalidate WRs */
		ep->rep_attr.cap.max_send_wr *= 3;
		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
			return -EINVAL;
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
	case RPCRDMA_MEMWINDOWS:
		/* Add room for mw_binds+unbinds - overkill! */
		ep->rep_attr.cap.max_send_wr++;
		ep->rep_attr.cap.max_send_wr *= (2 * RPCRDMA_MAX_SEGS);
		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
			return -EINVAL;
		break;
	default:
		break;
	}
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 /* - 1*/;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS_ASYNC:
	case RPCRDMA_MEMWINDOWS:
		/* leave headroom for the bind/unbind completions */
		ep->rep_cqinit -= RPCRDMA_MAX_SEGS;
		break;
	default:
		break;
	}
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* tiny queue: signal every send */
	INIT_CQCOUNT(ep);
	ep->rep_ia = ia;
	init_waitqueue_head(&ep->rep_connect_wait);

	/*
	 * Create a single cq for receive dto and mw_bind (only ever
	 * care about unbind, really). Send completions are suppressed.
	 * Use single threaded tasklet upcalls to maintain ordering.
	 */
	ep->rep_cq = ib_create_cq(ia->ri_id->device, rpcrdma_cq_event_upcall,
				  rpcrdma_cq_async_error_upcall, NULL,
				  ep->rep_attr.cap.max_recv_wr +
				  ep->rep_attr.cap.max_send_wr + 1, 0);
	if (IS_ERR(ep->rep_cq)) {
		rc = PTR_ERR(ep->rep_cq);
		dprintk("RPC: %s: ib_create_cq failed: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(ep->rep_cq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = ep->rep_cq;
	ep->rep_attr.recv_cq = ep->rep_cq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS)
		ep->rep_remote_cma.responder_resources = 0;
	else if (devattr.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(ep->rep_cq);
	if (err)
		dprintk("RPC: %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	return rc;
}
/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 *
 * Older variant supporting MEMWINDOWS strategies.  Downgrades the
 * requested @memreg strategy based on device capabilities, possibly
 * falling all the way back to RPCRDMA_REGISTER.  Returns 0 or a
 * negative errno; on failure the cm_id is destroyed.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct ib_device_attr devattr;
	struct rpcrdma_ia *ia = &xprt->rx_ia;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	/*
	 * Query the device to determine if the requested memory
	 * registration strategy is supported. If it isn't, set the
	 * strategy to a globally supported model.
	 */
	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out2;
	}

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}

	switch (memreg) {
	case RPCRDMA_MEMWINDOWS:
	case RPCRDMA_MEMWINDOWS_ASYNC:
		if (!(devattr.device_cap_flags & IB_DEVICE_MEM_WINDOW)) {
			dprintk("RPC: %s: MEMWINDOWS registration "
				"specified but not supported by adapter, "
				"using slower RPCRDMA_REGISTER\n",
				__func__);
			memreg = RPCRDMA_REGISTER;
		}
		break;
	case RPCRDMA_MTHCAFMR:
		if (!ia->ri_id->device->alloc_fmr) {
#if RPCRDMA_PERSISTENT_REGISTRATION
			dprintk("RPC: %s: MTHCAFMR registration "
				"specified but not supported by adapter, "
				"using riskier RPCRDMA_ALLPHYSICAL\n",
				__func__);
			memreg = RPCRDMA_ALLPHYSICAL;
#else
			dprintk("RPC: %s: MTHCAFMR registration "
				"specified but not supported by adapter, "
				"using slower RPCRDMA_REGISTER\n",
				__func__);
			memreg = RPCRDMA_REGISTER;
#endif
		}
		break;
	case RPCRDMA_FRMR:
		/* Requires both frmr reg and local dma lkey */
		if ((devattr.device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
#if RPCRDMA_PERSISTENT_REGISTRATION
			dprintk("RPC: %s: FRMR registration "
				"specified but not supported by adapter, "
				"using riskier RPCRDMA_ALLPHYSICAL\n",
				__func__);
			memreg = RPCRDMA_ALLPHYSICAL;
#else
			dprintk("RPC: %s: FRMR registration "
				"specified but not supported by adapter, "
				"using slower RPCRDMA_REGISTER\n",
				__func__);
			memreg = RPCRDMA_REGISTER;
#endif
		}
		break;
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_BOUNCEBUFFERS:
	case RPCRDMA_REGISTER:
	case RPCRDMA_FRMR:
		break;
#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
#endif
	case RPCRDMA_MEMWINDOWS_ASYNC:
	case RPCRDMA_MEMWINDOWS:
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_MW_BIND;
		goto register_setup;
	case RPCRDMA_MTHCAFMR:
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
	register_setup:
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			/* non-fatal: drop to per-chunk registration */
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n\t"
				"Will continue with degraded performance\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			memreg = RPCRDMA_REGISTER;
			ia->ri_bind_mem = NULL;
		}
		break;
	default:
		printk(KERN_ERR "%s: invalid memory registration mode %d\n",
				__func__, memreg);
		rc = -EINVAL;
		goto out2;
	}
	dprintk("RPC: %s: memory registration strategy is %d\n",
		__func__, memreg);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	return 0;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}
/*
 * uverbs handler for the QUERY_DEVICE command: copy the command from
 * userspace, query the device, and copy the attributes back to the
 * user-supplied response buffer.  Returns in_len (bytes consumed) on
 * success, or -ENOSPC / -EFAULT / the ib_query_device() error.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	/* zero first so reserved/padding fields don't leak kernel stack */
	memset(&resp, 0, sizeof resp);

	resp.fw_ver		       = attr.fw_ver;
	resp.node_guid		       = file->device->ib_dev->node_guid;
	resp.sys_image_guid	       = attr.sys_image_guid;
	resp.max_mr_size	       = attr.max_mr_size;
	resp.page_size_cap	       = attr.page_size_cap;
	resp.vendor_id		       = attr.vendor_id;
	resp.vendor_part_id	       = attr.vendor_part_id;
	resp.hw_ver		       = attr.hw_ver;
	resp.max_qp		       = attr.max_qp;
	resp.max_qp_wr		       = attr.max_qp_wr;
	resp.device_cap_flags	       = attr.device_cap_flags;
	resp.max_sge		       = attr.max_sge;
	resp.max_sge_rd		       = attr.max_sge_rd;
	resp.max_cq		       = attr.max_cq;
	resp.max_cqe		       = attr.max_cqe;
	resp.max_mr		       = attr.max_mr;
	resp.max_pd		       = attr.max_pd;
	resp.max_qp_rd_atom	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap		       = attr.atomic_cap;
	resp.max_ee		       = attr.max_ee;
	resp.max_rdd		       = attr.max_rdd;
	resp.max_mw		       = attr.max_mw;
	resp.max_raw_ipv6_qp	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah		       = attr.max_ah;
	resp.max_fmr		       = attr.max_fmr;
	resp.max_map_per_fmr	       = attr.max_map_per_fmr;
	resp.max_srq		       = attr.max_srq;
	resp.max_srq_wr		       = attr.max_srq_wr;
	resp.max_srq_sge	       = attr.max_srq_sge;
	resp.max_pkeys		       = attr.max_pkeys;
	resp.local_ca_ack_delay	       = attr.local_ca_ack_delay;
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	/* cmd.response is a u64 carrying a userspace pointer */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 *
 * Newer variant: selects a memory-registration ops vector
 * (FRWR / FMR / physical) based on device capabilities and caches the
 * device attributes in the ia.  Returns 0 or a negative errno; on
 * failure the PD and cm_id are released.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int rc;

	ia->ri_dma_mr = NULL;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_device, devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out3;
	}

	/* Fall back to FMR when fast registration is unavailable */
	if (memreg == RPCRDMA_FRMR) {
		if (!(devattr->device_cap_flags &
		      IB_DEVICE_MEM_MGT_EXTENSIONS) ||
		    (devattr->max_fast_reg_page_list_len == 0)) {
			dprintk("RPC: %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
			rc = -EINVAL;
			goto out3;
		}
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		break;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC: %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}
/*
 * Create and register the per-HCA isert_device for @ib_dev.
 *
 * Allocates the device descriptor, queries HCA attributes, allocates a PD
 * and DMA MR, then creates one CQ (with its own single-threaded workqueue)
 * per completion vector, capped at the number of online CPUs.  On success
 * the new device is added to the global device list (dev_list_mutex must be
 * held by the caller — enforced via lockdep_assert_held).
 *
 * Returns the new isert_device, or ERR_PTR(-errno) on failure with all
 * partially acquired resources unwound.
 */
static struct isert_device *isert_device_create(struct ib_device *ib_dev)
{
	struct isert_device *isert_dev;
	struct ib_device_attr *dev_attr;
	int cqe_num, err;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct ib_cq *cq;
	char wq_name[64];
	int i, j;

	TRACE_ENTRY();

	isert_dev = kzalloc(sizeof(*isert_dev), GFP_KERNEL);
	if (unlikely(isert_dev == NULL)) {
		pr_err("Failed to allocate iser dev\n");
		err = -ENOMEM;
		goto out;
	}

	dev_attr = &isert_dev->device_attr;
	err = ib_query_device(ib_dev, dev_attr);
	if (unlikely(err)) {
		pr_err("Failed to query device, err: %d\n", err);
		goto fail_query;
	}

	/* One CQ per completion vector, but never more than online CPUs. */
	isert_dev->num_cqs = min_t(int, num_online_cpus(), ib_dev->num_comp_vectors);

	isert_dev->cq_qps = kzalloc(sizeof(*isert_dev->cq_qps) * isert_dev->num_cqs, GFP_KERNEL);
	if (unlikely(isert_dev->cq_qps == NULL)) {
		pr_err("Failed to allocate iser cq_qps\n");
		err = -ENOMEM;
		goto fail_cq_qps;
	}

	/* cq_desc array can be large (per-CQ state) — use vmalloc. */
	isert_dev->cq_desc = vmalloc(sizeof(*isert_dev->cq_desc) * isert_dev->num_cqs);
	if (unlikely(isert_dev->cq_desc == NULL)) {
		pr_err("Failed to allocate %ld bytes for iser cq_desc\n", sizeof(*isert_dev->cq_desc) * isert_dev->num_cqs);
		err = -ENOMEM;
		goto fail_alloc_cq_desc;
	}

	pd = ib_alloc_pd(ib_dev);
	if (unlikely(IS_ERR(pd))) {
		err = PTR_ERR(pd);
		pr_err("Failed to alloc iser dev pd, err:%d\n", err);
		goto fail_pd;
	}

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
	if (unlikely(IS_ERR(mr))) {
		err = PTR_ERR(mr);
		pr_err("Failed to get dma mr, err: %d\n", err);
		goto fail_mr;
	}

	/* Split the HCA's CQE budget evenly across our CQs. */
	cqe_num = min(isert_dev->device_attr.max_cqe, ISER_CQ_ENTRIES);
	cqe_num = cqe_num / isert_dev->num_cqs;

#ifdef CONFIG_SCST_EXTRACHECKS
	if (isert_dev->device_attr.max_cqe == 0)
		pr_err("Zero max_cqe encountered: you may have a compilation problem\n");
#endif

	for (i = 0; i < isert_dev->num_cqs; ++i) {
		struct isert_cq *cq_desc = &isert_dev->cq_desc[i];

		cq_desc->dev = isert_dev;
		cq_desc->idx = i;
		/* Pre-2.6.20 INIT_WORK took the callback argument explicitly. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
		INIT_WORK(&cq_desc->cq_comp_work, isert_cq_comp_work_cb, NULL);
#else
		INIT_WORK(&cq_desc->cq_comp_work, isert_cq_comp_work_cb);
#endif

		snprintf(wq_name, sizeof(wq_name), "isert_cq_%p", cq_desc);
		/* Workqueue API changed across 2.6.36; WQ_RESCUER was later
		 * renamed WQ_MEM_RECLAIM. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
		cq_desc->cq_workqueue = create_singlethread_workqueue(wq_name);
#else
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 36)
		cq_desc->cq_workqueue = alloc_workqueue(wq_name, WQ_CPU_INTENSIVE| WQ_RESCUER, 1);
#else
		cq_desc->cq_workqueue = alloc_workqueue(wq_name, WQ_CPU_INTENSIVE| WQ_MEM_RECLAIM, 1);
#endif
#endif
		if (unlikely(!cq_desc->cq_workqueue)) {
			pr_err("Failed to alloc iser cq work queue for dev:%s\n", ib_dev->name);
			err = -ENOMEM;
			goto fail_cq;
		}

		cq = ib_create_cq(ib_dev, isert_cq_comp_handler, isert_async_evt_handler, cq_desc, /* context */ cqe_num, i); /* completion vector */
		if (unlikely(IS_ERR(cq))) {
			/* NULL marks this slot as "no CQ" for the unwind loop. */
			cq_desc->cq = NULL;
			err = PTR_ERR(cq);
			pr_err("Failed to create iser dev cq, err:%d\n", err);
			goto fail_cq;
		}

		cq_desc->cq = cq;
		err = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
		if (unlikely(err)) {
			pr_err("Failed to request notify cq, err: %d\n", err);
			goto fail_cq;
		}
	}

	isert_dev->ib_dev = ib_dev;
	isert_dev->pd = pd;
	isert_dev->mr = mr;
	INIT_LIST_HEAD(&isert_dev->conn_list);

	lockdep_assert_held(&dev_list_mutex);

	isert_dev_list_add(isert_dev);

	pr_info("iser created device:%p\n", isert_dev);
	return isert_dev;

fail_cq:
	/* Unwind CQs/workqueues up to and including slot i (partially set up). */
	for (j = 0; j <= i; ++j) {
		if (isert_dev->cq_desc[j].cq)
			ib_destroy_cq(isert_dev->cq_desc[j].cq);
		if (isert_dev->cq_desc[j].cq_workqueue)
			destroy_workqueue(isert_dev->cq_desc[j].cq_workqueue);
	}
	ib_dereg_mr(mr);
fail_mr:
	ib_dealloc_pd(pd);
fail_pd:
	vfree(isert_dev->cq_desc);
fail_alloc_cq_desc:
	kfree(isert_dev->cq_qps);
fail_cq_qps:
fail_query:
	kfree(isert_dev);
out:
	TRACE_EXIT_RES(err);
	return ERR_PTR(err);
}
static void verbs_add_device (struct ib_device *dev) { int ret; struct ib_qp_init_attr attrs; if (ib_dev) return; /* durty hack for ib_dma_map_single not to segfault */ dev->dma_ops = NULL; ib_dev = dev; printk (KERN_INFO "IB add device called. Name = %s\n", dev->name); ret = ib_query_device (dev, &dev_attr); if (ret) { printk (KERN_INFO "ib_quer_device failed: %d\n", ret); return; } printk (KERN_INFO "IB device caps: max_qp %d, max_mcast_grp: %d, max_pkeys: %d\n", dev_attr.max_qp, dev_attr.max_mcast_grp, (int)dev_attr.max_pkeys); /* We'll work with first port. It's a sample module, anyway. Who is that moron which decided * to count ports from one? */ ret = ib_query_port (dev, 1, &port_attr); if (ret) { printk (KERN_INFO "ib_query_port failed: %d\n", ret); return; } printk (KERN_INFO "Port info: lid: %u, sm_lid: %u, max_msg_size: %u\n", (unsigned)port_attr.lid, (unsigned)port_attr.sm_lid, port_attr.max_msg_sz); pd = ib_alloc_pd (dev); if (IS_ERR (pd)) { ret = PTR_ERR (pd); printk (KERN_INFO "pd allocation failed: %d\n", ret); return; } printk (KERN_INFO "PD allocated\n"); mr = ib_get_dma_mr (pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR (mr)) { ret = PTR_ERR (mr); printk (KERN_INFO "get_dma_mr failed: %d\n", ret); return; } send_cq = ib_create_cq (dev, NULL, NULL, NULL, 1, 1); if (IS_ERR (send_cq)) { ret = PTR_ERR (send_cq); printk (KERN_INFO "ib_create_cq failed: %d\n", ret); return; } recv_cq = ib_create_cq (dev, verbs_comp_handler_recv, NULL, NULL, 1, 1); if (IS_ERR (recv_cq)) { ret = PTR_ERR (recv_cq); printk (KERN_INFO "ib_create_cq failed: %d\n", ret); return; } ib_req_notify_cq (recv_cq, IB_CQ_NEXT_COMP); printk (KERN_INFO "CQs allocated\n"); ib_query_pkey (dev, 1, 0, &pkey); /* allocate memory */ send_buf = kmalloc (buf_size + 40, GFP_KERNEL); recv_buf = kmalloc (buf_size + 40, GFP_KERNEL); if (!send_buf || !recv_buf) { printk (KERN_INFO "Memory allocation error\n"); return; } printk (KERN_INFO "Trying to register regions\n"); if (ib_dev->dma_ops) printk 
(KERN_INFO "DMA ops are defined\n"); memset (send_buf, 0, buf_size+40); memset (send_buf, 0, buf_size+40); send_key = ib_dma_map_single (ib_dev, send_buf, buf_size, DMA_FROM_DEVICE); printk (KERN_INFO "send_key obtained %llx\n", send_key); recv_key = ib_dma_map_single (ib_dev, recv_buf, buf_size, DMA_TO_DEVICE); printk (KERN_INFO "recv_key obtained %llx\n", recv_key); if (ib_dma_mapping_error (ib_dev, send_key)) { printk (KERN_INFO "Error mapping send buffer\n"); return; } if (ib_dma_mapping_error (ib_dev, recv_key)) { printk (KERN_INFO "Error mapping recv buffer\n"); return; } memset (&attrs, 0, sizeof (attrs)); attrs.qp_type = IB_QPT_UD; attrs.sq_sig_type = IB_SIGNAL_ALL_WR; attrs.event_handler = verbs_qp_event; attrs.cap.max_send_wr = CQ_SIZE; attrs.cap.max_recv_wr = CQ_SIZE; attrs.cap.max_send_sge = 1; attrs.cap.max_recv_sge = 1; attrs.send_cq = send_cq; attrs.recv_cq = recv_cq; qp = ib_create_qp (pd, &attrs); if (IS_ERR (qp)) { ret = PTR_ERR (qp); printk (KERN_INFO "qp allocation failed: %d\n", ret); return; } printk (KERN_INFO "Create QP with num %x\n", qp->qp_num); if (init_qp (qp)) { printk (KERN_INFO "Failed to initialize QP\n"); return; } ret = ib_query_gid (ib_dev, 1, 0, &local_info.gid); if (ret) { printk (KERN_INFO "query_gid failed %d\n", ret); return; } local_info.qp_num = qp->qp_num; local_info.lid = port_attr.lid; /* now we are ready to send our QP number and other stuff to other party */ if (!server_addr) { schedule_work (&sock_accept); flush_scheduled_work (); } else exchange_info (server_addr); if (!have_remote_info) { printk (KERN_INFO "Have no remote info, give up\n"); return; } ret = path_rec_lookup_start (); if (ret) { printk (KERN_INFO "path_rec lookup start failed: %d\n", ret); return; } /* post receive request */ verbs_post_recv_req (); mod_timer (&verbs_timer, NEXTJIFF(1)); }
/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 *
 * Older xprtrdma variant: creates the CM id and PD, queries device
 * attributes, records the device's local DMA lkey when available, and
 * selects a memory registration strategy with fallbacks
 * FRMR -> MTHCAFMR -> ALLPHYSICAL.  Strategies that register physical
 * memory (ALLPHYSICAL, and FMR without a DMA lkey) additionally obtain a
 * base DMA MR (ri_bind_mem).  Returns 0 or a negative errno; on failure
 * the PD and CM id allocated here are released.
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct ib_device_attr *devattr = &ia->ri_devattr;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n", __func__, rc);
		goto out2;
	}

	rc = ib_query_device(ia->ri_id->device, devattr);
	if (rc) {
		dprintk("RPC: %s: ib_query_device failed %d\n", __func__, rc);
		goto out3;
	}

	/* Devices with a local DMA lkey let us skip the base DMA MR below. */
	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}

	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if (((devattr->device_cap_flags & (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) || (devattr->max_fast_reg_page_list_len == 0)) {
			dprintk("RPC: %s: FRMR registration " "not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		/* No alloc_fmr verb: fall back to plain physical registration. */
		if (!ia->ri_id->device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration " "not supported by HCA\n", __func__);
			memreg = RPCRDMA_ALLPHYSICAL;
		}
	}

	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
		break;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
		mem_priv = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
		goto register_setup;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		/* With a device-wide DMA lkey no base MR is needed. */
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
	register_setup:
		/* Shared tail for strategies needing a base DMA MR. */
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for " "phys register failed with %lX\n", __func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out3;
		}
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory " "registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out3;
	}
	dprintk("RPC: %s: memory registration strategy is '%s'\n", __func__, ia->ri_ops->ro_displayname);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	rwlock_init(&ia->ri_qplock);
	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}