int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int lastwqe = 0;
	int ret;

	spin_lock_irq(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * don't allow invalid Path MTU values or greater than 2048
	 * unless we are configured for a 4KB MTU
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
	    (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
	     (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->timerwait))
				list_del_init(&qp->timerwait);
			if (!list_empty(&qp->piowait))
				list_del_init(&qp->piowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~IPATH_S_ANY_WAIT;
			spin_unlock_irq(&qp->s_lock);
			/* Stop the sending tasklet */
			tasklet_kill(&qp->s_task);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			spin_lock_irq(&qp->s_lock);
		}
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock_irq(&qp->s_lock);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irq(&qp->s_lock);
	ret = -EINVAL;

bail:
	return ret;
}
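
/*
 * Illustrative sketch, not part of the original driver: a consumer does
 * not call ipath_modify_qp() directly; it goes through the core verbs
 * entry point ib_modify_qp() (from <rdma/ib_verbs.h>), which dispatches
 * here.  The hypothetical helper below moves a freshly created RC QP
 * from RESET to INIT.  The attribute values (port 1, P_Key index 0,
 * remote-write access) are assumptions chosen for the example, not
 * requirements of this driver.
 */
static int example_qp_to_init(struct ib_qp *qp)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = 1,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	/* RESET->INIT for an RC QP requires exactly these four masks. */
	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}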
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip = ipath_create_mmap_info(dev, s,
							ibpd->uobject->context,
							qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
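
/*
 * Illustrative sketch, not part of the original driver: QPs are
 * allocated through the core ib_create_qp() verb, which lands in
 * ipath_create_qp() above.  The queue depths and SGE counts below are
 * arbitrary example values; they simply have to stay within
 * ib_ipath_max_qp_wrs and ib_ipath_max_sges, and create_flags must be
 * zero (checked at the top of ipath_create_qp()), which the designated
 * initializer guarantees here.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	/* Returns &qp->ibqp on success, or an ERR_PTR() value. */
	return ib_create_qp(pd, &init_attr);
}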
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int lastwqe = 0;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * Note: the chips support a maximum MTU of 4096, but the driver
	 * hasn't implemented this feature yet, so don't allow Path MTU
	 * values greater than 2048.
	 */
	if (attr_mask & IB_QP_PATH_MTU)
		if (attr->path_mtu > IB_MTU_2048)
			goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}
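
/*
 * Illustrative sketch, not part of the original driver: the
 * IB_EVENT_QP_LAST_WQE_REACHED event raised above (when lastwqe is set
 * by ipath_error_qp()) is delivered to the event_handler the consumer
 * registered in its ib_qp_init_attr.  A minimal hypothetical handler
 * might look like this:
 */
static void example_qp_event_handler(struct ib_event *event, void *context)
{
	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
		pr_info("QP %u: last WQE reached, receive queue flushed\n",
			event->element.qp->qp_num);
}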