int ipath_destroy_qp(struct ib_qp *ibqp)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
                if (!list_empty(&qp->timerwait))
                        list_del_init(&qp->timerwait);
                if (!list_empty(&qp->piowait))
                        list_del_init(&qp->piowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~IPATH_S_ANY_WAIT;
                spin_unlock_irq(&qp->s_lock);
                /* Stop the sending tasklet */
                tasklet_kill(&qp->s_task);
                wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
        } else
                spin_unlock_irq(&qp->s_lock);

        ipath_free_qp(&dev->qp_table, qp);

        if (qp->s_tx) {
                atomic_dec(&qp->refcount);
                if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
                        kfree(qp->s_tx->txreq.map_addr);
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
                spin_unlock_irq(&dev->pending_lock);
                qp->s_tx = NULL;
        }

        wait_event(qp->wait, !atomic_read(&qp->refcount));

        /* all users cleaned up, mark it available */
        free_qpn(&dev->qp_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        kfree(qp->r_ud_sg_list);
        vfree(qp->s_wq);
        kfree(qp);
        return 0;
}
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                   int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct ipath_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = 0;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn;
        attr->sq_psn = qp->s_next_psn;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = 0;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = 1;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = 0;
        attr->alt_timeout = 0;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = 1;
        return 0;
}
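/*
 * Illustrative sketch (not part of the driver): in-kernel consumers do not
 * call ipath_query_qp() directly; they use the core ib_query_qp() verbs
 * entry point, which dispatches to the provider routine above.  The helper
 * name example_sq_is_draining() is hypothetical.
 */
static int example_sq_is_draining(struct ib_qp *qp)
{
        struct ib_qp_attr attr;
        struct ib_qp_init_attr init_attr;
        int ret;

        /* Ask the provider for the current QP attributes. */
        ret = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
        if (ret)
                return ret;

        /* sq_draining mirrors qp->s_draining reported above. */
        return attr.sq_draining;
}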
/**
 * hfi1_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int hfi1_destroy_qp(struct ib_qp *ibqp)
{
        struct hfi1_qp *qp = to_iqp(ibqp);
        struct hfi1_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                flush_iowait(qp);
                qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock_irq(&qp->r_lock);
                cancel_work_sync(&qp->s_iowait.iowork);
                del_timer_sync(&qp->s_timer);
                iowait_sdma_drain(&qp->s_iowait);
                flush_tx_list(qp);
                remove_qp(dev, qp);
                wait_event(qp->wait, !atomic_read(&qp->refcount));
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_lock);
                clear_mr_refs(qp, 1);
                clear_ahg(qp);
        }
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        /* all users cleaned up, mark it available */
        free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, hfi1_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp->s_hdr);
        kfree(qp);
        return 0;
}
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        qp->state = IB_QPS_ERR;
        spin_unlock_irqrestore(&qp->s_lock, flags);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        /* Stop the sending tasklet. */
        tasklet_kill(&qp->s_task);

        /* Make sure the QP isn't on the timeout list. */
        spin_lock_irqsave(&dev->pending_lock, flags);
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        /*
         * Make sure that the QP is not in the QPN table so receive
         * interrupts will discard packets for this QP.  XXX Also remove QP
         * from multicast table.
         */
        if (atomic_read(&qp->refcount) != 0)
                ipath_free_qp(&dev->qp_table, qp);

        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp);
        return 0;
}
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int lastwqe = 0;
        int ret;

        spin_lock_irq(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid == 0 ||
                    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;

                if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
                    (attr->ah_attr.grh.sgid_index > 1))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values.  Path MTUs above 2048 are
         * only permitted when 4KB MTU support (ipath_mtu4096) is enabled.
         */
        if ((attr_mask & IB_QP_PATH_MTU) &&
            (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
             (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
                goto inval;

        if (attr_mask & IB_QP_PATH_MIG_STATE)
                if (attr->path_mig_state != IB_MIG_MIGRATED &&
                    attr->path_mig_state != IB_MIG_REARM)
                        goto inval;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
                        if (!list_empty(&qp->timerwait))
                                list_del_init(&qp->timerwait);
                        if (!list_empty(&qp->piowait))
                                list_del_init(&qp->piowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~IPATH_S_ANY_WAIT;
                        spin_unlock_irq(&qp->s_lock);
                        /* Stop the sending tasklet */
                        tasklet_kill(&qp->s_task);
                        wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                        spin_lock_irq(&qp->s_lock);
                }
                ipath_reset_qp(qp, ibqp->qp_type);
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_psn = qp->s_next_psn = attr->sq_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
        }

        if (attr_mask & IB_QP_PATH_MTU)
                qp->path_mtu = attr->path_mtu;

        if (attr_mask & IB_QP_RETRY_CNT)
                qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry = attr->rnr_retry;
                if (qp->s_rnr_retry > 7)
                        qp->s_rnr_retry = 7;
                qp->s_rnr_retry_cnt = qp->s_rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock_irq(&qp->s_lock);

        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock_irq(&qp->s_lock);
        ret = -EINVAL;

bail:
        return ret;
}
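/*
 * Illustrative sketch (not part of the driver): a consumer drives the state
 * machine above through the core ib_modify_qp() entry point.  For example,
 * an RC QP that has reached RTR is typically moved to RTS with a mask
 * covering the send-side attributes validated above; the attribute values
 * and the helper name example_move_to_rts() are hypothetical.
 */
static int example_move_to_rts(struct ib_qp *qp)
{
        struct ib_qp_attr attr = {
                .qp_state       = IB_QPS_RTS,
                .sq_psn         = 0,
                .timeout        = 14,   /* ~67 ms local ACK timeout */
                .retry_cnt      = 7,
                .rnr_retry      = 7,
                .max_rd_atomic  = 1,
        };

        return ib_modify_qp(qp, &attr,
                            IB_QP_STATE | IB_QP_SQ_PSN | IB_QP_TIMEOUT |
                            IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
                            IB_QP_MAX_QP_RD_ATOMIC);
}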
/**
 * hfi1_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                   int attr_mask, struct ib_udata *udata)
{
        struct hfi1_ibdev *dev = to_idev(ibqp->device);
        struct hfi1_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int ret;
        u32 pmtu = 0; /* for gcc warning only */
        struct hfi1_devdata *dd;

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask, IB_LINK_LAYER_UNSPECIFIED))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
                        goto inval;
                if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
                        goto inval;
                if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= hfi1_get_npkeys(dd_from_dev(dev)))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > HFI1_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;

        /*
         * Don't allow invalid path_mtu values.  OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu).  We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                int mtu, pidx = qp->port_num - 1;

                dd = dd_from_dev(dev);
                mtu = verbs_mtu_enum_to_int(ibqp->device, attr->path_mtu);
                if (mtu == -1)
                        goto inval;

                if (mtu > dd->pport[pidx].ibmtu)
                        pmtu = mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
                else
                        pmtu = attr->path_mtu;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS &&
                            new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else
                        goto inval;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > HFI1_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        flush_iowait(qp);
                        qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
                        spin_unlock(&qp->s_lock);
                        spin_unlock_irq(&qp->r_lock);
                        /* Stop the sending work queue and retry timer */
                        cancel_work_sync(&qp->s_iowait.iowork);
                        del_timer_sync(&qp->s_timer);
                        iowait_sdma_drain(&qp->s_iowait);
                        flush_tx_list(qp);
                        remove_qp(dev, qp);
                        wait_event(qp->wait, !atomic_read(&qp->refcount));
                        spin_lock_irq(&qp->r_lock);
                        spin_lock(&qp->s_lock);
                        clear_mr_refs(qp, 1);
                        clear_ahg(qp);
                        reset_qp(qp, ibqp->qp_type);
                }
                break;

        case IB_QPS_RTR:
                /* Allow event to re-trigger if QP set to RTR more than once */
                qp->r_flags &= ~HFI1_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_srate = attr->ah_attr.static_rate;
                qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                qp->alt_ah_attr = attr->alt_ah_attr;
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = qp->alt_ah_attr.port_num;
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                        qp->s_flags |= HFI1_S_AHG_CLEAR;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                struct hfi1_ibport *ibp;
                u8 sc, vl;
                u32 mtu;

                dd = dd_from_dev(dev);
                ibp = &dd->pport[qp->port_num - 1].ibport_data;

                sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
                vl = sc_to_vlt(dd, sc);

                mtu = verbs_mtu_enum_to_int(ibqp->device, pmtu);
                if (vl < PER_VL_SEND_CONTEXTS)
                        mtu = min_t(u32, mtu, dd->vld[vl].mtu);
                pmtu = mtu_to_enum(mtu, OPA_MTU_8192);

                qp->path_mtu = pmtu;
                qp->pmtu = mtu;
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                1000UL);
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                insert_qp(dev, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock_irq(&qp->r_lock);
        ret = -EINVAL;

bail:
        return ret;
}
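/*
 * Illustrative sketch (not part of the driver): the IB local ACK timeout
 * attribute is an exponent; a value T encodes roughly 4.096 us * 2^T.  The
 * hypothetical helper below spells out the arithmetic behind the
 * timeout_jiffies computation in hfi1_modify_qp() above, e.g. T = 14 gives
 * roughly 67 ms.
 */
static inline unsigned long example_timeout_usecs(u8 timeout)
{
        /* 4096 ns * 2^timeout, converted to microseconds */
        return (4096UL * (1UL << timeout)) / 1000UL;
}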
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        unsigned long flags;
        int lastwqe = 0;
        int ret;

        spin_lock_irqsave(&qp->s_lock, flags);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid == 0 ||
                    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;

                if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
                    (attr->ah_attr.grh.sgid_index > 1))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        /*
         * Note: the chips support a maximum MTU of 4096, but the driver
         * hasn't implemented this feature yet, so don't allow Path MTU
         * values greater than 2048.
         */
        if (attr_mask & IB_QP_PATH_MTU)
                if (attr->path_mtu > IB_MTU_2048)
                        goto inval;

        if (attr_mask & IB_QP_PATH_MIG_STATE)
                if (attr->path_mig_state != IB_MIG_MIGRATED &&
                    attr->path_mig_state != IB_MIG_REARM)
                        goto inval;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                ipath_reset_qp(qp, ibqp->qp_type);
                break;

        case IB_QPS_ERR:
                lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_psn = qp->s_next_psn = attr->sq_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV)
                qp->remote_ah_attr = attr->ah_attr;

        if (attr_mask & IB_QP_PATH_MTU)
                qp->path_mtu = attr->path_mtu;

        if (attr_mask & IB_QP_RETRY_CNT)
                qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry = attr->rnr_retry;
                if (qp->s_rnr_retry > 7)
                        qp->s_rnr_retry = 7;
                qp->s_rnr_retry_cnt = qp->s_rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        qp->state = new_state;
        spin_unlock_irqrestore(&qp->s_lock, flags);

        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        ret = -EINVAL;

bail:
        return ret;
}