/**
 * hfi1_check_send_wqe - validate wqe
 * @qp: The qp
 * @wqe: The built wqe
 *
 * Validate the wqe.  This is called prior to inserting the wqe into
 * the ring, but after the wqe has been set up.
 *
 * Returns -EINVAL on failure; otherwise returns the result of the pio
 * threshold check (non-zero when wqe->length <= piothreshold, 0 otherwise).
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* 0xf means the SL has no valid SC mapping */
		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
			return -EINVAL;
		break;
	default:
		break;
	}

	return wqe->length <= piothreshold;
}
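/*
 * Illustrative sketch only (not part of the driver): one way a post-send
 * path could consume hfi1_check_send_wqe()'s return value.  The names
 * example_post_one_wqe and call_send are assumptions made for this
 * example; the real consumer lives in the rdmavt post-send code and may
 * differ.
 */
static int example_post_one_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
				bool *call_send)
{
	int ret = hfi1_check_send_wqe(qp, wqe);

	if (ret < 0)
		return ret;		/* invalid wqe, reject the post */
	if (ret)
		*call_send = true;	/* small wqe: progress the send inline */
	return 0;
}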
/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	/*
	 * Run progress on the sdma engine's CPU if one is assigned,
	 * otherwise on the first CPU of the device's NUMA node.
	 */
	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}
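/*
 * Illustrative sketch only: the driver pairs the helper above with a
 * wrapper that does honor the send state before scheduling.  The exact
 * predicate is assumed here (hfi1_send_ok() is taken to check the
 * relevant s_flags); treat this as a sketch, not the authoritative
 * implementation.
 */
static inline void example_schedule_send(struct rvt_qp *qp)
{
	if (hfi1_send_ok(qp))		/* only schedule if progress is allowed */
		_hfi1_schedule_send(qp);
}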
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp[0],
			lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp[0], NULL);
	} else if (rcu_dereference_protected(ibp->qp[1],
			lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(ibp->qp[1], NULL);
	} else {
		struct hfi1_qp *q;
		struct hfi1_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qp_dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qp_dev->qpt_lock)));
				removed = 1;
				trace_hfi1_qpremove(qp, n);
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
	if (removed) {
		/*
		 * Wait for any RCU readers still holding a pointer to the
		 * QP before dropping the table's reference.
		 */
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
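/*
 * Illustrative sketch only: the reader side that the synchronize_rcu()
 * above guards against.  The receive path walks the same table under
 * rcu_read_lock(), so remove_qp() must wait for such readers before the
 * QP's reference is dropped.  The function name and the exact lookup
 * used by the driver are assumptions for this example; the caller must
 * hold rcu_read_lock().
 */
static struct hfi1_qp *example_lookup_qpn(struct hfi1_ibdev *dev,
					  struct hfi1_ibport *ibp, u32 qpn)
{
	struct hfi1_qp *qp;

	/* QP0/QP1 live in per-port slots rather than the hash table */
	if (qpn <= 1)
		return rcu_dereference(ibp->qp[qpn]);

	for (qp = rcu_dereference(dev->qp_dev->qp_table[qpn_hash(dev->qp_dev, qpn)]);
	     qp;
	     qp = rcu_dereference(qp->next))
		if (qp->ibqp.qp_num == qpn)
			break;
	return qp;
}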
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		/* QP0/QP1 (SMI/GSI) go into the per-port slots */
		rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);

		qp->next = dev->qp_dev->qp_table[n];
		rcu_assign_pointer(dev->qp_dev->qp_table[n], qp);
		trace_hfi1_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
}
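/*
 * Illustrative sketch only: because the table owns exactly one reference
 * and a reader's RCU-protected pointer is only stable inside the
 * read-side critical section, a caller that wants to keep the QP must
 * take its own reference before dropping rcu_read_lock().
 * example_lookup_qpn() is the hypothetical lookup sketched earlier.
 */
static struct hfi1_qp *example_get_qp(struct hfi1_ibdev *dev,
				      struct hfi1_ibport *ibp, u32 qpn)
{
	struct hfi1_qp *qp;

	rcu_read_lock();
	qp = example_lookup_qpn(dev, ibp, qpn);
	if (qp)
		atomic_inc(&qp->refcount);	/* pin the QP past the RCU section */
	rcu_read_unlock();
	return qp;
}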