/**
 * ipath_do_send - perform a send on a QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void ipath_do_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	int (*make_req)(struct ipath_qp *qp);
	unsigned long flags;

	/*
	 * Connected QP (RC/UC) whose destination LID is our own port:
	 * take the local loopback path instead of going out on the wire.
	 */
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
		ipath_ruc_loopback(qp);
		goto bail;
	}

	/* Pick the request builder that matches the QP's transport type. */
	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = ipath_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = ipath_make_uc_req;
	else
		make_req = ipath_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
	    !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	/*
	 * Claim the QP under s_lock so only one CPU runs this send loop
	 * at a time; this is what keeps packets from going out of order.
	 */
	qp->s_flags |= IPATH_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return. An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 * NOTE(review): when ipath_verbs_send() fails but
		 * ipath_no_bufs_available() returns false, we fall through
		 * and still clear s_hdrwords — presumably the helper has
		 * rescheduled or handled the packet; confirm against the
		 * helpers' contracts.
		 */
		if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
				     qp->s_cur_sge, qp->s_cur_size)) {
			if (ipath_no_bufs_available(qp, dev))
				goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/* Build the next packet; loop until the builder has nothing to do. */
	if (make_req(qp))
		goto again;
bail:;
}
void ipath_do_send(unsigned long data) { struct ipath_qp *qp = (struct ipath_qp *)data; struct ipath_ibdev *dev = to_idev(qp->ibqp.device); int (*make_req)(struct ipath_qp *qp); unsigned long flags; if ((qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) && qp->remote_ah_attr.dlid == dev->dd->ipath_lid) { ipath_ruc_loopback(qp); goto bail; } if (qp->ibqp.qp_type == IB_QPT_RC) make_req = ipath_make_rc_req; else if (qp->ibqp.qp_type == IB_QPT_UC) make_req = ipath_make_uc_req; else make_req = ipath_make_ud_req; spin_lock_irqsave(&qp->s_lock, flags); if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) { spin_unlock_irqrestore(&qp->s_lock, flags); goto bail; } qp->s_flags |= IPATH_S_BUSY; spin_unlock_irqrestore(&qp->s_lock, flags); again: if (qp->s_hdrwords != 0) { if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords, qp->s_cur_sge, qp->s_cur_size)) { if (ipath_no_bufs_available(qp, dev)) goto bail; } dev->n_unicast_xmit++; qp->s_hdrwords = 0; } if (make_req(qp)) goto again; bail:; }