/**
 * ipath_do_send - perform a send on a QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void ipath_do_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	int (*make_req)(struct ipath_qp *qp);
	unsigned long flags;

	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
		ipath_ruc_loopback(qp);
		goto bail;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = ipath_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = ipath_make_uc_req;
	else
		make_req = ipath_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
	    !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	qp->s_flags |= IPATH_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
				     qp->s_cur_sge, qp->s_cur_size)) {
			if (ipath_no_bufs_available(qp, dev))
				goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	if (make_req(qp))
		goto again;

bail:;
}
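/*
 * Illustrative sketch (not part of the original file): ipath_do_send()
 * takes an unsigned long because it is a tasklet handler.  Assuming the
 * QP embeds a tasklet (called s_task below; that field name is an
 * assumption for illustration), binding and kicking the send path would
 * look roughly like this.  tasklet_init() and tasklet_hi_schedule() are
 * standard kernel APIs.
 */
#if 0	/* sketch only */
static void example_init_send_tasklet(struct ipath_qp *qp)
{
	/* Bind the per-QP send tasklet to ipath_do_send(). */
	tasklet_init(&qp->s_task, ipath_do_send, (unsigned long) qp);
}

static void example_kick_send(struct ipath_qp *qp)
{
	/*
	 * Any context that creates new work (post_send, an incoming ACK,
	 * the PIO-buffer-available interrupt) reschedules the tasklet;
	 * the IPATH_S_BUSY flag checked in ipath_do_send() ensures only
	 * one CPU runs the send path for this QP at a time.
	 */
	tasklet_hi_schedule(&qp->s_task);
}
#endif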
/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
		ipath_ruc_loopback(qp);
		goto clear;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
	      ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
	      ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_busy);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPATH_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPATH_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_busy);
bail:
	return;
}
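/*
 * Illustrative sketch (not part of the original file): the older
 * ipath_do_ruc_send() above enforces the one-sender-per-QP rule with
 * test_and_set_bit() on IPATH_S_BUSY rather than taking s_lock first,
 * as the newer ipath_do_send() does.  A minimal, self-contained
 * user-space analogue of that guard, using C11 atomics; every name
 * below is invented for illustration.
 */
#if 0	/* sketch only; build as plain C11, not kernel code */
#include <stdatomic.h>

struct example_qp {
	atomic_flag busy;	/* plays the role of the IPATH_S_BUSY bit */
};

static void example_do_send(struct example_qp *qp)
{
	/*
	 * atomic_flag_test_and_set() returns the previous value: true
	 * means another thread already owns the send path, so back off
	 * rather than risk sending packets out of order.
	 */
	if (atomic_flag_test_and_set(&qp->busy))
		return;

	/* ... construct and send packets in order ... */

	atomic_flag_clear(&qp->busy);
}
#endif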