/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int ipath_no_bufs_available(struct ipath_qp *qp,
				   struct ipath_ibdev *dev)
{
	unsigned long flags;
	int ret = 1;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called. Therefore, put QP on the piowait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
		dev->n_piowait++;
		/* Mark the QP waiting for a buffer and no longer busy. */
		qp->s_flags |= IPATH_S_WAITING;
		qp->s_flags &= ~IPATH_S_BUSY;
		/* pending_lock nests inside s_lock here. */
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->piowait))
			list_add_tail(&qp->piowait, &dev->piowait);
		spin_unlock(&dev->pending_lock);
	} else
		ret = 0;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	if (ret)
		/* Ask the chip driver to notify us when buffers free up. */
		want_buffer(dev->dd, qp);
	return ret;
}
/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	/* Queue the QP on the piowait list unless it is already there. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called. If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not call us until the next time
	 * tasklet_hi_schedule() is called.
	 * We clear the tasklet flag now since we are committing to return
	 * from the tasklet function.
	 */
	clear_bit(IPATH_S_BUSY, &qp->s_busy);
	tasklet_unlock(&qp->s_task);
	want_buffer(dev->dd);
	/* NOTE(review): n_piowait is updated outside any lock here;
	 * presumably a stats-only counter — confirm against readers. */
	dev->n_piowait++;
}
static int ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) { unsigned long flags; int ret = 1; spin_lock_irqsave(&qp->s_lock, flags); if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { dev->n_piowait++; qp->s_flags |= IPATH_S_WAITING; qp->s_flags &= ~IPATH_S_BUSY; spin_lock(&dev->pending_lock); if (list_empty(&qp->piowait)) list_add_tail(&qp->piowait, &dev->piowait); spin_unlock(&dev->pending_lock); } else ret = 0; spin_unlock_irqrestore(&qp->s_lock, flags); if (ret) want_buffer(dev->dd, qp); return ret; }