int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
		      int nr_sectors)
{
	int ret = 1;

	BUG_ON(!(rq->flags & REQ_STARTED));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (blk_rq_tagged(rq))
			blk_queue_end_tag(drive->queue, rq);
		blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq);
		ret = 0;
	}

	return ret;
}
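A caller of this helper is typically an IDE driver's interrupt handler, which completes the request once the last sector has been transferred. The following is a minimal sketch of that pattern, assuming the 2.6-era IDE API where ide_end_request(drive, uptodate, nr_sectors) wraps __ide_end_request(); the handler name and the one-sector-per-interrupt PIO accounting are illustrative assumptions, not code from the listing above.

#include <linux/ide.h>

/* hypothetical PIO completion interrupt handler */
static ide_startstop_t example_pio_intr(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;

	/* ... transfer one sector of data to/from the drive here ... */

	if (rq->nr_sectors == 1) {
		/* last sector: complete the whole request as successful */
		ide_end_request(drive, 1, 1);
		return ide_stopped;
	}

	/* more sectors remain; wait for the next interrupt */
	return ide_started;
}

Note how the handler never dequeues or frees the request itself: __ide_end_request() only does that once end_that_request_first() reports that no sectors remain, which is why partial completions can safely leave the request on the queue.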
void __scsi_release_request(struct scsi_request *sreq)
{
	struct request *req = sreq->sr_request;

	/* unlikely because the tag was usually ended earlier by the
	 * mid-layer. However, for layering reasons ULD's don't end
	 * the tag of commands they generate. */
	if (unlikely(blk_rq_tagged(req))) {
		unsigned long flags;
		struct request_queue *q = req->q;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_queue_end_tag(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	if (likely(sreq->sr_command != NULL)) {
		struct scsi_cmnd *cmd = sreq->sr_command;

		sreq->sr_command = NULL;
		scsi_next_command(cmd);
	}
}
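For context, upper-layer drivers of the same era reached this path through the scsi_request API: allocate a request, issue it synchronously, then release it. Below is a minimal sketch assuming the mid-2.6 signatures of scsi_allocate_request() and scsi_wait_req() (treat the exact argument lists, the function name, and the error handling as assumptions); scsi_release_request() is the exported wrapper that ends up calling __scsi_release_request().

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_request.h>

/* hypothetical helper issuing TEST UNIT READY on a device */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_request *sreq;
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	/* issue the command and sleep until it completes */
	scsi_wait_req(sreq, cmd, NULL, 0, 30 * HZ, 3);
	result = sreq->sr_result;

	/* ends any leftover block-layer tag and tears down the request */
	scsi_release_request(sreq);
	return result;
}

This is the layering issue the comment in __scsi_release_request() alludes to: the ULD above never touches the block-layer tag itself, so the release path must end the tag (under the queue lock) on its behalf before handing the command back via scsi_next_command().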