/*
 * ide_end_drive_cmd - complete the request currently owned by the hwgroup
 * @drive: IDE device the command was issued to
 * @stat:  ATA status register value sampled at completion
 * @err:   ATA error register value sampled at completion
 *
 * Legacy (ide_lock/HWGROUP) variant.  Fetches the in-flight request from
 * the hwgroup, records taskfile results (or advances the power-management
 * state machine), then detaches the request and hands it back to the block
 * layer under ide_lock.
 *
 * NOTE(review): for PM requests this returns early without touching
 * HWGROUP(drive)->rq here — presumably ide_complete_pm_request() does the
 * detach/complete itself; confirm against that helper.
 */
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	unsigned long flags;
	struct request *rq;

	/* Snapshot the active request; ide_lock guards the hwgroup's rq slot. */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		/* Flag an error only if none was recorded yet and the
		 * status byte is not a clean "ready" indication. */
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			/* Propagate final error/status, then read back the
			 * remaining taskfile registers from the device. */
			tf->error = err;
			tf->status = stat;

			drive->hwif->tp_ops->tf_read(drive, task);

			/* Dynamically allocated tasks are owned by us. */
			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		/* Power-management request: advance the PM state machine;
		 * completion (if any) is handled by the PM helpers. */
		struct request_pm_state *pm = rq->data;

		ide_complete_power_step(drive, rq);
		if (pm->pm_step == IDE_PM_COMPLETED)
			ide_complete_pm_request(drive, rq);
		return;
	}

	/* Detach the request and complete it; ide_lock is held, so the
	 * __blk_end_request() (caller-locked) variant is used.  It returns
	 * nonzero only if the request was not fully completed, which must
	 * never happen for the full byte count — hence BUG(). */
	spin_lock_irqsave(&ide_lock, flags);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
				       blk_rq_bytes(rq))))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}
/*
 * ide_end_drive_cmd - complete the request currently owned by the port
 * @drive: IDE device the command was issued to
 * @stat:  ATA status register value sampled at completion
 * @err:   ATA error register value sampled at completion
 *
 * Post-ide_lock variant: the in-flight request now lives in hwif->rq and
 * no global spinlock is taken here; blk_end_request() (which acquires the
 * queue lock itself) replaces __blk_end_request().
 *
 * NOTE(review): unlike the older variant, rq->errors is not pre-set from
 * OK_STAT() in the taskfile branch — it is only assigned from @err at the
 * end; confirm this was an intentional simplification.
 */
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			/* Propagate final error/status, then read back the
			 * remaining taskfile registers from the device. */
			tf->error = err;
			tf->status = stat;

			drive->hwif->tp_ops->tf_read(drive, task);

			/* Dynamically allocated tasks are owned by us. */
			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		/* Power-management request: advance the PM state machine;
		 * completion (if any) is handled by the PM helpers. */
		struct request_pm_state *pm = rq->data;

		ide_complete_power_step(drive, rq);
		if (pm->pm_step == IDE_PM_COMPLETED)
			ide_complete_pm_request(drive, rq);
		return;
	}

	/* Detach the request from the port and complete it.  A nonzero
	 * return from blk_end_request() here means the full byte count did
	 * not finish the request, which must never happen — hence BUG(). */
	hwif->rq = NULL;

	rq->errors = err;

	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		BUG();
}
/*
 * start_request - dispatch a block-layer request to the IDE device
 * @drive: target device
 * @rq:    request to issue (must already be started by the block layer)
 *
 * Selects the drive, waits for it to become ready, then routes the
 * request by type: taskfile commands, power-management steps, bare
 * "special" requests, or regular transfers handed to the attached
 * ide_driver.  Pending per-drive special operations (drive->special)
 * are serviced first via do_special().
 *
 * Returns an ide_startstop_t from the chosen handler, or ide_stopped
 * after killing a request that exceeded max_failures.
 */
static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	/* PM requests may need to adjust device state before selection. */
	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);

	/* Device must be ready (neither busy nor requesting data). */
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}

	/* Only dispatch the request when no special per-drive operations
	 * are pending; otherwise service those first (below). */
	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		/* 0xff marks a speed invalidated by reset — reprogram it. */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_request(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		/* Regular request: hand off to the attached subdriver
		 * (ide-disk/ide-cd/...) stored in the gendisk. */
		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}