/*
 * promise_read_intr() is the handler for disk read/multread interrupts
 *
 * Reads the controller's "sectors remaining" count (double-read until two
 * reads agree, to get a stable value), transfers whatever data the drive
 * has made available via PIO, ends completed request chunks, and either
 * loops for more data, re-arms itself as the interrupt handler, or fails
 * the request through ide_error().
 */
static ide_startstop_t promise_read_intr (ide_drive_t *drive)
{
	byte stat;
	int i;
	unsigned int sectors_left, sectors_avail, nsect;
	struct request *rq;

	/* bail out through the generic error path on a bad status */
	if (!OK_STAT(stat=GET_STAT(),DATA_READY,BAD_R_STAT)) {
		return ide_error(drive, "promise_read_intr", stat);
	}
read_again:
	/*
	 * Read the remaining-sector count until two consecutive reads
	 * return the same value; the intervening IDE_SECTOR_REG read is
	 * part of the controller's access sequence.
	 * NOTE(review): exact register protocol is Promise-specific —
	 * confirm against the controller datasheet.
	 */
	do {
		sectors_left = IN_BYTE(IDE_NSECTOR_REG);
		IN_BYTE(IDE_SECTOR_REG);
	} while (IN_BYTE(IDE_NSECTOR_REG) != sectors_left);
	rq = HWGROUP(drive)->rq;
	/* sectors the drive has already made available for transfer */
	sectors_avail = rq->nr_sectors - sectors_left;
read_next:
	/* re-fetch rq: ide_end_request() below may have advanced it */
	rq = HWGROUP(drive)->rq;
	/* transfer at most the current chunk, at most what is available */
	if ((nsect = rq->current_nr_sectors) > sectors_avail)
		nsect = sectors_avail;
	sectors_avail -= nsect;
	ide_input_data(drive, rq->buffer, nsect * SECTOR_WORDS);
#ifdef DEBUG
	printk("%s: promise_read: sectors(%ld-%ld), buffer=0x%08lx, "
	       "remaining=%ld\n", drive->name, rq->sector, rq->sector+nsect-1,
	       (unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect);
#endif
	/* advance the request past the sectors just transferred */
	rq->sector += nsect;
	rq->buffer += nsect<<9;
	rq->errors = 0;
	i = (rq->nr_sectors -= nsect);
	/* finished the current chunk: complete it toward the block layer */
	if ((rq->current_nr_sectors -= nsect) <= 0)
		ide_end_request(1, HWGROUP(drive));
	if (i > 0) {
		/* more sectors outstanding on this request */
		if (sectors_avail)
			goto read_next;	/* drive still has data buffered */
		stat = GET_STAT();
		if(stat & DRQ_STAT)
			goto read_again;	/* drive asserted more data */
		if(stat & BUSY_STAT) {
			/* drive busy: wait for the next interrupt */
			ide_set_handler (drive, &promise_read_intr, WAIT_CMD, NULL);
			return ide_started;
		}
		/* sectors remain but drive shows neither DRQ nor BUSY */
		printk("Ah! promise read intr: sectors left !DRQ !BUSY\n");
		return ide_error(drive, "promise read intr", stat);
	}
	return ide_stopped;
}
/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0. All other
 * cases return 1 after invoking ide_error() -- caller should just return.
 *
 * This routine should get fixed to not hog the cpu during extra long waits..
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half second intervals thereafter,
 * until timeout is achieved, before timing out.
 */
int ide_wait_stat (ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;
	int i;
	unsigned long flags;

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		*startstop = ide_stopped;
		return 1;
	}

	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		/* busy: spin with interrupts enabled until not-busy or timeout */
		local_irq_set(flags);
		timeout += jiffies;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout)) {
				/*
				 * One last read after the timeout in case
				 * heavy interrupt load made us not make any
				 * progress during the timeout..
				 */
				stat = hwif->INB(IDE_STATUS_REG);
				if (!(stat & BUSY_STAT))
					break;
				local_irq_restore(flags);
				*startstop = ide_error(drive, "status timeout", stat);
				return 1;
			}
		}
		local_irq_restore(flags);
	}
	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad))
			return 0;	/* status is good */
	}
	/* never saw a good status: fail through the generic error path */
	*startstop = ide_error(drive, "status error", stat);
	return 1;
}
/*
 * task_error() - complete the sectors that transferred successfully
 * before a taskfile I/O error, then hand off to ide_error().
 *
 * For single-sector phases the sector currently in flight is discounted
 * (sectors--); for multi-sector phases a whole multi-block is discounted
 * (sectors -= drive->mult_count).  The IN phases only discount when the
 * interface still reports data left (hwif->nleft) — otherwise they fall
 * through to the matching OUT case on purpose.
 */
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		/* sectors the interface has fully processed so far */
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			/* complete the good sectors through the driver */
			struct ide_driver *drv;

			drv = *(struct ide_driver **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}
/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 * A good drive status ends the command; anything else goes through the
 * generic ide_error() recovery path.
 */
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
	u8 stat = ide_read_status(drive);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	return ide_error(drive, "recal_intr", stat);
}
/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 * Re-enables interrupts, reads the drive status through the transport ops,
 * and either finishes cleanly or enters generic error recovery.
 */
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat;

	local_irq_enable_in_hardirq();

	stat = hwif->tp_ops->read_status(hwif);
	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	return ide_error(drive, "recal_intr", stat);
}
/*
 * Handler for commands without a data phase
 *
 * Polls the drive status (up to 5 times for INIT_DEV_PARAMS issued by a
 * custom handler, otherwise a single retry), then either completes the
 * command, re-arms itself while the drive settles, or fails through
 * ide_error().  SET_MULTI and IDLEIMMEDIATE custom commands get special
 * post-processing.
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *task = &hwif->task;
	struct ide_taskfile *tf = &task->tf;
	/* was this command issued through a driver-internal (custom) path? */
	int custom = (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
	/* INIT_DEV_PARAMS may stay BUSY briefly: allow extra status polls */
	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
	u8 stat;

	local_irq_enable_in_hardirq();

	/* poll until the drive deasserts BUSY or the retries run out */
	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
			break;
		udelay(10);
	};

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		if (custom && tf->command == ATA_CMD_SET_MULTI) {
			/* failed SET_MULTI: drop multi-mode and recalibrate */
			drive->mult_req = drive->mult_count = 0;
			drive->special.b.recalibrate = 1;
			(void)ide_dump_status(drive, __func__, stat);
			return ide_stopped;
		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
				/* still settling, no error: wait for the IRQ */
				ide_set_handler(drive, &task_no_data_intr,
						WAIT_WORSTCASE, NULL);
				return ide_started;
			}
		}
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}

	if (!custom)
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));
	else if (tf->command == ATA_CMD_IDLEIMMEDIATE) {
		/* head-unload variant: verify the signature in LBA low */
		hwif->tp_ops->tf_read(drive, task);
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, tf);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));
	} else if (tf->command == ATA_CMD_SET_MULTI)
		/* commit the newly negotiated multi-sector count */
		drive->mult_count = drive->mult_req;

	return ide_stopped;
}
/*
 * un-busy the port etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 *
 * @error: negative means the DMA engine reported a hard error (end the
 *         transaction and go through ide_error()); otherwise this is a
 *         plain timeout and the controller's dma_timeout hook is used.
 *
 * Returns ide_stopped, or whatever ide_error() decided on the error path.
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */
	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)hwif->dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_ops->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc and make sure request is sane
	 */
	rq = hwif->rq;
	if (!rq)
		goto out;

	hwif->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	/* rewind the request to the start of its current bio so the
	 * whole chunk is re-issued in PIO mode */
	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
/*
 * task_no_data_intr() - handler for commands without a data phase
 * (struct ide_cmd variant).
 *
 * Polls the drive status (extra retries only for a custom-issued
 * INIT_DEV_PARAMS), handles the custom SET_MULTI / INIT_DEV_PARAMS
 * failure cases specially, and on success finishes the request —
 * including power-management requests — through the appropriate
 * completion path.
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	/* set when the command came from a driver-internal path */
	int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
	/* INIT_DEV_PARAMS may stay BUSY briefly: allow extra polls */
	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
	u8 stat;

	local_irq_enable_in_hardirq();

	/* poll until BUSY clears or retries are exhausted */
	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
			break;
		udelay(10);
	};

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		if (custom && tf->command == ATA_CMD_SET_MULTI) {
			/* failed SET_MULTI: drop multi mode, recalibrate */
			drive->mult_req = drive->mult_count = 0;
			drive->special_flags |= IDE_SFLAG_RECALIBRATE;
			(void)ide_dump_status(drive, __func__, stat);
			return ide_stopped;
		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
				/* still settling, no error: wait for IRQ */
				ide_set_handler(drive, &task_no_data_intr,
						WAIT_WORSTCASE);
				return ide_started;
			}
		}
		return ide_error(drive, "task_no_data_intr", stat);
	}

	if (custom && tf->command == ATA_CMD_SET_MULTI)
		/* commit the newly negotiated multi-sector count */
		drive->mult_count = drive->mult_req;

	if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
	    tf->command == ATA_CMD_CHK_POWER) {
		struct request *rq = hwif->rq;

		/* PM requests have their own completion path */
		if (blk_pm_request(rq))
			ide_complete_pm_rq(drive, rq);
		else
			ide_finish_cmd(drive, cmd, stat);
	}

	return ide_stopped;
}
/*
 * Handler for commands without a data phase.
 * On a good status the attached taskfile command (if any) is completed
 * via ide_end_drive_cmd(); a bad status enters generic error recovery.
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_task_t *args = HWGROUP(drive)->rq->special;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = ide_read_status(drive);

	if (OK_STAT(stat, READY_STAT, BAD_STAT)) {
		if (args)
			ide_end_drive_cmd(drive, stat, ide_read_error(drive));
		return ide_stopped;
	}

	/* calls ide_end_drive_cmd */
	return ide_error(drive, "task_no_data_intr", stat);
}
/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 * Polls BUSY for up to 5 rounds; a good status ends the command, an
 * ERR/DRQ status enters error recovery, and anything else re-arms this
 * handler to wait for the drive to settle.
 */
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
	int retries = 5;
	u8 stat;

	stat = ide_read_status(drive);
	while ((stat & BUSY_STAT) && retries--) {
		udelay(10);
		stat = ide_read_status(drive);
	}

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	BUG_ON(HWGROUP(drive)->handler != NULL);
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
/*
 * Handler for commands without a data phase (hwif/tp_ops variant).
 * On a good status the attached taskfile command (if any) is completed
 * via ide_end_drive_cmd(); a bad status enters generic error recovery.
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *args = hwif->hwgroup->rq->special;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, READY_STAT, BAD_STAT)) {
		if (args)
			ide_end_drive_cmd(drive, stat, ide_read_error(drive));
		return ide_stopped;
	}

	/* calls ide_end_drive_cmd */
	return ide_error(drive, "task_no_data_intr", stat);
}
/*
 * ide_wait_stat() - wait for a "good" drive status.
 * Bails out immediately once a drive has exceeded max_failures.
 * On timeout or bad status, routes the failure through ide_error()
 * and reports it via *startstop.  Returns 0 on success, non-zero on
 * failure (the __ide_wait_stat() error code, or 1 for max_failures).
 */
int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good,
		  u8 bad, unsigned long timeout)
{
	u8 stat;
	int err;

	/* give up on drives that have already failed too often */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		*startstop = ide_stopped;
		return 1;
	}

	err = __ide_wait_stat(drive, good, bad, timeout, &stat);
	if (err == 0)
		return 0;

	*startstop = ide_error(drive,
			       (err == -EBUSY) ? "status timeout"
					       : "status error",
			       stat);
	return err;
}
/*
 * promise_write_pollfunc() is the handler for disk write completion polling.
 *
 * Re-arms itself every tick while the controller still reports pending
 * sectors; on timeout it fails through ide_error().  Once the controller
 * is drained it pushes the next multi-write chunk, and when the whole
 * request has been written it completes every remaining chunk.
 */
static ide_startstop_t promise_write_pollfunc (ide_drive_t *drive)
{
	int i;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	struct request *rq;

	if (IN_BYTE(IDE_NSECTOR_REG) != 0) {
		/* controller still has sectors queued */
		if (time_before(jiffies, hwgroup->poll_timeout)) {
			ide_set_handler (drive, &promise_write_pollfunc, 1, NULL);
			return ide_started; /* continue polling... */
		}
		printk("%s: write timed-out!\n",drive->name);
		return ide_error (drive, "write timeout", GET_STAT());
	}

	/* a non-zero return means ide_multwrite already handled completion */
	if (ide_multwrite(drive, 4))
		return ide_stopped;
	rq = hwgroup->rq;
	/* complete all remaining chunks of the finished request */
	for (i = rq->nr_sectors; i > 0;) {
		i -= rq->current_nr_sectors;
		ide_end_request(1, hwgroup);
	}
	return ide_stopped;
}
/*
 * ide_dma_intr() is the handler for disk read/write DMA interrupts.
 *
 * Ends the DMA transaction via the hwif's dmaproc hook and, when both
 * the drive status and the DMA status are good, completes every chunk
 * of the current request.  Any failure falls through to ide_error().
 *
 * Fix: the success path used to declare rq with an initializer and then
 * immediately reassign it the identical value — the dead store has been
 * removed.
 */
void ide_dma_intr (ide_drive_t *drive)
{
	int i;
	byte stat, dma_stat;

	DPRINT("ide_dma_intr\n");
	/* stop the DMA engine and collect its completion status */
	dma_stat = HWIF(drive)->dmaproc(ide_dma_end, drive);
	stat = GET_STAT();		/* get drive status */
	DPRINT("stat=%02x\n", stat);
	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			/* complete every chunk of the finished request */
			for (i = rq->nr_sectors; i > 0;) {
				i -= rq->current_nr_sectors;
				ide_end_request(1, HWGROUP(drive));
			}
			return;
		}
		printk("%s: dma_intr: bad DMA status\n", drive->name);
	}
	ide__sti();	/* local CPU only */
	ide_error(drive, "dma_intr", stat);
}
/*
 * dma_intr() is the handler for disk read/write DMA interrupts.
 *
 * Stops the bus-master DMA engine, verifies both the DMA status and the
 * drive status, and completes every chunk of the request on success;
 * any failure falls through to ide_error().
 *
 * Fix: rq was initialized at its declaration and then redundantly
 * reassigned the identical value inside the success path — the dead
 * store has been removed.
 */
static void dma_intr (ide_drive_t *drive)
{
	byte stat, dma_stat;
	int i;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned short dma_base = HWIF(drive)->dma_base;

	dma_stat = inb(dma_base+2);		/* get DMA status */
	outb(inb(dma_base)&~1, dma_base);	/* stop DMA operation */
	stat = GET_STAT();			/* get drive status */
	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
		if ((dma_stat & 7) == 4) {	/* verify good DMA status */
			/* complete every chunk of the finished request */
			for (i = rq->nr_sectors; i > 0;) {
				i -= rq->current_nr_sectors;
				ide_end_request(1, HWGROUP(drive));
			}
			return;
		}
		printk("%s: bad DMA status: 0x%02x\n", drive->name, dma_stat);
	}
	sti();
	ide_error(drive, "dma_intr", stat);
}
/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 * Polls BUSY for up to 5 extra rounds; a good status ends the command,
 * an ERR/DRQ status enters error recovery, and anything else re-arms
 * this handler to wait for the drive to settle.
 */
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int retries = 5;
	u8 stat;

	local_irq_enable_in_hardirq();

	for (;;) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & BUSY_STAT) == 0)
			break;
		if (retries-- == 0)
			break;
		udelay(10);
	}

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}
/*
 * cdrom_newpc_intr() - interrupt handler for ATAPI packet commands.
 *
 * Handles DMA completion, decodes the drive status, transfers PIO data
 * in both directions, pads short transfers, and either re-arms itself
 * for the next interrupt or completes the request (out_end).
 * NOTE(review): as captured here the function text appears to end after
 * the out_end else-branch; confirm against the full source that the
 * trailing return/brace were not lost in extraction.
 */
static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct request *rq = hwif->rq;
	ide_expiry_t *expiry = NULL;
	int dma_error = 0, dma, stat, thislen, uptodate = 0;
	int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors;
	int sense = blk_sense_request(rq);
	unsigned int timeout;
	u16 len;
	u8 ireason;

	ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x", rq->cmd[0],
		      write);

	/* check for errors */
	dma = drive->dma;
	if (dma) {
		/* a DMA transfer was in flight: tear it down first */
		drive->dma = 0;
		drive->waiting_for_dma = 0;
		dma_error = hwif->dma_ops->dma_end(drive);
		ide_dma_unmap_sg(drive, cmd);
		if (dma_error) {
			printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name,
					write ? "write" : "read");
			ide_dma_off(drive);
		}
	}

	rc = cdrom_decode_status(drive, 0, &stat);
	if (rc) {
		if (rc == 2)
			goto out_end;
		return ide_stopped;
	}

	/* using dma, transfer is complete now */
	if (dma) {
		if (dma_error)
			return ide_error(drive, "dma error", stat);
		uptodate = 1;
		goto out_end;
	}

	ide_read_bcount_and_ireason(drive, &len, &ireason);

	/* bound this interrupt's transfer by what the drive offered */
	thislen = blk_fs_request(rq) ? len : cmd->nleft;
	if (thislen > len)
		thislen = len;

	ide_debug_log(IDE_DBG_PC, "DRQ: stat: 0x%x, thislen: %d", stat,
		      thislen);

	/* If DRQ is clear, the command has completed. */
	if ((stat & ATA_DRQ) == 0) {
		if (blk_fs_request(rq)) {
			/*
			 * If we're not done reading/writing, complain.
			 * Otherwise, complete the command normally.
			 */
			uptodate = 1;
			if (cmd->nleft > 0) {
				printk(KERN_ERR PFX "%s: %s: data underrun "
					"(%u bytes)\n", drive->name, __func__,
					cmd->nleft);
				if (!write)
					rq->cmd_flags |= REQ_FAILED;
				uptodate = 0;
			}
		} else if (!blk_pc_request(rq)) {
			ide_cd_request_sense_fixup(drive, cmd);
			/* complain if we still have data left to transfer */
			uptodate = cmd->nleft ? 0 : 1;
			if (uptodate == 0)
				rq->cmd_flags |= REQ_FAILED;
		}
		goto out_end;
	}

	/* check which way to transfer data */
	rc = ide_cd_check_ireason(drive, rq, len, ireason, write);
	if (rc)
		goto out_end;

	cmd->last_xfer_len = 0;

	ide_debug_log(IDE_DBG_PC, "data transfer, rq->cmd_type: 0x%x, "
				  "ireason: 0x%x", rq->cmd_type, ireason);

	/* transfer data */
	while (thislen > 0) {
		int blen = min_t(int, thislen, cmd->nleft);

		if (cmd->nleft == 0)
			break;

		ide_pio_bytes(drive, cmd, write, blen);
		cmd->last_xfer_len += blen;

		thislen -= blen;
		len -= blen;

		/* reads into a sense request grow its sense buffer */
		if (sense && write == 0)
			rq->sense_len += blen;
	}

	/* pad, if necessary */
	if (len > 0) {
		if (blk_fs_request(rq) == 0 || write == 0)
			ide_pad_transfer(drive, write, len);
		else {
			printk(KERN_ERR PFX "%s: confused, missing data\n",
				drive->name);
			blk_dump_rq_flags(rq, "cdrom_newpc_intr");
		}
	}

	/* pick the timeout/expiry policy for the next interrupt */
	if (blk_pc_request(rq)) {
		timeout = rq->timeout;
	} else {
		timeout = ATAPI_WAIT_PC;
		if (!blk_fs_request(rq))
			expiry = ide_cd_expiry;
	}

	hwif->expiry = expiry;
	ide_set_handler(drive, cdrom_newpc_intr, timeout);
	return ide_started;

out_end:
	if (blk_pc_request(rq) && rc == 0) {
		/* successful packet command: end it in one shot */
		unsigned int dlen = rq->data_len;

		rq->data_len = 0;

		if (blk_end_request(rq, 0, dlen))
			BUG();

		hwif->rq = NULL;
	} else {
		if (sense && uptodate)
			ide_cd_complete_failed_rq(drive, rq);

		if (blk_fs_request(rq)) {
			if (cmd->nleft == 0)
				uptodate = 1;
		} else {
			if (uptodate <= 0 && rq->errors == 0)
				rq->errors = -EIO;
		}

		if (uptodate == 0)
			ide_cd_error_cmd(drive, cmd);

		/* make sure it's fully ended */
		if (blk_pc_request(rq))
			nsectors = (rq->data_len + 511) >> 9;
		else
			nsectors = rq->hard_nr_sectors;

		if (nsectors == 0)
			nsectors = 1;

		if (blk_fs_request(rq) == 0) {
			/* account for the bytes actually moved */
			rq->data_len -= (cmd->nbytes - cmd->nleft);
			if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
				rq->data_len += cmd->last_xfer_len;
		}

		ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);

		if (sense && rc == 2)
			ide_error(drive, "request sense failure", stat);
	}
/*
 * cdrom_decode_status() - decode the drive status after a packet command.
 *
 * @drive:     the drive the status was read from
 * @good_stat: extra status bits that must be set for "success"
 * @stat_ret:  optional out-parameter receiving the raw status byte
 *
 * Returns:
 * 0: if the request should be continued.
 * 1: if the request will be going through error recovery.
 * 2: if the request should be ended.
 */
static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int stat, err, sense_key;

	/* check for errors */
	stat = hwif->tp_ops->read_status(hwif);

	if (stat_ret)
		*stat_ret = stat;

	if (OK_STAT(stat, good_stat, BAD_R_STAT))
		return 0;

	/* get the IDE error register */
	err = ide_read_error(drive);
	/* high nibble of the ATAPI error register is the SCSI sense key */
	sense_key = err >> 4;

	ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, "
				  "rq->cmd_type: 0x%x, err: 0x%x",
				  stat, good_stat, rq->cmd[0], rq->cmd_type,
				  err);

	if (blk_sense_request(rq)) {
		/*
		 * We got an error trying to get sense info from the drive
		 * (probably while trying to recover from a former error).
		 * Just give up.
		 */
		rq->cmd_flags |= REQ_FAILED;
		return 2;
	} else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) {
		/* All other functions, except for READ. */

		/*
		 * if we have an error, pass back CHECK_CONDITION as the
		 * scsi status byte
		 */
		if (blk_pc_request(rq) && !rq->errors)
			rq->errors = SAM_STAT_CHECK_CONDITION;

		/* check for tray open */
		if (sense_key == NOT_READY) {
			cdrom_saw_media_change(drive);
		} else if (sense_key == UNIT_ATTENTION) {
			/* check for media change */
			cdrom_saw_media_change(drive);
			return 0;
		} else if (sense_key == ILLEGAL_REQUEST &&
			   rq->cmd[0] == GPCMD_START_STOP_UNIT) {
			/*
			 * Don't print error message for this condition--
			 * SFF8090i indicates that 5/24/00 is the correct
			 * response to a request to close the tray if the
			 * drive doesn't have that capability.
			 * cdrom_log_sense() knows this!
			 */
		} else if (!(rq->cmd_flags & REQ_QUIET)) {
			/* otherwise, print an error */
			ide_dump_status(drive, "packet command error", stat);
		}

		rq->cmd_flags |= REQ_FAILED;

		/*
		 * instead of playing games with moving completions around,
		 * remove failed request completely and end it when the
		 * request sense has completed
		 */
		goto end_request;
	} else if (blk_fs_request(rq)) {
		int do_end_request = 0;

		/* handle errors from READ and WRITE requests */

		if (blk_noretry_request(rq))
			do_end_request = 1;

		if (sense_key == NOT_READY) {
			/* tray open */
			if (rq_data_dir(rq) == READ) {
				cdrom_saw_media_change(drive);

				/* fail the request */
				printk(KERN_ERR PFX "%s: tray open\n",
					drive->name);
				do_end_request = 1;
			} else {
				struct cdrom_info *info = drive->driver_data;

				/*
				 * Allow the drive 5 seconds to recover, some
				 * devices will return this error while flushing
				 * data from cache.
				 */
				if (!rq->errors)
					info->write_timeout = jiffies +
							ATAPI_WAIT_WRITE_BUSY;
				rq->errors = 1;
				if (time_after(jiffies, info->write_timeout))
					do_end_request = 1;
				else {
					struct request_queue *q = drive->queue;
					unsigned long flags;

					/*
					 * take a breather relying on the unplug
					 * timer to kick us again
					 */
					spin_lock_irqsave(q->queue_lock, flags);
					blk_plug_device(q);
					spin_unlock_irqrestore(q->queue_lock,
							       flags);

					return 1;
				}
			}
		} else if (sense_key == UNIT_ATTENTION) {
			/* media change */
			cdrom_saw_media_change(drive);

			/*
			 * Arrange to retry the request but be sure to give up
			 * if we've retried too many times.
			 */
			if (++rq->errors > ERROR_MAX)
				do_end_request = 1;
		} else if (sense_key == ILLEGAL_REQUEST ||
			   sense_key == DATA_PROTECT) {
			/*
			 * No point in retrying after an illegal request or data
			 * protect error.
			 */
			ide_dump_status(drive, "command error", stat);
			do_end_request = 1;
		} else if (sense_key == MEDIUM_ERROR) {
			/*
			 * No point in re-trying a zillion times on a bad
			 * sector. If we got here the error is not correctable.
			 */
			ide_dump_status(drive, "media error (bad sector)",
					stat);
			do_end_request = 1;
		} else if (sense_key == BLANK_CHECK) {
			/* disk appears blank ?? */
			ide_dump_status(drive, "media error (blank)", stat);
			do_end_request = 1;
		} else if ((err & ~ATA_ABORTED) != 0) {
			/* go to the default handler for other errors */
			ide_error(drive, "cdrom_decode_status", stat);
			return 1;
		} else if ((++rq->errors > ERROR_MAX)) {
			/* we've racked up too many retries, abort */
			do_end_request = 1;
		}

		/*
		 * End a request through request sense analysis when we have
		 * sense data. We need this in order to perform end of media
		 * processing.
		 */
		if (do_end_request)
			goto end_request;

		/*
		 * If we got a CHECK_CONDITION status, queue
		 * a request sense command.
		 */
		if (stat & ATA_ERR)
			cdrom_queue_request_sense(drive, NULL, NULL);
		return 1;
	} else {
		blk_dump_rq_flags(rq, PFX "bad rq");
		return 2;
	}

end_request:
	if (stat & ATA_ERR) {
		struct request_queue *q = drive->queue;
		unsigned long flags;

		/* pull the failed request off the queue before queuing the
		 * request-sense that will finish it */
		spin_lock_irqsave(q->queue_lock, flags);
		blkdev_dequeue_request(rq);
		spin_unlock_irqrestore(q->queue_lock, flags);

		hwif->rq = NULL;

		cdrom_queue_request_sense(drive, rq->sense, rq);
		return 1;
	} else
		return 2;
}
/*
 * ide_timer_expiry() - per-port command timeout handler.
 *
 * Runs when the port's command timer fires.  If a handler is still
 * registered and the request generation matches (i.e. no interrupt
 * raced in), it consults the optional expiry callback to possibly
 * extend the wait, otherwise it simulates the missing interrupt:
 * the port IRQ is masked, local interrupts are disabled, and the
 * stored handler (or the DMA/error recovery path) is invoked as if
 * a real interrupt had occurred.
 */
void ide_timer_expiry (unsigned long data)
{
	ide_hwif_t	*hwif = (ide_hwif_t *)data;
	ide_drive_t	*uninitialized_var(drive);
	ide_handler_t	*handler;
	unsigned long	flags;
	unsigned long	wait = -1;
	int		plug_device = 0;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) {	/* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);
		/* local CPU only, as if we were handling an interrupt */
		local_irq_disable();
		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			/* drive is ready but the IRQ never arrived */
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			(void)ide_ack_intr(hwif);
			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			/* genuine timeout: recover via DMA retry or error */
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped) {
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		/* restart request processing outside the port lock */
		ide_unlock_host(hwif->host);
		ide_plug_device(drive);
	}
}