static int scc_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);

	/* specify r/w */
	out_be32((void __iomem *)hwif->dma_base, reading);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));

	/* clear INTR & ERROR flags */
	out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);

	drive->waiting_for_dma = 1;

	return 0;
}
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
			  ide_read_error(drive));

	return ide_stopped;
}
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	struct ide_cmd *cmd = rq->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	rq->errors = 0;
	ide_complete_rq(drive, 0, blk_rq_bytes(rq));

	return ide_stopped;
}
static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int count = 0;
	int ddir;

	if (rq_data_dir(rq))
		ddir = PCI_DMA_TODEVICE;
	else
		ddir = PCI_DMA_FROMDEVICE;

	if (!(count = sgiioc4_build_dma_table(drive, rq, ddir))) {
		/* try PIO instead of DMA */
		ide_map_sg(drive, rq);
		return 1;
	}

	if (rq_data_dir(rq))
		/* Writes TO the IOC4 FROM Main Memory */
		ddir = IOC4_DMA_READ;
	else
		/* Writes FROM the IOC4 TO Main Memory */
		ddir = IOC4_DMA_WRITE;

	sgiioc4_configure_for_dma(ddir, drive);

	return 0;
}
/*
 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
 * using LBA if supported, or CHS otherwise, to address sectors.
 */
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
					sector_t block)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u16 nsectors		= (u16)rq->nr_sectors;
	u8 lba48		= !!(drive->dev_flags & IDE_DFLAG_LBA48);
	u8 dma			= !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
	ide_task_t		task;
	struct ide_taskfile	*tf = &task.tf;
	ide_startstop_t		rc;

	if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
		if (block + rq->nr_sectors > 1ULL << 28)
			dma = 0;
		else
			lba48 = 0;
	}

	if (!dma) {
		ide_init_sg_cmd(drive, rq);
		ide_map_sg(drive, rq);
	}

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

	if (drive->dev_flags & IDE_DFLAG_LBA) {
		if (lba48) {
			pr_debug("%s: LBA=0x%012llx\n", drive->name,
					(unsigned long long)block);

			tf->hob_nsect = (nsectors >> 8) & 0xff;
			tf->hob_lbal  = (u8)(block >> 24);
			if (sizeof(block) != 4) {
				tf->hob_lbam = (u8)((u64)block >> 32);
				tf->hob_lbah = (u8)((u64)block >> 40);
			}

			tf->nsect  = nsectors & 0xff;
			tf->lbal   = (u8) block;
			tf->lbam   = (u8)(block >>  8);
			tf->lbah   = (u8)(block >> 16);

			task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
		} else {
static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
					     struct request *rq, sector_t block)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	struct ide_cmd cmd;
	struct ide_atapi_pc *pc;

	ide_debug_log(IDE_DBG_FUNC, "enter, cmd: 0x%x\n", rq->cmd[0]);

	if (drive->debug_mask & IDE_DBG_RQ)
		blk_dump_rq_flags(rq, (rq->rq_disk
					? rq->rq_disk->disk_name
					: "dev?"));

	if (rq->errors >= ERROR_MAX) {
		if (drive->failed_pc) {
			ide_floppy_report_error(floppy, drive->failed_pc);
			drive->failed_pc = NULL;
		} else
			printk(KERN_ERR PFX "%s: I/O error\n", drive->name);

		if (blk_special_request(rq)) {
			rq->errors = 0;
			ide_complete_rq(drive, 0, blk_rq_bytes(rq));
			return ide_stopped;
		} else
			goto out_end;
	}
	if (blk_fs_request(rq)) {
		if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
		    (blk_rq_sectors(rq) % floppy->bs_factor)) {
			printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
				drive->name);
			goto out_end;
		}
		pc = &floppy->queued_pc;
		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
	} else if (blk_special_request(rq) || blk_sense_request(rq)) {
		pc = (struct ide_atapi_pc *)rq->special;
	} else if (blk_pc_request(rq)) {
		pc = &floppy->queued_pc;
		idefloppy_blockpc_cmd(floppy, pc, rq);
	} else
		BUG();

	ide_prep_sense(drive, rq);

	memset(&cmd, 0, sizeof(cmd));

	if (rq_data_dir(rq))
		cmd.tf_flags |= IDE_TFLAG_WRITE;

	cmd.rq = rq;

	if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
		ide_map_sg(drive, &cmd);
	}

	pc->rq = rq;

	return ide_floppy_issue_pc(drive, &cmd, pc);
out_end:
	drive->failed_pc = NULL;
	if (blk_fs_request(rq) == 0 && rq->errors == 0)
		rq->errors = -EIO;
	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
	return ide_stopped;
}