/* * Prepare a MMC request. Essentially, this means passing the * preparation off to the media driver. The media driver will * create a mmc_io_request in req->special. */ static int mmc_prep_request(struct request_queue *q, struct request *req) { struct mmc_queue *mq = q->queuedata; int ret = BLKPREP_KILL; if (req->flags & REQ_SPECIAL) { /* * Special commands already have the command * blocks already setup in req->special. */ BUG_ON(!req->special); ret = BLKPREP_OK; } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { /* * Block I/O requests need translating according * to the protocol. */ ret = mq->prep_fn(mq, req); } else { /* * Everything else is invalid. */ blk_dump_rq_flags(req, "MMC bad request"); } if (ret == BLKPREP_OK) req->flags |= REQ_DONTPREP; return ret; }
/*
 * Request-queue handler for the HTIF block device.
 *
 * Drains the queue: filesystem requests are handed to htifblk_segment()
 * one at a time, and the queue is stopped while one is in flight;
 * non-fs requests are logged and failed with -EIO.
 *
 * NOTE(review): no locking is taken here — presumably this runs as the
 * queue's request_fn with q->queue_lock already held (blk_fetch_request
 * and blk_stop_queue require it); confirm against the caller.
 */
static void htifblk_request(struct request_queue *q)
{
	struct htifblk_device *dev;
	struct request *req;
	int ret;

	dev = q->queuedata;
	/* A request is already outstanding; wait for its completion. */
	if (dev->req != NULL)
		return;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			ret = htifblk_segment(dev, req);
			if (unlikely(ret)) {
				/* Submission failed: fail the current chunk. */
				WARN_ON(__blk_end_request_cur(req, ret));
				continue;
			}
			/* One request at a time: pause the queue until done. */
			blk_stop_queue(q);
			break;
		} else {
			blk_dump_rq_flags(req, DRIVER_NAME
				": ignored non-fs request");
			__blk_end_request_all(req, -EIO);
			continue;
		}
	}
}
/*
 * Request-queue handler for the HTIF block device (self-locking variant).
 *
 * Takes q->queue_lock itself, then drains the queue: filesystem
 * requests are handed to htifblk_segment() one at a time, stopping the
 * queue while a request is in flight; non-fs requests are logged and
 * failed with -EIO.
 */
static void htifblk_request(struct request_queue *q)
{
	struct htifblk_device *dev;
	struct request *req;
	unsigned long flags;
	int ret;

	dev = q->queuedata;
	spin_lock_irqsave(q->queue_lock, flags);
	/* A request is already outstanding; nothing more to do for now. */
	if (dev->req != NULL)
		goto out;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			ret = htifblk_segment(dev, req);
			if (unlikely(ret)) {
				/* Submission failed: fail the current chunk. */
				WARN_ON(__blk_end_request_cur(req, ret));
				continue;
			}
			/* One request at a time: pause the queue until done. */
			blk_stop_queue(q);
			break;
		} else {
			blk_dump_rq_flags(req, DRIVER_NAME
				": ignored non-fs request");
			__blk_end_request_all(req, -EIO);
			continue;
		}
	}
out:
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Prepare an MMC request: accept normal filesystem requests and
 * discards, kill everything else.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	int supported = (req->cmd_type == REQ_TYPE_FS) ||
			(req->cmd_flags & REQ_DISCARD);

	if (!supported) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	/* Don't run the prep step again for this request. */
	req->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;
}
/*
 * Prepare an MMC request: only normal filesystem requests are
 * accepted; anything else is dumped and killed.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	if (blk_fs_request(req)) {
		/* Don't run the prep step again for this request. */
		req->cmd_flags |= REQ_DONTPREP;
		return BLKPREP_OK;
	}

	blk_dump_rq_flags(req, "MMC bad request");
	return BLKPREP_KILL;
}
/*
 * Handle "special" (driver-internal) requests for an IDE drive.
 *
 * REQ_PARK_HEADS / REQ_UNPARK_HEADS become taskfile commands issued via
 * do_rw_taskfile(); REQ_DEVSET_EXEC runs a device-setting callback
 * stored in rq->special; REQ_DRIVE_RESET triggers a drive reset; any
 * other command byte is dumped and completed as failed.
 */
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
		ide_task_t task;
		struct ide_taskfile *tf = &task.tf;

		memset(&task, 0, sizeof(task));
		if (cmd == REQ_PARK_HEADS) {
			/* Park timeout was passed via rq->special. */
			drive->sleep = *(unsigned long *)rq->special;
			drive->dev_flags |= IDE_DFLAG_SLEEPING;
			tf->command = ATA_CMD_IDLEIMMEDIATE;
			/*
			 * Magic feature/LBA bytes for IDLE IMMEDIATE —
			 * presumably the ATA head-unload signature; verify
			 * against the ATA spec.
			 */
			tf->feature = 0x44;
			tf->lbal = 0x4c;
			tf->lbam = 0x4e;
			tf->lbah = 0x55;
			task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
		} else		/* cmd == REQ_UNPARK_HEADS */
			tf->command = ATA_CMD_CHK_POWER;

		task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
		task.rq = rq;
		/* Neither command transfers data. */
		drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
		return do_rw_taskfile(drive, &task);
	}

	switch (cmd) {
	case REQ_DEVSET_EXEC:
	{
		/* rq->special is the setter; cmd[1..] holds the int value. */
		int err, (*setfunc)(ide_drive_t *, int) = rq->special;

		err = setfunc(drive, *(int *)&rq->cmd[1]);
		if (err)
			rq->errors = err;
		else
			err = 1;	/* success: complete as "uptodate" */
		ide_end_request(drive, err, 0);
		return ide_stopped;
	}
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}
static int mmc_prep_request(struct request_queue *q, struct request *req) { struct mmc_queue *mq = q->queuedata; if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) { blk_dump_rq_flags(req, "MMC bad request"); return BLKPREP_KILL; } if (mq && mmc_card_removed(mq->card)) return BLKPREP_KILL; req->cmd_flags |= REQ_DONTPREP; return BLKPREP_OK; }
/* * Prepare a MMC request. This just filters out odd stuff. */ static int mmc_prep_request(struct request_queue *q, struct request *req) { struct mmc_queue *mq = q->queuedata; /* * We only like normal block requests and discards. */ if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) { blk_dump_rq_flags(req, "MMC bad request"); return BLKPREP_KILL; } if (mq && mq->card && !mmc_card_inserted(mq->card)) return BLKPREP_KILL; req->cmd_flags |= REQ_DONTPREP; return BLKPREP_OK; }
/*
 * idescsi_do_request is our request handling function.
 *
 * Sense and special requests carry a pre-built packet command in
 * rq->special and are issued directly; if DMA is enabled and the
 * scatter list maps cleanly, the command is flagged DMA-capable.
 * Any other request type is unsupported and completed as failed.
 */
static ide_startstop_t idescsi_do_request (ide_drive_t *drive,
		struct request *rq, sector_t block)
{
	debug_log("dev: %s, cmd: %x, errors: %d\n", rq->rq_disk->disk_name,
		  rq->cmd[0], rq->errors);
	debug_log("sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",
		  rq->sector, rq->nr_sectors, rq->current_nr_sectors);

	if (blk_sense_request(rq) || blk_special_request(rq)) {
		/* Packet command was attached by the submitter. */
		struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special;

		/* idescsi_map_sg() returning 0 means the sg mapped OK. */
		if (drive->using_dma && !idescsi_map_sg(drive, pc))
			pc->flags |= PC_FLAG_DMA_OK;

		return idescsi_issue_pc(drive, pc);
	}
	blk_dump_rq_flags(rq, "ide-scsi: unsup command");
	idescsi_end_request (drive, 0, 0);
	return ide_stopped;
}
/* * Prepare a -BLK_DEV request. Essentially, this means passing the * preparation off to the media driver. The media driver will * create request to CyAsDev. */ static int cyasblkdev_prep_request( struct request_queue *q, struct request *req) { DBGPRN_FUNC_NAME; /* we only like normal block requests.*/ if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) { #ifndef WESTBRIDGE_NDEBUG cy_as_hal_print_message("%s:%x bad request received\n", __func__, current->pid); #endif blk_dump_rq_flags(req, "cyasblkdev bad request"); return BLKPREP_KILL; } req->cmd_flags |= REQ_DONTPREP; return BLKPREP_OK; }
/* * Prepare a MMC request. This just filters out odd stuff. */ static int mmc_prep_request(struct request_queue *q, struct request *req) { struct mmc_queue *mq = q->queuedata; /* * We only like normal block requests and discards. */ if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD && req_op(req) != REQ_OP_SECURE_ERASE) { blk_dump_rq_flags(req, "MMC bad request"); return BLKPREP_KILL; } if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) return BLKPREP_KILL; req->rq_flags |= RQF_DONTPREP; return BLKPREP_OK; }
/** * row_reinsert_req() - Reinsert request back to the scheduler * @q: requests queue * @rq: request to add * * Reinsert the given request back to the queue it was * dispatched from as if it was never dispatched. * * Returns 0 on success, error code otherwise */ static int row_reinsert_req(struct request_queue *q, struct request *rq) { struct row_data *rd = q->elevator->elevator_data; struct row_queue *rqueue = RQ_ROWQ(rq); /* Verify rqueue is legitimate */ if (rqueue->prio >= ROWQ_MAX_PRIO) { pr_err("\n\nROW BUG: row_reinsert_req() rqueue->prio = %d\n", rqueue->prio); blk_dump_rq_flags(rq, ""); return -EIO; } list_add(&rq->queuelist, &rqueue->fifo); rd->nr_reqs[rq_data_dir(rq)]++; row_log_rowq(rd, rqueue->prio, "request reinserted"); return 0; }
/** * row_reinsert_req() - Reinsert request back to the scheduler * @q: requests queue * @rq: request to add * * Reinsert the given request back to the queue it was * dispatched from as if it was never dispatched. * * Returns 0 on success, error code otherwise */ static int row_reinsert_req(struct request_queue *q, struct request *rq) { struct row_data *rd = q->elevator->elevator_data; struct row_queue *rqueue = RQ_ROWQ(rq); if (rqueue->prio >= ROWQ_MAX_PRIO) { pr_err("\n\n%s:ROW BUG: row_reinsert_req() rqueue->prio = %d\n", rq->rq_disk->disk_name, rqueue->prio); blk_dump_rq_flags(rq, ""); return -EIO; } list_add(&rq->queuelist, &rqueue->fifo); rd->nr_reqs[rq_data_dir(rq)]++; rqueue->nr_req++; row_log_rowq(rd, rqueue->prio, "request reinserted (total on queue=%d)", rqueue->nr_req); return 0; }
static int card_prep_request(struct request_queue *q, struct request *req) { struct card_queue *cq = q->queuedata; int ret = BLKPREP_KILL; if (!cq) { //printk(KERN_ERR "[card_prep_request] %s: killing request - no device/host\n", req->rq_disk->disk_name); return BLKPREP_KILL; } if (blk_special_request(req)) { /* * Special commands already have the command * blocks already setup in req->special. */ BUG_ON(!req->special); ret = BLKPREP_OK; } else if (blk_fs_request(req) || blk_pc_request(req)) { /* * Block I/O requests need translating according * to the protocol. */ ret = cq->prep_fn(cq, req); } else { /* * Everything else is invalid. */ blk_dump_rq_flags(req, "CARD bad request"); } if (ret == BLKPREP_OK) req->cmd_flags |= REQ_DONTPREP; return ret; }
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Queues @rq on its ROW priority queue, manages the per-queue idling
 * window (cancelling a pending idle timer and re-arming idling state
 * based on inter-request time), and promotes the request to "urgent"
 * when its queue class allows and no urgent request is already pending
 * or in flight.
 */
static void row_add_request(struct request_queue *q, struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics*/

	/* A request must not arrive already flagged urgent; clear it. */
	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		/* New work for the idling queue: cancel the idle timer. */
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
					"Canceled delayed work on %d",
					rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		/* Time since this queue's previous insertion. */
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		/* Frequent submitters earn idling; sporadic ones do not. */
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	/* At most one urgent request may be pending/in flight at a time. */
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
/*
 * Main request handler for the IDE floppy driver.
 *
 * Requests that have exhausted their retries are reported/failed up
 * front.  Otherwise a packet command (pc) is selected: fs requests get
 * a freshly built read/write command, special/sense requests carry a
 * pre-built pc in rq->special, and block-pc requests are translated.
 * The command is then issued via ide_floppy_issue_pc().
 */
static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
					     struct request *rq, sector_t block)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	struct ide_cmd cmd;
	struct ide_atapi_pc *pc;

	ide_debug_log(IDE_DBG_FUNC, "enter, cmd: 0x%x\n", rq->cmd[0]);

	if (drive->debug_mask & IDE_DBG_RQ)
		blk_dump_rq_flags(rq, (rq->rq_disk
					? rq->rq_disk->disk_name
					: "dev?"));

	/* Too many errors: report and terminate the request. */
	if (rq->errors >= ERROR_MAX) {
		if (drive->failed_pc) {
			ide_floppy_report_error(floppy, drive->failed_pc);
			drive->failed_pc = NULL;
		} else
			printk(KERN_ERR PFX "%s: I/O error\n", drive->name);

		if (blk_special_request(rq)) {
			/* Special requests complete "successfully" here. */
			rq->errors = 0;
			ide_complete_rq(drive, 0, blk_rq_bytes(rq));
			return ide_stopped;
		} else
			goto out_end;
	}
	if (blk_fs_request(rq)) {
		/* fs I/O must be aligned to the device block-size factor. */
		if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
		    (blk_rq_sectors(rq) % floppy->bs_factor)) {
			printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
				drive->name);
			goto out_end;
		}
		pc = &floppy->queued_pc;
		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
	} else if (blk_special_request(rq) || blk_sense_request(rq)) {
		/* Packet command was attached by the submitter. */
		pc = (struct ide_atapi_pc *)rq->special;
	} else if (blk_pc_request(rq)) {
		pc = &floppy->queued_pc;
		idefloppy_blockpc_cmd(floppy, pc, rq);
	} else
		BUG();

	ide_prep_sense(drive, rq);

	memset(&cmd, 0, sizeof(cmd));

	if (rq_data_dir(rq))
		cmd.tf_flags |= IDE_TFLAG_WRITE;

	cmd.rq = rq;

	/* Map a scatter list only when there is data to transfer. */
	if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
		ide_map_sg(drive, &cmd);
	}

	pc->rq = rq;

	return ide_floppy_issue_pc(drive, &cmd, pc);
out_end:
	drive->failed_pc = NULL;
	if (blk_fs_request(rq) == 0 && rq->errors == 0)
		rq->errors = -EIO;
	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
	return ide_stopped;
}
/*
 * Decode the drive status after an ATAPI CD command and decide how the
 * current request should proceed.
 *
 * Returns:
 * 0: if the request should be continued.
 * 1: if the request will be going through error recovery.
 * 2: if the request should be ended.
 */
static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int stat, err, sense_key;

	/* check for errors */
	stat = hwif->tp_ops->read_status(hwif);

	if (stat_ret)
		*stat_ret = stat;

	if (OK_STAT(stat, good_stat, BAD_R_STAT))
		return 0;

	/* get the IDE error register; the sense key lives in bits 7:4 */
	err = ide_read_error(drive);
	sense_key = err >> 4;

	ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, "
				  "rq->cmd_type: 0x%x, err: 0x%x",
				  stat, good_stat, rq->cmd[0], rq->cmd_type,
				  err);

	if (blk_sense_request(rq)) {
		/*
		 * We got an error trying to get sense info from the drive
		 * (probably while trying to recover from a former error).
		 * Just give up.
		 */
		rq->cmd_flags |= REQ_FAILED;
		return 2;
	} else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) {
		/* All other functions, except for READ. */

		/*
		 * if we have an error, pass back CHECK_CONDITION as the
		 * scsi status byte
		 */
		if (blk_pc_request(rq) && !rq->errors)
			rq->errors = SAM_STAT_CHECK_CONDITION;

		/* check for tray open */
		if (sense_key == NOT_READY) {
			cdrom_saw_media_change(drive);
		} else if (sense_key == UNIT_ATTENTION) {
			/* check for media change */
			cdrom_saw_media_change(drive);
			return 0;
		} else if (sense_key == ILLEGAL_REQUEST &&
			   rq->cmd[0] == GPCMD_START_STOP_UNIT) {
			/*
			 * Don't print error message for this condition--
			 * SFF8090i indicates that 5/24/00 is the correct
			 * response to a request to close the tray if the
			 * drive doesn't have that capability.
			 * cdrom_log_sense() knows this!
			 */
		} else if (!(rq->cmd_flags & REQ_QUIET)) {
			/* otherwise, print an error */
			ide_dump_status(drive, "packet command error", stat);
		}

		rq->cmd_flags |= REQ_FAILED;

		/*
		 * instead of playing games with moving completions around,
		 * remove failed request completely and end it when the
		 * request sense has completed
		 */
		goto end_request;
	} else if (blk_fs_request(rq)) {
		int do_end_request = 0;

		/* handle errors from READ and WRITE requests */

		if (blk_noretry_request(rq))
			do_end_request = 1;

		if (sense_key == NOT_READY) {
			/* tray open */
			if (rq_data_dir(rq) == READ) {
				cdrom_saw_media_change(drive);

				/* fail the request */
				printk(KERN_ERR PFX "%s: tray open\n",
					drive->name);
				do_end_request = 1;
			} else {
				struct cdrom_info *info = drive->driver_data;

				/*
				 * Allow the drive 5 seconds to recover, some
				 * devices will return this error while flushing
				 * data from cache.
				 */
				if (!rq->errors)
					info->write_timeout = jiffies +
							ATAPI_WAIT_WRITE_BUSY;
				rq->errors = 1;
				if (time_after(jiffies, info->write_timeout))
					do_end_request = 1;
				else {
					struct request_queue *q = drive->queue;
					unsigned long flags;

					/*
					 * take a breather relying on the unplug
					 * timer to kick us again
					 */
					spin_lock_irqsave(q->queue_lock, flags);
					blk_plug_device(q);
					spin_unlock_irqrestore(q->queue_lock,
								flags);

					return 1;
				}
			}
		} else if (sense_key == UNIT_ATTENTION) {
			/* media change */
			cdrom_saw_media_change(drive);

			/*
			 * Arrange to retry the request but be sure to give up
			 * if we've retried too many times.
			 */
			if (++rq->errors > ERROR_MAX)
				do_end_request = 1;
		} else if (sense_key == ILLEGAL_REQUEST ||
			   sense_key == DATA_PROTECT) {
			/*
			 * No point in retrying after an illegal request or data
			 * protect error.
			 */
			ide_dump_status(drive, "command error", stat);
			do_end_request = 1;
		} else if (sense_key == MEDIUM_ERROR) {
			/*
			 * No point in re-trying a zillion times on a bad
			 * sector. If we got here the error is not correctable.
			 */
			ide_dump_status(drive, "media error (bad sector)",
					stat);
			do_end_request = 1;
		} else if (sense_key == BLANK_CHECK) {
			/* disk appears blank ?? */
			ide_dump_status(drive, "media error (blank)", stat);
			do_end_request = 1;
		} else if ((err & ~ATA_ABORTED) != 0) {
			/* go to the default handler for other errors */
			ide_error(drive, "cdrom_decode_status", stat);
			return 1;
		} else if ((++rq->errors > ERROR_MAX)) {
			/* we've racked up too many retries, abort */
			do_end_request = 1;
		}

		/*
		 * End a request through request sense analysis when we have
		 * sense data. We need this in order to perform end of media
		 * processing.
		 */
		if (do_end_request)
			goto end_request;

		/*
		 * If we got a CHECK_CONDITION status, queue
		 * a request sense command.
		 */
		if (stat & ATA_ERR)
			cdrom_queue_request_sense(drive, NULL, NULL);
		return 1;
	} else {
		/* Not a type of request this driver knows how to handle. */
		blk_dump_rq_flags(rq, PFX "bad rq");
		return 2;
	}

end_request:
	if (stat & ATA_ERR) {
		struct request_queue *q = drive->queue;
		unsigned long flags;

		/* Pull the failed request off the queue before sensing. */
		spin_lock_irqsave(q->queue_lock, flags);
		blkdev_dequeue_request(rq);
		spin_unlock_irqrestore(q->queue_lock, flags);

		hwif->rq = NULL;

		cdrom_queue_request_sense(drive, rq->sense, rq);
		return 1;
	} else
		return 2;
}
static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &hwif->cmd; struct request *rq = hwif->rq; ide_expiry_t *expiry = NULL; int dma_error = 0, dma, stat, thislen, uptodate = 0; int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors; int sense = blk_sense_request(rq); unsigned int timeout; u16 len; u8 ireason; ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x", rq->cmd[0], write); /* check for errors */ dma = drive->dma; if (dma) { drive->dma = 0; drive->waiting_for_dma = 0; dma_error = hwif->dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); if (dma_error) { printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name, write ? "write" : "read"); ide_dma_off(drive); } } rc = cdrom_decode_status(drive, 0, &stat); if (rc) { if (rc == 2) goto out_end; return ide_stopped; } /* using dma, transfer is complete now */ if (dma) { if (dma_error) return ide_error(drive, "dma error", stat); uptodate = 1; goto out_end; } ide_read_bcount_and_ireason(drive, &len, &ireason); thislen = blk_fs_request(rq) ? len : cmd->nleft; if (thislen > len) thislen = len; ide_debug_log(IDE_DBG_PC, "DRQ: stat: 0x%x, thislen: %d", stat, thislen); /* If DRQ is clear, the command has completed. */ if ((stat & ATA_DRQ) == 0) { if (blk_fs_request(rq)) { /* * If we're not done reading/writing, complain. * Otherwise, complete the command normally. */ uptodate = 1; if (cmd->nleft > 0) { printk(KERN_ERR PFX "%s: %s: data underrun " "(%u bytes)\n", drive->name, __func__, cmd->nleft); if (!write) rq->cmd_flags |= REQ_FAILED; uptodate = 0; } } else if (!blk_pc_request(rq)) { ide_cd_request_sense_fixup(drive, cmd); /* complain if we still have data left to transfer */ uptodate = cmd->nleft ? 
0 : 1; if (uptodate == 0) rq->cmd_flags |= REQ_FAILED; } goto out_end; } /* check which way to transfer data */ rc = ide_cd_check_ireason(drive, rq, len, ireason, write); if (rc) goto out_end; cmd->last_xfer_len = 0; ide_debug_log(IDE_DBG_PC, "data transfer, rq->cmd_type: 0x%x, " "ireason: 0x%x", rq->cmd_type, ireason); /* transfer data */ while (thislen > 0) { int blen = min_t(int, thislen, cmd->nleft); if (cmd->nleft == 0) break; ide_pio_bytes(drive, cmd, write, blen); cmd->last_xfer_len += blen; thislen -= blen; len -= blen; if (sense && write == 0) rq->sense_len += blen; } /* pad, if necessary */ if (len > 0) { if (blk_fs_request(rq) == 0 || write == 0) ide_pad_transfer(drive, write, len); else { printk(KERN_ERR PFX "%s: confused, missing data\n", drive->name); blk_dump_rq_flags(rq, "cdrom_newpc_intr"); } } if (blk_pc_request(rq)) { timeout = rq->timeout; } else { timeout = ATAPI_WAIT_PC; if (!blk_fs_request(rq)) expiry = ide_cd_expiry; } hwif->expiry = expiry; ide_set_handler(drive, cdrom_newpc_intr, timeout); return ide_started; out_end: if (blk_pc_request(rq) && rc == 0) { unsigned int dlen = rq->data_len; rq->data_len = 0; if (blk_end_request(rq, 0, dlen)) BUG(); hwif->rq = NULL; } else { if (sense && uptodate) ide_cd_complete_failed_rq(drive, rq); if (blk_fs_request(rq)) { if (cmd->nleft == 0) uptodate = 1; } else { if (uptodate <= 0 && rq->errors == 0) rq->errors = -EIO; } if (uptodate == 0) ide_cd_error_cmd(drive, cmd); /* make sure it's fully ended */ if (blk_pc_request(rq)) nsectors = (rq->data_len + 511) >> 9; else nsectors = rq->hard_nr_sectors; if (nsectors == 0) nsectors = 1; if (blk_fs_request(rq) == 0) { rq->data_len -= (cmd->nbytes - cmd->nleft); if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) rq->data_len += cmd->last_xfer_len; } ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); if (sense && rc == 2) ide_error(drive, "request sense failure", stat); }