/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
    int result, flags;
    struct nbd_request request;
    unsigned long size = blk_rq_bytes(req);
    u32 type;

    if (req->cmd_type == REQ_TYPE_DRV_PRIV)
        type = NBD_CMD_DISC;
    else if (req->cmd_flags & REQ_DISCARD)
        type = NBD_CMD_TRIM;
    else if (req->cmd_flags & REQ_FLUSH)
        type = NBD_CMD_FLUSH;
    else if (rq_data_dir(req) == WRITE)
        type = NBD_CMD_WRITE;
    else
        type = NBD_CMD_READ;

    memset(&request, 0, sizeof(request));
    request.magic = htonl(NBD_REQUEST_MAGIC);
    request.type = htonl(type);
    if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
        request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
        request.len = htonl(size);
    }
    memcpy(request.handle, &req, sizeof(req));

    dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
        req, nbdcmd_to_ascii(type),
        (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
    result = sock_xmit(nbd, 1, &request, sizeof(request),
            (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
    if (result <= 0) {
        dev_err(disk_to_dev(nbd->disk),
            "Send control failed (result %d)\n", result);
        return -EIO;
    }

    if (type == NBD_CMD_WRITE) {
        struct req_iterator iter;
        struct bio_vec bvec;
        /*
         * we are really probing at internals to determine
         * whether to set MSG_MORE or not...
         */
        rq_for_each_segment(bvec, req, iter) {
            flags = 0;
            if (!rq_iter_last(bvec, iter))
                flags = MSG_MORE;
            dev_dbg(nbd_to_dev(nbd),
                "request %p: sending %d bytes data\n",
                req, bvec.bv_len);
            result = sock_send_bvec(nbd, &bvec, flags);
            if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                    "Send data failed (result %d)\n", result);
                return -EIO;
            }
        }
    }
    return 0;
}

static void sbd_request_func(struct request_queue *q)
{
    struct request *req;

    while ((req = blk_fetch_request(q)) != NULL) {
        if (req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_cur(req, -EIO);
            continue;
        }

        if (((blk_rq_pos(req) << 9) + blk_rq_bytes(req)) > SBD_BYTES) {
            printk(KERN_INFO "out of disk boundary\n");
            __blk_end_request_cur(req, -EIO);
            break;
        }

        printk(KERN_INFO "%s, rq_pos << 9 = %lu, rq_bytes = %lu\n",
            (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
            (unsigned long)(blk_rq_pos(req) << 9),
            (unsigned long)blk_rq_bytes(req));

        if (rq_data_dir(req) == WRITE)
            memcpy(sbd_data + (blk_rq_pos(req) << 9),
                   req->buffer, blk_rq_bytes(req));
        else
            memcpy(req->buffer,
                   sbd_data + (blk_rq_pos(req) << 9),
                   blk_rq_bytes(req));

        __blk_end_request_cur(req, 0);
    }
}

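/*
 * Illustrative sketch (not from the original sources): how a simple request
 * function like sbd_request_func() above would typically be wired into the
 * legacy single-queue block layer.  The names sbd_lock, sbd_queue,
 * sbd_setup_queue and the reuse of SBD_BYTES are assumptions for this
 * example; the calls themselves are the old blk_init_queue()-era API.
 */
static DEFINE_SPINLOCK(sbd_lock);
static struct request_queue *sbd_queue;

static int sbd_setup_queue(struct gendisk *disk)
{
    /* The queue lock is held while sbd_request_func() runs. */
    sbd_queue = blk_init_queue(sbd_request_func, &sbd_lock);
    if (!sbd_queue)
        return -ENOMEM;

    blk_queue_logical_block_size(sbd_queue, 512);
    disk->queue = sbd_queue;
    set_capacity(disk, SBD_BYTES >> 9);    /* capacity in 512-byte sectors */
    return 0;
}
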
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:      queue the io is for
 * @rq:     the source request
 * @what:   the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                 u32 what)
{
    struct blk_trace *bt = q->blk_trace;
    int rw = rq->cmd_flags & 0x03;

    if (likely(!bt))
        return;

    if (rq->cmd_flags & REQ_SYNC)
        rw |= (1 << BIO_RW_SYNCIO);
    if (rq->cmd_flags & REQ_META)
        rw |= (1 << BIO_RW_META);
    if (rq->cmd_flags & REQ_DISCARD)
        rw |= BIO_DISCARD;
    if (rq->cmd_flags & REQ_FLUSH)
        rw |= BIO_FLUSH;
    if (rq->cmd_flags & REQ_FUA)
        rw |= BIO_FUA;

    if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
        what |= BLK_TC_ACT(BLK_TC_PC);
        __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
                what, rq->errors, rq->cmd_len, rq->cmd);
    } else {
        what |= BLK_TC_ACT(BLK_TC_FS);
        __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
                what, rq->errors, 0, NULL);
    }
}

/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
    int result, flags;
    struct nbd_request request;
    unsigned long size = blk_rq_bytes(req);

    request.magic = htonl(NBD_REQUEST_MAGIC);
    request.type = htonl(nbd_cmd(req));

    if (nbd_cmd(req) == NBD_CMD_FLUSH) {
        /* Other values are reserved for FLUSH requests.  */
        request.from = 0;
        request.len = 0;
    } else {
        request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
        request.len = htonl(size);
    }
    memcpy(request.handle, &req, sizeof(req));

    dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
            nbd->disk->disk_name, req,
            nbdcmd_to_ascii(nbd_cmd(req)),
            (unsigned long long)blk_rq_pos(req) << 9,
            blk_rq_bytes(req));
    result = sock_xmit(nbd, 1, &request, sizeof(request),
            (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
    if (result <= 0) {
        dev_err(disk_to_dev(nbd->disk),
            "Send control failed (result %d)\n", result);
        goto error_out;
    }

    if (nbd_cmd(req) == NBD_CMD_WRITE) {
        struct req_iterator iter;
        struct bio_vec *bvec;
        /*
         * we are really probing at internals to determine
         * whether to set MSG_MORE or not...
         */
        rq_for_each_segment(bvec, req, iter) {
            flags = 0;
            if (!rq_iter_last(req, iter))
                flags = MSG_MORE;
            dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
                    nbd->disk->disk_name, req, bvec->bv_len);
            result = sock_send_bvec(nbd, bvec, flags);
            if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                    "Send data failed (result %d)\n", result);
                goto error_out;
            }
        }
    }
    return 0;

error_out:
    return -EIO;
}

static void zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t start = blk_rq_pos(req) << 9;
    uint64_t end = start + blk_rq_bytes(req);
    int error;
    rl_t *rl;

    /*
     * Annotate this call path with a flag that indicates that it is
     * unsafe to use KM_SLEEP during memory allocations due to the
     * potential for a deadlock.  KM_PUSHPAGE should be used instead.
     */
    ASSERT(!(current->flags & PF_NOFS));
    current->flags |= PF_NOFS;

    if (end > zv->zv_volsize) {
        blk_end_request(req, -EIO, blk_rq_bytes(req));
        goto out;
    }

    /*
     * Align the request to volume block boundaries. If we don't,
     * then this will force dnode_free_range() to zero out the
     * unaligned parts, which is slow (read-modify-write) and
     * useless since we are not freeing any space by doing so.
     */
    start = P2ROUNDUP(start, zv->zv_volblocksize);
    end = P2ALIGN(end, zv->zv_volblocksize);

    if (start >= end) {
        blk_end_request(req, 0, blk_rq_bytes(req));
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);

    blk_end_request(req, -error, blk_rq_bytes(req));
out:
    current->flags &= ~PF_NOFS;
}

void probe_block_rq_requeue(void *data, struct request_queue *q,
                struct request *rq)
{
    int rw = rq->cmd_flags & 0x03;

    if (blk_discard_rq(rq))
        rw |= (1 << BIO_RW_DISCARD);

    if (blk_pc_request(rq)) {
        trace_mark_tp(block, rq_requeue_pc, block_rq_requeue,
            probe_block_rq_requeue,
            "data_len %u rw %d errors %d",
            blk_rq_bytes(rq), rw, rq->errors);
    } else {
        /*
         * FIXME Using a simple trace_mark for the second event
         * possibility because tracepoints do not support multiple
         * connections to the same probe yet. They should have some
         * refcounting. Need to enable both rq_requeue_pc and
         * rq_requeue_fs markers to have the rq_requeue_fs marker
         * enabled.
         */
        trace_mark(block, rq_requeue_fs,
            "hard_sector %llu rw %d errors %d",
            (unsigned long long)blk_rq_pos(rq),
            rw, rq->errors);
    }
}

ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
{
    struct request *rq;
    u8 err;

    err = ide_dump_status(drive, msg, stat);

    rq = drive->hwif->rq;
    if (rq == NULL)
        return ide_stopped;

    /* retry only "normal" I/O: */
    if (blk_rq_is_passthrough(rq)) {
        if (ata_taskfile_request(rq)) {
            struct ide_cmd *cmd = rq->special;

            if (cmd)
                ide_complete_cmd(drive, cmd, stat, err);
        } else if (ata_pm_request(rq)) {
            rq->errors = 1;
            ide_complete_pm_rq(drive, rq);
            return ide_stopped;
        }
        rq->errors = err;
        ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
        return ide_stopped;
    }

    return __ide_error(drive, rq, stat, err);
}

/*
 * Called when an error was detected during the last packet command.
 * We queue a request sense packet command at the head of the request
 * queue.
 */
void ide_retry_pc(ide_drive_t *drive)
{
    struct request *failed_rq = drive->hwif->rq;
    struct request *sense_rq = &drive->sense_rq;
    struct ide_atapi_pc *pc = &drive->request_sense_pc;

    (void)ide_read_error(drive);

    /* init pc from sense_rq */
    ide_init_pc(pc);
    memcpy(pc->c, sense_rq->cmd, 12);

    if (drive->media == ide_tape)
        drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;

    /*
     * Push back the failed request and put request sense on top
     * of it.  The failed command will be retried after sense data
     * is acquired.
     */
    drive->hwif->rq = NULL;
    ide_requeue_and_plug(drive, failed_rq);
    if (ide_queue_sense_rq(drive, pc)) {
        blk_start_request(failed_rq);
        ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
    }
}

static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
                     struct request *rq)
{
    struct ide_cmd *cmd = rq->special;

    if (cmd) {
        if (cmd->protocol == ATA_PROT_PIO) {
            ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
            ide_map_sg(drive, cmd);
        }

        return do_rw_taskfile(drive, cmd);
    }

    /*
     * NULL is actually a valid way of waiting for
     * all current requests to be flushed from the queue.
     */
#ifdef DEBUG
    printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
    rq->errors = 0;
    ide_complete_rq(drive, 0, blk_rq_bytes(rq));

    return ide_stopped;
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:      queue the io is for
 * @rq:     io request
 * @data:   driver-specific data
 * @len:    length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
             struct request *rq,
             void *data, size_t len)
{
    struct blk_trace *bt = q->blk_trace;

    if (likely(!bt))
        return;

    if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
        __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
                BLK_TA_DRV_DATA, rq->errors, len, data);
    else
        __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
                BLK_TA_DRV_DATA, rq->errors, len, data);
}

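/*
 * Illustrative sketch (not part of the original sources): how a block driver
 * could attach a small driver-specific record to a request via the
 * blk_add_driver_data() helper defined above.  The struct my_drv_state, its
 * fields and my_drv_trace_completion() are assumptions made up for this
 * example.
 */
struct my_drv_state {
    u32 tag;
    u32 hw_status;
};

static void my_drv_trace_completion(struct request_queue *q,
                    struct request *rq, u32 tag, u32 hw_status)
{
    struct my_drv_state info = {
        .tag = tag,
        .hw_status = hw_status,
    };

    /* The record is copied into the trace stream, so a stack buffer is fine. */
    blk_add_driver_data(q, rq, &info, sizeof(info));
}
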
static void zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (offset + size > zv->zv_volsize) {
        blk_end_request(req, -EIO, size);
        return;
    }

    if (size == 0) {
        blk_end_request(req, 0, size);
        return;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);

    blk_end_request(req, -error, size);
}

static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
                       struct ide_cmd *cmd,
                       struct ide_atapi_pc *pc)
{
    struct ide_disk_obj *floppy = drive->driver_data;

    if (drive->failed_pc == NULL && pc->c[0] != GPCMD_REQUEST_SENSE)
        drive->failed_pc = pc;

    drive->pc = pc;

    if (pc->retries > IDEFLOPPY_MAX_PC_RETRIES) {
        unsigned int done = blk_rq_bytes(drive->hwif->rq);

        if (!(pc->flags & PC_FLAG_SUPPRESS_ERROR))
            ide_floppy_report_error(floppy, pc);

        pc->error = IDE_DRV_ERROR_GENERAL;

        drive->failed_pc = NULL;
        drive->pc_callback(drive, 0);
        ide_complete_rq(drive, -EIO, done);
        return ide_stopped;
    }

    ide_debug_log(IDE_DBG_FUNC, "retry #%d", pc->retries);

    pc->retries++;

    return ide_issue_pc(drive, cmd);
}

/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and in to
 * a linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void zvol_read(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (size == 0) {
        blk_end_request(req, 0, size);
        return;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

    zfs_range_unlock(rl);

    /* convert checksum errors into IO errors */
    if (error == ECKSUM)
        error = EIO;

    blk_end_request(req, -error, size);
}

/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and in to
 * a linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void zvol_read(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (size == 0) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

    zfs_range_unlock(rl);

    /* convert checksum errors into IO errors */
    if (error == ECKSUM)
        error = SET_ERROR(EIO);

out:
    blk_end_request(req, -error, size);
    spl_fstrans_unmark(cookie);
}

static void zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t start = blk_rq_pos(req) << 9;
    uint64_t end = start + blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (end > zv->zv_volsize) {
        error = EIO;
        goto out;
    }

    /*
     * Align the request to volume block boundaries. If we don't,
     * then this will force dnode_free_range() to zero out the
     * unaligned parts, which is slow (read-modify-write) and
     * useless since we are not freeing any space by doing so.
     */
    start = P2ROUNDUP(start, zv->zv_volblocksize);
    end = P2ALIGN(end, zv->zv_volblocksize);

    if (start >= end) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);
out:
    blk_end_request(req, -error, blk_rq_bytes(req));
    spl_fstrans_unmark(cookie);
}

/*
 * Common request path.  Rather than registering a custom make_request()
 * function we use the generic Linux version.  This is done because it
 * allows us to easily merge read requests which would otherwise be
 * performed synchronously by the DMU.  This is less critical in the write
 * case where the DMU will perform the correct merging within a transaction
 * group.  Using the generic make_request() also lets us leverage the fact
 * that the elevator will ensure correct ordering with regard to barrier
 * IOs.  On the downside it means that in the write case we end up doing
 * request merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq.  This
 * function simply performs basic request sanity checking and hands off
 * the request.
 */
static void zvol_request(struct request_queue *q)
{
    zvol_state_t *zv = q->queuedata;
    struct request *req;
    unsigned int size;

    while ((req = blk_fetch_request(q)) != NULL) {
        size = blk_rq_bytes(req);

        if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
            get_capacity(zv->zv_disk)) {
            printk(KERN_INFO
                   "%s: bad access: block=%llu, count=%lu\n",
                   req->rq_disk->disk_name,
                   (long long unsigned)blk_rq_pos(req),
                   (long unsigned)blk_rq_sectors(req));
            __blk_end_request(req, -EIO, size);
            continue;
        }

        if (!blk_fs_request(req)) {
            printk(KERN_INFO "%s: non-fs cmd\n",
                   req->rq_disk->disk_name);
            __blk_end_request(req, -EIO, size);
            continue;
        }

        switch (rq_data_dir(req)) {
        case READ:
            zvol_dispatch(zvol_read, req);
            break;
        case WRITE:
            if (unlikely(get_disk_ro(zv->zv_disk)) ||
                unlikely(zv->zv_flags & ZVOL_RDONLY)) {
                __blk_end_request(req, -EROFS, size);
                break;
            }

#ifdef HAVE_BLK_QUEUE_DISCARD
            if (req->cmd_flags & VDEV_REQ_DISCARD) {
                zvol_dispatch(zvol_discard, req);
                break;
            }
#endif /* HAVE_BLK_QUEUE_DISCARD */

            zvol_dispatch(zvol_write, req);
            break;
        default:
            printk(KERN_INFO "%s: unknown cmd: %d\n",
                   req->rq_disk->disk_name, (int)rq_data_dir(req));
            __blk_end_request(req, -EIO, size);
            break;
        }
    }
}

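/*
 * Illustrative sketch (not from the original sources): one plausible shape
 * for the zvol_dispatch() helper used in zvol_request() above, assuming an
 * SPL taskq named zvol_taskq.  The real implementation may differ; this only
 * shows how the request is handed off so the spin-locked request function
 * can return quickly.
 */
static taskq_t *zvol_taskq;

static int zvol_dispatch(task_func_t func, struct request *req)
{
    /* Hand the request to the taskq without sleeping in atomic context. */
    if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
        return (-EBUSY);

    return (0);
}
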
ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{
    int err, (*setfunc)(ide_drive_t *, int) = ide_req(rq)->special;

    err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
    if (err)
        scsi_req(rq)->result = err;
    ide_complete_rq(drive, 0, blk_rq_bytes(rq));
    return ide_stopped;
}

ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{
    int err, (*setfunc)(ide_drive_t *, int) = rq->special;

    err = setfunc(drive, *(int *)&rq->cmd[1]);
    if (err)
        rq->errors = err;
    ide_complete_rq(drive, err, blk_rq_bytes(rq));
    return ide_stopped;
}

static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
    struct request *rq = drive->hwif->rq;

    if (rq && rq->cmd_type == REQ_TYPE_SPECIAL &&
        rq->cmd[0] == REQ_DRIVE_RESET) {
        if (err <= 0 && rq->errors == 0)
            rq->errors = -EIO;
        ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
    }
}

static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
    struct request *rq = drive->hwif->rq;

    if (rq && ata_misc_request(rq) &&
        scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
        if (err <= 0 && rq->errors == 0)
            rq->errors = -EIO;
        ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
    }
}

void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
{
    int rw = rq->cmd_flags & 0x03;
    int bytes;

    if (rq->cmd_flags & REQ_DISCARD)
        rw |= REQ_DISCARD;

    bytes = blk_rq_bytes(rq);

    blk_fill_rwbs(rwbs, rw, bytes);
}

void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
{
    int rw = rq->cmd_flags & 0x03;
    int bytes;

    if (blk_discard_rq(rq))
        rw |= (1 << BIO_RW_DISCARD);

    bytes = blk_rq_bytes(rq);

    blk_fill_rwbs(rwbs, rw, bytes);
}

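/*
 * Illustrative sketch (not part of the original sources): typical use of
 * blk_fill_rwbs_rq() from a tracing hook.  The helper above fills a short
 * string such as "R" or "WS" describing the request direction and flags.
 * The function name my_probe_rq_complete and the pr_debug format are
 * assumptions made up for this example.
 */
static void my_probe_rq_complete(struct request_queue *q, struct request *rq)
{
    char rwbs[8];   /* large enough for the short flag string */

    blk_fill_rwbs_rq(rwbs, rq);
    pr_debug("complete: sector %llu, %u bytes, rwbs=%s\n",
         (unsigned long long)blk_rq_pos(rq), blk_rq_bytes(rq), rwbs);
}
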
static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
        struct ide_atapi_pc *pc, struct request *rq)
{
    ide_init_pc(pc);
    memcpy(pc->c, rq->cmd, sizeof(pc->c));
    pc->rq = rq;
    if (blk_rq_bytes(rq)) {
        pc->flags |= PC_FLAG_DMA_OK;
        if (rq_data_dir(rq) == WRITE)
            pc->flags |= PC_FLAG_WRITING;
    }
}

/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data in to the DMU and
 * signaling the request queue with the result of the copy.
 */
static void zvol_write(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error = 0;
    dmu_tx_t *tx;
    rl_t *rl;

    if (req->cmd_flags & VDEV_REQ_FLUSH)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

    /*
     * Some requests are just for flush and nothing else.
     */
    if (size == 0) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

    /* This will only fail for ENOSPC */
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        zfs_range_unlock(rl);
        goto out;
    }

    error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
    if (error == 0)
        zvol_log_write(zv, tx, offset, size,
            req->cmd_flags & VDEV_REQ_FUA);

    dmu_tx_commit(tx);
    zfs_range_unlock(rl);

    if ((req->cmd_flags & VDEV_REQ_FUA) ||
        zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

out:
    blk_end_request(req, -error, size);
    spl_fstrans_unmark(cookie);
}

static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                 u32 what)
{
    struct blk_trace *bt = q->blk_trace;
    int rw = rq->cmd_flags & 0x03;

    if (likely(!bt))
        return;

    if (blk_discard_rq(rq))
        rw |= (1 << BIO_RW_DISCARD);

    if (blk_pc_request(rq)) {
        what |= BLK_TC_ACT(BLK_TC_PC);
        __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
                what, rq->errors, rq->cmd_len, rq->cmd);
    } else {
        what |= BLK_TC_ACT(BLK_TC_FS);
        __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
                what, rq->errors, 0, NULL);
    }
}

int ide_cd_get_xferlen(struct request *rq)
{
    switch (rq->cmd_type) {
    case REQ_TYPE_FS:
        return 32768;
    case REQ_TYPE_ATA_SENSE:
    case REQ_TYPE_BLOCK_PC:
    case REQ_TYPE_ATA_PC:
        return blk_rq_bytes(rq);
    default:
        return 0;
    }
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
    size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

    BUG_ON(!req->nr_phys_segments);

    buf->sg_list = kzalloc(sz, GFP_KERNEL);
    if (!buf->sg_list)
        return -ENOMEM;
    sg_init_table(buf->sg_list, req->nr_phys_segments);
    buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
    buf->payload_len = blk_rq_bytes(req);
    return 0;
}

int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
    struct request *rq = *rqp;
    const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

    if (!q->ordseq) {
        if (!is_barrier)
            return 1;

        if (q->next_ordered != QUEUE_ORDERED_NONE) {
            *rqp = start_ordered(q, rq);
            return 1;
        } else {
            /*
             * This can happen when the queue switches to
             * ORDERED_NONE while this request is on it.
             */
            blkdev_dequeue_request(rq);
            if (__blk_end_request(rq, -EOPNOTSUPP,
                          blk_rq_bytes(rq)))
                BUG();
            *rqp = NULL;
            return 0;
        }
    }

    /*
     * Ordered sequence in progress
     */

    /* Special requests are not subject to ordering rules. */
    if (!blk_fs_request(rq) &&
        rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
        return 1;

    if (q->ordered & QUEUE_ORDERED_TAG) {
        /* Ordered by tag.  Blocking the next barrier is enough. */
        if (is_barrier && rq != &q->bar_rq)
            *rqp = NULL;
    } else {
        /* Ordered by draining.  Wait for turn. */
        WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
        if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
            *rqp = NULL;
    }

    return 1;
}

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
    struct request *rq = *rqp;
    const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

    if (!q->ordseq) {
        if (!is_barrier)
            return true;

        if (q->next_ordered != QUEUE_ORDERED_NONE)
            return start_ordered(q, rqp);
        else {
            /*
             * Queue ordering not supported.  Terminate
             * with prejudice.
             */
            elv_dequeue_request(q, rq);
            if (__blk_end_request(rq, -EOPNOTSUPP,
                          blk_rq_bytes(rq)))
                BUG();
            *rqp = NULL;
            return false;
        }
    }

    /*
     * Ordered sequence in progress
     */

    /* Special requests are not subject to ordering rules. */
    if (!blk_fs_request(rq) &&
        rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
        return true;

    if (q->ordered & QUEUE_ORDERED_BY_TAG) {
        /* Ordered by tag.  Blocking the next barrier is enough. */
        if (is_barrier && rq != &q->bar_rq)
            *rqp = NULL;
    } else {
        /* Ordered by draining.  Wait for turn. */
        WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
        if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
            *rqp = NULL;
    }

    return true;
}

void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
    u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
    u8 media = drive->media;

    drive->failed_pc = NULL;

    if ((media == ide_floppy || media == ide_tape) && drv_req) {
        rq->errors = 0;
    } else {
        if (media == ide_tape)
            rq->errors = IDE_DRV_ERROR_GENERAL;
        else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
            rq->errors = -EIO;
    }

    ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
}