/* * Complete the clone and the original request with the error status * through softirq context. */ static void dm_complete_request(struct request *rq, int error) { struct dm_rq_target_io *tio = tio_from_request(rq); tio->error = error; if (!rq->q->mq_ops) blk_complete_request(rq); else blk_mq_complete_request(rq, error); }
static void rq_end_stats(struct mapped_device *md, struct request *orig) { if (unlikely(dm_stats_used(&md->stats))) { struct dm_rq_target_io *tio = tio_from_request(orig); tio->duration_jiffies = jiffies - tio->duration_jiffies; dm_stats_account_io(&md->stats, rq_data_dir(orig), blk_rq_pos(orig), tio->n_sectors, true, tio->duration_jiffies, &tio->stats_aux); } }
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		/*
		 * No cached immutable target: resolve the target from the
		 * live table under SRCU protection (pos is 0 here).
		 */
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		/*
		 * Back off for 10ms when this request looks sequentially
		 * mergeable with the previous one (same position/direction,
		 * single-segment bio, peeked before the merge deadline) or
		 * when the target reports itself busy.
		 */
		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio &&
		     !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos &&
		     md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		/* Dequeue the request and account it as in-flight. */
		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		/* queue_lock must still be held with IRQs off at this point */
		BUG_ON(!irqs_disabled());
	}
}
/*
 * Undo request preparation so the original request can be requeued:
 * clear prep state on the legacy path and release the clone (or the
 * clone-less tio on the old path).
 */
static void dm_unprep_request(struct request *rq)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!rq->q->mq_ops) {
		/* legacy request_fn path: drop the prep marker */
		rq->special = NULL;
		rq->cmd_flags &= ~REQ_DONTPREP;
	}

	if (!clone) {
		if (!tio->md->queue->mq_ops)
			free_old_rq_tio(tio);
		return;
	}

	free_rq_clone(clone);
}