Example #1
static inline struct request *start_ordered(struct request_queue *q,
					    struct request *rq)
{
	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * Prep proxy barrier request.
	 */
	blkdev_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = &q->bar_rq;
	rq->cmd_flags = 0;
	rq_init(q, rq);
	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
		rq->cmd_flags |= REQ_RW;
	if (q->ordered & QUEUE_ORDERED_FUA)
		rq->cmd_flags |= REQ_FUA;
	rq->elevator_private = NULL;
	rq->elevator_private2 = NULL;
	init_request_from_bio(rq, q->orig_bar_rq->bio);
	rq->end_io = bar_end_io;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on the
	 * fact that no fs request uses ELEVATOR_INSERT_FRONT and thus
	 * no fs request gets in between the ordered sequence.  If this
	 * request is an empty barrier, we don't need to do a postflush,
	 * since there will be no data written between the pre and post
	 * flush.  Hence a single flush will suffice.
	 */
	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
	else
		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
	else
		rq = NULL;

	return rq;
}
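
The three ordered-sequence requests above all go in with ELEVATOR_INSERT_FRONT, so queuing them post-flush first and pre-flush last leaves them at the queue head in pre-flush, barrier, post-flush dispatch order. The sketch below is a minimal userspace model of that head-insertion pattern, not kernel code; the node/insert_front names are hypothetical.

/* Minimal userspace model of the "insert at head in reverse order"
 * idea used by start_ordered().  It only shows why queuing POSTFLUSH,
 * then BAR, then PREFLUSH at the front yields PREFLUSH -> BAR ->
 * POSTFLUSH dispatch order.
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *next;
};

/* ELEVATOR_INSERT_FRONT analogue: push onto the head of the list. */
static struct node *insert_front(struct node *head, struct node *n)
{
	n->next = head;
	return n;
}

int main(void)
{
	struct node post = { "POSTFLUSH", NULL };
	struct node bar  = { "BAR",       NULL };
	struct node pre  = { "PREFLUSH",  NULL };
	struct node *head = NULL;

	/* Queue in reverse order, as start_ordered() does. */
	head = insert_front(head, &post);
	head = insert_front(head, &bar);
	head = insert_front(head, &pre);

	/* Dispatch from the head: PREFLUSH, BAR, POSTFLUSH. */
	for (struct node *n = head; n; n = n->next)
		printf("%s\n", n->name);
	return 0;
}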
Example #2
File: blk-flush.c  Project: 710leo/LVS
static struct request *queue_next_fseq(struct request_queue *q)
{
	struct request *orig_rq = q->orig_flush_rq;
	struct request *rq = &q->flush_rq;

	blk_rq_init(q, rq);

	switch (blk_flush_cur_seq(q)) {
	case QUEUE_FSEQ_PREFLUSH:
		rq->end_io = pre_flush_end_io;
		init_flush_request(q, rq, orig_rq->rq_disk);
		break;
	case QUEUE_FSEQ_DATA:
		init_request_from_bio(rq, orig_rq->bio);
		/*
		 * orig_rq->rq_disk may be different from
		 * bio->bi_bdev->bd_disk if orig_rq got here through
		 * remapping drivers.  Make sure rq->rq_disk points
		 * to the same one as orig_rq.
		 */
		rq->rq_disk = orig_rq->rq_disk;
		rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
		rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
		rq->end_io = flush_data_end_io;
		break;
	case QUEUE_FSEQ_POSTFLUSH:
		rq->end_io = post_flush_end_io;
		init_flush_request(q, rq, orig_rq->rq_disk);
		break;
	default:
		BUG();
	}

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	return rq;
}
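
queue_next_fseq() re-initializes the single embedded q->flush_rq for whichever flush stage comes next and pushes it back at the queue head, so the sequence advances one stage at a time as each end_io handler fires. Below is a hedged userspace sketch of that "lowest unfinished bit picks the next stage" state machine; the FSEQ_* values and the cur_seq() helper are illustrative only, not the blk-flush.c API.

/* Userspace sketch of walking a flush sequence stage by stage, in the
 * spirit of queue_next_fseq().  Hypothetical model, not kernel code.
 */
#include <stdio.h>

enum {
	FSEQ_STARTED   = 1 << 0,
	FSEQ_PREFLUSH  = 1 << 1,
	FSEQ_DATA      = 1 << 2,
	FSEQ_POSTFLUSH = 1 << 3,
	FSEQ_DONE      = 1 << 4,
};

/* Current stage = lowest bit not yet set in @seq (ffz-style lookup). */
static unsigned cur_seq(unsigned seq)
{
	unsigned bit;

	for (bit = 1; seq & bit; bit <<= 1)
		;
	return bit;
}

static const char *stage_name(unsigned stage)
{
	switch (stage) {
	case FSEQ_PREFLUSH:  return "PREFLUSH (issue flush command)";
	case FSEQ_DATA:      return "DATA (issue the original write)";
	case FSEQ_POSTFLUSH: return "POSTFLUSH (issue flush command)";
	default:             return "DONE";
	}
}

int main(void)
{
	unsigned seq = FSEQ_STARTED;

	/* Complete one stage per iteration until the sequence is done,
	 * as the end_io handlers would do for the real flush_rq. */
	while (cur_seq(seq) != FSEQ_DONE) {
		unsigned stage = cur_seq(seq);

		printf("next request: %s\n", stage_name(stage));
		seq |= stage;
	}
	return 0;
}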
Example #3
static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!rq->hard_nr_sectors) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * Empty barrier on a write-through device w/ ordered
		 * tag has no command to issue and without any command
		 * to issue, ordering by tag can't be used.  Drain
		 * instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	elv_dequeue_request(q, rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on the
	 * fact that no fs request uses ELEVATOR_INSERT_FRONT and thus
	 * no fs request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_RW;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If whole sequence is complete,
	 * return false to tell elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}
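
Example #3 collects the stages it does not need in a skip mask and completes them in one call at the end; the return value tells the elevator whether the whole ordered sequence already finished and the request is gone. The following userspace sketch models only that skip-mask bookkeeping, under the assumption of a simple "all stage bits set means done" completion rule; it is not blk_ordered_complete_seq().

/* Hypothetical model of the skip-mask pattern: unneeded stages are
 * accumulated in `skip` and marked complete in a single step.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	ORDSEQ_STARTED   = 1 << 0,
	ORDSEQ_PREFLUSH  = 1 << 1,
	ORDSEQ_BAR       = 1 << 2,
	ORDSEQ_POSTFLUSH = 1 << 3,
	ORDSEQ_DRAIN     = 1 << 4,
};

#define ORDSEQ_ALL (ORDSEQ_STARTED | ORDSEQ_PREFLUSH | ORDSEQ_BAR | \
		    ORDSEQ_POSTFLUSH | ORDSEQ_DRAIN)

/* Mark the @skip stages done; return true once every stage completed. */
static bool complete_seq(unsigned *ordseq, unsigned skip)
{
	*ordseq |= skip;
	return *ordseq == ORDSEQ_ALL;
}

int main(void)
{
	unsigned ordseq = ORDSEQ_STARTED;
	unsigned skip = 0;

	/* Assumed scenario: an empty barrier on a queue with no
	 * preflush requirement and nothing in flight, so every
	 * remaining stage lands in the skip mask. */
	skip |= ORDSEQ_POSTFLUSH | ORDSEQ_BAR | ORDSEQ_PREFLUSH | ORDSEQ_DRAIN;

	if (complete_seq(&ordseq, skip))
		printf("sequence already complete, request is gone\n");
	else
		printf("stages still pending: 0x%x\n", ORDSEQ_ALL & ~ordseq);
	return 0;
}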