Example #1
static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
{
	if (rq)
		blk_requeue_request(q, rq);
	if (rq || blk_peek_request(q)) {
		/* Use 3ms as that was the old plug delay */
		blk_delay_queue(q, 3);
	}
}
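blk_requeue_request() must be called with the queue lock held, so callers take the lock around this helper. A sketch of that locking wrapper, modeled on ide_requeue_and_plug() in ide-io.c (hedged: simplified, not verbatim from the driver):

/*
 * Sketch: the public entry point takes q->queue_lock around
 * __ide_requeue_and_plug(), since blk_requeue_request() requires it.
 */
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__ide_requeue_and_plug(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}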
Example #2
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock so that the number
	 * of in-flight I/Os is not incremented after the queue is
	 * stopped in dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
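The key move above is the hand-off: the request is started under the queue lock, then mapped later from a kthread worker so the expensive mapping work runs outside the lock. A minimal, self-contained sketch of that deferral pattern (my_tio, my_map_work, and my_dispatch are hypothetical names, not the dm code):

#include <linux/blkdev.h>
#include <linux/kthread.h>

struct my_tio {
	struct request *rq;
	struct kthread_work work;
};

/* Runs in the kthread worker, outside the queue lock. */
static void my_map_work(struct kthread_work *work)
{
	struct my_tio *tio = container_of(work, struct my_tio, work);

	/* ... clone/map and dispatch tio->rq here ... */
}

/* Called from the request_fn with the queue lock held. */
static void my_dispatch(struct kthread_worker *worker,
			struct my_tio *tio, struct request *rq)
{
	tio->rq = rq;
	kthread_init_work(&tio->work, my_map_work);
	kthread_queue_work(worker, &tio->work);
}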
Example #3
/* Get the next read/write request; ending requests that we don't handle */
struct request *ace_get_next_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
	return req;
}
Example #4
/* Get the next read/write request; ending requests that we don't handle */
struct request *ace_get_next_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (blk_fs_request(req))
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
	return req;
}
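Examples #3 and #4 differ only in how a filesystem request is detected. blk_fs_request() was a wrapper over the same field until its removal in 2.6.36, so the two checks are equivalent; hedged from the old <linux/blkdev.h>, its definition was:

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)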
Example #5
/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
}
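The peek-then-start pair in this drain loop is exactly what blk_fetch_request() wraps, so the same function can be written more compactly (sketch using that helper from block/blk-core.c):

static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	/* blk_fetch_request() == blk_peek_request() + blk_start_request() */
	while ((req = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(req, -EIO);
}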
Example #6
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
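The blk_stop_queue() above pairs with a restart path that runs once ring slots free up; roughly, blkfront's kick_pending_request_queues() (hedged sketch, called with the queue lock held):

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}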
Example #7
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt *t, **tt, **te;
	struct list_head *head, *pos, *nx;
	struct request *rq;
	int i;

	d->flags &= ~DEVFL_UP;

	/* clean out active and to-be-retransmitted buffers */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head)
			downdev_frame(pos);
	}
	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head)
		downdev_frame(pos);

	/* reset window dressings */
	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && (t = *tt); tt++) {
		aoecmd_wreset(t);
		t->nout = 0;
	}

	/* clean out the in-process request (if any) */
	aoe_failip(d);

	/* fast fail all pending I/O */
	if (d->blkq) {
		while ((rq = blk_peek_request(d->blkq))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
	}

	if (d->gd)
		set_capacity(d->gd, 0);
}
Example #8
static void
aoeblk_request(struct request_queue *q)
{
	struct aoedev *d;
	struct request *rq;

	d = q->queuedata;
	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_INFO "aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		while ((rq = blk_peek_request(q))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
		return;
	}
	aoecmd_work(d);
}
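aoedev_downdev() and aoeblk_request() share the same fail-fast drain loop; a hypothetical helper factoring it out (aoe_fail_pending is not a function in the aoe driver) would look like:

/* Hypothetical helper: fail every pending request on q with an error. */
static void aoe_fail_pending(struct aoedev *d, struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		blk_start_request(rq);
		aoe_end_request(d, rq, 1);
	}
}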
Example #9
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;

		blk_start_request(req);
		if (req->cmd_type != REQ_TYPE_FS) {
			printk(KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * printk(KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
		 *        dev - Devices, rq_data_dir(req),
		 *        req->sector, req->current_nr_sectors, req->flags);
		 */
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_bytes(req),
			       req->buffer, rq_data_dir(req));
		__blk_end_request_all(req, 0);
	}
}
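For completeness, the transfer routine this loop relies on is a plain memcpy into the driver's RAM-backed data area. A sketch modeled on LDD3's sbull_transfer() (KERNEL_SECTOR_SIZE is sbull's 512-byte constant; dev->data and dev->size are its vmalloc'd area and length):

static void sbull_transfer(struct sbull_dev *dev, sector_t sector,
			   unsigned long nbytes, char *buffer, int write)
{
	unsigned long offset = sector * KERNEL_SECTOR_SIZE;

	if ((offset + nbytes) > dev->size) {
		printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n",
		       offset, nbytes);
		return;
	}
	if (write)
		memcpy(dev->data + offset, buffer, nbytes);
	else
		memcpy(buffer, dev->data + offset, nbytes);
}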
Example #10
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	struct page *page;
	unsigned long io_flags;
	int error;
	u32 lba;
	u32 count;
	int data_dir;
	u32 be_lba;
	u32 be_count;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (io_flags & REQ_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			pr_debug("%s:%s:%d Out of req=%p\n",
				 skdev->name, __func__, __LINE__, q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				pr_debug("%s:%s:%d Out of msg\n",
					 skdev->name, __func__, __LINE__);
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;
		skreq->discard_page = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);
		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
					 skdev->name, __func__, __LINE__,
					 skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_lba = cpu_to_be32(lba);
		be_count = cpu_to_be32(count);
		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (io_flags & REQ_DISCARD) {
			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
			if (!page) {
				pr_err("request_fn:Page allocation failed.\n");
				skd_end_request(skdev, skreq, -ENOMEM);
				break;
			}
			skreq->discard_page = 1;
			req->completion_data = page;
			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);

		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);

		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		error = skd_preop_sg_list(skdev, skreq);

		if (error != 0) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only resource that has been allocated but might
			 * not be used is that the FIT msg could be empty.
			 */
			pr_debug("%s:%s:%d error Out\n",
				 skdev->name, __func__, __LINE__);
			skd_end_request(skdev, skreq, error);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
				 skdev->name, __func__, __LINE__,
				 skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
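The blk_stop_queue() here pairs with the QUEUE_FLAG_STOPPED check at the top of the function: a completion path only needs to restart the queue once a resource is freed, and the next invocation of skd_request_fn() clears the flag if the shortage is over. A hedged sketch of that restart hook (skd_resource_freed is a hypothetical name, simplified from the driver's completion handling):

/* Hypothetical: called with the queue lock held after returning a
 * skreq/skmsg to its free list or decrementing in_flight. */
static void skd_resource_freed(struct skd_device *skdev)
{
	if (blk_queue_stopped(skdev->queue))
		blk_start_queue(skdev->queue);
}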