Example #1
0
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	/*
	 * Capture the queue pointer before completing the request: once
	 * blk_end_request_all() runs, cmd->rq is no longer ours to
	 * dereference.
	 */
	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		goto free_cmd;
	}

	/* Restart queue if needed, as we are freeing a tag */
	if (q && !q->mq_ops && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (blk_queue_stopped(q))
			blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
free_cmd:
	free_cmd(cmd);
}
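end_cmd()'s restart only matters because the legacy submission path stops the queue when it runs out of command tags. A sketch of that counterpart, modeled on null_blk's prep hook of the same era (treat the exact body as an approximation, not a verified copy of the driver):

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);		/* non-blocking tag allocation */
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}

	/* Out of tags: stop the queue; end_cmd() restarts it on free. */
	blk_stop_queue(q);
	return BLKPREP_DEFER;
}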
Example #2
0
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
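Completions reach this handler through a per-CPU lockless list armed by the submission side. A sketch of that producer, again modeled on null_blk of the era (approximate, not a verified copy):

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	/* llist_add() returns true if the list was empty: arm the timer once. */
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
	}

	put_cpu();
}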
Example #3
0
void __blk_run_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	__blk_run_queue_uncond(q);
}
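__blk_run_queue() is the checked entry point; the unconditional worker it calls looked roughly like this in blk-core.c of the same era (comment condensed, body approximate):

inline void __blk_run_queue_uncond(struct request_queue *q)
{
	if (unlikely(blk_queue_dead(q)))
		return;

	/*
	 * request_fn may drop and reacquire the queue lock, so track
	 * active invocations so that queue teardown can wait them out.
	 */
	q->request_fn_active++;
	q->request_fn(q);
	q->request_fn_active--;
}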
Example #4
0
static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
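The symmetric start-side helper follows the same check-under-lock pattern; a sketch matching what dm-rq.c paired with the stop above (approximate):

static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}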
Example #5
0
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
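The blk_queue_stopped() check in the loop pairs with suspend: once the queue is stopped under queue_lock, no new request can be started, so the in-flight count can only drain. An illustrative sketch of that ordering (this is not dm's actual call chain; only dm_old_stop_queue() and md_in_flight() are names taken from the surrounding code):

/* Illustrative only: the ordering dm_suspend() relies on. */
static void dm_suspend_queue_sketch(struct mapped_device *md,
				    struct request_queue *q)
{
	/*
	 * 1. Stop the queue under queue_lock. dm_old_request_fn()
	 *    re-checks blk_queue_stopped() under the same lock, so it
	 *    cannot start a request after this returns.
	 */
	dm_old_stop_queue(q);

	/* 2. Wait for already-started requests to finish. */
	while (md_in_flight(md))
		msleep(1);
}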
Example #6
0
static void dm_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	blk_mq_requeue_request(rq);
	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_mq_kick_requeue_list(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
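The stopped check above keeps a late requeue kick from racing with the stop path. A sketch of that stop path as dm-rq.c had it in the same period (details approximate):

static void dm_mq_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q)) {
		spin_unlock_irqrestore(q->queue_lock, flags);
		return;
	}

	queue_flag_set(QUEUE_FLAG_STOPPED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Avoid that requeuing could restart the queue. */
	blk_mq_cancel_requeue_work(q);
	blk_mq_stop_hw_queues(q);
}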
Example #7
0
/*
 * Input/Output thread.
 */
static int sd_io_thread(void *param)
{
	struct sd_host *host = param;
	struct request *req;
	unsigned long flags;
	int nr_sectors;
	int error;

#if 0
	/*
	 * We are going to perform badly due to the read problem explained
	 * above. At least, be nice with other processes trying to use the
	 * cpu.
	 */
	set_user_nice(current, 0);
#endif

	current->flags |= PF_NOFREEZE|PF_MEMALLOC;

	mutex_lock(&host->io_mutex);
	for (;;) {
		req = NULL;
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&host->queue_lock, flags);
		if (!blk_queue_stopped(host->queue))
			req = blk_fetch_request(host->queue);
		spin_unlock_irqrestore(&host->queue_lock, flags);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			mutex_unlock(&host->io_mutex);
			schedule();
			mutex_lock(&host->io_mutex);
			continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		nr_sectors = sd_do_request(host, req);
		error = (nr_sectors < 0) ? nr_sectors : 0;

		spin_lock_irqsave(&host->queue_lock, flags);
		__blk_end_request(req, error, nr_sectors << 9);
		spin_unlock_irqrestore(&host->queue_lock, flags);
	}
	mutex_unlock(&host->io_mutex);

	return 0;
}
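The thread fetches requests itself, so the queue's request_fn only needs to wake it. A hypothetical sketch of that hook (sd_request_func and host->io_thread are assumed names, not verified against the driver):

/* Hypothetical: the request_fn that pairs with sd_io_thread(). */
static void sd_request_func(struct request_queue *q)
{
	struct sd_host *host = q->queuedata;

	/* Requests are fetched by the I/O thread; just wake it. */
	wake_up_process(host->io_thread);
}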
Example #8
0
void blk_run_queue_async(struct request_queue *q)
{
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
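blk_delay_queue(), used by dm_old_request_fn() above, drives the same delay_work with a timeout; its blk-core.c body of the era was approximately:

void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
	if (likely(!blk_queue_dead(q)))
		queue_delayed_work(kblockd_workqueue, &q->delay_work,
				   msecs_to_jiffies(msecs));
}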
Example #9
0
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	struct page *page;
	unsigned long io_flags;
	int error;
	u32 lba;
	u32 count;
	int data_dir;
	u32 be_lba;
	u32 be_count;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (io_flags & REQ_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			pr_debug("%s:%s:%d Out of req=%p\n",
				 skdev->name, __func__, __LINE__, q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				pr_debug("%s:%s:%d Out of msg\n",
					 skdev->name, __func__, __LINE__);
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;
		skreq->discard_page = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);
		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
					 skdev->name, __func__, __LINE__,
					 skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_lba = cpu_to_be32(lba);
		be_count = cpu_to_be32(count);
		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (io_flags & REQ_DISCARD) {
			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
			if (!page) {
				pr_err("request_fn:Page allocation failed.\n");
				skd_end_request(skdev, skreq, -ENOMEM);
				break;
			}
			skreq->discard_page = 1;
			req->completion_data = page;
			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);

		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);

		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		error = skd_preop_sg_list(skdev, skreq);

		if (error != 0) {
			/*
			 * Complete the native request with an error.
			 * The request context is still at the head of the
			 * free list, and the SoFIT request was encoded into
			 * the FIT msg buffer, but the FIT msg length was not
			 * updated. The only resource left dangling is the
			 * FIT msg itself, which may end up empty; that case
			 * is handled after the loop.
			 */
			pr_debug("%s:%s:%d error Out\n",
				 skdev->name, __func__, __LINE__);
			skd_end_request(skdev, skreq, error);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
				 skdev->name, __func__, __LINE__,
				 skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
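Once stopped, the queue needs an external nudge to run again: either blk_start_queue() from the completion path, or the resource re-check at the top of skd_request_fn() clearing the flag when the queue is re-entered. A hypothetical sketch of the completion-side nudge (function name and call site are assumptions):

/* Hypothetical: called from the completion path with queue_lock held. */
static void skd_restart_queue(struct skd_device *skdev)
{
	/*
	 * blk_start_queue() clears the stopped flag and reruns the
	 * request_fn; if resources are still short, skd_request_fn()
	 * simply stops the queue again.
	 */
	if (blk_queue_stopped(skdev->queue))
		blk_start_queue(skdev->queue);
}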