Example #1
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
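
Examples #1 and #3 through #6 all share the same shape: queue_var_store() parses the user's string into an unsigned long, and the chosen flag is then set or cleared under q->queue_lock. A minimal sketch of that shared pattern as a single helper follows; queue_store_flag is a hypothetical name, not a function in the kernel tree, and the sketch assumes the same kernel-internal helpers (queue_var_store, queue_flag_set, queue_flag_clear) are in scope.

static ssize_t queue_store_flag(struct request_queue *q, unsigned int flag,
				const char *page, size_t count)
{
	/* Hypothetical helper, not in the kernel tree: factors out the
	 * parse-then-toggle pattern shared by the store handlers here. */
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(flag, q);
	else
		queue_flag_clear(flag, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}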
Example #2
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		if (val == 2)
			queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
Example #3
static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
				   size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #4
static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #5
static ssize_t queue_random_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
	else
		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #6
static ssize_t
queue_store_unpriv_sgio(struct request_queue *q, const char *page, size_t count)
{
	unsigned long val;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_UNPRIV_SGIO, q);
	else
		queue_flag_clear(QUEUE_FLAG_UNPRIV_SGIO, q);
	spin_unlock_irq(q->queue_lock);
	return ret;
}
Example #7
ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	int val;

	if (count) {
		struct request_queue *q = disk->queue;
		char *p = (char *) buf;

		val = simple_strtoul(p, &p, 10);
		spin_lock_irq(q->queue_lock);
		if (val)
			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
		else
			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
		spin_unlock_irq(q->queue_lock);
	}

	return count;
}
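
simple_strtoul() is documented as obsolete in current kernels: it parses leading digits and silently ignores whatever follows. A sketch of the same handler using kstrtoul(), which rejects trailing garbage while still tolerating the newline sysfs appends, is below. This is an assumed rewrite for illustration, not the driver's actual code.

ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct request_queue *q = disk->queue;
	unsigned long val;
	int err = kstrtoul(buf, 10, &val);	/* strict parse of the whole token */

	if (err)
		return err;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
	else
		queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}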
Example #8
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
Example #9
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}
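
Note that strncmp() with a fixed length matches on prefixes only, so an input like "write backwards" would also enable the flag. A stricter variant could use sysfs_streq(), which compares the whole string while ignoring a trailing newline; a sketch of just the matching logic, assuming the rest of the function stays unchanged:

	if (sysfs_streq(page, "write back"))
		set = 1;
	else if (sysfs_streq(page, "write through") ||
		 sysfs_streq(page, "none"))
		set = 0;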
Example #10
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	struct page *page;
	unsigned long io_flags;
	int error;
	u32 lba;
	u32 count;
	int data_dir;
	u32 be_lba;
	u32 be_count;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (io_flags & REQ_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			pr_debug("%s:%s:%d Out of req=%p\n",
				 skdev->name, __func__, __LINE__, q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				pr_debug("%s:%s:%d Out of msg\n",
					 skdev->name, __func__, __LINE__);
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;
		skreq->discard_page = 0;

		/*
		 * OK to now dequeue request from q.
		 *
	 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);
		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
					 skdev->name, __func__, __LINE__,
					 skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_lba = cpu_to_be32(lba);
		be_count = cpu_to_be32(count);
		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (io_flags & REQ_DISCARD) {
			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
			if (!page) {
				pr_err("request_fn:Page allocation failed.\n");
				skd_end_request(skdev, skreq, -ENOMEM);
				break;
			}
			skreq->discard_page = 1;
			req->completion_data = page;
			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);

		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);

		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		error = skd_preop_sg_list(skdev, skreq);

		if (error != 0) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only allocated resource that may go unused is
			 * the FIT msg, which could end up empty.
			 */
			pr_debug("%s:%s:%d error Out\n",
				 skdev->name, __func__, __LINE__);
			skd_end_request(skdev, skreq, error);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
				 skdev->name, __func__, __LINE__,
				 skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
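
blk_stop_queue() sets QUEUE_FLAG_STOPPED so the block layer stops invoking the request_fn; once a message buffer, request context, or in-flight slot frees up, something must restart the queue (skd_request_fn itself also clears the flag on re-entry when resources are back, as seen at its top). A minimal sketch of a completion-side counterpart follows; skd_resume_queue is a hypothetical name for illustration, not a function from skd_main.c.

static void skd_resume_queue(struct request_queue *q)
{
	unsigned long flags;

	/* blk_start_queue() must be called with the queue lock held;
	 * it clears QUEUE_FLAG_STOPPED and re-runs the queue. */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}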