static struct request *
sio_latter_request(struct request_queue *q, struct request *rq)
{
	struct sio_data *sd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);
	const int data_dir = rq_data_dir(rq);

	if (rq->queuelist.next == &sd->fifo_list[sync][data_dir])
		return NULL;

	/* Return latter request */
	return list_entry(rq->queuelist.next, struct request, queuelist);
}
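For context, SIO-style schedulers usually pair this with a "former request" lookup that walks queuelist.prev against the same per-sync, per-direction FIFO head. A minimal sketch under that assumption (the helper name is illustrative, not confirmed by this listing):

static struct request *
sio_former_request(struct request_queue *q, struct request *rq)
{
	struct sio_data *sd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);
	const int data_dir = rq_data_dir(rq);

	/* rq is the first entry in its FIFO: there is no former request */
	if (rq->queuelist.prev == &sd->fifo_list[sync][data_dir])
		return NULL;

	/* Return the request queued immediately before rq */
	return list_entry(rq->queuelist.prev, struct request, queuelist);
}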
Example #2
/*
 * add rq to rbtree and fifo
 */
static void
vr_add_request(struct request_queue *q, struct request *rq)
{
	struct vr_data *vd = vr_get_data(q);
	const int dir = rq_is_sync(rq);

	vr_add_rq_rb(vd, rq);

	if (vd->fifo_expire[dir]) {
		rq_set_fifo_time(rq, jiffies + vd->fifo_expire[dir]);
		list_add_tail(&rq->queuelist, &vd->fifo_list[dir]);
	}
}
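The rbtree half mentioned in the comment is not shown in this example. A hedged sketch of what vr_add_rq_rb could look like, assuming struct vr_data keeps a sector-sorted rb_root (the sort_list field is an assumption made only for illustration):

static void vr_add_rq_rb(struct vr_data *vd, struct request *rq)
{
	/* keep requests sorted by start sector for position-based dispatch */
	elv_rb_add(&vd->sort_list, rq);
}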
Example #3
static void
sio_add_request(struct request_queue *q, struct request *rq)
{
	struct sio_data *sd = q->elevator->elevator_data;
	const int sync = rq_is_sync(rq);

	/*
	 * Add request to the proper fifo list and set its
	 * expire time.
	 */
	rq_set_fifo_time(rq, jiffies + sd->fifo_expire[sync]);
	list_add_tail(&rq->queuelist, &sd->fifo_list[sync]);
}
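Once the expire time is stored, the dispatch side typically tests the head of the FIFO against jiffies. A hedged sketch of such a check, reusing the struct sio_data layout from this example (the helper name is illustrative):

static int
sio_expired_request(struct sio_data *sd, int sync)
{
	struct request *rq;

	if (list_empty(&sd->fifo_list[sync]))
		return 0;

	/* Only the head of the FIFO can have expired */
	rq = rq_entry_fifo(sd->fifo_list[sync].next);

	/* Expired once the stored deadline has passed */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}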
Example #4
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		return;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size, rq_is_sync(req));

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if (rq_is_sync(req))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
}
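For context, this worker runs asynchronously: the driver's request handler would hand each write request to the zvol taskq and return. A simplified sketch under that assumption (locking and dispatch-failure handling omitted; zvol_taskq and the surrounding request function are treated as assumptions here, not the actual zvol code):

static void zvol_request_sketch(struct request_queue *q)
{
	struct request *req;

	/* request_fn style: entered with the queue lock held */
	while ((req = blk_fetch_request(q)) != NULL) {
		if (rq_data_dir(req) == WRITE)
			taskq_dispatch(zvol_taskq, zvol_write, req, TQ_NOSLEEP);
		/* reads would be handed to a matching zvol_read worker */
	}
}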
Example #5
/*
 * row_get_queue_prio() - Get queue priority for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on).
 *
 */
static enum row_queue_prio row_get_queue_prio(struct request *rq,
				struct row_data *rd)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	enum row_queue_prio q_type = ROWQ_MAX_PRIO;
	int ioprio_class = IOPRIO_PRIO_CLASS(rq->elv.icq->ioc->ioprio);

	if (unlikely(row_get_current()->flags & PF_MUTEX_GC)) {
		if (data_dir == READ)
			q_type = ROWQ_PRIO_HIGH_READ;
		else
			q_type = ROWQ_PRIO_HIGH_SWRITE;
		return q_type;
	}
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_HIGH_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_HIGH_SWRITE;
		else {
			q_type = ROWQ_PRIO_REG_WRITE;
		}
		break;
	case IOPRIO_CLASS_IDLE:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_LOW_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_LOW_SWRITE;
		else {
			pr_err("%s:%s(): got a simple write from IDLE_CLASS. How???",/*lint !e585*/
				rq->rq_disk->disk_name, __func__);
			q_type = ROWQ_PRIO_REG_WRITE;
		}
		break;
	case IOPRIO_CLASS_NONE:
	case IOPRIO_CLASS_BE:
	default:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_REG_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_REG_SWRITE;
		else
			q_type = ROWQ_PRIO_REG_WRITE;
		break;
	}

	return q_type;
}
Example #6
static void fiops_completed_request(struct request_queue *q, struct request *rq)
{
	struct fiops_data *fiopsd = q->elevator->elevator_data;
	struct fiops_ioc *ioc = RQ_CIC(rq);

	fiopsd->in_flight[rq_is_sync(rq)]--;
	ioc->in_flight--;

	fiops_log_ioc(fiopsd, ioc, "in_flight %d, busy queues %d",
		ioc->in_flight, fiopsd->busy_queues);

	if (fiopsd->in_flight[0] + fiopsd->in_flight[1] == 0)
		fiops_schedule_dispatch(fiopsd);
}
Example #7
static u64 fiops_scaled_vios(struct fiops_data *fiopsd,
	struct fiops_ioc *ioc, struct request *rq)
{
	int vios = VIOS_SCALE;

	if (rq_data_dir(rq) == WRITE)
		vios = vios * fiopsd->write_scale / fiopsd->read_scale;

	if (!rq_is_sync(rq))
		vios = vios * fiopsd->async_scale / fiopsd->sync_scale;

	vios += vios * (ioc->ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE;

	return vios;
}
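To make the scaling concrete, here is a small self-contained example with made-up scale values; the constants and numbers below are illustrative only, not the scheduler's defaults:

#include <stdio.h>

#define VIOS_SCALE	100	/* illustrative base cost */
#define VIOS_PRIO_SCALE	8	/* illustrative priority divisor */
#define IOPRIO_NORM	4

int main(void)
{
	int vios = VIOS_SCALE;
	int write_scale = 2, read_scale = 1;	/* writes cost 2x reads */
	int async_scale = 3, sync_scale = 2;	/* async costs 1.5x sync */
	int ioprio = 6;				/* two steps below normal priority */

	/* an asynchronous write from a low-priority task */
	vios = vios * write_scale / read_scale;				/* 100 -> 200 */
	vios = vios * async_scale / sync_scale;				/* 200 -> 300 */
	vios += vios * (ioprio - IOPRIO_NORM) / VIOS_PRIO_SCALE;	/* 300 + 75 = 375 */

	printf("scaled vios = %d\n", vios);
	return 0;
}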
Example #8
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (q->in_flight[0] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
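The lock-ordering comment above refers to the release side: the tag_map bit effectively guards tag_index[tag], so it has to be cleared with unlock semantics only after the slot is torn down. A simplified sketch of that counterpart (the error checks of the real blk_queue_end_tag are omitted here):

void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;
	bqt->tag_index[tag] = NULL;

	/* pairs with test_and_set_bit_lock() in blk_queue_start_tag() */
	clear_bit_unlock(tag, bqt->tag_map);
}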
Example #9
/* return vios dispatched */
static u64 fiops_dispatch_request(struct fiops_data *fiopsd,
	struct fiops_ioc *ioc)
{
	struct request *rq;
	struct request_queue *q = fiopsd->queue;

	rq = rq_entry_fifo(ioc->fifo.next);

	fiops_remove_request(rq);
	elv_dispatch_add_tail(q, rq);

	fiopsd->in_flight[rq_is_sync(rq)]++;
	ioc->in_flight++;

	return fiops_scaled_vios(fiopsd, ioc, rq);
}
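The returned cost is presumably charged back to the issuing context so that contexts accumulate virtual I/O and take turns accordingly. A hedged sketch of such a caller; fiops_select_ioc() and the vios field are assumptions used only for illustration:

static int fiops_dispatch_one(struct fiops_data *fiopsd)
{
	struct fiops_ioc *ioc = fiops_select_ioc(fiopsd);	/* assumed helper */

	if (!ioc)
		return 0;

	/* charge the dispatched request's cost to this context */
	ioc->vios += fiops_dispatch_request(fiopsd, ioc);
	return 1;
}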
Example #10
/*
 * row_get_queue_prio() - Get queue priority for a given request
 *
 * This is a helper function whose purpose is to determine which
 * ROW queue the given request should be added to (and
 * dispatched from later on).
 *
 */
static enum row_queue_prio row_get_queue_prio(struct request *rq)
{
	const int data_dir = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	enum row_queue_prio q_type = ROWQ_MAX_PRIO;
	int ioprio_class = IOPRIO_PRIO_CLASS(rq->elv.icq->ioc->ioprio);

	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_HIGH_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_HIGH_SWRITE;
		else {
			pr_err("%s:%s(): got a simple write from RT_CLASS. How???",
				rq->rq_disk->disk_name, __func__);
			q_type = ROWQ_PRIO_REG_WRITE;
		}
		rq->cmd_flags |= REQ_URGENT;
		break;
	case IOPRIO_CLASS_IDLE:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_LOW_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_LOW_SWRITE;
		else {
			pr_err("%s:%s(): got a simple write from IDLE_CLASS. How???",
				rq->rq_disk->disk_name, __func__);
			q_type = ROWQ_PRIO_REG_WRITE;
		}
		break;
	case IOPRIO_CLASS_NONE:
	case IOPRIO_CLASS_BE:
	default:
		if (data_dir == READ)
			q_type = ROWQ_PRIO_REG_READ;
		else if (is_sync)
			q_type = ROWQ_PRIO_REG_SWRITE;
		else
			q_type = ROWQ_PRIO_REG_WRITE;
		break;
	}

	return q_type;
}
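At insert time, the returned priority would typically index the scheduler's per-priority queue array. A hedged sketch of that usage; row_queues[] and its fifo member are assumptions about struct row_data made only for illustration:

static void row_add_request_sketch(struct row_data *rd, struct request *rq)
{
	enum row_queue_prio prio = row_get_queue_prio(rq);

	/* queue the request on the FIFO of its ROW priority class */
	list_add_tail(&rq->queuelist, &rd->row_queues[prio].fifo);
}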
Example #11
/**
 * @brief	Request service function.
 * @param	sd[in]: Card information.
 * @param	req[in]: Block request to service.
 * @return	1 on success, -ENXIO on error.
 */
static int gp_sdcard_xfer_request(gpSDInfo_t *sd, struct request *req)
{
	int ret = 1;

	while (ret)
	{
		unsigned int ln;
		unsigned int retry = 0;

		ln = blk_rq_map_sg(sd->queue, req, sd->sg);

#if 0	/* This is used for usb disk check */
		{
			bool do_sync = (rq_is_sync(req) && rq_data_dir(req) == WRITE);
			if (do_sync)
			{
				DEBUG("[Jerry] detect do write sync\n");
			}
		}
#endif
		while(1)
		{
			ret = gp_sdcard_transfer_scatter(sd, blk_rq_pos(req), sd->sg, ln, rq_data_dir(req));
			/* ----- Re-try procedure ----- */
			if(ret<0)
			{
				unsigned int cid[4];
				unsigned int capacity;
				if((retry>=SD_RETRY)||(gp_sdcard_ckinsert(sd)==0)||sd->fremove)
					goto out_error;
				/* ----- Re-initialize sd card ----- */
				memcpy(cid, sd->CID, sizeof(cid));
				capacity = sd->capacity;
				if(gp_sdcard_cardinit(sd)!=0)
				{
					DERROR("[%d]: Re-initialize fail\n",sd->device_id);
					goto out_error;
				}
				else if((cid[0]!=sd->CID[0])||(cid[1]!=sd->CID[1])||(cid[2]!=sd->CID[2])||(cid[3]!=sd->CID[3])||(capacity!=sd->capacity))
				{
					DERROR("[%d]: Different card insert\n",sd->device_id);
					goto out_error;
				}
				retry ++;
			}
			else
				break;
		}
		/* ----- End of request ----- */
		spin_lock_irq(&sd->lock);
		ret = __blk_end_request(req, 0, ret<<9);
		spin_unlock_irq(&sd->lock);
	}
	return 1;
out_error:
	spin_lock_irq(&sd->lock);
	DEBUG("[%d]: txrx fail %d\n", sd->device_id, ret);
	__blk_end_request_all(req, ret);
	spin_unlock_irq(&sd->lock);
	return -ENXIO;
}
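This transfer routine ends the requests itself, so a caller only needs to fetch requests off the queue and feed them in. A hedged sketch of such a loop; the function name and the assumption that sd->lock is the queue lock are illustrative, not the driver's actual code:

static void gp_sdcard_request_sketch(struct request_queue *q)
{
	gpSDInfo_t *sd = q->queuedata;
	struct request *req;

	/* request_fn style: entered with the queue lock held */
	while ((req = blk_fetch_request(q)) != NULL) {
		spin_unlock_irq(&sd->lock);
		gp_sdcard_xfer_request(sd, req);
		spin_lock_irq(&sd->lock);
	}
}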
Example #12
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
    struct blk_queue_tag *bqt = q->queue_tags;
    unsigned max_depth;
    int tag;

    if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
        printk(KERN_ERR
               "%s: request %p for device [%s] already tagged %d",
               __func__, rq,
               rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
        BUG();
    }

    /*
     * Protect against shared tag maps, as we may not have exclusive
     * access to the tag map.
     *
     * We reserve a few tags just for sync IO, since we don't want
     * to starve sync IO on behalf of flooding async IO.
     */
    max_depth = bqt->max_depth;
    if (!rq_is_sync(rq) && max_depth > 1) {
        switch (max_depth) {
        case 2:
            max_depth = 1;
            break;
        case 3:
            max_depth = 2;
            break;
        default:
            max_depth -= 2;
        }
        if (q->in_flight[BLK_RW_ASYNC] > max_depth)
            return 1;
    }

    do {
        if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
            tag = find_first_zero_bit(bqt->tag_map, max_depth);
            if (tag >= max_depth)
                return 1;
        } else {
            int start = bqt->next_tag;
            int size = min_t(int, bqt->max_depth, max_depth + start);
            tag = find_next_zero_bit(bqt->tag_map, size, start);
            if (tag >= size && start + size > bqt->max_depth) {
                size = start + size - bqt->max_depth;
                tag = find_first_zero_bit(bqt->tag_map, size);
            }
            if (tag >= size)
                return 1;
        }

    } while (test_and_set_bit_lock(tag, bqt->tag_map));
    /*
     * We need lock ordering semantics given by test_and_set_bit_lock.
     * See blk_queue_end_tag for details.
     */

    bqt->next_tag = (tag + 1) % bqt->max_depth;
    rq->cmd_flags |= REQ_QUEUED;
    rq->tag = tag;
    bqt->tag_index[tag] = rq;
    blk_start_request(rq);
    list_add(&rq->queuelist, &q->tag_busy_list);
    return 0;
}
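The alloc_policy branch above only matters if the driver selected a policy when it set up the tag map. On kernels that support allocation policies, that would be done at init time roughly as sketched below; the depth value is illustrative:

static int example_init_tags(struct request_queue *q)
{
	/* 64 tags, handed out round-robin so tag numbers are cycled */
	return blk_queue_init_tags(q, 64, NULL, BLK_TAG_ALLOC_RR);
}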