Ejemplo n.º 1
0
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
    static const int dirs[] = { BLK_RW_SYNC, BLK_RW_ASYNC };
    struct request_list *rl = &q->rq;
    unsigned long new_depth;
    size_t i;
    int ret;

    /* Only legacy request_fn-based queues have a request list to resize. */
    if (!q->request_fn)
        return -EINVAL;

    ret = queue_var_store(&new_depth, page, count);
    if (ret < 0)
        return ret;

    /* Never allow the queue depth below the block layer's minimum. */
    if (new_depth < BLKDEV_MIN_RQ)
        new_depth = BLKDEV_MIN_RQ;

    spin_lock_irq(q->queue_lock);
    q->nr_requests = new_depth;
    blk_queue_congestion_threshold(q);

    /*
     * Re-evaluate congestion and "queue full" state for both request
     * directions against the freshly computed thresholds.
     */
    for (i = 0; i < 2; i++) {
        int rw = dirs[i];

        if (rl->count[rw] >= queue_congestion_on_threshold(q))
            blk_set_queue_congested(q, rw);
        else if (rl->count[rw] < queue_congestion_off_threshold(q))
            blk_clear_queue_congested(q, rw);

        /*
         * If the new depth leaves headroom, clear the full flag and
         * wake any tasks sleeping for a free request slot.
         */
        if (rl->count[rw] >= q->nr_requests) {
            blk_set_queue_full(q, rw);
        } else {
            blk_clear_queue_full(q, rw);
            wake_up(&rl->wait[rw]);
        }
    }
    spin_unlock_irq(q->queue_lock);
    return ret;
}
Ejemplo n.º 2
0
/*
 * Sysfs store handler for /sys/block/<dev>/queue/nr_requests: parse the
 * requested queue depth, clamp it to the supported minimum, and refresh
 * the congestion/full state of the request lists under the queue lock.
 * Returns the number of bytes consumed, or a negative errno on bad input.
 */
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	/* Only request_fn-based queues have a request list to resize. */
	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	/*
	 * Bail out on parse failure: previously the result was ignored,
	 * leaving 'nr' uninitialized (undefined behavior) and writing
	 * garbage into q->nr_requests.
	 */
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[READ] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, READ);
	else if (rl->count[READ] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, READ);

	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, WRITE);
	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, WRITE);

	/*
	 * The old 'else if (count+1 <= nr_requests)' re-test was the exact
	 * negation of the 'if' — a plain else is equivalent and avoids the
	 * redundant comparison. Wake sleepers when room opens up.
	 */
	if (rl->count[READ] >= q->nr_requests) {
		blk_set_queue_full(q, READ);
	} else {
		blk_clear_queue_full(q, READ);
		wake_up(&rl->wait[READ]);
	}

	if (rl->count[WRITE] >= q->nr_requests) {
		blk_set_queue_full(q, WRITE);
	} else {
		blk_clear_queue_full(q, WRITE);
		wake_up(&rl->wait[WRITE]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}