/*
 * Store handler for the "rq_affinity" queue sysfs attribute.
 *
 * Accepted values:
 *   2 - force completions onto the submitting CPU (SAME_COMP + SAME_FORCE)
 *   1 - steer completions to the submitting CPU's group (SAME_COMP only)
 *   0 - no completion affinity
 * Any other value is silently ignored (flags left untouched).
 *
 * Only meaningful on SMP kernels; returns -EINVAL when !CONFIG_SMP.
 * Returns the number of bytes consumed, or a negative errno on bad input.
 */
static ssize_t queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	/* Flags are updated under the queue lock so readers see a consistent pair. */
	spin_lock_irq(q->queue_lock);
	switch (val) {
	case 2:
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
		break;
	case 1:
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
		break;
	case 0:
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
		break;
	default:
		/* Unknown values leave the current policy unchanged. */
		break;
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
/*
 * Store handler for the "nr_requests" queue sysfs attribute (mq-aware variant).
 *
 * Parses the requested queue depth, clamps it to BLKDEV_MIN_RQ, then hands
 * it to the legacy or blk-mq resize helper depending on the queue type.
 * Returns the number of bytes consumed, or a negative errno on failure.
 */
static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	/* Reject queues that are neither legacy (request_fn) nor blk-mq. */
	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	/* Never allow the depth to drop below the minimum the core supports. */
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	/* Dispatch to whichever resize path matches this queue's dispatch model. */
	err = q->request_fn ? blk_update_nr_requests(q, nr)
			    : blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
/*
 * Store handler for the "read_ahead_kb" queue sysfs attribute.
 *
 * Converts the user-supplied kilobyte value into pages and stores it in
 * the queue's backing_dev_info.
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write left ra_kb uninitialized and stored garbage in ra_pages.
 * Bail out with the parse error before touching the queue state.
 */
static ssize_t queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	/* kb -> pages: PAGE_CACHE_SHIFT - 10 is log2(page size in KiB). */
	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}
/*
 * Store handler for the "nr_requests" queue sysfs attribute (legacy,
 * per-request-list variant).
 *
 * Updates q->nr_requests, recomputes the congestion thresholds, then - all
 * under the queue lock - re-evaluates congestion state against the root
 * request list and the full/wakeup state of every request list on the queue.
 * Returns the number of bytes consumed, or a negative errno on bad input.
 */
static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl;
	unsigned long nr;
	int ret;

	/* Only legacy (request_fn based) queues have request lists to resize. */
	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	/* Clamp to the minimum depth the block core supports. */
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	/* Recompute on/off congestion watermarks for the new depth. */
	blk_queue_congestion_threshold(q);

	/* congestion isn't cgroup aware and follows root blkcg for now */
	rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	/*
	 * Unlike congestion, the full flag is tracked per request list: mark
	 * lists at/over the new depth full, and wake sleepers on lists that
	 * now have room (the new depth may be larger than the old one).
	 */
	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "iostats" queue sysfs attribute.
 *
 * Any non-zero value enables per-queue I/O accounting (QUEUE_FLAG_IO_STAT);
 * zero disables it.
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write tested uninitialized 'stats' and still reported success.
 * Propagate the parse error before touching the flag.
 */
static ssize_t queue_iostats_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "nomerges" queue sysfs attribute.
 *
 * Any non-zero value disables request merging (sets QUEUE_FLAG_NOMERGES);
 * zero re-enables it.
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write tested uninitialized 'nm' and still reported success.
 * Propagate the parse error before touching the flag.
 */
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "add_random" queue sysfs attribute.
 *
 * Any non-zero value lets this device's I/O timings contribute to the
 * kernel entropy pool (QUEUE_FLAG_ADD_RANDOM); zero opts it out.
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write tested uninitialized 'val' and still reported success.
 * Propagate the parse error before touching the flag.
 */
static ssize_t queue_random_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
	else
		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "max_sectors_kb" queue sysfs attribute.
 *
 * Accepts a soft I/O size cap in KiB; it must be at least one page and at
 * most the hardware limit (max_hw_sectors, expressed here in KiB).
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write range-checked and stored an uninitialized value.
 * Propagate the parse error before validating or storing.
 */
static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,	/* sectors -> KiB */
		page_kb = 1 << (PAGE_CACHE_SHIFT - 10);			/* page size in KiB */
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;	/* KiB -> 512-byte sectors */
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "rq_affinity" queue sysfs attribute (boolean
 * variant): non-zero steers completions to the submitting CPU
 * (QUEUE_FLAG_SAME_COMP), zero disables that.
 *
 * Only available when the generic SMP helpers are built in; returns
 * -EINVAL otherwise.
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write tested uninitialized 'val' and still reported success.
 * Propagate the parse error before touching the flag.
 */
static ssize_t queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}
/*
 * Store handler for the "unpriv_sgio" queue sysfs attribute.
 *
 * Requires CAP_SYS_ADMIN. Any non-zero value sets QUEUE_FLAG_UNPRIV_SGIO
 * (permitting unprivileged SG_IO on this queue); zero clears it.
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write tested uninitialized 'val' and still reported success.
 * Propagate the parse error before touching the flag.
 */
static ssize_t queue_store_unpriv_sgio(struct request_queue *q, const char *page, size_t count)
{
	unsigned long val;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_UNPRIV_SGIO, q);
	else
		queue_flag_clear(QUEUE_FLAG_UNPRIV_SGIO, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "nr_requests" queue sysfs attribute (oldest legacy
 * variant with a single shared request list q->rq).
 *
 * Updates q->nr_requests, recomputes congestion thresholds, then re-evaluates
 * congestion and queue-full state for the sync and async directions, waking
 * any sleepers if the new depth created room - all under the queue lock.
 *
 * Fix: the return value of queue_var_store() was previously ignored, so a
 * malformed write used uninitialized 'nr' as the new queue depth.
 * Propagate the parse error before touching the queue.
 */
static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	/* Only legacy (request_fn based) queues have a request list to resize. */
	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	/* Clamp to the minimum depth the block core supports. */
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "io_poll" queue sysfs attribute.
 *
 * Any non-zero value enables polled completions (QUEUE_FLAG_POLL); zero
 * disables them. Returns the number of bytes consumed, or a negative
 * errno on bad input or unsupported queues.
 */
static ssize_t queue_poll_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	/* Polling only makes sense on blk-mq queues whose driver implements ->poll(). */
	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (!poll_on)
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	else
		queue_flag_set(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
/*
 * Store handler for the "discard_max_bytes" queue sysfs attribute.
 *
 * Accepts a byte count, which must be a multiple of the discard
 * granularity; it is converted to 512-byte sectors and capped at the
 * hardware discard limit before being stored. Returns the number of bytes
 * consumed, or a negative errno on bad input.
 */
static ssize_t queue_discard_max_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	if (ret < 0)
		return ret;

	/* Reject byte values not aligned to the (power-of-two) discard granularity. */
	if (val & (q->limits.discard_granularity - 1))
		return -EINVAL;

	/* Bytes -> 512-byte sectors; the stored limit must fit in 32 bits. */
	val >>= 9;
	if (val > UINT_MAX)
		return -EINVAL;

	/* The soft limit can never exceed what the hardware supports. */
	if (val > q->limits.max_hw_discard_sectors)
		val = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = val;

	return ret;
}
/*
 * Store handler for the "max_sectors_kb" queue sysfs attribute (variant
 * that also honours the per-device limit and keeps bdi->io_pages in sync).
 *
 * The value must be at least one page and no larger than the effective
 * hardware ceiling (min of max_hw_sectors and max_dev_sectors, in KiB).
 * Returns the number of bytes consumed, or a negative errno on bad input.
 */
static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb;
	unsigned long max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;	/* sectors -> KiB */
	unsigned long page_kb = 1 << (PAGE_SHIFT - 10);			/* page size in KiB */
	ssize_t ret;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	/* Fold the device limit into the hardware ceiling (zero means "no limit"). */
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
			(unsigned long) q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb < page_kb || max_sectors_kb > max_hw_sectors_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;	/* KiB -> 512-byte sectors */
	/* Keep the writeback/readahead I/O size in step with the new cap. */
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}