/* Verify that throttle_enabled() reports a configuration as active
 * exactly when at least one bucket carries a positive value.
 */
static void test_enabled(void)
{
    int bucket;

    /* An all-zero configuration must read as disabled. */
    memset(&cfg, 0, sizeof(cfg));
    g_assert(!throttle_enabled(&cfg));

    /* A single positive bucket value is enough to enable throttling. */
    for (bucket = 0; bucket < BUCKETS_COUNT; bucket++) {
        memset(&cfg, 0, sizeof(cfg));
        set_cfg_value(false, bucket, 150);
        g_assert(throttle_enabled(&cfg));
    }

    /* A negative bucket value must not count as enabled. */
    for (bucket = 0; bucket < BUCKETS_COUNT; bucket++) {
        memset(&cfg, 0, sizeof(cfg));
        set_cfg_value(false, bucket, -150);
        g_assert(!throttle_enabled(&cfg));
    }
}
/* Set up throttling state for @fst when its configuration enables it:
 * per-direction timers on the main AIO context plus one coroutine
 * queue per direction (index 0 = read, 1 = write) for waiting requests.
 * A disabled configuration leaves @fst untouched.
 */
void fsdev_throttle_init(FsThrottle *fst)
{
    if (!throttle_enabled(&fst->cfg)) {
        return;
    }

    throttle_init(&fst->ts);
    throttle_timers_init(&fst->tt, qemu_get_aio_context(),
                         QEMU_CLOCK_REALTIME,
                         fsdev_throttle_read_timer_cb,
                         fsdev_throttle_write_timer_cb,
                         fst);
    /* throttle_config() requires an initialized state, hence the order. */
    throttle_config(&fst->ts, QEMU_CLOCK_REALTIME, &fst->cfg);
    qemu_co_queue_init(&fst->throttled_reqs[0]);
    qemu_co_queue_init(&fst->throttled_reqs[1]);
}
/* Throttle one I/O request of iov_size(iov, iovcnt) bytes in the
 * @is_write direction.  Must be called from coroutine context: the
 * calling coroutine may be parked on the per-direction queue until
 * the throttle timer (or a completing peer) wakes it up.
 * No-op when throttling is not configured.
 */
void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
                                            struct iovec *iov, int iovcnt)
{
    bool must_wait;

    if (!throttle_enabled(&fst->cfg)) {
        return;
    }

    /* Block if the throttle says so, or if peers are already queued
     * ahead of us (FIFO ordering per direction). */
    must_wait = throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
                !qemu_co_queue_empty(&fst->throttled_reqs[is_write]);
    if (must_wait) {
        qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
    }

    /* Charge this request's size against the bucket. */
    throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));

    /* If budget remains after accounting, hand the turn to the next
     * queued coroutine instead of leaving it to the timer. */
    if (!qemu_co_queue_empty(&fst->throttled_reqs[is_write]) &&
        !throttle_schedule_timer(&fst->ts, &fst->tt, is_write)) {
        qemu_co_queue_next(&fst->throttled_reqs[is_write]);
    }
}
/* Tear down the throttle timers for @fst.  Timers exist only when
 * throttling was enabled at init time, so mirror that check here.
 */
void fsdev_throttle_cleanup(FsThrottle *fst)
{
    if (!throttle_enabled(&fst->cfg)) {
        return;
    }
    throttle_timers_destroy(&fst->tt);
}