Example 1
void aio_wait_kick(AioWait *wait)
{
    /* The barrier (or an atomic op) is in the caller.  */
    if (atomic_read(&wait->need_kick)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}
Example 2
static void test_pair_jobs_fail_cancel_race(void)
{
    BlockJob *job1;
    BlockJob *job2;
    BlockJobTxn *txn;
    int result1 = -EINPROGRESS;
    int result2 = -EINPROGRESS;

    txn = block_job_txn_new();
    job1 = test_block_job_start(1, true, -ECANCELED, &result1, txn);
    job2 = test_block_job_start(2, false, 0, &result2, txn);
    block_job_start(job1);
    block_job_start(job2);

    block_job_cancel(job1, false);

    /* Now make job2 finish before the main loop kicks jobs.  This simulates
     * the race between a pending kick and another job completing.
     */
    block_job_enter(job2);
    block_job_enter(job2);

    while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }

    g_assert_cmpint(result1, ==, -ECANCELED);
    g_assert_cmpint(result2, ==, -ECANCELED);

    block_job_txn_unref(txn);
}
Example 3
int main(int argc, char **argv)
{
    qemu_init_main_loop(&error_fatal);
    ctx = qemu_get_aio_context();
    bdrv_init();

    /* Process any events left pending by main loop initialization */
    do {} while (g_main_context_iteration(NULL, false));

    /* tests in the same order as the header function declarations */
    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/throttle/leak_bucket",        test_leak_bucket);
    g_test_add_func("/throttle/compute_wait",       test_compute_wait);
    g_test_add_func("/throttle/init",               test_init);
    g_test_add_func("/throttle/destroy",            test_destroy);
    g_test_add_func("/throttle/have_timer",         test_have_timer);
    g_test_add_func("/throttle/detach_attach",      test_detach_attach);
    g_test_add_func("/throttle/config/enabled",     test_enabled);
    g_test_add_func("/throttle/config/conflicting", test_conflicting_config);
    g_test_add_func("/throttle/config/is_valid",    test_is_valid);
    g_test_add_func("/throttle/config/max",         test_max_is_missing_limit);
    g_test_add_func("/throttle/config_functions",   test_config_functions);
    g_test_add_func("/throttle/accounting",         test_accounting);
    g_test_add_func("/throttle/groups",             test_groups);
    return g_test_run();
}
Example 4
void iothread_stop_all(void)
{
    Object *container = object_get_objects_root();
    BlockDriverState *bs;
    BdrvNextIterator it;

    /* Move block devices out of their IOThread AioContexts first */
    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        if (ctx == qemu_get_aio_context()) {
            continue;
        }
        aio_context_acquire(ctx);
        bdrv_set_aio_context(bs, qemu_get_aio_context());
        aio_context_release(ctx);
    }

    object_child_foreach(container, iothread_stop, NULL);
}
Example 5
void fsdev_throttle_init(FsThrottle *fst)
{
    if (throttle_enabled(&fst->cfg)) {
        throttle_init(&fst->ts);
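        /* The throttle timers run in the main loop AioContext */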
        throttle_timers_init(&fst->tt,
                             qemu_get_aio_context(),
                             QEMU_CLOCK_REALTIME,
                             fsdev_throttle_read_timer_cb,
                             fsdev_throttle_write_timer_cb,
                             fst);
        throttle_config(&fst->ts, QEMU_CLOCK_REALTIME, &fst->cfg);
        qemu_co_queue_init(&fst->throttled_reqs[0]);
        qemu_co_queue_init(&fst->throttled_reqs[1]);
    }
}
Example 6
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;

    if (!vblk->dataplane_started || s->stopping) {
        return;
    }

    /* Better luck next time. */
    if (vblk->dataplane_disabled) {
        vblk->dataplane_disabled = false;
        vblk->dataplane_started = false;
        return;
    }
    s->stopping = true;
    trace_virtio_blk_data_plane_stop(s);

    aio_context_acquire(s->ctx);

    /* Stop notifications for new requests from guest */
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
    }

    /* Drain and switch bs back to the QEMU main loop */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());

    aio_context_release(s->ctx);

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    vblk->dataplane_started = false;
    s->stopping = false;
}
Example 7
static int do_sgio(int fd, const uint8_t *cdb, uint8_t *sense,
                    uint8_t *buf, int *sz, int dir)
{
    ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
    int r;

    PRHelperSGIOData data = {
        .fd = fd,
        .cdb = cdb,
        .sense = sense,
        .buf = buf,
        .sz = *sz,
        .dir = dir,
    };

    /* Run the request in the main loop's thread pool; the coroutine yields
     * until the worker completes.
     */
    r = thread_pool_submit_co(pool, do_sgio_worker, &data);
    *sz = data.sz;
    return r;
}
Example 8
/* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);

    /* Better luck next time. */
    if (s->disabled) {
        s->disabled = false;
        return;
    }
    if (!s->started || s->stopping) {
        return;
    }
    s->stopping = true;
    vblk->complete_request = s->saved_complete_request;
    trace_virtio_blk_data_plane_stop(s);

    aio_context_acquire(s->ctx);

    /* Stop notifications for new requests from guest */
    aio_set_event_notifier(s->ctx, &s->host_notifier, true, NULL);

    /* Drain and switch bs back to the QEMU main loop */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());

    aio_context_release(s->ctx);

    /* Sync vring state back to virtqueue so that non-dataplane request
     * processing can continue when we disable the host notifier below.
     */
    vring_teardown(&s->vring, s->vdev, 0);

    k->set_host_notifier(qbus->parent, 0, false);

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, 1, false);

    s->started = false;
    s->stopping = false;
}
Example 9
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    /* Drain and switch the block backend back to the QEMU main loop */
    aio_context_acquire(dataplane->ctx);
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context());
    aio_context_release(dataplane->ctx);

    xendev = dataplane->xendev;

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}
Example 10
static void test_single_job(int expected)
{
    BlockJob *job;
    BlockJobTxn *txn;
    int result = -EINPROGRESS;

    txn = block_job_txn_new();
    job = test_block_job_start(1, true, expected, &result, txn);
    block_job_start(job);

    if (expected == -ECANCELED) {
        block_job_cancel(job, false);
    }

    /* Poll the main loop until the job reports a result */
    while (result == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }
    g_assert_cmpint(result, ==, expected);

    block_job_txn_unref(txn);
}
Example 11
static void test_pair_jobs(int expected1, int expected2)
{
    BlockJob *job1;
    BlockJob *job2;
    BlockJobTxn *txn;
    int result1 = -EINPROGRESS;
    int result2 = -EINPROGRESS;

    txn = block_job_txn_new();
    job1 = test_block_job_start(1, true, expected1, &result1, txn);
    job2 = test_block_job_start(2, true, expected2, &result2, txn);
    block_job_start(job1);
    block_job_start(job2);

    /* Release our reference now to trigger as many nice
     * use-after-free bugs as possible.
     */
    block_job_txn_unref(txn);

    if (expected1 == -ECANCELED) {
        block_job_cancel(job1, false);
    }
    if (expected2 == -ECANCELED) {
        block_job_cancel(job2, false);
    }

    while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }

    /* Failure or cancellation of one job cancels the other job */
    if (expected1 != 0) {
        expected2 = -ECANCELED;
    } else if (expected2 != 0) {
        expected1 = -ECANCELED;
    }

    g_assert_cmpint(result1, ==, expected1);
    g_assert_cmpint(result2, ==, expected2);
}
Example 12
static void test_pair_jobs(int expected1, int expected2)
{
    BlockJob *job1;
    BlockJob *job2;
    BlockJobTxn *txn;
    int result1 = -EINPROGRESS;
    int result2 = -EINPROGRESS;

    txn = block_job_txn_new();
    job1 = test_block_job_start(1, true, expected1, &result1);
    block_job_txn_add_job(txn, job1);
    job2 = test_block_job_start(2, true, expected2, &result2);
    block_job_txn_add_job(txn, job2);

    if (expected1 == -ECANCELED) {
        block_job_cancel(job1);
    }
    if (expected2 == -ECANCELED) {
        block_job_cancel(job2);
    }

    while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
        aio_poll(qemu_get_aio_context(), true);
    }

    /* Failure or cancellation of one job cancels the other job */
    if (expected1 != 0) {
        expected2 = -ECANCELED;
    } else if (expected2 != 0) {
        expected1 = -ECANCELED;
    }

    g_assert_cmpint(result1, ==, expected1);
    g_assert_cmpint(result2, ==, expected2);

    block_job_txn_unref(txn);
}
Example 13
void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
                         Error **errp)
{
    BlockdevCreateJob *s;
    const char *fmt = BlockdevDriver_str(options->driver);
    BlockDriver *drv = bdrv_find_format(fmt);

    /* If the driver is in the schema, we know that it exists. But it may not
     * be whitelisted. */
    assert(drv);
    if (bdrv_uses_whitelist() && !bdrv_is_whitelisted(drv, false)) {
        error_setg(errp, "Driver is not whitelisted");
        return;
    }

    /* Error out if the driver doesn't support .bdrv_co_create */
    if (!drv->bdrv_co_create) {
        error_setg(errp, "Driver does not support blockdev-create");
        return;
    }

    /* Create the block job */
    /* TODO Running in the main context. Block drivers need to error out or add
     * locking when they use a BDS in a different AioContext. */
    s = job_create(job_id, &blockdev_create_job_driver, NULL,
                   qemu_get_aio_context(), JOB_DEFAULT | JOB_MANUAL_DISMISS,
                   NULL, NULL, errp);
    if (!s) {
        return;
    }

    s->drv = drv;
    s->opts = QAPI_CLONE(BlockdevCreateOptions, options);

    job_start(&s->common);
}
Example 14
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockConf *conf,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = conf->blk;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
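    /* The dataplane bottom half runs in whichever AioContext was chosen above */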
    dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh,
                               dataplane);

    return dataplane;
}
Example 15
AioContext *qemu_get_current_aio_context(void)
{
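    /* Return the calling thread's IOThread context, or fall back to the main loop */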
    return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
}