Example #1
void aio_wait_kick(AioWait *wait)
{
    /* The barrier (or an atomic op) is in the caller.  */
    if (atomic_read(&wait->need_kick)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}
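The kick works because scheduling any bottom half in a context forces its aio_poll() to return, so the waiting thread wakes up and re-evaluates its wait condition; the BH body itself can do nothing. A minimal sketch of the one-shot callback under that assumption (the body is illustrative, not the verbatim QEMU helper):

static void dummy_bh_cb(void *opaque)
{
    /* Intentionally empty: scheduling this BH is enough to make
     * aio_poll() in the waiting thread return and re-check its
     * wait condition. */
}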
Example #2
void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}
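iothread_stop() runs outside the I/O thread, so it cannot simply break out of that thread's event loop; instead the one-shot BH executes inside iothread->ctx and wakes the blocked aio_poll(). A minimal sketch, assuming iothread_run() loops on aio_poll() while the stopping flag is clear (the real iothread_stop_bh may do more, e.g. quit a GLib main loop):

static void iothread_stop_bh(void *opaque)
{
    IOThread *iothread = opaque;

    /* The caller already set iothread->stopping; merely running
     * inside the I/O thread's AioContext makes its aio_poll()
     * return, so the loop re-tests the flag and exits, letting
     * qemu_thread_join() in iothread_stop() complete. */
    (void)iothread;
}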
Example #3
File: rbd.c Project: nikunjad/qemu
/*
 * This is the callback function for rbd_aio_read and _write
 *
 * Note: this function is called from a non-QEMU thread, so we
 * need to be careful about what we do here. Generally we only
 * schedule a BH and do the rest of the I/O completion handling
 * in rbd_finish_bh(), which runs in a QEMU context.
 */
static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;

    rcb->ret = rbd_aio_get_return_value(c);
    rbd_aio_release(c);

    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            rbd_finish_bh, rcb);
}
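rbd_finish_aiocb() therefore only copies the librbd result into the RADOSCB and releases the completion; everything that touches QEMU state is deferred. A sketch of the scheduled bottom half, assuming a helper (named qemu_rbd_complete_aio here for illustration) finishes the request:

static void rbd_finish_bh(void *opaque)
{
    RADOSCB *rcb = opaque;

    /* We now run in the AioContext of the BlockDriverState, so
     * the normal completion path is safe to invoke directly. */
    qemu_rbd_complete_aio(rcb);
}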
Example #4
static void net_vhost_user_event(void *opaque, int event)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Chardev *chr;
    Error *err = NULL;
    int queues;

    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);
    chr = qemu_chr_fe_get_driver(&s->chr);
    trace_vhost_user_event(chr->label, event);
    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_start(queues, ncs, &s->chr) < 0) {
            qemu_chr_fe_disconnect(&s->chr);
            return;
        }
        s->watch = qemu_chr_fe_add_watch(&s->chr, G_IO_HUP,
                                         net_vhost_user_watch, s);
        qmp_set_link(name, true, &err);
        s->started = true;
        break;
    case CHR_EVENT_CLOSED:
        /* A close event may happen during a read/write, but the
         * vhost code assumes the vhost_dev remains set up, so
         * delay the stop & clear until the event loop is idle.
         * FIXME: handle failure better in the vhost code, remove BH
         */
        if (s->watch) {
            AioContext *ctx = qemu_get_current_aio_context();

            g_source_remove(s->watch);
            s->watch = 0;
            qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, NULL, NULL,
                                     NULL, NULL, false);

            aio_bh_schedule_oneshot(ctx, chr_closed_bh, opaque);
        }
        break;
    }

    if (err) {
        error_report_err(err);
    }
}
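The CHR_EVENT_CLOSED branch only detaches the watch and the chardev handlers; tearing down the vhost device is deferred to chr_closed_bh, which runs once the event loop is idle. A sketch of that BH, assuming it mirrors the CHR_EVENT_OPENED path in reverse; vhost_user_stop() is assumed here as the counterpart of vhost_user_start():

static void chr_closed_bh(void *opaque)
{
    const char *name = opaque;
    NetClientState *ncs[MAX_QUEUE_NUM];
    VhostUserState *s;
    Error *err = NULL;
    int queues;

    /* Re-resolve the net clients by name, as the event handler does */
    queues = qemu_find_net_clients_except(name, ncs,
                                          NET_CLIENT_DRIVER_NIC,
                                          MAX_QUEUE_NUM);
    assert(queues < MAX_QUEUE_NUM);

    s = DO_UPCAST(VhostUserState, nc, ncs[0]);

    /* Undo what CHR_EVENT_OPENED set up, now that we are idle */
    qmp_set_link(name, false, &err);
    vhost_user_stop(queues, ncs);

    if (err) {
        error_report_err(err);
    }
}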
Example #5
File: vxhs.c Project: Mellanox/qemu
/*
 * Called from a libqnio thread
 */
static void vxhs_iio_callback(void *ctx, uint32_t opcode, uint32_t error)
{
    VXHSAIOCB *acb = NULL;

    switch (opcode) {
    case IRP_READ_REQUEST:
    case IRP_WRITE_REQUEST:

        /*
         * ctx is VXHSAIOCB*
         * ctx is NULL if error is QNIOERROR_CHANNEL_HUP
         */
        if (ctx) {
            acb = ctx;
        } else {
            trace_vxhs_iio_callback(error);
            goto out;
        }

        if (error) {
            if (!acb->err) {
                acb->err = error;
            }
            trace_vxhs_iio_callback(error);
        }

        aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                                vxhs_complete_aio_bh, acb);
        break;

    default:
        if (error == QNIOERROR_HUP) {
            /*
             * Channel failed, spontaneous notification,
             * not in response to I/O
             */
            trace_vxhs_iio_callback_chnfail(error, errno);
        } else {
            trace_vxhs_iio_callback_unknwn(opcode, error);
        }
        break;
    }
out:
    return;
}
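Completion again happens in two halves: the libqnio thread records the status on the ACB, and a BH in the device's AioContext invokes the guest-visible callback. A sketch of that BH, assuming VXHSAIOCB embeds a BlockAIOCB as common (as acb->common.bs above suggests) and that any recorded error maps to -EIO:

static void vxhs_complete_aio_bh(void *opaque)
{
    VXHSAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *cb_opaque = acb->common.opaque;
    int ret = 0;

    if (acb->err != 0) {
        /* Propagate the error recorded by vxhs_iio_callback() */
        ret = -EIO;
    }

    /* Drop our reference before invoking the completion callback */
    qemu_aio_unref(acb);
    cb(cb_opaque, ret);
}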