Example #1
File: ssh.c Project: J-Liu/qemu
/* A non-blocking call returned EAGAIN, so yield, ensuring the
 * handlers are set up so that we'll be rescheduled when there is an
 * interesting event on the socket.
 */
static coroutine_fn void co_yield(BDRVSSHState *s, BlockDriverState *bs)
{
    int r;
    IOHandler *rd_handler = NULL, *wr_handler = NULL;
    Coroutine *co = qemu_coroutine_self();

    r = libssh2_session_block_directions(s->session);

    if (r & LIBSSH2_SESSION_BLOCK_INBOUND) {
        rd_handler = restart_coroutine;
    }
    if (r & LIBSSH2_SESSION_BLOCK_OUTBOUND) {
        wr_handler = restart_coroutine;
    }

    DPRINTF("s->sock=%d rd_handler=%p wr_handler=%p", s->sock,
            rd_handler, wr_handler);

    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
                       false, rd_handler, wr_handler, NULL, co);
    qemu_coroutine_yield();
    DPRINTF("s->sock=%d - back", s->sock);
    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock, false,
                       NULL, NULL, NULL, NULL);
}
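
A minimal sketch of the restart_coroutine handler registered above, which is not part of the excerpt — assuming it simply re-enters the coroutine passed as the opaque pointer (in older QEMU versions qemu_coroutine_enter() takes a second argument):

static void restart_coroutine(void *opaque)
{
    Coroutine *co = opaque;

    /* Resume the coroutine that yielded in co_yield(); on resumption
     * co_yield() clears the fd handlers again. */
    qemu_coroutine_enter(co);
}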
Example #2
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, sizeof(blkcfg));

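    /* The write-cache flag lives on the BDS, which may be driven from
     * another thread's AioContext; hold its lock while flipping it. */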
    aio_context_acquire(bdrv_get_aio_context(s->bs));
    bdrv_set_enable_write_cache(s->bs, blkcfg.wce != 0);
    aio_context_release(bdrv_get_aio_context(s->bs));
}
Example #3
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    /* Just to be safe if query_nodes is not always initialized */
    query_nodes = has_query_nodes && query_nodes;

    while (next_query_bds(&blk, &bs, query_nodes)) {
        BlockStatsList *info = g_malloc0(sizeof(*info));
        AioContext *ctx = blk ? blk_get_aio_context(blk)
                              : bdrv_get_aio_context(bs);

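        /* Collect the stats under the AioContext lock of whichever
         * backend or node is being queried. */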
        aio_context_acquire(ctx);
        info->value = bdrv_query_stats(blk, bs, !query_nodes);
        aio_context_release(ctx);

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}
Example #4
static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
                                          BlockDriverState *bs)
{
    DPRINTF("s->sock=%d", s->sock);
    aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
                       false, NULL, NULL, NULL);
}
Example #5
static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    struct BlockFinishData data;
    BlockDriverState *bs = job->bs;
    Error *local_err = NULL;

    assert(bs->job == job);

    /* Set up our own callback to store the result and chain to
     * the original callback.
     */
    data.job = job;
    data.cb = job->cb;
    data.opaque = job->opaque;
    data.ret = -EINPROGRESS;
    job->cb = block_job_finish_cb;
    job->opaque = &data;
    finish(job, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EBUSY;
    }
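    /* The interposed callback stores the result into data.ret; run the
     * BDS's event loop until that happens. */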
    while (data.ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(bs), true);
    }
    return (data.cancelled && data.ret == 0) ? -ECANCELED : data.ret;
}
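
block_job_finish_cb is not shown in this excerpt; a sketch of what it plausibly does, assuming it records the outcome for the polling loop above and restores the original callback fields (how exactly it chains to the original callback is an assumption):

static void block_job_finish_cb(void *opaque, int ret)
{
    struct BlockFinishData *data = opaque;

    data->cancelled = block_job_is_cancelled(data->job);
    data->ret = ret;                  /* terminates the aio_poll() loop */
    data->job->cb = data->cb;         /* restore the original callback */
    data->job->opaque = data->opaque;
}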
Example #6
int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
                             BlockDriverState *vm_state_bs,
                             uint64_t vm_state_size,
                             BlockDriverState **first_bad_bs)
{
    int err = 0;
    BlockDriverState *bs = NULL;

    while (err == 0 && (bs = bdrv_next(bs))) {
        AioContext *ctx = bdrv_get_aio_context(bs);

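        /* Each BDS is snapshotted under its own AioContext lock. */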
        aio_context_acquire(ctx);
        if (bs == vm_state_bs) {
            sn->vm_state_size = vm_state_size;
            err = bdrv_snapshot_create(bs, sn);
        } else if (bdrv_can_snapshot(bs)) {
            sn->vm_state_size = 0;
            err = bdrv_snapshot_create(bs, sn);
        }
        aio_context_release(ctx);
    }

    *first_bad_bs = bs;
    return err;
}
Example #7
static void qemu_archipelago_aio_cancel(BlockDriverAIOCB *blockacb)
{
    ArchipelagoAIOCB *aio_cb = (ArchipelagoAIOCB *) blockacb;
    aio_cb->cancelled = true;
    while (aio_cb->status == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(aio_cb->common.bs), true);
    }
    qemu_aio_release(aio_cb);
}
Example #8
/*
 * This is the callback function for rbd_aio_read and _write
 *
 * Note: this function is being called from a non qemu thread so
 * we need to be careful about what we do here. Generally we only
 * schedule a BH, and do the rest of the io completion handling
 * from rbd_finish_bh() which runs in a qemu context.
 */
static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;

    rcb->ret = rbd_aio_get_return_value(c);
    rbd_aio_release(c);

    aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                            rbd_finish_bh, rcb);
}
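
rbd_finish_bh itself is outside the excerpt; a plausible sketch, assuming it merely hands the RADOSCB to a completion helper once it runs in a qemu thread (qemu_rbd_complete_aio is an assumed name, not shown above):

static void rbd_finish_bh(void *opaque)
{
    RADOSCB *rcb = opaque;

    /* Now running in a qemu context, per the comment above */
    qemu_rbd_complete_aio(rcb);    /* assumed completion helper */
}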
Example #9
int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
{
    int ret = -EINPROGRESS;

    qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}
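
The qed *_sync wrappers here and in Examples #13, #15 and #16 all rely on qed_sync_cb, which is not shown; presumably it just copies the completion code into the caller's local ret so that the aio_poll() loop terminates:

static void qed_sync_cb(void *opaque, int ret)
{
    /* opaque points at the caller's ret, preset to -EINPROGRESS */
    *(int *)opaque = ret;
}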
Example #10
/*
 * Cancel aio. Since we don't reference acb in non-qemu threads,
 * it is safe to access it here.
 */
static void qemu_rbd_aio_cancel(BlockDriverAIOCB *blockacb)
{
    RBDAIOCB *acb = (RBDAIOCB *) blockacb;
    acb->cancelled = 1;

    while (acb->status == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(acb->common.bs), true);
    }

    qemu_aio_release(acb);
}
Example #11
static void win32_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEMUWin32AIOCB *waiocb = (QEMUWin32AIOCB *)blockacb;

    /*
     * CancelIoEx is only supported in Vista and newer.  For now, just
     * wait for completion.
     */
    while (!HasOverlappedIoCompleted(&waiocb->ov)) {
        aio_poll(bdrv_get_aio_context(blockacb->bs), true);
    }
}
Example #12
static void blkverify_aio_cancel(BlockDriverAIOCB *blockacb)
{
    BlkverifyAIOCB *acb = (BlkverifyAIOCB *)blockacb;
    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
    bool finished = false;

    /* Wait until request completes, invokes its callback, and frees itself */
    acb->finished = &finished;
    while (!finished) {
        aio_poll(aio_context, true);
    }
}
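
The completion side of this handshake is not shown. A hypothetical sketch (blkverify_aio_complete is an assumed name) of how the request's final callback would unblock the cancel loop:

static void blkverify_aio_complete(BlkverifyAIOCB *acb, int ret)
{
    acb->common.cb(acb->common.opaque, ret);
    if (acb->finished) {
        /* Wakes the aio_poll() loop in blkverify_aio_cancel() */
        *acb->finished = true;
    }
    qemu_aio_release(acb);
}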
Example #13
int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                            unsigned int index, unsigned int n, bool flush)
{
    int ret = -EINPROGRESS;

    qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}
Example #14
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        aio_poll(aio_context, true);
    }
}
Example #15
int qed_read_l1_table_sync(BDRVQEDState *s)
{
    int ret = -EINPROGRESS;

    qed_read_table(s, s->header.l1_table_offset,
                   s->l1_table, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}
Example #16
int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
                            unsigned int n)
{
    int ret = -EINPROGRESS;

    qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
    while (ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(s->bs), true);
    }

    return ret;
}
Example #17
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    uint32_t features;

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
                                    VIRTIO_CONFIG_S_DRIVER_OK))) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    features = vdev->guest_features;

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->bs would erroneously be placed in writethrough mode.
     */
    if (!(features & (1 << VIRTIO_BLK_F_CONFIG_WCE))) {
        aio_context_acquire(bdrv_get_aio_context(s->bs));
        bdrv_set_enable_write_cache(s->bs,
                                    !!(features & (1 << VIRTIO_BLK_F_WCE)));
        aio_context_release(bdrv_get_aio_context(s->bs));
    }
}
Example #18
static void archipelago_finish_aiocb(AIORequestData *reqdata)
{
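    /* A short transfer is an error; exactly segreq->total bytes means
     * success. */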
    if (reqdata->aio_cb->ret != reqdata->segreq->total) {
        reqdata->aio_cb->ret = -EIO;
    } else if (reqdata->aio_cb->ret == reqdata->segreq->total) {
        reqdata->aio_cb->ret = 0;
    }
    reqdata->aio_cb->bh = aio_bh_new(
                        bdrv_get_aio_context(reqdata->aio_cb->common.bs),
                        qemu_archipelago_complete_aio, reqdata
                        );
    qemu_bh_schedule(reqdata->aio_cb->bh);
}
Example #19
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->bh = qemu_bh_new(block_job_defer_to_main_loop_bh, data);
    data->aio_context = bdrv_get_aio_context(job->bs);
    data->fn = fn;
    data->opaque = opaque;

    qemu_bh_schedule(data->bh);
}
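
block_job_defer_to_main_loop_bh is not part of the excerpt; a simplified sketch, assuming the BH runs in the main loop, re-acquires the AioContext recorded above around fn, and frees the bookkeeping data (the real version may re-fetch the job's context in case it changed in the meantime):

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;

    qemu_bh_delete(data->bh);
    aio_context_acquire(data->aio_context);
    data->fn(data->job, data->opaque);
    aio_context_release(data->aio_context);
    g_free(data);
}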
Example #20
static int nbd_co_send_request(BlockDriverState *bs,
                               struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    NbdClientSession *s = nbd_get_client_session(bs);
    AioContext *aio_context;
    int rc, ret, i;

    qemu_co_mutex_lock(&s->send_mutex);

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);

    if (!s->ioc) {
        qemu_co_mutex_unlock(&s->send_mutex);
        return -EPIPE;
    }

    s->send_coroutine = qemu_coroutine_self();
    aio_context = bdrv_get_aio_context(bs);

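    /* While sending, also watch for writability so the coroutine can be
     * restarted if the socket blocks mid-request. */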
    aio_set_fd_handler(aio_context, s->sioc->fd, false,
                       nbd_reply_ready, nbd_restart_write, bs);
    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0) {
            ret = nbd_wr_syncv(s->ioc, qiov->iov, qiov->niov,
                               offset, request->len, 0);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    aio_set_fd_handler(aio_context, s->sioc->fd, false,
                       nbd_reply_ready, NULL, bs);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
Example #21
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
Example #22
BlockDriverState *bdrv_all_find_vmstate_bs(void)
{
    bool not_found = true;
    BlockDriverState *bs = NULL;

    while (not_found && (bs = bdrv_next(bs))) {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        not_found = !bdrv_can_snapshot(bs);
        aio_context_release(ctx);
    }
    return bs;
}
Example #23
void nbd_client_close(BlockDriverState *bs)
{
    NbdClientSession *client = nbd_get_client_session(bs);
    struct nbd_request request = {
        .type = NBD_CMD_DISC,
        .from = 0,
        .len = 0
    };

    if (client->sock == -1) {
        return;
    }

    nbd_send_request(client->sock, &request);

    nbd_teardown_connection(bs);
}

int nbd_client_init(BlockDriverState *bs, int sock, const char *export,
                    Error **errp)
{
    NbdClientSession *client = nbd_get_client_session(bs);
    int ret;

    /* NBD handshake */
    logout("session init %s\n", export);
    qemu_set_block(sock);
    ret = nbd_receive_negotiate(sock, export,
                                &client->nbdflags, &client->size,
                                &client->blocksize, errp);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        closesocket(sock);
        return ret;
    }

    qemu_co_mutex_init(&client->send_mutex);
    qemu_co_mutex_init(&client->free_sema);
    client->sock = sock;

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism.  */
    qemu_set_nonblock(sock);
    nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));

    logout("Established connection with NBD server\n");
    return 0;
}
Example #24
static int nbd_co_send_request(BlockDriverState *bs,
                               struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    NbdClientSession *s = nbd_get_client_session(bs);
    AioContext *aio_context;
    int rc, ret, i;

    qemu_co_mutex_lock(&s->send_mutex);

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);
    s->send_coroutine = qemu_coroutine_self();
    aio_context = bdrv_get_aio_context(bs);

    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
                       nbd_reply_ready, nbd_restart_write, bs);
    if (qiov) {
        if (!s->is_unix) {
            socket_set_cork(s->sock, 1);
        }
        rc = nbd_send_request(s->sock, request);
        if (rc >= 0) {
            ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        if (!s->is_unix) {
            socket_set_cork(s->sock, 0);
        }
    } else {
        rc = nbd_send_request(s->sock, request);
    }
    aio_set_fd_handler(aio_context, s->sock, AIO_CLIENT_PROTOCOL,
                       nbd_reply_ready, NULL, bs);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
Example #25
static BlockAIOCB *curl_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque)
{
    CURLAIOCB *acb;

    acb = qemu_aio_get(&curl_aiocb_info, bs, cb, opaque);

    acb->qiov = qiov;
    acb->sector_num = sector_num;
    acb->nb_sectors = nb_sectors;

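    /* Defer the actual transfer to a bottom half running in the BDS's
     * AioContext. */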
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), curl_readv_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
    return &acb->common;
}
Example #26
void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too!  */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;
    if (block_job_is_paused(job)) {
        qemu_coroutine_yield();
    } else {
        co_aio_sleep_ns(bdrv_get_aio_context(job->bs), type, ns);
    }
    job->busy = true;
}
Example #27
/*
 * Called from a libqnio thread
 */
static void vxhs_iio_callback(void *ctx, uint32_t opcode, uint32_t error)
{
    VXHSAIOCB *acb = NULL;

    switch (opcode) {
    case IRP_READ_REQUEST:
    case IRP_WRITE_REQUEST:

        /*
         * ctx is VXHSAIOCB*
         * ctx is NULL if error is QNIOERROR_CHANNEL_HUP
         */
        if (ctx) {
            acb = ctx;
        } else {
            trace_vxhs_iio_callback(error);
            goto out;
        }

        if (error) {
            if (!acb->err) {
                acb->err = error;
            }
            trace_vxhs_iio_callback(error);
        }

        aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
                                vxhs_complete_aio_bh, acb);
        break;

    default:
        if (error == QNIOERROR_HUP) {
            /*
             * Channel failed, spontaneous notification,
             * not in response to I/O
             */
            trace_vxhs_iio_callback_chnfail(error, errno);
        } else {
            trace_vxhs_iio_callback_unknwn(opcode, error);
        }
        break;
    }
out:
    return;
}
Example #28
int bdrv_all_goto_snapshot(const char *name, BlockDriverState **first_bad_bs)
{
    int err = 0;
    BlockDriverState *bs = NULL;

    while (err == 0 && (bs = bdrv_next(bs))) {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        if (bdrv_can_snapshot(bs)) {
            err = bdrv_snapshot_goto(bs, name);
        }
        aio_context_release(ctx);
    }

    *first_bad_bs = bs;
    return err;
}
Example #29
bool bdrv_all_can_snapshot(BlockDriverState **first_bad_bs)
{
    bool ok = true;
    BlockDriverState *bs = NULL;

    while (ok && (bs = bdrv_next(bs))) {
        AioContext *ctx = bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        if (bdrv_is_inserted(bs) && !bdrv_is_read_only(bs)) {
            ok = bdrv_can_snapshot(bs);
        }
        aio_context_release(ctx);
    }

    *first_bad_bs = bs;
    return ok;
}
Example #30
void iothread_stop_all(void)
{
    Object *container = object_get_objects_root();
    BlockDriverState *bs;
    BdrvNextIterator it;

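    /* Move every BDS back to the main-loop AioContext before the
     * iothreads are stopped. */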
    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        if (ctx == qemu_get_aio_context()) {
            continue;
        }
        aio_context_acquire(ctx);
        bdrv_set_aio_context(bs, qemu_get_aio_context());
        aio_context_release(ctx);
    }

    object_child_foreach(container, iothread_stop, NULL);
}