Example #1
File: tar.c Project: jkivilin/qemu
/* This is where we get a request from a caller to read something */
static BlockDriverAIOCB *tar_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVTarState *s = bs->opaque;
    SparseCache *sparse;
    int64_t sec_file = sector_num + s->file_sec;
    int64_t start = sector_num * SECTOR_SIZE;
    int64_t end = start + (nb_sectors * SECTOR_SIZE);
    int i;
    TarAIOCB *acb;

    for (i = 0; i < s->sparse_num; i++) {
        sparse = &s->sparse[i];
        if (sparse->start > end) {
            /* We expect the cache to be sorted by increasing start offset */
            break;
        } else if ((sparse->start < start) && (sparse->end <= start)) {
            /* sparse before our offset */
            sec_file -= (sparse->end - sparse->start) / SECTOR_SIZE;
        } else if ((sparse->start <= start) && (sparse->end >= end)) {
            /* all our sectors are sparse */
            char *buf = g_malloc0(nb_sectors * SECTOR_SIZE);

            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            g_free(buf);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        } else if (((sparse->start >= start) && (sparse->start < end)) ||
                   ((sparse->end >= start) && (sparse->end < end))) {
            /* we're semi-sparse (worst case) */
            /* let's go synchronous and read all sectors individually */
            char *buf = g_malloc(nb_sectors * SECTOR_SIZE);
            uint64_t offs;

            for (offs = 0; offs < (nb_sectors * SECTOR_SIZE);
                 offs += SECTOR_SIZE) {
                bdrv_pread(bs, (sector_num * SECTOR_SIZE) + offs,
                           buf + offs, SECTOR_SIZE);
            }

            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        }
    }

    return bdrv_aio_readv(s->hd, sec_file, qiov, nb_sectors,
                          cb, opaque);
}
Example #2
/* called from spice server thread context only */
int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
{
    QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
    QXLCursor *cursor;
    QEMUCursor *c;

    if (!cmd) {
        return 1;
    }

    if (!dpy_cursor_define_supported(qxl->vga.con)) {
        return 0;
    }

    if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) {
        fprintf(stderr, "%s", __FUNCTION__);
        qxl_log_cmd_cursor(qxl, cmd, ext->group_id);
        fprintf(stderr, "\n");
    }
    switch (cmd->type) {
    case QXL_CURSOR_SET:
        cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
        if (!cursor) {
            return 1;
        }
        if (cursor->chunk.data_size != cursor->data_size) {
            fprintf(stderr, "%s: multiple chunks\n", __FUNCTION__);
            return 1;
        }
        c = qxl_cursor(qxl, cursor);
        if (c == NULL) {
            c = cursor_builtin_left_ptr();
        }
        qemu_mutex_lock(&qxl->ssd.lock);
        if (qxl->ssd.cursor) {
            cursor_put(qxl->ssd.cursor);
        }
        qxl->ssd.cursor = c;
        qxl->ssd.mouse_x = cmd->u.set.position.x;
        qxl->ssd.mouse_y = cmd->u.set.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    case QXL_CURSOR_MOVE:
        qemu_mutex_lock(&qxl->ssd.lock);
        qxl->ssd.mouse_x = cmd->u.position.x;
        qxl->ssd.mouse_y = cmd->u.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        qemu_bh_schedule(qxl->ssd.cursor_bh);
        break;
    }
    return 0;
}
Example #3
/* The completion BH fetches completed I/O requests and invokes their
 * callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().  In order to do this,
 * the completion events array and index are kept in qemu_laio_state.  The BH
 * reschedules itself as long as there are completions pending so it will
 * either be called again in a nested event loop or will be called after all
 * events have been completed.  When there are no events left to complete, the
 * BH returns without rescheduling.
 */
static void qemu_laio_completion_bh(void *opaque)
{
    struct qemu_laio_state *s = opaque;

    /* Fetch more completion events when empty */
    if (s->event_idx == s->event_max) {
        do {
            struct timespec ts = { 0 };
            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
                                        s->events, &ts);
        } while (s->event_max == -EINTR);

        s->event_idx = 0;
        if (s->event_max <= 0) {
            s->event_max = 0;
            return; /* no more events */
        }
    }

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    /* Process completion events */
    while (s->event_idx < s->event_max) {
        struct iocb *iocb = s->events[s->event_idx].obj;
        struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
        s->event_idx++;

        qemu_laio_process_completion(s, laiocb);
    }
}
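
The self-rescheduling trick described above can be shown in isolation. The sketch below is hypothetical (DrainState and drain_bh are invented names, not QEMU code): the BH re-arms itself before processing as long as work is pending, so a nested aio_poll() entered from a completion callback can run the BH again and keep draining, and the final run returns without rescheduling.

/* Hypothetical sketch of the self-rescheduling BH pattern (invented names). */
typedef struct DrainState {
    QEMUBH *bh;      /* created elsewhere, e.g. with qemu_bh_new(drain_bh, s) */
    int pending;     /* completions not yet handed to their callbacks */
} DrainState;

static void drain_bh(void *opaque)
{
    DrainState *s = opaque;

    if (s->pending == 0) {
        return;      /* nothing left to do: do not reschedule */
    }

    /* Re-arm first, so a nested event loop started from a callback
     * below re-enters this BH and keeps making progress. */
    qemu_bh_schedule(s->bh);

    while (s->pending > 0) {
        s->pending--;            /* stand-in for invoking one completion */
    }
}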
Example #4
void qemu_notify_event(void)
{
    if (!qemu_aio_context) {
        return;
    }
    qemu_bh_schedule(qemu_notify_bh);
}
Example #5
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
Example #6
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);

    virtio_net_vhost_status(n, status);

    if (!n->tx_waiting) {
        return;
    }

    if (virtio_net_started(n, status) && !n->vhost_started) {
        if (n->tx_timer) {
            qemu_mod_timer(n->tx_timer,
                           qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        } else {
            qemu_bh_schedule(n->tx_bh);
        }
    } else {
        if (n->tx_timer) {
            qemu_del_timer(n->tx_timer);
        } else {
            qemu_bh_cancel(n->tx_bh);
        }
    }
}
Example #7
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}
Example #8
/*
 * This aio completion is called from qemu_rbd_aio_event_reader()
 * and runs in qemu context. It schedules a bh, provided the aio
 * was not cancelled earlier.
 */
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;
    int64_t r;

    r = rcb->ret;

    if (acb->cmd != RBD_AIO_READ) {
        if (r < 0) {
            acb->ret = r;
            acb->error = 1;
        } else if (!acb->error) {
            acb->ret = rcb->size;
        }
    } else {
        if (r < 0) {
            memset(rcb->buf, 0, rcb->size);
            acb->ret = r;
            acb->error = 1;
        } else if (r < rcb->size) {
            memset(rcb->buf + r, 0, rcb->size - r);
            if (!acb->error) {
                acb->ret = rcb->size;
            }
        } else if (!acb->error) {
            acb->ret = r;
        }
    }
    /* Note that acb->bh can be NULL in case where the aio was cancelled */
    acb->bh = qemu_bh_new(rbd_aio_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
    g_free(rcb);
}
Example #9
static void
iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
                        void *command_data, void *opaque)
{
    struct IscsiTask *iTask = opaque;
    struct scsi_task *task = command_data;

    iTask->complete = 1;
    iTask->status = status;
    iTask->do_retry = 0;
    iTask->task = task;

    if (iTask->retries-- > 0 && status == SCSI_STATUS_CHECK_CONDITION
        && task->sense.key == SCSI_SENSE_UNIT_ATTENTION) {
        iTask->do_retry = 1;
        goto out;
    }

    if (status != SCSI_STATUS_GOOD) {
        error_report("iSCSI: Failure. %s", iscsi_get_error(iscsi));
    }

out:
    if (iTask->co) {
        iTask->bh = qemu_bh_new(iscsi_co_generic_bh_cb, iTask);
        qemu_bh_schedule(iTask->bh);
    }
}
Example #10
static void bh_test_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    }
}
Example #11
static BlockDriverAIOCB *raw_aio_write(BlockDriverState *bs,
        int64_t sector_num, const uint8_t *buf, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    RawAIOCB *acb;

    /*
     * If O_DIRECT is used and the buffer is not aligned, fall back
     * to synchronous I/O.
     */
    BDRVRawState *s = bs->opaque;

    if (unlikely(s->aligned_buf != NULL && ((uintptr_t) buf % 512))) {
        QEMUBH *bh;
        acb = qemu_aio_get(bs, cb, opaque);
        acb->ret = raw_pwrite(bs, 512 * sector_num, buf, 512 * nb_sectors);
        bh = qemu_bh_new(raw_aio_em_cb, acb);
        qemu_bh_schedule(bh);
        return &acb->common;
    }

    acb = raw_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque);
    if (!acb)
        return NULL;
    if (qemu_paio_write(&acb->aiocb) < 0) {
        raw_aio_remove(acb);
        return NULL;
    }
    return &acb->common;
}
Example #12
/*
 * Callback from the TPM to indicate that the response was received.
 */
static void tpm_tis_receive_cb(TPMState *s, uint8_t locty)
{
    TPMTISEmuState *tis = &s->s.tis;

    assert(s->locty_number == locty);

    qemu_bh_schedule(tis->bh);
}
Example #13
static void qemu_laio_completion_cb(EventNotifier *e)
{
    struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_bh_schedule(s->completion_bh);
    }
}
Example #14
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Example #15
static void
qlooper_addPendingIo(QLooper* looper, QLoopIo* io)
{
    if (looper->io_pending == NULL) {
        qemu_bh_schedule(looper->io_bh);
    }
    io->pendingNext    = looper->io_pending;
    looper->io_pending = io;
}
Example #16
static void
iscsi_schedule_bh(IscsiAIOCB *acb)
{
    if (acb->bh) {
        return;
    }
    acb->bh = aio_bh_new(acb->iscsilun->aio_context, iscsi_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
}
Example #17
static void vigs_fence_ack(void *user_data,
                           uint32_t fence_seq)
{
    VIGSState *s = user_data;

    vigs_fenceman_ack(s->fenceman, fence_seq);

    qemu_bh_schedule(s->fence_ack_bh);
}
Example #18
void qxl_render_update_area_done(PCIQXLDevice *qxl, QXLCookie *cookie)
{
    qemu_mutex_lock(&qxl->ssd.lock);
    trace_qxl_render_update_area_done(cookie);
    qemu_bh_schedule(qxl->update_area_bh);
    qxl->render_update_cookie_num--;
    qemu_mutex_unlock(&qxl->ssd.lock);
    g_free(cookie);
}
Example #19
static void
iscsi_schedule_bh(IscsiAIOCB *acb)
{
    if (acb->bh) {
        return;
    }
    acb->bh = qemu_bh_new(iscsi_bh_cb, acb);
    qemu_bh_schedule(acb->bh);
}
Example #20
static void xen_9pfs_evtchn_event(void *opaque)
{
    Xen9pfsRing *ring = opaque;
    evtchn_port_t port;

    port = xenevtchn_pending(ring->evtchndev);
    xenevtchn_unmask(ring->evtchndev, port);

    qemu_bh_schedule(ring->bh);
}
Example #21
/*
 * This is the callback function for rbd_aio_read and _write
 *
 * Note: this function is being called from a non qemu thread so
 * we need to be careful about what we do here. Generally we only
 * schedule a BH, and do the rest of the io completion handling
 * from rbd_finish_bh() which runs in a qemu context.
 */
static void rbd_finish_aiocb(rbd_completion_t c, RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;

    rcb->ret = rbd_aio_get_return_value(c);
    rbd_aio_release(c);

    acb->bh = qemu_bh_new(rbd_finish_bh, rcb);
    qemu_bh_schedule(acb->bh);
}
Example #22
static void bh_delete_cb(void *opaque)
{
    BHTestData *data = opaque;
    if (++data->n < data->max) {
        qemu_bh_schedule(data->bh);
    } else {
        qemu_bh_delete(data->bh);
        data->bh = NULL;
    }
}
Example #23
void failover_request_active(Error **errp)
{
    if (failover_set_state(FAILOVER_STATUS_NONE,
                           FAILOVER_STATUS_REQUIRE) != FAILOVER_STATUS_NONE) {
        error_setg(errp, "COLO failover is already activated");
        return;
    }
    failover_bh = qemu_bh_new(colo_failover_bh, NULL);
    qemu_bh_schedule(failover_bh);
}
Example #24
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    virtio_submit_multiwrite(s->bs, &mrb);

    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_request(req, &mrb);
        req = req->next;
    }

    virtio_submit_multiwrite(s->bs, &mrb);
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running)
        return;

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}
Example #25
File: iscsi.c Project: breuerr/qemu
static int
iscsi_schedule_bh(QEMUBHFunc *cb, IscsiAIOCB *acb)
{
    acb->bh = qemu_bh_new(cb, acb);
    if (!acb->bh) {
        error_report("oom: could not create iscsi bh");
        return -EIO;
    }

    qemu_bh_schedule(acb->bh);
    return 0;
}
Example #26
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio) {
        blk_send_response_all(blkdev);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio) {
        blk_send_response_all(blkdev);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Example #27
void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->bh = qemu_bh_new(block_job_defer_to_main_loop_bh, data);
    data->aio_context = bdrv_get_aio_context(job->bs);
    data->fn = fn;
    data->opaque = opaque;

    qemu_bh_schedule(data->bh);
}
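
Example #27 shows only the scheduling half of the pattern; the deferred work itself runs later in the main loop. The sketch below is a simplified guess at what the matching BH callback does, inferred from the assignments above (the real block_job_defer_to_main_loop_bh also acquires and releases the relevant AioContexts, which is omitted here):

/* Simplified sketch, not the actual QEMU implementation: delete the BH,
 * invoke the deferred function in the main loop, then free the bounce data.
 * The AioContext acquire/release done by the real callback is omitted. */
static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;

    qemu_bh_delete(data->bh);
    data->fn(data->job, data->opaque);
    g_free(data);
}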
Example #28
static void archipelago_finish_aiocb(AIORequestData *reqdata)
{
    if (reqdata->aio_cb->ret != reqdata->segreq->total) {
        reqdata->aio_cb->ret = -EIO;
    } else if (reqdata->aio_cb->ret == reqdata->segreq->total) {
        reqdata->aio_cb->ret = 0;
    }
    reqdata->aio_cb->bh = aio_bh_new(
                        bdrv_get_aio_context(reqdata->aio_cb->common.bs),
                        qemu_archipelago_complete_aio, reqdata
                        );
    qemu_bh_schedule(reqdata->aio_cb->bh);
}
Example #29
static void
virtio_crypto_handle_dataq_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    VirtIOCryptoQueue *q =
         &vcrypto->vqs[virtio_crypto_vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->dataq_bh);
}
Example #30
static void complete_request_vring(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlockDataPlane *s = req->dev->dataplane;
    stb_p(&req->in->status, status);

    vring_push(s->vdev, &req->dev->dataplane->vring, &req->elem, req->in_len);

    /* Suppress the notification to the guest via the BH and its scheduled
     * flag: requests are completed as a batch now that I/O plug & unplug
     * is in place, and the BH can still run in the dataplane aio context
     * even after it is stopped, so there is no risk of losing a
     * notification.
     */
    qemu_bh_schedule(s->bh);
}
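
Taken together, the examples share one lifecycle: work that must not run in the current context (a librbd or libiscsi callback thread, a vCPU thread, an AIO completion) is wrapped in a QEMUBH created with qemu_bh_new() or aio_bh_new(), armed with qemu_bh_schedule(), and executed later by the event loop, which typically also deletes the BH once it is no longer needed (as in Example #22). A minimal sketch of that lifecycle follows; MyState, my_deferred_work and my_schedule_completion are invented names for illustration.

/* Minimal sketch of the create -> schedule -> run -> delete lifecycle
 * (invented names; only the qemu_bh_* calls are real QEMU API). */
typedef struct MyState {
    QEMUBH *bh;
} MyState;

static void my_deferred_work(void *opaque)       /* runs in the event loop */
{
    MyState *s = opaque;

    /* ... complete the request, invoke user callbacks, etc. ... */

    qemu_bh_delete(s->bh);                       /* done with this BH */
    s->bh = NULL;
}

static void my_schedule_completion(MyState *s)   /* may be called repeatedly */
{
    if (!s->bh) {
        s->bh = qemu_bh_new(my_deferred_work, s);
    }
    qemu_bh_schedule(s->bh);    /* scheduling twice before it runs coalesces */
}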