static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBalloon *s = VIRTIO_BALLOON(dev);
    int ret;

    virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON,
                sizeof(struct virtio_balloon_config));

    ret = qemu_add_balloon_handler(virtio_balloon_to_target,
                                   virtio_balloon_stat, s);
    if (ret < 0) {
        error_setg(errp, "Adding balloon handler failed");
        virtio_cleanup(vdev);
        return;
    }

    /* inflate, deflate and guest-stats virtqueues */
    s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);

    register_savevm(dev, "virtio-balloon", -1, 1,
                    virtio_balloon_save, virtio_balloon_load, s);

    object_property_add(OBJECT(dev), "guest-stats", "guest statistics",
                        balloon_stats_get_all, NULL, NULL, s, NULL);

    object_property_add(OBJECT(dev), "guest-stats-polling-interval", "int",
                        balloon_stats_get_poll_interval,
                        balloon_stats_set_poll_interval,
                        NULL, s, NULL);
}
static void virtio_cuda_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    virtio_init(vdev, "virtio-cuda", 13, 0);
    virtio_add_queue(vdev, 1024, vq_handle_output);
}
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBalloon *s = VIRTIO_BALLOON(dev);
    int ret;

    virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON,
                sizeof(struct virtio_balloon_config));

    ret = qemu_add_balloon_handler(virtio_balloon_to_target,
                                   virtio_balloon_stat, s);
    if (ret < 0) {
        error_setg(errp, "Only one balloon device is supported");
        virtio_cleanup(vdev);
        return;
    }

    s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);

    reset_stats(s);

    register_savevm(dev, "virtio-balloon", -1, 1,
                    virtio_balloon_save, virtio_balloon_load, s);
}
static int virtio_balloon_device_init(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    int ret;

    virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON, 8);

    ret = qemu_add_balloon_handler(virtio_balloon_to_target,
                                   virtio_balloon_stat, s);
    if (ret < 0) {
        virtio_cleanup(VIRTIO_DEVICE(s));
        return -1;
    }

    s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);

    register_savevm(qdev, "virtio-balloon", -1, 1,
                    virtio_balloon_save, virtio_balloon_load, s);

    object_property_add(OBJECT(qdev), "guest-stats", "guest statistics",
                        balloon_stats_get_all, NULL, NULL, s, NULL);

    object_property_add(OBJECT(qdev), "guest-stats-polling-interval", "int",
                        balloon_stats_get_poll_interval,
                        balloon_stats_set_poll_interval,
                        NULL, s, NULL);
    return 0;
}
static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIORNG *vrng = VIRTIO_RNG(dev);
    Error *local_err = NULL;

    if (vrng->conf.period_ms <= 0) {
        error_setg(errp, "'period' parameter expects a positive integer");
        return;
    }

    /* Workaround: Property parsing does not enforce unsigned integers,
     * So this is a hack to reject such numbers. */
    if (vrng->conf.max_bytes > INT64_MAX) {
        error_setg(errp, "'max-bytes' parameter must be non-negative, "
                   "and less than 2^63");
        return;
    }

    if (vrng->conf.rng == NULL) {
        vrng->conf.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));

        user_creatable_complete(OBJECT(vrng->conf.default_backend),
                                &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            object_unref(OBJECT(vrng->conf.default_backend));
            return;
        }

        object_property_add_child(OBJECT(dev), "default-backend",
                                  OBJECT(vrng->conf.default_backend), NULL);

        /* The child property took a reference, we can safely drop ours now */
        object_unref(OBJECT(vrng->conf.default_backend));

        object_property_set_link(OBJECT(dev),
                                 OBJECT(vrng->conf.default_backend),
                                 "rng", NULL);
    }

    vrng->rng = vrng->conf.rng;
    if (vrng->rng == NULL) {
        error_setg(errp, "'rng' parameter expects a valid object");
        return;
    }

    virtio_init(vdev, "virtio-rng", VIRTIO_ID_RNG, 0);

    vrng->vq = virtio_add_queue(vdev, 8, handle_input);
    vrng->quota_remaining = vrng->conf.max_bytes;

    vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                          check_rate_limit, vrng);

    vrng->activate_timer = true;
    register_savevm(dev, "virtio-rng", -1, 1, virtio_rng_save,
                    virtio_rng_load, vrng);
}
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *blk = &(s->blk);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    Error *err = NULL;
#endif
    static int virtio_blk_id;

    if (!blk->conf.bs) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!bdrv_is_inserted(blk->conf.bs)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_serial(&blk->conf, &blk->serial);
    s->original_wce = bdrv_enable_write_cache(blk->conf.bs);
    if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) {
        error_setg(errp, "Error setting geometry");
        return;
    }

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->bs = blk->conf.bs;
    s->conf = &blk->conf;
    s->rq = NULL;
    s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;

    s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
    s->complete_request = virtio_blk_complete_request;
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    virtio_blk_data_plane_create(vdev, blk, &s->dataplane, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        virtio_cleanup(vdev);
        return;
    }
    s->migration_state_notifier.notify = virtio_blk_migration_state_changed;
    add_migration_state_change_notifier(&s->migration_state_notifier);
#endif

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);
    bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
    bdrv_set_guest_block_size(s->bs, s->conf->logical_block_size);

    bdrv_iostatus_enable(s->bs);

    add_boot_device_path(s->conf->bootindex, dev, "/disk@0,0");
}
static int virtio_blk_device_init(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlkConf *blk = &(s->blk);
    static int virtio_blk_id;

    if (!blk->conf.bs) {
        error_report("drive property not set");
        return -1;
    }
    if (!bdrv_is_inserted(blk->conf.bs)) {
        error_report("Device needs media, but drive is empty");
        return -1;
    }

    blkconf_serial(&blk->conf, &blk->serial);
    if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) {
        return -1;
    }

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    vdev->get_config = virtio_blk_update_config;
    vdev->set_config = virtio_blk_set_config;
    vdev->get_features = virtio_blk_get_features;
    vdev->set_status = virtio_blk_set_status;
    vdev->reset = virtio_blk_reset;

    s->bs = blk->conf.bs;
    s->conf = &blk->conf;
    memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
    s->rq = NULL;
    s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;

    s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (!virtio_blk_data_plane_create(vdev, blk, &s->dataplane)) {
        virtio_common_cleanup(vdev);
        return -1;
    }
#endif

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm(qdev, "virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);
    bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
    bdrv_set_buffer_alignment(s->bs, s->conf->logical_block_size);

    bdrv_iostatus_enable(s->bs);

    add_boot_device_path(s->conf->bootindex, qdev, "/disk@0,0");
    return 0;
}
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIOBalloon *s = VIRTIO_BALLOON(dev); int ret; virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON, sizeof(struct virtio_balloon_config)); ret = qemu_add_balloon_handler(virtio_balloon_to_target, virtio_balloon_stat, s); if (ret < 0) { error_setg(errp, "Only one balloon device is supported"); virtio_cleanup(vdev); return; } s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output); s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output); s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats); if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { s->free_page_vq = virtio_add_queue(vdev, VIRTQUEUE_MAX_SIZE, virtio_balloon_handle_free_page_vq); s->free_page_report_status = FREE_PAGE_REPORT_S_STOP; s->free_page_report_cmd_id = VIRTIO_BALLOON_FREE_PAGE_REPORT_CMD_ID_MIN; s->free_page_report_notify.notify = virtio_balloon_free_page_report_notify; precopy_add_notifier(&s->free_page_report_notify); if (s->iothread) { object_ref(OBJECT(s->iothread)); s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread), virtio_ballloon_get_free_page_hints, s); qemu_mutex_init(&s->free_page_lock); qemu_cond_init(&s->free_page_cond); s->block_iothread = false; } else { /* Simply disable this feature if the iothread wasn't created. */ s->host_features &= ~(1 << VIRTIO_BALLOON_F_FREE_PAGE_HINT); virtio_error(vdev, "iothread is missing"); } } reset_stats(s); }
static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    V9fsVirtioState *v = VIRTIO_9P(dev);
    V9fsState *s = &v->state;

    if (v9fs_device_realize_common(s, errp)) {
        goto out;
    }

    v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag);
    virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size);
    v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);

out:
    return;
}
static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    int i;

    vcrypto->cryptodev = vcrypto->conf.cryptodev;
    if (vcrypto->cryptodev == NULL) {
        error_setg(errp, "'cryptodev' parameter expects a valid object");
        return;
    } else if (cryptodev_backend_is_used(vcrypto->cryptodev)) {
        char *path = object_get_canonical_path_component(
                                           OBJECT(vcrypto->conf.cryptodev));
        error_setg(errp, "can't use already used cryptodev backend: %s", path);
        g_free(path);
        return;
    }

    vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1);
    if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   vcrypto->max_queues, VIRTIO_QUEUE_MAX);
        return;
    }

    virtio_init(vdev, "virtio-crypto", VIRTIO_ID_CRYPTO, vcrypto->config_size);
    vcrypto->curr_queues = 1;
    vcrypto->vqs = g_malloc0(sizeof(VirtIOCryptoQueue) * vcrypto->max_queues);
    for (i = 0; i < vcrypto->max_queues; i++) {
        vcrypto->vqs[i].dataq =
                 virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
        vcrypto->vqs[i].dataq_bh =
                 qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]);
        vcrypto->vqs[i].vcrypto = vcrypto;
    }

    vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
    if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
        vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
    } else {
        vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
    }

    virtio_crypto_init_config(vdev);
    cryptodev_backend_set_used(vcrypto->cryptodev, true);
}
int virtio_bln__init(struct kvm *kvm)
{
    if (!kvm->cfg.balloon)
        return 0;

    kvm_ipc__register_handler(KVM_IPC_BALLOON, handle_mem);
    kvm_ipc__register_handler(KVM_IPC_STAT, virtio_bln__print_stats);

    bdev.stat_waitfd = eventfd(0, 0);
    memset(&bdev.config, 0, sizeof(struct virtio_balloon_config));

    virtio_init(kvm, &bdev, &bdev.vdev, &bln_dev_virtio_ops,
                VIRTIO_DEFAULT_TRANS, PCI_DEVICE_ID_VIRTIO_BLN,
                VIRTIO_ID_BALLOON, PCI_CLASS_BLN);

    if (compat_id == -1)
        compat_id = virtio_compat_add_message("virtio-balloon",
                                              "CONFIG_VIRTIO_BALLOON");

    return 0;
}
static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    V9fsState *s = VIRTIO_9P(dev);
    int i, len;
    struct stat stat;
    FsDriverEntry *fse;
    V9fsPath path;

    virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P,
                sizeof(struct virtio_9p_config) + MAX_TAG_LEN);

    /* initialize pdu allocator */
    QLIST_INIT(&s->free_list);
    QLIST_INIT(&s->active_list);
    for (i = 0; i < (MAX_REQ - 1); i++) {
        QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
    }

    s->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);

    v9fs_path_init(&path);

    fse = get_fsdev_fsentry(s->fsconf.fsdev_id);

    if (!fse) {
        /* We don't have a fsdev identified by fsdev_id */
        error_setg(errp, "Virtio-9p device couldn't find fsdev with the "
                   "id = %s",
                   s->fsconf.fsdev_id ? s->fsconf.fsdev_id : "NULL");
        goto out;
    }

    if (!s->fsconf.tag) {
        /* we haven't specified a mount_tag */
        error_setg(errp, "fsdev with id %s needs mount_tag arguments",
                   s->fsconf.fsdev_id);
        goto out;
    }

    s->ctx.export_flags = fse->export_flags;
    s->ctx.fs_root = g_strdup(fse->path);
    s->ctx.exops.get_st_gen = NULL;
    len = strlen(s->fsconf.tag);
    if (len > MAX_TAG_LEN - 1) {
        error_setg(errp, "mount tag '%s' (%d bytes) is longer than "
                   "maximum (%d bytes)", s->fsconf.tag, len, MAX_TAG_LEN - 1);
        goto out;
    }

    s->tag = g_strdup(s->fsconf.tag);
    s->ctx.uid = -1;

    s->ops = fse->ops;

    s->config_size = sizeof(struct virtio_9p_config) + len;
    s->fid_list = NULL;
    qemu_co_rwlock_init(&s->rename_lock);

    if (s->ops->init(&s->ctx) < 0) {
        error_setg(errp, "Virtio-9p failed to initialize fs-driver with id:%s"
                   " and export path:%s", s->fsconf.fsdev_id, s->ctx.fs_root);
        goto out;
    }
    if (v9fs_init_worker_threads() < 0) {
        error_setg(errp, "worker thread initialization failed");
        goto out;
    }

    /*
     * Check details of the export path. We need to use the fs driver
     * callback to do that. Since we are in the init path, we don't
     * use co-routines here.
     */
    if (s->ops->name_to_path(&s->ctx, NULL, "/", &path) < 0) {
        error_setg(errp,
                   "error in converting name to path %s", strerror(errno));
        goto out;
    }
    if (s->ops->lstat(&s->ctx, &path, &stat)) {
        error_setg(errp, "share path %s does not exist", fse->path);
        goto out;
    } else if (!S_ISDIR(stat.st_mode)) {
        error_setg(errp, "share path %s is not a directory", fse->path);
        goto out;
    }
    v9fs_path_free(&path);

    register_savevm(dev, "virtio-9p", -1, 1, virtio_9p_save, virtio_9p_load, s);
    return;
out:
    g_free(s->ctx.fs_root);
    g_free(s->tag);
    virtio_cleanup(vdev);
    v9fs_path_free(&path);
}
static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostVSock *vsock = VHOST_VSOCK(dev);
    int vhostfd;
    int ret;

    /* Refuse to use reserved CID numbers */
    if (vsock->conf.guest_cid <= 2) {
        error_setg(errp, "guest-cid property must be greater than 2");
        return;
    }

    if (vsock->conf.guest_cid > UINT32_MAX) {
        error_setg(errp, "guest-cid property must be a 32-bit number");
        return;
    }

    if (vsock->conf.vhostfd) {
        vhostfd = monitor_fd_param(cur_mon, vsock->conf.vhostfd, errp);
        if (vhostfd == -1) {
            error_prepend(errp, "vhost-vsock: unable to parse vhostfd: ");
            return;
        }
    } else {
        vhostfd = open("/dev/vhost-vsock", O_RDWR);
        if (vhostfd < 0) {
            error_setg_errno(errp, -errno,
                             "vhost-vsock: failed to open vhost device");
            return;
        }
    }

    virtio_init(vdev, "vhost-vsock", VIRTIO_ID_VSOCK,
                sizeof(struct virtio_vsock_config));

    /* Receive and transmit queues belong to vhost */
    virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, vhost_vsock_handle_output);
    virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE, vhost_vsock_handle_output);

    /* The event queue belongs to QEMU */
    vsock->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                       vhost_vsock_handle_output);

    vsock->vhost_dev.nvqs = ARRAY_SIZE(vsock->vhost_vqs);
    vsock->vhost_dev.vqs = vsock->vhost_vqs;
    ret = vhost_dev_init(&vsock->vhost_dev, (void *)(uintptr_t)vhostfd,
                         VHOST_BACKEND_TYPE_KERNEL, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost-vsock: vhost_dev_init failed");
        goto err_virtio;
    }

    ret = vhost_vsock_set_guest_cid(vsock);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost-vsock: unable to set guest cid");
        goto err_vhost_dev;
    }

    vsock->post_load_timer = NULL;
    return;

err_vhost_dev:
    vhost_dev_cleanup(&vsock->vhost_dev);
err_virtio:
    virtio_cleanup(vdev);
    close(vhostfd);
    return;
}