/*
 * In addition to updating the iplstate, this function returns:
 * - 0 if system was ipled with external kernel
 * - -1 if no valid boot device was found
 * - ccw id of the boot device otherwise
 */
static uint64_t s390_update_iplstate(S390IPLState *ipl)
{
    DeviceState *dev_st;

    if (ipl->iplb_valid) {
        ipl->cssid = 0;
        ipl->ssid = 0;
        ipl->devno = ipl->iplb.devno;
        goto out;
    }
    if (ipl->kernel) {
        return 0;
    }
    dev_st = get_boot_device(0);
    if (dev_st) {
        VirtioCcwDevice *ccw_dev = (VirtioCcwDevice *) object_dynamic_cast(
            OBJECT(qdev_get_parent_bus(dev_st)->parent),
            TYPE_VIRTIO_CCW_DEVICE);
        if (ccw_dev) {
            ipl->cssid = ccw_dev->sch->cssid;
            ipl->ssid = ccw_dev->sch->ssid;
            ipl->devno = ccw_dev->sch->devno;
            goto out;
        }
    }
    return -1;
out:
    return (uint32_t) (ipl->cssid << 24 | ipl->ssid << 16 | ipl->devno);
}

static void apic_common_realize(struct uc_struct *uc, DeviceState *dev,
                                Error **errp)
{
    APICCommonState *s = APIC_COMMON(uc, dev);
    APICCommonClass *info;

    if (uc->apic_no >= MAX_APICS) {
        error_setg(errp, "%s initialization failed.",
                   object_get_typename(OBJECT(dev)));
        return;
    }
    s->idx = uc->apic_no++;

    info = APIC_COMMON_GET_CLASS(uc, s);
    info->realize(uc, dev, errp);
    if (!uc->mmio_registered) {
        ICCBus *b = ICC_BUS(uc, qdev_get_parent_bus(dev));
        memory_region_add_subregion(b->apic_address_space, 0, &s->io_memory);
        uc->mmio_registered = true;
    }

    /* Note: We need at least 1M to map the VAPIC option ROM */
    if (!uc->vapic && s->vapic_control & VAPIC_ENABLE_MASK) {
        // ram_size >= 1024 * 1024) {   // FIXME
        uc->vapic = NULL;
    }
    s->vapic = uc->vapic;

    if (uc->apic_report_tpr_access && info->enable_tpr_reporting) {
        info->enable_tpr_reporting(s, true);
    }
}

static void vhost_vsock_stop(VirtIODevice *vdev)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!k->set_guest_notifiers) {
        return;
    }

    ret = vhost_vsock_set_running(vsock, 0);
    if (ret < 0) {
        error_report("vhost vsock set running failed: %d", ret);
        return;
    }

    vhost_dev_stop(&vsock->vhost_dev, vdev);

    ret = k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&vsock->vhost_dev, vdev);
}

/* Plug the VirtIODevice */
int virtio_bus_plug_device(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(qdev));
    VirtioBusState *bus = VIRTIO_BUS(qbus);
    VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
    DPRINTF("%s: plug device.\n", qbus->name);

    bus->vdev = vdev;

    /*
     * The lines below will disappear when we drop VirtIOBindings, at the end
     * of the series.
     */
    bus->bindings.notify = klass->notify;
    bus->bindings.save_config = klass->save_config;
    bus->bindings.save_queue = klass->save_queue;
    bus->bindings.load_config = klass->load_config;
    bus->bindings.load_queue = klass->load_queue;
    bus->bindings.load_done = klass->load_done;
    bus->bindings.get_features = klass->get_features;
    bus->bindings.query_guest_notifiers = klass->query_guest_notifiers;
    bus->bindings.set_guest_notifiers = klass->set_guest_notifiers;
    bus->bindings.set_host_notifier = klass->set_host_notifier;
    bus->bindings.vmstate_change = klass->vmstate_change;
    virtio_bind_device(bus->vdev, &bus->bindings, qbus->parent);

    if (klass->device_plugged != NULL) {
        klass->device_plugged(qbus->parent);
    }

    return 0;
}

static CcwDevice *s390_get_ccw_device(DeviceState *dev_st)
{
    CcwDevice *ccw_dev = NULL;

    if (dev_st) {
        VirtioCcwDevice *virtio_ccw_dev = (VirtioCcwDevice *)
            object_dynamic_cast(OBJECT(qdev_get_parent_bus(dev_st)->parent),
                                TYPE_VIRTIO_CCW_DEVICE);
        if (virtio_ccw_dev) {
            ccw_dev = CCW_DEVICE(virtio_ccw_dev);
        } else {
            SCSIDevice *sd = (SCSIDevice *)
                object_dynamic_cast(OBJECT(dev_st), TYPE_SCSI_DEVICE);
            if (sd) {
                SCSIBus *bus = scsi_bus_from_device(sd);
                VirtIOSCSI *vdev = container_of(bus, VirtIOSCSI, bus);
                VirtIOSCSICcw *scsi_ccw = container_of(vdev, VirtIOSCSICcw,
                                                       vdev);

                ccw_dev = (CcwDevice *)object_dynamic_cast(OBJECT(scsi_ccw),
                                                           TYPE_CCW_DEVICE);
            }
        }
    }
    return ccw_dev;
}

static void s390_ipl_reset(DeviceState *dev)
{
    S390IPLState *ipl = S390_IPL(dev);
    S390CPU *cpu = S390_CPU(qemu_get_cpu(0));
    CPUS390XState *env = &cpu->env;

    env->psw.addr = ipl->start_addr;
    env->psw.mask = IPL_PSW_MASK;

    if (!ipl->kernel) {
        /* Tell firmware, if there is a preferred boot device */
        env->regs[7] = -1;
        DeviceState *dev_st = get_boot_device(0);
        if (dev_st) {
            VirtioCcwDevice *ccw_dev = (VirtioCcwDevice *) object_dynamic_cast(
                OBJECT(qdev_get_parent_bus(dev_st)->parent),
                TYPE_VIRTIO_CCW_DEVICE);

            if (ccw_dev) {
                env->regs[7] = ccw_dev->sch->cssid << 24 |
                               ccw_dev->sch->ssid << 16 |
                               ccw_dev->sch->devno;
            }
        }
    }

    s390_add_running_cpu(cpu);
}

static void apic_common_realize(DeviceState *dev, Error **errp)
{
    APICCommonState *s = APIC_COMMON(dev);
    APICCommonClass *info;
    static DeviceState *vapic;
    static int apic_no;
    static bool mmio_registered;

    if (apic_no >= MAX_APICS) {
        error_setg(errp, "%s initialization failed.",
                   object_get_typename(OBJECT(dev)));
        return;
    }
    s->idx = apic_no++;

    info = APIC_COMMON_GET_CLASS(s);
    info->realize(dev, errp);
    if (!mmio_registered) {
        ICCBus *b = ICC_BUS(qdev_get_parent_bus(dev));
        memory_region_add_subregion(b->apic_address_space, 0, &s->io_memory);
        mmio_registered = true;
    }

    /* Note: We need at least 1M to map the VAPIC option ROM */
    if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK &&
        ram_size >= 1024 * 1024) {
        vapic = sysbus_create_simple("kvmvapic", -1, NULL);
    }
    s->vapic = vapic;

    if (apic_report_tpr_access && info->enable_tpr_reporting) {
        info->enable_tpr_reporting(s, true);
    }
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (!conf->iothread) {
        return;
    }

    /* Don't try if transport does not support notifiers. */
    if (!k->set_guest_notifiers || !k->set_host_notifier) {
        error_setg(errp,
                   "device is incompatible with dataplane "
                   "(transport does not support notifiers)");
        return;
    }

    /* If dataplane is (re-)enabled while the guest is running there could be
     * block jobs that can conflict.
     */
    if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
        error_prepend(errp, "cannot start dataplane thread: ");
        return;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;

    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
    }
    s->ctx = iothread_get_aio_context(s->iothread);
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);

    error_setg(&s->blocker, "block device is in use by data plane");
    blk_op_block_all(conf->conf.blk, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_RESIZE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_DRIVE_DEL, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_BACKUP_SOURCE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_CHANGE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_COMMIT_SOURCE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_COMMIT_TARGET, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_EJECT, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
                   s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_MIRROR_SOURCE, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_STREAM, s->blocker);
    blk_op_unblock(conf->conf.blk, BLOCK_OP_TYPE_REPLACE, s->blocker);

    *dataplane = s;
}

static bool s390_gen_initial_iplb(S390IPLState *ipl)
{
    DeviceState *dev_st;

    dev_st = get_boot_device(0);
    if (dev_st) {
        VirtioCcwDevice *virtio_ccw_dev = (VirtioCcwDevice *)
            object_dynamic_cast(OBJECT(qdev_get_parent_bus(dev_st)->parent),
                                TYPE_VIRTIO_CCW_DEVICE);
        SCSIDevice *sd = (SCSIDevice *) object_dynamic_cast(OBJECT(dev_st),
                                                            TYPE_SCSI_DEVICE);
        VirtIONet *vn = (VirtIONet *) object_dynamic_cast(OBJECT(dev_st),
                                                          TYPE_VIRTIO_NET);

        if (vn) {
            ipl->netboot = true;
        }
        if (virtio_ccw_dev) {
            CcwDevice *ccw_dev = CCW_DEVICE(virtio_ccw_dev);

            ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_CCW_LEN);
            ipl->iplb.blk0_len =
                cpu_to_be32(S390_IPLB_MIN_CCW_LEN - S390_IPLB_HEADER_LEN);
            ipl->iplb.pbt = S390_IPL_TYPE_CCW;
            ipl->iplb.ccw.devno = cpu_to_be16(ccw_dev->sch->devno);
            ipl->iplb.ccw.ssid = ccw_dev->sch->ssid & 3;
        } else if (sd) {
            SCSIBus *bus = scsi_bus_from_device(sd);
            VirtIOSCSI *vdev = container_of(bus, VirtIOSCSI, bus);
            VirtIOSCSICcw *scsi_ccw = container_of(vdev, VirtIOSCSICcw, vdev);
            CcwDevice *ccw_dev;

            ccw_dev = (CcwDevice *)object_dynamic_cast(OBJECT(scsi_ccw),
                                                       TYPE_CCW_DEVICE);
            if (!ccw_dev) {
                /* It might be a PCI device instead */
                return false;
            }

            ipl->iplb.len = cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN);
            ipl->iplb.blk0_len =
                cpu_to_be32(S390_IPLB_MIN_QEMU_SCSI_LEN - S390_IPLB_HEADER_LEN);
            ipl->iplb.pbt = S390_IPL_TYPE_QEMU_SCSI;
            ipl->iplb.scsi.lun = cpu_to_be32(sd->lun);
            ipl->iplb.scsi.target = cpu_to_be16(sd->id);
            ipl->iplb.scsi.channel = cpu_to_be16(sd->channel);
            ipl->iplb.scsi.devno = cpu_to_be16(ccw_dev->sch->devno);
            ipl->iplb.scsi.ssid = ccw_dev->sch->ssid & 3;
        } else {
            return false; /* unknown device */
        }

        if (!s390_ipl_set_loadparm(ipl->iplb.loadparm)) {
            ipl->iplb.flags |= DIAG308_FLAGS_LP_VALID;
        }
        return true;
    }
    return false;
}

/*
 * isa_get_irq() returns the corresponding qemu_irq entry for the i8259.
 *
 * This function is only for special cases such as the 'ferr', and
 * temporary use for normal devices until they are converted to qdev.
 */
qemu_irq isa_get_irq(ISADevice *dev, int isairq)
{
    assert(!dev || ISA_BUS(qdev_get_parent_bus(DEVICE(dev))) == isabus);
    if (isairq < 0 || isairq > 15) {
        hw_error("isa irq %d invalid", isairq);
    }
    return isabus->irqs[isairq];
}

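/*
 * Hypothetical usage sketch (not part of the code above): a legacy,
 * not-yet-qdevified device model might cache the qemu_irq for its ISA
 * IRQ line and pulse it to interrupt the guest. MyLegacyState,
 * my_legacy_init and the choice of IRQ 5 are illustrative assumptions.
 */
typedef struct MyLegacyState {
    qemu_irq irq;
} MyLegacyState;

static void my_legacy_init(MyLegacyState *s, ISADevice *isadev)
{
    /* Look up the shared i8259 input for ISA IRQ 5. */
    s->irq = isa_get_irq(isadev, 5);
}

static void my_legacy_raise_irq(MyLegacyState *s)
{
    qemu_set_irq(s->irq, 1);    /* assert */
    qemu_set_irq(s->irq, 0);    /* deassert */
}
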
/* Context: QEMU global mutex held */
void virtio_scsi_dataplane_start(VirtIOSCSI *s)
{
    int i;
    int rc;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);

    if (s->dataplane_started ||
        s->dataplane_starting ||
        s->dataplane_fenced ||
        s->ctx != iothread_get_aio_context(vs->conf.iothread)) {
        return;
    }

    s->dataplane_starting = true;

    /* Set up guest notifier (irq) */
    rc = k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, true);
    if (rc != 0) {
        fprintf(stderr, "virtio-scsi: Failed to set guest notifiers (%d), "
                "ensure -enable-kvm is set\n", rc);
        goto fail_guest_notifiers;
    }

    aio_context_acquire(s->ctx);
    rc = virtio_scsi_vring_init(s, vs->ctrl_vq, 0);
    if (rc) {
        goto fail_vrings;
    }
    rc = virtio_scsi_vring_init(s, vs->event_vq, 1);
    if (rc) {
        goto fail_vrings;
    }
    for (i = 0; i < vs->conf.num_queues; i++) {
        rc = virtio_scsi_vring_init(s, vs->cmd_vqs[i], i + 2);
        if (rc) {
            goto fail_vrings;
        }
    }

    s->dataplane_starting = false;
    s->dataplane_started = true;
    aio_context_release(s->ctx);
    return;

fail_vrings:
    virtio_scsi_clear_aio(s);
    aio_context_release(s->ctx);
    for (i = 0; i < vs->conf.num_queues + 2; i++) {
        k->set_host_notifier(qbus->parent, i, false);
    }
    k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
fail_guest_notifiers:
    s->dataplane_fenced = true;
    s->dataplane_starting = false;
    s->dataplane_started = true;
}

static void vhost_vsock_start(VirtIODevice *vdev)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;
    int i;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return;
    }

    ret = vhost_dev_enable_notifiers(&vsock->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error enabling host notifiers: %d", -ret);
        return;
    }

    ret = k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier: %d", -ret);
        goto err_host_notifiers;
    }

    vsock->vhost_dev.acked_features = vdev->guest_features;
    ret = vhost_dev_start(&vsock->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error starting vhost: %d", -ret);
        goto err_guest_notifiers;
    }

    ret = vhost_vsock_set_running(vsock, 1);
    if (ret < 0) {
        error_report("Error starting vhost vsock: %d", -ret);
        goto err_dev_start;
    }

    /* guest_notifier_mask/pending not used yet, so just unmask
     * everything here. virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < vsock->vhost_dev.nvqs; i++) {
        vhost_virtqueue_mask(&vsock->vhost_dev, vdev, i, false);
    }

    return;

err_dev_start:
    vhost_dev_stop(&vsock->vhost_dev, vdev);
err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&vsock->vhost_dev, vdev);
}

static void vhost_net_stop_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { .fd = -1 };

    if (!net->dev.started) {
        return;
    }
    for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
        int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
        assert(r >= 0);
    }
    net->nc->info->poll(net->nc, true);
    vhost_dev_stop(&net->dev, dev);
    vhost_dev_disable_notifiers(&net->dev, dev);
}

int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, i = 0;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        r = -ENOSYS;
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        r = vhost_net_start_one(tap_get_vhost_net(ncs[i].peer), dev, i * 2);
        if (r < 0) {
            goto err;
        }
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    return 0;

err:
    while (--i >= 0) {
        vhost_net_stop_one(tap_get_vhost_net(ncs[i].peer), dev);
    }
    return r;
}

DeviceState *aux_create_slave(AUXBus *bus, const char *type, uint32_t addr)
{
    DeviceState *dev;

    dev = DEVICE(object_new(type));
    assert(dev);
    qdev_set_parent_bus(dev, &bus->qbus);
    qdev_init_nofail(dev);
    aux_bus_map_device(AUX_BUS(qdev_get_parent_bus(dev)), AUX_SLAVE(dev), addr);
    return dev;
}

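/*
 * Hypothetical usage sketch (not part of the code above): a display
 * controller owning an AUX bus could instantiate a slave with
 * aux_create_slave(), which both attaches it to the bus and maps it at
 * the given channel address. The type name "my-aux-slave" and the
 * address 0x0 are assumptions made purely for illustration.
 */
static void my_dp_create_aux_devices(AUXBus *bus)
{
    DeviceState *slave;

    slave = aux_create_slave(bus, "my-aux-slave", 0x0);
    /* ... the caller would normally keep a reference to the slave ... */
    (void)slave;
}
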
static void adb_device_realizefn(DeviceState *dev, Error **errp)
{
    ADBDevice *d = ADB_DEVICE(dev);
    ADBBusState *bus = ADB_BUS(qdev_get_parent_bus(dev));

    if (bus->nb_devices >= MAX_ADB_DEVICES) {
        return;
    }

    bus->devices[bus->nb_devices++] = d;
}

static void mch_init_dmar(MCHPCIState *mch)
{
    PCIBus *pci_bus = PCI_BUS(qdev_get_parent_bus(DEVICE(mch)));

    mch->iommu = INTEL_IOMMU_DEVICE(qdev_create(NULL, TYPE_INTEL_IOMMU_DEVICE));
    object_property_add_child(OBJECT(mch), "intel-iommu",
                              OBJECT(mch->iommu), NULL);
    qdev_init_nofail(DEVICE(mch->iommu));
    sysbus_mmio_map(SYS_BUS_DEVICE(mch->iommu), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);

    pci_setup_iommu(pci_bus, q35_host_dma_iommu, mch->iommu);
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
    int r;

    if (vblk->dataplane_started || s->starting) {
        return;
    }

    s->starting = true;
    s->vq = virtio_get_queue(s->vdev, 0);

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, 1, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set guest notifier (%d), "
                "ensure -enable-kvm is set\n", r);
        goto fail_guest_notifiers;
    }
    s->guest_notifier = virtio_queue_get_guest_notifier(s->vq);

    /* Set up virtqueue notify */
    r = k->set_host_notifier(qbus->parent, 0, true);
    if (r != 0) {
        fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
        goto fail_host_notifier;
    }

    s->starting = false;
    vblk->dataplane_started = true;
    trace_virtio_blk_data_plane_start(s);

    blk_set_aio_context(s->conf->conf.blk, s->ctx);

    /* Kick right away to begin processing requests already in vring */
    event_notifier_set(virtio_queue_get_host_notifier(s->vq));

    /* Get this show started by hooking up our callbacks */
    aio_context_acquire(s->ctx);
    virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx,
                                               virtio_blk_data_plane_handle_output);
    aio_context_release(s->ctx);
    return;

fail_host_notifier:
    k->set_guest_notifiers(qbus->parent, 1, false);
fail_guest_notifiers:
    vblk->dataplane_disabled = true;
    s->starting = false;
    vblk->dataplane_started = true;
}

/* A VirtIODevice is being plugged */
int virtio_bus_device_plugged(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(qdev));
    VirtioBusState *bus = VIRTIO_BUS(qbus);
    VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
    DPRINTF("%s: plug device.\n", qbus->name);

    if (klass->device_plugged != NULL) {
        klass->device_plugged(qbus->parent);
    }

    return 0;
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (!conf->iothread) {
        return;
    }

    /* Don't try if transport does not support notifiers. */
    if (!k->set_guest_notifiers || !k->set_host_notifier) {
        error_setg(errp,
                   "device is incompatible with dataplane "
                   "(transport does not support notifiers)");
        return;
    }

    /* If dataplane is (re-)enabled while the guest is running there could be
     * block jobs that can conflict.
     */
    if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
        error_prepend(errp, "cannot start dataplane thread: ");
        return;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;

    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
    }
    s->ctx = iothread_get_aio_context(s->iothread);
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);

    s->insert_notifier.notify = data_plane_blk_insert_notifier;
    s->remove_notifier.notify = data_plane_blk_remove_notifier;
    blk_add_insert_bs_notifier(conf->conf.blk, &s->insert_notifier);
    blk_add_remove_bs_notifier(conf->conf.blk, &s->remove_notifier);

    data_plane_set_up_op_blockers(s);

    *dataplane = s;
}

/* A VirtIODevice is being unplugged */
void virtio_bus_device_unplugged(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(qdev));
    VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(qbus);

    DPRINTF("%s: remove device.\n", qbus->name);

    if (vdev != NULL) {
        if (klass->device_unplugged != NULL) {
            klass->device_unplugged(qbus->parent);
        }
    }
}

/* PCIe MMCFG */
static void mch_update_pciexbar(MCHPCIState *mch)
{
    PCIDevice *pci_dev = PCI_DEVICE(mch);
    BusState *bus = qdev_get_parent_bus(DEVICE(mch));
    PCIExpressHost *pehb = PCIE_HOST_BRIDGE(bus->parent);

    uint64_t pciexbar;
    int enable;
    uint64_t addr;
    uint64_t addr_mask;
    uint32_t length;

    pciexbar = pci_get_quad(pci_dev->config + MCH_HOST_BRIDGE_PCIEXBAR);
    enable = pciexbar & MCH_HOST_BRIDGE_PCIEXBAREN;
    addr_mask = MCH_HOST_BRIDGE_PCIEXBAR_ADMSK;
    switch (pciexbar & MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_MASK) {
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_256M:
        length = 256 * 1024 * 1024;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_128M:
        length = 128 * 1024 * 1024;
        addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_128ADMSK |
            MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_64M:
        length = 64 * 1024 * 1024;
        addr_mask |= MCH_HOST_BRIDGE_PCIEXBAR_64ADMSK;
        break;
    case MCH_HOST_BRIDGE_PCIEXBAR_LENGTH_RVD:
    default:
        abort();
    }
    addr = pciexbar & addr_mask;
    pcie_host_mmcfg_update(pehb, enable, addr, length);
    /* Leave enough space for the MCFG BAR */
    /*
     * TODO: this matches current bios behaviour, but it's not a power of two,
     * which means an MTRR can't cover it exactly.
     */
    if (enable) {
        range_set_bounds(&mch->pci_hole, addr + length,
                         IO_APIC_DEFAULT_ADDRESS - 1);
    } else {
        range_set_bounds(&mch->pci_hole, MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT,
                         IO_APIC_DEFAULT_ADDRESS - 1);
    }
}

static char *virtual_css_bus_get_dev_path(DeviceState *dev)
{
    CcwDevice *ccw_dev = CCW_DEVICE(dev);
    SubchDev *sch = ccw_dev->sch;
    VirtualCssBridge *bridge =
        VIRTUAL_CSS_BRIDGE(qdev_get_parent_bus(dev)->parent);

    /*
     * We can't provide a dev path for backward compatibility on
     * older machines, as it is visible in the migration stream.
     */
    return bridge->css_dev_path ?
        g_strdup_printf("/%02x.%1x.%04x", sch->cssid, sch->ssid, sch->devno) :
        NULL;
}

/* Context: QEMU global mutex held */
void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);

    assert(!s->ctx);
    s->ctx = iothread_get_aio_context(vs->conf.iothread);

    /* Don't try if transport does not support notifiers. */
    if (!k->set_guest_notifiers || !k->set_host_notifier) {
        fprintf(stderr, "virtio-scsi: Failed to set iothread "
                "(transport does not support notifiers)");
        exit(1);
    }
}

static void xilinx_pcie_root_realize(PCIDevice *pci_dev, Error **errp)
{
    BusState *bus = qdev_get_parent_bus(DEVICE(pci_dev));
    XilinxPCIEHost *s = XILINX_PCIE_HOST(bus->parent);

    pci_set_word(pci_dev->config + PCI_COMMAND,
                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
    pci_set_word(pci_dev->config + PCI_MEMORY_BASE, s->mmio_base >> 16);
    pci_set_word(pci_dev->config + PCI_MEMORY_LIMIT,
                 ((s->mmio_base + s->mmio_size - 1) >> 16) & 0xfff0);

    pci_bridge_initfn(pci_dev, TYPE_PCI_BUS);

    if (pcie_endpoint_cap_v1_init(pci_dev, 0x80) < 0) {
        error_setg(errp, "Failed to initialize PCIe capability");
    }
}

/* aux-slave implementation */
static void aux_slave_dev_print(Monitor *mon, DeviceState *dev, int indent)
{
    AUXBus *bus = AUX_BUS(qdev_get_parent_bus(dev));
    AUXSlave *s;

    /* Don't print anything if the device is I2C "bridge". */
    if (aux_bus_is_bridge(bus, dev)) {
        return;
    }

    s = AUX_SLAVE(dev);

    monitor_printf(mon, "%*smemory " TARGET_FMT_plx "/" TARGET_FMT_plx "\n",
                   indent, "",
                   object_property_get_uint(OBJECT(s->mmio), "addr", NULL),
                   memory_region_size(s->mmio));
}

int vhost_scsi_common_start(VHostSCSICommon *vsc)
{
    int ret, i;
    VirtIODevice *vdev = VIRTIO_DEVICE(vsc);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    ret = vhost_dev_enable_notifiers(&vsc->dev, vdev);
    if (ret < 0) {
        return ret;
    }

    ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier");
        goto err_host_notifiers;
    }

    vsc->dev.acked_features = vdev->guest_features;
    ret = vhost_dev_start(&vsc->dev, vdev);
    if (ret < 0) {
        error_report("Error start vhost dev");
        goto err_guest_notifiers;
    }

    /* guest_notifier_mask/pending not used yet, so just unmask
     * everything here. virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < vsc->dev.nvqs; i++) {
        vhost_virtqueue_mask(&vsc->dev, vdev, vsc->dev.vq_index + i, false);
    }

    return ret;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&vsc->dev, vdev);
    return ret;
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;

    if (!vblk->dataplane_started || s->stopping) {
        return;
    }

    /* Better luck next time. */
    if (vblk->dataplane_disabled) {
        vblk->dataplane_disabled = false;
        vblk->dataplane_started = false;
        return;
    }
    s->stopping = true;
    trace_virtio_blk_data_plane_stop(s);

    aio_context_acquire(s->ctx);

    /* Stop notifications for new requests from guest */
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
    }

    /* Drain and switch bs back to the QEMU main loop */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());

    aio_context_release(s->ctx);

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    vblk->dataplane_started = false;
    s->stopping = false;
}

static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int rc;

    /* Set up virtqueue notify */
    rc = k->set_host_notifier(qbus->parent, n, true);
    if (rc != 0) {
        fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
                rc);
        s->dataplane_fenced = true;
        return rc;
    }

    virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, true, true);
    return 0;
}

static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
                                  void (*fn)(VirtIODevice *vdev, VirtQueue *vq))
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    int rc;

    /* Set up virtqueue notify */
    rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true);
    if (rc != 0) {
        fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
                rc);
        s->dataplane_fenced = true;
        return rc;
    }

    virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, fn);
    return 0;
}

/* A VirtIODevice is being plugged */
void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
{
    DeviceState *qdev = DEVICE(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(qdev));
    VirtioBusState *bus = VIRTIO_BUS(qbus);
    VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
    Error *local_err = NULL;

    DPRINTF("%s: plug device.\n", qbus->name);

    if (klass->pre_plugged != NULL) {
        klass->pre_plugged(qbus->parent, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* Get the features of the plugged device. */
    assert(vdc->get_features != NULL);
    vdev->host_features = vdc->get_features(vdev, vdev->host_features,
                                            &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (klass->device_plugged != NULL) {
        klass->device_plugged(qbus->parent, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (klass->get_dma_as != NULL && has_iommu) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_IOMMU_PLATFORM);
        vdev->dma_as = klass->get_dma_as(qbus->parent);
    } else {
        vdev->dma_as = &address_space_memory;
    }
}
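
/*
 * Simplified caller-side sketch (an assumption about the surrounding
 * code, shown only for illustration): the generic virtio realize path
 * runs the device-specific realize first and then invokes
 * virtio_bus_device_plugged() so the transport can finish the plug.
 * my_virtio_device_realize is a hypothetical name; error handling is
 * trimmed down.
 */
static void my_virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    /* Let the transport (virtio-pci, virtio-ccw, ...) complete the plug. */
    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
    }
}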