static void powernv_populate_ipmi_bt(ISADevice *d, void *fdt, int lpc_off)
{
    const char compatible[] = "bt\0ipmi-bt";
    uint32_t io_base;
    uint32_t io_regs[] = {
        cpu_to_be32(1),
        0, /* 'io_base' retrieved from the 'ioport' property of 'isa-ipmi-bt' */
        cpu_to_be32(3)
    };
    uint32_t irq;
    char *name;
    int node;

    io_base = object_property_get_int(OBJECT(d), "ioport", &error_fatal);
    io_regs[1] = cpu_to_be32(io_base);

    irq = object_property_get_int(OBJECT(d), "irq", &error_fatal);

    name = g_strdup_printf("%s@i%x", qdev_fw_name(DEVICE(d)), io_base);
    node = fdt_add_subnode(fdt, lpc_off, name);
    _FDT(node);
    g_free(name);

    _FDT((fdt_setprop(fdt, node, "reg", io_regs, sizeof(io_regs))));
    _FDT((fdt_setprop(fdt, node, "compatible", compatible,
                      sizeof(compatible))));

    /* Mark it as reserved to avoid Linux trying to claim it */
    _FDT((fdt_setprop_string(fdt, node, "status", "reserved")));
    _FDT((fdt_setprop_cell(fdt, node, "interrupts", irq)));
    _FDT((fdt_setprop_cell(fdt, node, "interrupt-parent",
                           fdt_get_phandle(fdt, lpc_off))));
}
/*
 * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
 * Structure
 */
static void nvdimm_build_structure_memdev(GArray *structures,
                                          DeviceState *dev)
{
    NvdimmNfitMemDev *nfit_memdev;
    uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                            NULL);
    uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                            NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t handle = nvdimm_slot_to_handle(slot);

    nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));

    nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
                                         Range Map Structure */);
    nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
    nfit_memdev->nfit_handle = cpu_to_le32(handle);

    /*
     * associate memory device with System Physical Address Range
     * Structure.
     */
    nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
    /* associate memory device with Control Region Structure. */
    nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* The memory region on the device. */
    nfit_memdev->region_len = cpu_to_le64(size);
    nfit_memdev->region_dpa = cpu_to_le64(addr);

    /* Only one interleave for PMEM. */
    nfit_memdev->interleave_ways = cpu_to_le16(1);
}
static uint64_t acpi_memory_hotplug_read(void *opaque, hwaddr addr,
                                         unsigned int size)
{
    uint32_t val = 0;
    MemHotplugState *mem_st = opaque;
    MemStatus *mdev;
    Object *o;

    if (mem_st->selector >= mem_st->dev_count) {
        trace_mhp_acpi_invalid_slot_selected(mem_st->selector);
        return 0;
    }

    mdev = &mem_st->devs[mem_st->selector];
    o = OBJECT(mdev->dimm);
    switch (addr) {
    case 0x0: /* Lo part of phys address where DIMM is mapped */
        val = o ? object_property_get_int(o, PC_DIMM_ADDR_PROP, NULL) : 0;
        trace_mhp_acpi_read_addr_lo(mem_st->selector, val);
        break;
    case 0x4: /* Hi part of phys address where DIMM is mapped */
        val = o ? object_property_get_int(o, PC_DIMM_ADDR_PROP, NULL) >> 32 : 0;
        trace_mhp_acpi_read_addr_hi(mem_st->selector, val);
        break;
    case 0x8: /* Lo part of DIMM size */
        val = o ? object_property_get_int(o, PC_DIMM_SIZE_PROP, NULL) : 0;
        trace_mhp_acpi_read_size_lo(mem_st->selector, val);
        break;
    case 0xc: /* Hi part of DIMM size */
        val = o ? object_property_get_int(o, PC_DIMM_SIZE_PROP, NULL) >> 32 : 0;
        trace_mhp_acpi_read_size_hi(mem_st->selector, val);
        break;
    case 0x10: /* node proximity for _PXM method */
        val = o ? object_property_get_int(o, PC_DIMM_NODE_PROP, NULL) : 0;
        trace_mhp_acpi_read_pxm(mem_st->selector, val);
        break;
    case 0x14: /* pack and return is_* fields */
        val |= mdev->is_enabled   ? 1 : 0;
        val |= mdev->is_inserting ? 2 : 0;
        val |= mdev->is_removing  ? 4 : 0;
        trace_mhp_acpi_read_flags(mem_st->selector, val);
        break;
    default:
        val = ~0;
        break;
    }
    return val;
}
/*
 * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
 */
static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
{
    NvdimmNfitControlRegion *nfit_dcr;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t sn = nvdimm_slot_to_sn(slot);

    nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));

    nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
    nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
    nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* vendor: Intel. */
    nfit_dcr->vendor_id = cpu_to_le16(0x8086);
    nfit_dcr->device_id = cpu_to_le16(1);

    /* The _DSM method is following Intel's DSM specification. */
    nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
                                             in ACPI 6.0 is 1. */);
    nfit_dcr->serial_number = cpu_to_le32(sn);
    nfit_dcr->fic = cpu_to_le16(0x301 /* Format Interface Code:
                                         Byte addressable, no energy backed.
                                         See ACPI 6.2, sect 5.2.25.6 and
                                         JEDEC Annex L Release 3. */);
}
static int query_memdev(Object *obj, void *opaque)
{
    MemdevList **list = opaque;
    MemdevList *m = NULL;
    Error *err = NULL;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        m = g_malloc0(sizeof(*m));

        m->value = g_malloc0(sizeof(*m->value));

        m->value->size = object_property_get_int(obj, "size", &err);
        if (err) {
            goto error;
        }

        m->value->merge = object_property_get_bool(obj, "merge", &err);
        if (err) {
            goto error;
        }

        m->value->dump = object_property_get_bool(obj, "dump", &err);
        if (err) {
            goto error;
        }

        m->value->prealloc = object_property_get_bool(obj, "prealloc", &err);
        if (err) {
            goto error;
        }

        m->value->policy = object_property_get_enum(obj, "policy",
                                                    HostMemPolicy_lookup,
                                                    &err);
        if (err) {
            goto error;
        }

        object_property_get_uint16List(obj, "host-nodes",
                                       &m->value->host_nodes, &err);
        if (err) {
            goto error;
        }

        m->next = *list;
        *list = m;
    }

    return 0;

error:
    g_free(m->value);
    g_free(m);

    return -1;
}
static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
{
    for (; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;
        int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                           NULL);
        uint32_t handle = nvdimm_slot_to_handle(slot);
        Aml *nvdimm_dev;

        nvdimm_dev = aml_device("NV%02X", slot);

        /*
         * ACPI 6.0: 9.20 NVDIMM Devices:
         *
         * _ADR object that is used to supply OSPM with unique address
         * of the NVDIMM device. This is done by returning the NFIT Device
         * handle that is used to identify the associated entries in ACPI
         * table NFIT or _FIT.
         */
        aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));

        nvdimm_build_device_dsm(nvdimm_dev);
        aml_append(root_dev, nvdimm_dev);
    }
}
/* RP helper function to attach a device to an adaptor. */
void rp_device_attach(Object *adaptor, Object *dev,
                      int rp_nr, int dev_nr,
                      Error **errp)
{
    Error *err = NULL;
    uint32_t nr_devs;
    char *name;
    int i;

    assert(adaptor);
    assert(dev);

    /* Verify that the adaptor is of Remote Port type. */
    if (!object_dynamic_cast(adaptor, TYPE_REMOTE_PORT)) {
        error_setg(errp, "%s is not a Remote-Port adaptor!",
                   object_get_canonical_path(adaptor));
        return;
    }

    name = g_strdup_printf("rp-adaptor%d", rp_nr);
    object_property_set_link(dev, adaptor, name, &err);
    g_free(name);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    name = g_strdup_printf("rp-chan%d", rp_nr);
    object_property_set_int(dev, dev_nr, name, &err);
    g_free(name);
    if (err != NULL && !object_dynamic_cast(dev, TYPE_REMOTE_PORT_DEVICE)) {
        /*
         * RP devices that only receive requests may not need to
         * know their channel/dev number. If not, treat this as
         * an error.
         */
        error_propagate(errp, err);
        return;
    }
    err = NULL;

    nr_devs = object_property_get_int(dev, "nr-devs", &err);
    if (err) {
        nr_devs = 1;
        err = NULL;
    }

    /* Multi-channel devs use consecutive numbering. */
    for (i = 0; i < nr_devs; i++) {
        name = g_strdup_printf("remote-port-dev%d", dev_nr + i);
        object_property_set_link(adaptor, dev, name, &err);
        g_free(name);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }
}
uint16_t pvpanic_port(void)
{
    Object *o = object_resolve_path_type("", TYPE_ISA_PVPANIC_DEVICE, NULL);
    if (!o) {
        return 0;
    }
    return object_property_get_int(o, PVPANIC_IOPORT_PROP, NULL);
}
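/*
 * Hedged usage sketch, not part of the source above: a caller such as an
 * ACPI table builder can probe for the pvpanic I/O port and skip emitting
 * any description when the device is absent. The helper name
 * example_describe_pvpanic() is hypothetical.
 */
static void example_describe_pvpanic(void)
{
    uint16_t port = pvpanic_port();

    if (!port) {
        /* No TYPE_ISA_PVPANIC_DEVICE instance exists; nothing to describe. */
        return;
    }
    /* ... describe an I/O resource starting at 'port' ... */
}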
uint64_t pc_dimm_get_free_addr(uint64_t address_space_start,
                               uint64_t address_space_size,
                               uint64_t *hint, uint64_t size,
                               Error **errp)
{
    GSList *list = NULL, *item;
    uint64_t new_addr, ret = 0;
    uint64_t address_space_end = address_space_start + address_space_size;

    if (!address_space_size) {
        error_setg(errp, "memory hotplug is not enabled, "
                   "please add maxmem option");
        goto out;
    }

    assert(address_space_end > address_space_start);
    object_child_foreach(qdev_get_machine(), pc_dimm_built_list, &list);

    if (hint) {
        new_addr = *hint;
    } else {
        new_addr = address_space_start;
    }

    /* find address range that will fit new DIMM */
    for (item = list; item; item = g_slist_next(item)) {
        PCDIMMDevice *dimm = item->data;
        uint64_t dimm_size = object_property_get_int(OBJECT(dimm),
                                                     PC_DIMM_SIZE_PROP,
                                                     errp);
        if (errp && *errp) {
            goto out;
        }

        if (ranges_overlap(dimm->addr, dimm_size, new_addr, size)) {
            if (hint) {
                DeviceState *d = DEVICE(dimm);
                error_setg(errp, "address range conflicts with '%s'", d->id);
                goto out;
            }
            new_addr = dimm->addr + dimm_size;
        }
    }
    ret = new_addr;

    if (new_addr < address_space_start) {
        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
                   "] at 0x%" PRIx64, new_addr, size, address_space_start);
    } else if ((new_addr + size) > address_space_end) {
        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
                   "] beyond 0x%" PRIx64, new_addr, size, address_space_end);
    }

out:
    g_slist_free(list);
    return ret;
}
static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}
/*
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure
 */
static void nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
{
    NvdimmNfitSpa *nfit_spa;
    uint64_t addr = object_property_get_int(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                            NULL);
    uint64_t size = object_property_get_int(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                            NULL);
    uint32_t node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP,
                                            NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);

    nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));

    nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
                                      Structure */);
    nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
    nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));

    /*
     * Control region is strict as all the device info, such as SN, index,
     * is associated with slot id.
     */
    nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
                                       management during hot add/online
                                       operation */ |
                                  2 /* Data in Proximity Domain field is
                                       valid */);

    /* NUMA node. */
    nfit_spa->proximity_domain = cpu_to_le32(node);
    /* the region reported as PMEM. */
    memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
           sizeof(nvdimm_nfit_spa_uuid));
    nfit_spa->spa_base = cpu_to_le64(addr);
    nfit_spa->spa_length = cpu_to_le64(size);

    /* It is the PMEM and can be cached as writeback. */
    nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
                                     0x8000ULL /* EFI_MEMORY_NV */);
}
void pc_dimm_plug(DeviceState *dev, MachineState *machine, uint64_t align,
                  Error **errp)
{
    int slot;
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm,
                                                              &error_abort);
    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
    Error *local_err = NULL;
    uint64_t addr;

    addr = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                    &local_err);
    if (local_err) {
        goto out;
    }

    addr = memory_device_get_free_addr(machine, !addr ? NULL : &addr, align,
                                       memory_region_size(mr), &local_err);
    if (local_err) {
        goto out;
    }

    object_property_set_uint(OBJECT(dev), addr, PC_DIMM_ADDR_PROP,
                             &local_err);
    if (local_err) {
        goto out;
    }
    trace_mhp_pc_dimm_assigned_address(addr);

    slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                   &local_err);
    if (local_err) {
        goto out;
    }

    slot = pc_dimm_get_free_slot(slot == PC_DIMM_UNASSIGNED_SLOT ? NULL : &slot,
                                 machine->ram_slots, &local_err);
    if (local_err) {
        goto out;
    }
    object_property_set_int(OBJECT(dev), slot, PC_DIMM_SLOT_PROP, &local_err);
    if (local_err) {
        goto out;
    }
    trace_mhp_pc_dimm_assigned_slot(slot);

    memory_device_plug_region(machine, mr, addr);
    vmstate_register_ram(vmstate_mr, dev);

out:
    error_propagate(errp, local_err);
}
static void acpi_get_pci_info(PcPciInfo *info)
{
    Object *pci_host;
    bool ambiguous;

    pci_host = object_resolve_path_type("", TYPE_PCI_HOST_BRIDGE, &ambiguous);
    g_assert(!ambiguous);
    g_assert(pci_host);

    info->w32.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE_START,
                                              NULL);
    info->w32.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE_END,
                                            NULL);
    info->w64.begin = object_property_get_int(pci_host,
                                              PCI_HOST_PROP_PCI_HOLE64_START,
                                              NULL);
    info->w64.end = object_property_get_int(pci_host,
                                            PCI_HOST_PROP_PCI_HOLE64_END,
                                            NULL);
}
static int acpi_pcihp_get_bsel(PCIBus *bus)
{
    Error *local_err = NULL;
    int64_t bsel = object_property_get_int(OBJECT(bus), ACPI_PCIHP_PROP_BSEL,
                                           &local_err);

    if (local_err || bsel < 0 || bsel >= ACPI_PCIHP_MAX_HOTPLUG_BUS) {
        if (local_err) {
            error_free(local_err);
        }
        return -1;
    } else {
        return bsel;
    }
}
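/*
 * Hedged usage sketch, illustration only and not from the source: callers
 * that walk PCI buses typically treat a negative return as "this bus is not
 * managed by the ACPI hotplug controller" and skip it. The helper name
 * example_handle_hotplug_bus() is hypothetical.
 */
static void example_handle_hotplug_bus(PCIBus *bus)
{
    int bsel = acpi_pcihp_get_bsel(bus);

    if (bsel < 0) {
        /* Bus has no usable ACPI_PCIHP_PROP_BSEL property; ignore it. */
        return;
    }
    /* ... index the per-bus hotplug state with 'bsel' ... */
}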
static int acpi_add_cpu_info(Object *o, void *opaque)
{
    AcpiCpuInfo *cpu = opaque;
    uint64_t apic_id;

    if (object_dynamic_cast(o, TYPE_CPU)) {
        apic_id = object_property_get_int(o, "apic-id", NULL);
        assert(apic_id <= MAX_CPUMASK_BITS);

        set_bit(apic_id, cpu->found_cpus);
    }

    object_child_foreach(o, acpi_add_cpu_info, opaque);
    return 0;
}
static ram_addr_t get_current_ram_size(void)
{
    GSList *list = NULL, *item;
    ram_addr_t size = ram_size;

    build_dimm_list(qdev_get_machine(), &list);
    for (item = list; item; item = g_slist_next(item)) {
        Object *obj = OBJECT(item->data);
        if (!strcmp(object_get_typename(obj), TYPE_PC_DIMM)) {
            size += object_property_get_int(obj, PC_DIMM_SIZE_PROP,
                                            &error_abort);
        }
    }
    g_slist_free(list);

    return size;
}
static void acpi_get_pm_info(AcpiPmInfo *pm)
{
    Object *piix = piix4_pm_find();
    Object *lpc = ich9_lpc_find();
    Object *obj = NULL;
    QObject *o;

    if (piix) {
        obj = piix;
    }
    if (lpc) {
        obj = lpc;
    }
    assert(obj);

    /* Fill in optional s3/s4 related properties */
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
    if (o) {
        pm->s3_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s3_disabled = false;
    }
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_DISABLED, NULL);
    if (o) {
        pm->s4_disabled = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_disabled = false;
    }
    o = object_property_get_qobject(obj, ACPI_PM_PROP_S4_VAL, NULL);
    if (o) {
        pm->s4_val = qint_get_int(qobject_to_qint(o));
    } else {
        pm->s4_val = false;
    }

    /* Fill in mandatory properties */
    pm->sci_int = object_property_get_int(obj, ACPI_PM_PROP_SCI_INT, NULL);

    pm->acpi_enable_cmd = object_property_get_int(obj,
                                                  ACPI_PM_PROP_ACPI_ENABLE_CMD,
                                                  NULL);
    pm->acpi_disable_cmd =
        object_property_get_int(obj, ACPI_PM_PROP_ACPI_DISABLE_CMD, NULL);
    pm->io_base = object_property_get_int(obj, ACPI_PM_PROP_PM_IO_BASE, NULL);
    pm->gpe0_blk = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK, NULL);
    pm->gpe0_blk_len = object_property_get_int(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
                                               NULL);
}
static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
{
    NVDIMMDevice *nvdimm = NULL;
    GSList *list, *device_list = nvdimm_get_device_list();

    for (list = device_list; list; list = list->next) {
        NVDIMMDevice *nvd = list->data;
        int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
                                           NULL);

        if (nvdimm_slot_to_handle(slot) == handle) {
            nvdimm = nvd;
            break;
        }
    }

    g_slist_free(device_list);
    return nvdimm;
}
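/*
 * Hedged usage sketch, illustration only and not from the source: a guest
 * _DSM call carries an NFIT device handle, and a dispatcher can reject
 * handles that do not map to any NVDIMM. The helper name
 * example_check_handle() is hypothetical.
 */
static bool example_check_handle(uint32_t handle)
{
    NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(handle);

    if (!nvdimm) {
        /* Unknown handle: the caller should return an error to the guest. */
        return false;
    }
    return true;
}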
static int pc_existing_dimms_capacity_internal(Object *obj, void *opaque)
{
    pc_dimms_capacity *cap = opaque;
    uint64_t *size = &cap->size;

    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
        DeviceState *dev = DEVICE(obj);

        if (dev->realized) {
            (*size) += object_property_get_int(obj, PC_DIMM_SIZE_PROP,
                                               cap->errp);
        }

        if (cap->errp && *cap->errp) {
            return 1;
        }
    }

    object_child_foreach(obj, pc_existing_dimms_capacity_internal, opaque);
    return 0;
}
int qmp_pc_dimm_device_list(Object *obj, void *opaque)
{
    MemoryDeviceInfoList ***prev = opaque;

    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
        DeviceState *dev = DEVICE(obj);

        if (dev->realized) {
            MemoryDeviceInfoList *elem = g_new0(MemoryDeviceInfoList, 1);
            MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1);
            PCDIMMDeviceInfo *di = g_new0(PCDIMMDeviceInfo, 1);
            DeviceClass *dc = DEVICE_GET_CLASS(obj);
            PCDIMMDevice *dimm = PC_DIMM(obj);

            if (dev->id) {
                di->has_id = true;
                di->id = g_strdup(dev->id);
            }
            di->hotplugged = dev->hotplugged;
            di->hotpluggable = dc->hotpluggable;
            di->addr = dimm->addr;
            di->slot = dimm->slot;
            di->node = dimm->node;
            di->size = object_property_get_int(OBJECT(dimm),
                                               PC_DIMM_SIZE_PROP, NULL);
            di->memdev = object_get_canonical_path(OBJECT(dimm->hostmem));

            info->dimm = di;
            elem->value = info;
            elem->next = NULL;
            **prev = elem;
            *prev = &elem->next;
        }
    }

    object_child_foreach(obj, qmp_pc_dimm_device_list, opaque);
    return 0;
}
/**
 * acpi_memory_slot_status:
 * @mem_st: memory hotplug state
 * @dev: device
 * @errp: set in case of an error
 *
 * Obtain a single memory slot status.
 *
 * This function will be called by memory unplug request cb and unplug cb.
 */
static MemStatus *
acpi_memory_slot_status(MemHotplugState *mem_st,
                        DeviceState *dev, Error **errp)
{
    Error *local_err = NULL;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return NULL;
    }

    if (slot >= mem_st->dev_count) {
        char *dev_path = object_get_canonical_path(OBJECT(dev));
        error_setg(errp, "acpi_memory_slot_status: "
                   "device [%s] returned invalid memory slot[%d]",
                   dev_path, slot);
        g_free(dev_path);
        return NULL;
    }

    return &mem_st->devs[slot];
}
void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
                         MemoryRegion *mr, uint64_t align, Error **errp)
{
    int slot;
    MachineState *machine = MACHINE(qdev_get_machine());
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
    Error *local_err = NULL;
    uint64_t existing_dimms_capacity = 0;
    uint64_t addr;

    addr = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                    &local_err);
    if (local_err) {
        goto out;
    }

    addr = pc_dimm_get_free_addr(hpms->base,
                                 memory_region_size(&hpms->mr),
                                 !addr ? NULL : &addr, align,
                                 memory_region_size(mr), &local_err);
    if (local_err) {
        goto out;
    }

    existing_dimms_capacity = pc_existing_dimms_capacity(&local_err);
    if (local_err) {
        goto out;
    }

    if (existing_dimms_capacity + memory_region_size(mr) >
        machine->maxram_size - machine->ram_size) {
        error_setg(&local_err, "not enough space, currently 0x%" PRIx64
                   " in use of total hot pluggable 0x" RAM_ADDR_FMT,
                   existing_dimms_capacity,
                   machine->maxram_size - machine->ram_size);
        goto out;
    }

    object_property_set_uint(OBJECT(dev), addr, PC_DIMM_ADDR_PROP,
                             &local_err);
    if (local_err) {
        goto out;
    }
    trace_mhp_pc_dimm_assigned_address(addr);

    slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                   &local_err);
    if (local_err) {
        goto out;
    }

    slot = pc_dimm_get_free_slot(slot == PC_DIMM_UNASSIGNED_SLOT ? NULL : &slot,
                                 machine->ram_slots, &local_err);
    if (local_err) {
        goto out;
    }
    object_property_set_int(OBJECT(dev), slot, PC_DIMM_SLOT_PROP, &local_err);
    if (local_err) {
        goto out;
    }
    trace_mhp_pc_dimm_assigned_slot(slot);

    if (kvm_enabled() && !kvm_has_free_slot(machine)) {
        error_setg(&local_err, "hypervisor has no free memory slots left");
        goto out;
    }

    if (!vhost_has_free_slot()) {
        error_setg(&local_err, "a used vhost backend has no free"
                   " memory slots left");
        goto out;
    }

    memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
    vmstate_register_ram(vmstate_mr, dev);
    numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);

out:
    error_propagate(errp, local_err);
}
static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts,
                            Error **errp)
{
    uint16_t nodenr;
    uint16List *cpus = NULL;

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = nb_numa_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        if (cpus->value > MAX_CPUMASK_BITS) {
            error_setg(errp,
                       "CPU number %" PRIu16 " is bigger than %d",
                       cpus->value, MAX_CPUMASK_BITS);
            return;
        }
        bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1);
    }

    if (node->has_mem && node->has_memdev) {
        error_setg(errp, "qemu: cannot specify both mem= and memdev=");
        return;
    }

    if (have_memdevs == -1) {
        have_memdevs = node->has_memdev;
    }
    if (node->has_memdev != have_memdevs) {
        error_setg(errp, "qemu: memdev option must be specified for either "
                   "all or no nodes");
        return;
    }

    if (node->has_mem) {
        uint64_t mem_size = node->mem;
        const char *mem_str = qemu_opt_get(opts, "mem");
        /* Fix up legacy suffix-less format */
        if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) {
            mem_size <<= 20;
        }
        numa_info[nodenr].node_mem = mem_size;
    }
    if (node->has_memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_int(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }
    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
}
static void bcm2835_peripherals_realize(DeviceState *dev, Error **errp)
{
    BCM2835PeripheralState *s = BCM2835_PERIPHERALS(dev);
    Object *obj;
    MemoryRegion *ram;
    Error *err = NULL;
    uint32_t ram_size, vcram_size;
    CharDriverState *chr;
    int n;

    obj = object_property_get_link(OBJECT(dev), "ram", &err);
    if (obj == NULL) {
        error_setg(errp, "%s: required ram link not found: %s",
                   __func__, error_get_pretty(err));
        return;
    }

    ram = MEMORY_REGION(obj);
    ram_size = memory_region_size(ram);

    /* Map peripherals and RAM into the GPU address space. */
    memory_region_init_alias(&s->peri_mr_alias, OBJECT(s),
                             "bcm2835-peripherals", &s->peri_mr, 0,
                             memory_region_size(&s->peri_mr));

    memory_region_add_subregion_overlap(&s->gpu_bus_mr, BCM2835_VC_PERI_BASE,
                                        &s->peri_mr_alias, 1);

    /* RAM is aliased four times (different cache configurations) on the GPU */
    for (n = 0; n < 4; n++) {
        memory_region_init_alias(&s->ram_alias[n], OBJECT(s),
                                 "bcm2835-gpu-ram-alias[*]", ram, 0, ram_size);
        memory_region_add_subregion_overlap(&s->gpu_bus_mr, (hwaddr)n << 30,
                                            &s->ram_alias[n], 0);
    }

    /* Interrupt Controller */
    object_property_set_bool(OBJECT(&s->ic), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->peri_mr, ARMCTRL_IC_OFFSET,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->ic), 0));
    sysbus_pass_irq(SYS_BUS_DEVICE(s), SYS_BUS_DEVICE(&s->ic));

    /* UART0 */
    object_property_set_bool(OBJECT(s->uart0), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->peri_mr, UART0_OFFSET,
                                sysbus_mmio_get_region(s->uart0, 0));
    sysbus_connect_irq(s->uart0, 0,
        qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
                               INTERRUPT_UART));

    /* AUX / UART1 */
    /* TODO: don't call qemu_char_get_next_serial() here, instead set
     * chardev properties for each uart at the board level, once pl011
     * (uart0) has been updated to avoid qemu_char_get_next_serial()
     */
    chr = qemu_char_get_next_serial();
    if (chr == NULL) {
        chr = qemu_chr_new("bcm2835.uart1", "null", NULL);
    }
    qdev_prop_set_chr(DEVICE(&s->aux), "chardev", chr);

    object_property_set_bool(OBJECT(&s->aux), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->peri_mr, UART1_OFFSET,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->aux), 0));
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->aux), 0,
        qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
                               INTERRUPT_AUX));

    /* Mailboxes */
    object_property_set_bool(OBJECT(&s->mboxes), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->peri_mr, ARMCTRL_0_SBM_OFFSET,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->mboxes), 0));
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->mboxes), 0,
        qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_ARM_IRQ,
                               INTERRUPT_ARM_MAILBOX));

    /* Framebuffer */
    vcram_size = (uint32_t)object_property_get_int(OBJECT(s), "vcram-size",
                                                   &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_int(OBJECT(&s->fb), ram_size - vcram_size,
                            "vcram-base", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_bool(OBJECT(&s->fb), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->mbox_mr, MBOX_CHAN_FB << MBOX_AS_CHAN_SHIFT,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->fb), 0));
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->fb), 0,
                       qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_FB));

    /* Property channel */
    object_property_set_bool(OBJECT(&s->property), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->mbox_mr,
                MBOX_CHAN_PROPERTY << MBOX_AS_CHAN_SHIFT,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->property), 0));
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->property), 0,
                      qdev_get_gpio_in(DEVICE(&s->mboxes), MBOX_CHAN_PROPERTY));

    /* Extended Mass Media Controller */
    object_property_set_int(OBJECT(&s->sdhci), BCM2835_SDHC_CAPAREG, "capareg",
                            &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_bool(OBJECT(&s->sdhci), true, "pending-insert-quirk",
                             &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_bool(OBJECT(&s->sdhci), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->peri_mr, EMMC_OFFSET,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->sdhci), 0));
    sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0,
        qdev_get_gpio_in_named(DEVICE(&s->ic), BCM2835_IC_GPU_IRQ,
                               INTERRUPT_ARASANSDIO));
    object_property_add_alias(OBJECT(s), "sd-bus", OBJECT(&s->sdhci), "sd-bus",
                              &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* DMA Channels */
    object_property_set_bool(OBJECT(&s->dma), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    memory_region_add_subregion(&s->peri_mr, DMA_OFFSET,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 0));
    memory_region_add_subregion(&s->peri_mr, DMA15_OFFSET,
                sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->dma), 1));

    for (n = 0; n <= 12; n++) {
        sysbus_connect_irq(SYS_BUS_DEVICE(&s->dma), n,
                           qdev_get_gpio_in_named(DEVICE(&s->ic),
                                                  BCM2835_IC_GPU_IRQ,
                                                  INTERRUPT_DMA0 + n));
    }
}
static void microblaze_generic_fdt_init(MachineState *machine)
{
    CPUState *cpu;
    ram_addr_t ram_kernel_base = 0, ram_kernel_size = 0;
    void *fdt = NULL;
    const char *dtb_arg, *hw_dtb_arg;
    QemuOpts *machine_opts;
    int fdt_size;

    /* for memory node */
    char node_path[DT_PATH_LENGTH];
    FDTMachineInfo *fdti;
    MemoryRegion *main_mem;

    /* For DMA node */
    char dma_path[DT_PATH_LENGTH] = { 0 };
    uint32_t memory_phandle;

    /* For Ethernet nodes */
    char **eth_paths;
    char *phy_path;
    char *mdio_path;
    uint32_t n_eth;
    uint32_t prop_val;

    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (!machine_opts) {
        goto no_dtb_arg;
    }
    dtb_arg = qemu_opt_get(machine_opts, "dtb");
    hw_dtb_arg = qemu_opt_get(machine_opts, "hw-dtb");
    if (!dtb_arg && !hw_dtb_arg) {
        goto no_dtb_arg;
    }

    /* If the user only provided a -dtb, use it as the hw description. */
    if (!hw_dtb_arg) {
        hw_dtb_arg = dtb_arg;
    }

    fdt = load_device_tree(hw_dtb_arg, &fdt_size);
    if (!fdt) {
        hw_error("Error: Unable to load Device Tree %s\n", hw_dtb_arg);
        return;
    }

    if (IS_PETALINUX_MACHINE) {
        /* Mark the simple-bus as incompatible as it breaks the Microblaze
         * PetaLinux boot
         */
        add_to_compat_table(NULL, "compatible:simple-bus", NULL);
    }

    /* find memory node or add new one if needed */
    while (qemu_fdt_get_node_by_name(fdt, node_path, "memory")) {
        qemu_fdt_add_subnode(fdt, "/memory@0");
        qemu_fdt_setprop_cells(fdt, "/memory@0", "reg", 0, machine->ram_size);
    }

    if (!qemu_fdt_getprop(fdt, "/memory", "compatible", NULL, 0, NULL)) {
        qemu_fdt_setprop_string(fdt, "/memory", "compatible",
                                "qemu:memory-region");
        qemu_fdt_setprop_cells(fdt, "/memory", "qemu,ram", 1);
    }

    if (IS_PETALINUX_MACHINE) {
        /* If using a *-plnx machine, the AXI DMA memory links are not included
         * in the DTB by default. To avoid seg faults, add the links in here if
         * they have not already been added by the user
         */
        qemu_fdt_get_node_by_name(fdt, dma_path, "dma");

        if (strcmp(dma_path, "") != 0) {
            memory_phandle = qemu_fdt_check_phandle(fdt, node_path);

            if (!memory_phandle) {
                memory_phandle = qemu_fdt_alloc_phandle(fdt);

                qemu_fdt_setprop_cells(fdt, "/memory", "linux,phandle",
                                       memory_phandle);
                qemu_fdt_setprop_cells(fdt, "/memory", "phandle",
                                       memory_phandle);
            }

            if (!qemu_fdt_getprop(fdt, dma_path, "sg", NULL, 0, NULL)) {
                qemu_fdt_setprop_phandle(fdt, dma_path, "sg", node_path);
            }

            if (!qemu_fdt_getprop(fdt, dma_path, "s2mm", NULL, 0, NULL)) {
                qemu_fdt_setprop_phandle(fdt, dma_path, "s2mm", node_path);
            }

            if (!qemu_fdt_getprop(fdt, dma_path, "mm2s", NULL, 0, NULL)) {
                qemu_fdt_setprop_phandle(fdt, dma_path, "mm2s", node_path);
            }
        }

        /* Copy phyaddr value from phy node reg property */
        n_eth = qemu_fdt_get_n_nodes_by_name(fdt, &eth_paths, "ethernet");

        while (n_eth--) {
            mdio_path = qemu_fdt_get_child_by_name(fdt, eth_paths[n_eth],
                                                   "mdio");
            if (mdio_path) {
                phy_path = qemu_fdt_get_child_by_name(fdt, mdio_path, "phy");
                if (phy_path) {
                    prop_val = qemu_fdt_getprop_cell(fdt, phy_path, "reg",
                                                     NULL, 0, NULL,
                                                     &error_abort);
                    qemu_fdt_setprop_cell(fdt, eth_paths[n_eth],
                                          "xlnx,phyaddr", prop_val);
                    g_free(phy_path);
                } else {
                    qemu_log_mask(LOG_GUEST_ERROR, "phy not found in %s",
                                  mdio_path);
                }
                g_free(mdio_path);
            }
            g_free(eth_paths[n_eth]);
        }
        g_free(eth_paths);
    }

    /* Instantiate peripherals from the FDT. */
    fdti = fdt_generic_create_machine(fdt, NULL);
    main_mem = MEMORY_REGION(object_resolve_path(node_path, NULL));

    ram_kernel_base = object_property_get_int(OBJECT(main_mem), "addr", NULL);
    ram_kernel_size = object_property_get_int(OBJECT(main_mem), "size", NULL);

    if (!memory_region_is_mapped(main_mem)) {
        /* If the memory region is not mapped, map it here.
         * It has to be mapped somewhere, so guess that the base address
         * is where the kernel starts
         */
        memory_region_add_subregion(get_system_memory(), ram_kernel_base,
                                    main_mem);

        if (ram_kernel_base && IS_PETALINUX_MACHINE) {
            /* If the memory added is at an offset from zero QEMU will error
             * when an ISR/exception is triggered. Add a small amount of hack
             * RAM to handle this.
             */
            MemoryRegion *hack_ram = g_new(MemoryRegion, 1);
            memory_region_init_ram(hack_ram, NULL, "hack_ram", 0x1000,
                                   &error_abort);
            vmstate_register_ram_global(hack_ram);
            memory_region_add_subregion(get_system_memory(), 0, hack_ram);
        }
    }

    fdt_init_destroy_fdti(fdti);

    fdt_g = fdt;
    microblaze_load_kernel(MICROBLAZE_CPU(first_cpu), ram_kernel_base,
                           ram_kernel_size, machine->initrd_filename,
                           NULL, microblaze_generic_fdt_reset, 0, fdt,
                           fdt_size);

    /* Register FDT to prop mapper for secondary cores. */
    cpu = CPU_NEXT(first_cpu);
    while (cpu) {
        qemu_register_reset(secondary_cpu_reset, cpu);
        cpu = CPU_NEXT(cpu);
    }

    return;

no_dtb_arg:
    if (!QTEST_RUNNING) {
        hw_error("DTB must be specified for %s machine model\n", MACHINE_NAME);
    }
    return;
}