/*
 * Allocate guest RAM for a file-backed host memory backend.
 *
 * Rejects a zero size and a missing mem-path property.  -mem-path is only
 * supported on Linux hosts.  Does nothing if the backend's memory region
 * has already been initialized.
 */
static void
file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend);

    if (!backend->size) {
        error_setg(errp, "can't create backend with size 0");
        return;
    }
    if (!fb->mem_path) {
        error_setg(errp, "mem-path property not set");
        return;
    }
#ifndef CONFIG_LINUX
    error_setg(errp, "-mem-path not supported on this host");
#else
    if (!host_memory_backend_mr_inited(backend)) {
        gchar *path;

        /* Propagate the global mem_prealloc setting to this backend. */
        backend->force_prealloc = mem_prealloc;
        /* Canonical QOM path names the region; freed once init copies it. */
        path = object_get_canonical_path(OBJECT(backend));
        memory_region_init_ram_from_file(&backend->mr, OBJECT(backend),
                                         path, backend->size, fb->share,
                                         fb->mem_path, errp);
        g_free(path);
    }
#endif
}
/*
 * Fill @info with the state of the DIMM device @md.  Selects the NVDIMM
 * or plain DIMM union arm of @info depending on the device's QOM type.
 */
static void pc_dimm_md_fill_device_info(const MemoryDeviceState *md,
                                        MemoryDeviceInfo *info)
{
    PCDIMMDeviceInfo *dimm_info = g_new0(PCDIMMDeviceInfo, 1);
    const DeviceClass *dc = DEVICE_GET_CLASS(md);
    const PCDIMMDevice *dimm = PC_DIMM(md);
    const DeviceState *dev = DEVICE(md);
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM) != NULL;

    if (dev->id) {
        dimm_info->has_id = true;
        dimm_info->id = g_strdup(dev->id);
    }
    dimm_info->hotplugged = dev->hotplugged;
    dimm_info->hotpluggable = dc->hotpluggable;
    dimm_info->addr = dimm->addr;
    dimm_info->slot = dimm->slot;
    dimm_info->node = dimm->node;
    dimm_info->size = object_property_get_uint(OBJECT(dimm),
                                               PC_DIMM_SIZE_PROP, NULL);
    /* Ownership of the canonical path string moves into dimm_info. */
    dimm_info->memdev = object_get_canonical_path(OBJECT(dimm->hostmem));

    if (is_nvdimm) {
        info->u.nvdimm.data = dimm_info;
        info->type = MEMORY_DEVICE_INFO_KIND_NVDIMM;
    } else {
        info->u.dimm.data = dimm_info;
        info->type = MEMORY_DEVICE_INFO_KIND_DIMM;
    }
}
/*
 * Allocate guest RAM for a memfd-backed host memory backend.
 *
 * Rejects a zero size; no-op if the memory region is already initialized.
 * Creates the memfd (optionally hugetlb-backed and, if "seal" is set,
 * sealed against grow/shrink/reseal) and maps it into the backend's
 * memory region.
 */
static void memfd_backend_memory_alloc(HostMemoryBackend *backend,
                                       Error **errp)
{
    HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend);
    char *name;
    int fd;

    if (!backend->size) {
        error_setg(errp, "can't create backend with size 0");
        return;
    }

    if (host_memory_backend_mr_inited(backend)) {
        return;
    }

    /* Propagate the global mem_prealloc setting to this backend. */
    backend->force_prealloc = mem_prealloc;
    fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size,
                           m->hugetlb, m->hugetlbsize,
                           m->seal ?
                           F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0,
                           errp);
    if (fd == -1) {
        return;
    }

    /* Canonical QOM path names the region; freed once init copies it. */
    name = object_get_canonical_path(OBJECT(backend));
    memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend),
                                   name, backend->size, true, fd, errp);
    g_free(name);
}
static void gic_proxy_realize(DeviceState *dev, Error **errp) { GICProxy *s = XILINX_GIC_PROXY(dev); const char *prefix = object_get_canonical_path(OBJECT(dev)); unsigned int i; for (i = 0; i < ARRAY_SIZE(gic_proxy_regs_info); ++i) { RegisterInfo *r = &s->regs_info[gic_proxy_regs_info[i].decode.addr/4]; *r = (RegisterInfo) { .data = (uint8_t *)&s->regs[ gic_proxy_regs_info[i].decode.addr/4], .data_size = sizeof(uint32_t), .access = &gic_proxy_regs_info[i], .debug = GIC_PROXY_ERR_DEBUG, .prefix = prefix, .opaque = s, }; register_init(r); } } static void gic_proxy_set_irq(void *opaque, int irq, int level) { GICProxy *s = XILINX_GIC_PROXY(opaque); int group = irq / 32; int bit = irq % 32; if (level) { s->regs[GICPN_STATUS_REG(group)] |= 1 << bit; } else { s->regs[GICPN_STATUS_REG(group)] &= ~(1 << bit); } gicp_update(s, group); }
/*
 * Build and return a list describing every hot-pluggable CPU slot of
 * @machine.  Slots that are populated also carry the CPU's QOM path.
 * Caller owns the returned list.
 */
HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine)
{
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    HotpluggableCPUList *head = NULL;
    int i;

    /* Force the board to initialize possible_cpus if not done yet. */
    mc->possible_cpu_arch_ids(machine);

    for (i = 0; i < machine->possible_cpus->len; i++) {
        HotpluggableCPU *hp_cpu = g_new0(typeof(*hp_cpu), 1);
        HotpluggableCPUList *entry = g_new0(typeof(*entry), 1);
        Object *cpu = machine->possible_cpus->cpus[i].cpu;

        hp_cpu->type = g_strdup(machine->possible_cpus->cpus[i].type);
        hp_cpu->vcpus_count = machine->possible_cpus->cpus[i].vcpus_count;
        hp_cpu->props = g_memdup(&machine->possible_cpus->cpus[i].props,
                                 sizeof(*hp_cpu->props));

        if (cpu) {
            hp_cpu->has_qom_path = true;
            hp_cpu->qom_path = object_get_canonical_path(cpu);
        }

        /* Prepend: resulting list is in reverse slot order. */
        entry->value = hp_cpu;
        entry->next = head;
        head = entry;
    }

    return head;
}
static void pmc_sss_realize(DeviceState *dev, Error **errp) { PMCSSS *s = PMC_SSS(dev); SSSBase *p = SSS_BASE(dev); Error *local_errp = NULL; int r, i; for (i = 0; i < R_MAX; ++i) { DepRegisterInfo *r = &s->regs_info[i]; *r = (DepRegisterInfo) { .data = (uint8_t *)&s->regs[i], .data_size = sizeof(uint32_t), .access = &pmc_sss_regs_info[i], .debug = PMC_SSS_ERR_DEBUG, .prefix = object_get_canonical_path(OBJECT(dev)), .opaque = s, }; memory_region_init_io(&r->mem, OBJECT(dev), &sss_ops, r, "sss-regs", 4); memory_region_add_subregion(&s->iomem, i * 4, &r->mem); } for (r = 0; r < NO_REMOTE; ++r) { SSSStream *ss = SSS_STREAM(&p->rx_devs[r]); object_property_add_link(OBJECT(ss), "sss", TYPE_PMC_SSS, (Object **)&ss->sss, qdev_prop_allow_set_link_before_realize, OBJ_PROP_LINK_UNREF_ON_RELEASE, &local_errp); if (local_errp) { goto pmc_sss_realize_fail; } object_property_set_link(OBJECT(ss), OBJECT(s), "sss", &local_errp); if (local_errp) { goto pmc_sss_realize_fail; } } return; pmc_sss_realize_fail: if (!*errp) { *errp = local_errp; } } static void sss_reset(DeviceState *dev) { PMCSSS *s = PMC_SSS(dev); SSSBase *p = SSS_BASE(dev); int i; for (i = 0; i < R_MAX; ++i) { dep_register_reset(&s->regs_info[i]); } sss_notify_all(p); }
/* RP helper function to attach a device to an adaptor. */ void rp_device_attach(Object *adaptor, Object *dev, int rp_nr, int dev_nr, Error **errp) { Error *err = NULL; uint32_t nr_devs; char *name; int i; assert(adaptor); assert(dev); /* Verify that the adaptor is of Remote Port type. */ if (!object_dynamic_cast(adaptor, TYPE_REMOTE_PORT)) { error_setg(errp, "%s is not a Remote-Port adaptor!\n", object_get_canonical_path(adaptor)); return; } name = g_strdup_printf("rp-adaptor%d", rp_nr); object_property_set_link(dev, adaptor, name, &err); g_free(name); if (err != NULL) { error_propagate(errp, err); return; } name = g_strdup_printf("rp-chan%d", rp_nr); object_property_set_int(dev, dev_nr, name, &err); g_free(name); if (err != NULL && !object_dynamic_cast(dev, TYPE_REMOTE_PORT_DEVICE)) { /* * RP devices that only receive requests may not need to * know their channel/dev number. If not, treat this as * an error. */ error_propagate(errp, err); return; } err = NULL; nr_devs = object_property_get_int(dev, "nr-devs", &err); if (err) { nr_devs = 1; err = NULL; } /* Multi-channel devs use consecutive numbering. */ for (i = 0; i < nr_devs; i++) { name = g_strdup_printf("remote-port-dev%d", dev_nr + i); object_property_set_link(adaptor, dev, name, &err); g_free(name); if (err != NULL) { error_propagate(errp, err); return; } } }
/*
 * Recompute and drive the IPI interrupt line: pending iff any ISR bit is
 * set and not masked in IMR.
 */
static void ipi_update_irq(IPI *s)
{
    bool pending = s->regs[R_IPI_ISR] & ~s->regs[R_IPI_IMR];
    /* object_get_canonical_path() allocates; the original leaked it. */
    char *path = object_get_canonical_path(OBJECT(s));

    DB_PRINT("%s: irq=%d isr=%x mask=%x\n",
             path, pending, s->regs[R_IPI_ISR], s->regs[R_IPI_IMR]);
    g_free(path);
    qemu_set_irq(s->irq, pending);
}
/* Return 0 and log if reading from write-only register. */
static uint64_t gicp_wo_postr(RegisterInfo *reg, uint64_t val64)
{
    GICProxy *s = XILINX_GIC_PROXY(reg->opaque);
    /* object_get_canonical_path() allocates; the original leaked it. */
    char *path = object_get_canonical_path(OBJECT(s));

    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Reading from wo register at %" HWADDR_PRIx "\n",
                  path, reg->access->decode.addr);
    g_free(path);
    return 0;
}
/*
 * MMIO write handler used while the backing memory is powered down or
 * retained: log a guest error and discard the write.
 */
static void mem_ctrl_pd_write(void *opaque, hwaddr addr, uint64_t value,
                              unsigned size)
{
    MemCtrl *s = MEM_CTRL(opaque);
    /* object_get_canonical_path() allocates; the original leaked it. */
    char *path = object_get_canonical_path(OBJECT(s));

    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Error: Memory unavailable (powered down/retained)!\n"
                  "\tAttempted write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                  path, addr, value);
    g_free(path);
}
/*
 * MMIO read handler used while the backing memory is powered down or
 * retained: log a guest error and return 0.
 */
static uint64_t mem_ctrl_pd_read(void *opaque, hwaddr addr, unsigned size)
{
    MemCtrl *s = MEM_CTRL(opaque);
    /* object_get_canonical_path() allocates; the original leaked it. */
    char *path = object_get_canonical_path(OBJECT(s));

    qemu_log_mask(LOG_GUEST_ERROR,
                  "%s: Error: Memory unavailable (powered down/retained)!\n"
                  "\tAttempted read from %" HWADDR_PRIx "\n",
                  path, addr);
    g_free(path);
    return 0;
}
/*
 * Realize the IO-Module UART: build per-register DepRegisterInfo entries
 * for each register map and hook up the character backend when RX or TX
 * is enabled.
 */
static void xlx_iom_realize(DeviceState *dev, Error **errp)
{
    XilinxUART *s = XILINX_IO_MODULE_UART(dev);
    unsigned int i, rmap;
    /*
     * NOTE(review): 3 slots but only 2 initializers (third is NULL);
     * assumes ARRAY_SIZE(uart_reginfos) <= 2 -- confirm against the
     * uart_reginfos definition.
     */
    uint32_t *regmaps[3] = { &s->regs[0], &s->baud };

    /* Retained as the registers' debug prefix for the device's lifetime. */
    s->prefix = object_get_canonical_path(OBJECT(dev));
    for (rmap = 0; rmap < ARRAY_SIZE(uart_reginfos); rmap++) {
        for (i = 0; i < uart_reginfo_sizes[rmap]; ++i) {
            DepRegisterInfo *r = &s->regs_infos[rmap][i];

            *r = (DepRegisterInfo) {
                /* Backing storage comes from the matching register map. */
                .data = (uint8_t *)&regmaps[rmap][i],
                .data_size = sizeof(uint32_t),
                .access = &uart_reginfos[rmap][i],
                .debug = XILINX_IO_MODULE_UART_ERR_DEBUG,
                .prefix = s->prefix,
                .opaque = s,
            };
            memory_region_init_io(&r->mem, OBJECT(dev), &iom_uart_ops, r,
                                  r->access->name, 4);
            memory_region_add_subregion(&s->iomem[rmap], i * 4, &r->mem);
        }
    }
    if (s->cfg.use_rx || s->cfg.use_tx) {
        qemu_chr_fe_set_handlers(&s->chr, uart_can_rx, uart_rx,
                                 uart_event, NULL, s, NULL, true);
    }
}

/*
 * Instance init: wire the regs_infos index table, create one MMIO region
 * per register map, and expose the error/TX/RX interrupt lines.
 */
static void xlx_iom_init(Object *obj)
{
    XilinxUART *s = XILINX_IO_MODULE_UART(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    unsigned int i;

    s->regs_infos[0] = s->regs_info0;
    s->regs_infos[1] = s->regs_info1;

    for (i = 0; i < ARRAY_SIZE(s->iomem); i++) {
        char *region_name = g_strdup_printf("%s-%d",
                                            TYPE_XILINX_IO_MODULE_UART, i);

        memory_region_init_io(&s->iomem[i], obj, &iom_uart_ops, s,
                              region_name, uart_reginfo_sizes[i] * 4);
        g_free(region_name);
        sysbus_init_mmio(sbd, &s->iomem[i]);
    }
    sysbus_init_irq(sbd, &s->irq_err);
    sysbus_init_irq(sbd, &s->irq_tx);
    sysbus_init_irq(sbd, &s->irq_rx);
}
/*
 * SBI MMIO read dispatcher: look up the register for @addr and read via
 * the register API; unmapped offsets log a decode error and return 0.
 */
static uint64_t sbi_read(void *opaque, hwaddr addr, unsigned size)
{
    SlaveBootInt *s = SBI(opaque);
    DepRegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
                 path, addr);
        g_free(path);
        return 0;
    }
    return dep_register_read(r);
}
/*
 * IPI MMIO read dispatcher: look up the register for @addr and read via
 * the register API; unmapped offsets log a guest error and return 0.
 */
static uint64_t ipi_read(void *opaque, hwaddr addr, unsigned size)
{
    IPI *s = XILINX_IPI(opaque);
    DepRegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Decode error: read from %" HWADDR_PRIx "\n",
                      path, addr);
        g_free(path);
        return 0;
    }
    return dep_register_read(r);
}
/*
 * RPU MMIO read dispatcher: look up the register for @addr and read via
 * the register API; unmapped offsets log a decode error and return 0.
 */
static uint64_t rpu_read(void *opaque, hwaddr addr, unsigned size)
{
    RPU *s = XILINX_RPU(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
                 path, addr);
        g_free(path);
        return 0;
    }
    return register_read(r);
}
/*
 * AXI GPIO MMIO read dispatcher: look up the register for @addr and read
 * via the register API; unmapped offsets log a decode error and return 0.
 */
static uint64_t xlnx_axi_gpio_read(void *opaque, hwaddr addr, unsigned size)
{
    XlnxAXIGPIO *s = XLNX_AXI_GPIO(opaque);
    DepRegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
                 path, addr);
        g_free(path);
        return 0;
    }
    return dep_register_read(r);
}
/*
 * RPU MMIO write dispatcher: look up the register for @addr and write
 * with a full mask; unmapped offsets log a decode error and are dropped.
 */
static void rpu_write(void *opaque, hwaddr addr, uint64_t value,
                      unsigned size)
{
    RPU *s = XILINX_RPU(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                 path, addr, value);
        g_free(path);
        return;
    }
    register_write(r, value, ~0);
}
/*
 * AXI GPIO MMIO write dispatcher: look up the register for @addr and
 * write with a full mask; unmapped offsets log a decode error.
 */
static void xlnx_axi_gpio_write(void *opaque, hwaddr addr, uint64_t value,
                                unsigned size)
{
    XlnxAXIGPIO *s = XLNX_AXI_GPIO(opaque);
    DepRegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                 path, addr, value);
        g_free(path);
        return;
    }
    dep_register_write(r, value, ~0);
}
/*
 * IPI MMIO write dispatcher: look up the register for @addr and write
 * with a full mask; unmapped offsets log a guest error and are dropped.
 */
static void ipi_write(void *opaque, hwaddr addr, uint64_t value,
                      unsigned size)
{
    IPI *s = XILINX_IPI(opaque);
    DepRegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Decode error: write to"
                      " %" HWADDR_PRIx "=%" PRIx64 "\n",
                      path, addr, value);
        g_free(path);
        return;
    }
    dep_register_write(r, value, ~0);
}
static void ipi_handler(void *opaque, int n, int level) { IPI *s = XILINX_IPI(opaque); DepRegisterInfo *r_isr = &s->regs_info[A_IPI_ISR / 4]; uint32_t val = (!!level) << n; uint64_t old_value = s->regs[R_IPI_ISR]; DB_PRINT("%s: %s: irq[%d]=%d\n", __func__, object_get_canonical_path(OBJECT(s)), n, level); s->regs[R_IPI_ISR] |= val; ipi_update_irq(s); dep_register_refresh_gpios(r_isr, old_value); }
/*
 * Connect every queued FDT IRQ connection.  Connections sharing the same
 * sink qemu_irq are merged: a QEMUIRQSharedState with one intermediate
 * source IRQ per contributor is interposed, and each contributor is
 * re-pointed at its own intermediate line.  Consumes and frees the
 * fdti->irqs list.
 */
static void fdt_init_all_irqs(FDTMachineInfo *fdti)
{
    while (fdti->irqs) {
        FDTIRQConnection *first = fdti->irqs;
        qemu_irq sink = first->irq;
        bool (*merge_fn)(bool *, int) = first->merge_fn;
        int num_sources = 0;
        FDTIRQConnection *irq;

        /* Count how many queued connections target the same sink. */
        for (irq = first; irq; irq = irq->next) {
            if (irq->irq == sink) {
                /* Same sink */
                num_sources++;
            }
        }
        if (num_sources > 1) {
            QEMUIRQSharedState *s = g_malloc0(sizeof *s);

            s->sink = sink;
            s->merge_fn = merge_fn;
            /* One intermediate input line per contributing source. */
            qemu_irq *sources = qemu_allocate_irqs(qemu_irq_shared_handler,
                                                   s, num_sources);

            for (irq = first; irq; irq = irq->next) {
                if (irq->irq == sink) {
                    /*
                     * NOTE(review): object_get_canonical_path() below and
                     * g_strdup_printf() here name objects per source;
                     * "sources" advances past each consumed line.
                     */
                    char *shared_irq_name =
                        g_strdup_printf("shared-irq-%p", *sources);

                    /* All contributors to one sink must agree on merge_fn. */
                    if (irq->merge_fn != merge_fn) {
                        fprintf(stderr, "ERROR: inconsistent IRQ merge fns\n");
                        exit(1);
                    }

                    object_property_add_child(OBJECT(irq->dev),
                                              shared_irq_name,
                                              OBJECT(*sources), &error_abort);
                    g_free(shared_irq_name);
                    /* Re-point this contributor at its intermediate line. */
                    irq->irq = *(sources++);
                    s->num++;
                }
            }
        }
        /*
         * NOTE(review): the object_get_canonical_path() result below is
         * never freed -- leaks one string per connection when debug
         * printing is enabled.
         */
        DB_PRINT(0, "%s: connected to %s irq line %d (%s)\n",
                 first->sink_info ? first->sink_info : "",
                 object_get_canonical_path(OBJECT(first->dev)),
                 first->i, first->name ? first->name : "");
        qdev_connect_gpio_out_named(DEVICE(first->dev), first->name,
                                    first->i, first->irq);
        /* Pop and free the handled head entry. */
        fdti->irqs = first->next;
        g_free(first);
    }
}
/*
 * GIC proxy MMIO write dispatcher: look up the register for @addr and
 * write with a full mask; unmapped offsets log a guest error.
 */
static void gic_proxy_write(void *opaque, hwaddr addr, uint64_t value,
                            unsigned size)
{
    GICProxy *s = XILINX_GIC_PROXY(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Decode error: write %" HWADDR_PRIx "=%" PRIx64 "\n",
                      path, addr, value);
        g_free(path);
        return;
    }
    register_write(r, value, ~0);
}
/*
 * SBI MMIO write dispatcher: look up the register for @addr, write with a
 * full mask and kick the SMAP data path; unmapped offsets log an error.
 */
static void sbi_write(void *opaque, hwaddr addr, uint64_t value,
                      unsigned size)
{
    SlaveBootInt *s = SBI(opaque);
    DepRegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        /* object_get_canonical_path() allocates; the original leaked it. */
        char *path = object_get_canonical_path(OBJECT(s));

        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                 path, addr, value);
        g_free(path);
        return;
    }
    dep_register_write(r, value, ~0);
    smap_data_rdwr(s);
}
/*
 * Realize the IO-Module PIT: build the per-register MMIO table and, when
 * the timer is configured in, create the ptimer plus the GPIO lines used
 * for prescaler chaining with the neighbouring PIT.
 */
static void xlx_iom_realize(DeviceState *dev, Error **errp)
{
    XilinxPIT *s = XILINX_IO_MODULE_PIT(dev);
    unsigned int i;

    /* Retained as the registers' debug prefix for the device's lifetime. */
    s->prefix = object_get_canonical_path(OBJECT(dev));
    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
        RegisterInfo *r = &s->regs_info[i];

        *r = (RegisterInfo) {
            .data = (uint8_t *)&s->regs[i],
            .data_size = sizeof(uint32_t),
            .access = &pit_regs_info[i],
            .debug = XILINX_IO_MODULE_PIT_ERR_DEBUG,
            .prefix = s->prefix,
            .opaque = s,
        };
        memory_region_init_io(&r->mem, OBJECT(dev), &iom_pit_ops, r,
                              r->access->name, 4);
        memory_region_add_subregion(&s->iomem, i * 4, &r->mem);
    }

    if (s->cfg.use) {
        /* Timer ticks are delivered via a bottom half. */
        s->bh = qemu_bh_new(pit_timer_hit, s);
        s->ptimer = ptimer_init(s->bh);
        ptimer_set_freq(s->ptimer, s->frequency);
        /* IRQ out to pulse when present timer expires/reloads */
        qdev_init_gpio_out(dev, &s->hit_out, 1);
        /* IRQ in to enable pre-scalar mode. Routed from gpo1 */
        qdev_init_gpio_in_named(dev, iom_pit_ps_config, "ps_config", 1);
        /* hit_out of neighbouring PIT is received as hit_in */
        qdev_init_gpio_in_named(dev, iom_pit_ps_hit_in, "ps_hit_in", 1);
    }
}

/*
 * Instance init: create the register MMIO container and the interrupt
 * output.
 */
static void xlx_iom_pit_init(Object *obj)
{
    XilinxPIT *s = XILINX_IO_MODULE_PIT(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &iom_pit_ops, s,
                          TYPE_XILINX_IO_MODULE_PIT, R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}
/*
 * Realize the AXI GPIO: populate the register-info table from the static
 * descriptors and expose two 32-bit GPIO banks in both directions.
 */
static void xlnx_axi_gpio_realize(DeviceState *dev, Error **errp)
{
    XlnxAXIGPIO *s = XLNX_AXI_GPIO(dev);
    /* Retained by every register info as a debug prefix; never freed. */
    const char *prefix = object_get_canonical_path(OBJECT(dev));
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(xlnx_axi_gpio_regs_info); ++i) {
        DepRegisterInfo *r =
            &s->regs_info[xlnx_axi_gpio_regs_info[i].decode.addr/4];

        *r = (DepRegisterInfo) {
            .data = (uint8_t *)&s->regs[
                    xlnx_axi_gpio_regs_info[i].decode.addr/4],
            .data_size = sizeof(uint32_t),
            .access = &xlnx_axi_gpio_regs_info[i],
            .debug = XLNX_AXI_GPIO_ERR_DEBUG,
            .prefix = prefix,
            .opaque = s,
        };
    }

    /* Create two GPIO in banks that QTest can use */
    qdev_init_gpio_in(dev, data_handler1, 32);
    qdev_init_gpio_in(dev, data_handler2, 32);

    /* Create GPIO banks as well */
    qdev_init_gpio_out(dev, s->outputs1, 32);
    qdev_init_gpio_out(dev, s->outputs2, 32);
}

/*
 * Instance init: create the register MMIO container and the parent
 * interrupt output.
 */
static void xlnx_axi_gpio_init(Object *obj)
{
    XlnxAXIGPIO *s = XLNX_AXI_GPIO(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &xlnx_axi_gpio_ops, s,
                          TYPE_XLNX_AXI_GPIO, R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->parent_irq);
}
/*
 * QOM tree walker: append a MemoryDeviceInfo entry for every realized
 * PC-DIMM device found under @obj.  @opaque is a MemoryDeviceInfoList***
 * tail pointer that is advanced as entries are appended.  Recurses into
 * children; always returns 0 so the walk continues.
 */
int qmp_pc_dimm_device_list(Object *obj, void *opaque)
{
    MemoryDeviceInfoList ***prev = opaque;

    if (object_dynamic_cast(obj, TYPE_PC_DIMM)) {
        DeviceState *dev = DEVICE(obj);

        /* Only report DIMMs that have completed realize. */
        if (dev->realized) {
            MemoryDeviceInfoList *elem = g_new0(MemoryDeviceInfoList, 1);
            MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1);
            PCDIMMDeviceInfo *di = g_new0(PCDIMMDeviceInfo, 1);
            DeviceClass *dc = DEVICE_GET_CLASS(obj);
            PCDIMMDevice *dimm = PC_DIMM(obj);

            if (dev->id) {
                di->has_id = true;
                di->id = g_strdup(dev->id);
            }
            di->hotplugged = dev->hotplugged;
            di->hotpluggable = dc->hotpluggable;
            di->addr = dimm->addr;
            di->slot = dimm->slot;
            di->node = dimm->node;
            di->size = object_property_get_uint(OBJECT(dimm),
                                                PC_DIMM_SIZE_PROP, NULL);
            /* Ownership of the canonical path string moves into di. */
            di->memdev = object_get_canonical_path(OBJECT(dimm->hostmem));

            /*
             * NOTE(review): info->type is never set here (unlike
             * pc_dimm_md_fill_device_info) -- relies on the DIMM kind
             * being the zero-initialized enum value; confirm.
             */
            info->u.dimm.data = di;
            elem->value = info;
            elem->next = NULL;
            /* Append at the tail and advance the tail pointer. */
            **prev = elem;
            *prev = &elem->next;
        }
    }

    object_child_foreach(obj, qmp_pc_dimm_device_list, opaque);
    return 0;
}
/** * acpi_memory_slot_status: * @mem_st: memory hotplug state * @dev: device * @errp: set in case of an error * * Obtain a single memory slot status. * * This function will be called by memory unplug request cb and unplug cb. */ static MemStatus * acpi_memory_slot_status(MemHotplugState *mem_st, DeviceState *dev, Error **errp) { Error *local_err = NULL; int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, &local_err); if (local_err) { error_propagate(errp, local_err); return NULL; } if (slot >= mem_st->dev_count) { char *dev_path = object_get_canonical_path(OBJECT(dev)); error_setg(errp, "acpi_memory_slot_status: " "device [%s] returned invalid memory slot[%d]", dev_path, slot); g_free(dev_path); return NULL; } return &mem_st->devs[slot]; }
/*
 * Realize the Xilinx IPI block: populate the register-info table from
 * the static descriptors and expose the IPI/observation GPIO inputs.
 */
static void ipi_realize(DeviceState *dev, Error **errp)
{
    IPI *s = XILINX_IPI(dev);
    /* Retained by every register info as a debug prefix; never freed. */
    const char *prefix = object_get_canonical_path(OBJECT(dev));
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(ipi_regs_info); ++i) {
        DepRegisterInfo *r = &s->regs_info[ipi_regs_info[i].decode.addr/4];

        *r = (DepRegisterInfo) {
            .data = (uint8_t *)&s->regs[
                    ipi_regs_info[i].decode.addr/4],
            .data_size = sizeof(uint32_t),
            .access = &ipi_regs_info[i],
            .debug = XILINX_IPI_ERR_DEBUG,
            .prefix = prefix,
            .opaque = s,
        };
        dep_register_init(r);
        /*
         * NOTE(review): DEVICE(r) casts the register info itself --
         * assumes DepRegisterInfo embeds a DeviceState whose GPIOs are
         * forwarded to @dev; confirm against the dep-register API.
         */
        qdev_pass_all_gpios(DEVICE(r), dev);
    }

    qdev_init_gpio_in_named(dev, ipi_handler, "IPI_INPUTS", 32);
    qdev_init_gpio_in_named(dev, obs_handler, "OBS_INPUTS", 32);
}

/*
 * Instance init: create the register MMIO container and the interrupt
 * output.
 */
static void ipi_init(Object *obj)
{
    IPI *s = XILINX_IPI(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &ipi_ops, s,
                          TYPE_XILINX_IPI, R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}
/*
 * Realize the Xilinx RPU control block: populate the register table,
 * validate that all memory-region links were set, and start in lockstep
 * mode (RPU1 caches and the split-mode DDR slice disabled).
 */
static void rpu_realize(DeviceState *dev, Error **errp)
{
    RPU *s = XILINX_RPU(dev);
    /* Retained by every register info as a debug prefix; never freed. */
    const char *prefix = object_get_canonical_path(OBJECT(dev));
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(rpu_regs_info); ++i) {
        RegisterInfo *r = &s->regs_info[rpu_regs_info[i].decode.addr/4];

        *r = (RegisterInfo) {
            .data = (uint8_t *)&s->regs[
                    rpu_regs_info[i].decode.addr/4],
            .data_size = sizeof(uint32_t),
            .access = &rpu_regs_info[i],
            .debug = XILINX_RPU_ERR_DEBUG,
            .prefix = prefix,
            .opaque = s,
        };
        register_init(r);
        /*
         * NOTE(review): DEVICE(r) casts the register info itself --
         * assumes RegisterInfo embeds a DeviceState; confirm.
         */
        qdev_pass_all_gpios(DEVICE(r), dev);
    }

    /* All memory-region links are mandatory. */
    if (!s->atcm1_for_rpu0) {
        error_set(errp, QERR_MISSING_PARAMETER, "atcm1-for-rpu0");
        return;
    }

    if (!s->btcm1_for_rpu0) {
        error_set(errp, QERR_MISSING_PARAMETER, "btcm1-for-rpu0");
        return;
    }

    if (!s->icache_for_rpu1) {
        error_set(errp, QERR_MISSING_PARAMETER, "icache-for-rpu1");
        return;
    }

    if (!s->dcache_for_rpu1) {
        error_set(errp, QERR_MISSING_PARAMETER, "dcache-for-rpu1");
        return;
    }

    if (!s->ddr) {
        error_set(errp, QERR_MISSING_PARAMETER, "ddr-mem-for-rpu");
        return;
    }

    /* RPUs starts in lockstep mode, so the rpu1 caches are not accessible. */
    memory_region_set_enabled(s->icache_for_rpu1, false);
    memory_region_set_enabled(s->dcache_for_rpu1, false);
    memory_region_set_enabled(s->ddr, false);
}

/*
 * Instance init: create the MMIO region, the two RPU interrupt outputs,
 * the memory-region link properties and the WFI handshake GPIOs.
 */
static void rpu_init(Object *obj)
{
    RPU *s = XILINX_RPU(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &rpu_ops, s,
                          TYPE_XILINX_RPU, R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq_rpu_1);
    sysbus_init_irq(sbd, &s->irq_rpu_0);

    /* xtcm1-for-rpu0 are the aliases for the tcm in lockstep mode.
     * This link allows to enable/disable those aliases when we are in
     * lock-step/normal mode.
     */
    object_property_add_link(obj, "atcm1-for-rpu0", TYPE_MEMORY_REGION,
                             (Object **)&s->atcm1_for_rpu0,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    object_property_add_link(obj, "btcm1-for-rpu0", TYPE_MEMORY_REGION,
                             (Object **)&s->btcm1_for_rpu0,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    /*
     * NOTE(review): this link targets &s->atcm1_for_rpu0 again, the same
     * field as the "atcm1-for-rpu0" link above -- looks like a
     * copy/paste slip; confirm the intended target field.
     */
    object_property_add_link(obj, "rpu1-for-main-bus", TYPE_MEMORY_REGION,
                             (Object **)&s->atcm1_for_rpu0,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    /* This link allows to enable/disable those memory region when we are in
     * lock-step/normal mode.
     */
    object_property_add_link(obj, "icache-for-rpu1", TYPE_MEMORY_REGION,
                             (Object **)&s->icache_for_rpu1,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    object_property_add_link(obj, "dcache-for-rpu1", TYPE_MEMORY_REGION,
                             (Object **)&s->dcache_for_rpu1,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    /* Link to the second part of the DDR which is enabled in split mode and
     * disabled in lockstep mode.
     */
    object_property_add_link(obj, "ddr-mem-for-rpu", TYPE_MEMORY_REGION,
                             (Object **)&s->ddr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);

    /* wfi_out is used to connect to PMU GPIs. */
    qdev_init_gpio_out_named(DEVICE(obj), s->wfi_out, "wfi_out", 2);
    /* wfi_in is used as input from CPUs as wfi request. */
    qdev_init_gpio_in_named(DEVICE(obj), zynqmp_rpu_0_handle_wfi,
                            "wfi_in_0", 1);
    qdev_init_gpio_in_named(DEVICE(obj), zynqmp_rpu_1_handle_wfi,
                            "wfi_in_1", 1);
}
static void ss_realize(DeviceState *dev, Error **errp) { SlaveBootInt *s = SBI(dev); const char *prefix = object_get_canonical_path(OBJECT(dev)); unsigned int i; const char *port_name; Chardev *chr; for (i = 0; i < ARRAY_SIZE(slave_boot_regs_info); ++i) { DepRegisterInfo *r = &s->regs_info[ slave_boot_regs_info[i].decode.addr / 4]; *r = (DepRegisterInfo) { .data = (uint8_t *)&s->regs[ slave_boot_regs_info[i].decode.addr / 4], .data_size = sizeof(uint32_t), .access = &slave_boot_regs_info[i], .debug = SBI_ERR_DEBUG, .prefix = prefix, .opaque = s, }; } port_name = g_strdup("smap_busy_b"); qdev_init_gpio_out_named(dev, &s->smap_busy, port_name, 1); g_free((gpointer) port_name); port_name = g_strdup("smap_in_b"); qdev_init_gpio_in_named(dev, smap_update, port_name, 2); g_free((gpointer) port_name); chr = qemu_chr_find("sbi"); qdev_prop_set_chr(dev, "chardev", chr); if (!qemu_chr_fe_get_driver(&s->chr)) { DPRINT("SBI interface not connected\n"); } else { qemu_chr_fe_set_handlers(&s->chr, ss_sbi_can_receive, ss_sbi_receive, NULL, NULL, s, NULL, true); } fifo_create8(&s->fifo, 1024 * 4); } static void ss_reset(DeviceState *dev) { SlaveBootInt *s = SBI(dev); uint32_t i; for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { dep_register_reset(&s->regs_info[i]); } fifo_reset(&s->fifo); s->busy_line = 1; qemu_set_irq(s->smap_busy, s->busy_line); ss_update_busy_line(s); sbi_update_irq(s); /* Note : cs always 0 when rp is not connected * i.e slave always respond to master data irrespective of * master state * * as rdwr is also 0, initial state of sbi is data load. Hack this bit * to become 1, when sbi changes to write mode. So, its assumed in * non remote-port model master should expect data when slave wishes * to send. */ }