static void close_peer_eventfds(IVShmemState *s, int posn)
{
    int i, n;

    if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        return;
    }
    if (posn < 0 || posn >= s->nb_peers) {
        error_report("invalid peer %d", posn);
        return;
    }

    n = s->peers[posn].nb_eventfds;

    memory_region_transaction_begin();
    for (i = 0; i < n; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();

    for (i = 0; i < n; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
/* SMRAM */
static void mch_update_smram(MCHPCIState *mch)
{
    memory_region_transaction_begin();
    smram_update(&mch->smram_region, mch->d.config[MCH_HOST_BRDIGE_SMRAM],
                 mch->smm_enabled);
    memory_region_transaction_commit();
}
static void close_guest_eventfds(IVShmemState *s, int posn)
{
    int i, guest_curr_max;

    if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        return;
    }
    if (posn < 0 || posn >= s->nb_peers) {
        return;
    }

    guest_curr_max = s->peers[posn].nb_eventfds;

    memory_region_transaction_begin();
    for (i = 0; i < guest_curr_max; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();

    for (i = 0; i < guest_curr_max; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
FDTMachineInfo *fdt_generic_create_machine(void *fdt, qemu_irq *cpu_irq)
{
    char node_path[DT_PATH_LENGTH];
    FDTMachineInfo *fdti = fdt_init_new_fdti(fdt);

    fdti->irq_base = cpu_irq;

    /* parse the device tree */
    if (!qemu_devtree_get_root_node(fdt, node_path)) {
        memory_region_transaction_begin();
        fdt_init_set_opaque(fdti, node_path, NULL);
        simple_bus_fdt_init(node_path, fdti);
        while (qemu_co_enter_next(fdti->cq));
        fdt_init_all_irqs(fdti);
        memory_region_transaction_commit();
    } else {
        fprintf(stderr, "FDT: ERROR: cannot get root node from device tree %s\n",
                node_path);
    }

    DB_PRINT(0, "FDT: Device tree scan complete\n");

    return fdti;
}
static void mch_set_smm(int smm, void *arg)
{
    MCHPCIState *mch = arg;

    memory_region_transaction_begin();
    smram_set_smm(&mch->smm_enabled, smm, mch->d.config[MCH_HOST_BRDIGE_SMRAM],
                  &mch->smram_region);
    memory_region_transaction_commit();
}
static void pci_bridge_update_mappings(PCIBridge *br)
{
    /* Make updates atomic to handle the case of one VCPU updating the bridge
     * while another accesses an unaffected region. */
    memory_region_transaction_begin();
    pci_bridge_region_cleanup(br);
    pci_bridge_region_init(br);
    memory_region_transaction_commit();
}
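/*
 * A minimal illustrative sketch (not taken from QEMU) of the idiom every
 * function in this collection uses: memory_region_transaction_begin()
 * defers address-space updates so that a series of region changes becomes
 * visible to guest VCPUs all at once when memory_region_transaction_commit()
 * runs.  The device type, field name, and function name below are
 * hypothetical; the memory_region_* calls are the real QEMU API.
 */
typedef struct MyDevState {
    MemoryRegion mmio;    /* hypothetical device MMIO region */
} MyDevState;

static void mydev_update_mapping(MyDevState *s, hwaddr new_base, bool enable)
{
    memory_region_transaction_begin();               /* start batching */
    memory_region_set_address(&s->mmio, new_base);   /* move the region */
    memory_region_set_enabled(&s->mmio, enable);     /* enable/disable it */
    memory_region_transaction_commit();              /* publish atomically */
}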
/* SMRAM */
static void mch_update_smram(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);

    memory_region_transaction_begin();
    smram_update(&mch->smram_region, pd->config[MCH_HOST_BRDIGE_SMRAM],
                 mch->smm_enabled);
    memory_region_transaction_commit();
}
static void smbus_io_space_update(PIIX4PMState *s)
{
    s->smb_io_base = le32_to_cpu(*(uint32_t *)(s->dev.config + 0x90));
    s->smb_io_base &= 0xffc0;

    memory_region_transaction_begin();
    memory_region_set_enabled(&s->smb.io, s->dev.config[0xd2] & 1);
    memory_region_set_address(&s->smb.io, s->smb_io_base);
    memory_region_transaction_commit();
}
static void mch_set_smm(int smm, void *arg)
{
    MCHPCIState *mch = arg;
    PCIDevice *pd = PCI_DEVICE(mch);

    memory_region_transaction_begin();
    smram_set_smm(&mch->smm_enabled, smm, pd->config[MCH_HOST_BRDIGE_SMRAM],
                  &mch->smram_region);
    memory_region_transaction_commit();
}
/* PAM */
static void mch_update_pam(MCHPCIState *mch)
{
    int i;

    memory_region_transaction_begin();
    for (i = 0; i < 13; i++) {
        pam_update(&mch->pam_regions[i], i,
                   mch->d.config[MCH_HOST_BRIDGE_PAM0 + ((i + 1) / 2)]);
    }
    memory_region_transaction_commit();
}
static void pm_io_space_update(PIIX4PMState *s)
{
    PCIDevice *d = PCI_DEVICE(s);

    s->io_base = le32_to_cpu(*(uint32_t *)(d->config + 0x40));
    s->io_base &= 0xffc0;

    memory_region_transaction_begin();
    memory_region_set_enabled(&s->io, d->config[0x80] & 1);
    memory_region_set_address(&s->io, s->io_base);
    memory_region_transaction_commit();
}
static inline void update_cpc_base(MIPSGCRState *gcr, uint64_t val)
{
    if (is_cpc_connected(gcr)) {
        gcr->cpc_base = val & GCR_CPC_BASE_MSK;
        memory_region_transaction_begin();
        memory_region_set_address(gcr->cpc_mr,
                                  gcr->cpc_base & GCR_CPC_BASE_CPCBASE_MSK);
        memory_region_set_enabled(gcr->cpc_mr,
                                  gcr->cpc_base & GCR_CPC_BASE_CPCEN_MSK);
        memory_region_transaction_commit();
    }
}
/* PAM */
static void mch_update_pam(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);
    int i;

    memory_region_transaction_begin();
    for (i = 0; i < 13; i++) {
        pam_update(&mch->pam_regions[i], i,
                   pd->config[MCH_HOST_BRIDGE_PAM0 + ((i + 1) / 2)]);
    }
    memory_region_transaction_commit();
}
void pci_bridge_update_mappings(PCIBridge *br)
{
    PCIBridgeWindows *w = br->windows;

    /* Make updates atomic to handle the case of one VCPU updating the bridge
     * while another accesses an unaffected region. */
    memory_region_transaction_begin();
    pci_bridge_region_del(br, br->windows);
    br->windows = pci_bridge_region_init(br);
    memory_region_transaction_commit();
    pci_bridge_region_cleanup(br, w);
}
void ich9_pm_iospace_update(ICH9LPCPMRegs *pm, uint32_t pm_io_base)
{
    ICH9_DEBUG("to 0x%x\n", pm_io_base);

    assert((pm_io_base & ICH9_PMIO_MASK) == 0);

    pm->pm_io_base = pm_io_base;
    memory_region_transaction_begin();
    memory_region_set_enabled(&pm->io, pm->pm_io_base != 0);
    memory_region_set_address(&pm->io, pm->pm_io_base);
    memory_region_transaction_commit();
}
static void pm_io_space_update(PIIX4PMState *s)
{
    uint32_t pm_io_base;

    pm_io_base = le32_to_cpu(*(uint32_t *)(s->dev.config + 0x40));
    pm_io_base &= 0xffc0;

    memory_region_transaction_begin();
    memory_region_set_enabled(&s->io, s->dev.config[0x80] & 1);
    memory_region_set_address(&s->io, pm_io_base);
    memory_region_transaction_commit();
}
void pcie_host_mmcfg_update(PCIExpressHost *e,
                            int enable,
                            hwaddr addr,
                            uint32_t size)
{
    memory_region_transaction_begin();
    pcie_host_mmcfg_unmap(e);
    if (enable) {
        pcie_host_mmcfg_map(e, addr, size);
    }
    memory_region_transaction_commit();
}
static void itc_reconfigure(MIPSITUState *tag)
{
    uint64_t *am = &tag->ITCAddressMap[0];
    MemoryRegion *mr = &tag->storage_io;
    hwaddr address = am[0] & ITC_AM0_BASE_ADDRESS_MASK;
    uint64_t size = (1 << 10) + (am[1] & ITC_AM1_ADDR_MASK_MASK);
    bool is_enabled = (am[0] & ITC_AM0_EN_MASK) != 0;

    memory_region_transaction_begin();
    /* only resize when the requested size is a power of two */
    if (!(size & (size - 1))) {
        memory_region_set_size(mr, size);
    }
    memory_region_set_address(mr, address);
    memory_region_set_enabled(mr, is_enabled);
    memory_region_transaction_commit();
}
static void close_guest_eventfds(IVShmemState *s, int posn)
{
    int i, guest_curr_max;

    guest_curr_max = s->peers[posn].nb_eventfds;

    memory_region_transaction_begin();
    for (i = 0; i < guest_curr_max; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();

    for (i = 0; i < guest_curr_max; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
static void close_peer_eventfds(IVShmemState *s, int posn)
{
    int i, n;

    assert(posn >= 0 && posn < s->nb_peers);
    n = s->peers[posn].nb_eventfds;

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        memory_region_transaction_begin();
        for (i = 0; i < n; i++) {
            ivshmem_del_eventfd(s, posn, i);
        }
        memory_region_transaction_commit();
    }

    for (i = 0; i < n; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
/*
 * Dino can forward memory accesses from the CPU in the range between
 * 0xf0800000 and 0xff000000 to the PCI bus.
 */
static void gsc_to_pci_forwarding(DinoState *s)
{
    uint32_t io_addr_en, tmp;
    int enabled, i;

    tmp = extract32(s->io_control, 7, 2);
    enabled = (tmp == 0x01);
    io_addr_en = s->io_addr_en;

    memory_region_transaction_begin();
    for (i = 1; i < 31; i++) {
        MemoryRegion *mem = &s->pci_mem_alias[i];
        if (enabled && (io_addr_en & (1U << i))) {
            if (!memory_region_is_mapped(mem)) {
                uint32_t addr = 0xf0000000 + i * DINO_MEM_CHUNK_SIZE;
                memory_region_add_subregion(get_system_memory(), addr, mem);
            }
        } else if (memory_region_is_mapped(mem)) {
            memory_region_del_subregion(get_system_memory(), mem);
        }
    }
    memory_region_transaction_commit();
}
/* SMRAM */
static void mch_update_smram(MCHPCIState *mch)
{
    PCIDevice *pd = PCI_DEVICE(mch);
    bool h_smrame = (pd->config[MCH_HOST_BRIDGE_ESMRAMC] &
                     MCH_HOST_BRIDGE_ESMRAMC_H_SMRAME);
    uint32_t tseg_size;

    /* implement SMRAM.D_LCK */
    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & MCH_HOST_BRIDGE_SMRAM_D_LCK) {
        pd->config[MCH_HOST_BRIDGE_SMRAM] &= ~MCH_HOST_BRIDGE_SMRAM_D_OPEN;
        pd->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK_LCK;
        pd->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK_LCK;
    }

    memory_region_transaction_begin();

    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_D_OPEN) {
        /* Hide (!) low SMRAM if H_SMRAME = 1 */
        memory_region_set_enabled(&mch->smram_region, h_smrame);
        /* Show high SMRAM if H_SMRAME = 1 */
        memory_region_set_enabled(&mch->open_high_smram, h_smrame);
    } else {
        /* Hide high SMRAM and low SMRAM */
        memory_region_set_enabled(&mch->smram_region, true);
        memory_region_set_enabled(&mch->open_high_smram, false);
    }

    if (pd->config[MCH_HOST_BRIDGE_SMRAM] & SMRAM_G_SMRAME) {
        memory_region_set_enabled(&mch->low_smram, !h_smrame);
        memory_region_set_enabled(&mch->high_smram, h_smrame);
    } else {
        memory_region_set_enabled(&mch->low_smram, false);
        memory_region_set_enabled(&mch->high_smram, false);
    }

    if (pd->config[MCH_HOST_BRIDGE_ESMRAMC] & MCH_HOST_BRIDGE_ESMRAMC_T_EN) {
        switch (pd->config[MCH_HOST_BRIDGE_ESMRAMC] &
                MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_MASK) {
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_1MB:
            tseg_size = 1024 * 1024;
            break;
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_2MB:
            tseg_size = 1024 * 1024 * 2;
            break;
        case MCH_HOST_BRIDGE_ESMRAMC_TSEG_SZ_8MB:
            tseg_size = 1024 * 1024 * 8;
            break;
        default:
            tseg_size = 0;
            break;
        }
    } else {
        tseg_size = 0;
    }

    memory_region_del_subregion(mch->system_memory, &mch->tseg_blackhole);
    memory_region_set_enabled(&mch->tseg_blackhole, tseg_size);
    memory_region_set_size(&mch->tseg_blackhole, tseg_size);
    memory_region_add_subregion_overlap(mch->system_memory,
                                        mch->below_4g_mem_size - tseg_size,
                                        &mch->tseg_blackhole, 1);

    memory_region_set_enabled(&mch->tseg_window, tseg_size);
    memory_region_set_size(&mch->tseg_window, tseg_size);
    memory_region_set_address(&mch->tseg_window,
                              mch->below_4g_mem_size - tseg_size);
    memory_region_set_alias_offset(&mch->tseg_window,
                                   mch->below_4g_mem_size - tseg_size);

    memory_region_transaction_commit();
}
static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
                                    uint32_t val, int len)
{
    XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
    int index = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    int rc = 0;
    uint32_t read_val = 0;
    int emul_len = 0;
    XenPTReg *reg_entry = NULL;
    uint32_t find_addr = addr;
    XenPTRegInfo *reg = NULL;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        return;
    }

    XEN_PT_LOG_CONFIG(d, addr, val, len);

    /* check unused BAR register */
    index = xen_pt_bar_offset_to_index(addr);
    if ((index >= 0) && (val > 0 && val < XEN_PT_BAR_ALLF) &&
        (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
        XEN_PT_WARN(d, "Guest attempt to set address to unused Base Address "
                    "Register. (addr: 0x%02x, len: %d)\n", addr, len);
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* ignore silently */
            XEN_PT_WARN(d, "Access to 0-Hardwired register. "
                        "(addr: 0x%02x, len: %d)\n", addr, len);
            return;
        }
    }

    rc = xen_host_pci_get_block(&s->real_device, addr,
                                (uint8_t *)&read_val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&read_val, 0xff, len);
    }

    /* pass directly to the real device for passthrough type register group */
    if (reg_grp_entry == NULL) {
        goto out;
    }

    memory_region_transaction_begin();
    pci_default_write_config(d, addr, val, len);

    /* adjust the read and write value to appropriate CFC-CFF window */
    read_val <<= (addr & 3) << 3;
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.write) {
                    rc = reg->u.b.write(s, reg_entry, ptr_val,
                                        read_val >> ((real_offset & 3) << 3),
                                        valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.write) {
                    rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
                                        (read_val >> ((real_offset & 3) << 3)),
                                        valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.write) {
                    rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
                                         (read_val >> ((real_offset & 3) << 3)),
                                         valid_mask);
                }
                break;
            }

            if (rc < 0) {
                XEN_PT_ERR(d, "Invalid write emulation. (rc: %d)\n", rc);
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before passing the value to the real device */
    val >>= (addr & 3) << 3;

    memory_region_transaction_commit();

out:
    /* forward the (possibly emulated) write to the real device */
    rc = xen_host_pci_set_block(&s->real_device, addr, (uint8_t *)&val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_write_block failed. return value: %d.\n", rc);
    }
}