/* default write_config function for PCI-to-PCI bridge */
void pci_bridge_write_config(PCIDevice *d,
                             uint32_t address, uint32_t val, int len)
{
    PCIBridge *s = container_of(d, PCIBridge, dev);
    uint16_t oldctl = pci_get_word(d->config + PCI_BRIDGE_CONTROL);
    uint16_t newctl;

    pci_default_write_config(d, address, val, len);

    if (ranges_overlap(address, len, PCI_COMMAND, 2) ||
        /* io base/limit */
        ranges_overlap(address, len, PCI_IO_BASE, 2) ||
        /* memory base/limit, prefetchable base/limit and
           io base/limit upper 16 */
        ranges_overlap(address, len, PCI_MEMORY_BASE, 20)) {
        pci_bridge_update_mappings(s);
    }

    newctl = pci_get_word(d->config + PCI_BRIDGE_CONTROL);
    if (~oldctl & newctl & PCI_BRIDGE_CTL_BUS_RESET) {
        /* Trigger hot reset on 0->1 transition. */
        pci_bus_reset(&s->sec_bus);
    }
}
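/* The QEMU call sites below assume the ranges_overlap() predicate from
 * include/qemu/range.h: it treats its arguments as two byte ranges
 * [first, first + len - 1] and returns nonzero if they intersect.
 * A minimal sketch of those semantics:
 */
static inline uint64_t range_get_last(uint64_t offset, uint64_t len)
{
    return offset + len - 1;
}

/* Check whether two given ranges overlap.
 * Undefined if either range wraps around 0. */
static inline int ranges_overlap(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2)
{
    uint64_t last1 = range_get_last(first1, len1);
    uint64_t last2 = range_get_last(first2, len2);

    return !(last2 < first1 || last1 < first2);
}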
void pcie_cap_slot_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
        hotplug_event_clear(dev);
    }

    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
        return;
    }

    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_EIC)) {
        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
                        "sltsta -> 0x%02"PRIx16"\n", sltsta);
    }

    /*
     * If the slot is populated, power indicator is off and power
     * controller is off, it is safe to detach the devices.
     */
    if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
        ((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) {
        PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
        pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
                            pcie_unplug_device, NULL);

        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                     PCI_EXP_SLTSTA_PDS);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDC);
    }

    hotplug_event_notify(dev);

    /*
     * 6.7.3.2 Command Completed Events
     *
     * Software issues a command to a hot-plug capable Downstream Port by
     * issuing a write transaction that targets any portion of the Port’s Slot
     * Control register. A single write to the Slot Control register is
     * considered to be a single command, even if the write affects more than
     * one field in the Slot Control register. In response to this transaction,
     * the Port must carry out the requested actions and then set the
     * associated status field for the command completed event.
     */

    /* Real hardware might take a while to complete a requested command because
     * physical movement would be involved, like engaging the electromechanical
     * interlock. In our case, however, the command is completed instantaneously
     * above, so send a command completion event right now.
     */
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
}
static void
gss_adaptive_dash_range_async (GssTransaction * t, gpointer priv)
{
  GssAdaptiveQuery *query = priv;
  guint64 offset;
  guint64 n_bytes;
  guint64 header_size;
  GssAdaptiveLevel *level = query->level;
  int i;

  offset = t->start;
  n_bytes = t->end - t->start;

  if (ranges_overlap (offset, n_bytes, 0,
          level->track->dash_header_and_sidx_size)) {
    gss_soup_message_body_append_clipped (t->msg->response_body,
        SOUP_MEMORY_COPY, level->track->dash_header_data, offset, n_bytes,
        0, level->track->dash_header_and_sidx_size);
  }

  header_size = level->track->dash_header_and_sidx_size;
  for (i = 0; i < level->track->n_fragments; i++) {
    GssIsomFragment *fragment = level->track->fragments[i];
    guint8 *mdat_data;

    if (offset + n_bytes <= fragment->offset)
      break;

    if (ranges_overlap (offset, n_bytes, header_size + fragment->offset,
            fragment->moof_size)) {
      gss_soup_message_body_append_clipped (t->msg->response_body,
          SOUP_MEMORY_COPY, fragment->moof_data, offset, n_bytes,
          header_size + fragment->offset, fragment->moof_size);
    }
    if (ranges_overlap (offset, n_bytes,
            header_size + fragment->offset + fragment->moof_size,
            fragment->mdat_size)) {
      mdat_data = gss_adaptive_assemble_chunk (t, query->adaptive,
          level, fragment);
      if (query->adaptive->drm_type != GSS_DRM_CLEAR) {
        gss_playready_encrypt_samples (fragment, mdat_data,
            query->adaptive->content_key);
      }

      gss_soup_message_body_append_clipped (t->msg->response_body,
          SOUP_MEMORY_COPY, mdat_data + 8, offset, n_bytes,
          header_size + fragment->offset + fragment->moof_size,
          fragment->mdat_size - 8);
      g_free (mdat_data);
    }
  }
}
static void pm_write_config(PCIDevice *d,
                            uint32_t address, uint32_t val, int len)
{
    pci_default_write_config(d, address, val, len);
    if (range_covers_byte(address, len, 0x80) ||
        ranges_overlap(address, len, 0x40, 4)) {
        pm_io_space_update((PIIX4PMState *)d);
    }
    if (range_covers_byte(address, len, 0xd2) ||
        ranges_overlap(address, len, 0x90, 4)) {
        smbus_io_space_update((PIIX4PMState *)d);
    }
}
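/* pm_write_config() above also relies on range_covers_byte() from the same
 * QEMU range.h header; a minimal sketch of its assumed semantics, reusing
 * range_get_last() from the earlier sketch:
 */
static inline int range_covers_byte(uint64_t offset, uint64_t len,
                                    uint64_t byte)
{
    return offset <= byte && byte <= range_get_last(offset, len);
}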
void shpc_cap_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l)
{
    if (!ranges_overlap(addr, l, d->shpc->cap, SHPC_CAP_LENGTH)) {
        return;
    }
    if (ranges_overlap(addr, l, d->shpc->cap + SHPC_CAP_DWORD_DATA, 4)) {
        unsigned dword_data;
        dword_data = pci_get_long(d->shpc->config + d->shpc->cap
                                  + SHPC_CAP_DWORD_DATA);
        shpc_write(d, shpc_cap_dword(d) * 4, dword_data, 4);
    }
    /* Update cap dword data in case guest is going to read it. */
    shpc_cap_update_dword(d);
}
/** @brief Test whether two jobs may be re-ordered
 * @param q1 Serialization queue entry
 * @param q2 Serialization queue entry
 * @param flags Flags for @p q1
 * @return Nonzero if @p q1 and @p q2 may be re-ordered
 *
 * @todo Reordering is currently partially disabled because it breaks
 * Paramiko.  See https://github.com/robey/paramiko/issues/34 for details.
 */
static int reorderable(const struct sqnode *q1, const struct sqnode *q2,
                       unsigned flags) {
  if((q1->type == SSH_FXP_READ || q1->type == SSH_FXP_WRITE)
     && (q2->type == SSH_FXP_READ || q2->type == SSH_FXP_WRITE)) {
    /* We allow reads and writes to be re-ordered up to a point */
    if(!handles_equal(&q1->hid, &q2->hid)) {
      /* Operations on different handles can always be re-ordered. */
      return 1;
    }
    /* Paramiko's prefetch algorithm assumes that response order matches
     * request order.  As a workaround we avoid re-ordering reads until a fix
     * is adequately widely deployed ("in Debian stable" seems like a good
     * measure). */
    if(q1->type == SSH_FXP_READ && q2->type == SSH_FXP_READ)
      return 0;
    if(flags & (HANDLE_TEXT|HANDLE_APPEND))
      /* Operations on text or append-write files cannot be re-ordered. */
      return 0;
    if(q1->type == SSH_FXP_WRITE || q2->type == SSH_FXP_WRITE)
      if(ranges_overlap(q1, q2))
        /* If one of the operations is a write and the ranges overlap then no
         * re-ordering is allowed. */
        return 0;
    return 1;
  } else
    /* Nothing else may be re-ordered with respect to anything */
    return 0;
}
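/* Note that the sftpserver variant of ranges_overlap() used above takes the
 * two queue nodes themselves rather than raw (offset, length) pairs. A
 * plausible sketch, assuming each sqnode records the file offset and byte
 * count of its read/write request ("offset" and "len" are hypothetical
 * field names, not confirmed by the source):
 */
static int ranges_overlap(const struct sqnode *q1, const struct sqnode *q2) {
  /* Zero-length transfers overlap nothing. */
  if(!q1->len || !q2->len)
    return 0;
  /* Half-open interval test over [offset, offset + len) */
  return q1->offset < q2->offset + q2->len
         && q2->offset < q1->offset + q1->len;
}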
static void hostmem_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size, ram_addr_t phys_offset)
{
    HostMem *hostmem = container_of(client, HostMem, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    size_t s = offsetof(struct vhost_memory, regions) +
               (hostmem->mem->nregions + 1) * sizeof hostmem->mem->regions[0];

    /* TODO: this is a hack.
     * At least one vga card (cirrus) changes the gpa to hva
     * memory maps on data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, let's just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    qemu_mutex_lock(&hostmem->mem_lock);

    hostmem->mem = qemu_realloc(hostmem->mem, s);

    assert(size);

    vhost_mem_unassign_memory(hostmem->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_mem_assign_memory(hostmem->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    qemu_mutex_unlock(&hostmem->mem_lock);
}
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}
uint64_t pc_dimm_get_free_addr(uint64_t address_space_start,
                               uint64_t address_space_size,
                               uint64_t *hint, uint64_t size,
                               Error **errp)
{
    GSList *list = NULL, *item;
    uint64_t new_addr, ret = 0;
    uint64_t address_space_end = address_space_start + address_space_size;

    if (!address_space_size) {
        error_setg(errp, "memory hotplug is not enabled, "
                         "please add maxmem option");
        goto out;
    }

    assert(address_space_end > address_space_start);
    object_child_foreach(qdev_get_machine(), pc_dimm_built_list, &list);

    if (hint) {
        new_addr = *hint;
    } else {
        new_addr = address_space_start;
    }

    /* find address range that will fit new DIMM */
    for (item = list; item; item = g_slist_next(item)) {
        PCDIMMDevice *dimm = item->data;
        uint64_t dimm_size = object_property_get_int(OBJECT(dimm),
                                                     PC_DIMM_SIZE_PROP,
                                                     errp);
        if (errp && *errp) {
            goto out;
        }

        if (ranges_overlap(dimm->addr, dimm_size, new_addr, size)) {
            if (hint) {
                DeviceState *d = DEVICE(dimm);
                error_setg(errp, "address range conflicts with '%s'", d->id);
                goto out;
            }
            new_addr = dimm->addr + dimm_size;
        }
    }
    ret = new_addr;

    if (new_addr < address_space_start) {
        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
                   "] at 0x%" PRIx64, new_addr, size, address_space_start);
    } else if ((new_addr + size) > address_space_end) {
        error_setg(errp, "can't add memory [0x%" PRIx64 ":0x%" PRIx64
                   "] beyond 0x%" PRIx64, new_addr, size, address_space_end);
    }

out:
    g_slist_free(list);
    return ret;
}
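/* A hypothetical caller of pc_dimm_get_free_addr(), assigning an address
 * inside the machine's hotplug window. The hotplug_base/hotplug_size
 * parameters and the use of a pre-set dimm->addr as the hint are
 * illustrative assumptions, not code from the source:
 */
static void example_assign_dimm_addr(PCDIMMDevice *dimm, uint64_t dimm_size,
                                     uint64_t hotplug_base,
                                     uint64_t hotplug_size, Error **errp)
{
    /* A nonzero user-specified address acts as a hint; otherwise the
     * allocator scans upward from the bottom of the window. */
    uint64_t *hint = dimm->addr ? &dimm->addr : NULL;
    uint64_t addr = pc_dimm_get_free_addr(hotplug_base, hotplug_size,
                                          hint, dimm_size, errp);
    if (errp && *errp) {
        return;
    }
    dimm->addr = addr;
}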
static gboolean
have_partition_in_range (StoragedPartitionTable *table,
                         StoragedObject         *object,
                         guint64                 start,
                         guint64                 end,
                         gboolean                ignore_container)
{
  gboolean ret = FALSE;
  StoragedDaemon *daemon = NULL;
  GDBusObjectManager *object_manager = NULL;
  const gchar *table_object_path;
  GList *objects = NULL, *l;

  daemon = storaged_linux_block_object_get_daemon (STORAGED_LINUX_BLOCK_OBJECT (object));
  object_manager = G_DBUS_OBJECT_MANAGER (storaged_daemon_get_object_manager (daemon));

  table_object_path = g_dbus_object_get_object_path (G_DBUS_OBJECT (object));

  objects = g_dbus_object_manager_get_objects (object_manager);
  for (l = objects; l != NULL; l = l->next)
    {
      StoragedObject *i_object = STORAGED_OBJECT (l->data);
      StoragedPartition *i_partition = NULL;

      i_partition = storaged_object_get_partition (i_object);

      if (i_partition == NULL)
        goto cont;

      if (g_strcmp0 (storaged_partition_get_table (i_partition), table_object_path) != 0)
        goto cont;

      if (ignore_container && storaged_partition_get_is_container (i_partition))
        goto cont;

      if (!ranges_overlap (start, end - start,
                           storaged_partition_get_offset (i_partition),
                           storaged_partition_get_size (i_partition)))
        goto cont;

      ret = TRUE;
      g_clear_object (&i_partition);
      goto out;

    cont:
      g_clear_object (&i_partition);
    }

 out:
  g_list_foreach (objects, (GFunc) g_object_unref, NULL);
  g_list_free (objects);
  return ret;
}
static void mch_write_config(PCIDevice *d,
                             uint32_t address, uint32_t val, int len)
{
    MCHPCIState *mch = MCH_PCI_DEVICE(d);

    pci_default_write_config(d, address, val, len);

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PAM0,
                       MCH_HOST_BRIDGE_PAM_SIZE)) {
        mch_update_pam(mch);
    }

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_PCIEXBAR,
                       MCH_HOST_BRIDGE_PCIEXBAR_SIZE)) {
        mch_update_pciexbar(mch);
    }

    if (ranges_overlap(address, len, MCH_HOST_BRIDGE_SMRAM,
                       MCH_HOST_BRIDGE_SMRAM_SIZE)) {
        mch_update_smram(mch);
    }
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
/* Check if there is an overlapping region. */
static int fbmem_region_reserved(unsigned long start, size_t size)
{
        struct omapfb_mem_region *rg;
        int i;

        rg = &omapfb_config.mem_desc.region[0];
        for (i = 0; i < OMAPFB_PLANE_NUM; i++, rg++) {
                if (!rg->paddr)
                        /* Empty slot. */
                        continue;
                if (ranges_overlap(start, size, rg->paddr, rg->size))
                        return 1;
        }
        return 0;
}
static int set_fbmem_region_type(struct omapfb_mem_region *rg, int mem_type,
                                 unsigned long mem_start,
                                 unsigned long mem_size)
{
        /*
         * Check if the configuration specifies the type explicitly.
         * type = 0 && paddr = 0, a default don't care case maps to
         * the SDRAM type.
         */
        if (rg->type || !rg->paddr)
                return 0;
        if (ranges_overlap(rg->paddr, rg->size, mem_start, mem_size)) {
                rg->type = mem_type;
                return 0;
        }
        /* Can't determine it. */
        return -1;
}
static void shpc_write(PCIDevice *d, unsigned addr, uint64_t val, int l)
{
    SHPCDevice *shpc = d->shpc;
    int i;
    if (addr >= SHPC_SIZEOF(d)) {
        return;
    }
    l = MIN(l, SHPC_SIZEOF(d) - addr);

    /* TODO: code duplicated from pci.c */
    for (i = 0; i < l; val >>= 8, ++i) {
        unsigned a = addr + i;
        uint8_t wmask = shpc->wmask[a];
        uint8_t w1cmask = shpc->w1cmask[a];
        assert(!(wmask & w1cmask));
        shpc->config[a] = (shpc->config[a] & ~wmask) | (val & wmask);
        shpc->config[a] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }
    if (ranges_overlap(addr, l, SHPC_CMD_CODE, 2)) {
        shpc_command(shpc);
    }
    shpc_interrupt_update(d);
}
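/* Worked example of the wmask/w1cmask arithmetic above, for one byte.
 * Suppose config = 0b1010_0110, wmask = 0b0000_1111, w1cmask = 0b0011_0000,
 * and the guest writes val = 0b0011_1010:
 *
 *   writable bits:  (config & ~wmask) | (val & wmask)  ->  0b1010_1010
 *   W1C bits:       config &= ~(val & w1cmask)         ->  0b1000_1010
 *
 * The guest wrote 1 to both W1C bits, so bit 5 (previously set) is cleared;
 * bits outside both masks are read-only and keep their old value.
 */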
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            assert(to >= 0);
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
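/* The four overlap cases handled above, illustrated (E = existing region,
 * U = range being unassigned):
 *
 *   Remove whole:  U: |--------------|    E lies entirely inside U, so it
 *                  E:    |--------|       is dropped from the array
 *
 *   Shrink:        U:        |--------|   U covers E's tail, so E's
 *                  E: |----------|        memory_size is truncated
 *
 *   Shift:         U: |--------|          U covers E's head, so E's
 *                  E:      |----------|   guest_phys_addr and
 *                                         userspace_addr advance past U
 *
 *   Split:         U:     |----|          U sits strictly inside E: E keeps
 *                  E: |------------|      the head, and a new region is
 *                                         appended at index n for the tail
 */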
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size, ram_addr_t phys_offset)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
            (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;

    /* TODO: this is a hack.
     * At least one vga card (cirrus) changes the gpa to hva
     * memory maps on data path, which slows us down.
     * Since we should never need to DMA into VGA memory
     * anyway, let's just skip these regions. */
    if (ranges_overlap(start_addr, size, 0xa0000, 0x10000)) {
        return;
    }

    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    /* Remove old mapping for this memory, if any. */
    vhost_mem_unassign_memory(dev->mem, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_mem_assign_memory(dev->mem, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }

    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
/* Initialize the MSI-X structures */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_supported) {
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, cap_pos, MSIX_CAP_LENGTH);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev),
                          &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset,
                                &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev),
                          &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
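/* A hypothetical msix_init() call for a device placing an 8-entry MSI-X
 * table and its PBA in the same BAR. The msix_bar region, the chosen
 * offsets, and passing cap_pos = 0 (letting pci_add_capability() pick a
 * free config space slot) are illustrative assumptions:
 */
static int example_device_init_msix(PCIDevice *dev, MemoryRegion *msix_bar)
{
    /* Table at the start of the BAR, PBA at 0x800; both 8-byte aligned,
     * non-overlapping, and within the BAR, as msix_init() requires. */
    return msix_init(dev, 8,
                     msix_bar, 0 /* table_bar_nr */, 0x000 /* table_offset */,
                     msix_bar, 0 /* pba_bar_nr */, 0x800 /* pba_offset */,
                     0 /* cap_pos */);
}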