/*
 * Walk the PCI capability list of device (bus, dev, func) and invoke
 * @iommu_detect_callback for every capability block that is a secure-device
 * capability of IOMMU type.
 *
 * The walk is bounded by PCI_MAX_CAP_BLOCKS to guard against malformed
 * (cyclic) capability chains, and stops early if the callback fails.
 *
 * Returns 0 on success, or the first non-zero value returned by the callback.
 */
static int __init scan_caps_for_iommu(
    int bus, int dev, int func,
    iommu_detect_callback_ptr_t iommu_detect_callback)
{
    u32 header;
    int ptr, id, type;
    int visited = 0, rc = 0;

    ptr = read_pci_config_byte(bus, dev, func, PCI_CAPABILITY_LIST);

    for ( ; ptr >= PCI_MIN_CAP_OFFSET && visited < PCI_MAX_CAP_BLOCKS && !rc;
          ++visited )
    {
        /* Low bits of the pointer are reserved; mask them off before use. */
        ptr &= PCI_CAP_PTR_MASK;
        header = read_pci_config(bus, dev, func, ptr);

        id = get_field_from_reg_u32(header, PCI_CAP_ID_MASK,
                                    PCI_CAP_ID_SHIFT);
        if ( id == PCI_CAP_ID_SECURE_DEVICE )
        {
            type = get_field_from_reg_u32(header, PCI_CAP_TYPE_MASK,
                                          PCI_CAP_TYPE_SHIFT);
            if ( type == PCI_CAP_TYPE_IOMMU )
                rc = iommu_detect_callback(bus, dev, func, ptr);
        }

        /* Advance to the next capability block in the chain. */
        ptr = get_field_from_reg_u32(header, PCI_CAP_NEXT_PTR_MASK,
                                     PCI_CAP_NEXT_PTR_SHIFT);
    }

    return rc;
}
/*
 * Report whether a device table entry has both its V (valid) and TV
 * (translation valid) bits set, i.e. whether page translation information
 * in the DTE may be used.  Returns 1 if so, 0 otherwise.
 */
int amd_iommu_is_dte_page_translation_valid(u32 *entry)
{
    /* Check V first; TV is only inspected when V is set. */
    if ( !get_field_from_reg_u32(entry[0],
                                 IOMMU_DEV_TABLE_VALID_MASK,
                                 IOMMU_DEV_TABLE_VALID_SHIFT) )
        return 0;

    return get_field_from_reg_u32(entry[0],
                                  IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK,
                                  IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT)
           != 0;
}
/*
 * Copy one command into the IOMMU command ring at the current tail slot.
 *
 * The hardware consumes entries at 'head' (read from the MMIO head register);
 * software produces at 'cmd_buffer_tail'.  The entry is written in full
 * before the software tail is advanced.
 *
 * Returns 1 if the command was queued, 0 if the ring is full.
 */
static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[])
{
    u32 next_tail, head;
    u32 *slot;
    int idx;

    /* Tail position after this command, with wrap-around. */
    next_tail = iommu->cmd_buffer_tail + 1;
    if ( next_tail == iommu->cmd_buffer.entries )
        next_tail = 0;

    head = get_field_from_reg_u32(
        readl(iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET),
        IOMMU_CMD_BUFFER_HEAD_MASK, IOMMU_CMD_BUFFER_HEAD_SHIFT);

    /* Advancing the tail onto the head would mean the ring is full. */
    if ( head == next_tail )
        return 0;

    slot = (u32 *)(iommu->cmd_buffer.buffer +
                   (iommu->cmd_buffer_tail * IOMMU_CMD_BUFFER_ENTRY_SIZE));
    for ( idx = 0; idx < IOMMU_CMD_BUFFER_U32_PER_ENTRY; idx++ )
        slot[idx] = cmd[idx];

    iommu->cmd_buffer_tail = next_tail;
    return 1;
}
/*
 * Reassemble the next-level page table pointer from the two 32-bit halves
 * of a device/page table entry.  The low field holds a page-frame number
 * (hence the PAGE_SHIFT), the high field the upper address bits.
 */
u64 amd_iommu_get_next_table_from_pte(u32 *entry)
{
    u64 lo = get_field_from_reg_u32(
        entry[0],
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK,
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT);
    u64 hi = get_field_from_reg_u32(
        entry[1],
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK,
        IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);

    return (hi << 32) | (lo << PAGE_SHIFT);
}
/*
 * Read the capability header of the IOMMU at (seg, bus, dev, func) into
 * iommu->cap.header and verify it really is an IOMMU capability block.
 *
 * Returns 0 on success, -ENODEV if the capability type does not match.
 */
static int __init get_iommu_capabilities(
    u16 seg, u8 bus, u8 dev, u8 func, u16 cap_ptr, struct amd_iommu *iommu)
{
    u8 cap_type;

    iommu->cap.header = pci_conf_read32(seg, bus, dev, func, cap_ptr);

    cap_type = get_field_from_reg_u32(iommu->cap.header,
                                      PCI_CAP_TYPE_MASK, PCI_CAP_TYPE_SHIFT);

    return ( cap_type == PCI_CAP_TYPE_IOMMU ) ? 0 : -ENODEV;
}
/*
 * Read the AMD IOMMU capability block of device (bus, dev, func) and fill in
 * the corresponding fields of @iommu: MMIO BAR, revision, feature support
 * bits, and the bus/devfn range the IOMMU covers.
 *
 * Returns 0 on success, -ENODEV if the MMIO BAR is zero or misaligned.
 *
 * NOTE(review): the two write_pci_config() calls below hard-code the MMIO BAR
 * to 0x0000000040000001 — the bracketing "remove it when BIOS available"
 * comments mark this as a temporary workaround for firmware that does not
 * program the BAR itself.  Confirm whether this workaround is still needed
 * before shipping.
 */
int __init get_iommu_capabilities(u8 bus, u8 dev, u8 func, u8 cap_ptr,
                                  struct amd_iommu *iommu)
{
    u32 cap_header, cap_range;
    u64 mmio_bar;

    /* remove it when BIOS available */
    write_pci_config(bus, dev, func,
        cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
    write_pci_config(bus, dev, func,
        cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
    /* remove it when BIOS available */

    /* Reassemble the 64-bit MMIO base from the high and low BAR words. */
    mmio_bar = (u64)read_pci_config(bus, dev, func,
             cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
    mmio_bar |= read_pci_config(bus, dev, func,
            cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET) &
        PCI_CAP_MMIO_BAR_LOW_MASK;
    iommu->mmio_base_phys = (unsigned long)mmio_bar;

    /*
     * Reject a zero BAR or one not aligned to 16K (low 14 bits set) —
     * presumably the minimum MMIO region alignment required by the
     * hardware; TODO confirm against the AMD IOMMU specification.
     */
    if ( (mmio_bar == 0) || ( (mmio_bar & 0x3FFF) != 0 ) )
    {
        dprintk(XENLOG_ERR ,
                "AMD IOMMU: Invalid MMIO_BAR = 0x%"PRIx64"\n", mmio_bar);
        return -ENODEV;
    }

    /* Decode revision and feature-support bits from the capability header. */
    cap_header = read_pci_config(bus, dev, func, cap_ptr);
    iommu->revision = get_field_from_reg_u32(
        cap_header, PCI_CAP_REV_MASK, PCI_CAP_REV_SHIFT);
    iommu->iotlb_support = get_field_from_reg_u32(
        cap_header, PCI_CAP_IOTLB_MASK, PCI_CAP_IOTLB_SHIFT);
    iommu->ht_tunnel_support = get_field_from_reg_u32(
        cap_header, PCI_CAP_HT_TUNNEL_MASK, PCI_CAP_HT_TUNNEL_SHIFT);
    iommu->not_present_cached = get_field_from_reg_u32(
        cap_header, PCI_CAP_NP_CACHE_MASK, PCI_CAP_NP_CACHE_SHIFT);

    /* Decode the bus number and devfn range this IOMMU is responsible for. */
    cap_range = read_pci_config(bus, dev, func,
            cap_ptr + PCI_CAP_RANGE_OFFSET);
    iommu->root_bus = get_field_from_reg_u32(
        cap_range, PCI_CAP_BUS_NUMBER_MASK, PCI_CAP_BUS_NUMBER_SHIFT);
    iommu->first_devfn = get_field_from_reg_u32(
        cap_range, PCI_CAP_FIRST_DEVICE_MASK, PCI_CAP_FIRST_DEVICE_SHIFT);
    iommu->last_devfn = get_field_from_reg_u32(
        cap_range, PCI_CAP_LAST_DEVICE_MASK, PCI_CAP_LAST_DEVICE_SHIFT);

    return 0;
}
/*
 * Flush the IOMMU command buffer by queueing an empty COMPLETION_WAIT
 * command (with its interrupt flag set) and, when polling is enabled via
 * amd_iommu_poll_comp_wait, busy-waiting for the hardware to assert
 * 'ComWaitInt' in the status register.
 *
 * The status register's ComWaitInt bit is write-1-to-clear (WIC): it is
 * cleared both before sending the command and again after the wait
 * completes.
 */
void flush_command_buffer(struct amd_iommu *iommu)
{
    u32 cmd[4], status;
    int loop_count, comp_wait;

    /* clear 'ComWaitInt' in status register (WIC) */
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         IOMMU_STATUS_COMP_WAIT_INT_MASK,
                         IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status);
    writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);

    /* send an empty COMPLETION_WAIT command to flush command buffer */
    cmd[3] = cmd[2] = 0;
    set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
                         IOMMU_CMD_OPCODE_MASK,
                         IOMMU_CMD_OPCODE_SHIFT, &cmd[1]);
    /* Request an interrupt (the 'i' flag) so ComWaitInt gets set. */
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         IOMMU_COMP_WAIT_I_FLAG_MASK,
                         IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
    send_iommu_command(iommu, cmd);

    /* wait for 'ComWaitInt' to signal completion */
    if ( amd_iommu_poll_comp_wait )
    {
        /* Bounded busy-wait: amd_iommu_poll_comp_wait caps the iterations. */
        loop_count = amd_iommu_poll_comp_wait;
        do {
            status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
            comp_wait = get_field_from_reg_u32(
                status, IOMMU_STATUS_COMP_WAIT_INT_MASK,
                IOMMU_STATUS_COMP_WAIT_INT_SHIFT);
            --loop_count;
        } while ( loop_count && !comp_wait );

        if ( comp_wait )
        {
            /* clear 'ComWaitInt' in status register (WIC) */
            status &= IOMMU_STATUS_COMP_WAIT_INT_MASK;
            writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
        }
        else
        {
            amd_iov_warning("Warning: ComWaitInt bit did not assert!\n");
        }
    }
}
/*
 * Extract the present bit of a page table entry.  Returns non-zero when
 * the entry is marked present, 0 otherwise.
 */
static int amd_iommu_is_pte_present(u32 *entry)
{
    u32 present = get_field_from_reg_u32(entry[0],
                                         IOMMU_PDE_PRESENT_MASK,
                                         IOMMU_PDE_PRESENT_SHIFT);
    return present;
}