/*
 * iommu_vio_init - set up the TCE tables for the iSeries virtual bus.
 *
 * The firmware-provided virtual-bus DMA window (bus 255) is split in
 * half: the first half is dedicated to virtual ethernet (veth), the
 * second half is shared by all other virtual I/O devices.
 *
 * Fix: printk() calls carried no log level; failures of TCE table
 * initialization are errors, so tag them KERN_ERR.
 */
static void __init iommu_vio_init(void)
{
	iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);

	/* veth gets the first half of the window... */
	veth_iommu_table.it_size /= 2;

	/* ...and generic VIO gets the second half, starting right after. */
	vio_iommu_table = veth_iommu_table;
	vio_iommu_table.it_offset += veth_iommu_table.it_size;

	if (!iommu_init_table(&veth_iommu_table, -1))
		printk(KERN_ERR "Virtual Bus VETH TCE table failed.\n");

	if (!iommu_init_table(&vio_iommu_table, -1))
		printk(KERN_ERR "Virtual Bus VIO TCE table failed.\n");
}
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) { const unsigned char *dma_window; struct iommu_table *tbl; unsigned long offset, size; if (firmware_has_feature(FW_FEATURE_ISERIES)) return vio_build_iommu_table_iseries(dev); dma_window = of_get_property(dev->dev.archdata.of_node, "ibm,my-dma-window", NULL); if (!dma_window) return NULL; tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); of_parse_dma_window(dev->dev.archdata.of_node, dma_window, &tbl->it_index, &offset, &size); /* TCE table size - measured in tce entries */ tbl->it_size = size >> IOMMU_PAGE_SHIFT; /* offset for VIO should always be 0 */ tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; tbl->it_busno = 0; tbl->it_type = TCE_VB; return iommu_init_table(tbl, -1); }
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) { #ifdef CONFIG_PPC_ISERIES if (firmware_has_feature(FW_FEATURE_ISERIES)) { if (strcmp(dev->type, "network") == 0) return &veth_iommu_table; return &vio_iommu_table; } else #endif { const unsigned char *dma_window; struct iommu_table *tbl; unsigned long offset, size; dma_window = get_property(dev->dev.platform_data, "ibm,my-dma-window", NULL); if (!dma_window) return NULL; tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); of_parse_dma_window(dev->dev.platform_data, dma_window, &tbl->it_index, &offset, &size); /* TCE table size - measured in tce entries */ tbl->it_size = size >> IOMMU_PAGE_SHIFT; /* offset for VIO should always be 0 */ tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; tbl->it_busno = 0; tbl->it_type = TCE_VB; return iommu_init_table(tbl, -1); } }
static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) { if (phb->p5ioc2.iommu_table.it_map == NULL) { iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node); iommu_register_group(&phb->p5ioc2.iommu_table, pci_domain_nr(phb->hose->bus), phb->opal_id); } set_iommu_table_base_and_group(&pdev->dev, &phb->p5ioc2.iommu_table); }
/*
 * iommu_devnode_init - allocate and initialize the IOMMU table for a
 * device node.
 * @dn: the device node to set up
 *
 * Picks the LPAR or bare-metal parameter setup depending on the
 * platform, then installs the initialized table on the node.
 *
 * Fixes: the kmalloc() return value was cast (unnecessary in C) and
 * used unchecked — an allocation failure would have dereferenced NULL
 * in iommu_table_setparms*().  On failure dn->iommu_table is left
 * untouched.
 */
void iommu_devnode_init(struct device_node *dn)
{
	struct iommu_table *tbl;

	tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl)
		return;

	if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
		iommu_table_setparms_lpar(dn->phb, dn, tbl);
	else
		iommu_table_setparms(dn->phb, dn, tbl);

	dn->iommu_table = iommu_init_table(tbl);
}
/*
 * pnv_pci_p5ioc2_dma_dev_setup - hook a PCI device up to the PHB's
 * shared P5IOC2 DMA (TCE) table, initializing it on first use.
 * @phb:  the host bridge owning the shared table group
 * @pdev: the device being set up for DMA
 *
 * NOTE(review): the init/register/link sequence below looks
 * order-sensitive (group registration before the RCU list init and
 * table/group linkage); left byte-identical and only annotated.
 */
static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
					 struct pci_dev *pdev)
{
	/* All devices behind this PHB share table 0 of its group. */
	struct iommu_table *tbl = phb->p5ioc2.table_group.tables[0];

	/* First device: install ops, build the table, wire up the group. */
	if (!tbl->it_map) {
		tbl->it_ops = &pnv_p5ioc2_iommu_ops;
		iommu_init_table(tbl, phb->hose->node);
		iommu_register_group(&phb->p5ioc2.table_group,
				     pci_domain_nr(phb->hose->bus),
				     phb->opal_id);
		INIT_LIST_HEAD_RCU(&tbl->it_group_list);
		pnv_pci_link_table_and_group(phb->hose->node, 0, tbl,
					     &phb->p5ioc2.table_group);
	}

	set_iommu_table_base(&pdev->dev, tbl);
	iommu_add_device(&pdev->dev);
}
return; } /* If we have ISA, then we probably have an IDE * controller too. Allocate a 128MB table but * skip the first 128MB to avoid stepping on ISA * space. */ pci->phb->dma_window_size = 0x8000000ul; pci->phb->dma_window_base_cur = 0x8000000ul; tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, pci->phb->node); iommu_table_setparms(pci->phb, dn, tbl); pci->iommu_table = iommu_init_table(tbl, pci->phb->node); /* Divide the rest (1.75GB) among the children */ pci->phb->dma_window_size = 0x80000000ul; while (pci->phb->dma_window_size * children > 0x70000000ul) pci->phb->dma_window_size >>= 1; pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size); } static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) { struct iommu_table *tbl; struct device_node *dn, *pdn; struct pci_dn *ppci;
return; } /* If we have ISA, then we probably have an IDE * controller too. Allocate a 128MB table but * skip the first 128MB to avoid stepping on ISA * space. */ pci->phb->dma_window_size = 0x8000000ul; pci->phb->dma_window_base_cur = 0x8000000ul; tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); iommu_table_setparms(pci->phb, dn, tbl); pci->iommu_table = iommu_init_table(tbl); /* Divide the rest (1.75GB) among the children */ pci->phb->dma_window_size = 0x80000000ul; while (pci->phb->dma_window_size * children > 0x70000000ul) pci->phb->dma_window_size >>= 1; DBG("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size); } static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus) { struct iommu_table *tbl; struct device_node *dn, *pdn;