static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); arm_lpae_iopte pte, *ptep = data->pgd; int lvl = ARM_LPAE_START_LVL(data); do { /* Valid IOPTE pointer? */ if (!ptep) return 0; /* Grab the IOPTE we're interested in */ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); /* Valid entry? */ if (!pte) return 0; /* Leaf entry? */ if (iopte_leaf(pte,lvl)) goto found_translation; /* Take it to the next level */ ptep = iopte_deref(pte, data); } while (++lvl < ARM_LPAE_MAX_LEVELS); /* Ran out of page tables to walk */ return 0; found_translation: iova &= ((1 << data->pg_shift) - 1); return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; }
/*
 * Free the table at @ptep (level @lvl) and, recursively, every
 * next-level table it points to. Leaf entries are skipped: the pages
 * they map belong to the caller, not to the page-table hierarchy.
 */
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *base = ptep;
	/* The top-level table (pgd) may be larger than one granule */
	unsigned long table_size = (lvl == ARM_LPAE_START_LVL(data)) ?
				   data->pgd_size : 1UL << data->pg_shift;
	arm_lpae_iopte *limit;

	/*
	 * The last level holds only leaf entries, so there is nothing
	 * to recurse into: make the scan loop run zero iterations.
	 */
	limit = (lvl == ARM_LPAE_MAX_LEVELS - 1) ? ptep
						 : (void *)ptep + table_size;

	for (; ptep != limit; ptep++) {
		arm_lpae_iopte pte = *ptep;

		/* Descend only through valid table (non-leaf) entries */
		if (pte && !iopte_leaf(pte, lvl))
			__arm_lpae_free_pgtable(data, lvl + 1,
						iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(base, table_size, &data->iop.cfg);
}
/*
 * Recursive worker for unmap: walk down from @ptep at level @lvl and
 * remove the mapping of @size bytes at @iova.
 *
 * Returns the number of bytes unmapped (== @size on success), or 0 on
 * failure (walked off the end of the tables, or hit an empty entry).
 *
 * NOTE(review): assumes @size is one of the supported block/page sizes
 * and @iova is aligned to it — presumably enforced by the caller; the
 * WARN_ONs below only catch structural walk errors, not bad sizes.
 */
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		/* Clear the entry first, so the walk below sees it gone */
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/*
			 * The entry was a table: also flush any partial
			 * walks the TLB may have cached for it, and sync
			 * before freeing the sub-tree so the hardware can
			 * no longer reach the pages being freed.
			 */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			/* Leaf entry: queue a flush for the mapped range */
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
/*
 * Recursive worker for map: descend the tables, installing a leaf entry
 * for [@iova, @iova + @size) -> @paddr once a level whose block size
 * matches @size is reached, allocating intermediate tables on the way.
 *
 * Returns VMM_OK on success or a VMM_Exxx error code.
 */
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data,
			  physical_addr_t iova, physical_addr_t paddr,
			  size_t size, arm_lpae_iopte prot, int lvl,
			  arm_lpae_iopte *ptep)
{
	int rc;
	physical_addr_t pa;
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return VMM_EINVALID;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data), cfg);
		if (!cptep)
			return VMM_ENOMEM;
		/* Table descriptors hold the physical address of the table */
		rc = vmm_host_va2pa((virtual_addr_t)cptep, &pa);
		if (rc)
			return rc;
		pte = pa | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else if (!iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else {
		/*
		 * A block mapping already exists here: dereferencing it
		 * as a table pointer would walk into the mapped region
		 * itself and corrupt it. The caller must unmap first.
		 */
		return VMM_EINVALID;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
/*
 * Recursive worker for map: descend the tables, installing a leaf entry
 * for [@iova, @iova + @size) -> @paddr once a level whose block size
 * matches @size is reached, allocating intermediate tables on the way.
 *
 * Returns 0 on success or a negative errno. Mapping over an existing
 * leaf entry fails with -EEXIST: callers must unmap first.
 */
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	arm_lpae_iopte pte, *cptep;

	/* Index into the table for this level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* A leaf fits here if the request matches this level's block size */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* Tables cannot hang off the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	pte = *ptep;
	if (pte) {
		if (iopte_leaf(pte, lvl)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}
		/* Existing table entry: follow it down */
		cptep = iopte_deref(pte, data);
	} else {
		/* Empty slot: install a fresh next-level table */
		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	}

	/* Descend to the next level */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
/*
 * Unmap a region smaller than the block entry covering it: replace the
 * block at @ptep with a next-level table that re-maps the whole block
 * minus the [@iova, @iova + @size) hole, then flush the old block from
 * the TLB.
 *
 * Returns the number of bytes unmapped (@size) on success, or 0 if
 * building the replacement table failed.
 *
 * NOTE(review): assumes @size equals the next level's block size, so the
 * split loop steps one child entry at a time — presumably guaranteed by
 * __arm_lpae_unmap; confirm against the caller.
 */
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	/*
	 * @table accumulates the new table descriptor: the first child
	 * mapping installed below allocates the table and writes its
	 * descriptor here (via the offset-pointer trick).
	 */
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! (skip the chunk being removed — leave it unmapped) */
		if (blk_start == iova)
			continue;

		/*
		 * __arm_lpae_map expects a pointer to the start of the table;
		 * back up from &table by this chunk's index so that the map
		 * call's indexing lands exactly on &table.
		 */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	/* Swap the block entry for the new table, then flush the old block */
	__arm_lpae_set_pte(ptep, table, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
	return size;
}