static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the 64 sets.
		 * A 64KB page can be cached in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
					data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
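
/*
 * A minimal sketch (hypothetical helper, not part of the driver) that
 * works through the num_inv arithmetic above for a version-2 SysMMU,
 * assuming 4KB base pages: 64KB / 4KB = 16 set-wise invalidations,
 * while 1MB / 4KB = 256 is capped at the 64 sets the L2TLB has.
 */
static unsigned int example_v2_num_inv(size_t size)
{
	/* one invalidation per 4KB of the mapping, at most one per set */
	unsigned int num_inv = min_t(unsigned int, size / SZ_4K, 64);

	return num_inv;	/* 4KB -> 1, 64KB -> 16, 1MB -> 64 */
}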
void exynos_sysmmu_set_pbuf(struct device *dev, int nbufs,
				struct sysmmu_prefbuf prefbuf[])
{
	struct device *sysmmu;
	int nsfrs;

	if (WARN_ON(nbufs < 1))
		return;

	for_each_sysmmu(dev, sysmmu) {
		unsigned long flags;
		struct sysmmu_drvdata *drvdata;

		drvdata = dev_get_drvdata(sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);

		if (!is_sysmmu_active(drvdata)) {
			spin_unlock_irqrestore(&drvdata->lock, flags);
			continue;
		}

		for (nsfrs = 0; nsfrs < drvdata->nsfrs; nsfrs++) {
			int min;

			if (!has_sysmmu_capable_pbuf(drvdata, nsfrs,
						     prefbuf, &min))
				continue;

			if (sysmmu_block(drvdata->sfrbases[nsfrs])) {
				func_set_pbuf[min](drvdata, nsfrs,
						   nbufs, prefbuf);
				sysmmu_unblock(drvdata->sfrbases[nsfrs]);
			}
		} /* for (nsfrs < drvdata->nsfrs) */

		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}
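
/*
 * Hedged usage sketch: how a master device's driver might program two
 * prefetch buffers, one per DMA stream, before starting a transfer.
 * example_set_two_pbufs() and its parameters are hypothetical, and the
 * .base/.size fields are assumed from the struct sysmmu_prefbuf layout
 * consumed by has_sysmmu_capable_pbuf()/func_set_pbuf[] above.
 */
static void example_set_two_pbufs(struct device *master,
				  unsigned long src, unsigned long dst,
				  size_t len)
{
	struct sysmmu_prefbuf pb[2] = {
		{ .base = src, .size = len },	/* read stream */
		{ .base = dst, .size = len },	/* write stream */
	};

	/* programs every capable prefetch buffer on each of the
	 * master's System MMUs; inactive SysMMUs are skipped */
	exynos_sysmmu_set_pbuf(master, ARRAY_SIZE(pb), pb);
}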