/*
 * Read the hardware version of the System MMU instance at @idx.
 *
 * Returns the major version number; when @minor is non-NULL the minor
 * version is stored through it.  On some instances the MMU_VERSION
 * register is repurposed, so a major value outside the known range is
 * treated as invalid and the version recorded in @drvdata->ver is used
 * instead (or plain "1" when no version was recorded there).
 */
static unsigned int __sysmmu_version(struct sysmmu_drvdata *drvdata,
				     int idx, unsigned int *minor)
{
	unsigned int reg = readl(drvdata->sfrbases[idx] + REG_MMU_VERSION);

	if ((MMU_MAJ_VER(reg) != 0) && (MMU_MAJ_VER(reg) <= 3)) {
		/* Register holds a sane version value: decode it. */
		if (minor)
			*minor = MMU_MIN_VER(reg);
		return MMU_MAJ_VER(reg);
	}

	/* register MMU_VERSION is used for special purpose */
	if (drvdata->ver.major == 0) {
		/* min ver. is not important for System MMU 1 and 2 */
		return 1;
	}

	if (minor)
		*minor = drvdata->ver.minor;
	return drvdata->ver.major;
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data) { unsigned int cfg = CFG_LRU | CFG_QOS(15); unsigned int ver; ver = __raw_sysmmu_version(data); if (MMU_MAJ_VER(ver) == 3) { if (MMU_MIN_VER(ver) >= 2) { cfg |= CFG_FLPDCACHE; if (MMU_MIN_VER(ver) == 3) { cfg |= CFG_ACGEN; cfg &= ~CFG_LRU; } else { cfg |= CFG_SYSSEL; } } } __raw_writel(cfg, data->sfrbase + REG_MMU_CFG); }
static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova, size_t size) { struct exynos_iommu_owner *owner = dev->archdata.iommu; unsigned long flags; struct sysmmu_drvdata *data; data = dev_get_drvdata(owner->sysmmu); spin_lock_irqsave(&data->lock, flags); if (is_sysmmu_active(data)) { unsigned int num_inv = 1; if (!IS_ERR(data->clk_master)) clk_enable(data->clk_master); /* * L2TLB invalidation required * 4KB page: 1 invalidation * 64KB page: 16 invalidation * 1MB page: 64 invalidation * because it is set-associative TLB * with 8-way and 64 sets. * 1MB page can be cached in one of all sets. * 64KB page can be one of 16 consecutive sets. */ if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2) num_inv = min_t(unsigned int, size / PAGE_SIZE, 64); if (sysmmu_block(data->sfrbase)) { __sysmmu_tlb_invalidate_entry( data->sfrbase, iova, num_inv); sysmmu_unblock(data->sfrbase); } if (!IS_ERR(data->clk_master)) clk_disable(data->clk_master); } else {