/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
 * of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether pagetable has to change or tlb is to be
 * flushed or both
 *
 * Based on flags, set the new pagetable for the IOMMU unit or flush its tlb
 * or do both by doing direct register writes to the IOMMU registers through
 * the cpu
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_get_pt_base_addr(mmu,
						mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}

	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
			iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);

	/* For v1 SMMU GPU needs to be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v1())
		kgsl_idle(mmu->device);

	/* Acquire GPU-CPU sync Lock here */
	msm_iommu_lock();

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v1())
			kgsl_idle(mmu->device);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			mb();
			temp = KGSL_IOMMU_GET_CTX_REG(iommu,
				(&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1);
			mb();
		}
	}

	/* Release GPU-CPU sync Lock here */
	msm_iommu_unlock();

	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
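/*
 * Usage sketch, not from the source: a caller that must both switch the
 * active pagetable and drop stale translations could OR the two flags
 * together before calling the function above. Only the flag names and
 * kgsl_iommu_default_setstate() come from the code; the wrapper name below
 * is hypothetical.
 */
static inline void kgsl_iommu_switch_and_flush_sketch(struct kgsl_mmu *mmu)
{
	kgsl_iommu_default_setstate(mmu,
			KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH);
}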
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
					mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}

	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);

	/* For v1 SMMU GPU needs to be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v1())
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);

	/* Acquire GPU-CPU sync Lock here */
	msm_iommu_lock();

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v1())
			kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL, 1);
			mb();
		}
	}

	/* Release GPU-CPU sync Lock here */
	msm_iommu_unlock();

	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
 * of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether pagetable has to change or tlb is to be
 * flushed or both
 *
 * Based on flags, set the new pagetable for the IOMMU unit or flush its tlb
 * or do both by doing direct register writes to the IOMMU registers through
 * the cpu
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
					mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}

	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0);

			/* Set asid */
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
				kgsl_iommu_get_hwpagetable_asid(mmu));

			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
				kgsl_iommu_get_hwpagetable_asid(mmu));
			mb();
		}
	}

	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
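/*
 * Sketch of the TTBR0 value composition used above, under stated assumptions:
 * the pagetable physical base is masked down to the TTBR0 PA field, then the
 * preserved low bits (the lsb value fetched via kgsl_iommu_get_pt_lsb(),
 * which must not change across the switch) are added on top. The helper name
 * is hypothetical; the mask, shift, and arithmetic mirror the code above.
 */
static inline unsigned int kgsl_iommu_ttbr0_value_sketch(unsigned int pt_base,
							 unsigned int pt_lsb)
{
	unsigned int val = pt_base &
		(KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);

	return val + pt_lsb;
}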
static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pagetable)
{
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table is not current, set up the mmu to use
		 * the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
			kgsl_detach_pagetable_iommu_domain(mmu);
			mmu->hwpagetable = pagetable;
			if (mmu->hwpagetable)
				kgsl_attach_pagetable_iommu_domain(mmu);
		}
	}
}
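/*
 * Usage sketch, not from the source: pointing the MMU at a caller-owned
 * pagetable once the MMU has been started. kgsl_iommu_setstate() itself is
 * a no-op unless KGSL_FLAGS_STARTED is set and the pagetable differs from
 * mmu->hwpagetable; the wrapper name and "pt" argument are hypothetical.
 */
static inline void kgsl_iommu_use_pagetable_sketch(struct kgsl_mmu *mmu,
						   struct kgsl_pagetable *pt)
{
	kgsl_iommu_setstate(mmu, pt);
}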