static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
					mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}

	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);

	if (msm_soc_version_supports_iommu_v1())
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);

	msm_iommu_lock();

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v1())
			kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		for (i = 0; i < iommu->unit_count; i++) {
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL,
				1);
			mb();
		}
	}

	msm_iommu_unlock();

	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
 * of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether pagetable has to change or tlb is to be
 * flushed or both
 *
 * Based on flags set the new pagetable for the IOMMU unit or flush its tlb or
 * do both by doing direct register writes to the IOMMU registers through the
 * cpu
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
					mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}
	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);
	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
			/* Set asid */
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
				kgsl_iommu_get_hwpagetable_asid(mmu));
			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
				kgsl_iommu_get_hwpagetable_asid(mmu));
			mb();
		}
	}
	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
 * of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether pagetable has to change or tlb is to be
 * flushed or both
 *
 * Based on flags set the new pagetable for the IOMMU unit or flush its tlb or
 * do both by doing direct register writes to the IOMMU registers through the
 * cpu
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
					mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}
	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);

	/* For v1 SMMU GPU needs to be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v1())
		kgsl_idle(mmu->device);

	/* Acquire GPU-CPU sync Lock here */
	msm_iommu_lock();

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v1())
			kgsl_idle(mmu->device);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			mb();
			temp = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_IOMMU_REG(
				iommu->iommu_units[i].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL,
				1);
			mb();
		}
	}

	/* Release GPU-CPU sync Lock here */
	msm_iommu_unlock();

	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
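The setstate variants above are driven entirely by the flags argument: KGSL_MMUFLAGS_PTUPDATE selects the TTBR0 rewrite loop and KGSL_MMUFLAGS_TLBFLUSH selects the TLB invalidation loop, and a caller may set both. The stand-alone sketch below only mirrors that dispatch in user space; the flag bit values, the helper name, and the caller logic are assumptions made for the demo, not definitions taken from the driver headers.

/* Illustrative sketch only: mimics the flag-based dispatch of
 * kgsl_iommu_default_setstate(). Bit values and decision logic are
 * assumptions for the demo; only the two flag meanings come from the
 * driver code above. */
#include <stdio.h>

#define DEMO_MMUFLAGS_PTUPDATE 0x00000001u /* assumed bit value */
#define DEMO_MMUFLAGS_TLBFLUSH 0x00000002u /* assumed bit value */

static void demo_setstate(unsigned int flags)
{
	if (flags & DEMO_MMUFLAGS_PTUPDATE)
		printf("would idle the GPU and rewrite TTBR0 per IOMMU unit\n");
	if (flags & DEMO_MMUFLAGS_TLBFLUSH)
		printf("would invalidate the TLB of the user context bank\n");
}

int main(void)
{
	/* switching to a new pagetable implies a TLB invalidate as well */
	demo_setstate(DEMO_MMUFLAGS_PTUPDATE | DEMO_MMUFLAGS_TLBFLUSH);
	/* unmap-only case: mappings changed but the pagetable did not */
	demo_setstate(DEMO_MMUFLAGS_TLBFLUSH);
	return 0;
}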
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
	int status;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (mmu->defaultpagetable == NULL) {
		status = kgsl_iommu_setup_defaultpagetable(mmu);
		if (status)
			return -ENOMEM;
	}
	/* We use the GPU MMU to control access to IOMMU registers on 8960 with
	 * a225, hence we still keep the MMU active on 8960 */
	if (cpu_is_msm8960()) {
		struct kgsl_mh *mh = &(mmu->device->mh);
		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
			mh->mpu_base +
			iommu->iommu_units
				[iommu->unit_count - 1].reg_map.gpuaddr -
				PAGE_SIZE);
	} else {
		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
	}

	mmu->hwpagetable = mmu->defaultpagetable;

	status = kgsl_attach_pagetable_iommu_domain(mmu);
	if (status) {
		mmu->hwpagetable = NULL;
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (status) {
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	if (status) {
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}
	/* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
	 * that value should not change when we change pagetables, so while
	 * changing pagetables we can use this lsb value of the pagetable w/o
	 * having to read it again
	 */
	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		/* Make sure that the ASID of the priv bank is set to 1.
		 * When we use a different pagetable for the priv bank then the
		 * iommu driver sets the ASID to 0 instead of 1 */
		KGSL_IOMMU_SET_IOMMU_REG(iommu->iommu_units[i].reg_map.hostptr,
					KGSL_IOMMU_CONTEXT_PRIV,
					CONTEXTIDR, 1);
		for (j = 0; j < iommu_unit->dev_count; j++)
			iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
						KGSL_IOMMU_GET_IOMMU_REG(
						iommu_unit->reg_map.hostptr,
						iommu_unit->dev[j].ctx_id,
						TTBR0));
	}
	iommu->asid = KGSL_IOMMU_GET_IOMMU_REG(
				iommu->iommu_units[0].reg_map.hostptr,
				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);

	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
	mmu->flags |= KGSL_FLAGS_STARTED;

done:
	if (status) {
		kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
		kgsl_detach_pagetable_iommu_domain(mmu);
	}
	return status;
}
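The pt_lsb caching done in kgsl_iommu_start() and the pt_base masking done in kgsl_iommu_default_setstate() are two halves of one calculation: the low TTBR0 bits latched at start time are later added to the masked physical base of the new pagetable. The user-space sketch below reproduces that arithmetic; the 18-bit field at bit 14 and the sample register values are illustrative assumptions for the demo, not the driver's KGSL_IOMMU_TTBR0_PA_MASK/SHIFT definitions.

/* Illustrative sketch only: shows how a new TTBR0 value is composed from a
 * cached low-bit field plus a masked pagetable base, mirroring the
 * KGSL_MMUFLAGS_PTUPDATE path above. Mask, shift, and sample values are
 * assumptions for the demo. */
#include <stdio.h>

#define DEMO_TTBR0_PA_MASK  0x0003FFFFu /* assumed width of the PA field */
#define DEMO_TTBR0_PA_SHIFT 14          /* assumed position of the PA field */

int main(void)
{
	unsigned int ttbr0_at_start = 0x80A4006Au; /* hypothetical TTBR0 read at start */
	unsigned int new_pt_base    = 0x8EC00000u; /* hypothetical new pagetable base */

	/* kgsl_iommu_start() caches the bits that do not change across switches */
	unsigned int pt_lsb = ttbr0_at_start &
			~(DEMO_TTBR0_PA_MASK << DEMO_TTBR0_PA_SHIFT);

	/* kgsl_iommu_default_setstate() keeps only the PA field of the new base */
	unsigned int pt_base = new_pt_base &
			(DEMO_TTBR0_PA_MASK << DEMO_TTBR0_PA_SHIFT);

	/* the value written to TTBR0 is the sum of the two disjoint fields */
	unsigned int pt_val = pt_lsb + pt_base;

	printf("cached lsb 0x%08x + masked base 0x%08x -> TTBR0 0x%08x\n",
	       pt_lsb, pt_base, pt_val);
	return 0;
}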