/*
 * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush the IOMMU
 * TLB of the primary context bank
 * @mmu - Pointer to mmu structure
 * @flags - Flags indicating whether the pagetable has to change or the TLB is
 * to be flushed, or both
 *
 * Based on the flags, set the new pagetable for the IOMMU unit or flush its
 * TLB, or do both, by doing direct register writes to the IOMMU registers
 * from the CPU.
 * Return - void
 */
static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_iommu *iommu = mmu->priv;
	int temp;
	int i;
	unsigned int pt_base = kgsl_iommu_get_pt_base_addr(mmu,
						mmu->hwpagetable);
	unsigned int pt_val;

	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
		return;
	}
	/* Mask off the lsb of the pt base address since lsb will not change */
	pt_base &= (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
			iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);

	/* For v1 SMMU GPU needs to be idle for tlb invalidate as well */
	if (msm_soc_version_supports_iommu_v1())
		kgsl_idle(mmu->device);

	/* Acquire GPU-CPU sync Lock here */
	msm_iommu_lock();

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		if (!msm_soc_version_supports_iommu_v1())
			kgsl_idle(mmu->device);
		for (i = 0; i < iommu->unit_count; i++) {
			/* get the lsb value which should not change when
			 * changing ttbr0 */
			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
						KGSL_IOMMU_CONTEXT_USER);
			pt_val += pt_base;

			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);

			mb();
			temp = KGSL_IOMMU_GET_CTX_REG(iommu,
				(&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TTBR0);
		}
	}
	/* Flush tlb */
	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		for (i = 0; i < iommu->unit_count; i++) {
			KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
				KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1);
			mb();
		}
	}

	/* Release GPU-CPU sync Lock here */
	msm_iommu_unlock();

	/* Disable smmu clock */
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
}
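/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * caller that has already selected a new hwpagetable might request both a
 * TTBR0 update and a TLB invalidate in a single call. The flag names are the
 * ones tested above; the helper and its locking assumption are hypothetical.
 */
static void example_switch_pagetable(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *new_pt)
{
	/* assumed to run with the device mutex held, as setstate expects */
	mmu->hwpagetable = new_pt;
	kgsl_iommu_default_setstate(mmu,
		KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH);
}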
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	int ret = 0;
	struct kgsl_mmu *mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_unit *iommu_unit;
	struct kgsl_iommu_device *iommu_dev;
	unsigned int ptbase, fsr;

	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
	if (ret)
		goto done;
	iommu_dev = get_iommu_device(iommu_unit, dev);
	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		ret = -ENOSYS;
		goto done;
	}
	iommu = mmu->priv;

	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, TTBR0);
	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, FSR);

	KGSL_MEM_CRIT(iommu_dev->kgsldev,
		"GPU PAGE FAULT: addr = %lX pid = %d\n",
		addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
	KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
		iommu_dev->ctx_id, fsr);

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), 0);

done:
	return ret;
}
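/*
 * Sketch of how this handler is presumably registered (assumption: the
 * pre-3.5 iommu_set_fault_handler() signature, which takes no token argument
 * and matches this handler's prototype). The wrapper name is hypothetical.
 */
static void example_register_fault_handler(struct iommu_domain *domain)
{
	iommu_set_fault_handler(domain, kgsl_iommu_fault_handler);
}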
static unsigned int kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
{
	unsigned int pt_base;
	struct kgsl_iommu *iommu = mmu->priv;
	/*
	 * We cannot enable or disable the clocks in interrupt context; this
	 * function is called from interrupt context if there is an axi error
	 */
	if (in_interrupt())
		return 0;
	/* Return the current pt base by reading IOMMU pt_base register */
	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	pt_base = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[0]),
					KGSL_IOMMU_CONTEXT_USER, TTBR0);
	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
	return pt_base &
		(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
		 iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
}
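/*
 * Illustrative helper (hypothetical, not in the driver): the same TTBR0
 * masking used above, factored out. reg_mask/reg_shift select the bits of
 * TTBR0 that hold the pagetable base address; the remaining low bits hold
 * attribute fields, which kgsl_iommu_start() below caches as the "pt lsb".
 */
static unsigned int example_ttbr0_base_bits(struct kgsl_iommu *iommu,
					unsigned int ttbr0)
{
	return ttbr0 &
		(iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
		 iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
}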
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
	struct kgsl_device *device = mmu->device;
	int status;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (mmu->defaultpagetable == NULL) {
		status = kgsl_iommu_setup_defaultpagetable(mmu);
		if (status)
			return -ENOMEM;

		/* Initialize the sync lock between GPU and CPU */
		if (msm_soc_version_supports_iommu_v1() &&
			(device->id == KGSL_DEVICE_3D0))
			kgsl_iommu_init_sync_lock(mmu);
	}

	/* We use the GPU MMU to control access to IOMMU registers on 8960 with
	 * a225, hence we still keep the MMU active on 8960 */
	if (cpu_is_msm8960()) {
		struct kgsl_mh *mh = &(mmu->device->mh);
		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
			mh->mpu_base +
			iommu->iommu_units[0].reg_map.gpuaddr);
	} else {
		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
	}

	mmu->hwpagetable = mmu->defaultpagetable;

	status = kgsl_attach_pagetable_iommu_domain(mmu);
	if (status) {
		mmu->hwpagetable = NULL;
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
	if (status) {
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}
	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
	if (status) {
		KGSL_CORE_ERR("clk enable failed\n");
		goto done;
	}

	/* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
	 * that value should not change when we change pagetables, so while
	 * changing pagetables we can use this lsb value of the pagetable w/o
	 * having to read it again
	 */
	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++)
			iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(iommu,
						KGSL_IOMMU_GET_CTX_REG(iommu,
						iommu_unit,
						iommu_unit->dev[j].ctx_id,
						TTBR0));
	}

	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
	mmu->flags |= KGSL_FLAGS_STARTED;

done:
	if (status) {
		kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
		kgsl_detach_pagetable_iommu_domain(mmu);
	}
	return status;
}
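/*
 * For reference, KGSL_IOMMMU_PT_LSB() used above is likely defined along
 * these lines: the complement of the base-address mask, so only the low
 * attribute bits of TTBR0 survive. This is an assumption inferred from how
 * the cached lsb is added back to pt_base in kgsl_iommu_default_setstate();
 * the macro below is a hypothetical stand-in, not the driver's definition.
 */
#define EXAMPLE_IOMMU_PT_LSB(iommu, pt_val)				\
	((pt_val) &							\
	 ~((iommu)->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<	\
	   (iommu)->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift))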
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	int ret = 0;
	struct kgsl_mmu *mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_unit *iommu_unit;
	struct kgsl_iommu_device *iommu_dev;
	unsigned int ptbase, fsr;
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	unsigned int no_page_fault_log = 0;
	unsigned int curr_context_id = 0;
	unsigned int curr_global_ts = 0;
	static struct adreno_context *curr_context;
	static struct kgsl_context *context;

	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
	if (ret)
		goto done;
	iommu_dev = get_iommu_device(iommu_unit, dev);
	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		ret = -ENOSYS;
		goto done;
	}
	iommu = mmu->priv;
	device = mmu->device;
	adreno_dev = ADRENO_DEVICE(device);

	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, TTBR0);
	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, FSR);

	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);

	if (!no_page_fault_log) {
		KGSL_MEM_CRIT(iommu_dev->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid = %d\n",
			addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
			iommu_dev->ctx_id, fsr);
	}

	mmu->fault = 1;
	iommu_dev->fault = 1;

	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));

	context = idr_find(&device->context_idr, curr_context_id);
	if (context != NULL)
		curr_context = context->devctxt;

	kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, eoptimestamp));

	/*
	 * Store the page fault's timestamp in the context; this information
	 * is used in GFT. Guard against a NULL pointer: curr_context is
	 * static and may be unset or stale if the idr lookup above failed.
	 */
	if (curr_context) {
		curr_context->pagefault = 1;
		curr_context->pagefault_ts = curr_global_ts;
	}

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), 0);

	/*
	 * We do not want the h/w to resume fetching data from an iommu unit
	 * that has faulted, this is better for debugging as it will stall
	 * the GPU and trigger a snapshot. To stall the transaction return
	 * EBUSY error.
	 */
	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
		ret = -EBUSY;
done:
	return ret;
}
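/*
 * Sketch (assumption: ft_pf_policy is a plain bitmask set at init or via
 * debugfs): enabling GPUHALT makes the handler above return -EBUSY so the
 * faulting transaction stalls for snapshotting, while LOG_ONE_PER_PAGE
 * rate-limits the fault logging. The helper name is hypothetical.
 */
static void example_set_pagefault_policy(struct adreno_device *adreno_dev)
{
	adreno_dev->ft_pf_policy |= KGSL_FT_PAGEFAULT_GPUHALT_ENABLE |
				KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE;
}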