/*
 * Interrupt handler for the Exynos System MMU.
 *
 * Decodes the pending fault from REG_INT_STATUS, logs it, gives the
 * registered IOMMU fault handler (if any) a chance to recover, and only
 * then clears the interrupt and unblocks the SYSMMU.  An unrecovered
 * fault is fatal: this handler deliberately panics via BUG()/BUG_ON()
 * rather than let translation continue with a bad mapping.
 *
 * @irq:    interrupt number (unused here)
 * @dev_id: the struct sysmmu_drvdata registered with request_irq()
 *
 * Returns IRQ_HANDLED (every other exit path panics).
 */
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked when interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;	/* stays -1 if the fault type is unknown */
	int ret = -ENOSYS;		/* "no handler ran" until report_iommu_fault() does */

	/* A fault IRQ from a disabled SYSMMU indicates a driver state bug. */
	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	/* Master clock must be running to access the SFR region below. */
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	/*
	 * INT_STATUS is a bitmask with one bit per fault type; __ffs()
	 * picks the lowest pending one.  NOTE(review): __ffs(0) is
	 * undefined, so this relies on the hardware guaranteeing at least
	 * one status bit is set when the IRQ fires — confirm.
	 */
	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));

	/*
	 * NOTE(review): if this enum is unsigned, (itype >= 0) is always
	 * true and only the upper-bound check is effective — worth
	 * confirming against the enum definition.
	 */
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		/* Each known fault type has its own fault-address register. */
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		/* Spurious or mis-routed IRQ: nothing sane to recover. */
		pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		/* Current page-table base, for the diagnostic dump. */
		unsigned int base =
			__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		/*
		 * Let the domain's fault handler try to fix the mapping.
		 * NOTE(review): if data->domain is NULL, ret stays -ENOSYS
		 * and the BUG_ON below panics — presumably intentional
		 * (no handler means no recovery), but worth confirming.
		 */
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	/* Ack the serviced fault bit, then let the SYSMMU resume translating. */
	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
/*
 * SMMU ISR, handler that SMMU reports fault to.
 *
 * Fault interrupt handler for the IVP SMMU.  Reports the event to the
 * DSM monitoring client, dumps global and context-bank fault state,
 * forwards the fault to the IOMMU domain's handler if one is attached,
 * clears/terminates the faulting transaction, and finally masks further
 * SMMU fault interrupts so a fault storm is reported only once.
 *
 * @irq:    interrupt number (unused here)
 * @dev_id: the struct ivp_smmu_dev registered with request_irq()
 *
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t ivp_smmu_isr(int irq, void *dev_id)
{
	struct ivp_smmu_dev *smmu_dev = (struct ivp_smmu_dev *)dev_id;
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	u32 fsr, far, fsynr;
	unsigned long iova;
	int flags = 0;

	/*
	 * Raise a DSM (device state monitor) event for this fault.
	 * "ocuppy" [sic] is the DSM API spelling; a zero return means the
	 * client buffer was successfully claimed for recording.
	 */
	if(!dsm_client_ocuppy(client_ivp)) {
		dsm_client_record(client_ivp, "ivp\n");
		dsm_client_notify(client_ivp, DSM_IVP_SMMU_ERROR_NO);
		pr_info("[I/DSM] %s dsm_client_ivp_smmu", client_ivp->client_name);
	}

	spin_lock(&smmu_dev->spinlock);

	/*
	 * Global fault path: not expected in normal operation, so just dump
	 * the syndrome registers and write GFSR back to itself to clear it
	 * (write-1-to-clear semantics).
	 */
	gfsr = readl(smmu_dev->reg_base + SMMU_NS_GFSR);
	if (gfsr) {
		gfsynr0 = readl(smmu_dev->reg_base + SMMU_NS_GFSYNR0);
		gfsynr1 = readl(smmu_dev->reg_base + SMMU_NS_GFSYNR1);
		gfsynr2 = readl(smmu_dev->reg_base + SMMU_NS_GFSYNR2);
		pr_err("Unexpected global fault, this could be serious\n");
		pr_err("\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
			gfsr, gfsynr0, gfsynr1, gfsynr2);
		writel(gfsr, smmu_dev->reg_base + SMMU_NS_GFSR);
	}

	/* Context bank 0 fault: gather status, syndrome, and faulting address. */
	fsr = readl(smmu_dev->reg_base + SMMU_NS_CB0_FSR);
	fsynr = readl(smmu_dev->reg_base + SMMU_NS_CB0_FSYNR0);
	/* WNR bit distinguishes a faulting write from a faulting read. */
	flags = fsynr & SMMU_CB_FSYNR0_WNR ? SMMU_FAULT_WRITE : SMMU_FAULT_READ;
	/* Assemble the 64-bit fault address from the low/high FAR halves. */
	far = readl(smmu_dev->reg_base + SMMU_NS_CB0_FAR_LOW);
	iova = far;
	far = readl(smmu_dev->reg_base + SMMU_NS_CB0_FAR_HIGH);
	/* NOTE(review): assumes unsigned long is 64-bit here; a 32-bit
	 * build would make this shift undefined — confirm target ABI. */
	iova |= ((unsigned long)far << 32);
	pr_err("Unexpected context fault (fsr 0x%x)\n", fsr);
	pr_err("Unhandled context fault: iova=0x%08lx, fsynr=0x%x\n", iova, fsynr);

	/* Report about an MMU fault to high-level users, if a domain is attached. */
	if (smmu_dev->domain) {
		report_iommu_fault(smmu_dev->domain, smmu_dev->dev, iova, flags);
	}

	/* Clear the faulting FSR (write-1-to-clear). */
	writel(fsr, smmu_dev->reg_base + SMMU_NS_CB0_FSR);

	/* If the transaction is stalled (SS set), terminate it so the bus
	 * does not hang waiting for a resume. */
	if (fsr & SMMU_CB_FSR_SS) {
		writel(SMMU_CB_RESUME_TERMINATE, smmu_dev->reg_base + SMMU_NS_CB0_RESUME);
	}

	/*
	 * Because an ivp DMA error may cause many SMMU faults continuously,
	 * we only report once: mask further fault interrupts here.
	 * NOTE(review): 0x25 / 0x200032 are undocumented magic values
	 * presumably disabling the context-bank and global fault IRQ enables
	 * in SCTLR/CR0 — confirm against the SMMU register spec and replace
	 * with named bit macros.
	 */
	pr_info("Disable smmu irq");
	writel(0x25, (smmu_dev->reg_base + SMMU_NS_CB0_SCTLR));
	writel(0x200032, (smmu_dev->reg_base + SMMU_NS_CR0));

	spin_unlock(&smmu_dev->spinlock);
	return IRQ_HANDLED;
}