Example #1
0
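/*
 * SYSMMU fault interrupt handler: decodes the fault type from INT_STATUS,
 * reports the fault, then clears the interrupt and unblocks the SYSMMU.
 */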
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
    /* SYSMMU is in blocked state when interrupt occurred. */
    struct sysmmu_drvdata *data = dev_id;
    enum exynos_sysmmu_inttype itype;
    sysmmu_iova_t addr = -1;
    int ret = -ENOSYS;

    WARN_ON(!is_sysmmu_active(data));

    spin_lock(&data->lock);

    if (!IS_ERR(data->clk_master))
        clk_enable(data->clk_master);

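    /* The lowest set bit of INT_STATUS identifies the pending fault type. */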
    itype = (enum exynos_sysmmu_inttype)
            __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
    if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
        itype = SYSMMU_FAULT_UNKNOWN;
    else
        addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

    if (itype == SYSMMU_FAULT_UNKNOWN) {
        pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
               __func__, dev_name(data->sysmmu));
        pr_err("%s: Please check if IRQ is correctly configured.\n",
               __func__);
        BUG();
    } else {
        unsigned int base =
            __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
        show_fault_information(dev_name(data->sysmmu),
                               itype, base, addr);
        if (data->domain)
            ret = report_iommu_fault(data->domain,
                                     data->master, addr, itype);
    }

    /* fault is not recovered by fault handler */
    BUG_ON(ret != 0);

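    /* Acknowledge the handled fault and unblock the SYSMMU. */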
    __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

    sysmmu_unblock(data->sfrbase);

    if (!IS_ERR(data->clk_master))
        clk_disable(data->clk_master);

    spin_unlock(&data->lock);

    return IRQ_HANDLED;
}
Example #2
0
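/*
 * Invalidate the first-level page descriptor (FLPD) cache entry for @iova
 * on the SYSMMU serving @dev, if that SYSMMU is currently active.
 */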
static void sysmmu_tlb_invalidate_flpdcache(struct device *dev,
        sysmmu_iova_t iova)
{
    unsigned long flags;
    struct exynos_iommu_owner *owner = dev->archdata.iommu;
    struct sysmmu_drvdata *data = dev_get_drvdata(owner->sysmmu);

    if (!IS_ERR(data->clk_master))
        clk_enable(data->clk_master);

    spin_lock_irqsave(&data->lock, flags);
    if (is_sysmmu_active(data))
        __sysmmu_tlb_invalidate_flpdcache(data, iova);
    spin_unlock_irqrestore(&data->lock, flags);

    if (!IS_ERR(data->clk_master))
        clk_disable(data->clk_master);
}
Example #3
0
void sysmmu_tlb_invalidate_flpdcache(struct device *dev, dma_addr_t iova)
{
	struct sysmmu_list_data *list;

	for_each_sysmmu_list(dev, list) {
		unsigned long flags;
		struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);
		if (is_sysmmu_active(drvdata) && drvdata->runtime_active) {
			TRACE_LOG_DEV(drvdata->sysmmu,
				"FLPD invalidation @ %#x\n", iova);
			__master_clk_enable(drvdata);
			__sysmmu_tlb_invalidate_flpdcache(
					drvdata->sfrbase, iova);
			__master_clk_disable(drvdata);
		} else {
			TRACE_LOG_DEV(drvdata->sysmmu,
				"Skip FLPD invalidation @ %#x\n", iova);
		}
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}
Example #4
0
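/*
 * Configure the prefetch buffers of every SYSMMU attached to @dev with the
 * @nbufs regions described in @prefbuf, skipping SYSMMUs that are not active.
 */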
void exynos_sysmmu_set_pbuf(struct device *dev, int nbufs,
				struct sysmmu_prefbuf prefbuf[])
{
	struct device *sysmmu;
	int nsfrs;

	if (WARN_ON(nbufs < 1))
		return;

	for_each_sysmmu(dev, sysmmu) {
		unsigned long flags;
		struct sysmmu_drvdata *drvdata;

		drvdata = dev_get_drvdata(sysmmu);

		spin_lock_irqsave(&drvdata->lock, flags);
		if (!is_sysmmu_active(drvdata)) {
			spin_unlock_irqrestore(&drvdata->lock, flags);
			continue;
		}

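		/* Program each register bank that has a compatible prefetch buffer. */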
		for (nsfrs = 0; nsfrs < drvdata->nsfrs; nsfrs++) {
			int min;

			if (!has_sysmmu_capable_pbuf(
					drvdata, nsfrs, prefbuf, &min))
				continue;

			if (sysmmu_block(drvdata->sfrbases[nsfrs])) {
				func_set_pbuf[min](drvdata, nsfrs,
							nbufs, prefbuf);
				sysmmu_unblock(drvdata->sfrbases[nsfrs]);
			}
		} /* for (nsfrs < drvdata->nsfrs) */
		spin_unlock_irqrestore(&drvdata->lock, flags);
	}
}