Example #1
0
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
	struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
	unsigned int ptbase, fsr;

	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		return -ENOSYS;
	}

	ptbase = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
					iommu_dev->ctx_id, TTBR0);

	fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
		iommu_dev->ctx_id, FSR);

	KGSL_MEM_CRIT(iommu_dev->kgsldev,
		"GPU PAGE FAULT: addr = %lX pid = %d\n",
		addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
	KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
		iommu_dev->ctx_id, fsr);

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);

	return 0;
}
Example #2
0
/*
 * IOMMU page-fault callback: log the fault (rate-limited per pid) and emit
 * a tracepoint. Returns 0 after logging, or -ENOSYS when the faulting
 * device cannot be matched to a known KGSL IOMMU context.
 */
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
	struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
	unsigned int ptbase, fsr;
	static unsigned long last_pagefault_jiffies;
	static int last_pid;
	int current_pid;
	unsigned long wait_time_jiff = 0;

	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		return -ENOSYS;
	}

	/* Earliest time at which a fault from the same pid is logged again */
	wait_time_jiff = last_pagefault_jiffies + msecs_to_jiffies(500);
	last_pagefault_jiffies = jiffies;

	ptbase = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, TTBR0);
	current_pid = kgsl_mmu_get_ptname_from_ptbase(ptbase);

	/*
	 * Lots of pagefaults in the same process that range over a short
	 * amount of time are likely to be part of the same problem, so try
	 * to throttle the number of messages being printed.
	 */
	if ((last_pid != current_pid) ||
	    time_after(jiffies, wait_time_jiff)) {
		fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, FSR);

		/* current_pid already holds the ptname — don't re-walk */
		KGSL_MEM_CRIT(iommu_dev->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid = %d\n",
			addr, current_pid);
		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
			iommu_dev->ctx_id, fsr);

		last_pid = current_pid;
	}

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr, current_pid, 0);

	return 0;
}
Example #3
0
/*
 * IOMMU page-fault callback: log the fault (rate-limited per pid) and emit
 * a tracepoint. Returns 0 after logging, or -ENOSYS when the faulting
 * device cannot be matched to a known KGSL IOMMU context.
 */
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
	struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
	unsigned int ptbase, fsr;
	static unsigned long last_pagefault_jiffies;
	static int last_pid;
	int current_pid;
	unsigned long wait_time_jiff = 0;

	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		return -ENOSYS;
	}

	/* Earliest time at which a fault from the same pid is logged again */
	wait_time_jiff = last_pagefault_jiffies + msecs_to_jiffies(500);
	last_pagefault_jiffies = jiffies;

	ptbase = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, TTBR0);
	current_pid = kgsl_mmu_get_ptname_from_ptbase(ptbase);

	/*
	 * Repeated faults from the same process within a short window are
	 * likely part of the same problem, so throttle the log messages.
	 */
	if ((last_pid != current_pid) ||
	    time_after(jiffies, wait_time_jiff)) {
		fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
			iommu_dev->ctx_id, FSR);

		/* current_pid already holds the ptname — don't re-walk */
		KGSL_MEM_CRIT(iommu_dev->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid = %d\n",
			addr, current_pid);
		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
			iommu_dev->ctx_id, fsr);

		last_pid = current_pid;
	}

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr, current_pid, 0);

	return 0;
}
Example #4
0
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	int ret = 0;
	struct kgsl_mmu *mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_unit *iommu_unit;
	struct kgsl_iommu_device *iommu_dev;
	unsigned int ptbase, fsr;

	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
	if (ret)
		goto done;
	iommu_dev = get_iommu_device(iommu_unit, dev);
	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		ret = -ENOSYS;
		goto done;
	}
	iommu = mmu->priv;

	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, TTBR0);

	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
		iommu_dev->ctx_id, FSR);

	KGSL_MEM_CRIT(iommu_dev->kgsldev,
		"GPU PAGE FAULT: addr = %lX pid = %d\n",
		addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
	KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
		iommu_dev->ctx_id, fsr);

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), 0);

done:
	return ret;
}
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	int ret = 0;
	struct kgsl_mmu *mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_unit *iommu_unit;
	struct kgsl_iommu_device *iommu_dev;
	unsigned int ptbase, fsr;
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	unsigned int no_page_fault_log = 0;
	unsigned int curr_context_id = 0;
	unsigned int curr_global_ts = 0;
	static struct adreno_context *curr_context;
	static struct kgsl_context *context;

	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
	if (ret)
		goto done;
	iommu_dev = get_iommu_device(iommu_unit, dev);
	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		ret = -ENOSYS;
		goto done;
	}
	iommu = mmu->priv;
	device = mmu->device;
	adreno_dev = ADRENO_DEVICE(device);

	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, TTBR0);

	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
		iommu_dev->ctx_id, FSR);

	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);

	if (!no_page_fault_log) {
		KGSL_MEM_CRIT(iommu_dev->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid = %d\n",
			addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
			iommu_dev->ctx_id, fsr);
	}

	mmu->fault = 1;
	iommu_dev->fault = 1;

	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
	context = idr_find(&device->context_idr, curr_context_id);
	if (context != NULL)
			curr_context = context->devctxt;

	kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, eoptimestamp));

	/*
	 * Store pagefault's timestamp and ib1 addr in context,
	 * this information is used in GFT
	 */
	curr_context->pagefault = 1;
	curr_context->pagefault_ts = curr_global_ts;

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), 0);

	/*
	 * We do not want the h/w to resume fetching data from an iommu unit
	 * that has faulted, this is better for debugging as it will stall
	 * the GPU and trigger a snapshot. To stall the transaction return
	 * EBUSY error.
	 */
	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
		ret = -EBUSY;
done:
	return ret;
}