static unsigned int find_faulting_ib1_size(struct adreno_ringbuffer *rb,
				unsigned int rptr, unsigned int ib1)
{
	unsigned int value;
	unsigned int start_rptr = rptr * sizeof(unsigned int);
	unsigned int temp_rptr = start_rptr;
	unsigned int rb_size = rb->buffer_desc.size;

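	/*
	 * Walk the ringbuffer backwards from the read pointer until the
	 * faulting IB1 base address is found; the IB size is the dword
	 * that follows the IB opcode and base address.
	 */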
	do {
		temp_rptr = adreno_ringbuffer_dec_wrapped(temp_rptr, rb_size);
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, temp_rptr);

		if (ib1 == value) {
			temp_rptr = adreno_ringbuffer_dec_wrapped(temp_rptr,
								rb_size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &value,
						temp_rptr);
			if (adreno_cmd_is_ib(value)) {
				/* the IB size is the dword after the IB
				 * opcode and base address; wrap in case the
				 * packet straddles the end of the buffer */
				temp_rptr = (temp_rptr +
					2 * sizeof(unsigned int)) % rb_size;
				kgsl_sharedmem_readl(&rb->buffer_desc, &value,
						temp_rptr);
				return value;
			} else {
				/* not an IB opcode, undo the extra decrement
				 * and keep searching backwards */
				temp_rptr = (temp_rptr +
					sizeof(unsigned int)) % rb_size;
			}
		}
	} while (temp_rptr != start_rptr);

	return 0;
}
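
/*
 * kgsl_iommu_fault_handler() - IOMMU page fault handler for the GPU
 *
 * Logs the faulting address, pagetable and fault status, flags the MMU and
 * the current context as faulted for GPU fault tolerance (GFT), and returns
 * -EBUSY when the GPUHALT policy is set so that the faulting transaction
 * stalls the GPU and a snapshot can be taken.
 */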
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags)
{
	int ret = 0;
	struct kgsl_mmu *mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_unit *iommu_unit;
	struct kgsl_iommu_device *iommu_dev;
	unsigned int ptbase, fsr;
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	unsigned int no_page_fault_log = 0;
	unsigned int curr_context_id = 0;
	unsigned int curr_global_ts = 0;
	struct adreno_context *curr_context = NULL;
	struct kgsl_context *context;

	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
	if (ret)
		goto done;
	iommu_dev = get_iommu_device(iommu_unit, dev);
	if (!iommu_dev) {
		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
		ret = -ENOSYS;
		goto done;
	}
	iommu = mmu->priv;
	device = mmu->device;
	adreno_dev = ADRENO_DEVICE(device);

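	/*
	 * Read the faulting pagetable base (TTBR0) and fault status (FSR)
	 * from the context bank that raised the fault.
	 */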
	ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
					iommu_dev->ctx_id, TTBR0);

	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
		iommu_dev->ctx_id, FSR);

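	/*
	 * With the LOG_ONE_PER_PAGE policy, let the MMU layer decide whether
	 * a fault on this address has already been logged.
	 */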
	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);

	if (!no_page_fault_log) {
		KGSL_MEM_CRIT(iommu_dev->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid = %d\n",
			addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
			iommu_dev->ctx_id, fsr);
	}

	mmu->fault = 1;
	iommu_dev->fault = 1;

	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
	context = idr_find(&device->context_idr, curr_context_id);
	if (context != NULL) {
		curr_context = context->devctxt;

		kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
						eoptimestamp));

		/*
		 * Store the pagefault's timestamp in the context;
		 * this information is used in GFT
		 */
		curr_context->pagefault = 1;
		curr_context->pagefault_ts = curr_global_ts;
	}

	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), 0);

	/*
	 * Do not let the hardware resume fetching data from an IOMMU unit
	 * that has faulted; stalling the GPU is better for debugging because
	 * it triggers a snapshot. Return -EBUSY to stall the transaction.
	 */
	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
		ret = -EBUSY;
done:
	return ret;
}
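
/*
 * a2xx_cp_intrcallback() - service A2XX CP (command processor) interrupts
 *
 * Reads the CP interrupt status (retrying if it transiently reads back as
 * zero), clears the timestamp compare enables on a ringbuffer interrupt,
 * logs CP error interrupts, acks the handled interrupts and wakes up
 * anyone waiting on timestamp expiry.
 */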
static void a2xx_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	int i;

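	/*
	 * CP_INT_STATUS can transiently read back as zero while the master
	 * status still reports a pending CP interrupt, so retry the read a
	 * bounded number of times.
	 */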
	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		adreno_regread(device, REG_CP_INT_STATUS, &status);
		adreno_regread(device, REG_MASTER_INT_SIGNAL,
					&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);

	trace_kgsl_a2xx_irq_status(device, master_status, status);

	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* look up the context that is current in the ringbuffer */
		unsigned int context_id;
		kgsl_sharedmem_readl(&device->memstore,
				&context_id,
				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
					current_context));
		if (context_id < KGSL_MEMSTORE_MAX) {
			/* clear the per-context timestamp compare enable */
			kgsl_sharedmem_writel(&device->memstore,
					KGSL_MEMSTORE_OFFSET(context_id,
						ts_cmp_enable), 0);
			/* clear the global timestamp compare enable */
			kgsl_sharedmem_writel(&device->memstore,
					KGSL_MEMSTORE_OFFSET(
						KGSL_MEMSTORE_GLOBAL,
						ts_cmp_enable), 0);
			wmb();
		}
		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
	}

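	/* Log CP error interrupts and disable the GPU IRQ when one occurs */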
	for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
		if (status & kgsl_cp_error_irqs[i].mask) {
			KGSL_CMD_CRIT(rb->device, "%s\n",
				 kgsl_cp_error_irqs[i].message);

			kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
		}
	}

	/* only ack the interrupts that are covered by CP_INT_MASK */
	status &= CP_INT_MASK;
	adreno_regwrite(device, REG_CP_INT_ACK, status);

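	/*
	 * IB1/RB interrupts indicate retired timestamps: run the timestamp
	 * expiry worker, wake up waiters and call the timestamp notifiers.
	 */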
	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		queue_work(device->work_queue, &device->ts_expired_ws);
		wake_up_interruptible_all(&device->wait_queue);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
					   device->id,
					   NULL);
	}
}