Example #1
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		adreno_regwrite(rb->device, REG_CP_INT_CNTL, 0);

		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}
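/*
 * The 0x10000000 written to REG_CP_ME_CNTL above is bit 28; a minimal
 * sketch, assuming that bit is the micro engine halt. The macro name is
 * illustrative, not necessarily the driver's.
 */
#define CP_ME_CNTL_HALT	(1 << 28)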
static int a3xx_snapshot_cp_pm4_ram(struct kgsl_device *device, void *snapshot,
		int remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int i, size = adreno_dev->pm4_fw_size - 1;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP PM4 RAM DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_PM4_RAM;
	header->size = size;

	/*
	 * Read the firmware from the GPU rather than use our cache in order to
	 * try to catch mis-programming or corruption in the hardware.  We do
	 * use the cached version of the size, however, instead of trying to
	 * maintain always changing hardcoded constants
	 */

	adreno_regwrite(device, REG_CP_ME_RAM_RADDR, 0x0);
	for (i = 0; i < size; i++)
		adreno_regread(device, REG_CP_ME_RAM_DATA, &data[i]);

	return DEBUG_SECTION_SZ(size);
}
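/*
 * DEBUG_SECTION_SZ() is used by all of these snapshot helpers. A sketch
 * of the assumed definition: the section payload in bytes plus the
 * kgsl_snapshot_debug header that precedes it.
 */
#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
		+ sizeof(struct kgsl_snapshot_debug))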
static int a330_snapshot_cp_merciu(struct kgsl_device *device, void *snapshot,
		int remain, void *priv)
{
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int i, size;

	/* Each MERCIU queue entry produces two dwords of data */
	size = A330_CP_MERCIU_QUEUE_SIZE << 1;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP MERCIU DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_MERCIU;
	header->size = size;

	adreno_regwrite(device, A3XX_CP_MERCIU_ADDR, 0x0);

	for (i = 0; i < A330_CP_MERCIU_QUEUE_SIZE; i++) {
		adreno_regread(device, A3XX_CP_MERCIU_DATA,
			&data[(i * 2)]);
		adreno_regread(device, A3XX_CP_MERCIU_DATA2,
			&data[(i * 2) + 1]);
	}

	return DEBUG_SECTION_SZ(size);
}
static void _rbbm_debug_bus_read(struct kgsl_device *device,
	unsigned int block_id, unsigned int index, unsigned int *val)
{
	unsigned int block = (block_id << 8) | (1 << 16);
	adreno_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, block | index);
	adreno_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS, val);
}
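/*
 * Hypothetical usage of the helper above (a sketch, not driver code):
 * dump 'count' indexed words from one debug bus block into 'buf'.
 */
static void rbbm_debug_bus_dump(struct kgsl_device *device,
	unsigned int block_id, unsigned int *buf, int count)
{
	int i;

	for (i = 0; i < count; i++)
		_rbbm_debug_bus_read(device, block_id, i, &buf[i]);
}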
static int a3xx_snapshot_debugbus_block(struct kgsl_device *device,
	void *snapshot, int remain, void *priv)
{
	struct kgsl_snapshot_debugbus *header = snapshot;
	unsigned int id = (unsigned int) priv;
	unsigned int val;
	int i;
	unsigned int *data = snapshot + sizeof(*header);
	int size =
		(DEBUGFS_BLOCK_SIZE * sizeof(unsigned int)) + sizeof(*header);

	if (remain < size) {
		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
		return 0;
	}

	val = (id << 8) | (1 << 16);

	header->id = id;
	header->count = DEBUGFS_BLOCK_SIZE;

	for (i = 0; i < DEBUGFS_BLOCK_SIZE; i++) {
		adreno_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, val | i);
		adreno_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS,
			&data[i]);
	}

	return size;
}
static int a3xx_snapshot_cp_roq(struct kgsl_device *device, void *snapshot,
		int remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int i, size;

	/* The ROQ is larger on the A330 than on earlier A3XX parts */
	size = adreno_is_a330(adreno_dev) ?
		A330_CP_ROQ_SIZE : A320_CP_ROQ_SIZE;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP ROQ DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_ROQ;
	header->size = size;

	adreno_regwrite(device, A3XX_CP_ROQ_ADDR, 0x0);
	for (i = 0; i < size; i++)
		adreno_regread(device, A3XX_CP_ROQ_DATA, &data[i]);

	return DEBUG_SECTION_SZ(size);
}
static int a3xx_snapshot_vpc_memory(struct kgsl_device *device, void *snapshot,
		int remain, void *priv)
{
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int size = VPC_MEMORY_BANKS * VPC_MEMORY_SIZE;
	int bank, addr, i = 0;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "VPC MEMORY");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_VPC_MEMORY;
	header->size = size;

	for (bank = 0; bank < VPC_MEMORY_BANKS; bank++) {
		for (addr = 0; addr < VPC_MEMORY_SIZE; addr++) {
			unsigned int val = bank | (addr << 4);
			adreno_regwrite(device,
				A3XX_VPC_VPC_DEBUG_RAM_SEL, val);
			adreno_regread(device,
				A3XX_VPC_VPC_DEBUG_RAM_READ, &data[i++]);
		}
	}

	return DEBUG_SECTION_SZ(size);
}
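/*
 * Illustrative helper for the select encoding used above (assumption:
 * A3XX_VPC_VPC_DEBUG_RAM_SEL packs the bank in bits [3:0] and the
 * address above it, so bank 2, addr 5 encodes as 0x52).
 */
static inline unsigned int vpc_debug_ram_sel(int bank, int addr)
{
	return bank | (addr << 4);
}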
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile =  A220_PM4_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile =  A225_PM4_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile =  A200_PM4_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PM4 file\n");
		return -EINVAL;
	}

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
				     adreno_dev->pm4_fw[i]);
err:
	return ret;
}
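/*
 * A sketch of the _load_firmware() contract assumed by the loaders in
 * these examples (not necessarily the driver's exact implementation):
 * fetch the image with request_firmware() from <linux/firmware.h> and
 * hand back a kmalloc'd copy plus its length.
 */
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
	void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);
	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);
	if (*data != NULL) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else {
		ret = -ENOMEM;
	}

	release_firmware(fw);
	return ret;
}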
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
		rb->flags &= ~KGSL_FLAGS_STARTED;
	}
}
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile =  A220_PFP_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile =  A225_PFP_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile = A200_PFP_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PFP firmware\n");
		return -EINVAL;
	}

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
				     adreno_dev->pfp_fw[i]);
err:
	return ret;
}
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	kgsl_pwrscale_busy(rb->device);

	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}
int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i;

	if (adreno_dev->pfp_fw == NULL) {
		int ret = adreno_ringbuffer_read_pfp_ucode(device);
		if (ret)
			return ret;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		adreno_dev->pfp_fw_version);

	adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device,
			adreno_dev->gpudev->reg_cp_pfp_ucode_data,
			adreno_dev->pfp_fw[i]);
	return 0;
}
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pm4_fwfile,
			&ptr, &len);

		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw[0]);
	adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
				     adreno_dev->pm4_fw[i]);
err:
	return ret;
}
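/*
 * CP_DEBUG_DEFAULT names the value that the older loader in Example #1
 * writes as a bare literal; a sketch assuming the two are the same:
 */
#define CP_DEBUG_DEFAULT 0x02000000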
Example #14
static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/*
	 * Synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);

	rb->flags |= KGSL_FLAGS_ACTIVE;
}
int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i;

	if (adreno_dev->pm4_fw == NULL) {
		int ret = adreno_ringbuffer_read_pm4_ucode(device);
		if (ret)
			return ret;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw_version);

	adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
			adreno_dev->pm4_fw[i]);

	return 0;
}
Example #16
static void a2xx_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	int i;

	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		adreno_regread(device, REG_CP_INT_STATUS, &status);
		adreno_regread(device, REG_MASTER_INT_SIGNAL,
					&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);

	trace_kgsl_a2xx_irq_status(device, master_status, status);

	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
		if (status & kgsl_cp_error_irqs[i].mask) {
			KGSL_CMD_CRIT(rb->device, "%s\n",
				 kgsl_cp_error_irqs[i].message);

			kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
		}
	}

	/* only ack bits we understand */
	status &= CP_INT_MASK;
	adreno_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		queue_work(device->work_queue, &device->ts_expired_ws);
		wake_up_interruptible_all(&device->wait_queue);
	}
}
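/*
 * A minimal sketch of the error-IRQ table iterated above; the entries
 * and mask macro names here are assumptions for illustration.
 */
static const struct {
	unsigned int mask;
	const char *message;
} kgsl_cp_error_irqs[] = {
	{ CP_INT_CNTL__OPCODE_ERROR_MASK,
		"ringbuffer opcode error interrupt" },
	{ CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
		"ringbuffer protected mode error interrupt" },
};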
Example #17
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
    struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
    int i, ret = 0;

    if (adreno_dev->pfp_fw == NULL) {
        int len;
        void *ptr;

        ret = _load_firmware(device, adreno_dev->pfp_fwfile,
                             &ptr, &len);
        if (ret)
            goto err;

        /* PFP size should be dword aligned */
        if (len % sizeof(uint32_t) != 0) {
            KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
            ret = -EINVAL;
            kfree(ptr);
            goto err;
        }

        adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
        adreno_dev->pfp_fw = ptr;
    }

    KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
                  adreno_dev->pfp_fw[0]);

    adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
    for (i = 1; i < adreno_dev->pfp_fw_size; i++)
        adreno_regwrite(device,
                        adreno_dev->gpudev->reg_cp_pfp_ucode_data,
                        adreno_dev->pfp_fw[i]);
err:
    return ret;
}
static int a2xx_snapshot_cpdebug(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int i;

	if (remain < DEBUG_SECTION_SZ(CPDEBUG_COUNT)) {
		SNAPSHOT_ERR_NOMEM(device, "CP DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP;
	header->size = CPDEBUG_COUNT;

	for (i = 0; i < CPDEBUG_COUNT; i++) {
		adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
		adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
	}

	adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);

	return DEBUG_SECTION_SZ(CPDEBUG_COUNT);
}
Example #19
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
    BUG_ON(rb->wptr == 0);

    /*
     * Let the pwrscale policy know that new commands have been
     * submitted.
     */
    kgsl_pwrscale_busy(rb->device);

    /*
     * Synchronize memory before informing the hardware of the
     * new commands.
     */
    mb();

    adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}
static int a3xx_snapshot_cp_meq(struct kgsl_device *device, void *snapshot,
		int remain, void *priv)
{
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int i;

	if (remain < DEBUG_SECTION_SZ(CP_MEQ_SIZE)) {
		SNAPSHOT_ERR_NOMEM(device, "CP MEQ DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_MEQ;
	header->size = CP_MEQ_SIZE;

	adreno_regwrite(device, A3XX_CP_MEQ_ADDR, 0x0);
	for (i = 0; i < CP_MEQ_SIZE; i++)
		adreno_regread(device, A3XX_CP_MEQ_DATA, &data[i]);

	return DEBUG_SECTION_SZ(CP_MEQ_SIZE);
}
Example #21
static void a2xx_irq_control(struct adreno_device *adreno_dev, int state)
{
	struct kgsl_device *device = &adreno_dev->dev;

	if (state) {
		adreno_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK);
		adreno_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK);
		adreno_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
	} else {
		adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
		adreno_regwrite(device, REG_CP_INT_CNTL, 0);
		adreno_regwrite(device, MH_INTERRUPT_MASK, 0);
	}

	/* Force the writes to post before touching the IRQ line */
	wmb();
}
static int a2xx_snapshot_sqthreaddebug(struct kgsl_device *device,
	void *snapshot, int remain, void *priv)
{
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int i, offset = 0;
	int size = SQ_DEBUG_THREAD_SIZE * 2 * 16;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "SQ THREAD DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_SQTHREAD;
	header->size = size;

	for (i = 0; i < 16; i++) {
		adreno_regwrite(device, REG_SQ_DEBUG_TB_STATUS_SEL,
				i | (6<<4) | (i<<7) | (1<<11) | (1<<12)
				| (i<<16) | (6<<20) | (i<<23));
		SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM,
			 data, offset);
		SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATUS_REG,
			 data, offset);
		SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM,
			 data, offset);
		SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_0,
			 data, offset);
		SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_1,
			 data, offset);
		SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_2,
			 data, offset);
		SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_3,
			 data, offset);
	}

	return DEBUG_SECTION_SZ(size);
}
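/*
 * A plausible expansion of the SQ_DEBUG_WRITE() helper used above (an
 * assumption: despite the name, it reads the selected debug register
 * into the snapshot buffer and advances the write offset).
 */
#define SQ_DEBUG_WRITE(_dev, _reg, _data, _offset) \
	adreno_regread((_dev), (_reg), &(_data)[(_offset)++])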
static int a3xx_snapshot_cp_pm4_ram(struct kgsl_device *device, void *snapshot,
		int remain, void *priv)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_snapshot_debug *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	int i, size = adreno_dev->pm4_fw_size - 1;

	if (remain < DEBUG_SECTION_SZ(size)) {
		SNAPSHOT_ERR_NOMEM(device, "CP PM4 RAM DEBUG");
		return 0;
	}

	header->type = SNAPSHOT_DEBUG_CP_PM4_RAM;
	header->size = size;

	/*
	 * Read the firmware from the GPU rather than use our cache in order
	 * to try to catch mis-programming or corruption in the hardware
	 */
	adreno_regwrite(device, REG_CP_ME_RAM_RADDR, 0x0);
	for (i = 0; i < size; i++)
		adreno_regread(device, REG_CP_ME_RAM_DATA, &data[i]);

	return DEBUG_SECTION_SZ(size);
}
Example #24
static void a2xx_rbbm_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int rderr = 0;

	adreno_regread(device, REG_RBBM_INT_STATUS, &status);

	if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
		union rbbm_read_error_u rerr;
		adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
		rerr.val = rderr;
		if (rerr.f.read_address == REG_CP_INT_STATUS &&
			rerr.f.read_error &&
			rerr.f.read_requester)
			KGSL_DRV_WARN(device,
				"rbbm read error interrupt: %08x\n", rderr);
		else
			KGSL_DRV_CRIT(device,
				"rbbm read error interrupt: %08x\n", rderr);
	}

	status &= RBBM_INT_MASK;
	adreno_regwrite(device, REG_RBBM_INT_ACK, status);
}
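/*
 * A sketch of the read-error status word decoded above; the field
 * widths and ordering are assumptions inferred from how the handler
 * uses them, not the documented register layout.
 */
union rbbm_read_error_u {
	unsigned int val;
	struct {
		unsigned int read_address   : 15;
		unsigned int reserved       : 13;
		unsigned int read_requester : 3;
		unsigned int read_error     : 1;
	} f;
};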
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			     (rb->memptrs_desc.gpuaddr
			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	*/
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			     device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* update the eoptimestamp field with the last retired timestamp */
	kgsl_sharedmem_writel(&device->memstore,
			     KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp),
			     rb->timestamp);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = adreno_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);

	GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Instruction memory size: */
	GSL_RB_WRITE(cmds, cmds_gpu,
		     (adreno_encode_istore_size(adreno_dev)
		      | adreno_dev->pix_shader_start));
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/*
	 * Write confirm interval: the CP will wait wait_interval * 16
	 * clocks between polls
	 */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	adreno_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}
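/*
 * Worked example for the log2 sizing in adreno_ringbuffer_start()
 * (illustration only): with rb->sizedwords == 32768 the ring holds
 * 16384 quadwords, so rb_bufsz = ilog2(32768 >> 1) = 14.
 */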
void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
	int *remain, int hang)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_snapshot_registers regs;
	unsigned int pmoverride;

	/* Choose the register set to dump */

	if (adreno_is_a20x(adreno_dev)) {
		regs.regs = (unsigned int *) a200_registers;
		regs.count = a200_registers_count;
	} else {
		regs.regs = (unsigned int *) a220_registers;
		regs.count = a220_registers_count;
	}

	/* Master set of (non debug) registers */
	snapshot = kgsl_snapshot_add_section(device,
		KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
		kgsl_snapshot_dump_regs, &regs);

	/* CP_STATE_DEBUG indexed registers */
	snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_CP_STATE_DEBUG_INDEX,
			REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);

	/* CP_ME indexed registers */
	snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
			64, 44);

	/*
	 * Need to temporarily turn off clock gating for the debug bus to
	 * work
	 */

	adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &pmoverride);
	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);

	/* SX debug registers */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_sxdebug, NULL);

	/* SU debug indexed registers (only for < 470) */
	if (!adreno_is_a22x(adreno_dev))
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
				remain, REG_PA_SU_DEBUG_CNTL,
				REG_PA_SU_DEBUG_DATA,
				0, 0x1B);

	/* CP debug registers */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_cpdebug, NULL);

	/* MH debug indexed registers */
	snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, MH_DEBUG_CTRL, MH_DEBUG_DATA, 0x0, 0x40);

	/* Leia only register sets */
	if (adreno_is_a22x(adreno_dev)) {
		/* RB DEBUG indexed registers */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA, 0, 8);

		/* RB DEBUG indexed registers bank 2 */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA + 0x1000,
			0, 8);

		/* PC_DEBUG indexed registers */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_PC_DEBUG_CNTL, REG_PC_DEBUG_DATA, 0, 8);

		/* GRAS_DEBUG indexed registers */
		snapshot = a2xx_snapshot_indexed_registers(device, snapshot,
			remain, REG_GRAS_DEBUG_CNTL, REG_GRAS_DEBUG_DATA, 0, 4);

		/* MIU debug registers */
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_miudebug, NULL);

		/* SQ DEBUG debug registers */
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a2xx_snapshot_sqdebug, NULL);

		/*
		 * Reading SQ THREAD causes bad things to happen on a running
		 * system, so only read it if the GPU is already hung
		 */

		if (hang) {
			/* SQ THREAD debug registers */
			snapshot = kgsl_snapshot_add_section(device,
				KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
				a2xx_snapshot_sqthreaddebug, NULL);
		}
	}

	/* Reset the clock gating */
	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride);

	return snapshot;
}
Example #27
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
    int status;
    union reg_cp_rb_cntl cp_rb_cntl;
    unsigned int rb_cntl;
    struct kgsl_device *device = rb->device;
    struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

    if (rb->flags & KGSL_FLAGS_STARTED)
        return 0;

    if (init_ram)
        rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

    kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
                       sizeof(struct kgsl_rbmemptrs));

    kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
                       (rb->sizedwords << 2));

    if (adreno_is_a2xx(adreno_dev)) {
        adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
                        (rb->memptrs_desc.gpuaddr
                         + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

        /* setup WPTR delay */
        adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
                        0 /*0x70000010 */);
    }

    /* setup REG_CP_RB_CNTL */
    adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
    cp_rb_cntl.val = rb_cntl;

    /*
     * The size of the ringbuffer in the hardware is the log2
     * representation of the size in quadwords (sizedwords / 2)
     */
    cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

    /*
     * Specify the quadwords to read before updating mem RPTR.
     * Like above, pass the log2 representation of the blocksize
     * in quadwords.
    */
    cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

    if (adreno_is_a2xx(adreno_dev)) {
        /* WPTR polling */
        cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
    }

    /* mem RPTR writebacks */
    cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

    adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

    adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

    adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
                    rb->memptrs_desc.gpuaddr +
                    GSL_RB_MEMPTRS_RPTR_OFFSET);

    if (adreno_is_a3xx(adreno_dev)) {
        /* enable access protection to privileged registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

        /* RBBM registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);

        /* CP registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);

        /* RB registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);

        /* VBIF registers */
        adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
    }

    if (adreno_is_a2xx(adreno_dev)) {
        /* explicitly clear all cp interrupts */
        adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
    }

    /* setup scratch/timestamp */
    adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
                    KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                                         soptimestamp));

    adreno_regwrite(device, REG_SCRATCH_UMSK,
                    GSL_RB_MEMPTRS_SCRATCH_MASK);

    /* load the CP ucode */

    status = adreno_ringbuffer_load_pm4_ucode(device);
    if (status != 0)
        return status;

    /* load the prefetch parser ucode */
    status = adreno_ringbuffer_load_pfp_ucode(device);
    if (status != 0)
        return status;

    if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
        adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000F0602);

    rb->rptr = 0;
    rb->wptr = 0;

    /* clear ME_HALT to start micro engine */
    adreno_regwrite(device, REG_CP_ME_CNTL, 0);

    /* ME init is GPU specific, so jump into the sub-function */
    adreno_dev->gpudev->rb_init(adreno_dev, rb);

    /* idle device to validate ME INIT */
    status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

    if (status == 0)
        rb->flags |= KGSL_FLAGS_STARTED;

    return status;
}
void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
	int *remain, int hang)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_snapshot_registers_list list;
	struct kgsl_snapshot_registers regs[5];

	list.registers = regs;
	list.count = 0;

	/* Disable clock gating temporarily so the debug bus will work */
	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);

	/* Build the list of register ranges to dump */
	_snapshot_a3xx_regs(regs, &list);
	_snapshot_hlsq_regs(regs, &list, adreno_dev);
	if (adreno_is_a330(adreno_dev))
		_snapshot_a330_regs(regs, &list);

	/* Master set of (non debug) registers */
	snapshot = kgsl_snapshot_add_section(device,
		KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
		kgsl_snapshot_dump_regs, &list);

	/* CP_STATE_DEBUG indexed registers */
	snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
			remain, REG_CP_STATE_DEBUG_INDEX,
			REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);

	/* CP_ME indexed registers */
	snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
			remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
			64, 44);

	/* VPC memory */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a3xx_snapshot_vpc_memory, NULL);

	/* CP MEQ */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a3xx_snapshot_cp_meq, NULL);

	/* Shader working/pixel memory */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a3xx_snapshot_shader_memory, NULL);


	/*
	 * Reading the CP microcode RAM on a live system can be risky, so
	 * only dump the PFP and PM4 RAM if the GPU is already hung
	 */

	if (hang) {
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a3xx_snapshot_cp_pfp_ram, NULL);

		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a3xx_snapshot_cp_pm4_ram, NULL);
	}

	/* CP ROQ */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a3xx_snapshot_cp_roq, NULL);

	if (adreno_is_a330(adreno_dev)) {
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
			a330_snapshot_cp_merciu, NULL);
	}

	snapshot = a3xx_snapshot_debugbus(device, snapshot, remain);

	/* Restore the clock gating state */
	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
			A3XX_RBBM_CLOCK_CTL_DEFAULT);

	return snapshot;
}
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int rb_cntl;
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram)
		rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	if (adreno_is_a2xx(adreno_dev)) {
		adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

		/* setup WPTR delay */
		adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
			0 /*0x70000010 */);
	}

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	*/
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	if (adreno_is_a2xx(adreno_dev)) {
		/* WPTR polling */
		cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
	}

	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);

	if (adreno_is_a2xx(adreno_dev)) {
		/* explicitly clear all cp interrupts */
		adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
	}

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
			     KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				     soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME init is GPU specific, so jump into the sub-function */
	adreno_dev->gpudev->rb_init(adreno_dev, rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}
Example #30
static void a2xx_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	int i;

	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		adreno_regread(device, REG_CP_INT_STATUS, &status);
		adreno_regread(device, REG_MASTER_INT_SIGNAL,
					&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);
	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			/* This indicates that we could not read CP_INT_STAT.
			 * As a precaution just wake up processes so
			 * they can check their timestamps. Since, we
			 * did not ack any interrupts this interrupt will
			 * be generated again */
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		unsigned int enableflag = 0;
		kgsl_sharedmem_writel(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			enableflag);
		wmb();
		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
	}

	for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
		if (status & kgsl_cp_error_irqs[i].mask) {
			KGSL_CMD_CRIT(rb->device, "%s\n",
				 kgsl_cp_error_irqs[i].message);
			/*
			 * on fatal errors, turn off the interrupts to
			 * avoid storming. This has the side effect of
			 * forcing a PM dump when the timestamp times out
			 */

			kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
		}
	}

	/* only ack bits we understand */
	status &= CP_INT_MASK;
	adreno_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		queue_work(device->work_queue, &device->ts_expired_ws);
		wake_up_interruptible_all(&device->wait_queue);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
					   device->id,
					   NULL);
	}
}