Example #1
0
static uint32_t
kgsl_ringbuffer_addcmds(struct kgsl_ringbuffer *rb,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 9 : 0;

	ringcmds = kgsl_ringbuffer_allocspace(rb, total_sizedwords);

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		*ringcmds++ = pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1);
		*ringcmds++ = 0;
	}

	memcpy(ringcmds, cmds, (sizedwords << 2));

	ringcmds += sizedwords;

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		*ringcmds++ = pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1);
		*ringcmds++ = 1;
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	*ringcmds++ = pm4_type0_packet(REG_CP_TIMESTAMP, 1);
	*ringcmds++ = rb->timestamp;
	*ringcmds++ = pm4_type3_packet(PM4_EVENT_WRITE, 3);
	*ringcmds++ = CACHE_FLUSH_TS;
	*ringcmds++ =
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
	*ringcmds++ = rb->timestamp;

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		*ringcmds++ = pm4_type3_packet(PM4_COND_EXEC, 4);
		*ringcmds++ = (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2;
		*ringcmds++ = (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2;
		*ringcmds++ = rb->timestamp;
		/* # of conditional command DWORDs */
		*ringcmds++ = 4;
		*ringcmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
		*ringcmds++ = 0x00000000;
		*ringcmds++ = pm4_type3_packet(PM4_INTERRUPT, 1);
		*ringcmds++ = CP_INT_CNTL__RB_INT_MASK;
	}
Example #2
0
/* chicken restore */
static unsigned int *build_chicken_restore_cmds(
					struct adreno_context *drawctxt,
					struct tmp_ctx *ctx)
{
	unsigned int *start = ctx->cmd;
	unsigned int *cmds = start;

	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmds++ = 0;

	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
	ctx->chicken_restore = gpuaddr(cmds, &drawctxt->gpustate);
	*cmds++ = 0x00000000;

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds);

	return cmds;
}
static uint32_t
kgsl_ringbuffer_addcmds(struct kgsl_ringbuffer *rb,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	ringcmds = kgsl_ringbuffer_allocspace(rb, total_sizedwords);
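	/* GPU address of the space just reserved; GSL_RB_WRITE advances it
	 * in step with the ringcmds host pointer
	 */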
	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint) * (rb->wptr - total_sizedwords);

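	/* tag commands generated in the kernel with a NOP-wrapped identifier */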
	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

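	/* copy the caller's command dwords into the ring */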
	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}
Example #4
0
/*copy colour, depth, & stencil buffers from system memory to graphics memory*/
static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
					 struct adreno_context *drawctxt,
					 struct tmp_ctx *ctx,
					 struct gmem_shadow_t *shadow)
{
	unsigned int *cmds = shadow->gmem_restore_commands;
	unsigned int *start = cmds;

	/* Store TP0_CHICKEN register */
	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
	*cmds++ = REG_TP0_CHICKEN;
	if (ctx)
		*cmds++ = ctx->chicken_restore;
	else
		cmds++;

	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmds++ = 0;

	/* Set TP0_CHICKEN to zero */
	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
	*cmds++ = 0x00000000;

	/* Set PA_SC_AA_CONFIG to 0 */
	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
	*cmds++ = 0x00000000;
	/* shader constants */

	/* vertex buffer constants */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 7);

	*cmds++ = (0x1 << 16) | (9 * 6);
	/* valid(?) vtx constant flag & addr */
	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
	/* limit = 12 dwords */
	*cmds++ = 0x00000030;
	/* valid(?) vtx constant flag & addr */
	*cmds++ = shadow->quad_texcoords.gpuaddr | 0x3;
	/* limit = 8 dwords */
	*cmds++ = 0x00000020;
	*cmds++ = 0;
	*cmds++ = 0;

	/* Invalidate L2 cache to make sure vertices are updated */
	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
	*cmds++ = 0x1;

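	/* load the patched vertex shader stream */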
	cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);

	/* Load the patched fragment shader stream */
	cmds =
	    program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);

	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
	*cmds++ = 0x10030002;
	*cmds++ = 0x00000008;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */

	if (!adreno_is_a220(adreno_dev)) {
		/* PA_SC_VIZ_QUERY */
		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
		*cmds++ = PM4_REG(REG_PA_SC_VIZ_QUERY);
		*cmds++ = 0x0;		/*REG_PA_SC_VIZ_QUERY */
	}

	/* RB_COLORCONTROL */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
	*cmds++ = 0x00000c20;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
	*cmds++ = 0x00ffffff;	/* mmVGT_MAX_VTX_INDX */
	*cmds++ = 0x0;		/* mmVGT_MIN_VTX_INDX */
	*cmds++ = 0x00000000;	/* mmVGT_INDX_OFFSET */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
	*cmds++ = 0x00000002;	/* mmVGT_VERTEX_REUSE_BLOCK_CNTL */
	*cmds++ = 0x00000002;	/* mmVGT_OUT_DEALLOC_CNTL */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_SQ_INTERPOLATOR_CNTL);
	*cmds++ = 0xffffffff;	/* mmSQ_INTERPOLATOR_CNTL */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SC_AA_CONFIG);
	*cmds++ = 0x00000000;	/* REG_PA_SC_AA_CONFIG */

	/* set REG_PA_SU_SC_MODE_CNTL
	 * Front_ptype = draw triangles
	 * Back_ptype = draw triangles
	 * Provoking vertex = last
	 */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
	*cmds++ = 0x00080240;

	/* texture constants */
	*cmds++ =
	    pm4_type3_packet(PM4_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
	*cmds++ = (0x1 << 16) | (0 * 6);
	memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
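	/* patch the shadow surface's pitch, base address/format, and size
	 * into the copied texture constants
	 */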
	cmds[0] |= (shadow->pitch >> 5) << 22;
	cmds[1] |=
	    shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format];
	cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13;
	cmds += SYS2GMEM_TEX_CONST_LEN;

	/* program surface info */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */

	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
	 *                Base=gmem_base
	 */
	if (ctx) {
		*cmds++ = (shadow->format <<
			   RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->gmem_base;
	} else {
		unsigned int temp = *cmds;
		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
	}

	/* RB_DEPTHCONTROL */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);

	if (adreno_is_a220(adreno_dev))
		*cmds++ = 8;		/* disable Z */
	else
		*cmds++ = 0;		/* disable Z */

	/* Use maximum scissor values -- quad vertices already
	 * have the correct bounds */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
	*cmds++ = (0 << 16) | 0;
	*cmds++ = ((0x1fff) << 16) | 0x1fff;
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
	*cmds++ = ((0x1fff) << 16) | 0x1fff;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
	*cmds++ = 0x00000b00;

	/*load the viewport so that z scale = clear depth and z offset = 0.0f */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
	*cmds++ = 0xbf800000;
	*cmds++ = 0x0;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
	*cmds++ = 0xffffffff;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;

	/* load the stencil ref value
	 *  $AAM - do this later
	 */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
	/* draw pixels with color and depth/stencil component */
	*cmds++ = 0x4;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
	*cmds++ = 0x00010000;

	if (adreno_is_a220(adreno_dev)) {
		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
		*cmds++ = 0;

		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
		*cmds++ = 0x00000000;

		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
		*cmds++ = 0;           /* viz query info. */
		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
		*cmds++ = 0x00004088;
		*cmds++ = 3;	       /* NumIndices=3 */
	} else {
		/* queue the draw packet */
		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
		*cmds++ = 0;		/* viz query info. */
		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
		*cmds++ = 0x00030088;
	}

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, shadow->gmem_restore, start, cmds);

	return cmds;
}
Example #5
0
/*copy colour, depth, & stencil buffers from graphics memory to system memory*/
static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
					 struct adreno_context *drawctxt,
					 struct tmp_ctx *ctx,
					 struct gmem_shadow_t *shadow)
{
	unsigned int *cmds = shadow->gmem_save_commands;
	unsigned int *start = cmds;
	/* Calculate the new offset based on the adjusted base */
	unsigned int bytesperpixel = format2bytesperpixel[shadow->format];
	unsigned int addr = shadow->gmemshadow.gpuaddr;
	unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;

	/* Store TP0_CHICKEN register */
	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
	*cmds++ = REG_TP0_CHICKEN;
	if (ctx)
		*cmds++ = ctx->chicken_restore;
	else
		cmds++;

	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmds++ = 0;

	/* Set TP0_CHICKEN to zero */
	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
	*cmds++ = 0x00000000;

	/* Set PA_SC_AA_CONFIG to 0 */
	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
	*cmds++ = 0x00000000;

	/* program shader */

	/* load shader vtx constants ... 5 dwords */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
	*cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
	*cmds++ = 0;
	/* valid(?) vtx constant flag & addr */
	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
	/* limit = 12 dwords */
	*cmds++ = 0x00000030;

	/* Invalidate L2 cache to make sure vertices are updated */
	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
	*cmds++ = 0x1;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
	*cmds++ = 0x00ffffff;	/* REG_VGT_MAX_VTX_INDX */
	*cmds++ = 0x0;		/* REG_VGT_MIN_VTX_INDX */
	*cmds++ = 0x00000000;	/* REG_VGT_INDX_OFFSET */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
	*cmds++ = 0x00000c20;

	/* load the patched vertex shader stream */
	cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);

	/* Load the patched fragment shader stream */
	cmds =
	    program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);

	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
	if (adreno_is_a220(adreno_dev))
		*cmds++ = 0x10018001;
	else
		*cmds++ = 0x10010001;
	*cmds++ = 0x00000008;

	/* resolve */

	/* PA_CL_VTE_CNTL */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
	*cmds++ = 0x00000b00;

	/* program surface info */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */

	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
	 *                Base=gmem_base
	 */
	/* gmem base assumed 4K aligned. */
	if (ctx) {
		BUG_ON(ctx->gmem_base & 0xFFF);
		*cmds++ = (shadow->format <<
			   RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->gmem_base;
	} else {
		unsigned int temp = *cmds;
		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
	}

	/* disable Z */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);
	if (adreno_is_a220(adreno_dev))
		*cmds++ = 0x08;
	else
		*cmds++ = 0;

	/* set REG_PA_SU_SC_MODE_CNTL
	 *              Front_ptype = draw triangles
	 *              Back_ptype = draw triangles
	 *              Provoking vertex = last
	 */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
	*cmds++ = 0x00080240;

	/* Use maximum scissor values -- quad vertices already have the
	 * correct bounds */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
	*cmds++ = (0 << 16) | 0;
	*cmds++ = (0x1fff << 16) | (0x1fff);
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
	*cmds++ = (0x1fff << 16) | (0x1fff);

	/* load the viewport so that z scale = clear depth and
	 *  z offset = 0.0f
	 */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
	*cmds++ = 0xbf800000;	/* -1.0f */
	*cmds++ = 0x0;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
	*cmds++ = 0xffffffff;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;

	/* load the stencil ref value
	 * $AAM - do this later
	 */

	/* load the COPY state */
	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 6);
	*cmds++ = PM4_REG(REG_RB_COPY_CONTROL);
	*cmds++ = 0;		/* RB_COPY_CONTROL */
	*cmds++ = addr & 0xfffff000;	/* RB_COPY_DEST_BASE */
	*cmds++ = shadow->pitch >> 5;	/* RB_COPY_DEST_PITCH */

	/* Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither,
	 *  MaskWrite:R=G=B=A=1
	 */
	*cmds++ = 0x0003c008 |
	    (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT);
	/* Make sure we stay in offsetx field. */
	BUG_ON(offset & 0xfffff000);
	*cmds++ = offset;

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
	*cmds++ = 0x6;		/* EDRAM copy */

	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
	*cmds++ = 0x00010000;

	if (adreno_is_a220(adreno_dev)) {
		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
		*cmds++ = 0;

		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
		*cmds++ = 0x00000000;

		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
		*cmds++ = 0;           /* viz query info. */
		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
		*cmds++ = 0x00004088;
		*cmds++ = 3;	       /* NumIndices=3 */
	} else {
		/* queue the draw packet */
		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
		*cmds++ = 0;		/* viz query info. */
		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
		*cmds++ = 0x00030088;
	}

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, shadow->gmem_save, start, cmds);

	return cmds;
}
Example #6
0
static void
build_shader_save_restore_cmds(struct adreno_context *drawctxt,
			       struct tmp_ctx *ctx)
{
	unsigned int *cmd = ctx->cmd;
	unsigned int *save, *restore, *fixup;
#if defined(PM4_IM_STORE)
	unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
#endif
	unsigned int *partition1;
	unsigned int *shaderBases, *partition2;

#if defined(PM4_IM_STORE)
	/* compute vertex, pixel and shared instruction shadow GPU addresses */
	ctx->shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
	ctx->shader_pixel = ctx->shader_vertex + SHADER_SHADOW_SIZE;
	ctx->shader_shared = ctx->shader_pixel + SHADER_SHADOW_SIZE;
#endif

	/* restore shader partitioning and instructions */

	restore = cmd;		/* start address */

	/* Invalidate Vertex & Pixel instruction code address and sizes */
	*cmd++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
	*cmd++ = 0x00000300;	/* 0x100 = Vertex, 0x200 = Pixel */

	/* Restore previous shader vertex & pixel instruction bases. */
	*cmd++ = pm4_type3_packet(PM4_SET_SHADER_BASES, 1);
	shaderBases = cmd++;	/* TBD #5: shader bases (from fixup) */

	/* write the shader partition information to a scratch register */
	*cmd++ = pm4_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
	partition1 = cmd++;	/* TBD #4a: partition info (from save) */

#if defined(PM4_IM_STORE)
	/* load vertex shader instructions from the shadow. */
	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
	*cmd++ = ctx->shader_vertex + 0x0;	/* 0x0 = Vertex */
	startSizeVtx = cmd++;	/* TBD #1: start/size (from save) */

	/* load pixel shader instructions from the shadow. */
	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
	*cmd++ = ctx->shader_pixel + 0x1;	/* 0x1 = Pixel */
	startSizePix = cmd++;	/* TBD #2: start/size (from save) */

	/* load shared shader instructions from the shadow. */
	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
	*cmd++ = ctx->shader_shared + 0x2;	/* 0x2 = Shared */
	startSizeShared = cmd++;	/* TBD #3: start/size (from save) */
#endif

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);

	/*
	 *  fixup SET_SHADER_BASES data
	 *
	 *  since self-modifying PM4 code is being used here, a separate
	 *  command buffer is used for this fixup operation, to ensure the
	 *  commands are not read by the PM4 engine before the data fields
	 *  have been written.
	 */

	fixup = cmd;		/* start address */

	/* write the shader partition information to a scratch register */
	*cmd++ = pm4_type0_packet(REG_SCRATCH_REG2, 1);
	partition2 = cmd++;	/* TBD #4b: partition info (from save) */

	/* mask off unused bits, then OR with shader instruction memory size */
	*cmd++ = pm4_type3_packet(PM4_REG_RMW, 3);
	*cmd++ = REG_SCRATCH_REG2;
	/* AND off invalid bits. */
	*cmd++ = 0x0FFF0FFF;
	/* OR in instruction memory size */
	*cmd++ = (unsigned int)((SHADER_INSTRUCT_LOG2 - 5U) << 29);

	/* write the computed value to the SET_SHADER_BASES data field */
	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
	*cmd++ = REG_SCRATCH_REG2;
	/* TBD #5: shader bases (to restore) */
	*cmd++ = gpuaddr(shaderBases, &drawctxt->gpustate);

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd);

	/* save shader partitioning and instructions */

	save = cmd;		/* start address */

	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* fetch the SQ_INST_STORE_MANAGMENT register value,
	 *  store the value in the data fields of the SET_CONSTANT commands
	 *  above.
	 */
	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
	*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
	/* TBD #4a: partition info (to restore) */
	*cmd++ = gpuaddr(partition1, &drawctxt->gpustate);
	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
	*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
	/* TBD #4b: partition info (to fixup) */
	*cmd++ = gpuaddr(partition2, &drawctxt->gpustate);

#if defined(PM4_IM_STORE)

	/* store the vertex shader instructions */
	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
	*cmd++ = ctx->shader_vertex + 0x0;	/* 0x0 = Vertex */
	/* TBD #1: start/size (to restore) */
	*cmd++ = gpuaddr(startSizeVtx, &drawctxt->gpustate);

	/* store the pixel shader instructions */
	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
	*cmd++ = ctx->shader_pixel + 0x1;	/* 0x1 = Pixel */
	/* TBD #2: start/size (to restore) */
	*cmd++ = gpuaddr(startSizePix, &drawctxt->gpustate);

	/* store the shared shader instructions if vertex base is nonzero */

	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
	*cmd++ = ctx->shader_shared + 0x2;	/* 0x2 = Shared */
	/* TBD #3: start/size (to restore) */
	*cmd++ = gpuaddr(startSizeShared, &drawctxt->gpustate);

#endif

	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->shader_save, save, cmd);

	ctx->cmd = cmd;
}
Example #7
0
static void build_regrestore_cmds(struct adreno_device *adreno_dev,
				  struct adreno_context *drawctxt,
				  struct tmp_ctx *ctx)
{
	unsigned int *start = ctx->cmd;
	unsigned int *cmd = start;

	unsigned int i = 0;
	unsigned int reg_array_size = 0;
	const unsigned int *ptr_register_ranges;

	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;

	/* H/W Registers */
	/* deferred pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, ???); */
	cmd++;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Force mismatch */
	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
#else
	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
#endif

	/* Based on the chip id, choose the register ranges */
	if (adreno_is_a220(adreno_dev)) {
		ptr_register_ranges = register_ranges_a22x;
		reg_array_size = ARRAY_SIZE(register_ranges_a22x);
	} else {
		ptr_register_ranges = register_ranges_a20x;
		reg_array_size = ARRAY_SIZE(register_ranges_a20x);
	}

	for (i = 0; i < (reg_array_size/2); i++) {
		cmd = reg_range(cmd, ptr_register_ranges[i*2],
				ptr_register_ranges[i*2+1]);
	}

	/* Now that we know how many register blocks we have, we can compute
	 * the command length
	 */
	start[2] =
	    pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
	/* Enable shadowing for the entire register block. */
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	start[4] |= (0 << 24) | (4 << 16);	/* Disable shadowing. */
#else
	start[4] |= (1 << 24) | (4 << 16);
#endif

	/* Need to handle some of the registers separately */
	*cmd++ = pm4_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
	ctx->reg_values[0] = gpuaddr(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00040400;

	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
	*cmd++ = 0;
	*cmd++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
	ctx->reg_values[1] = gpuaddr(cmd, &drawctxt->gpustate);
	*cmd++ = 0x00000000;

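	/* shadow the a22x VSC bin size and pipe config/data registers
	 * individually
	 */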
	if (adreno_is_a220(adreno_dev)) {
		unsigned int i;
		unsigned int j = 2;
		for (i = REG_LEIA_VSC_BIN_SIZE; i <=
				REG_LEIA_VSC_PIPE_DATA_LENGTH_7; i++) {
			*cmd++ = pm4_type0_packet(i, 1);
			ctx->reg_values[j] = gpuaddr(cmd, &drawctxt->gpustate);
			*cmd++ = 0x00000000;
			j++;
		}
	}

	/* ALU Constants */
	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	*cmd++ = (0 << 24) | (0 << 16) | 0;	/* Disable shadowing */
#else
	*cmd++ = (1 << 24) | (0 << 16) | 0;
#endif
	*cmd++ = ALU_CONSTANTS;

	/* Texture Constants */
	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
	/* Disable shadowing */
	*cmd++ = (0 << 24) | (1 << 16) | 0;
#else
	*cmd++ = (1 << 24) | (1 << 16) | 0;
#endif
	*cmd++ = TEX_CONSTANTS;

	/* Boolean Constants */
	*cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + BOOL_CONSTANTS);
	*cmd++ = (2 << 16) | 0;

	/* the next BOOL_CONSTANTS dwords are the shadow area for
	 * boolean constants.
	 */
	ctx->bool_shadow = gpuaddr(cmd, &drawctxt->gpustate);
	cmd += BOOL_CONSTANTS;

	/* Loop Constants */
	*cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + LOOP_CONSTANTS);
	*cmd++ = (3 << 16) | 0;

	/* the next LOOP_CONSTANTS dwords are the shadow area for
	 * loop constants.
	 */
	ctx->loop_shadow = gpuaddr(cmd, &drawctxt->gpustate);
	cmd += LOOP_CONSTANTS;

	/* create indirect buffer command for above command sequence */
	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);

	ctx->cmd = cmd;
}
Example #8
0
static uint32_t
kgsl_ringbuffer_addcmds(struct kgsl_ringbuffer *rb,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 9 : 0;

	ringcmds = kgsl_ringbuffer_allocspace(rb, total_sizedwords);

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		*ringcmds++ = pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1);
		*ringcmds++ = 0;
	}

	memcpy(ringcmds, cmds, (sizedwords << 2));

	ringcmds += sizedwords;

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		*ringcmds++ = pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1);
		*ringcmds++ = 1;
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	*ringcmds++ = pm4_type0_packet(REG_CP_TIMESTAMP, 1);
	*ringcmds++ = rb->timestamp;
	*ringcmds++ = pm4_type3_packet(PM4_EVENT_WRITE, 3);
	*ringcmds++ = CACHE_FLUSH_TS;
	*ringcmds++ =
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
	*ringcmds++ = rb->timestamp;

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		*ringcmds++ = pm4_type3_packet(PM4_COND_EXEC, 4);
		*ringcmds++ = (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2;
		*ringcmds++ = (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2;
		*ringcmds++ = rb->timestamp;
		/* # of conditional command DWORDs */
		*ringcmds++ = 4;
		*ringcmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
		*ringcmds++ = 0x00000000;
		*ringcmds++ = pm4_type3_packet(PM4_INTERRUPT, 1);
		*ringcmds++ = CP_INT_CNTL__RB_INT_MASK;
	}