static GLboolean r300RunTCLRender(GLcontext * ctx,
				  struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct r300_vertex_program *vp;

	hw_tcl_on = future_hw_tcl_on;

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (hw_tcl_on == GL_FALSE)
		return GL_TRUE;

	if (r300Fallback(ctx) >= R300_FALLBACK_TCL) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}

	r300UpdateShaders(rmesa);

	vp = (struct r300_vertex_program *)CURRENT_VERTEX_SHADER(ctx);
	if (vp->native == GL_FALSE) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}

	return r300RunRender(ctx, stage);
}
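For reference, a run() callback like this is installed through Mesa's software TNL pipeline. The sketch below assumes the six-field tnl_pipeline_stage layout (name, privatePtr, create, destroy, validate, run) from tnl/t_pipeline.h of this era; the stage descriptor's name string is illustrative. Returning GL_TRUE from run() hands the vertices on to the next (software) stage, which is why every fallback path above returns GL_TRUE.

#include "tnl/t_context.h"
#include "tnl/t_pipeline.h"

/* Illustrative stage descriptor: only the run() hook is needed here.
 * An array of such stages is passed to _tnl_install_pipeline(). */
const struct tnl_pipeline_stage _r300_tcl_stage = {
	"r300 render tcl",	/* name */
	NULL,			/* privatePtr */
	NULL,			/* create */
	NULL,			/* destroy */
	NULL,			/* validate */
	r300RunTCLRender	/* run */
};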
Example #2
static GLboolean r300RunRender(GLcontext * ctx,
			       struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_vertex_buffer *VB = &rmesa->state.VB;
	int i;
	int cmd_reserved = 0;	/* consumed by the reg_start()/e32() macros below */
	int cmd_written = 0;
	drm_radeon_cmd_header_t *cmd = NULL;

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (stage) {
		TNLcontext *tnl = TNL_CONTEXT(ctx);
		radeon_vb_to_rvb(rmesa, VB, &tnl->vb);
	}

	r300UpdateShaders(rmesa);
	if (r300EmitArrays(ctx))
		return GL_TRUE;

	r300UpdateShaderStates(rmesa);

	reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
	e32(R300_RB3D_DSTCACHE_UNKNOWN_0A);

	reg_start(R300_RB3D_ZCACHE_CTLSTAT, 0);
	e32(R300_RB3D_ZCACHE_UNKNOWN_03);

	r300EmitState(rmesa);

	for (i = 0; i < VB->PrimitiveCount; i++) {
		GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
		GLuint start = VB->Primitive[i].start;
		GLuint end = VB->Primitive[i].start + VB->Primitive[i].count;
		r300RunRenderPrimitive(rmesa, ctx, start, end, prim);
	}

	reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
	e32(R300_RB3D_DSTCACHE_UNKNOWN_0A);

	reg_start(R300_RB3D_ZCACHE_CTLSTAT, 0);
	e32(R300_RB3D_ZCACHE_UNKNOWN_03);

#ifdef USER_BUFFERS
	r300UseArrays(ctx);
#endif

	r300ReleaseArrays(ctx);

	return GL_FALSE;
}
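The paired DSTCACHE/ZCACHE writes that bracket the primitive loop are the same flush sequence Example #4 emits via r300EmitCacheFlush(). Below is a sketch of such a helper; it assumes, as the declarations above suggest, that the reg_start()/e32() macros expand to code referencing the rmesa, cmd, cmd_reserved and cmd_written locals, and the helper name is hypothetical.

/* Hypothetical helper gathering the two cache flushes emitted above. */
static void r300FlushGbCaches(r300ContextPtr rmesa)
{
	int cmd_reserved = 0;	/* consumed by the macros below */
	int cmd_written = 0;
	drm_radeon_cmd_header_t *cmd = NULL;

	/* Flush the destination (color) cache... */
	reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
	e32(R300_RB3D_DSTCACHE_UNKNOWN_0A);

	/* ...and the Z cache. */
	reg_start(R300_RB3D_ZCACHE_CTLSTAT, 0);
	e32(R300_RB3D_ZCACHE_UNKNOWN_03);
}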
Example #3
static GLboolean r300_run_tcl_render(GLcontext *ctx,
				 struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct r300_vertex_program *vp;
   
	hw_tcl_on = future_hw_tcl_on;

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (hw_tcl_on == GL_FALSE)
		return GL_TRUE;

	if (r300Fallback(ctx) >= R300_FALLBACK_TCL) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}
	
	r300UpdateShaders(rmesa);

	vp = (struct r300_vertex_program *)CURRENT_VERTEX_SHADER(ctx);
#if 0 /* Draw every second request with the software ARB vp */
	vp->native++;
	vp->native &= 1;
	//vp->native = GL_FALSE;
#endif

#if 0 /* You don't want to know what this does... */
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	struct tnl_cache *cache;
	struct tnl_cache_item *c;

	cache = tnl->vp_cache;
	c = cache->items[0xc000cc0e % cache->size];

	if (c && c->data == vp)
		vp->native = GL_FALSE;
#endif
#if 0
	vp->native = GL_FALSE;
#endif
	if (vp->native == GL_FALSE) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}
	//r300UpdateShaderStates(rmesa);
	
	return r300_run_vb_render(ctx, stage);
}
Example #4
void r300RenderStart(GLcontext *ctx)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);

	radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);

	r300ChooseRenderState(ctx);

	r300UpdateShaders(rmesa);

	r300PrepareVertices(ctx);

	r300ValidateBuffers(ctx);

	r300UpdateShaderStates(rmesa);

	/* Investigate whether the flush optimisation can be put back if needed. */
	if (rmesa->radeon.dma.flush != NULL) {
		rmesa->radeon.dma.flush(ctx);
	}
}
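Unlike the TCL paths above, r300RenderStart is a TNL render hook rather than a pipeline stage. Here is a minimal sketch of how such hooks are installed, assuming the standard tnl->Driver.Render callback table from tnl/t_context.h; the init-function name is hypothetical and r300RenderFinish is assumed to be the driver's matching teardown hook.

#include "tnl/t_context.h"

/* Hypothetical init-time hookup: Render.Start runs once before each
 * batch of software-TNL primitives, Render.Finish once afterwards. */
static void r300InitSwtclHooks(GLcontext *ctx)
{
	TNLcontext *tnl = TNL_CONTEXT(ctx);

	tnl->Driver.Render.Start = r300RenderStart;
	tnl->Driver.Render.Finish = r300RenderFinish;	/* assumed counterpart */
}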
Example #5

static GLboolean r300RunRender(GLcontext * ctx,
			       struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	int i;
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	struct vertex_buffer *vb = &tnl->vb;

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	r300UpdateShaders(rmesa);
	if (r300EmitArrays(ctx))
		return GL_TRUE;

	r300UpdateShaderStates(rmesa);

	r300EmitCacheFlush(rmesa);
	r300EmitState(rmesa);

	for (i = 0; i < vb->PrimitiveCount; i++) {
		GLuint prim = _tnl_translate_prim(&vb->Primitive[i]);
		GLuint start = vb->Primitive[i].start;
		GLuint end = vb->Primitive[i].start + vb->Primitive[i].count;
		r300RunRenderPrimitive(rmesa, ctx, start, end, prim);
	}

	r300EmitCacheFlush(rmesa);

#ifdef USER_BUFFERS
	r300UseArrays(ctx);
#endif

	r300ReleaseArrays(ctx);

	return GL_FALSE;
}
Example #6
GLboolean r300_run_vb_render(GLcontext *ctx,
				 struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_vertex_buffer *VB = &rmesa->state.VB;
	int i;
	LOCAL_VARS	/* presumably declares the locals used by reg_start()/e32(); cf. Example #2 */

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (stage) {
 		TNLcontext *tnl = TNL_CONTEXT(ctx);
		radeon_vb_to_rvb(rmesa, VB, &tnl->vb);
	}
	
	r300UpdateShaders(rmesa);
	if (rmesa->state.VB.LockCount == 0 || 1) {	/* the "|| 1" forces this path; the locked-array branch below is dead */
		r300ReleaseArrays(ctx);
		r300EmitArrays(ctx, GL_FALSE);

		r300UpdateShaderStates(rmesa);
	} else {
		/* TODO: Figure out why do we need these. */
		R300_STATECHANGE(rmesa, vir[0]);
		R300_STATECHANGE(rmesa, vir[1]);
		R300_STATECHANGE(rmesa, vic);
		R300_STATECHANGE(rmesa, vof);
		
#if 0
		fprintf(stderr, "dt:\n");
		for (i = 0; i < VERT_ATTRIB_MAX; i++) {
			fprintf(stderr, "dt %d:", i);
			dump_dt(&rmesa->state.VB.AttribPtr[i], VB->Count);
		}

		fprintf(stderr, "before:\n");
		for (i = 0; i < rmesa->state.aos_count; i++) {
			fprintf(stderr, "aos %d:", i);
			dump_array(&rmesa->state.aos[i], VB->Count);
		}
#endif
#if 0
		r300ReleaseArrays(ctx);
		r300EmitArrays(ctx, GL_FALSE);

		fprintf(stderr, "after:\n");
		for (i = 0; i < rmesa->state.aos_count; i++) {
			fprintf(stderr, "aos %d:", i);
			dump_array(&rmesa->state.aos[i], VB->Count);
		}
#endif
	}
	
	reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
	e32(0x0000000a);	/* R300_RB3D_DSTCACHE_UNKNOWN_0A */

	reg_start(0x4f18, 0);	/* R300_RB3D_ZCACHE_CTLSTAT */
	e32(0x00000003);	/* R300_RB3D_ZCACHE_UNKNOWN_03 */
#if 0
	reg_start(R300_VAP_PVS_WAITIDLE, 0);
	e32(0x00000000);
#endif
	r300EmitState(rmesa);
	
	for (i = 0; i < VB->PrimitiveCount; i++) {
		GLuint prim = VB->Primitive[i].mode;
		GLuint start = VB->Primitive[i].start;
		GLuint length = VB->Primitive[i].count;

		r300_render_vb_primitive(rmesa, ctx, start, start + length, prim);
	}

	reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
	e32(0x0000000a /*0x2*/);

	reg_start(0x4f18, 0);	/* R300_RB3D_ZCACHE_CTLSTAT */
	e32(0x00000003 /*0x1*/);

#ifdef USER_BUFFERS
	r300UseArrays(ctx);
#endif
	return GL_FALSE;
}
Example #7
static GLboolean r300TryDrawPrims(GLcontext *ctx,
					 const struct gl_client_array *arrays[],
					 const struct _mesa_prim *prim,
					 GLuint nr_prims,
					 const struct _mesa_index_buffer *ib,
					 GLuint min_index,
					 GLuint max_index )
{
	struct r300_context *r300 = R300_CONTEXT(ctx);
	GLuint i;

	radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s: %u (%d-%d) cs begin at %d\n",
				__FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw );

	if (ctx->NewState)
		_mesa_update_state( ctx );

	if (r300->options.hw_tcl_enabled)
		_tnl_UpdateFixedFunctionProgram(ctx);

	r300UpdateShaders(r300);

	r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, !r300ValidateBuffers(ctx));

	r300SetVertexFormat(ctx, arrays, max_index + 1);

	if (r300->fallback)
		return GL_FALSE;

	r300SetupVAP(ctx, r300->selected_vp->code.InputsRead, r300->selected_vp->code.OutputsWritten);

	r300UpdateShaderStates(r300);

	/* Ensure we have the command-buffer space in advance to cover
	 * the state + DMA AOS pointers */
	GLuint emit_end = r300PredictTryDrawPrimsSize(ctx, nr_prims, prim)
		+ r300->radeon.cmdbuf.cs->cdw;

	r300SetupIndexBuffer(ctx, ib);

	r300AllocDmaRegions(ctx, arrays, max_index + 1);

	if (r300->fallback)
		return GL_FALSE;

	r300EmitCacheFlush(r300);
	radeonEmitState(&r300->radeon);

	for (i = 0; i < nr_prims; ++i) {
		r300RunRenderPrimitive(ctx, prim[i].start, prim[i].start + prim[i].count, prim[i].mode);
	}

	r300EmitCacheFlush(r300);

	r300FreeData(ctx);

	radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: %u (%d-%d) cs ending at %d\n",
			__FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw );

	if (emit_end < r300->radeon.cmdbuf.cs->cdw)
		WARN_ONCE("Rendering was %d commands larger than predicted size."
				" We might overflow the command buffer.\n", r300->radeon.cmdbuf.cs->cdw - emit_end);

	return GL_TRUE;
}
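Since r300TryDrawPrims returns GL_FALSE on fallback, its caller is expected to hand the draw to the software TNL path. Here is a sketch of such a wrapper, assuming it uses the stock Mesa helpers vbo_get_minmax_index() and _tnl_draw_prims(); the wrapper name follows the driver's convention but is illustrative here.

static void r300DrawPrims(GLcontext *ctx,
			  const struct gl_client_array *arrays[],
			  const struct _mesa_prim *prim,
			  GLuint nr_prims,
			  const struct _mesa_index_buffer *ib,
			  GLboolean index_bounds_valid,
			  GLuint min_index,
			  GLuint max_index)
{
	/* The hardware path sizes its DMA regions from the index bounds,
	 * so compute them if the caller did not. */
	if (!index_bounds_valid)
		vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);

	if (!r300TryDrawPrims(ctx, arrays, prim, nr_prims, ib,
			      min_index, max_index)) {
		/* Fall back to the software TNL pipeline. */
		_tnl_draw_prims(ctx, arrays, prim, nr_prims, ib,
				min_index, max_index);
	}
}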