Example #1
0
static void r300EmitElts(GLcontext * ctx, void *elts, unsigned long n_elts,
			 int elt_size)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct r300_dma_region *rvb = &rmesa->state.elt_dma;
	void *out;

	assert(elt_size == 2 || elt_size == 4);

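	/* Fast path: element data that already lives entirely in GART memory can
	 * be pointed at directly; otherwise it is copied into a DMA region below. */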
	if (r300IsGartMemory(rmesa, elts, n_elts * elt_size)) {
		rvb->address = rmesa->radeon.radeonScreen->gartTextures.map;
		rvb->start = ((char *)elts) - rvb->address;
		rvb->aos_offset =
		    rmesa->radeon.radeonScreen->gart_texture_offset +
		    rvb->start;
		return;
	} else if (r300IsGartMemory(rmesa, elts, 1)) {
		WARN_ONCE("Pointer not within GART memory!\n");
		_mesa_exit(-1);
	}

	r300AllocDmaRegion(rmesa, rvb, n_elts * elt_size, elt_size);
	rvb->aos_offset = GET_START(rvb);

	out = rvb->address + rvb->start;
	memcpy(out, elts, n_elts * elt_size);
}
Example #2
0
static void r300SetVertexFormat(GLcontext *ctx, const struct gl_client_array *arrays[], int count)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	struct r300_vertex_buffer *vbuf = &r300->vbuf;
	radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
	{
		int i, tmp;

		tmp = r300->selected_vp->code.InputsRead;
		i = 0;
		vbuf->num_attribs = 0;
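		/* Walk the set bits of InputsRead; each bit names a vertex
		 * attribute that needs to be translated for the hardware. */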
		while (tmp) {
			/* find first enabled bit */
			while (!(tmp & 1)) {
				tmp >>= 1;
				++i;
			}

			r300TranslateAttrib(ctx, i, count, arrays[i]);

			tmp >>= 1;
			++i;
		}
	}

	r300SwitchFallback(ctx, R300_FALLBACK_AOS_LIMIT, vbuf->num_attribs > R300_MAX_AOS_ARRAYS);
	if (r300->fallback)
		return;
}
Example #3
0
static void r300ChooseRenderState( GLcontext *ctx )
{
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	GLuint index = 0;
	GLuint flags = ctx->_TriangleCaps;
	radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);

	if (flags & DD_TRI_UNFILLED)      index |= R300_UNFILLED_BIT;

	if (index != rmesa->radeon.swtcl.RenderIndex) {
		tnl->Driver.Render.Points = rast_tab[index].points;
		tnl->Driver.Render.Line = rast_tab[index].line;
		tnl->Driver.Render.ClippedLine = rast_tab[index].line;
		tnl->Driver.Render.Triangle = rast_tab[index].triangle;
		tnl->Driver.Render.Quad = rast_tab[index].quad;

		if (index == 0) {
			tnl->Driver.Render.PrimTabVerts = r300_render_tab_verts;
			tnl->Driver.Render.PrimTabElts = r300_render_tab_elts;
			tnl->Driver.Render.ClippedPolygon = r300_fast_clipped_poly;
		} else {
			tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
			tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
			tnl->Driver.Render.ClippedPolygon = _tnl_RenderClippedPolygon;
		}

		rmesa->radeon.swtcl.RenderIndex = index;
	}
}
Example #4
0
void r300InitSwtcl(GLcontext *ctx)
{
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	static int firsttime = 1;
	radeon_print(RADEON_SWRENDER, RADEON_NORMAL, "%s\n", __func__);

	if (firsttime) {
		init_rast_tab();
		firsttime = 0;
	}
	rmesa->radeon.swtcl.emit_prediction = 0;

	tnl->Driver.Render.Start = r300RenderStart;
	tnl->Driver.Render.Finish = r300RenderFinish;
	tnl->Driver.Render.PrimitiveNotify = r300RenderPrimitive;
	tnl->Driver.Render.ResetLineStipple = r300ResetLineStipple;
	tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
	tnl->Driver.Render.CopyPV = _tnl_copy_pv;
	tnl->Driver.Render.Interp = _tnl_interp;

	/* FIXME: what are these numbers? */
	_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
			    48 * sizeof(GLfloat) );

	rmesa->radeon.swtcl.verts = (GLubyte *)tnl->clipspace.vertex_buf;
	rmesa->radeon.swtcl.RenderIndex = ~0;
	rmesa->radeon.swtcl.render_primitive = GL_TRIANGLES;
	rmesa->radeon.swtcl.hw_primitive = 0;

	_tnl_invalidate_vertex_state( ctx, ~0 );
	_tnl_invalidate_vertices( ctx, ~0 );

	_tnl_need_projected_coords( ctx, GL_FALSE );
}
Example #5
0
void r300_swtcl_flush(GLcontext *ctx, uint32_t current_offset)
{
	radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);
	r300ContextPtr rmesa = R300_CONTEXT(ctx);

	r300EmitCacheFlush(rmesa);

	radeonEmitState(&rmesa->radeon);
	r300_emit_scissor(ctx);
	r300EmitVertexAOS(rmesa,
			rmesa->radeon.swtcl.vertex_size,
			first_elem(&rmesa->radeon.dma.reserved)->bo,
			current_offset);

	r300EmitVbufPrim(rmesa,
		   rmesa->radeon.swtcl.hw_primitive,
		   rmesa->radeon.swtcl.numverts);
	r300EmitCacheFlush(rmesa);
	if ( rmesa->radeon.swtcl.emit_prediction < rmesa->radeon.cmdbuf.cs->cdw )
		WARN_ONCE("Rendering was %d commands larger than predicted size."
			" We might overflow  command buffer.\n",
			rmesa->radeon.cmdbuf.cs->cdw - rmesa->radeon.swtcl.emit_prediction );
	rmesa->radeon.swtcl.emit_prediction = 0;
	COMMIT_BATCH();
}
Example #6
0
static void r300FreeData(GLcontext *ctx)
{
	/* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
	 * to prevent double unref in radeonReleaseArrays
	 * called during context destroy
	 */
	radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	{
		int i;

		for (i = 0; i < r300->vbuf.num_attribs; i++) {
			if (!r300->vbuf.attribs[i].is_named_bo) {
				radeon_bo_unref(r300->vbuf.attribs[i].bo);
			}
			r300->radeon.tcl.aos[i].bo = NULL;
		}
	}

	{
		if (r300->ind_buf.bo != NULL) {
			radeon_bo_unref(r300->ind_buf.bo);
		}
	}
}
Example #7
0
static GLboolean r300RunTCLRender(GLcontext * ctx,
				  struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct r300_vertex_program *vp;

	hw_tcl_on = future_hw_tcl_on;

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (hw_tcl_on == GL_FALSE)
		return GL_TRUE;

	if (r300Fallback(ctx) >= R300_FALLBACK_TCL) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}

	r300UpdateShaders(rmesa);

	vp = (struct r300_vertex_program *)CURRENT_VERTEX_SHADER(ctx);
	if (vp->native == GL_FALSE) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}

	return r300RunRender(ctx, stage);
}
Example #8
0
static void r300RasterPrimitive( GLcontext *ctx, GLuint hwprim )
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);

	if (rmesa->radeon.swtcl.hw_primitive != hwprim) {
		R300_NEWPRIM( rmesa );
		rmesa->radeon.swtcl.hw_primitive = hwprim;
	}
}
Example #9
0
void r300RenderPrimitive(GLcontext *ctx, GLenum prim)
{

	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	rmesa->radeon.swtcl.render_primitive = prim;
	radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);

	if ((prim == GL_TRIANGLES) && (ctx->_TriangleCaps & DD_TRI_UNFILLED))
		return;

	r300RasterPrimitive( ctx, reduced_prim[prim] );
}
Example #10
0
static GLboolean r300RunRender(GLcontext * ctx,
			       struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_vertex_buffer *VB = &rmesa->state.VB;
	int i;
	int cmd_reserved = 0;
	int cmd_written = 0;
	drm_radeon_cmd_header_t *cmd = NULL;

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (stage) {
		TNLcontext *tnl = TNL_CONTEXT(ctx);
		radeon_vb_to_rvb(rmesa, VB, &tnl->vb);
	}

	r300UpdateShaders(rmesa);
	if (r300EmitArrays(ctx))
		return GL_TRUE;

	r300UpdateShaderStates(rmesa);

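	/* Kick the destination color cache and Z cache control registers before
	 * emitting state (the same flush is emitted again after the primitives). */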
	reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
	e32(R300_RB3D_DSTCACHE_UNKNOWN_0A);

	reg_start(R300_RB3D_ZCACHE_CTLSTAT, 0);
	e32(R300_RB3D_ZCACHE_UNKNOWN_03);

	r300EmitState(rmesa);

	for (i = 0; i < VB->PrimitiveCount; i++) {
		GLuint prim = _tnl_translate_prim(&VB->Primitive[i]);
		GLuint start = VB->Primitive[i].start;
		GLuint end = VB->Primitive[i].start + VB->Primitive[i].count;
		r300RunRenderPrimitive(rmesa, ctx, start, end, prim);
	}

	reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
	e32(R300_RB3D_DSTCACHE_UNKNOWN_0A);

	reg_start(R300_RB3D_ZCACHE_CTLSTAT, 0);
	e32(R300_RB3D_ZCACHE_UNKNOWN_03);

#ifdef USER_BUFFERS
	r300UseArrays(ctx);
#endif

	r300ReleaseArrays(ctx);

	return GL_FALSE;
}
Example #11
0
/**
 * Ensure all enabled and complete textures are uploaded along with any buffers being used.
 */
GLboolean r300ValidateBuffers(GLcontext * ctx)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_renderbuffer *rrb;
	int i;
	int ret;

	radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);

	rrb = radeon_get_colorbuffer(&rmesa->radeon);
	/* color buffer */
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}

	/* depth buffer */
	rrb = radeon_get_depthbuffer(&rmesa->radeon);
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}
	
	for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
		radeonTexObj *t;

		if (!ctx->Texture.Unit[i]._ReallyEnabled)
			continue;

		if (!r300_validate_texture(ctx, ctx->Texture.Unit[i]._Current)) {
			_mesa_warning(ctx,
				      "failed to validate texture for unit %d.\n",
				      i);
		}
		t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
		if (t->image_override && t->bo)
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
		else if (t->mt && t->mt->bo)	/* guard against a missing miptree if validation failed */
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->mt->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
	}

	ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs, first_elem(&rmesa->radeon.dma.reserved)->bo, RADEON_GEM_DOMAIN_GTT, 0);
	if (ret)
		return GL_FALSE;
	return GL_TRUE;
}
Example #12
0
/**
 * Allocate a new texture object.
 * Called via ctx->Driver.NewTextureObject.
 * Note: this function will be called during context creation to
 * allocate the default texture objects.
 * Note: we could use containment here to 'derive' the driver-specific
 * texture object from the core mesa gl_texture_object.  Not done at this time.
 * Fixup MaxAnisotropy according to user preference.
 */
static struct gl_texture_object *r300NewTextureObject(GLcontext * ctx,
						      GLuint name,
						      GLenum target)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct gl_texture_object *obj;
	obj = _mesa_new_texture_object(ctx, name, target);
	if (!obj)
		return NULL;
	obj->MaxAnisotropy = rmesa->initialMaxAnisotropy;

	r300AllocTexObj(obj);
	return obj;
}
Example #13
0
static GLboolean r300_run_tcl_render(GLcontext *ctx,
				 struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct r300_vertex_program *vp;
   
	hw_tcl_on = future_hw_tcl_on;

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (hw_tcl_on == GL_FALSE)
		return GL_TRUE;
	
	if (r300Fallback(ctx) >= R300_FALLBACK_TCL) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}
	
	r300UpdateShaders(rmesa);

	vp = (struct r300_vertex_program *)CURRENT_VERTEX_SHADER(ctx);
#if 0 /* Draw every second request with software arb vp */
	vp->native++;
	vp->native &= 1;
	//vp->native = GL_FALSE;
#endif

#if 0 /* You don't want to know what this does... */
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	struct tnl_cache *cache;
	struct tnl_cache_item *c;
	
	cache = tnl->vp_cache;
	c = cache->items[0xc000cc0e % cache->size];
	
	if(c && c->data == vp)
		vp->native = GL_FALSE;
	
#endif
#if 0
	vp->native = GL_FALSE;
#endif
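	/* Vertex programs that could not be translated to native hardware code
	 * fall back to software TCL for this run. */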
	if (vp->native == GL_FALSE) {
		hw_tcl_on = GL_FALSE;
		return GL_TRUE;
	}
	//r300UpdateShaderStates(rmesa);
	
	return r300_run_vb_render(ctx, stage);
}
Example #14
0
/**
 * Ensure the given texture is ready for rendering.
 *
 * Mostly this means populating the texture object's mipmap tree.
 */
static GLboolean r300_validate_texture(GLcontext * ctx, struct gl_texture_object *texObj)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj))
		return GL_FALSE;

	/* Configure the hardware registers (more precisely, the cached version
	 * of the hardware registers). */
	setup_hardware_state(rmesa, t);

	t->validated = GL_TRUE;
	return GL_TRUE;
}
Example #15
0
static GLboolean r300RunNonTCLRender(GLcontext * ctx,
				     struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);

	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (r300Fallback(ctx) >= R300_FALLBACK_RAST)
		return GL_TRUE;

	if (!(rmesa->radeon.radeonScreen->chip_flags & RADEON_CHIPSET_TCL))
		return GL_TRUE;

	return r300RunRender(ctx, stage);
}
Example #16
0
static int r300Fallback(GLcontext * ctx)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	/* Do we need to use new-style shaders?
	 * Also is there a better way to do this? */
	if (r300->radeon.radeonScreen->chip_family >= CHIP_FAMILY_RV515) {
		struct r500_fragment_program *fp = (struct r500_fragment_program *)
	    (char *)ctx->FragmentProgram._Current;
		if (fp) {
			if (!fp->translated) {
				r500TranslateFragmentShader(r300, fp);
				FALLBACK_IF(!fp->translated);
			}
		}
	} else {
		struct r300_fragment_program *fp = (struct r300_fragment_program *)
	    (char *)ctx->FragmentProgram._Current;
		if (fp) {
			if (!fp->translated) {
				r300TranslateFragmentShader(r300, fp);
				FALLBACK_IF(!fp->translated);
			}
		}
	}

	FALLBACK_IF(ctx->RenderMode != GL_RENDER);

	FALLBACK_IF(ctx->Stencil._TestTwoSide
		    && (ctx->Stencil.Ref[0] != ctx->Stencil.Ref[1]
			|| ctx->Stencil.ValueMask[0] !=
			ctx->Stencil.ValueMask[1]
			|| ctx->Stencil.WriteMask[0] !=
			ctx->Stencil.WriteMask[1]));

	if (ctx->Extensions.NV_point_sprite || ctx->Extensions.ARB_point_sprite)
		FALLBACK_IF(ctx->Point.PointSprite);

	if (!r300->disable_lowimpact_fallback) {
		FALLBACK_IF(ctx->Polygon.StippleFlag);
		FALLBACK_IF(ctx->Multisample._Enabled);
		FALLBACK_IF(ctx->Line.StippleFlag);
		FALLBACK_IF(ctx->Line.SmoothFlag);
		FALLBACK_IF(ctx->Point.SmoothFlag);
	}

	return R300_FALLBACK_NONE;
}
Example #17
0
void r300InitTextureFuncs(struct dd_function_table *functions)
{
	/* Note: we only plug in the functions we implement in the driver
	 * since _mesa_init_driver_functions() was already called.
	 */
	functions->ChooseTextureFormat = r300ChooseTextureFormat;
	functions->TexImage1D = r300TexImage1D;
	functions->TexImage2D = r300TexImage2D;
#if ENABLE_HW_3D_TEXTURE
	functions->TexImage3D = r300TexImage3D;
#else
	functions->TexImage3D = _mesa_store_teximage3d;
#endif
	functions->TexSubImage1D = r300TexSubImage1D;
	functions->TexSubImage2D = r300TexSubImage2D;
#if ENABLE_HW_3D_TEXTURE
	functions->TexSubImage3D = r300TexSubImage3D;
#else
	functions->TexSubImage3D = _mesa_store_texsubimage3d;
#endif
	functions->NewTextureObject = r300NewTextureObject;
	functions->BindTexture = r300BindTexture;
	functions->DeleteTexture = r300DeleteTexture;
	functions->IsTextureResident = driIsTextureResident;

	functions->TexEnv = r300TexEnv;
	functions->TexParameter = r300TexParameter;
	
	functions->CompressedTexImage2D	= r300CompressedTexImage2D;
	functions->CompressedTexSubImage2D	= r300CompressedTexSubImage2D;

	driInitTextureFormats();

#if 0
	/* moved or obsolete code */
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	driInitTextureObjects(ctx, &rmesa->swapped,
			      DRI_TEXMGR_DO_TEXTURE_1D
			      | DRI_TEXMGR_DO_TEXTURE_2D);

	/* Hack: r300NewTextureObject is not yet installed when the
	 * default textures are created. Therefore set MaxAnisotropy of the
	 * default 2D texture now. */
	ctx->Shared->Default2D->MaxAnisotropy =
	    driQueryOptionf(&rmesa->optionCache, "def_max_anisotropy");
#endif
}
Example #18
0
static void r300PrepareVertices(GLcontext *ctx)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	GLuint InputsRead, OutputsWritten;
	radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);

	r300ChooseSwtclVertexFormat(ctx, &InputsRead, &OutputsWritten);
	r300SetupVAP(ctx, InputsRead, OutputsWritten);

	rmesa->radeon.swtcl.vertex_size =
		_tnl_install_attrs( ctx,
				    rmesa->radeon.swtcl.vertex_attrs,
				    rmesa->radeon.swtcl.vertex_attr_count,
				    NULL, 0 );

	rmesa->radeon.swtcl.vertex_size /= 4;
}
Example #19
0
static GLboolean run_texrect_stage( GLcontext *ctx,
				    struct tnl_pipeline_stage *stage )
{
   struct texrect_stage_data *store = TEXRECT_STAGE_DATA(stage);
   r300ContextPtr rmesa = R300_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct vertex_buffer *VB = &tnl->vb;
   GLuint i;

   if (rmesa->radeon.Fallback)
      return GL_TRUE;

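   /* Rectangle texture coordinates arrive in texel space; rescale them to
    * [0,1] using the texture dimensions before rendering. */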
   for (i = 0 ; i < ctx->Const.MaxTextureUnits ; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled & TEXTURE_RECT_BIT) {
	 struct gl_texture_object *texObj = ctx->Texture.Unit[i].CurrentRect;
	 struct gl_texture_image *texImage = texObj->Image[0][texObj->BaseLevel];
	 const GLfloat iw = 1.0/texImage->Width;
	 const GLfloat ih = 1.0/texImage->Height;
	 GLfloat *in = (GLfloat *)VB->TexCoordPtr[i]->data;
	 GLint instride = VB->TexCoordPtr[i]->stride;
	 GLfloat (*out)[4] = store->texcoord[i].data;
	 GLint j;

	 store->texcoord[i].size = VB->TexCoordPtr[i]->size;
	 for (j = 0 ; j < VB->Count ; j++) {
	    switch (VB->TexCoordPtr[i]->size) {
	    case 4:
	       out[j][3] = in[3];
	    /* fallthrough */
	    case 3:
	       out[j][2] = in[2];
	    /* fallthrough */
	    default:
	       out[j][0] = in[0] * iw;
	       out[j][1] = in[1] * ih;
	    }
	    in = (GLfloat *)((GLubyte *)in + instride);
	 }

	 VB->AttribPtr[VERT_ATTRIB_TEX0+i] = VB->TexCoordPtr[i] = &store->texcoord[i];
      }
   }

   return GL_TRUE;
}
Example #20
0
static void r300DeleteTexture(GLcontext * ctx, struct gl_texture_object *texObj)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	driTextureObject *t = (driTextureObject *) texObj->DriverData;

	if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
		fprintf(stderr, "%s( %p (target = %s) )\n", __FUNCTION__,
			(void *)texObj,
			_mesa_lookup_enum_by_nr(texObj->Target));
	}

	if (t != NULL) {
		if (rmesa) {
			R300_FIREVERTICES(rmesa);
		}

		driDestroyTextureObject(t);
	}
	/* Free mipmap images and the texture object itself */
	_mesa_delete_texture_object(ctx, texObj);
}
Example #21
0
void r300RenderStart(GLcontext *ctx)
{
	radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);
	r300ContextPtr rmesa = R300_CONTEXT( ctx );

	r300ChooseRenderState(ctx);

	r300UpdateShaders(rmesa);

	r300PrepareVertices(ctx);

	r300ValidateBuffers(ctx);

	r300UpdateShaderStates(rmesa);


	/* investigate if we can put back flush optimisation if needed */
	if (rmesa->radeon.dma.flush != NULL) {
		rmesa->radeon.dma.flush(ctx);
	}
}
Example #22
0
struct r300_vertex_program * r300SelectAndTranslateVertexShader(GLcontext *ctx)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	struct r300_vertex_program_key wanted_key = { 0 };
	struct r300_vertex_program_cont *vpc;
	struct r300_vertex_program *vp;

	vpc = (struct r300_vertex_program_cont *)ctx->VertexProgram._Current;

	if (!r300->selected_fp) {
		/* This can happen when GetProgramiv is called to check
		 * whether the program runs natively.
		 *
		 * To be honest, this is not a very good solution,
		 * but solving the problem of reporting good values
		 * for those queries is tough anyway considering that
		 * we recompile vertex programs based on the precise
		 * fragment program that is in use.
		 */
		r300SelectAndTranslateFragmentShader(ctx);
	}

	wanted_key.FpReads = r300->selected_fp->InputsRead;
	wanted_key.FogAttr = r300->selected_fp->fog_attr;
	wanted_key.WPosAttr = r300->selected_fp->wpos_attr;

	for (vp = vpc->progs; vp; vp = vp->next) {
		if (_mesa_memcmp(&vp->key, &wanted_key, sizeof(wanted_key))
		    == 0) {
			return r300->selected_vp = vp;
		}
	}

	vp = build_program(ctx, &wanted_key, &vpc->mesa_program);
	vp->next = vpc->progs;
	vpc->progs = vp;

	return r300->selected_vp = vp;
}
Example #23
0
static GLboolean r300RunRender(GLcontext * ctx,
			       struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	int i;
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	struct vertex_buffer *vb = &tnl->vb;


	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	r300UpdateShaders(rmesa);
	if (r300EmitArrays(ctx))
		return GL_TRUE;

	r300UpdateShaderStates(rmesa);

	r300EmitCacheFlush(rmesa);
	r300EmitState(rmesa);

	for (i = 0; i < vb->PrimitiveCount; i++) {
		GLuint prim = _tnl_translate_prim(&vb->Primitive[i]);
		GLuint start = vb->Primitive[i].start;
		GLuint end = vb->Primitive[i].start + vb->Primitive[i].count;
		r300RunRenderPrimitive(rmesa, ctx, start, end, prim);
	}

	r300EmitCacheFlush(rmesa);

#ifdef USER_BUFFERS
	r300UseArrays(ctx);
#endif

	r300ReleaseArrays(ctx);

	return GL_FALSE;
}
Example #24
0
static int r300Fallback(GLcontext * ctx)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	struct r300_fragment_program *fp = (struct r300_fragment_program *)
	    (char *)ctx->FragmentProgram._Current;

	if (fp) {
		if (!fp->translated)
			r300TranslateFragmentShader(r300, fp);
		FALLBACK_IF(!fp->translated);
	}

	FALLBACK_IF(ctx->RenderMode != GL_RENDER);

	FALLBACK_IF(ctx->Stencil._TestTwoSide
		    && (ctx->Stencil.Ref[0] != ctx->Stencil.Ref[1]
			|| ctx->Stencil.ValueMask[0] !=
			ctx->Stencil.ValueMask[1]
			|| ctx->Stencil.WriteMask[0] !=
			ctx->Stencil.WriteMask[1]));

	FALLBACK_IF(ctx->Color.ColorLogicOpEnabled);

	if (ctx->Extensions.NV_point_sprite || ctx->Extensions.ARB_point_sprite)
		FALLBACK_IF(ctx->Point.PointSprite);

	if (!r300->disable_lowimpact_fallback) {
		FALLBACK_IF(ctx->Polygon.OffsetPoint);
		FALLBACK_IF(ctx->Polygon.OffsetLine);
		FALLBACK_IF(ctx->Polygon.StippleFlag);
		FALLBACK_IF(ctx->Multisample.Enabled);
		FALLBACK_IF(ctx->Line.StippleFlag);
		FALLBACK_IF(ctx->Line.SmoothFlag);
		FALLBACK_IF(ctx->Point.SmoothFlag);
	}

	return R300_FALLBACK_NONE;
}
Example #25
0
static void r300AlignDataToDword(GLcontext *ctx, const struct gl_client_array *input, int count, struct vertex_attribute *attr)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	const int dst_stride = (input->StrideB + 3) & ~3;
	const int size = getTypeSize(input->Type) * input->Size * count;
	GLboolean mapped_named_bo = GL_FALSE;

	radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, size, 32);

	radeon_bo_map(attr->bo, 1);

	if (!input->BufferObj->Pointer) {
		ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
		mapped_named_bo = GL_TRUE;
	}

	radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT, "%s. Vertex alignment doesn't match hw requirements.\n", __func__);

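	/* Copy the vertices one at a time, padding each element out to the
	 * dword-aligned destination stride computed above. */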
	{
		GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
		GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
		int i;

		for (i = 0; i < count; ++i) {
			memcpy(dst_ptr, src_ptr, input->StrideB);
			src_ptr += input->StrideB;
			dst_ptr += dst_stride;
		}
	}

	if (mapped_named_bo) {
		ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
	}

	radeon_bo_unmap(attr->bo);
	attr->stride = dst_stride;
}
Example #26
0
static GLuint r300PredictTryDrawPrimsSize(GLcontext *ctx,
		GLuint nr_prims, const struct _mesa_prim *prim)
{
	struct r300_context *r300 = R300_CONTEXT(ctx);
	struct r300_vertex_buffer *vbuf = &r300->vbuf;
	GLboolean flushed;
	GLuint dwords;
	GLuint state_size;
	int i;
	GLuint extra_prims = 0;

	/* Check for primitive splitting. */
	for (i = 0; i < nr_prims; ++i) {
		const GLuint num_verts =  r300NumVerts(r300, prim[i].count, prim[i].mode);
		extra_prims += num_verts/(65535 - 32);
	}
	nr_prims += extra_prims;

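	/* Fixed cost: two cache flushes plus pre-emit state, then per-primitive
	 * AOS setup, scissor and fire-AOS packets. */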
	dwords = 2*CACHE_FLUSH_BUFSZ;
	dwords += PRE_EMIT_STATE_BUFSZ;
	dwords += (AOS_BUFSZ(vbuf->num_attribs)
		+ SCISSORS_BUFSZ*2
		+ FIREAOS_BUFSZ )*nr_prims;

	state_size = radeonCountStateEmitSize(&r300->radeon);
	flushed = rcommonEnsureCmdBufSpace(&r300->radeon,
			dwords + state_size,
			__FUNCTION__);
	if (flushed)
		dwords += radeonCountStateEmitSize(&r300->radeon);
	else
		dwords += state_size;

	radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
	return dwords;
}
Example #27
0
void r300ChooseSwtclVertexFormat(GLcontext *ctx, GLuint *_InputsRead,  GLuint *_OutputsWritten)
{
	r300ContextPtr rmesa = R300_CONTEXT( ctx );
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	struct vertex_buffer *VB = &tnl->vb;
	int first_free_tex = 0;
	GLuint InputsRead = 0;
	GLuint OutputsWritten = 0;
	int num_attrs = 0;
	GLuint fp_reads = rmesa->selected_fp->InputsRead;
	struct vertex_attribute *attrs = rmesa->vbuf.attribs;

	radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);
	rmesa->swtcl.coloroffset = rmesa->swtcl.specoffset = 0;
	rmesa->radeon.swtcl.vertex_attr_count = 0;

	if (RADEON_DEBUG & RADEON_VERTS)
		fprintf(stderr, "%s\n", __func__);

	/* We always want non Ndc coords format */
	VB->AttribPtr[VERT_ATTRIB_POS] = VB->ClipPtr;

	/* Always write position vector */
	InputsRead |= 1 << VERT_ATTRIB_POS;
	OutputsWritten |= 1 << VERT_RESULT_HPOS;
	EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_4F );
	ADD_ATTR(VERT_ATTRIB_POS, R300_DATA_TYPE_FLOAT_4, SWTCL_OVM_POS, SWIZZLE_XYZW, MASK_XYZW, 0);
	rmesa->swtcl.coloroffset = 4;

	if (fp_reads & FRAG_BIT_COL0) {
		InputsRead |= 1 << VERT_ATTRIB_COLOR0;
		OutputsWritten |= 1 << VERT_RESULT_COL0;
#if MESA_LITTLE_ENDIAN
		EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_RGBA );
		ADD_ATTR(VERT_ATTRIB_COLOR0, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR0, SWIZZLE_XYZW, MASK_XYZW, 1);
#else
		EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_ABGR );
		ADD_ATTR(VERT_ATTRIB_COLOR0, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR0, SWIZZLE_XYZW, MASK_XYZW, 1);
#endif
	}

	if (fp_reads & FRAG_BIT_COL1) {
		GLuint swiz = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE);
		InputsRead |= 1 << VERT_ATTRIB_COLOR1;
		OutputsWritten |= 1 << VERT_RESULT_COL1;
#if MESA_LITTLE_ENDIAN
		EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_4UB_4F_RGBA );
		ADD_ATTR(VERT_ATTRIB_COLOR1, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR1, swiz, MASK_XYZW, 1);
#else
		EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_4UB_4F_ABGR );
		ADD_ATTR(VERT_ATTRIB_COLOR1, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR1, swiz, MASK_XYZW, 1);
#endif
		rmesa->swtcl.specoffset = rmesa->swtcl.coloroffset + 1;
	}

	if (ctx->Light.Enabled && ctx->Light.Model.TwoSide) {
		VB->AttribPtr[VERT_ATTRIB_GENERIC0] = VB->ColorPtr[1];
		OutputsWritten |= 1 << VERT_RESULT_BFC0;
#if MESA_LITTLE_ENDIAN
		EMIT_ATTR( _TNL_ATTRIB_GENERIC0, EMIT_4UB_4F_RGBA );
		ADD_ATTR(VERT_ATTRIB_GENERIC0, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR2, SWIZZLE_XYZW, MASK_XYZW, 1);
#else
		EMIT_ATTR( _TNL_ATTRIB_GENERIC0, EMIT_4UB_4F_ABGR );
		ADD_ATTR(VERT_ATTRIB_GENERIC0, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR2, SWIZZLE_XYZW, MASK_XYZW, 1);
#endif
		if (fp_reads & FRAG_BIT_COL1) {
			VB->AttribPtr[VERT_ATTRIB_GENERIC1] = VB->SecondaryColorPtr[1];
			GLuint swiz = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE);
			OutputsWritten |= 1 << VERT_RESULT_BFC1;
#if MESA_LITTLE_ENDIAN
			EMIT_ATTR( _TNL_ATTRIB_GENERIC1, EMIT_4UB_4F_RGBA );
			ADD_ATTR(VERT_ATTRIB_GENERIC1, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR3, swiz, MASK_XYZW, 1);
#else
			EMIT_ATTR( _TNL_ATTRIB_GENERIC1, EMIT_4UB_4F_ABGR );
			ADD_ATTR(VERT_ATTRIB_GENERIC1, R300_DATA_TYPE_BYTE, SWTCL_OVM_COLOR3, swiz, MASK_XYZW, 1);
#endif
		}
	}

	if (RENDERINPUTS_TEST(tnl->render_inputs_bitset, _TNL_ATTRIB_POINTSIZE )) {
		GLuint swiz = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ZERO);
		InputsRead |= 1 << VERT_ATTRIB_POINT_SIZE;
		OutputsWritten |= 1 << VERT_RESULT_PSIZ;
		EMIT_ATTR( _TNL_ATTRIB_POINTSIZE, EMIT_1F );
		ADD_ATTR(VERT_ATTRIB_POINT_SIZE, R300_DATA_TYPE_FLOAT_1, SWTCL_OVM_POINT_SIZE, swiz, MASK_X, 0);
	}

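	/* Fragment programs may read window position or fog through a spare
	 * texture coordinate; alias those vertex attributes onto texcoord slots. */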
	if (rmesa->selected_fp->wpos_attr != FRAG_ATTRIB_MAX) {
		int tex_id = rmesa->selected_fp->wpos_attr - FRAG_ATTRIB_TEX0;

		VB->AttribPtr[VERT_ATTRIB_TEX0 + tex_id] = VB->AttribPtr[VERT_ATTRIB_POS];
		VB->TexCoordPtr[tex_id] = VB->AttribPtr[VERT_ATTRIB_POS];
		RENDERINPUTS_SET(tnl->render_inputs_bitset, _TNL_ATTRIB_TEX0 + tex_id);
	}

	if (rmesa->selected_fp->fog_attr != FRAG_ATTRIB_MAX) {
		int tex_id = rmesa->selected_fp->fog_attr - FRAG_ATTRIB_TEX0;

		VB->AttribPtr[VERT_ATTRIB_TEX0 + tex_id] = VB->AttribPtr[VERT_ATTRIB_FOG];
		VB->TexCoordPtr[tex_id] = VB->AttribPtr[VERT_ATTRIB_FOG];
		RENDERINPUTS_SET(tnl->render_inputs_bitset, _TNL_ATTRIB_TEX0 + tex_id);
	}

	/**
	 *  Sending only one texcoord component may lead to lock up,
	 *  so for all textures always output 4 texcoord components to RS.
	 */
	{
		int i;
		GLuint swiz, format, hw_format;
		for (i = 0; i < ctx->Const.MaxTextureUnits; i++) {
			if (fp_reads & FRAG_BIT_TEX(i)) {
				switch (VB->TexCoordPtr[i]->size) {
					case 1:
						format = EMIT_1F;
						hw_format = R300_DATA_TYPE_FLOAT_1;
						swiz = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_ZERO, SWIZZLE_ZERO, SWIZZLE_ONE);
						break;
					case 2:
						format = EMIT_2F;
						hw_format = R300_DATA_TYPE_FLOAT_2;
						swiz = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_ZERO, SWIZZLE_ONE);
						break;
					case 3:
						format = EMIT_3F;
						hw_format = R300_DATA_TYPE_FLOAT_3;
						swiz = MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_ONE);
						break;
					case 4:
						format = EMIT_4F;
						hw_format = R300_DATA_TYPE_FLOAT_4;
						swiz = SWIZZLE_XYZW;
						break;
					default:
						continue;
				}
				InputsRead |= 1 << (VERT_ATTRIB_TEX0 + i);
				OutputsWritten |= 1 << (VERT_RESULT_TEX0 + i);
				EMIT_ATTR(_TNL_ATTRIB_TEX(i), format);
				ADD_ATTR(VERT_ATTRIB_TEX0 + i, hw_format, SWTCL_OVM_TEX(first_free_tex), swiz, MASK_XYZW, 0);
				++first_free_tex;
			}
		}
	}

	if (first_free_tex >= ctx->Const.MaxTextureUnits) {
		fprintf(stderr, "\tout of free texcoords to write fog coordinate\n");
		_mesa_exit(-1);
	}

	R300_NEWPRIM(rmesa);
	rmesa->vbuf.num_attribs = num_attrs;
	*_InputsRead = InputsRead;
	*_OutputsWritten = OutputsWritten;

	RENDERINPUTS_COPY(rmesa->render_inputs_bitset, tnl->render_inputs_bitset);
}
Example #28
0
static const struct gl_texture_format *r300ChooseTextureFormat(GLcontext * ctx,
							       GLint
							       internalFormat,
							       GLenum format,
							       GLenum type)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	const GLboolean do32bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
	const GLboolean force16bpt =
	    (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
	(void)format;

#if 0
	fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
		_mesa_lookup_enum_by_nr(internalFormat), internalFormat,
		_mesa_lookup_enum_by_nr(type),
		_mesa_lookup_enum_by_nr(format));
	fprintf(stderr, "do32bpt=%d force16bpt=%d\n",
		do32bpt, force16bpt);
#endif

	switch (internalFormat) {
	case 4:
	case GL_RGBA:
	case GL_COMPRESSED_RGBA:
		switch (type) {
		case GL_UNSIGNED_INT_10_10_10_2:
		case GL_UNSIGNED_INT_2_10_10_10_REV:
			return do32bpt ? _dri_texformat_argb8888 :
			    _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		default:
			return do32bpt ? _dri_texformat_rgba8888 :
			    _dri_texformat_argb4444;
		}

	case 3:
	case GL_RGB:
	case GL_COMPRESSED_RGB:
		switch (type) {
		case GL_UNSIGNED_SHORT_4_4_4_4:
		case GL_UNSIGNED_SHORT_4_4_4_4_REV:
			return _dri_texformat_argb4444;
		case GL_UNSIGNED_SHORT_5_5_5_1:
		case GL_UNSIGNED_SHORT_1_5_5_5_REV:
			return _dri_texformat_argb1555;
		case GL_UNSIGNED_SHORT_5_6_5:
		case GL_UNSIGNED_SHORT_5_6_5_REV:
			return _dri_texformat_rgb565;
		default:
			return do32bpt ? _dri_texformat_rgba8888 :
			    _dri_texformat_rgb565;
		}

	case GL_RGBA8:
	case GL_RGB10_A2:
	case GL_RGBA12:
	case GL_RGBA16:
		return !force16bpt ?
		    _dri_texformat_rgba8888 : _dri_texformat_argb4444;

	case GL_RGBA4:
	case GL_RGBA2:
		return _dri_texformat_argb4444;

	case GL_RGB5_A1:
		return _dri_texformat_argb1555;

	case GL_RGB8:
	case GL_RGB10:
	case GL_RGB12:
	case GL_RGB16:
		return !force16bpt ? _dri_texformat_rgba8888 :
		    _dri_texformat_rgb565;

	case GL_RGB5:
	case GL_RGB4:
	case GL_R3_G3_B2:
		return _dri_texformat_rgb565;

	case GL_ALPHA:
	case GL_ALPHA4:
	case GL_ALPHA8:
	case GL_ALPHA12:
	case GL_ALPHA16:
	case GL_COMPRESSED_ALPHA:
		return _dri_texformat_a8;

	case 1:
	case GL_LUMINANCE:
	case GL_LUMINANCE4:
	case GL_LUMINANCE8:
	case GL_LUMINANCE12:
	case GL_LUMINANCE16:
	case GL_COMPRESSED_LUMINANCE:
		return _dri_texformat_l8;

	case 2:
	case GL_LUMINANCE_ALPHA:
	case GL_LUMINANCE4_ALPHA4:
	case GL_LUMINANCE6_ALPHA2:
	case GL_LUMINANCE8_ALPHA8:
	case GL_LUMINANCE12_ALPHA4:
	case GL_LUMINANCE12_ALPHA12:
	case GL_LUMINANCE16_ALPHA16:
	case GL_COMPRESSED_LUMINANCE_ALPHA:
		return _dri_texformat_al88;

	case GL_INTENSITY:
	case GL_INTENSITY4:
	case GL_INTENSITY8:
	case GL_INTENSITY12:
	case GL_INTENSITY16:
	case GL_COMPRESSED_INTENSITY:
		return _dri_texformat_i8;

	case GL_YCBCR_MESA:
		if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
		    type == GL_UNSIGNED_BYTE)
			return &_mesa_texformat_ycbcr;
		else
			return &_mesa_texformat_ycbcr_rev;

	case GL_RGB_S3TC:
	case GL_RGB4_S3TC:
	case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
	  return &_mesa_texformat_rgb_dxt1;
	  
	case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
	  return &_mesa_texformat_rgba_dxt1;
	  
	case GL_RGBA_S3TC:
	case GL_RGBA4_S3TC:
	case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
	  return &_mesa_texformat_rgba_dxt3;
	  
	case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
	  return &_mesa_texformat_rgba_dxt5;
	  
	case GL_ALPHA16F_ARB:
	  return &_mesa_texformat_alpha_float16;
	case GL_ALPHA32F_ARB:
	  return &_mesa_texformat_alpha_float32;
	case GL_LUMINANCE16F_ARB:
	  return &_mesa_texformat_luminance_float16;
	case GL_LUMINANCE32F_ARB:
	  return &_mesa_texformat_luminance_float32;
	case GL_LUMINANCE_ALPHA16F_ARB:
	  return &_mesa_texformat_luminance_alpha_float16;
	case GL_LUMINANCE_ALPHA32F_ARB:
	  return &_mesa_texformat_luminance_alpha_float32;
	case GL_INTENSITY16F_ARB:
	  return &_mesa_texformat_intensity_float16;
	case GL_INTENSITY32F_ARB:
	  return &_mesa_texformat_intensity_float32;
	case GL_RGB16F_ARB:
	  return &_mesa_texformat_rgba_float16;
	case GL_RGB32F_ARB:
	  return &_mesa_texformat_rgba_float32;
	case GL_RGBA16F_ARB:
	  return &_mesa_texformat_rgba_float16;
	case GL_RGBA32F_ARB:
	  return &_mesa_texformat_rgba_float32;

	default:
		_mesa_problem(ctx,
			      "unexpected internalFormat 0x%x in r300ChooseTextureFormat",
			      (int)internalFormat);
		return NULL;
	}

	return NULL;		/* never get here */
}
Example #29
0
static GLboolean
r300ValidateClientStorage(GLcontext * ctx, GLenum target,
			  GLint internalFormat,
			  GLint srcWidth, GLint srcHeight,
			  GLenum format, GLenum type, const void *pixels,
			  const struct gl_pixelstore_attrib *packing,
			  struct gl_texture_object *texObj,
			  struct gl_texture_image *texImage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);

	if (0)
		fprintf(stderr, "intformat %s format %s type %s\n",
			_mesa_lookup_enum_by_nr(internalFormat),
			_mesa_lookup_enum_by_nr(format),
			_mesa_lookup_enum_by_nr(type));

	if (!ctx->Unpack.ClientStorage)
		return 0;

	if (ctx->_ImageTransferState ||
	    texImage->IsCompressed || texObj->GenerateMipmap)
		return 0;

	/* This list is incomplete, may be different on ppc???
	 */
	switch (internalFormat) {
	case GL_RGBA:
		if (format == GL_BGRA && type == GL_UNSIGNED_INT_8_8_8_8_REV) {
			texImage->TexFormat = _dri_texformat_argb8888;
		} else
			return 0;
		break;

	case GL_RGB:
		if (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5) {
			texImage->TexFormat = _dri_texformat_rgb565;
		} else
			return 0;
		break;

	case GL_YCBCR_MESA:
		if (format == GL_YCBCR_MESA &&
		    type == GL_UNSIGNED_SHORT_8_8_REV_APPLE) {
			texImage->TexFormat = &_mesa_texformat_ycbcr_rev;
		} else if (format == GL_YCBCR_MESA &&
			   (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
			    type == GL_UNSIGNED_BYTE)) {
			texImage->TexFormat = &_mesa_texformat_ycbcr;
		} else
			return 0;
		break;

	default:
		return 0;
	}

	/* Could deal with these packing issues, but currently don't:
	 */
	if (packing->SkipPixels ||
	    packing->SkipRows || packing->SwapBytes || packing->LsbFirst) {
		return 0;
	}

	{
		GLint srcRowStride = _mesa_image_row_stride(packing, srcWidth,
							    format, type);

		if (0)
			fprintf(stderr, "%s: srcRowStride %d/%x\n",
				__FUNCTION__, srcRowStride, srcRowStride);

		/* Could check this later in upload, pitch restrictions could be
		 * relaxed, but would need to store the image pitch somewhere,
		 * as packing details might change before image is uploaded:
		 */
		if (!r300IsGartMemory(rmesa, pixels, srcHeight * srcRowStride)
		    || (srcRowStride & 63))
			return 0;

		/* Have validated that _mesa_transfer_teximage would be a straight
		 * memcpy at this point.  NOTE: future calls to TexSubImage will
		 * overwrite the client data.  This is explicitly mentioned in the
		 * extension spec.
		 */
		texImage->Data = (void *)pixels;
		texImage->IsClientData = GL_TRUE;
		texImage->RowStride =
		    srcRowStride / texImage->TexFormat->TexelBytes;

		return 1;
	}
}
Example #30
0
GLboolean r300_run_vb_render(GLcontext *ctx,
				 struct tnl_pipeline_stage *stage)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_vertex_buffer *VB = &rmesa->state.VB;
	int i;
	LOCAL_VARS
   
	if (RADEON_DEBUG & DEBUG_PRIMS)
		fprintf(stderr, "%s\n", __FUNCTION__);

	if (stage) {
		TNLcontext *tnl = TNL_CONTEXT(ctx);
		radeon_vb_to_rvb(rmesa, VB, &tnl->vb);
	}
	
	r300UpdateShaders(rmesa);
	if (rmesa->state.VB.LockCount == 0 || 1) {
		r300ReleaseArrays(ctx);
		r300EmitArrays(ctx, GL_FALSE);

		r300UpdateShaderStates(rmesa);
	} else {
		/* TODO: Figure out why we need these. */
		R300_STATECHANGE(rmesa, vir[0]);
		R300_STATECHANGE(rmesa, vir[1]);
		R300_STATECHANGE(rmesa, vic);
		R300_STATECHANGE(rmesa, vof);
		
#if 0		
		fprintf(stderr, "dt:\n");
		for(i=0; i < VERT_ATTRIB_MAX; i++){
			fprintf(stderr, "dt %d:", i);
			dump_dt(&rmesa->state.VB.AttribPtr[i], VB->Count);
		}
		
		fprintf(stderr, "before:\n");
		for(i=0; i < rmesa->state.aos_count; i++){
			fprintf(stderr, "aos %d:", i);
			dump_array(&rmesa->state.aos[i], VB->Count);
		}
#endif
#if 0
 	  	r300ReleaseArrays(ctx);
		r300EmitArrays(ctx, GL_FALSE);
			
		fprintf(stderr, "after:\n");
		for(i=0; i < rmesa->state.aos_count; i++){
			fprintf(stderr, "aos %d:", i);
			dump_array(&rmesa->state.aos[i], VB->Count);
		}
#endif
	}
	
	reg_start(R300_RB3D_DSTCACHE_CTLSTAT,0);
	e32(0x0000000a);

	reg_start(0x4f18,0);
	e32(0x00000003);
#if 0
	reg_start(R300_VAP_PVS_WAITIDLE,0);
		e32(0x00000000);
#endif
	r300EmitState(rmesa);
	
	for(i=0; i < VB->PrimitiveCount; i++){
		GLuint prim = VB->Primitive[i].mode;
		GLuint start = VB->Primitive[i].start;
		GLuint length = VB->Primitive[i].count;
		
		r300_render_vb_primitive(rmesa, ctx, start, start + length, prim);
	}

	reg_start(R300_RB3D_DSTCACHE_CTLSTAT,0);
	e32(0x0000000a/*0x2*/);

	reg_start(0x4f18,0);
	e32(0x00000003/*0x1*/);

#ifdef USER_BUFFERS
	r300UseArrays(ctx);
#endif
	return GL_FALSE;
}