static void teximage_assign_miptree(radeonContextPtr rmesa,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned face,
	unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since the miptree only holds images for levels BaseLevel..MaxLevel,
	 * don't allocate a miptree if this teximage won't fit into that range.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try to reuse the current miptree, or create a new one if there isn't a matching one */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
				"%s: texObj %p, texImage %p, face %d, level %d, "
				"texObj miptree doesn't match, allocated new miptree %p\n",
				__FUNCTION__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree allocation may have failed,
	 * e.g. when no image was specified for the base level. */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Failed to allocate miptree.\n", __func__);
}
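/* The check above depends on image_matches_texture_obj(), which is not part
 * of this listing.  As a rough, self-contained illustration of the kind of
 * test the comment implies -- the level must fall inside the
 * BaseLevel..MaxLevel window and the image must have the size the mip chain
 * predicts from the base image -- here is a standalone sketch.  The stub
 * struct and field names below are placeholders, not the real Mesa types. */
#include <stdio.h>

struct tex_obj_stub {
	unsigned BaseLevel, MaxLevel;
	unsigned BaseWidth, BaseHeight;   /* size of the BaseLevel image */
};

/* Dimension of a level 'levels_down' below the base, floored at 1. */
static unsigned minify_dim(unsigned base, unsigned levels_down)
{
	unsigned d = base >> levels_down;
	return d ? d : 1;
}

/* Nonzero when a width x height image at 'level' fits the miptree window. */
static int image_fits_miptree_window(const struct tex_obj_stub *obj,
				     unsigned level, unsigned width, unsigned height)
{
	if (level < obj->BaseLevel || level > obj->MaxLevel)
		return 0;

	return width  == minify_dim(obj->BaseWidth,  level - obj->BaseLevel) &&
	       height == minify_dim(obj->BaseHeight, level - obj->BaseLevel);
}

int main(void)
{
	struct tex_obj_stub obj = { 0, 8, 256, 256 };

	printf("%d\n", image_fits_miptree_window(&obj, 2, 64, 64)); /* 1: fits */
	printf("%d\n", image_fits_miptree_window(&obj, 9, 1, 1));   /* 0: beyond MaxLevel */
	return 0;
}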
void r200FlushElts(struct gl_context *ctx)
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   int nr, elt_used = rmesa->tcl.elt_used;

   radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s %x %d\n", __FUNCTION__, rmesa->tcl.hw_primitive, elt_used);

   assert( rmesa->radeon.dma.flush == r200FlushElts );
   rmesa->radeon.dma.flush = NULL;

   nr = elt_used / 2;

   radeon_bo_unmap(rmesa->radeon.tcl.elt_dma_bo);

   r200FireEB(rmesa, nr, rmesa->tcl.hw_primitive);

   radeon_bo_unref(rmesa->radeon.tcl.elt_dma_bo);
   rmesa->radeon.tcl.elt_dma_bo = NULL;

   if (R200_ELT_BUF_SZ > elt_used)
     radeonReturnDmaRegion(&rmesa->radeon, R200_ELT_BUF_SZ - elt_used);

   if (radeon_is_debug_enabled(RADEON_SYNC, RADEON_CRITICAL)
         && !rmesa->radeon.radeonScreen->kernel_mm) {
      radeon_print(RADEON_SYNC, RADEON_NORMAL, "%s: Syncing\n", __FUNCTION__);
      radeonFinish( rmesa->radeon.glCtx );
   }
}
/** Assign a miptree to the given texture image, allocating a new one
 * when the texture object's current miptree doesn't match.
 */
static void teximage_assign_miptree(radeonContextPtr rmesa,
                                    struct gl_texture_object *texObj,
                                    struct gl_texture_image *texImage)
{
    radeonTexObj *t = radeon_tex_obj(texObj);
    radeon_texture_image* image = get_radeon_texture_image(texImage);

    /* Try to reuse the current miptree, or create a new one if there isn't a matching one */
    if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage)) {
        radeon_miptree_unreference(&t->mt);
        t->mt = radeon_miptree_create_for_teximage(rmesa,
                texObj,
                texImage);

        radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
                     "%s: texObj %p, texImage %p, "
                     "texObj miptree doesn't match, allocated new miptree %p\n",
                     __FUNCTION__, texObj, texImage, t->mt);
    }

    /* Miptree allocation may have failed,
     * e.g. when no image was specified for the base level. */
    if (t->mt) {
        radeon_miptree_reference(t->mt, &image->mt);
    } else
        radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
                     "%s Failed to allocate miptree.\n", __func__);
}
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for texture "
			"in processing by GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}
/**
 * All glTexSubImage calls go through this function.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
void r300_swtcl_flush(GLcontext *ctx, uint32_t current_offset)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);

	radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);

	r300EmitCacheFlush(rmesa);

	radeonEmitState(&rmesa->radeon);
	r300_emit_scissor(ctx);
	r300EmitVertexAOS(rmesa,
			rmesa->radeon.swtcl.vertex_size,
			first_elem(&rmesa->radeon.dma.reserved)->bo,
			current_offset);

	r300EmitVbufPrim(rmesa,
		   rmesa->radeon.swtcl.hw_primitive,
		   rmesa->radeon.swtcl.numverts);
	r300EmitCacheFlush(rmesa);
	if ( rmesa->radeon.swtcl.emit_prediction < rmesa->radeon.cmdbuf.cs->cdw )
		WARN_ONCE("Rendering was %d commands larger than predicted size."
			" We might overflow  command buffer.\n",
			rmesa->radeon.cmdbuf.cs->cdw - rmesa->radeon.swtcl.emit_prediction );
	rmesa->radeon.swtcl.emit_prediction = 0;
	COMMIT_BATCH();
}
void
radeonReadPixels(struct gl_context * ctx,
                 GLint x, GLint y, GLsizei width, GLsizei height,
                 GLenum format, GLenum type,
                 const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    radeon_prepare_render(radeon);

    if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack, pixels))
        return;

    /* Update Mesa state before calling down into _swrast_ReadPixels, as
     * the spans code requires the computed buffer states to be up to date,
     * but _swrast_ReadPixels only updates Mesa state after setting up
     * the spans code.
     */

    radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
                 "Falling back to sw for ReadPixels (format %s, type %s)\n",
                 _mesa_lookup_enum_by_nr(format), _mesa_lookup_enum_by_nr(type));

    if (ctx->NewState)
        _mesa_update_state(ctx);

    _swrast_ReadPixels(ctx, x, y, width, height, format, type, pack, pixels);
}
static void r300ChooseRenderState( GLcontext *ctx )
{
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	GLuint index = 0;
	GLuint flags = ctx->_TriangleCaps;
	radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);

	if (flags & DD_TRI_UNFILLED)      index |= R300_UNFILLED_BIT;

	if (index != rmesa->radeon.swtcl.RenderIndex) {
		tnl->Driver.Render.Points = rast_tab[index].points;
		tnl->Driver.Render.Line = rast_tab[index].line;
		tnl->Driver.Render.ClippedLine = rast_tab[index].line;
		tnl->Driver.Render.Triangle = rast_tab[index].triangle;
		tnl->Driver.Render.Quad = rast_tab[index].quad;

		if (index == 0) {
			tnl->Driver.Render.PrimTabVerts = r300_render_tab_verts;
			tnl->Driver.Render.PrimTabElts = r300_render_tab_elts;
			tnl->Driver.Render.ClippedPolygon = r300_fast_clipped_poly;
		} else {
			tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
			tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
			tnl->Driver.Render.ClippedPolygon = _tnl_RenderClippedPolygon;
		}

		rmesa->radeon.swtcl.RenderIndex = index;
	}
}
void r300InitSwtcl(GLcontext *ctx)
{
	TNLcontext *tnl = TNL_CONTEXT(ctx);
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	static int firsttime = 1;
	radeon_print(RADEON_SWRENDER, RADEON_NORMAL, "%s\n", __func__);

	if (firsttime) {
		init_rast_tab();
		firsttime = 0;
	}
	rmesa->radeon.swtcl.emit_prediction = 0;

	tnl->Driver.Render.Start = r300RenderStart;
	tnl->Driver.Render.Finish = r300RenderFinish;
	tnl->Driver.Render.PrimitiveNotify = r300RenderPrimitive;
	tnl->Driver.Render.ResetLineStipple = r300ResetLineStipple;
	tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
	tnl->Driver.Render.CopyPV = _tnl_copy_pv;
	tnl->Driver.Render.Interp = _tnl_interp;

	/* FIXME: what are these numbers? */
	_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
			    48 * sizeof(GLfloat) );

	rmesa->radeon.swtcl.verts = (GLubyte *)tnl->clipspace.vertex_buf;
	rmesa->radeon.swtcl.RenderIndex = ~0;
	rmesa->radeon.swtcl.render_primitive = GL_TRIANGLES;
	rmesa->radeon.swtcl.hw_primitive = 0;

	_tnl_invalidate_vertex_state( ctx, ~0 );
	_tnl_invalidate_vertices( ctx, ~0 );

	_tnl_need_projected_coords( ctx, GL_FALSE );
}
/**
 * Changes variables and flags for a state update, which will happen at the
 * next UpdateTextureState
 */
static void r200TexParameter( struct gl_context *ctx,
				struct gl_texture_object *texObj,
				GLenum pname, const GLfloat *params )
{
   radeonTexObj* t = radeon_tex_obj(texObj);

   radeon_print(RADEON_TEXTURE | RADEON_STATE, RADEON_VERBOSE,
		"%s(%p, tex %p)  pname %s\n",
		__FUNCTION__, ctx, texObj,
	       _mesa_lookup_enum_by_nr( pname ) );

   switch ( pname ) {
   case GL_TEXTURE_MIN_FILTER:
   case GL_TEXTURE_MAG_FILTER:
   case GL_TEXTURE_MAX_ANISOTROPY_EXT:
   case GL_TEXTURE_WRAP_S:
   case GL_TEXTURE_WRAP_T:
   case GL_TEXTURE_WRAP_R:
   case GL_TEXTURE_BORDER_COLOR:
   case GL_TEXTURE_BASE_LEVEL:
   case GL_TEXTURE_MAX_LEVEL:
   case GL_TEXTURE_MIN_LOD:
   case GL_TEXTURE_MAX_LOD:
      t->validated = GL_FALSE;
      break;

   default:
      return;
   }
}
static void r700DeleteProgram(GLcontext * ctx, struct gl_program *prog)
{
    struct r700_vertex_program_cont *vpc = (struct r700_vertex_program_cont *)prog;
    struct r700_fragment_program *fp;

    radeon_print(RADEON_SHADER, RADEON_VERBOSE,
                 "%s %p\n", __func__, prog);

    switch (prog->Target)
    {
    case GL_VERTEX_STATE_PROGRAM_NV:
    case GL_VERTEX_PROGRAM_ARB:
        freeVertProgCache(ctx, vpc);
        break;
    case GL_FRAGMENT_PROGRAM_NV:
    case GL_FRAGMENT_PROGRAM_ARB:
        fp = (struct r700_fragment_program *)prog;
        /* Release DMA region */
        r600DeleteShader(ctx, fp->shaderbo);

        /* Clean up */
        Clean_Up_Assembler(&(fp->r700AsmCode));
        Clean_Up_Shader(&(fp->r700Shader));
        break;
    default:
        _mesa_problem(ctx, "Bad target in r700DeleteProgram");
    }

    _mesa_delete_program(ctx, prog);
}
static void
radeon_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
		     GLuint width, GLuint height)
{
   struct radeon_framebuffer *radeon_fb = (struct radeon_framebuffer*)fb;
   int i;

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, fb %p) \n",
		__func__, ctx, fb);

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = GL_TRUE; /* XXX remove someday */

   if (fb->Name != 0) {
      return;
   }

   /* Make sure all window system renderbuffers are up to date */
   for (i = 0; i < 2; i++) {
      struct gl_renderbuffer *rb = &radeon_fb->color_rb[i]->base;

      /* only resize if size is changing */
      if (rb && (rb->Width != width || rb->Height != height)) {
	 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
      }
   }
}
GLushort *r200AllocEltsOpenEnded( r200ContextPtr rmesa,
				    GLuint primitive,
				    GLuint min_nr )
{
   GLushort *retval;

   radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);

   assert((primitive & R200_VF_PRIM_WALK_IND));
   
   radeonEmitState(&rmesa->radeon);

   radeonAllocDmaRegion(&rmesa->radeon, &rmesa->radeon.tcl.elt_dma_bo,
			&rmesa->radeon.tcl.elt_dma_offset, R200_ELT_BUF_SZ, 4);
   rmesa->tcl.elt_used = min_nr * 2;

   radeon_bo_map(rmesa->radeon.tcl.elt_dma_bo, 1);
   retval = rmesa->radeon.tcl.elt_dma_bo->ptr + rmesa->radeon.tcl.elt_dma_offset;
   
   assert(!rmesa->radeon.dma.flush);
   rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
   rmesa->radeon.dma.flush = r200FlushElts;

   return retval;
}
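/* r200AllocEltsOpenEnded() installs r200FlushElts() (shown earlier) as the
 * deferred radeon.dma.flush callback and seeds tcl.elt_used with min_nr * 2,
 * i.e. two bytes per GLushort index; the flush later asserts that pairing and
 * recovers the index count as nr = elt_used / 2.  The standalone model below
 * mirrors that pattern with illustrative names only -- it is not the r200 API. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct elt_state {
	uint16_t buf[64];                    /* stands in for the mapped elt BO */
	unsigned elt_used;                   /* bytes of indices written so far */
	void (*flush)(struct elt_state *);   /* stands in for radeon.dma.flush */
};

static void model_flush_elts(struct elt_state *s)
{
	unsigned nr = s->elt_used / 2;       /* two bytes per 16-bit index */

	assert(s->flush == model_flush_elts);
	s->flush = NULL;
	printf("firing %u indices\n", nr);
}

static uint16_t *model_alloc_elts(struct elt_state *s, unsigned min_nr)
{
	assert(!s->flush);                   /* no flush may already be pending */
	s->elt_used = min_nr * 2;
	s->flush = model_flush_elts;
	return s->buf;
}

int main(void)
{
	struct elt_state s = { .elt_used = 0, .flush = NULL };
	uint16_t *elts = model_alloc_elts(&s, 3);

	elts[0] = 0; elts[1] = 1; elts[2] = 2;   /* one triangle's worth of indices */
	s.flush(&s);                             /* prints "firing 3 indices" */
	return 0;
}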
static void r700SendViewportState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	int id = 0;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (id > R700_MAX_VIEWPORTS)
		return;

	if (!r700->viewport[id].enabled)
		return;

        BEGIN_BATCH_NO_AUTOSTATE(16);
	R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_SCISSOR_0_TL + (8 * id), 2);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_TL.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_SCISSOR_0_BR.u32All);
	R600_OUT_BATCH_REGSEQ(PA_SC_VPORT_ZMIN_0 + (8 * id), 2);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMIN_0.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_SC_VPORT_ZMAX_0.u32All);
	R600_OUT_BATCH_REGSEQ(PA_CL_VPORT_XSCALE_0 + (24 * id), 6);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_XOFFSET.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_YOFFSET.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZSCALE.u32All);
	R600_OUT_BATCH(r700->viewport[id].PA_CL_VPORT_ZOFFSET.u32All);
        END_BATCH();

	COMMIT_BATCH();

}
static struct radeon_renderbuffer *
radeon_wrap_texture(struct gl_context * ctx, struct gl_texture_image *texImage)
{
  const GLuint name = ~0;   /* not significant, but distinct for debugging */
  struct radeon_renderbuffer *rrb;

   /* make a radeon_renderbuffer to wrap the texture image */
   rrb = CALLOC_STRUCT(radeon_renderbuffer);

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, rrb %p, texImage %p) \n",
		__func__, ctx, rrb, texImage);

   if (!rrb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
      return NULL;
   }

   _mesa_init_renderbuffer(&rrb->base, name);
   rrb->base.ClassID = RADEON_RB_CLASS;

   if (!radeon_update_wrapper(ctx, rrb, texImage)) {
      free(rrb);
      return NULL;
   }

   return rrb;
}
static void r700SendVTXState(GLcontext *ctx, struct radeon_state_atom *atom)
{
    context_t         *context = R700_CONTEXT(ctx);
    struct r700_vertex_program *vp = context->selected_vp;
    unsigned int i, j = 0;
    BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

    if (context->radeon.tcl.aos_count == 0)
	    return;

    BEGIN_BATCH_NO_AUTOSTATE(6);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);

    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 1));
    R600_OUT_BATCH(mmSQ_VTX_START_INST_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(0);
    END_BATCH();
    COMMIT_BATCH();

    for(i=0; i<VERT_ATTRIB_MAX; i++) {
	    if(vp->mesa_program->Base.InputsRead & (1 << i))
	    {
                r700SetupVTXConstants(ctx,
				      (void*)(&context->radeon.tcl.aos[j]),
				      &(context->stream_desc[j]));
		j++;
	    }
    }
}
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = radeon->query.current;
	BATCH_LOCALS(radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	/* clear the buffer */
	radeon_bo_map(query->bo, GL_FALSE);
	memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
	radeon_bo_unmap(query->bo);

	radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
				      query->bo,
				      0, RADEON_GEM_DOMAIN_GTT);

	BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
	R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
	R600_OUT_BATCH(ZPASS_DONE);
	R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
	R600_OUT_BATCH(0x00000000);
	R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
	END_BATCH();
	query->emitted_begin = GL_TRUE;
}
static void r700SendScissorState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(22);
	R600_OUT_BATCH_REGSEQ(PA_SC_SCREEN_SCISSOR_TL, 2);
	R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_SCREEN_SCISSOR_BR.u32All);

	R600_OUT_BATCH_REGSEQ(PA_SC_WINDOW_OFFSET, 12);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_OFFSET.u32All);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_WINDOW_SCISSOR_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_RULE.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_0_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_1_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_2_BR.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_CLIPRECT_3_BR.u32All);

	R600_OUT_BATCH_REGSEQ(PA_SC_GENERIC_SCISSOR_TL, 2);
	R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_TL.u32All);
	R600_OUT_BATCH(r700->PA_SC_GENERIC_SCISSOR_BR.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Target type %s.\n",
			__func__, ctx, texObj,
			_mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}
}
/**
 * Create a new mipmap tree, calculate its layout and allocate memory.
 */
radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa,
					  GLenum target, mesa_format mesaFormat, GLuint baseLevel, GLuint numLevels,
					  GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits)
{
	radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		"%s(%p) new tree is %p.\n",
		__func__, rmesa, mt);

	mt->mesaFormat = mesaFormat;
	mt->refcount = 1;
	mt->target = target;
	mt->faces = _mesa_num_tex_faces(target);
	mt->baseLevel = baseLevel;
	mt->numLevels = numLevels;
	mt->width0 = width0;
	mt->height0 = height0;
	mt->depth0 = depth0;
	mt->tilebits = tilebits;

	calculate_miptree_layout(rmesa, mt);

	mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
                            0, mt->totalsize, 1024,
                            RADEON_GEM_DOMAIN_VRAM,
                            0);

	return mt;
}
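/* radeon_miptree_create() is handed the level range and base dimensions by
 * its callers (radeon_try_alloc_miptree / radeon_miptree_create_for_teximage,
 * neither of which appears in this listing).  One plausible way a caller
 * might bound numLevels is the full chain from the base size down to 1x1x1;
 * the standalone helper below only illustrates that arithmetic and is an
 * assumption, not the driver's actual derivation. */
#include <stdio.h>

static unsigned max_mip_levels(unsigned width0, unsigned height0, unsigned depth0)
{
	unsigned size = width0, levels = 1;

	if (height0 > size) size = height0;
	if (depth0  > size) size = depth0;
	while (size > 1) {
		size >>= 1;
		levels++;
	}
	return levels;
}

int main(void)
{
	/* A 256x128 base image yields levels 256, 128, ..., 1: nine in total. */
	printf("%u\n", max_mip_levels(256, 128, 1));
	return 0;
}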
static void r700SendDBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	BEGIN_BATCH_NO_AUTOSTATE(17);

	R600_OUT_BATCH_REGSEQ(DB_STENCIL_CLEAR, 2);
	R600_OUT_BATCH(r700->DB_STENCIL_CLEAR.u32All);
	R600_OUT_BATCH(r700->DB_DEPTH_CLEAR.u32All);

	R600_OUT_BATCH_REGVAL(DB_DEPTH_CONTROL, r700->DB_DEPTH_CONTROL.u32All);
	R600_OUT_BATCH_REGVAL(DB_SHADER_CONTROL, r700->DB_SHADER_CONTROL.u32All);

	R600_OUT_BATCH_REGSEQ(DB_RENDER_CONTROL, 2);
	R600_OUT_BATCH(r700->DB_RENDER_CONTROL.u32All);
	R600_OUT_BATCH(r700->DB_RENDER_OVERRIDE.u32All);

	R600_OUT_BATCH_REGVAL(DB_ALPHA_TO_MASK, r700->DB_ALPHA_TO_MASK.u32All);

	END_BATCH();
	COMMIT_BATCH();
}
static void
radeon_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	gl_format mesa_format;
	int i;

	for (i = -2; i < (GLint) ctx->Const.MaxColorAttachments; i++) {
		struct gl_renderbuffer_attachment *att;
		if (i == -2) {
			att = &fb->Attachment[BUFFER_DEPTH];
		} else if (i == -1) {
			att = &fb->Attachment[BUFFER_STENCIL];
		} else {
			att = &fb->Attachment[BUFFER_COLOR0 + i];
		}

		if (att->Type == GL_TEXTURE) {
			mesa_format = att->Texture->Image[att->CubeMapFace][att->TextureLevel]->TexFormat;
		} else {
			/* All renderbuffer formats are renderable, but not samplable */
			continue;
		}

		if (!radeon->vtbl.is_format_renderable(mesa_format)){
			fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
			radeon_print(RADEON_TEXTURE, RADEON_TRACE,
						"%s: HW doesn't support format %s as output format of attachment %d\n",
						__FUNCTION__, _mesa_get_format_name(mesa_format), i);
			return;
		}
	}
}
static void r700SendCBState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
		BEGIN_BATCH_NO_AUTOSTATE(11);
		R600_OUT_BATCH_REGSEQ(CB_CLEAR_RED, 4);
		R600_OUT_BATCH(r700->CB_CLEAR_RED_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_GREEN_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_BLUE_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_CLEAR_ALPHA_R6XX.u32All);
		R600_OUT_BATCH_REGSEQ(CB_FOG_RED, 3);
		R600_OUT_BATCH(r700->CB_FOG_RED_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_FOG_GREEN_R6XX.u32All);
		R600_OUT_BATCH(r700->CB_FOG_BLUE_R6XX.u32All);
		END_BATCH();
	}

	BEGIN_BATCH_NO_AUTOSTATE(7);
	R600_OUT_BATCH_REGSEQ(CB_TARGET_MASK, 2);
	R600_OUT_BATCH(r700->CB_TARGET_MASK.u32All);
	R600_OUT_BATCH(r700->CB_SHADER_MASK.u32All);
	R600_OUT_BATCH_REGVAL(R7xx_CB_SHADER_CONTROL, r700->CB_SHADER_CONTROL.u32All);
	END_BATCH();
	COMMIT_BATCH();
}
static void r700SendCBBlendState(GLcontext *ctx, struct radeon_state_atom *atom)
{
	context_t *context = R700_CONTEXT(ctx);
	R700_CHIP_CONTEXT *r700 = R700_CONTEXT_STATES(context);
	BATCH_LOCALS(&context->radeon);
	unsigned int ui;
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770) {
		BEGIN_BATCH_NO_AUTOSTATE(3);
		R600_OUT_BATCH_REGVAL(CB_BLEND_CONTROL, r700->CB_BLEND_CONTROL.u32All);
		END_BATCH();
	}

	BEGIN_BATCH_NO_AUTOSTATE(3);
	R600_OUT_BATCH_REGVAL(CB_COLOR_CONTROL, r700->CB_COLOR_CONTROL.u32All);
	END_BATCH();

	if (context->radeon.radeonScreen->chip_family > CHIP_FAMILY_R600) {
		for (ui = 0; ui < R700_MAX_RENDER_TARGETS; ui++) {
			if (r700->render_target[ui].enabled) {
				BEGIN_BATCH_NO_AUTOSTATE(3);
				R600_OUT_BATCH_REGVAL(CB_BLEND0_CONTROL + (4 * ui),
						      r700->render_target[ui].CB_BLEND0_CONTROL.u32All);
				END_BATCH();
			}
		}
	}

	COMMIT_BATCH();
}
static void calculate_miptree_layout(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
{
	GLuint curOffset, i, face, level;

	assert(mt->numLevels <= rmesa->glCtx.Const.MaxTextureLevels);

	curOffset = 0;
	for(face = 0; face < mt->faces; face++) {

		for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
			mt->levels[level].valid = 1;
			mt->levels[level].width = minify(mt->width0, i);
			mt->levels[level].height = minify(mt->height0, i);
			mt->levels[level].depth = minify(mt->depth0, i);
			compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
		}
	}

	/* Note the required size in memory */
	mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p, %p) total size %d\n",
			__func__, rmesa, mt, mt->totalsize);
}
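/* calculate_miptree_layout() leans on two bits of arithmetic: minify(), which
 * halves a dimension per level with a floor of 1, and the final round-up of
 * curOffset via RADEON_OFFSET_MASK.  The standalone sketch below reproduces
 * both; the mask value here is only an example, the real constant comes from
 * the driver headers. */
#include <stdio.h>

#define EXAMPLE_OFFSET_MASK 0x3ffu   /* illustrative: 1024-byte alignment - 1 */

/* Dimension of mip level i, never smaller than 1. */
static unsigned example_minify(unsigned base, unsigned i)
{
	unsigned d = base >> i;
	return d ? d : 1;
}

/* Round 'offset' up to the next multiple of (mask + 1), mirroring
 * (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK above. */
static unsigned example_align_up(unsigned offset, unsigned mask)
{
	return (offset + mask) & ~mask;
}

int main(void)
{
	unsigned level;

	for (level = 0; level < 5; level++)   /* 100x60, 50x30, 25x15, 12x7, 6x3 */
		printf("level %u: %ux%u\n", level,
		       example_minify(100, level), example_minify(60, level));

	printf("%u -> %u\n", 5000u, example_align_up(5000, EXAMPLE_OFFSET_MASK)); /* 5120 */
	return 0;
}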
void
radeonReadPixels(struct gl_context * ctx,
                 GLint x, GLint y, GLsizei width, GLsizei height,
                 GLenum format, GLenum type,
                 const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    radeon_prepare_render(radeon);

    if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack, pixels))
        return;

    /* Update Mesa state before calling _mesa_readpixels().
     * XXX this may not be needed since ReadPixels no longer uses the
     * span code.
     */
    radeon_print(RADEON_FALLBACKS, RADEON_NORMAL,
                 "Falling back to sw for ReadPixels (format %s, type %s)\n",
                 _mesa_lookup_enum_by_nr(format), _mesa_lookup_enum_by_nr(type));

    if (ctx->NewState)
        _mesa_update_state(ctx);

    _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);
}
static unsigned get_aligned_compressed_row_stride(
		mesa_format format,
		unsigned width,
		unsigned minStride)
{
	const unsigned blockBytes = _mesa_get_format_bytes(format);
	unsigned blockWidth, blockHeight;
	unsigned stride;

	_mesa_get_format_block_size(format, &blockWidth, &blockHeight);

	/* Count the number of blocks required to store the given width,
	 * then multiply by the number of bytes required to store a block.
	 */
	stride = (width + blockWidth - 1) / blockWidth * blockBytes;

	/* Round the given minimum stride to the next full blocksize.
	 * (minStride + blockBytes - 1) / blockBytes * blockBytes
	 */
	if ( stride < minStride )
		stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s width %u, minStride %u, block(bytes %u, width %u):"
			"stride %u\n",
			__func__, width, minStride,
			blockBytes, blockWidth,
			stride);

	return stride;
}
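/* Worked example of the stride math above with the block parameters
 * hard-coded for a DXT1-style format (4x4 blocks, 8 bytes per block); the
 * real function queries them via _mesa_get_format_bytes() and
 * _mesa_get_format_block_size().  Standalone illustration only. */
#include <stdio.h>

static unsigned example_compressed_row_stride(unsigned width, unsigned minStride)
{
	const unsigned blockBytes = 8;   /* assumed DXT1-like block size */
	const unsigned blockWidth = 4;
	unsigned stride = (width + blockWidth - 1) / blockWidth * blockBytes;

	if (stride < minStride)
		stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;
	return stride;
}

int main(void)
{
	/* width 13 -> ceil(13 / 4) = 4 blocks -> 4 * 8 = 32 bytes */
	printf("%u\n", example_compressed_row_stride(13, 0));
	/* minStride 50 wins and is rounded up to a whole block: 56 bytes */
	printf("%u\n", example_compressed_row_stride(13, 50));
	return 0;
}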
static void r300_predict_emit_size( r300ContextPtr rmesa )
{
	if (!rmesa->radeon.swtcl.emit_prediction) {
		const int vertex_size = 7;
		const int prim_size = 3;
		const int cache_flush_size = 4;
		const int pre_emit_state = 4;
		const int scissor_size = 3;
		const int state_size = radeonCountStateEmitSize(&rmesa->radeon);

		if (rcommonEnsureCmdBufSpace(&rmesa->radeon,
					state_size + pre_emit_state + scissor_size
					+ vertex_size + prim_size + cache_flush_size * 2,
					__FUNCTION__))
			rmesa->radeon.swtcl.emit_prediction = radeonCountStateEmitSize(&rmesa->radeon);
		else
			rmesa->radeon.swtcl.emit_prediction = state_size;

		rmesa->radeon.swtcl.emit_prediction += rmesa->radeon.cmdbuf.cs->cdw
			+ vertex_size + scissor_size + prim_size + cache_flush_size * 2 + pre_emit_state;
		radeon_print(RADEON_SWRENDER, RADEON_VERBOSE,
				"%s, size %d\n",
				__func__, rmesa->radeon.cmdbuf.cs->cdw
				+ vertex_size + scissor_size + prim_size + cache_flush_size * 2 + pre_emit_state);
	}
}
static void r200DeleteTexture(struct gl_context * ctx, struct gl_texture_object *texObj)
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   radeonTexObj* t = radeon_tex_obj(texObj);

   radeon_print(RADEON_TEXTURE | RADEON_STATE, RADEON_NORMAL,
           "%s( %p (target = %s) )\n", __FUNCTION__,
	   (void *)texObj,
	   _mesa_lookup_enum_by_nr(texObj->Target));

   if (rmesa) {
      int i;
      radeon_firevertices(&rmesa->radeon);
      for ( i = 0 ; i < rmesa->radeon.glCtx.Const.MaxTextureUnits ; i++ ) {
	 if ( t == rmesa->state.texture.unit[i].texobj ) {
	    rmesa->state.texture.unit[i].texobj = NULL;
	    rmesa->hw.tex[i].dirty = GL_FALSE;
	    rmesa->hw.cube[i].dirty = GL_FALSE;
	 }
      }      
   }

   radeon_miptree_unreference(&t->mt);

   _mesa_delete_texture_object(ctx, texObj);
}
static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
{
	radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __FUNCTION__, q->Id);

#ifdef DRM_RADEON_GEM_BUSY
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);

	if (radeon->radeonScreen->kernel_mm) {
		struct radeon_query_object *query = (struct radeon_query_object *)q;
		uint32_t domain;

		/* Need to perform a flush, as per ARB_occlusion_query spec */
		if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs)) {
			ctx->Driver.Flush(ctx);
		}

		if (radeon_bo_is_busy(query->bo, &domain) == 0) {
			radeonQueryGetResult(ctx, q);
			query->Base.Ready = GL_TRUE;
		}
	} else {
		radeonWaitQuery(ctx, q);
	}
#else
	radeonWaitQuery(ctx, q);
#endif
}