Example #1
static void r200DeleteTexture(struct gl_context * ctx, struct gl_texture_object *texObj)
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);
   radeonTexObj* t = radeon_tex_obj(texObj);

   radeon_print(RADEON_TEXTURE | RADEON_STATE, RADEON_NORMAL,
           "%s( %p (target = %s) )\n", __FUNCTION__,
	   (void *)texObj,
	   _mesa_lookup_enum_by_nr(texObj->Target));

   if (rmesa) {
      int i;
      radeon_firevertices(&rmesa->radeon);
      for ( i = 0 ; i < rmesa->radeon.glCtx.Const.MaxTextureUnits ; i++ ) {
	 if ( t == rmesa->state.texture.unit[i].texobj ) {
	    rmesa->state.texture.unit[i].texobj = NULL;
	    rmesa->hw.tex[i].dirty = GL_FALSE;
	    rmesa->hw.cube[i].dirty = GL_FALSE;
	 }
      }      
   }

   radeon_miptree_unreference(&t->mt);

   _mesa_delete_texture_object(ctx, texObj);
}
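
The hook above only takes effect once it is installed in the driver's
function table. A minimal sketch of that wiring, assuming Mesa's
standard dd_function_table layout (the setup function's name here is
illustrative):

/* Install the texture-deletion hook during context creation.
 * functions->DeleteTexture is the dd_function_table slot that
 * glDeleteTextures ultimately calls into. */
static void r200InitTextureFuncsSketch(struct dd_function_table *functions)
{
   functions->DeleteTexture = r200DeleteTexture;
}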
Example #2
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for texture "
			"in processing by GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}
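
Nearly every example on this page repeats the same guard: if the
texture's backing buffer object is still referenced by the unsubmitted
command stream, fire the queued vertices before the CPU touches it. The
pattern in isolation, as a sketch assuming the libdrm_radeon helpers
used above:

/* Flush queued rendering if the command stream still references this
 * buffer object, so that a following CPU map/read/write is safe. */
static void flush_if_referenced(radeonContextPtr rmesa, struct radeon_bo *bo)
{
	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs))
		radeon_firevertices(rmesa);
}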
Example #3
static void radeonClear( struct gl_context *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint hwmask, swmask;
   GLuint hwbits = BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT |
                   BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL |
                   BUFFER_BIT_COLOR0;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      rmesa->radeon.front_buffer_dirty = GL_TRUE;
   }

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   radeon_firevertices(&rmesa->radeon);

   hwmask = mask & hwbits;
   swmask = mask & ~hwbits;

   if ( swmask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, swmask);
      _swrast_Clear( ctx, swmask );
   }

   if ( !hwmask )
      return;

   radeonUserClear(ctx, hwmask);
}
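
A worked example of the mask split above: clearing the back, depth, and
accumulation buffers sends back and depth to the hardware path, while
the accumulation bit (absent from hwbits) falls through to swrast.
BUFFER_BIT_ACCUM is a core Mesa buffer bit; the fragment reuses hwbits
from the function:

GLbitfield mask   = BUFFER_BIT_BACK_LEFT | BUFFER_BIT_DEPTH | BUFFER_BIT_ACCUM;
GLuint     hwmask = mask & hwbits;    /* back + depth -> radeonUserClear() */
GLuint     swmask = mask & ~hwbits;   /* accum        -> _swrast_Clear()   */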
Example #4
/**
 * Map texture memory/buffer into user space.
 * Note: the width/height of the region of interest are ignored here;
 * the x/y offsets are applied to the returned pointer.
 * \param map  returns the start of the mapping of the region of interest
 * \param stride  returns the row stride in bytes
 */
static void
radeon_map_texture_image(struct gl_context *ctx,
                         struct gl_texture_image *texImage,
                         GLuint slice,
                         GLuint x, GLuint y, GLuint w, GLuint h,
                         GLbitfield mode,
                         GLubyte **map,
                         GLint *stride)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeon_texture_image *image = get_radeon_texture_image(texImage);
    radeon_mipmap_tree *mt = image->mt;
    GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
    GLuint width = texImage->Width;
    GLuint height = texImage->Height;
    struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
    unsigned int bw, bh;
    GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;

    _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
    assert(y % bh == 0);
    y /= bh;
    texel_size /= bw;

    if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
        radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
                     "%s for texture that is "
                     "queued for GPU processing.\n",
                     __func__);
        radeon_firevertices(rmesa);
    }

    if (image->bo) {
        /* TFP case */
        radeon_bo_map(image->bo, write);
        *stride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0, texImage->TexObject->Target);
        *map = bo->ptr;
    } else if (likely(mt)) {
        void *base;
        radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];

        radeon_bo_map(mt->bo, write);
        base = mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;

        *stride = lvl->rowstride;
        *map = base + (slice * height) * *stride;
    } else {
        /* texture data is in malloc'd memory */

        assert(map);

        *stride = _mesa_format_row_stride(texImage->TexFormat, width);
        *map = image->base.Buffer + (slice * height) * *stride;
    }

    *map += y * *stride + x * texel_size;
}
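
The pointer arithmetic at the end is worth spelling out: after the
block-size adjustment, the slice index selects a 2D layer, then y rows
and x texels offset into it. The same math as a hypothetical standalone
helper:

/* Compute the user-space address of texel (x, y) in a given slice of a
 * mapped image; base is the start of the mapping, stride the row pitch
 * in bytes. */
static GLubyte *texel_address(GLubyte *base, GLint stride,
                              GLuint slice, GLuint height,
                              GLuint x, GLuint y, GLuint texel_size)
{
    base += (slice * height) * stride;          /* step to the 2D slice */
    return base + y * stride + x * texel_size;  /* then to the texel    */
}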
Example #5
static void radeonClear( struct gl_context *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   GLuint flags = 0;
   GLuint orig_mask = mask;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      rmesa->radeon.front_buffer_dirty = GL_TRUE;
   }

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   radeon_firevertices(&rmesa->radeon);

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
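      /* A fast Z clear wipes whole depth/stencil tiles, so it is only
       * safe when depth and stencil are cleared together and the
       * stencil write mask is fully open. */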
      if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
	    ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
	  flags |= RADEON_CLEAR_FASTZ;
      }
   }

   radeonUserClear(ctx, orig_mask);
}
Example #6
/**
 * All glTexSubImage calls go through this function.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
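
The per-dimension glTexSubImage entry points are thin forwarders into
this helper. A sketch of the 2D wrapper, assuming the signature above
(the real driver installs very similar ones):

static void radeonTexSubImage2D(GLcontext *ctx, GLenum target, GLint level,
		GLint xoffset, GLint yoffset,
		GLsizei width, GLsizei height,
		GLenum format, GLenum type,
		const GLvoid *pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage)
{
	/* 2 dims, zoffset = 0, depth = 1, imageSize unused when
	 * not compressed */
	radeon_texsubimage(ctx, 2, target, level, xoffset, yoffset, 0,
		width, height, 1, 0, format, type, pixels, packing,
		texObj, texImage, 0);
}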
Example #7
static void radeonSpanRenderStart(struct gl_context * ctx)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	radeon_firevertices(rmesa);

	_swrast_map_textures(ctx);

	radeon_map_framebuffer(ctx, ctx->DrawBuffer);
	if (ctx->ReadBuffer != ctx->DrawBuffer)
		radeon_map_framebuffer(ctx, ctx->ReadBuffer);
}
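
The driver pairs this with a symmetric RenderFinish hook that undoes
the mappings once swrast is done. A sketch, assuming unmap counterparts
to the helpers used above:

static void radeonSpanRenderFinish(struct gl_context *ctx)
{
	_swrast_flush(ctx);

	radeon_unmap_framebuffer(ctx, ctx->DrawBuffer);
	if (ctx->ReadBuffer != ctx->DrawBuffer)
		radeon_unmap_framebuffer(ctx, ctx->ReadBuffer);

	_swrast_unmap_textures(ctx);
}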
Example #8
void radeonFallback( struct gl_context *ctx, GLuint bit, GLboolean mode )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint oldfallback = rmesa->radeon.Fallback;

   if (mode) {
      rmesa->radeon.Fallback |= bit;
      if (oldfallback == 0) {
	 radeon_firevertices(&rmesa->radeon);
	 TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_TRUE );
	 _swsetup_Wakeup( ctx );
	 rmesa->radeon.swtcl.RenderIndex = ~0;
         if (RADEON_DEBUG & RADEON_FALLBACKS) {
            fprintf(stderr, "Radeon begin rasterization fallback: 0x%x %s\n",
                    bit, getFallbackString(bit));
         }
      }
   }
   else {
      rmesa->radeon.Fallback &= ~bit;
      if (oldfallback == bit) {
	 _swrast_flush( ctx );
	 tnl->Driver.Render.Start = radeonRenderStart;
	 tnl->Driver.Render.PrimitiveNotify = radeonRenderPrimitive;
	 tnl->Driver.Render.Finish = radeonRenderFinish;

	 tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
	 tnl->Driver.Render.CopyPV = _tnl_copy_pv;
	 tnl->Driver.Render.Interp = _tnl_interp;

	 tnl->Driver.Render.ResetLineStipple = radeonResetLineStipple;
	 TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_FALSE );
	 if (rmesa->radeon.TclFallback) {
	    /* These are already done if rmesa->radeon.TclFallback goes to
	     * zero above. But not if it doesn't (RADEON_NO_TCL for
	     * example?)
	     */
	    _tnl_invalidate_vertex_state( ctx, ~0 );
	    _tnl_invalidate_vertices( ctx, ~0 );
	    rmesa->radeon.tnl_index_bitset = 0;
	    radeonChooseVertexState( ctx );
	    radeonChooseRenderState( ctx );
	 }
         if (RADEON_DEBUG & RADEON_FALLBACKS) {
            fprintf(stderr, "Radeon end rasterization fallback: 0x%x %s\n",
                    bit, getFallbackString(bit));
         }
      }
   }
}
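
State-validation code drives this entry point by raising a fallback bit
when an unsupported state combination appears and clearing it again
when it goes away. RADEON_FALLBACK_BLEND_EQ is one of this driver's
fallback bits; the trigger variable here is illustrative:

/* Drop to software rasterization while the current blend equation is
 * one the hardware cannot do, and leave the fallback once it changes. */
radeonFallback(ctx, RADEON_FALLBACK_BLEND_EQ, !blend_equation_supported);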
Example #9
/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed; core Mesa performs that
 * validation before this driver hook is called.
 * Called via glBufferSubDataARB().
 */
static void
radeonBufferSubData(struct gl_context * ctx,
                    GLintptrARB offset,
                    GLsizeiptrARB size,
                    const GLvoid * data,
                    struct gl_buffer_object *obj)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);

    if (radeon_bo_is_referenced_by_cs(radeon_obj->bo, radeon->cmdbuf.cs)) {
        radeon_firevertices(radeon);
    }

    radeon_bo_map(radeon_obj->bo, GL_TRUE);

    memcpy(radeon_obj->bo->ptr + offset, data, size);

    radeon_bo_unmap(radeon_obj->bo);
}
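
The body itself is just the synchronize/map/copy/unmap pattern. The
same shape as a generic helper, assuming the libdrm_radeon API used
above (the second argument of radeon_bo_map is the write flag):

static void bo_write_range(struct radeon_bo *bo, GLintptr offset,
                           const GLvoid *data, GLsizeiptr size)
{
    radeon_bo_map(bo, GL_TRUE);                    /* map for writing */
    memcpy((char *)bo->ptr + offset, data, size);  /* CPU-side copy   */
    radeon_bo_unmap(bo);
}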
Example #10
/**
 * Validate the texture's mipmap tree.
 * If the individual images are stored in different mipmap trees,
 * use the tree that already holds the most of the required data.
 */
int radeon_validate_texture_miptree(struct gl_context * ctx,
				    struct gl_sampler_object *samp,
				    struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_mipmap_tree *dst_miptree;

	if (samp == &texObj->Sampler && (t->validated || t->image_override)) {
		return GL_TRUE;
	}

	calculate_min_max_lod(samp, &t->base, &t->minLod, &t->maxLod);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
			__func__, texObj, t->minLod, t->maxLod);

	dst_miptree = get_biggest_matching_miptree(t, t->base.BaseLevel, t->base._MaxLevel);

	radeon_miptree_unreference(&t->mt);
	if (!dst_miptree) {
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: No matching miptree found, allocated new one %p\n",
			__func__, t->mt);

	} else {
		radeon_miptree_reference(dst_miptree, &t->mt);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Using miptree %p\n", __func__, t->mt);
	}

	const unsigned faces = _mesa_num_tex_faces(texObj->Target);
	unsigned face, level;
	radeon_texture_image *img;
	/* Validate only the levels that will actually be used during rendering */
	for (face = 0; face < faces; ++face) {
		for (level = t->minLod; level <= t->maxLod; ++level) {
			img = get_radeon_texture_image(texObj->Image[face][level]);

			radeon_print(RADEON_TEXTURE, RADEON_TRACE,
				"Checking image level %d, face %d, mt %p ... ",
				level, face, img->mt);
			
			if (img->mt != t->mt && !img->used_as_render_target) {
				radeon_print(RADEON_TEXTURE, RADEON_TRACE,
					"MIGRATING\n");

				struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo;
				if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) {
					radeon_firevertices(rmesa);
				}
				migrate_image_to_miptree(t->mt, img, face, level);
			} else
				radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n");
		}
	}

	t->validated = GL_TRUE;

	return GL_TRUE;
}
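
get_biggest_matching_miptree implements the policy the comment names:
keep the tree that already holds the most of the levels that will be
used. An illustrative sketch of that counting idea (the real function
also checks that a candidate tree's format and layout match the texture
object):

static radeon_mipmap_tree *pick_miptree_sketch(radeonTexObj *t,
					       unsigned firstLevel,
					       unsigned lastLevel)
{
	radeon_mipmap_tree *best = NULL;
	unsigned best_hits = 0, level, l2;

	for (level = firstLevel; level <= lastLevel; ++level) {
		radeon_texture_image *img =
			get_radeon_texture_image(t->base.Image[0][level]);
		unsigned hits = 0;

		if (!img || !img->mt)
			continue;
		/* count how many levels already live in this candidate */
		for (l2 = firstLevel; l2 <= lastLevel; ++l2) {
			radeon_texture_image *other =
				get_radeon_texture_image(t->base.Image[0][l2]);
			if (other && other->mt == img->mt)
				++hits;
		}
		if (hits > best_hits) {
			best_hits = hits;
			best = img->mt;
		}
	}
	return best;
}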
Example #11
/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}


	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
	       _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						  &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
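
A worked example of the 32-byte minimum pitch rule above: with a
2-byte-per-texel format such as RGB565 and a width of 8, a row would be
only 16 bytes, so the width used for the stride is widened:

GLuint texelBytes    = 2;   /* e.g. MESA_FORMAT_RGB565      */
GLint  postConvWidth = 8;   /* 8 texels * 2 bytes = 16 < 32 */
if (postConvWidth * texelBytes < 32)
	postConvWidth = 32 / texelBytes;   /* -> 16 texels = 32 bytes */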
Example #12
static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = radeon_get_drawable(&rmesa->radeon);
   GLuint flags = 0;
   GLuint color_mask = 0;
   GLuint orig_mask = mask;

   if ( RADEON_DEBUG & RADEON_IOCTL ) {
      fprintf( stderr, "radeonClear\n");
   }

   {
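      /* Taking and dropping the hardware lock refreshes the drawable's
       * cliprect list from the X server before it is checked below. */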
      LOCK_HARDWARE( &rmesa->radeon );
      UNLOCK_HARDWARE( &rmesa->radeon );
      if ( dPriv->numClipRects == 0 )
	 return;
   }

   radeon_firevertices(&rmesa->radeon);

   if ( mask & BUFFER_BIT_FRONT_LEFT ) {
      flags |= RADEON_FRONT;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_FRONT_LEFT;
   }

   if ( mask & BUFFER_BIT_BACK_LEFT ) {
      flags |= RADEON_BACK;
      color_mask = rmesa->hw.msk.cmd[MSK_RB3D_PLANEMASK];
      mask &= ~BUFFER_BIT_BACK_LEFT;
   }

   if ( mask & BUFFER_BIT_DEPTH ) {
      flags |= RADEON_DEPTH;
      mask &= ~BUFFER_BIT_DEPTH;
   }

   if ( (mask & BUFFER_BIT_STENCIL) ) {
      flags |= RADEON_STENCIL;
      mask &= ~BUFFER_BIT_STENCIL;
   }

   if ( mask ) {
      if (RADEON_DEBUG & RADEON_FALLBACKS)
	 fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
      _swrast_Clear( ctx, mask );
   }

   if ( !flags )
      return;

   if (rmesa->using_hyperz) {
      flags |= RADEON_USE_COMP_ZBUF;
/*      if (rmesa->radeon.radeonScreen->chipset & RADEON_CHIPSET_TCL)
         flags |= RADEON_USE_HIERZ; */
      if (((flags & RADEON_DEPTH) && (flags & RADEON_STENCIL) &&
	    ((rmesa->radeon.state.stencil.clear & RADEON_STENCIL_WRITE_MASK) == RADEON_STENCIL_WRITE_MASK))) {
	  flags |= RADEON_CLEAR_FASTZ;
      }
   }

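   /* KMS contexts (kernel_mm) submit clears through the user-space CS
    * path; DRI1 contexts use the legacy kernel clear ioctl and must
    * mark all hardware state dirty afterwards. */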
   if (rmesa->radeon.radeonScreen->kernel_mm)
     radeonUserClear(ctx, orig_mask);
   else {
      radeonKernelClear(ctx, flags);
      rmesa->radeon.hw.all_dirty = GL_TRUE;
   }
}