Example #1
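/**
 * Called by Mesa core to (re)allocate backing storage for a single
 * texture image: the old buffer is released, per-slice offsets are
 * reallocated, and the image is attached to the object's miptree.
 */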
static GLboolean
radeonAllocTextureImageBuffer(struct gl_context *ctx,
                              struct gl_texture_image *timage)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeon_texture_image *image = get_radeon_texture_image(timage);
    struct gl_texture_object *texobj = timage->TexObject;
    int slices;

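    /* Release any storage left over from a previous definition of this
     * image before allocating fresh offsets below. */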
    ctx->Driver.FreeTextureImageBuffer(ctx, timage);

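    /* Only 3D textures store more than one slice of offsets. */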
    switch (texobj->Target) {
    case GL_TEXTURE_3D:
        slices = timage->Depth;
        break;
    default:
        slices = 1;
    }
    assert(!image->base.ImageOffsets);
    image->base.ImageOffsets = malloc(slices * sizeof(GLuint));
    if (!image->base.ImageOffsets)
        return GL_FALSE; /* out of memory */
    teximage_assign_miptree(rmesa, texobj, timage);

    return GL_TRUE;
}
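For context, a driver makes this function visible to Mesa core by installing it in its dd_function_table. A minimal sketch of that wiring, assuming the usual table-initialization pattern (the helper name radeonInitTextureFuncs is illustrative, not taken from the example above):

static void
radeonInitTextureFuncs(struct dd_function_table *functions)
{
    /* Mesa core invokes this hook whenever image storage must be
     * (re)allocated, e.g. on every glTexImage specification. */
    functions->AllocTextureImageBuffer = radeonAllocTextureImageBuffer;
}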
Example #2
/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
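	/* If this image's buffer object is still referenced by the pending
	 * command stream, flush queued rendering before replacing it. */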
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;

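	/* Convolution may change the effective size of the incoming image. */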
	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
		_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						   &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
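		/* Without a miptree the image lives in plain memory, where
		 * rows are tightly packed, so the stride must match. */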
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

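	/* If the texture object is not already backed by a dedicated BO, try
	 * to assign a miptree; without one, the image lives in local memory. */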
	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

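	/* Drop the PBO mapping taken by the validate call above, if any. */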
	_mesa_unmap_teximage_pbo(ctx, packing);
}
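For reference, Mesa core does not call this helper directly; the driver exposes thin per-dimension wrappers around it. A minimal sketch of the 2D case, assuming the old GLcontext-era TexImage2D hook signature (the exact wrapper in the original driver may differ):

static void radeonTexImage2D(GLcontext *ctx, GLenum target, GLint level,
			     GLint internalFormat,
			     GLint width, GLint height, GLint border,
			     GLenum format, GLenum type, const GLvoid *pixels,
			     const struct gl_pixelstore_attrib *packing,
			     struct gl_texture_object *texObj,
			     struct gl_texture_image *texImage)
{
	/* Forward to the common path: 2 dimensions, depth 1, no compressed
	 * image size, compressed = 0. Texture borders are not forwarded. */
	radeon_teximage(ctx, 2, target, level, internalFormat,
			width, height, 1, 0, format, type, pixels,
			packing, texObj, texImage, 0);
}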