Example #1
static void teximage_assign_miptree(radeonContextPtr rmesa,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	unsigned face,
	unsigned level)
{
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	/* Since the miptree holds only images for levels <BaseLevel..MaxLevel>,
	 * don't allocate the miptree if the teximage won't fit.
	 */
	if (!image_matches_texture_obj(texObj, texImage, level))
		return;

	/* Try using current miptree, or create new if there isn't any */
	if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
		radeon_miptree_unreference(&t->mt);
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
				"%s: texObj %p, texImage %p, face %d, level %d, "
				"texObj miptree doesn't match, allocated new miptree %p\n",
				__FUNCTION__, texObj, texImage, face, level, t->mt);
	}

	/* Miptree allocation may have failed
	 * when no image was specified for the base level. */
	if (t->mt) {
		image->mtface = face;
		image->mtlevel = level;
		radeon_miptree_reference(t->mt, &image->mt);
	} else
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Failed to allocate miptree.\n", __func__);
}
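Neither radeon_miptree_reference() nor radeon_miptree_unreference() appears in these excerpts. A minimal sketch of the semantics the code above relies on; the refcount field and the destructor name are assumptions, not the driver's actual code:

static void sketch_miptree_reference(radeon_mipmap_tree *mt,
				     radeon_mipmap_tree **ptr)
{
	/* Assumed: bump the tree's refcount and store the new owner's pointer. */
	mt->refcount++;
	*ptr = mt;
}

static void sketch_miptree_unreference(radeon_mipmap_tree **ptr)
{
	/* Assumed: tolerate NULL, drop the count, free on the last release. */
	if (*ptr && --(*ptr)->refcount == 0)
		radeon_miptree_destroy(*ptr);	/* hypothetical destructor */
	*ptr = NULL;
}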
Example #2
/** Allocate a miptree for the texture object if needed, and attach
 * the given texture image to it.
 */
static void teximage_assign_miptree(radeonContextPtr rmesa,
                                    struct gl_texture_object *texObj,
                                    struct gl_texture_image *texImage)
{
    radeonTexObj *t = radeon_tex_obj(texObj);
    radeon_texture_image* image = get_radeon_texture_image(texImage);

    /* Try using current miptree, or create new if there isn't any */
    if (!t->mt || !radeon_miptree_matches_image(t->mt, texImage)) {
        radeon_miptree_unreference(&t->mt);
        t->mt = radeon_miptree_create_for_teximage(rmesa,
                texObj,
                texImage);

        radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
                     "%s: texObj %p, texImage %p, "
                     "texObj miptree doesn't match, allocated new miptree %p\n",
                     __FUNCTION__, texObj, texImage, t->mt);
    }

    /* Miptree allocation may have failed
     * when no image was specified for the base level. */
    if (t->mt) {
        radeon_miptree_reference(t->mt, &image->mt);
    } else
        radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
                     "%s Failed to allocate miptree.\n", __func__);
}
Example #3
/**
 * Wraps Mesa's implementation to ensure that the base level image is mapped.
 *
 * This relies on internal details of _mesa_generate_mipmap, in particular
 * the fact that the memory for recreated texture images is always freed.
 */
static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
				   struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
	int i, face;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Target type %s.\n",
			__func__, ctx, texObj,
			_mesa_lookup_enum_by_nr(target));

	_mesa_generate_mipmap(ctx, target, texObj);

	for (face = 0; face < nr_faces; face++) {
		for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
			radeon_texture_image *image;

			image = get_radeon_texture_image(texObj->Image[face][i]);

			if (image == NULL)
				break;

			image->mtlevel = i;
			image->mtface = face;

			radeon_miptree_unreference(&image->mt);
		}
	}
}
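Example #4 below is the caller of this wrapper; it maps the base image before delegating, which is the precondition the comment above describes:

	radeon_teximage_map(baseimage, GL_FALSE);
	radeon_generate_mipmap(ctx, target, texObj);
	radeon_teximage_unmap(baseimage);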
Example #4
void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	struct radeon_bo *bo;
	GLuint face = _mesa_tex_target_to_face(target);
	radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
	bo = !baseimage->mt ? baseimage->bo : baseimage->mt->bo;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s(%p, target %s, tex %p)\n",
		__func__, ctx, _mesa_lookup_enum_by_nr(target),
		texObj);

	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s(%p, tex %p) Trying to generate mipmap for texture "
			"in processing by GPU.\n",
			__func__, ctx, texObj);
		radeon_firevertices(rmesa);
	}

	if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
		radeon_teximage_map(baseimage, GL_FALSE);
		radeon_generate_mipmap(ctx, target, texObj);
		radeon_teximage_unmap(baseimage);
	} else {
		_mesa_meta_GenerateMipmap(ctx, target, texObj);
	}
}
Example #5
static void unmap_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_unmap(t->bo);

	img->base.Data = NULL;
}
Example #6
static void map_override(GLcontext *ctx, radeonTexObj *t)
{
	radeon_texture_image *img = get_radeon_texture_image(t->base.Image[0][0]);

	radeon_bo_map(t->bo, GL_FALSE);

	img->base.Data = t->bo->ptr;
}
Example #7
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
                                    struct gl_texture_object *texObj,
                                    struct gl_texture_image *texImage,
                                    GLeglImageOES image_handle)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    radeonTexObj *t = radeon_tex_obj(texObj);
    radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
    __DRIscreen *screen;
    __DRIimage *image;

    screen = radeon->dri.screen;
    image = screen->dri2.image->lookupEGLImage(screen, image_handle,
            screen->loaderPrivate);
    if (image == NULL)
        return;

    radeonFreeTextureImageBuffer(ctx, texImage);

    texImage->Width = image->width;
    texImage->Height = image->height;
    texImage->Depth = 1;
    texImage->_BaseFormat = GL_RGBA;
    texImage->TexFormat = image->format;
    radeonImage->base.RowStride = image->pitch;
    texImage->InternalFormat = image->internal_format;

    if(t->mt)
    {
        radeon_miptree_unreference(&t->mt);
        t->mt = NULL;
    }

    /* NOTE: The following is *very* ugly and will probably break. But
       I don't know how to deal with it, without creating a whole new
       function like radeon_miptree_from_bo() so I'm going with the
       easy but error-prone way. */

    radeon_try_alloc_miptree(radeon, t);

    if (t->mt == NULL)
    {
        radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
                     "%s Failed to allocate miptree.\n", __func__);
        return;
    }

    /* Reference only after the NULL check above; referencing a NULL
       miptree would crash. */
    radeon_miptree_reference(t->mt, &radeonImage->mt);

    /* Particularly ugly: this is guaranteed to break if image->bo is
       not of the required size for a miptree. */
    radeon_bo_unref(t->mt->bo);
    radeon_bo_ref(image->bo);
    t->mt->bo = image->bo;

    if (!radeon_miptree_matches_image(t->mt, &radeonImage->base.Base))
        fprintf(stderr, "miptree doesn't match image\n");
}
Example #8
/**
 * Map texture memory/buffer into user space.
 * Note: the width/height of the region of interest are ignored here;
 * the x/y offsets are applied to the returned pointer.
 * \param map  returns start of mapping of region of interest
 * \param stride  returns row stride in bytes
 */
static void
radeon_map_texture_image(struct gl_context *ctx,
                         struct gl_texture_image *texImage,
                         GLuint slice,
                         GLuint x, GLuint y, GLuint w, GLuint h,
                         GLbitfield mode,
                         GLubyte **map,
                         GLint *stride)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeon_texture_image *image = get_radeon_texture_image(texImage);
    radeon_mipmap_tree *mt = image->mt;
    GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
    GLuint width = texImage->Width;
    GLuint height = texImage->Height;
    struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
    unsigned int bw, bh;
    GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;

    _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
    assert(y % bh == 0);
    y /= bh;
    texel_size /= bw;

    if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
        radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
                     "%s for texture that is "
                     "queued for GPU processing.\n",
                     __func__);
        radeon_firevertices(rmesa);
    }

    if (image->bo) {
        /* TFP case */
        radeon_bo_map(image->bo, write);
        *stride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0, texImage->TexObject->Target);
        *map = bo->ptr;
    } else if (likely(mt)) {
        void *base;
        radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];

        radeon_bo_map(mt->bo, write);
        base = mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;

        *stride = lvl->rowstride;
        *map = base + (slice * height) * *stride;
    } else {
        /* texture data is in malloc'd memory */

        assert(map);

        *stride = _mesa_format_row_stride(texImage->TexFormat, width);
        *map = image->base.Buffer + (slice * height) * *stride;
    }

    *map += y * *stride + x * texel_size;
}
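A worked example of the block-compression adjustments above, assuming an illustrative DXT1-like format (4x4 blocks, 8 bytes per block):

	/* bw = bh = 4, texel_size = 8 bytes per block.                     */
	/* After the adjustments: y /= 4 (texel rows -> block rows) and     */
	/* texel_size = 8 / 4 = 2 bytes per texel column.                   */
	/* For x = 16, y = 8:                                               */
	/*   *map += (8 / 4) * *stride + 16 * 2                             */
	/*         = 2 block rows + 32 bytes (= 4 blocks of 8 bytes each).  */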
Example #9
/**
 * Filter matching miptrees, and select the one holding the most data.
 * @param[in] texObj radeon texture object
 * @param[in] firstLevel first texture level to check
 * @param[in] lastLevel last texture level to check
 */
static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj,
														 unsigned firstLevel,
														 unsigned lastLevel)
{
	const unsigned numLevels = lastLevel - firstLevel + 1;
	unsigned *mtSizes = calloc(numLevels, sizeof(unsigned));
	radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *));
	unsigned mtCount = 0;
	unsigned maxMtIndex = 0;
	radeon_mipmap_tree *tmp;
	unsigned int level;
	int i;

	for (level = firstLevel; level <= lastLevel; ++level) {
		radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]);
		unsigned found = 0;
		// TODO: why this hack??
		if (!img)
			break;

		if (!img->mt)
			continue;

		for (i = 0; i < mtCount; ++i) {
			if (mts[i] == img->mt) {
				found = 1;
				mtSizes[i] += img->mt->levels[img->base.Base.Level].size;
				break;
			}
		}

		if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) {
			mtSizes[mtCount] = img->mt->levels[img->base.Base.Level].size;
			mts[mtCount] = img->mt;
			mtCount++;
		}
	}

	if (mtCount == 0) {
		free(mtSizes);
		free(mts);
		return NULL;
	}

	for (i = 1; i < mtCount; ++i) {
		if (mtSizes[i] > mtSizes[maxMtIndex]) {
			maxMtIndex = i;
		}
	}

	tmp = mts[maxMtIndex];
	free(mtSizes);
	free(mts);

	return tmp;
}
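Example #17 below is the caller: the tree selected here becomes the destination that individual images are migrated into during validation:

	dst_miptree = get_biggest_matching_miptree(t, t->base.BaseLevel, t->base._MaxLevel);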
Example #10
static void
radeon_unmap_texture_image(struct gl_context *ctx,
                           struct gl_texture_image *texImage, GLuint slice)
{
    radeon_texture_image *image = get_radeon_texture_image(texImage);

    if (image->bo)
        radeon_bo_unmap(image->bo);
    else if (image->mt)
        radeon_bo_unmap(image->mt->bo);
}
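Both entry points above are meant to be registered together as the driver's texture mapping hooks. A plausible wiring sketch; the function-table field names follow Mesa's dd_function_table of this era and are an assumption, since the init code is not part of these excerpts:

	/* Sketch: hook registration during driver setup (assumed context). */
	functions->MapTextureImage = radeon_map_texture_image;
	functions->UnmapTextureImage = radeon_unmap_texture_image;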
Example #11
/**
 * All glTexSubImage calls go through this function.
 */
static void radeon_texsubimage(GLcontext* ctx, int dims, GLenum target, int level,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage,
			_mesa_tex_target_to_face(target), level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling texsubimage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(ctx, dims,
			width, height, depth, format, type, pixels, packing, "glTexSubImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			xoffset, yoffset, zoffset,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
Example #12
void
radeon_swrast_map_texture_images(struct gl_context *ctx,
                                 struct gl_texture_object *texObj)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    GLuint nr_faces = _mesa_num_tex_faces(texObj->Target);
    int i, face;

    for (i = texObj->BaseLevel; i <= texObj->_MaxLevel; i++) {
        for (face = 0; face < nr_faces; face++) {
            radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][i]);
            radeon_swrast_map_image(rmesa, image);
        }
    }
}
Example #13
/**
 * Free memory associated with this texture image.
 */
void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(&image->mt);
		assert(!image->base.Data);
	} else {
		_mesa_free_texture_image_data(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (timage->Data) {
		_mesa_free_texmemory(timage->Data);
		timage->Data = NULL;
	}
}
Example #14
/**
 * Free memory associated with this texture image.
 */
void radeonFreeTextureImageBuffer(struct gl_context *ctx, struct gl_texture_image *timage)
{
	radeon_texture_image* image = get_radeon_texture_image(timage);

	if (image->mt) {
		radeon_miptree_unreference(&image->mt);
	} else {
		_swrast_free_texture_image_buffer(ctx, timage);
	}
	if (image->bo) {
		radeon_bo_unref(image->bo);
		image->bo = NULL;
	}
	if (image->base.Buffer) {
		_mesa_align_free(image->base.Buffer);
		image->base.Buffer = NULL;
	}

	free(image->base.ImageOffsets);
	image->base.ImageOffsets = NULL;
}
Example #15
/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			"sw fallback.\n",
			__func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Work around for missing miptree in r100.\n",
			__func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* r100 3D sw fallbacks don't have a miptree */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			__func__, ctx, texObj);
		return;
	}

	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}
Example #16
static GLboolean
radeonAllocTextureImageBuffer(struct gl_context *ctx,
                              struct gl_texture_image *timage)
{
    radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    radeon_texture_image *image = get_radeon_texture_image(timage);
    struct gl_texture_object *texobj = timage->TexObject;
    int slices;

    ctx->Driver.FreeTextureImageBuffer(ctx, timage);

    switch (texobj->Target) {
    case GL_TEXTURE_3D:
        slices = timage->Depth;
        break;
    default:
        slices = 1;
    }
    assert(!image->base.ImageOffsets);
    image->base.ImageOffsets = malloc(slices * sizeof(GLuint));
    teximage_assign_miptree(rmesa, texobj, timage);

    return GL_TRUE;
}
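The ImageOffsets array is allocated here with one entry per slice but is filled elsewhere. A plausible sketch of the fill (an assumption, not shown in these excerpts), following the swrast convention of per-slice offsets in texels from the buffer start:

	/* Sketch: one texel offset per slice of a 3D texture. */
	int i;
	for (i = 0; i < slices; i++)
		image->base.ImageOffsets[i] = i * timage->Width * timage->Height;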
Example #17
/**
 * Validate texture mipmap tree.
 * If individual images are stored in different mipmap trees
 * use the mipmap tree that has the most of the correct data.
 */
int radeon_validate_texture_miptree(struct gl_context * ctx,
				    struct gl_sampler_object *samp,
				    struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_mipmap_tree *dst_miptree;

	if (samp == &texObj->Sampler && (t->validated || t->image_override)) {
		return GL_TRUE;
	}

	calculate_min_max_lod(samp, &t->base, &t->minLod, &t->maxLod);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
			__func__, texObj, t->minLod, t->maxLod);

	dst_miptree = get_biggest_matching_miptree(t, t->base.BaseLevel, t->base._MaxLevel);

	radeon_miptree_unreference(&t->mt);
	if (!dst_miptree) {
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: No matching miptree found, allocated new one %p\n",
			__func__, t->mt);

	} else {
		radeon_miptree_reference(dst_miptree, &t->mt);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Using miptree %p\n", __func__, t->mt);
	}

	const unsigned faces = _mesa_num_tex_faces(texObj->Target);
	unsigned face, level;
	radeon_texture_image *img;
	/* Validate only the levels that will actually be used during rendering */
	for (face = 0; face < faces; ++face) {
		for (level = t->minLod; level <= t->maxLod; ++level) {
			img = get_radeon_texture_image(texObj->Image[face][level]);

			radeon_print(RADEON_TEXTURE, RADEON_TRACE,
				"Checking image level %d, face %d, mt %p ... ",
				level, face, img->mt);
			
			if (img->mt != t->mt && !img->used_as_render_target) {
				radeon_print(RADEON_TEXTURE, RADEON_TRACE,
					"MIGRATING\n");

				struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo;
				if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) {
					radeon_firevertices(rmesa);
				}
				migrate_image_to_miptree(t->mt, img, face, level);
			} else
				radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n");
		}
	}

	t->validated = GL_TRUE;

	return GL_TRUE;
}
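The LOD clamping done by calculate_min_max_lod() is not shown in these excerpts; a hypothetical simplification, assuming standard GL LOD semantics and Mesa's CLAMP macro (the real helper may differ):

	/* Sketch only: clamp the sampler's LOD range into the object's
	 * usable level range. */
	t->minLod = CLAMP(t->base.BaseLevel + (int)samp->MinLod,
			  t->base.BaseLevel, t->base._MaxLevel);
	t->maxLod = CLAMP(t->base.BaseLevel + (int)(samp->MaxLod + 0.5),
			  t->minLod, t->base._MaxLevel);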
Example #18
/**
 * Update a subregion of the given texture image.
 */
static void radeon_store_teximage(GLcontext* ctx, int dims,
		GLint xoffset, GLint yoffset, GLint zoffset,
		GLsizei width, GLsizei height, GLsizei depth,
		GLsizei imageSize,
		GLenum format, GLenum type,
		const GLvoid * pixels,
		const struct gl_pixelstore_attrib *packing,
		struct gl_texture_object *texObj,
		struct gl_texture_image *texImage,
		int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);

	GLuint dstRowStride;
	GLuint *dstImageOffsets;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p, tex %p, image %p) compressed %d\n",
			__func__, ctx, texObj, texImage, compressed);

	if (image->mt) {
		dstRowStride = image->mt->levels[image->mtlevel].rowstride;
	} else if (t->bo) {
		/* TFP case */
		dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0);
	} else {
		dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
	}

	assert(dstRowStride);

	if (dims == 3) {
		unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat);
		dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth);
		if (!dstImageOffsets) {
			radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__);
			return;
		}
	} else {
		dstImageOffsets = texImage->ImageOffsets;
	}

	radeon_teximage_map(image, GL_TRUE);

	if (compressed) {
		uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height;
		GLubyte *img_start;

		_mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height);

		if (!image->mt) {
			dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width);
			img_start = _mesa_compressed_image_address(xoffset, yoffset, 0,
									texImage->TexFormat,
									texImage->Width, texImage->Data);
		}
		else {
			uint32_t offset;
			offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width;
			offset *= _mesa_get_format_bytes(texImage->TexFormat);
			img_start = texImage->Data + offset;
		}
		srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width);
		bytesPerRow = srcRowStride;
		rows = (height + block_height - 1) / block_height;

		copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow);
	}
	else {
		if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
					texImage->TexFormat, texImage->Data,
					xoffset, yoffset, zoffset,
					dstRowStride,
					dstImageOffsets,
					width, height, depth,
					format, type, pixels, packing)) {
			_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
		}
	}

	if (dims == 3) {
		free(dstImageOffsets);
	}

	radeon_teximage_unmap(image);
}
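A worked example of the compressed destination offset computed above, again assuming an illustrative DXT1-like format (4x4 blocks, 8 bytes per block) and a 64-texel-wide image:

	/* dstRowStride = (64 / 4) blocks * 8 bytes = 128 bytes per block row. */
	/* xoffset = 16, yoffset = 8:                                          */
	/*   offset = 128 / 8 * 8 / 4 + 16 / 4 = 32 + 4 = 36 blocks            */
	/*   offset *= 8  =>  img_start = texImage->Data + 288 bytes.          */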
Example #19
/**
 * All glTexImage calls go through this function.
 */
static void radeon_teximage(
	GLcontext *ctx, int dims,
	GLenum target, GLint level,
	GLint internalFormat,
	GLint width, GLint height, GLint depth,
	GLsizei imageSize,
	GLenum format, GLenum type, const GLvoid * pixels,
	const struct gl_pixelstore_attrib *packing,
	struct gl_texture_object *texObj,
	struct gl_texture_image *texImage,
	int compressed)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj* t = radeon_tex_obj(texObj);
	radeon_texture_image* image = get_radeon_texture_image(texImage);
	GLint postConvWidth = width;
	GLint postConvHeight = height;
	GLuint face = _mesa_tex_target_to_face(target);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s %dd: texObj %p, texImage %p, face %d, level %d\n",
			__func__, dims, texObj, texImage, face, level);
	{
		struct radeon_bo *bo;
		bo = !image->mt ? image->bo : image->mt->bo;
		if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
				"%s Calling teximage for texture that is "
				"queued for GPU processing.\n",
				__func__);
			radeon_firevertices(rmesa);
		}
	}

	t->validated = GL_FALSE;

	if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
	       _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
						  &postConvHeight);
	}

	if (!_mesa_is_format_compressed(texImage->TexFormat)) {
		GLuint texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
		/* Minimum pitch of 32 bytes */
		if (postConvWidth * texelBytes < 32) {
			postConvWidth = 32 / texelBytes;
			texImage->RowStride = postConvWidth;
		}
		if (!image->mt) {
			assert(texImage->RowStride == postConvWidth);
		}
	}

	/* Mesa core only clears texImage->Data but not image->mt */
	radeonFreeTexImageData(ctx, texImage);

	if (!t->bo) {
		teximage_assign_miptree(rmesa, texObj, texImage, face, level);
		if (!image->mt) {
			int size = _mesa_format_image_size(texImage->TexFormat,
								texImage->Width,
								texImage->Height,
								texImage->Depth);
			texImage->Data = _mesa_alloc_texmemory(size);
			radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
					"%s %dd: texObj %p, texImage %p, "
					" no miptree assigned, using local memory %p\n",
					__func__, dims, texObj, texImage, texImage->Data);
		}
	}

	/* Upload texture image; note that the spec allows pixels to be NULL */
	if (compressed) {
		pixels = _mesa_validate_pbo_compressed_teximage(
			ctx, imageSize, pixels, packing, "glCompressedTexImage");
	} else {
		pixels = _mesa_validate_pbo_teximage(
			ctx, dims, width, height, depth,
			format, type, pixels, packing, "glTexImage");
	}

	if (pixels) {
		radeon_store_teximage(ctx, dims,
			0, 0, 0,
			width, height, depth,
			imageSize, format, type,
			pixels, packing,
			texObj, texImage,
			compressed);
	}

	_mesa_unmap_teximage_pbo(ctx, packing);
}
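A worked example of the 32-byte minimum-pitch rule above:

	/* width = 4, RGBA8888 => texelBytes = 4.                    */
	/* 4 * 4 = 16 bytes < 32, so postConvWidth = 32 / 4 = 8 and  */
	/* texImage->RowStride = 8 texels, i.e. a 32-byte pitch.     */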
Example #20
void r300SetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint glx_texture_format, __DRIdrawable *dPriv)
{
	struct gl_texture_unit *texUnit;
	struct gl_texture_object *texObj;
	struct gl_texture_image *texImage;
	struct radeon_renderbuffer *rb;
	radeon_texture_image *rImage;
	radeonContextPtr radeon;
	r300ContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	radeonTexObjPtr t;
	uint32_t pitch_val;
	uint32_t internalFormat, type, format;

	type = GL_BGRA;
	format = GL_UNSIGNED_BYTE;
	internalFormat = (glx_texture_format == GLX_TEXTURE_FORMAT_RGB_EXT ? 3 : 4);

	radeon = pDRICtx->driverPrivate;
	rmesa = pDRICtx->driverPrivate;

	rfb = dPriv->driverPrivate;
        texUnit = &radeon->glCtx->Texture.Unit[radeon->glCtx->Texture.CurrentUnit];
	texObj = _mesa_select_tex_object(radeon->glCtx, texUnit, target);
        texImage = _mesa_get_tex_image(radeon->glCtx, texObj, target, 0);

	rImage = get_radeon_texture_image(texImage);
	t = radeon_tex_obj(texObj);
	if (t == NULL) {
		return;
	}

	radeon_update_renderbuffers(pDRICtx, dPriv);
	/* back & depth buffer are useless free them right away */
	rb = (void*)rfb->base.Attachment[BUFFER_DEPTH].Renderbuffer;
	if (rb && rb->bo) {
		radeon_bo_unref(rb->bo);
		rb->bo = NULL;
	}
	rb = (void*)rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer;
	if (rb && rb->bo) {
		radeon_bo_unref(rb->bo);
		rb->bo = NULL;
	}
	rb = rfb->color_rb[0];
	if (rb->bo == NULL) {
		/* Failed to get a BO for the buffer */
		return;
	}
	
	_mesa_lock_texture(radeon->glCtx, texObj);
	if (t->bo) {
		radeon_bo_unref(t->bo);
		t->bo = NULL;
	}
	if (rImage->bo) {
		radeon_bo_unref(rImage->bo);
		rImage->bo = NULL;
	}

	radeon_miptree_unreference(&t->mt);
	radeon_miptree_unreference(&rImage->mt);

	_mesa_init_teximage_fields(radeon->glCtx, target, texImage,
				   rb->base.Width, rb->base.Height, 1, 0, rb->cpp);
	texImage->RowStride = rb->pitch / rb->cpp;
	rImage->bo = rb->bo;
	radeon_bo_ref(rImage->bo);
	t->bo = rb->bo;
	radeon_bo_ref(t->bo);
	t->tile_bits = 0;
	t->image_override = GL_TRUE;
	t->override_offset = 0;
	t->pp_txpitch &= (1 << 13) -1;
	pitch_val = rb->pitch;
	switch (rb->cpp) {
	case 4:
		if (glx_texture_format == GLX_TEXTURE_FORMAT_RGB_EXT)
			t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, ONE, W8Z8Y8X8);
		else
			t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8);
		t->pp_txfilter |= tx_table[2].filter;
		pitch_val /= 4;
		break;
	case 3:
	default:
		t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, ONE, W8Z8Y8X8);
		t->pp_txfilter |= tx_table[4].filter;
		pitch_val /= 4;
		break;
	case 2:
		t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, ONE, Z5Y6X5);
		t->pp_txfilter |= tx_table[5].filter;
		pitch_val /= 2;
		break;
	}
	pitch_val--;
	t->pp_txsize = (((R300_TX_WIDTHMASK_MASK & ((rb->base.Width - 1) << R300_TX_WIDTHMASK_SHIFT)))
			| ((R300_TX_HEIGHTMASK_MASK & ((rb->base.Height - 1) << R300_TX_HEIGHTMASK_SHIFT))));
	t->pp_txsize |= R300_TX_SIZE_TXPITCH_EN;
	t->pp_txpitch |= pitch_val;

	if (rmesa->radeon.radeonScreen->chip_family >= CHIP_FAMILY_RV515) {
		if (rb->base.Width > 2048)
			t->pp_txpitch |= R500_TXWIDTH_BIT11;
		else
			t->pp_txpitch &= ~R500_TXWIDTH_BIT11;
		if (rb->base.Height > 2048)
			t->pp_txpitch |= R500_TXHEIGHT_BIT11;
		else
			t->pp_txpitch &= ~R500_TXHEIGHT_BIT11;
	}
	t->validated = GL_TRUE;
	_mesa_unlock_texture(radeon->glCtx, texObj);
	return;
}
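A worked example of the pitch programming above, with illustrative numbers:

	/* A 1024-pixel-wide BGRA buffer: rb->cpp = 4,                */
	/* rb->pitch = 1024 * 4 = 4096 bytes.                        */
	/* pitch_val = 4096 / 4 = 1024 texels; pitch_val-- => 1023,  */
	/* i.e. the hardware field holds pitch-in-texels minus one.  */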