/**
 * Map texture memory/buffer into user space so the CPU can read/write texels.
 *
 * Note: the region-of-interest parameters (\p w, \p h) are ignored here; the
 * whole slice is mapped and \p map is advanced to (\p x, \p y) at the end.
 *
 * \param slice   which 2D slice of a 3D/array image to map
 * \param x,y     origin of the region of interest, in texels
 * \param mode    GL_MAP_WRITE_BIT selects a writable mapping
 * \param map     returns a pointer to the start of the region of interest
 * \param stride  returns the row stride in bytes
 */
static void radeon_map_texture_image(struct gl_context *ctx,
				     struct gl_texture_image *texImage,
				     GLuint slice,
				     GLuint x, GLuint y, GLuint w, GLuint h,
				     GLbitfield mode,
				     GLubyte **map,
				     GLint *stride)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeon_texture_image *image = get_radeon_texture_image(texImage);
	radeon_mipmap_tree *mt = image->mt;
	GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
	GLuint width = texImage->Width;
	GLuint height = texImage->Height;
	/* Storage lives either in a standalone BO (TFP) or in the miptree's BO. */
	struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
	unsigned int bw, bh;
	GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;

	/* For block-compressed formats, convert the y coordinate to block rows
	 * and fold the block width into texel_size so the final
	 * `x * texel_size` addresses whole blocks.  x itself must stay in
	 * texel units for that to work. */
	_mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
	assert(y % bh == 0);
	y /= bh;
	texel_size /= bw;

	/* If the GPU still has pending commands touching this BO, flush them
	 * before mapping so we don't race the hardware. */
	if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			     "%s for texture that is "
			     "queued for GPU processing.\n",
			     __func__);
		radeon_firevertices(rmesa);
	}

	if (image->bo) {
		/* TFP case: image has its own BO (texture-from-pixmap). */
		radeon_bo_map(image->bo, write);
		*stride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0, texImage->TexObject->Target);
		*map = bo->ptr;
	} else if (likely(mt)) {
		/* Normal case: image lives inside a miptree allocation. */
		void *base;
		radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];

		radeon_bo_map(mt->bo, write);
		base = mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;

		*stride = lvl->rowstride;
		/* Slices are stacked `height` rows apart within the level. */
		*map = base + (slice * height) * *stride;
	} else {
		/* Texture data is in malloc'd memory (no BO yet). */
		assert(map);

		*stride = _mesa_format_row_stride(texImage->TexFormat, width);
		*map = image->base.Buffer + (slice * height) * *stride;
	}

	/* Advance to the requested origin (y is already in block rows). */
	*map += y * *stride + x * texel_size;
}
/**
 * Compute size, row stride and placement for one mipmap image
 * (selected by \p face and \p level) within the miptree layout.
 *
 * \param curOffset in/out: byte offset at which the image is stored;
 *        advanced past the image's size on return.
 */
static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree *mt,
	GLuint face, GLuint level, GLuint* curOffset)
{
	radeon_mipmap_level *mip = &mt->levels[level];

	/* Sizing uses the next power-of-two height of this level. */
	const GLuint pot_height = _mesa_next_pow_two_32(mip->height);

	mip->rowstride = get_texture_image_row_stride(rmesa, mt->mesaFormat,
						      mip->width, mt->tilebits,
						      mt->target);
	mip->size = get_texture_image_size(mt->mesaFormat, mip->rowstride,
					   pot_height, mip->depth,
					   mt->tilebits);
	assert(mip->size > 0);

	/* Place the image at the running offset, then advance the cursor. */
	mip->faces[face].offset = *curOffset;
	*curOffset += mip->size;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) level %d, face %d: rs:%d %dx%d at %d\n",
			__func__, rmesa, level, face,
			mip->rowstride, mip->width, pot_height,
			mip->faces[face].offset);
}
/** * Update a subregion of the given texture image. */ static void radeon_store_teximage(GLcontext* ctx, int dims, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei imageSize, GLenum format, GLenum type, const GLvoid * pixels, const struct gl_pixelstore_attrib *packing, struct gl_texture_object *texObj, struct gl_texture_image *texImage, int compressed) { radeonContextPtr rmesa = RADEON_CONTEXT(ctx); radeonTexObj *t = radeon_tex_obj(texObj); radeon_texture_image* image = get_radeon_texture_image(texImage); GLuint dstRowStride; GLuint *dstImageOffsets; radeon_print(RADEON_TEXTURE, RADEON_TRACE, "%s(%p, tex %p, image %p) compressed %d\n", __func__, ctx, texObj, texImage, compressed); if (image->mt) { dstRowStride = image->mt->levels[image->mtlevel].rowstride; } else if (t->bo) { /* TFP case */ dstRowStride = get_texture_image_row_stride(rmesa, texImage->TexFormat, width, 0); } else { dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width); } assert(dstRowStride); if (dims == 3) { unsigned alignedWidth = dstRowStride/_mesa_get_format_bytes(texImage->TexFormat); dstImageOffsets = allocate_image_offsets(ctx, alignedWidth, texImage->Height, texImage->Depth); if (!dstImageOffsets) { radeon_warning("%s Failed to allocate dstImaeOffset.\n", __func__); return; } } else { dstImageOffsets = texImage->ImageOffsets; } radeon_teximage_map(image, GL_TRUE); if (compressed) { uint32_t srcRowStride, bytesPerRow, rows, block_width, block_height; GLubyte *img_start; _mesa_get_format_block_size(texImage->TexFormat, &block_width, &block_height); if (!image->mt) { dstRowStride = _mesa_format_row_stride(texImage->TexFormat, texImage->Width); img_start = _mesa_compressed_image_address(xoffset, yoffset, 0, texImage->TexFormat, texImage->Width, texImage->Data); } else { uint32_t offset; offset = dstRowStride / _mesa_get_format_bytes(texImage->TexFormat) * yoffset / block_height + xoffset / block_width; offset *= 
_mesa_get_format_bytes(texImage->TexFormat); img_start = texImage->Data + offset; } srcRowStride = _mesa_format_row_stride(texImage->TexFormat, width); bytesPerRow = srcRowStride; rows = (height + block_height - 1) / block_height; copy_rows(img_start, dstRowStride, pixels, srcRowStride, rows, bytesPerRow); } else { if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat, texImage->TexFormat, texImage->Data, xoffset, yoffset, zoffset, dstRowStride, dstImageOffsets, width, height, depth, format, type, pixels, packing)) { _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage"); } } if (dims == 3) { free(dstImageOffsets); } radeon_teximage_unmap(image); }
/**
 * Try to implement glReadPixels() with a hardware blit from the current
 * read color buffer into either a temporary GTT buffer (then copied to
 * \p pixels) or directly into a bound PBO.
 *
 * \return GL_TRUE if the readback was fully handled here (including the
 *         everything-clipped-away case); GL_FALSE if the caller must fall
 *         back to the software path.
 */
static GLboolean do_blit_readpixels(struct gl_context * ctx,
				    GLint x, GLint y,
				    GLsizei width, GLsizei height,
				    GLenum format, GLenum type,
				    const struct gl_pixelstore_attrib *pack,
				    GLvoid * pixels)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	const struct radeon_renderbuffer *rrb = radeon_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
	const gl_format dst_format = gl_format_and_type_to_mesa_format(format, type);
	unsigned dst_rowstride, dst_imagesize, aligned_rowstride, flip_y;
	struct radeon_bo *dst_buffer;
	GLint dst_x = 0, dst_y = 0;
	intptr_t dst_offset;

	/* Hardware setup cost outweighs the blit for tiny transfers. */
	if (width * height < 100) {
		return GL_FALSE;
	}

	/* Need a blittable destination format and a blit vtable entry. */
	if (dst_format == MESA_FORMAT_NONE ||
	    !radeon->vtbl.check_blit(dst_format) || !radeon->vtbl.blit) {
		return GL_FALSE;
	}

	/* Pixel transfer ops and logic ops can't be done by the blitter. */
	if (ctx->_ImageTransferState || ctx->Color._LogicOpEnabled) {
		return GL_FALSE;
	}

	/* Byte swizzling isn't supported by this path either. */
	if (pack->SwapBytes || pack->LsbFirst) {
		return GL_FALSE;
	}

	/* Destination row length in pixels (PACK_ROW_LENGTH or the width). */
	if (pack->RowLength > 0) {
		dst_rowstride = pack->RowLength;
	} else {
		dst_rowstride = width;
	}

	/* Clip against the read buffer; if nothing is left there is nothing
	 * to do and the readpixels is complete. */
	if (!_mesa_clip_copytexsubimage(ctx, &dst_x, &dst_y, &x, &y,
					&width, &height)) {
		return GL_TRUE;
	}
	assert(x >= 0 && y >= 0);

	aligned_rowstride = get_texture_image_row_stride(radeon, dst_format,
							 dst_rowstride, 0);
	dst_rowstride *= _mesa_get_format_bytes(dst_format);
	/* When blitting straight into a PBO there is no fixup copy, so the
	 * hardware-aligned stride must match the requested stride exactly. */
	if (_mesa_is_bufferobj(pack->BufferObj) && aligned_rowstride != dst_rowstride)
		return GL_FALSE;
	dst_imagesize = get_texture_image_size(dst_format,
					       aligned_rowstride,
					       height, 1, 0);

	if (!_mesa_is_bufferobj(pack->BufferObj))
	{
		/* No PBO: blit into a temporary GTT buffer we own (and must
		 * unref on every exit path below). */
		dst_buffer = radeon_bo_open(radeon->radeonScreen->bom, 0, dst_imagesize, 1024, RADEON_GEM_DOMAIN_GTT, 0);
		dst_offset = 0;
	}
	else
	{
		/* PBO bound: blit directly into it; `pixels` is the offset
		 * within the buffer object, not a CPU pointer. */
		dst_buffer = get_radeon_buffer_object(pack->BufferObj)->bo;
		dst_offset = (intptr_t)pixels;
	}

	/* Disable source Y flipping for FBOs (winsys buffers are stored
	 * bottom-up; FBOs are top-down). */
	flip_y = (ctx->ReadBuffer->Name == 0);
	if (pack->Invert) {
		y = rrb->base.Height - height - y;
		flip_y = !flip_y;
	}

	if (radeon->vtbl.blit(ctx,
			      rrb->bo,
			      rrb->draw_offset,
			      rrb->base.Format,
			      rrb->pitch / rrb->cpp,
			      rrb->base.Width,
			      rrb->base.Height,
			      x,
			      y,
			      dst_buffer,
			      dst_offset,
			      dst_format,
			      aligned_rowstride / _mesa_get_format_bytes(dst_format),
			      width,
			      height,
			      0, /* dst_x */
			      0, /* dst_y */
			      width, height,
			      flip_y))
	{
		if (!_mesa_is_bufferobj(pack->BufferObj))
		{
			/* Copy from the temporary buffer into user memory,
			 * dropping the alignment padding, then release it. */
			radeon_bo_map(dst_buffer, 0);
			copy_rows(pixels, dst_rowstride, dst_buffer->ptr,
				  aligned_rowstride, height, dst_rowstride);
			radeon_bo_unmap(dst_buffer);
			radeon_bo_unref(dst_buffer);
		}
		return GL_TRUE;
	}

	/* Blit failed: free the temporary buffer (PBO path doesn't own one)
	 * and let the caller take the software fallback. */
	if (!_mesa_is_bufferobj(pack->BufferObj))
		radeon_bo_unref(dst_buffer);

	return GL_FALSE;
}